]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.2.2-3.0.4-201110080819.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-3.0.4-201110080819.patch
CommitLineData
66ccdfdc
PK
1diff -urNp linux-3.0.4/arch/alpha/include/asm/elf.h linux-3.0.4/arch/alpha/include/asm/elf.h
2--- linux-3.0.4/arch/alpha/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
3+++ linux-3.0.4/arch/alpha/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
4@@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8+#ifdef CONFIG_PAX_ASLR
9+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10+
11+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13+#endif
14+
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18diff -urNp linux-3.0.4/arch/alpha/include/asm/pgtable.h linux-3.0.4/arch/alpha/include/asm/pgtable.h
19--- linux-3.0.4/arch/alpha/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
20+++ linux-3.0.4/arch/alpha/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
21@@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25+
26+#ifdef CONFIG_PAX_PAGEEXEC
27+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30+#else
31+# define PAGE_SHARED_NOEXEC PAGE_SHARED
32+# define PAGE_COPY_NOEXEC PAGE_COPY
33+# define PAGE_READONLY_NOEXEC PAGE_READONLY
34+#endif
35+
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39diff -urNp linux-3.0.4/arch/alpha/kernel/module.c linux-3.0.4/arch/alpha/kernel/module.c
40--- linux-3.0.4/arch/alpha/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
41+++ linux-3.0.4/arch/alpha/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
42@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46- gp = (u64)me->module_core + me->core_size - 0x8000;
47+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51diff -urNp linux-3.0.4/arch/alpha/kernel/osf_sys.c linux-3.0.4/arch/alpha/kernel/osf_sys.c
52--- linux-3.0.4/arch/alpha/kernel/osf_sys.c 2011-07-21 22:17:23.000000000 -0400
53+++ linux-3.0.4/arch/alpha/kernel/osf_sys.c 2011-08-23 21:47:55.000000000 -0400
54@@ -1145,7 +1145,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58- if (!vma || addr + len <= vma->vm_start)
59+ if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63@@ -1181,6 +1181,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67+#ifdef CONFIG_PAX_RANDMMAP
68+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69+#endif
70+
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74@@ -1188,8 +1192,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79- len, limit);
80+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81+
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
85diff -urNp linux-3.0.4/arch/alpha/mm/fault.c linux-3.0.4/arch/alpha/mm/fault.c
86--- linux-3.0.4/arch/alpha/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
87+++ linux-3.0.4/arch/alpha/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
88@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92+#ifdef CONFIG_PAX_PAGEEXEC
93+/*
94+ * PaX: decide what to do with offenders (regs->pc = fault address)
95+ *
96+ * returns 1 when task should be killed
97+ * 2 when patched PLT trampoline was detected
98+ * 3 when unpatched PLT trampoline was detected
99+ */
100+static int pax_handle_fetch_fault(struct pt_regs *regs)
101+{
102+
103+#ifdef CONFIG_PAX_EMUPLT
104+ int err;
105+
106+ do { /* PaX: patched PLT emulation #1 */
107+ unsigned int ldah, ldq, jmp;
108+
109+ err = get_user(ldah, (unsigned int *)regs->pc);
110+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112+
113+ if (err)
114+ break;
115+
116+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118+ jmp == 0x6BFB0000U)
119+ {
120+ unsigned long r27, addr;
121+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123+
124+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125+ err = get_user(r27, (unsigned long *)addr);
126+ if (err)
127+ break;
128+
129+ regs->r27 = r27;
130+ regs->pc = r27;
131+ return 2;
132+ }
133+ } while (0);
134+
135+ do { /* PaX: patched PLT emulation #2 */
136+ unsigned int ldah, lda, br;
137+
138+ err = get_user(ldah, (unsigned int *)regs->pc);
139+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
140+ err |= get_user(br, (unsigned int *)(regs->pc+8));
141+
142+ if (err)
143+ break;
144+
145+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
147+ (br & 0xFFE00000U) == 0xC3E00000U)
148+ {
149+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152+
153+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155+ return 2;
156+ }
157+ } while (0);
158+
159+ do { /* PaX: unpatched PLT emulation */
160+ unsigned int br;
161+
162+ err = get_user(br, (unsigned int *)regs->pc);
163+
164+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165+ unsigned int br2, ldq, nop, jmp;
166+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167+
168+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169+ err = get_user(br2, (unsigned int *)addr);
170+ err |= get_user(ldq, (unsigned int *)(addr+4));
171+ err |= get_user(nop, (unsigned int *)(addr+8));
172+ err |= get_user(jmp, (unsigned int *)(addr+12));
173+ err |= get_user(resolver, (unsigned long *)(addr+16));
174+
175+ if (err)
176+ break;
177+
178+ if (br2 == 0xC3600000U &&
179+ ldq == 0xA77B000CU &&
180+ nop == 0x47FF041FU &&
181+ jmp == 0x6B7B0000U)
182+ {
183+ regs->r28 = regs->pc+4;
184+ regs->r27 = addr+16;
185+ regs->pc = resolver;
186+ return 3;
187+ }
188+ }
189+ } while (0);
190+#endif
191+
192+ return 1;
193+}
194+
195+void pax_report_insns(void *pc, void *sp)
196+{
197+ unsigned long i;
198+
199+ printk(KERN_ERR "PAX: bytes at PC: ");
200+ for (i = 0; i < 5; i++) {
201+ unsigned int c;
202+ if (get_user(c, (unsigned int *)pc+i))
203+ printk(KERN_CONT "???????? ");
204+ else
205+ printk(KERN_CONT "%08x ", c);
206+ }
207+ printk("\n");
208+}
209+#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217- if (!(vma->vm_flags & VM_EXEC))
218+ if (!(vma->vm_flags & VM_EXEC)) {
219+
220+#ifdef CONFIG_PAX_PAGEEXEC
221+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222+ goto bad_area;
223+
224+ up_read(&mm->mmap_sem);
225+ switch (pax_handle_fetch_fault(regs)) {
226+
227+#ifdef CONFIG_PAX_EMUPLT
228+ case 2:
229+ case 3:
230+ return;
231+#endif
232+
233+ }
234+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235+ do_group_exit(SIGKILL);
236+#else
237 goto bad_area;
238+#endif
239+
240+ }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
244diff -urNp linux-3.0.4/arch/arm/include/asm/elf.h linux-3.0.4/arch/arm/include/asm/elf.h
245--- linux-3.0.4/arch/arm/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
246+++ linux-3.0.4/arch/arm/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
247@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253+
254+#ifdef CONFIG_PAX_ASLR
255+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256+
257+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259+#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263@@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t
264 extern void elf_set_personality(const struct elf32_hdr *);
265 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
266
267-struct mm_struct;
268-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
269-#define arch_randomize_brk arch_randomize_brk
270-
271 extern int vectors_user_mapping(void);
272 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
273 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
274diff -urNp linux-3.0.4/arch/arm/include/asm/kmap_types.h linux-3.0.4/arch/arm/include/asm/kmap_types.h
275--- linux-3.0.4/arch/arm/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
276+++ linux-3.0.4/arch/arm/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
277@@ -21,6 +21,7 @@ enum km_type {
278 KM_L1_CACHE,
279 KM_L2_CACHE,
280 KM_KDB,
281+ KM_CLEARPAGE,
282 KM_TYPE_NR
283 };
284
285diff -urNp linux-3.0.4/arch/arm/include/asm/uaccess.h linux-3.0.4/arch/arm/include/asm/uaccess.h
286--- linux-3.0.4/arch/arm/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
287+++ linux-3.0.4/arch/arm/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
288@@ -22,6 +22,8 @@
289 #define VERIFY_READ 0
290 #define VERIFY_WRITE 1
291
292+extern void check_object_size(const void *ptr, unsigned long n, bool to);
293+
294 /*
295 * The exception table consists of pairs of addresses: the first is the
296 * address of an instruction that is allowed to fault, and the second is
297@@ -387,8 +389,23 @@ do { \
298
299
300 #ifdef CONFIG_MMU
301-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
302-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
303+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
304+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
305+
306+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
307+{
308+ if (!__builtin_constant_p(n))
309+ check_object_size(to, n, false);
310+ return ___copy_from_user(to, from, n);
311+}
312+
313+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
314+{
315+ if (!__builtin_constant_p(n))
316+ check_object_size(from, n, true);
317+ return ___copy_to_user(to, from, n);
318+}
319+
320 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
321 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
322 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
323@@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
324
325 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
326 {
327+ if ((long)n < 0)
328+ return n;
329+
330 if (access_ok(VERIFY_READ, from, n))
331 n = __copy_from_user(to, from, n);
332 else /* security hole - plug it */
333@@ -412,6 +432,9 @@ static inline unsigned long __must_check
334
335 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
336 {
337+ if ((long)n < 0)
338+ return n;
339+
340 if (access_ok(VERIFY_WRITE, to, n))
341 n = __copy_to_user(to, from, n);
342 return n;
343diff -urNp linux-3.0.4/arch/arm/kernel/armksyms.c linux-3.0.4/arch/arm/kernel/armksyms.c
344--- linux-3.0.4/arch/arm/kernel/armksyms.c 2011-07-21 22:17:23.000000000 -0400
345+++ linux-3.0.4/arch/arm/kernel/armksyms.c 2011-08-23 21:47:55.000000000 -0400
346@@ -98,8 +98,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
347 #ifdef CONFIG_MMU
348 EXPORT_SYMBOL(copy_page);
349
350-EXPORT_SYMBOL(__copy_from_user);
351-EXPORT_SYMBOL(__copy_to_user);
352+EXPORT_SYMBOL(___copy_from_user);
353+EXPORT_SYMBOL(___copy_to_user);
354 EXPORT_SYMBOL(__clear_user);
355
356 EXPORT_SYMBOL(__get_user_1);
357diff -urNp linux-3.0.4/arch/arm/kernel/process.c linux-3.0.4/arch/arm/kernel/process.c
358--- linux-3.0.4/arch/arm/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
359+++ linux-3.0.4/arch/arm/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
360@@ -28,7 +28,6 @@
361 #include <linux/tick.h>
362 #include <linux/utsname.h>
363 #include <linux/uaccess.h>
364-#include <linux/random.h>
365 #include <linux/hw_breakpoint.h>
366
367 #include <asm/cacheflush.h>
368@@ -479,12 +478,6 @@ unsigned long get_wchan(struct task_stru
369 return 0;
370 }
371
372-unsigned long arch_randomize_brk(struct mm_struct *mm)
373-{
374- unsigned long range_end = mm->brk + 0x02000000;
375- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
376-}
377-
378 #ifdef CONFIG_MMU
379 /*
380 * The vectors page is always readable from user space for the
381diff -urNp linux-3.0.4/arch/arm/kernel/traps.c linux-3.0.4/arch/arm/kernel/traps.c
382--- linux-3.0.4/arch/arm/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
383+++ linux-3.0.4/arch/arm/kernel/traps.c 2011-08-23 21:48:14.000000000 -0400
384@@ -257,6 +257,8 @@ static int __die(const char *str, int er
385
386 static DEFINE_SPINLOCK(die_lock);
387
388+extern void gr_handle_kernel_exploit(void);
389+
390 /*
391 * This function is protected against re-entrancy.
392 */
393@@ -284,6 +286,9 @@ void die(const char *str, struct pt_regs
394 panic("Fatal exception in interrupt");
395 if (panic_on_oops)
396 panic("Fatal exception");
397+
398+ gr_handle_kernel_exploit();
399+
400 if (ret != NOTIFY_STOP)
401 do_exit(SIGSEGV);
402 }
403diff -urNp linux-3.0.4/arch/arm/lib/copy_from_user.S linux-3.0.4/arch/arm/lib/copy_from_user.S
404--- linux-3.0.4/arch/arm/lib/copy_from_user.S 2011-07-21 22:17:23.000000000 -0400
405+++ linux-3.0.4/arch/arm/lib/copy_from_user.S 2011-08-23 21:47:55.000000000 -0400
406@@ -16,7 +16,7 @@
407 /*
408 * Prototype:
409 *
410- * size_t __copy_from_user(void *to, const void *from, size_t n)
411+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
412 *
413 * Purpose:
414 *
415@@ -84,11 +84,11 @@
416
417 .text
418
419-ENTRY(__copy_from_user)
420+ENTRY(___copy_from_user)
421
422 #include "copy_template.S"
423
424-ENDPROC(__copy_from_user)
425+ENDPROC(___copy_from_user)
426
427 .pushsection .fixup,"ax"
428 .align 0
429diff -urNp linux-3.0.4/arch/arm/lib/copy_to_user.S linux-3.0.4/arch/arm/lib/copy_to_user.S
430--- linux-3.0.4/arch/arm/lib/copy_to_user.S 2011-07-21 22:17:23.000000000 -0400
431+++ linux-3.0.4/arch/arm/lib/copy_to_user.S 2011-08-23 21:47:55.000000000 -0400
432@@ -16,7 +16,7 @@
433 /*
434 * Prototype:
435 *
436- * size_t __copy_to_user(void *to, const void *from, size_t n)
437+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
438 *
439 * Purpose:
440 *
441@@ -88,11 +88,11 @@
442 .text
443
444 ENTRY(__copy_to_user_std)
445-WEAK(__copy_to_user)
446+WEAK(___copy_to_user)
447
448 #include "copy_template.S"
449
450-ENDPROC(__copy_to_user)
451+ENDPROC(___copy_to_user)
452 ENDPROC(__copy_to_user_std)
453
454 .pushsection .fixup,"ax"
455diff -urNp linux-3.0.4/arch/arm/lib/uaccess.S linux-3.0.4/arch/arm/lib/uaccess.S
456--- linux-3.0.4/arch/arm/lib/uaccess.S 2011-07-21 22:17:23.000000000 -0400
457+++ linux-3.0.4/arch/arm/lib/uaccess.S 2011-08-23 21:47:55.000000000 -0400
458@@ -20,7 +20,7 @@
459
460 #define PAGE_SHIFT 12
461
462-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
463+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
464 * Purpose : copy a block to user memory from kernel memory
465 * Params : to - user memory
466 * : from - kernel memory
467@@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May f
468 sub r2, r2, ip
469 b .Lc2u_dest_aligned
470
471-ENTRY(__copy_to_user)
472+ENTRY(___copy_to_user)
473 stmfd sp!, {r2, r4 - r7, lr}
474 cmp r2, #4
475 blt .Lc2u_not_enough
476@@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May f
477 ldrgtb r3, [r1], #0
478 USER( T(strgtb) r3, [r0], #1) @ May fault
479 b .Lc2u_finished
480-ENDPROC(__copy_to_user)
481+ENDPROC(___copy_to_user)
482
483 .pushsection .fixup,"ax"
484 .align 0
485 9001: ldmfd sp!, {r0, r4 - r7, pc}
486 .popsection
487
488-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
489+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
490 * Purpose : copy a block from user memory to kernel memory
491 * Params : to - kernel memory
492 * : from - user memory
493@@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May f
494 sub r2, r2, ip
495 b .Lcfu_dest_aligned
496
497-ENTRY(__copy_from_user)
498+ENTRY(___copy_from_user)
499 stmfd sp!, {r0, r2, r4 - r7, lr}
500 cmp r2, #4
501 blt .Lcfu_not_enough
502@@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May f
503 USER( T(ldrgtb) r3, [r1], #1) @ May fault
504 strgtb r3, [r0], #1
505 b .Lcfu_finished
506-ENDPROC(__copy_from_user)
507+ENDPROC(___copy_from_user)
508
509 .pushsection .fixup,"ax"
510 .align 0
511diff -urNp linux-3.0.4/arch/arm/lib/uaccess_with_memcpy.c linux-3.0.4/arch/arm/lib/uaccess_with_memcpy.c
512--- linux-3.0.4/arch/arm/lib/uaccess_with_memcpy.c 2011-07-21 22:17:23.000000000 -0400
513+++ linux-3.0.4/arch/arm/lib/uaccess_with_memcpy.c 2011-08-23 21:47:55.000000000 -0400
514@@ -103,7 +103,7 @@ out:
515 }
516
517 unsigned long
518-__copy_to_user(void __user *to, const void *from, unsigned long n)
519+___copy_to_user(void __user *to, const void *from, unsigned long n)
520 {
521 /*
522 * This test is stubbed out of the main function above to keep
523diff -urNp linux-3.0.4/arch/arm/mach-ux500/mbox-db5500.c linux-3.0.4/arch/arm/mach-ux500/mbox-db5500.c
524--- linux-3.0.4/arch/arm/mach-ux500/mbox-db5500.c 2011-07-21 22:17:23.000000000 -0400
525+++ linux-3.0.4/arch/arm/mach-ux500/mbox-db5500.c 2011-08-23 21:48:14.000000000 -0400
526@@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct dev
527 return sprintf(buf, "0x%X\n", mbox_value);
528 }
529
530-static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
531+static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
532
533 static int mbox_show(struct seq_file *s, void *data)
534 {
535diff -urNp linux-3.0.4/arch/arm/mm/fault.c linux-3.0.4/arch/arm/mm/fault.c
536--- linux-3.0.4/arch/arm/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
537+++ linux-3.0.4/arch/arm/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
538@@ -182,6 +182,13 @@ __do_user_fault(struct task_struct *tsk,
539 }
540 #endif
541
542+#ifdef CONFIG_PAX_PAGEEXEC
543+ if (fsr & FSR_LNX_PF) {
544+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
545+ do_group_exit(SIGKILL);
546+ }
547+#endif
548+
549 tsk->thread.address = addr;
550 tsk->thread.error_code = fsr;
551 tsk->thread.trap_no = 14;
552@@ -379,6 +386,33 @@ do_page_fault(unsigned long addr, unsign
553 }
554 #endif /* CONFIG_MMU */
555
556+#ifdef CONFIG_PAX_PAGEEXEC
557+void pax_report_insns(void *pc, void *sp)
558+{
559+ long i;
560+
561+ printk(KERN_ERR "PAX: bytes at PC: ");
562+ for (i = 0; i < 20; i++) {
563+ unsigned char c;
564+ if (get_user(c, (__force unsigned char __user *)pc+i))
565+ printk(KERN_CONT "?? ");
566+ else
567+ printk(KERN_CONT "%02x ", c);
568+ }
569+ printk("\n");
570+
571+ printk(KERN_ERR "PAX: bytes at SP-4: ");
572+ for (i = -1; i < 20; i++) {
573+ unsigned long c;
574+ if (get_user(c, (__force unsigned long __user *)sp+i))
575+ printk(KERN_CONT "???????? ");
576+ else
577+ printk(KERN_CONT "%08lx ", c);
578+ }
579+ printk("\n");
580+}
581+#endif
582+
583 /*
584 * First Level Translation Fault Handler
585 *
586diff -urNp linux-3.0.4/arch/arm/mm/mmap.c linux-3.0.4/arch/arm/mm/mmap.c
587--- linux-3.0.4/arch/arm/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
588+++ linux-3.0.4/arch/arm/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
589@@ -65,6 +65,10 @@ arch_get_unmapped_area(struct file *filp
590 if (len > TASK_SIZE)
591 return -ENOMEM;
592
593+#ifdef CONFIG_PAX_RANDMMAP
594+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
595+#endif
596+
597 if (addr) {
598 if (do_align)
599 addr = COLOUR_ALIGN(addr, pgoff);
600@@ -72,15 +76,14 @@ arch_get_unmapped_area(struct file *filp
601 addr = PAGE_ALIGN(addr);
602
603 vma = find_vma(mm, addr);
604- if (TASK_SIZE - len >= addr &&
605- (!vma || addr + len <= vma->vm_start))
606+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
607 return addr;
608 }
609 if (len > mm->cached_hole_size) {
610- start_addr = addr = mm->free_area_cache;
611+ start_addr = addr = mm->free_area_cache;
612 } else {
613- start_addr = addr = TASK_UNMAPPED_BASE;
614- mm->cached_hole_size = 0;
615+ start_addr = addr = mm->mmap_base;
616+ mm->cached_hole_size = 0;
617 }
618 /* 8 bits of randomness in 20 address space bits */
619 if ((current->flags & PF_RANDOMIZE) &&
620@@ -100,14 +103,14 @@ full_search:
621 * Start a new search - just in case we missed
622 * some holes.
623 */
624- if (start_addr != TASK_UNMAPPED_BASE) {
625- start_addr = addr = TASK_UNMAPPED_BASE;
626+ if (start_addr != mm->mmap_base) {
627+ start_addr = addr = mm->mmap_base;
628 mm->cached_hole_size = 0;
629 goto full_search;
630 }
631 return -ENOMEM;
632 }
633- if (!vma || addr + len <= vma->vm_start) {
634+ if (check_heap_stack_gap(vma, addr, len)) {
635 /*
636 * Remember the place where we stopped the search:
637 */
638diff -urNp linux-3.0.4/arch/avr32/include/asm/elf.h linux-3.0.4/arch/avr32/include/asm/elf.h
639--- linux-3.0.4/arch/avr32/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
640+++ linux-3.0.4/arch/avr32/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
641@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpreg
642 the loader. We need to make sure that it is out of the way of the program
643 that it will "exec", and that there is sufficient room for the brk. */
644
645-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
646+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
647
648+#ifdef CONFIG_PAX_ASLR
649+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
650+
651+#define PAX_DELTA_MMAP_LEN 15
652+#define PAX_DELTA_STACK_LEN 15
653+#endif
654
655 /* This yields a mask that user programs can use to figure out what
656 instruction set this CPU supports. This could be done in user space,
657diff -urNp linux-3.0.4/arch/avr32/include/asm/kmap_types.h linux-3.0.4/arch/avr32/include/asm/kmap_types.h
658--- linux-3.0.4/arch/avr32/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
659+++ linux-3.0.4/arch/avr32/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
660@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
661 D(11) KM_IRQ1,
662 D(12) KM_SOFTIRQ0,
663 D(13) KM_SOFTIRQ1,
664-D(14) KM_TYPE_NR
665+D(14) KM_CLEARPAGE,
666+D(15) KM_TYPE_NR
667 };
668
669 #undef D
670diff -urNp linux-3.0.4/arch/avr32/mm/fault.c linux-3.0.4/arch/avr32/mm/fault.c
671--- linux-3.0.4/arch/avr32/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
672+++ linux-3.0.4/arch/avr32/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
673@@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
674
675 int exception_trace = 1;
676
677+#ifdef CONFIG_PAX_PAGEEXEC
678+void pax_report_insns(void *pc, void *sp)
679+{
680+ unsigned long i;
681+
682+ printk(KERN_ERR "PAX: bytes at PC: ");
683+ for (i = 0; i < 20; i++) {
684+ unsigned char c;
685+ if (get_user(c, (unsigned char *)pc+i))
686+ printk(KERN_CONT "???????? ");
687+ else
688+ printk(KERN_CONT "%02x ", c);
689+ }
690+ printk("\n");
691+}
692+#endif
693+
694 /*
695 * This routine handles page faults. It determines the address and the
696 * problem, and then passes it off to one of the appropriate routines.
697@@ -156,6 +173,16 @@ bad_area:
698 up_read(&mm->mmap_sem);
699
700 if (user_mode(regs)) {
701+
702+#ifdef CONFIG_PAX_PAGEEXEC
703+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
704+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
705+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
706+ do_group_exit(SIGKILL);
707+ }
708+ }
709+#endif
710+
711 if (exception_trace && printk_ratelimit())
712 printk("%s%s[%d]: segfault at %08lx pc %08lx "
713 "sp %08lx ecr %lu\n",
714diff -urNp linux-3.0.4/arch/frv/include/asm/kmap_types.h linux-3.0.4/arch/frv/include/asm/kmap_types.h
715--- linux-3.0.4/arch/frv/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
716+++ linux-3.0.4/arch/frv/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
717@@ -23,6 +23,7 @@ enum km_type {
718 KM_IRQ1,
719 KM_SOFTIRQ0,
720 KM_SOFTIRQ1,
721+ KM_CLEARPAGE,
722 KM_TYPE_NR
723 };
724
725diff -urNp linux-3.0.4/arch/frv/mm/elf-fdpic.c linux-3.0.4/arch/frv/mm/elf-fdpic.c
726--- linux-3.0.4/arch/frv/mm/elf-fdpic.c 2011-07-21 22:17:23.000000000 -0400
727+++ linux-3.0.4/arch/frv/mm/elf-fdpic.c 2011-08-23 21:47:55.000000000 -0400
728@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
729 if (addr) {
730 addr = PAGE_ALIGN(addr);
731 vma = find_vma(current->mm, addr);
732- if (TASK_SIZE - len >= addr &&
733- (!vma || addr + len <= vma->vm_start))
734+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
735 goto success;
736 }
737
738@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
739 for (; vma; vma = vma->vm_next) {
740 if (addr > limit)
741 break;
742- if (addr + len <= vma->vm_start)
743+ if (check_heap_stack_gap(vma, addr, len))
744 goto success;
745 addr = vma->vm_end;
746 }
747@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
748 for (; vma; vma = vma->vm_next) {
749 if (addr > limit)
750 break;
751- if (addr + len <= vma->vm_start)
752+ if (check_heap_stack_gap(vma, addr, len))
753 goto success;
754 addr = vma->vm_end;
755 }
756diff -urNp linux-3.0.4/arch/ia64/include/asm/elf.h linux-3.0.4/arch/ia64/include/asm/elf.h
757--- linux-3.0.4/arch/ia64/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
758+++ linux-3.0.4/arch/ia64/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
759@@ -42,6 +42,13 @@
760 */
761 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
762
763+#ifdef CONFIG_PAX_ASLR
764+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
765+
766+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
767+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
768+#endif
769+
770 #define PT_IA_64_UNWIND 0x70000001
771
772 /* IA-64 relocations: */
773diff -urNp linux-3.0.4/arch/ia64/include/asm/pgtable.h linux-3.0.4/arch/ia64/include/asm/pgtable.h
774--- linux-3.0.4/arch/ia64/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
775+++ linux-3.0.4/arch/ia64/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
776@@ -12,7 +12,7 @@
777 * David Mosberger-Tang <davidm@hpl.hp.com>
778 */
779
780-
781+#include <linux/const.h>
782 #include <asm/mman.h>
783 #include <asm/page.h>
784 #include <asm/processor.h>
785@@ -143,6 +143,17 @@
786 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
787 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
788 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
789+
790+#ifdef CONFIG_PAX_PAGEEXEC
791+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
792+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
793+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
794+#else
795+# define PAGE_SHARED_NOEXEC PAGE_SHARED
796+# define PAGE_READONLY_NOEXEC PAGE_READONLY
797+# define PAGE_COPY_NOEXEC PAGE_COPY
798+#endif
799+
800 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
801 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
802 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
803diff -urNp linux-3.0.4/arch/ia64/include/asm/spinlock.h linux-3.0.4/arch/ia64/include/asm/spinlock.h
804--- linux-3.0.4/arch/ia64/include/asm/spinlock.h 2011-07-21 22:17:23.000000000 -0400
805+++ linux-3.0.4/arch/ia64/include/asm/spinlock.h 2011-08-23 21:47:55.000000000 -0400
806@@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
807 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
808
809 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
810- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
811+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
812 }
813
814 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
815diff -urNp linux-3.0.4/arch/ia64/include/asm/uaccess.h linux-3.0.4/arch/ia64/include/asm/uaccess.h
816--- linux-3.0.4/arch/ia64/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
817+++ linux-3.0.4/arch/ia64/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
818@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
819 const void *__cu_from = (from); \
820 long __cu_len = (n); \
821 \
822- if (__access_ok(__cu_to, __cu_len, get_fs())) \
823+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
824 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
825 __cu_len; \
826 })
827@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
828 long __cu_len = (n); \
829 \
830 __chk_user_ptr(__cu_from); \
831- if (__access_ok(__cu_from, __cu_len, get_fs())) \
832+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
833 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
834 __cu_len; \
835 })
836diff -urNp linux-3.0.4/arch/ia64/kernel/module.c linux-3.0.4/arch/ia64/kernel/module.c
837--- linux-3.0.4/arch/ia64/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
838+++ linux-3.0.4/arch/ia64/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
839@@ -315,8 +315,7 @@ module_alloc (unsigned long size)
840 void
841 module_free (struct module *mod, void *module_region)
842 {
843- if (mod && mod->arch.init_unw_table &&
844- module_region == mod->module_init) {
845+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
846 unw_remove_unwind_table(mod->arch.init_unw_table);
847 mod->arch.init_unw_table = NULL;
848 }
849@@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
850 }
851
852 static inline int
853+in_init_rx (const struct module *mod, uint64_t addr)
854+{
855+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
856+}
857+
858+static inline int
859+in_init_rw (const struct module *mod, uint64_t addr)
860+{
861+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
862+}
863+
864+static inline int
865 in_init (const struct module *mod, uint64_t addr)
866 {
867- return addr - (uint64_t) mod->module_init < mod->init_size;
868+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
869+}
870+
871+static inline int
872+in_core_rx (const struct module *mod, uint64_t addr)
873+{
874+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
875+}
876+
877+static inline int
878+in_core_rw (const struct module *mod, uint64_t addr)
879+{
880+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
881 }
882
883 static inline int
884 in_core (const struct module *mod, uint64_t addr)
885 {
886- return addr - (uint64_t) mod->module_core < mod->core_size;
887+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
888 }
889
890 static inline int
891@@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
892 break;
893
894 case RV_BDREL:
895- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
896+ if (in_init_rx(mod, val))
897+ val -= (uint64_t) mod->module_init_rx;
898+ else if (in_init_rw(mod, val))
899+ val -= (uint64_t) mod->module_init_rw;
900+ else if (in_core_rx(mod, val))
901+ val -= (uint64_t) mod->module_core_rx;
902+ else if (in_core_rw(mod, val))
903+ val -= (uint64_t) mod->module_core_rw;
904 break;
905
906 case RV_LTV:
907@@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
908 * addresses have been selected...
909 */
910 uint64_t gp;
911- if (mod->core_size > MAX_LTOFF)
912+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
913 /*
914 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
915 * at the end of the module.
916 */
917- gp = mod->core_size - MAX_LTOFF / 2;
918+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
919 else
920- gp = mod->core_size / 2;
921- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
922+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
923+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
924 mod->arch.gp = gp;
925 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
926 }
927diff -urNp linux-3.0.4/arch/ia64/kernel/sys_ia64.c linux-3.0.4/arch/ia64/kernel/sys_ia64.c
928--- linux-3.0.4/arch/ia64/kernel/sys_ia64.c 2011-07-21 22:17:23.000000000 -0400
929+++ linux-3.0.4/arch/ia64/kernel/sys_ia64.c 2011-08-23 21:47:55.000000000 -0400
930@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
931 if (REGION_NUMBER(addr) == RGN_HPAGE)
932 addr = 0;
933 #endif
934+
935+#ifdef CONFIG_PAX_RANDMMAP
936+ if (mm->pax_flags & MF_PAX_RANDMMAP)
937+ addr = mm->free_area_cache;
938+ else
939+#endif
940+
941 if (!addr)
942 addr = mm->free_area_cache;
943
944@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
945 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
946 /* At this point: (!vma || addr < vma->vm_end). */
947 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
948- if (start_addr != TASK_UNMAPPED_BASE) {
949+ if (start_addr != mm->mmap_base) {
950 /* Start a new search --- just in case we missed some holes. */
951- addr = TASK_UNMAPPED_BASE;
952+ addr = mm->mmap_base;
953 goto full_search;
954 }
955 return -ENOMEM;
956 }
957- if (!vma || addr + len <= vma->vm_start) {
958+ if (check_heap_stack_gap(vma, addr, len)) {
959 /* Remember the address where we stopped this search: */
960 mm->free_area_cache = addr + len;
961 return addr;
962diff -urNp linux-3.0.4/arch/ia64/kernel/vmlinux.lds.S linux-3.0.4/arch/ia64/kernel/vmlinux.lds.S
963--- linux-3.0.4/arch/ia64/kernel/vmlinux.lds.S 2011-07-21 22:17:23.000000000 -0400
964+++ linux-3.0.4/arch/ia64/kernel/vmlinux.lds.S 2011-08-23 21:47:55.000000000 -0400
965@@ -199,7 +199,7 @@ SECTIONS {
966 /* Per-cpu data: */
967 . = ALIGN(PERCPU_PAGE_SIZE);
968 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
969- __phys_per_cpu_start = __per_cpu_load;
970+ __phys_per_cpu_start = per_cpu_load;
971 /*
972 * ensure percpu data fits
973 * into percpu page size
974diff -urNp linux-3.0.4/arch/ia64/mm/fault.c linux-3.0.4/arch/ia64/mm/fault.c
975--- linux-3.0.4/arch/ia64/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
976+++ linux-3.0.4/arch/ia64/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
977@@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned
978 return pte_present(pte);
979 }
980
981+#ifdef CONFIG_PAX_PAGEEXEC
982+void pax_report_insns(void *pc, void *sp)
983+{
984+ unsigned long i;
985+
986+ printk(KERN_ERR "PAX: bytes at PC: ");
987+ for (i = 0; i < 8; i++) {
988+ unsigned int c;
989+ if (get_user(c, (unsigned int *)pc+i))
990+ printk(KERN_CONT "???????? ");
991+ else
992+ printk(KERN_CONT "%08x ", c);
993+ }
994+ printk("\n");
995+}
996+#endif
997+
998 void __kprobes
999 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1000 {
1001@@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long addres
1002 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1003 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1004
1005- if ((vma->vm_flags & mask) != mask)
1006+ if ((vma->vm_flags & mask) != mask) {
1007+
1008+#ifdef CONFIG_PAX_PAGEEXEC
1009+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1010+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1011+ goto bad_area;
1012+
1013+ up_read(&mm->mmap_sem);
1014+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1015+ do_group_exit(SIGKILL);
1016+ }
1017+#endif
1018+
1019 goto bad_area;
1020
1021+ }
1022+
1023 /*
1024 * If for any reason at all we couldn't handle the fault, make
1025 * sure we exit gracefully rather than endlessly redo the
1026diff -urNp linux-3.0.4/arch/ia64/mm/hugetlbpage.c linux-3.0.4/arch/ia64/mm/hugetlbpage.c
1027--- linux-3.0.4/arch/ia64/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
1028+++ linux-3.0.4/arch/ia64/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
1029@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(
1030 /* At this point: (!vmm || addr < vmm->vm_end). */
1031 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1032 return -ENOMEM;
1033- if (!vmm || (addr + len) <= vmm->vm_start)
1034+ if (check_heap_stack_gap(vmm, addr, len))
1035 return addr;
1036 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1037 }
1038diff -urNp linux-3.0.4/arch/ia64/mm/init.c linux-3.0.4/arch/ia64/mm/init.c
1039--- linux-3.0.4/arch/ia64/mm/init.c 2011-07-21 22:17:23.000000000 -0400
1040+++ linux-3.0.4/arch/ia64/mm/init.c 2011-08-23 21:47:55.000000000 -0400
1041@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1042 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1043 vma->vm_end = vma->vm_start + PAGE_SIZE;
1044 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1045+
1046+#ifdef CONFIG_PAX_PAGEEXEC
1047+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1048+ vma->vm_flags &= ~VM_EXEC;
1049+
1050+#ifdef CONFIG_PAX_MPROTECT
1051+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1052+ vma->vm_flags &= ~VM_MAYEXEC;
1053+#endif
1054+
1055+ }
1056+#endif
1057+
1058 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1059 down_write(&current->mm->mmap_sem);
1060 if (insert_vm_struct(current->mm, vma)) {
1061diff -urNp linux-3.0.4/arch/m32r/lib/usercopy.c linux-3.0.4/arch/m32r/lib/usercopy.c
1062--- linux-3.0.4/arch/m32r/lib/usercopy.c 2011-07-21 22:17:23.000000000 -0400
1063+++ linux-3.0.4/arch/m32r/lib/usercopy.c 2011-08-23 21:47:55.000000000 -0400
1064@@ -14,6 +14,9 @@
1065 unsigned long
1066 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1067 {
1068+ if ((long)n < 0)
1069+ return n;
1070+
1071 prefetch(from);
1072 if (access_ok(VERIFY_WRITE, to, n))
1073 __copy_user(to,from,n);
1074@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1075 unsigned long
1076 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1077 {
1078+ if ((long)n < 0)
1079+ return n;
1080+
1081 prefetchw(to);
1082 if (access_ok(VERIFY_READ, from, n))
1083 __copy_user_zeroing(to,from,n);
1084diff -urNp linux-3.0.4/arch/mips/include/asm/elf.h linux-3.0.4/arch/mips/include/asm/elf.h
1085--- linux-3.0.4/arch/mips/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1086+++ linux-3.0.4/arch/mips/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1087@@ -372,13 +372,16 @@ extern const char *__elf_platform;
1088 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1089 #endif
1090
1091+#ifdef CONFIG_PAX_ASLR
1092+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1093+
1094+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1095+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1096+#endif
1097+
1098 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1099 struct linux_binprm;
1100 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1101 int uses_interp);
1102
1103-struct mm_struct;
1104-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1105-#define arch_randomize_brk arch_randomize_brk
1106-
1107 #endif /* _ASM_ELF_H */
1108diff -urNp linux-3.0.4/arch/mips/include/asm/page.h linux-3.0.4/arch/mips/include/asm/page.h
1109--- linux-3.0.4/arch/mips/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
1110+++ linux-3.0.4/arch/mips/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
1111@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1112 #ifdef CONFIG_CPU_MIPS32
1113 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1114 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1115- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1116+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1117 #else
1118 typedef struct { unsigned long long pte; } pte_t;
1119 #define pte_val(x) ((x).pte)
1120diff -urNp linux-3.0.4/arch/mips/include/asm/system.h linux-3.0.4/arch/mips/include/asm/system.h
1121--- linux-3.0.4/arch/mips/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
1122+++ linux-3.0.4/arch/mips/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
1123@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1124 */
1125 #define __ARCH_WANT_UNLOCKED_CTXSW
1126
1127-extern unsigned long arch_align_stack(unsigned long sp);
1128+#define arch_align_stack(x) ((x) & ~0xfUL)
1129
1130 #endif /* _ASM_SYSTEM_H */
1131diff -urNp linux-3.0.4/arch/mips/kernel/binfmt_elfn32.c linux-3.0.4/arch/mips/kernel/binfmt_elfn32.c
1132--- linux-3.0.4/arch/mips/kernel/binfmt_elfn32.c 2011-07-21 22:17:23.000000000 -0400
1133+++ linux-3.0.4/arch/mips/kernel/binfmt_elfn32.c 2011-08-23 21:47:55.000000000 -0400
1134@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1135 #undef ELF_ET_DYN_BASE
1136 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1137
1138+#ifdef CONFIG_PAX_ASLR
1139+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1140+
1141+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1142+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1143+#endif
1144+
1145 #include <asm/processor.h>
1146 #include <linux/module.h>
1147 #include <linux/elfcore.h>
1148diff -urNp linux-3.0.4/arch/mips/kernel/binfmt_elfo32.c linux-3.0.4/arch/mips/kernel/binfmt_elfo32.c
1149--- linux-3.0.4/arch/mips/kernel/binfmt_elfo32.c 2011-07-21 22:17:23.000000000 -0400
1150+++ linux-3.0.4/arch/mips/kernel/binfmt_elfo32.c 2011-08-23 21:47:55.000000000 -0400
1151@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1152 #undef ELF_ET_DYN_BASE
1153 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1154
1155+#ifdef CONFIG_PAX_ASLR
1156+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1157+
1158+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1159+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1160+#endif
1161+
1162 #include <asm/processor.h>
1163
1164 /*
1165diff -urNp linux-3.0.4/arch/mips/kernel/process.c linux-3.0.4/arch/mips/kernel/process.c
1166--- linux-3.0.4/arch/mips/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
1167+++ linux-3.0.4/arch/mips/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
1168@@ -473,15 +473,3 @@ unsigned long get_wchan(struct task_stru
1169 out:
1170 return pc;
1171 }
1172-
1173-/*
1174- * Don't forget that the stack pointer must be aligned on a 8 bytes
1175- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1176- */
1177-unsigned long arch_align_stack(unsigned long sp)
1178-{
1179- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1180- sp -= get_random_int() & ~PAGE_MASK;
1181-
1182- return sp & ALMASK;
1183-}
1184diff -urNp linux-3.0.4/arch/mips/mm/fault.c linux-3.0.4/arch/mips/mm/fault.c
1185--- linux-3.0.4/arch/mips/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
1186+++ linux-3.0.4/arch/mips/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
1187@@ -28,6 +28,23 @@
1188 #include <asm/highmem.h> /* For VMALLOC_END */
1189 #include <linux/kdebug.h>
1190
1191+#ifdef CONFIG_PAX_PAGEEXEC
1192+void pax_report_insns(void *pc, void *sp)
1193+{
1194+ unsigned long i;
1195+
1196+ printk(KERN_ERR "PAX: bytes at PC: ");
1197+ for (i = 0; i < 5; i++) {
1198+ unsigned int c;
1199+ if (get_user(c, (unsigned int *)pc+i))
1200+ printk(KERN_CONT "???????? ");
1201+ else
1202+ printk(KERN_CONT "%08x ", c);
1203+ }
1204+ printk("\n");
1205+}
1206+#endif
1207+
1208 /*
1209 * This routine handles page faults. It determines the address,
1210 * and the problem, and then passes it off to one of the appropriate
1211diff -urNp linux-3.0.4/arch/mips/mm/mmap.c linux-3.0.4/arch/mips/mm/mmap.c
1212--- linux-3.0.4/arch/mips/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
1213+++ linux-3.0.4/arch/mips/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
1214@@ -48,14 +48,18 @@ unsigned long arch_get_unmapped_area(str
1215 do_color_align = 0;
1216 if (filp || (flags & MAP_SHARED))
1217 do_color_align = 1;
1218+
1219+#ifdef CONFIG_PAX_RANDMMAP
1220+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1221+#endif
1222+
1223 if (addr) {
1224 if (do_color_align)
1225 addr = COLOUR_ALIGN(addr, pgoff);
1226 else
1227 addr = PAGE_ALIGN(addr);
1228 vmm = find_vma(current->mm, addr);
1229- if (TASK_SIZE - len >= addr &&
1230- (!vmm || addr + len <= vmm->vm_start))
1231+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
1232 return addr;
1233 }
1234 addr = current->mm->mmap_base;
1235@@ -68,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
1236 /* At this point: (!vmm || addr < vmm->vm_end). */
1237 if (TASK_SIZE - len < addr)
1238 return -ENOMEM;
1239- if (!vmm || addr + len <= vmm->vm_start)
1240+ if (check_heap_stack_gap(vmm, addr, len))
1241 return addr;
1242 addr = vmm->vm_end;
1243 if (do_color_align)
1244@@ -93,30 +97,3 @@ void arch_pick_mmap_layout(struct mm_str
1245 mm->get_unmapped_area = arch_get_unmapped_area;
1246 mm->unmap_area = arch_unmap_area;
1247 }
1248-
1249-static inline unsigned long brk_rnd(void)
1250-{
1251- unsigned long rnd = get_random_int();
1252-
1253- rnd = rnd << PAGE_SHIFT;
1254- /* 8MB for 32bit, 256MB for 64bit */
1255- if (TASK_IS_32BIT_ADDR)
1256- rnd = rnd & 0x7ffffful;
1257- else
1258- rnd = rnd & 0xffffffful;
1259-
1260- return rnd;
1261-}
1262-
1263-unsigned long arch_randomize_brk(struct mm_struct *mm)
1264-{
1265- unsigned long base = mm->brk;
1266- unsigned long ret;
1267-
1268- ret = PAGE_ALIGN(base + brk_rnd());
1269-
1270- if (ret < mm->brk)
1271- return mm->brk;
1272-
1273- return ret;
1274-}
1275diff -urNp linux-3.0.4/arch/parisc/include/asm/elf.h linux-3.0.4/arch/parisc/include/asm/elf.h
1276--- linux-3.0.4/arch/parisc/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1277+++ linux-3.0.4/arch/parisc/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1278@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration..
1279
1280 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1281
1282+#ifdef CONFIG_PAX_ASLR
1283+#define PAX_ELF_ET_DYN_BASE 0x10000UL
1284+
1285+#define PAX_DELTA_MMAP_LEN 16
1286+#define PAX_DELTA_STACK_LEN 16
1287+#endif
1288+
1289 /* This yields a mask that user programs can use to figure out what
1290 instruction set this CPU supports. This could be done in user space,
1291 but it's not easy, and we've already done it here. */
1292diff -urNp linux-3.0.4/arch/parisc/include/asm/pgtable.h linux-3.0.4/arch/parisc/include/asm/pgtable.h
1293--- linux-3.0.4/arch/parisc/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
1294+++ linux-3.0.4/arch/parisc/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
1295@@ -210,6 +210,17 @@ struct vm_area_struct;
1296 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1297 #define PAGE_COPY PAGE_EXECREAD
1298 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1299+
1300+#ifdef CONFIG_PAX_PAGEEXEC
1301+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1302+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1303+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1304+#else
1305+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1306+# define PAGE_COPY_NOEXEC PAGE_COPY
1307+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1308+#endif
1309+
1310 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1311 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
1312 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
1313diff -urNp linux-3.0.4/arch/parisc/kernel/module.c linux-3.0.4/arch/parisc/kernel/module.c
1314--- linux-3.0.4/arch/parisc/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
1315+++ linux-3.0.4/arch/parisc/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
1316@@ -98,16 +98,38 @@
1317
1318 /* three functions to determine where in the module core
1319 * or init pieces the location is */
1320+static inline int in_init_rx(struct module *me, void *loc)
1321+{
1322+ return (loc >= me->module_init_rx &&
1323+ loc < (me->module_init_rx + me->init_size_rx));
1324+}
1325+
1326+static inline int in_init_rw(struct module *me, void *loc)
1327+{
1328+ return (loc >= me->module_init_rw &&
1329+ loc < (me->module_init_rw + me->init_size_rw));
1330+}
1331+
1332 static inline int in_init(struct module *me, void *loc)
1333 {
1334- return (loc >= me->module_init &&
1335- loc <= (me->module_init + me->init_size));
1336+ return in_init_rx(me, loc) || in_init_rw(me, loc);
1337+}
1338+
1339+static inline int in_core_rx(struct module *me, void *loc)
1340+{
1341+ return (loc >= me->module_core_rx &&
1342+ loc < (me->module_core_rx + me->core_size_rx));
1343+}
1344+
1345+static inline int in_core_rw(struct module *me, void *loc)
1346+{
1347+ return (loc >= me->module_core_rw &&
1348+ loc < (me->module_core_rw + me->core_size_rw));
1349 }
1350
1351 static inline int in_core(struct module *me, void *loc)
1352 {
1353- return (loc >= me->module_core &&
1354- loc <= (me->module_core + me->core_size));
1355+ return in_core_rx(me, loc) || in_core_rw(me, loc);
1356 }
1357
1358 static inline int in_local(struct module *me, void *loc)
1359@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_
1360 }
1361
1362 /* align things a bit */
1363- me->core_size = ALIGN(me->core_size, 16);
1364- me->arch.got_offset = me->core_size;
1365- me->core_size += gots * sizeof(struct got_entry);
1366-
1367- me->core_size = ALIGN(me->core_size, 16);
1368- me->arch.fdesc_offset = me->core_size;
1369- me->core_size += fdescs * sizeof(Elf_Fdesc);
1370+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1371+ me->arch.got_offset = me->core_size_rw;
1372+ me->core_size_rw += gots * sizeof(struct got_entry);
1373+
1374+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1375+ me->arch.fdesc_offset = me->core_size_rw;
1376+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1377
1378 me->arch.got_max = gots;
1379 me->arch.fdesc_max = fdescs;
1380@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module
1381
1382 BUG_ON(value == 0);
1383
1384- got = me->module_core + me->arch.got_offset;
1385+ got = me->module_core_rw + me->arch.got_offset;
1386 for (i = 0; got[i].addr; i++)
1387 if (got[i].addr == value)
1388 goto out;
1389@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module
1390 #ifdef CONFIG_64BIT
1391 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1392 {
1393- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1394+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1395
1396 if (!value) {
1397 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1398@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module
1399
1400 /* Create new one */
1401 fdesc->addr = value;
1402- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1403+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1404 return (Elf_Addr)fdesc;
1405 }
1406 #endif /* CONFIG_64BIT */
1407@@ -857,7 +879,7 @@ register_unwind_table(struct module *me,
1408
1409 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1410 end = table + sechdrs[me->arch.unwind_section].sh_size;
1411- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1412+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1413
1414 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1415 me->arch.unwind_section, table, end, gp);
1416diff -urNp linux-3.0.4/arch/parisc/kernel/sys_parisc.c linux-3.0.4/arch/parisc/kernel/sys_parisc.c
1417--- linux-3.0.4/arch/parisc/kernel/sys_parisc.c 2011-07-21 22:17:23.000000000 -0400
1418+++ linux-3.0.4/arch/parisc/kernel/sys_parisc.c 2011-08-23 21:47:55.000000000 -0400
1419@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1420 /* At this point: (!vma || addr < vma->vm_end). */
1421 if (TASK_SIZE - len < addr)
1422 return -ENOMEM;
1423- if (!vma || addr + len <= vma->vm_start)
1424+ if (check_heap_stack_gap(vma, addr, len))
1425 return addr;
1426 addr = vma->vm_end;
1427 }
1428@@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1429 /* At this point: (!vma || addr < vma->vm_end). */
1430 if (TASK_SIZE - len < addr)
1431 return -ENOMEM;
1432- if (!vma || addr + len <= vma->vm_start)
1433+ if (check_heap_stack_gap(vma, addr, len))
1434 return addr;
1435 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1436 if (addr < vma->vm_end) /* handle wraparound */
1437@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1438 if (flags & MAP_FIXED)
1439 return addr;
1440 if (!addr)
1441- addr = TASK_UNMAPPED_BASE;
1442+ addr = current->mm->mmap_base;
1443
1444 if (filp) {
1445 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1446diff -urNp linux-3.0.4/arch/parisc/kernel/traps.c linux-3.0.4/arch/parisc/kernel/traps.c
1447--- linux-3.0.4/arch/parisc/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
1448+++ linux-3.0.4/arch/parisc/kernel/traps.c 2011-08-23 21:47:55.000000000 -0400
1449@@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1450
1451 down_read(&current->mm->mmap_sem);
1452 vma = find_vma(current->mm,regs->iaoq[0]);
1453- if (vma && (regs->iaoq[0] >= vma->vm_start)
1454- && (vma->vm_flags & VM_EXEC)) {
1455-
1456+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1457 fault_address = regs->iaoq[0];
1458 fault_space = regs->iasq[0];
1459
1460diff -urNp linux-3.0.4/arch/parisc/mm/fault.c linux-3.0.4/arch/parisc/mm/fault.c
1461--- linux-3.0.4/arch/parisc/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
1462+++ linux-3.0.4/arch/parisc/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
1463@@ -15,6 +15,7 @@
1464 #include <linux/sched.h>
1465 #include <linux/interrupt.h>
1466 #include <linux/module.h>
1467+#include <linux/unistd.h>
1468
1469 #include <asm/uaccess.h>
1470 #include <asm/traps.h>
1471@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1472 static unsigned long
1473 parisc_acctyp(unsigned long code, unsigned int inst)
1474 {
1475- if (code == 6 || code == 16)
1476+ if (code == 6 || code == 7 || code == 16)
1477 return VM_EXEC;
1478
1479 switch (inst & 0xf0000000) {
1480@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1481 }
1482 #endif
1483
1484+#ifdef CONFIG_PAX_PAGEEXEC
1485+/*
1486+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1487+ *
1488+ * returns 1 when task should be killed
1489+ * 2 when rt_sigreturn trampoline was detected
1490+ * 3 when unpatched PLT trampoline was detected
1491+ */
1492+static int pax_handle_fetch_fault(struct pt_regs *regs)
1493+{
1494+
1495+#ifdef CONFIG_PAX_EMUPLT
1496+ int err;
1497+
1498+ do { /* PaX: unpatched PLT emulation */
1499+ unsigned int bl, depwi;
1500+
1501+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1502+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1503+
1504+ if (err)
1505+ break;
1506+
1507+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1508+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1509+
1510+ err = get_user(ldw, (unsigned int *)addr);
1511+ err |= get_user(bv, (unsigned int *)(addr+4));
1512+ err |= get_user(ldw2, (unsigned int *)(addr+8));
1513+
1514+ if (err)
1515+ break;
1516+
1517+ if (ldw == 0x0E801096U &&
1518+ bv == 0xEAC0C000U &&
1519+ ldw2 == 0x0E881095U)
1520+ {
1521+ unsigned int resolver, map;
1522+
1523+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1524+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1525+ if (err)
1526+ break;
1527+
1528+ regs->gr[20] = instruction_pointer(regs)+8;
1529+ regs->gr[21] = map;
1530+ regs->gr[22] = resolver;
1531+ regs->iaoq[0] = resolver | 3UL;
1532+ regs->iaoq[1] = regs->iaoq[0] + 4;
1533+ return 3;
1534+ }
1535+ }
1536+ } while (0);
1537+#endif
1538+
1539+#ifdef CONFIG_PAX_EMUTRAMP
1540+
1541+#ifndef CONFIG_PAX_EMUSIGRT
1542+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1543+ return 1;
1544+#endif
1545+
1546+ do { /* PaX: rt_sigreturn emulation */
1547+ unsigned int ldi1, ldi2, bel, nop;
1548+
1549+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1550+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1551+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1552+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1553+
1554+ if (err)
1555+ break;
1556+
1557+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1558+ ldi2 == 0x3414015AU &&
1559+ bel == 0xE4008200U &&
1560+ nop == 0x08000240U)
1561+ {
1562+ regs->gr[25] = (ldi1 & 2) >> 1;
1563+ regs->gr[20] = __NR_rt_sigreturn;
1564+ regs->gr[31] = regs->iaoq[1] + 16;
1565+ regs->sr[0] = regs->iasq[1];
1566+ regs->iaoq[0] = 0x100UL;
1567+ regs->iaoq[1] = regs->iaoq[0] + 4;
1568+ regs->iasq[0] = regs->sr[2];
1569+ regs->iasq[1] = regs->sr[2];
1570+ return 2;
1571+ }
1572+ } while (0);
1573+#endif
1574+
1575+ return 1;
1576+}
1577+
1578+void pax_report_insns(void *pc, void *sp)
1579+{
1580+ unsigned long i;
1581+
1582+ printk(KERN_ERR "PAX: bytes at PC: ");
1583+ for (i = 0; i < 5; i++) {
1584+ unsigned int c;
1585+ if (get_user(c, (unsigned int *)pc+i))
1586+ printk(KERN_CONT "???????? ");
1587+ else
1588+ printk(KERN_CONT "%08x ", c);
1589+ }
1590+ printk("\n");
1591+}
1592+#endif
1593+
1594 int fixup_exception(struct pt_regs *regs)
1595 {
1596 const struct exception_table_entry *fix;
1597@@ -192,8 +303,33 @@ good_area:
1598
1599 acc_type = parisc_acctyp(code,regs->iir);
1600
1601- if ((vma->vm_flags & acc_type) != acc_type)
1602+ if ((vma->vm_flags & acc_type) != acc_type) {
1603+
1604+#ifdef CONFIG_PAX_PAGEEXEC
1605+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1606+ (address & ~3UL) == instruction_pointer(regs))
1607+ {
1608+ up_read(&mm->mmap_sem);
1609+ switch (pax_handle_fetch_fault(regs)) {
1610+
1611+#ifdef CONFIG_PAX_EMUPLT
1612+ case 3:
1613+ return;
1614+#endif
1615+
1616+#ifdef CONFIG_PAX_EMUTRAMP
1617+ case 2:
1618+ return;
1619+#endif
1620+
1621+ }
1622+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1623+ do_group_exit(SIGKILL);
1624+ }
1625+#endif
1626+
1627 goto bad_area;
1628+ }
1629
1630 /*
1631 * If for any reason at all we couldn't handle the fault, make
1632diff -urNp linux-3.0.4/arch/powerpc/include/asm/elf.h linux-3.0.4/arch/powerpc/include/asm/elf.h
1633--- linux-3.0.4/arch/powerpc/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1634+++ linux-3.0.4/arch/powerpc/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1635@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
1636 the loader. We need to make sure that it is out of the way of the program
1637 that it will "exec", and that there is sufficient room for the brk. */
1638
1639-extern unsigned long randomize_et_dyn(unsigned long base);
1640-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
1641+#define ELF_ET_DYN_BASE (0x20000000)
1642+
1643+#ifdef CONFIG_PAX_ASLR
1644+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
1645+
1646+#ifdef __powerpc64__
1647+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
1648+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
1649+#else
1650+#define PAX_DELTA_MMAP_LEN 15
1651+#define PAX_DELTA_STACK_LEN 15
1652+#endif
1653+#endif
1654
1655 /*
1656 * Our registers are always unsigned longs, whether we're a 32 bit
1657@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(s
1658 (0x7ff >> (PAGE_SHIFT - 12)) : \
1659 (0x3ffff >> (PAGE_SHIFT - 12)))
1660
1661-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1662-#define arch_randomize_brk arch_randomize_brk
1663-
1664 #endif /* __KERNEL__ */
1665
1666 /*
1667diff -urNp linux-3.0.4/arch/powerpc/include/asm/kmap_types.h linux-3.0.4/arch/powerpc/include/asm/kmap_types.h
1668--- linux-3.0.4/arch/powerpc/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
1669+++ linux-3.0.4/arch/powerpc/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
1670@@ -27,6 +27,7 @@ enum km_type {
1671 KM_PPC_SYNC_PAGE,
1672 KM_PPC_SYNC_ICACHE,
1673 KM_KDB,
1674+ KM_CLEARPAGE,
1675 KM_TYPE_NR
1676 };
1677
1678diff -urNp linux-3.0.4/arch/powerpc/include/asm/mman.h linux-3.0.4/arch/powerpc/include/asm/mman.h
1679--- linux-3.0.4/arch/powerpc/include/asm/mman.h 2011-07-21 22:17:23.000000000 -0400
1680+++ linux-3.0.4/arch/powerpc/include/asm/mman.h 2011-08-23 21:47:55.000000000 -0400
1681@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm
1682 }
1683 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
1684
1685-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
1686+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
1687 {
1688 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
1689 }
1690diff -urNp linux-3.0.4/arch/powerpc/include/asm/page_64.h linux-3.0.4/arch/powerpc/include/asm/page_64.h
1691--- linux-3.0.4/arch/powerpc/include/asm/page_64.h 2011-07-21 22:17:23.000000000 -0400
1692+++ linux-3.0.4/arch/powerpc/include/asm/page_64.h 2011-08-23 21:47:55.000000000 -0400
1693@@ -155,15 +155,18 @@ do { \
1694 * stack by default, so in the absence of a PT_GNU_STACK program header
1695 * we turn execute permission off.
1696 */
1697-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1698- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1699+#define VM_STACK_DEFAULT_FLAGS32 \
1700+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1701+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1702
1703 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1704 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1705
1706+#ifndef CONFIG_PAX_PAGEEXEC
1707 #define VM_STACK_DEFAULT_FLAGS \
1708 (is_32bit_task() ? \
1709 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
1710+#endif
1711
1712 #include <asm-generic/getorder.h>
1713
1714diff -urNp linux-3.0.4/arch/powerpc/include/asm/page.h linux-3.0.4/arch/powerpc/include/asm/page.h
1715--- linux-3.0.4/arch/powerpc/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
1716+++ linux-3.0.4/arch/powerpc/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
1717@@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr;
1718 * and needs to be executable. This means the whole heap ends
1719 * up being executable.
1720 */
1721-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1722- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1723+#define VM_DATA_DEFAULT_FLAGS32 \
1724+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1725+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1726
1727 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1728 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1729@@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr;
1730 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
1731 #endif
1732
1733+#define ktla_ktva(addr) (addr)
1734+#define ktva_ktla(addr) (addr)
1735+
1736 #ifndef __ASSEMBLY__
1737
1738 #undef STRICT_MM_TYPECHECKS
1739diff -urNp linux-3.0.4/arch/powerpc/include/asm/pgtable.h linux-3.0.4/arch/powerpc/include/asm/pgtable.h
1740--- linux-3.0.4/arch/powerpc/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
1741+++ linux-3.0.4/arch/powerpc/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
1742@@ -2,6 +2,7 @@
1743 #define _ASM_POWERPC_PGTABLE_H
1744 #ifdef __KERNEL__
1745
1746+#include <linux/const.h>
1747 #ifndef __ASSEMBLY__
1748 #include <asm/processor.h> /* For TASK_SIZE */
1749 #include <asm/mmu.h>
1750diff -urNp linux-3.0.4/arch/powerpc/include/asm/pte-hash32.h linux-3.0.4/arch/powerpc/include/asm/pte-hash32.h
1751--- linux-3.0.4/arch/powerpc/include/asm/pte-hash32.h 2011-07-21 22:17:23.000000000 -0400
1752+++ linux-3.0.4/arch/powerpc/include/asm/pte-hash32.h 2011-08-23 21:47:55.000000000 -0400
1753@@ -21,6 +21,7 @@
1754 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
1755 #define _PAGE_USER 0x004 /* usermode access allowed */
1756 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
1757+#define _PAGE_EXEC _PAGE_GUARDED
1758 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
1759 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
1760 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
1761diff -urNp linux-3.0.4/arch/powerpc/include/asm/reg.h linux-3.0.4/arch/powerpc/include/asm/reg.h
1762--- linux-3.0.4/arch/powerpc/include/asm/reg.h 2011-07-21 22:17:23.000000000 -0400
1763+++ linux-3.0.4/arch/powerpc/include/asm/reg.h 2011-08-23 21:47:55.000000000 -0400
1764@@ -209,6 +209,7 @@
1765 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
1766 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
1767 #define DSISR_NOHPTE 0x40000000 /* no translation found */
1768+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
1769 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
1770 #define DSISR_ISSTORE 0x02000000 /* access was a store */
1771 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
1772diff -urNp linux-3.0.4/arch/powerpc/include/asm/system.h linux-3.0.4/arch/powerpc/include/asm/system.h
1773--- linux-3.0.4/arch/powerpc/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
1774+++ linux-3.0.4/arch/powerpc/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
1775@@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
1776 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
1777 #endif
1778
1779-extern unsigned long arch_align_stack(unsigned long sp);
1780+#define arch_align_stack(x) ((x) & ~0xfUL)
1781
1782 /* Used in very early kernel initialization. */
1783 extern unsigned long reloc_offset(void);
1784diff -urNp linux-3.0.4/arch/powerpc/include/asm/uaccess.h linux-3.0.4/arch/powerpc/include/asm/uaccess.h
1785--- linux-3.0.4/arch/powerpc/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
1786+++ linux-3.0.4/arch/powerpc/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
1787@@ -13,6 +13,8 @@
1788 #define VERIFY_READ 0
1789 #define VERIFY_WRITE 1
1790
1791+extern void check_object_size(const void *ptr, unsigned long n, bool to);
1792+
1793 /*
1794 * The fs value determines whether argument validity checking should be
1795 * performed or not. If get_fs() == USER_DS, checking is performed, with
1796@@ -327,52 +329,6 @@ do { \
1797 extern unsigned long __copy_tofrom_user(void __user *to,
1798 const void __user *from, unsigned long size);
1799
1800-#ifndef __powerpc64__
1801-
1802-static inline unsigned long copy_from_user(void *to,
1803- const void __user *from, unsigned long n)
1804-{
1805- unsigned long over;
1806-
1807- if (access_ok(VERIFY_READ, from, n))
1808- return __copy_tofrom_user((__force void __user *)to, from, n);
1809- if ((unsigned long)from < TASK_SIZE) {
1810- over = (unsigned long)from + n - TASK_SIZE;
1811- return __copy_tofrom_user((__force void __user *)to, from,
1812- n - over) + over;
1813- }
1814- return n;
1815-}
1816-
1817-static inline unsigned long copy_to_user(void __user *to,
1818- const void *from, unsigned long n)
1819-{
1820- unsigned long over;
1821-
1822- if (access_ok(VERIFY_WRITE, to, n))
1823- return __copy_tofrom_user(to, (__force void __user *)from, n);
1824- if ((unsigned long)to < TASK_SIZE) {
1825- over = (unsigned long)to + n - TASK_SIZE;
1826- return __copy_tofrom_user(to, (__force void __user *)from,
1827- n - over) + over;
1828- }
1829- return n;
1830-}
1831-
1832-#else /* __powerpc64__ */
1833-
1834-#define __copy_in_user(to, from, size) \
1835- __copy_tofrom_user((to), (from), (size))
1836-
1837-extern unsigned long copy_from_user(void *to, const void __user *from,
1838- unsigned long n);
1839-extern unsigned long copy_to_user(void __user *to, const void *from,
1840- unsigned long n);
1841-extern unsigned long copy_in_user(void __user *to, const void __user *from,
1842- unsigned long n);
1843-
1844-#endif /* __powerpc64__ */
1845-
1846 static inline unsigned long __copy_from_user_inatomic(void *to,
1847 const void __user *from, unsigned long n)
1848 {
1849@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
1850 if (ret == 0)
1851 return 0;
1852 }
1853+
1854+ if (!__builtin_constant_p(n))
1855+ check_object_size(to, n, false);
1856+
1857 return __copy_tofrom_user((__force void __user *)to, from, n);
1858 }
1859
1860@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
1861 if (ret == 0)
1862 return 0;
1863 }
1864+
1865+ if (!__builtin_constant_p(n))
1866+ check_object_size(from, n, true);
1867+
1868 return __copy_tofrom_user(to, (__force const void __user *)from, n);
1869 }
1870
1871@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
1872 return __copy_to_user_inatomic(to, from, size);
1873 }
1874
1875+#ifndef __powerpc64__
1876+
1877+static inline unsigned long __must_check copy_from_user(void *to,
1878+ const void __user *from, unsigned long n)
1879+{
1880+ unsigned long over;
1881+
1882+ if ((long)n < 0)
1883+ return n;
1884+
1885+ if (access_ok(VERIFY_READ, from, n)) {
1886+ if (!__builtin_constant_p(n))
1887+ check_object_size(to, n, false);
1888+ return __copy_tofrom_user((__force void __user *)to, from, n);
1889+ }
1890+ if ((unsigned long)from < TASK_SIZE) {
1891+ over = (unsigned long)from + n - TASK_SIZE;
1892+ if (!__builtin_constant_p(n - over))
1893+ check_object_size(to, n - over, false);
1894+ return __copy_tofrom_user((__force void __user *)to, from,
1895+ n - over) + over;
1896+ }
1897+ return n;
1898+}
1899+
1900+static inline unsigned long __must_check copy_to_user(void __user *to,
1901+ const void *from, unsigned long n)
1902+{
1903+ unsigned long over;
1904+
1905+ if ((long)n < 0)
1906+ return n;
1907+
1908+ if (access_ok(VERIFY_WRITE, to, n)) {
1909+ if (!__builtin_constant_p(n))
1910+ check_object_size(from, n, true);
1911+ return __copy_tofrom_user(to, (__force void __user *)from, n);
1912+ }
1913+ if ((unsigned long)to < TASK_SIZE) {
1914+ over = (unsigned long)to + n - TASK_SIZE;
1915+ if (!__builtin_constant_p(n))
1916+ check_object_size(from, n - over, true);
1917+ return __copy_tofrom_user(to, (__force void __user *)from,
1918+ n - over) + over;
1919+ }
1920+ return n;
1921+}
1922+
1923+#else /* __powerpc64__ */
1924+
1925+#define __copy_in_user(to, from, size) \
1926+ __copy_tofrom_user((to), (from), (size))
1927+
1928+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1929+{
1930+ if ((long)n < 0 || n > INT_MAX)
1931+ return n;
1932+
1933+ if (!__builtin_constant_p(n))
1934+ check_object_size(to, n, false);
1935+
1936+ if (likely(access_ok(VERIFY_READ, from, n)))
1937+ n = __copy_from_user(to, from, n);
1938+ else
1939+ memset(to, 0, n);
1940+ return n;
1941+}
1942+
1943+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1944+{
1945+ if ((long)n < 0 || n > INT_MAX)
1946+ return n;
1947+
1948+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
1949+ if (!__builtin_constant_p(n))
1950+ check_object_size(from, n, true);
1951+ n = __copy_to_user(to, from, n);
1952+ }
1953+ return n;
1954+}
1955+
1956+extern unsigned long copy_in_user(void __user *to, const void __user *from,
1957+ unsigned long n);
1958+
1959+#endif /* __powerpc64__ */
1960+
1961 extern unsigned long __clear_user(void __user *addr, unsigned long size);
1962
1963 static inline unsigned long clear_user(void __user *addr, unsigned long size)
1964diff -urNp linux-3.0.4/arch/powerpc/kernel/exceptions-64e.S linux-3.0.4/arch/powerpc/kernel/exceptions-64e.S
1965--- linux-3.0.4/arch/powerpc/kernel/exceptions-64e.S 2011-07-21 22:17:23.000000000 -0400
1966+++ linux-3.0.4/arch/powerpc/kernel/exceptions-64e.S 2011-08-23 21:47:55.000000000 -0400
1967@@ -567,6 +567,7 @@ storage_fault_common:
1968 std r14,_DAR(r1)
1969 std r15,_DSISR(r1)
1970 addi r3,r1,STACK_FRAME_OVERHEAD
1971+ bl .save_nvgprs
1972 mr r4,r14
1973 mr r5,r15
1974 ld r14,PACA_EXGEN+EX_R14(r13)
1975@@ -576,8 +577,7 @@ storage_fault_common:
1976 cmpdi r3,0
1977 bne- 1f
1978 b .ret_from_except_lite
1979-1: bl .save_nvgprs
1980- mr r5,r3
1981+1: mr r5,r3
1982 addi r3,r1,STACK_FRAME_OVERHEAD
1983 ld r4,_DAR(r1)
1984 bl .bad_page_fault
1985diff -urNp linux-3.0.4/arch/powerpc/kernel/exceptions-64s.S linux-3.0.4/arch/powerpc/kernel/exceptions-64s.S
1986--- linux-3.0.4/arch/powerpc/kernel/exceptions-64s.S 2011-07-21 22:17:23.000000000 -0400
1987+++ linux-3.0.4/arch/powerpc/kernel/exceptions-64s.S 2011-08-23 21:47:55.000000000 -0400
1988@@ -956,10 +956,10 @@ handle_page_fault:
1989 11: ld r4,_DAR(r1)
1990 ld r5,_DSISR(r1)
1991 addi r3,r1,STACK_FRAME_OVERHEAD
1992+ bl .save_nvgprs
1993 bl .do_page_fault
1994 cmpdi r3,0
1995 beq+ 13f
1996- bl .save_nvgprs
1997 mr r5,r3
1998 addi r3,r1,STACK_FRAME_OVERHEAD
1999 lwz r4,_DAR(r1)
2000diff -urNp linux-3.0.4/arch/powerpc/kernel/module_32.c linux-3.0.4/arch/powerpc/kernel/module_32.c
2001--- linux-3.0.4/arch/powerpc/kernel/module_32.c 2011-07-21 22:17:23.000000000 -0400
2002+++ linux-3.0.4/arch/powerpc/kernel/module_32.c 2011-08-23 21:47:55.000000000 -0400
2003@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2004 me->arch.core_plt_section = i;
2005 }
2006 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2007- printk("Module doesn't contain .plt or .init.plt sections.\n");
2008+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2009 return -ENOEXEC;
2010 }
2011
2012@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2013
2014 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2015 /* Init, or core PLT? */
2016- if (location >= mod->module_core
2017- && location < mod->module_core + mod->core_size)
2018+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2019+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2020 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2021- else
2022+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2023+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2024 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2025+ else {
2026+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2027+ return ~0UL;
2028+ }
2029
2030 /* Find this entry, or if that fails, the next avail. entry */
2031 while (entry->jump[0]) {
2032diff -urNp linux-3.0.4/arch/powerpc/kernel/module.c linux-3.0.4/arch/powerpc/kernel/module.c
2033--- linux-3.0.4/arch/powerpc/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
2034+++ linux-3.0.4/arch/powerpc/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
2035@@ -31,11 +31,24 @@
2036
2037 LIST_HEAD(module_bug_list);
2038
2039+#ifdef CONFIG_PAX_KERNEXEC
2040 void *module_alloc(unsigned long size)
2041 {
2042 if (size == 0)
2043 return NULL;
2044
2045+ return vmalloc(size);
2046+}
2047+
2048+void *module_alloc_exec(unsigned long size)
2049+#else
2050+void *module_alloc(unsigned long size)
2051+#endif
2052+
2053+{
2054+ if (size == 0)
2055+ return NULL;
2056+
2057 return vmalloc_exec(size);
2058 }
2059
2060@@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2061 vfree(module_region);
2062 }
2063
2064+#ifdef CONFIG_PAX_KERNEXEC
2065+void module_free_exec(struct module *mod, void *module_region)
2066+{
2067+ module_free(mod, module_region);
2068+}
2069+#endif
2070+
2071 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2072 const Elf_Shdr *sechdrs,
2073 const char *name)
2074diff -urNp linux-3.0.4/arch/powerpc/kernel/process.c linux-3.0.4/arch/powerpc/kernel/process.c
2075--- linux-3.0.4/arch/powerpc/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2076+++ linux-3.0.4/arch/powerpc/kernel/process.c 2011-08-23 21:48:14.000000000 -0400
2077@@ -676,8 +676,8 @@ void show_regs(struct pt_regs * regs)
2078 * Lookup NIP late so we have the best change of getting the
2079 * above info out without failing
2080 */
2081- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2082- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2083+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2084+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2085 #endif
2086 show_stack(current, (unsigned long *) regs->gpr[1]);
2087 if (!user_mode(regs))
2088@@ -1183,10 +1183,10 @@ void show_stack(struct task_struct *tsk,
2089 newsp = stack[0];
2090 ip = stack[STACK_FRAME_LR_SAVE];
2091 if (!firstframe || ip != lr) {
2092- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2093+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2094 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2095 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2096- printk(" (%pS)",
2097+ printk(" (%pA)",
2098 (void *)current->ret_stack[curr_frame].ret);
2099 curr_frame--;
2100 }
2101@@ -1206,7 +1206,7 @@ void show_stack(struct task_struct *tsk,
2102 struct pt_regs *regs = (struct pt_regs *)
2103 (sp + STACK_FRAME_OVERHEAD);
2104 lr = regs->link;
2105- printk("--- Exception: %lx at %pS\n LR = %pS\n",
2106+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
2107 regs->trap, (void *)regs->nip, (void *)lr);
2108 firstframe = 1;
2109 }
2110@@ -1281,58 +1281,3 @@ void thread_info_cache_init(void)
2111 }
2112
2113 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2114-
2115-unsigned long arch_align_stack(unsigned long sp)
2116-{
2117- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2118- sp -= get_random_int() & ~PAGE_MASK;
2119- return sp & ~0xf;
2120-}
2121-
2122-static inline unsigned long brk_rnd(void)
2123-{
2124- unsigned long rnd = 0;
2125-
2126- /* 8MB for 32bit, 1GB for 64bit */
2127- if (is_32bit_task())
2128- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2129- else
2130- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2131-
2132- return rnd << PAGE_SHIFT;
2133-}
2134-
2135-unsigned long arch_randomize_brk(struct mm_struct *mm)
2136-{
2137- unsigned long base = mm->brk;
2138- unsigned long ret;
2139-
2140-#ifdef CONFIG_PPC_STD_MMU_64
2141- /*
2142- * If we are using 1TB segments and we are allowed to randomise
2143- * the heap, we can put it above 1TB so it is backed by a 1TB
2144- * segment. Otherwise the heap will be in the bottom 1TB
2145- * which always uses 256MB segments and this may result in a
2146- * performance penalty.
2147- */
2148- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2149- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2150-#endif
2151-
2152- ret = PAGE_ALIGN(base + brk_rnd());
2153-
2154- if (ret < mm->brk)
2155- return mm->brk;
2156-
2157- return ret;
2158-}
2159-
2160-unsigned long randomize_et_dyn(unsigned long base)
2161-{
2162- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2163-
2164- if (ret < base)
2165- return base;
2166-
2167- return ret;
2168-}
2169diff -urNp linux-3.0.4/arch/powerpc/kernel/signal_32.c linux-3.0.4/arch/powerpc/kernel/signal_32.c
2170--- linux-3.0.4/arch/powerpc/kernel/signal_32.c 2011-07-21 22:17:23.000000000 -0400
2171+++ linux-3.0.4/arch/powerpc/kernel/signal_32.c 2011-08-23 21:47:55.000000000 -0400
2172@@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig
2173 /* Save user registers on the stack */
2174 frame = &rt_sf->uc.uc_mcontext;
2175 addr = frame;
2176- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2177+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2178 if (save_user_regs(regs, frame, 0, 1))
2179 goto badframe;
2180 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2181diff -urNp linux-3.0.4/arch/powerpc/kernel/signal_64.c linux-3.0.4/arch/powerpc/kernel/signal_64.c
2182--- linux-3.0.4/arch/powerpc/kernel/signal_64.c 2011-07-21 22:17:23.000000000 -0400
2183+++ linux-3.0.4/arch/powerpc/kernel/signal_64.c 2011-08-23 21:47:55.000000000 -0400
2184@@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct
2185 current->thread.fpscr.val = 0;
2186
2187 /* Set up to return from userspace. */
2188- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2189+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2190 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2191 } else {
2192 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2193diff -urNp linux-3.0.4/arch/powerpc/kernel/traps.c linux-3.0.4/arch/powerpc/kernel/traps.c
2194--- linux-3.0.4/arch/powerpc/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
2195+++ linux-3.0.4/arch/powerpc/kernel/traps.c 2011-08-23 21:48:14.000000000 -0400
2196@@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2197 static inline void pmac_backlight_unblank(void) { }
2198 #endif
2199
2200+extern void gr_handle_kernel_exploit(void);
2201+
2202 int die(const char *str, struct pt_regs *regs, long err)
2203 {
2204 static struct {
2205@@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs
2206 if (panic_on_oops)
2207 panic("Fatal exception");
2208
2209+ gr_handle_kernel_exploit();
2210+
2211 oops_exit();
2212 do_exit(err);
2213
2214diff -urNp linux-3.0.4/arch/powerpc/kernel/vdso.c linux-3.0.4/arch/powerpc/kernel/vdso.c
2215--- linux-3.0.4/arch/powerpc/kernel/vdso.c 2011-07-21 22:17:23.000000000 -0400
2216+++ linux-3.0.4/arch/powerpc/kernel/vdso.c 2011-08-23 21:47:55.000000000 -0400
2217@@ -36,6 +36,7 @@
2218 #include <asm/firmware.h>
2219 #include <asm/vdso.h>
2220 #include <asm/vdso_datapage.h>
2221+#include <asm/mman.h>
2222
2223 #include "setup.h"
2224
2225@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2226 vdso_base = VDSO32_MBASE;
2227 #endif
2228
2229- current->mm->context.vdso_base = 0;
2230+ current->mm->context.vdso_base = ~0UL;
2231
2232 /* vDSO has a problem and was disabled, just don't "enable" it for the
2233 * process
2234@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2235 vdso_base = get_unmapped_area(NULL, vdso_base,
2236 (vdso_pages << PAGE_SHIFT) +
2237 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2238- 0, 0);
2239+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
2240 if (IS_ERR_VALUE(vdso_base)) {
2241 rc = vdso_base;
2242 goto fail_mmapsem;
2243diff -urNp linux-3.0.4/arch/powerpc/lib/usercopy_64.c linux-3.0.4/arch/powerpc/lib/usercopy_64.c
2244--- linux-3.0.4/arch/powerpc/lib/usercopy_64.c 2011-07-21 22:17:23.000000000 -0400
2245+++ linux-3.0.4/arch/powerpc/lib/usercopy_64.c 2011-08-23 21:47:55.000000000 -0400
2246@@ -9,22 +9,6 @@
2247 #include <linux/module.h>
2248 #include <asm/uaccess.h>
2249
2250-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2251-{
2252- if (likely(access_ok(VERIFY_READ, from, n)))
2253- n = __copy_from_user(to, from, n);
2254- else
2255- memset(to, 0, n);
2256- return n;
2257-}
2258-
2259-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2260-{
2261- if (likely(access_ok(VERIFY_WRITE, to, n)))
2262- n = __copy_to_user(to, from, n);
2263- return n;
2264-}
2265-
2266 unsigned long copy_in_user(void __user *to, const void __user *from,
2267 unsigned long n)
2268 {
2269@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2270 return n;
2271 }
2272
2273-EXPORT_SYMBOL(copy_from_user);
2274-EXPORT_SYMBOL(copy_to_user);
2275 EXPORT_SYMBOL(copy_in_user);
2276
2277diff -urNp linux-3.0.4/arch/powerpc/mm/fault.c linux-3.0.4/arch/powerpc/mm/fault.c
2278--- linux-3.0.4/arch/powerpc/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
2279+++ linux-3.0.4/arch/powerpc/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
2280@@ -32,6 +32,10 @@
2281 #include <linux/perf_event.h>
2282 #include <linux/magic.h>
2283 #include <linux/ratelimit.h>
2284+#include <linux/slab.h>
2285+#include <linux/pagemap.h>
2286+#include <linux/compiler.h>
2287+#include <linux/unistd.h>
2288
2289 #include <asm/firmware.h>
2290 #include <asm/page.h>
2291@@ -43,6 +47,7 @@
2292 #include <asm/tlbflush.h>
2293 #include <asm/siginfo.h>
2294 #include <mm/mmu_decl.h>
2295+#include <asm/ptrace.h>
2296
2297 #ifdef CONFIG_KPROBES
2298 static inline int notify_page_fault(struct pt_regs *regs)
2299@@ -66,6 +71,33 @@ static inline int notify_page_fault(stru
2300 }
2301 #endif
2302
2303+#ifdef CONFIG_PAX_PAGEEXEC
2304+/*
2305+ * PaX: decide what to do with offenders (regs->nip = fault address)
2306+ *
2307+ * returns 1 when task should be killed
2308+ */
2309+static int pax_handle_fetch_fault(struct pt_regs *regs)
2310+{
2311+ return 1;
2312+}
2313+
2314+void pax_report_insns(void *pc, void *sp)
2315+{
2316+ unsigned long i;
2317+
2318+ printk(KERN_ERR "PAX: bytes at PC: ");
2319+ for (i = 0; i < 5; i++) {
2320+ unsigned int c;
2321+ if (get_user(c, (unsigned int __user *)pc+i))
2322+ printk(KERN_CONT "???????? ");
2323+ else
2324+ printk(KERN_CONT "%08x ", c);
2325+ }
2326+ printk("\n");
2327+}
2328+#endif
2329+
2330 /*
2331 * Check whether the instruction at regs->nip is a store using
2332 * an update addressing form which will update r1.
2333@@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_re
2334 * indicate errors in DSISR but can validly be set in SRR1.
2335 */
2336 if (trap == 0x400)
2337- error_code &= 0x48200000;
2338+ error_code &= 0x58200000;
2339 else
2340 is_write = error_code & DSISR_ISSTORE;
2341 #else
2342@@ -259,7 +291,7 @@ good_area:
2343 * "undefined". Of those that can be set, this is the only
2344 * one which seems bad.
2345 */
2346- if (error_code & 0x10000000)
2347+ if (error_code & DSISR_GUARDED)
2348 /* Guarded storage error. */
2349 goto bad_area;
2350 #endif /* CONFIG_8xx */
2351@@ -274,7 +306,7 @@ good_area:
2352 * processors use the same I/D cache coherency mechanism
2353 * as embedded.
2354 */
2355- if (error_code & DSISR_PROTFAULT)
2356+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2357 goto bad_area;
2358 #endif /* CONFIG_PPC_STD_MMU */
2359
2360@@ -343,6 +375,23 @@ bad_area:
2361 bad_area_nosemaphore:
2362 /* User mode accesses cause a SIGSEGV */
2363 if (user_mode(regs)) {
2364+
2365+#ifdef CONFIG_PAX_PAGEEXEC
2366+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2367+#ifdef CONFIG_PPC_STD_MMU
2368+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2369+#else
2370+ if (is_exec && regs->nip == address) {
2371+#endif
2372+ switch (pax_handle_fetch_fault(regs)) {
2373+ }
2374+
2375+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2376+ do_group_exit(SIGKILL);
2377+ }
2378+ }
2379+#endif
2380+
2381 _exception(SIGSEGV, regs, code, address);
2382 return 0;
2383 }
2384diff -urNp linux-3.0.4/arch/powerpc/mm/mmap_64.c linux-3.0.4/arch/powerpc/mm/mmap_64.c
2385--- linux-3.0.4/arch/powerpc/mm/mmap_64.c 2011-07-21 22:17:23.000000000 -0400
2386+++ linux-3.0.4/arch/powerpc/mm/mmap_64.c 2011-08-23 21:47:55.000000000 -0400
2387@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2388 */
2389 if (mmap_is_legacy()) {
2390 mm->mmap_base = TASK_UNMAPPED_BASE;
2391+
2392+#ifdef CONFIG_PAX_RANDMMAP
2393+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2394+ mm->mmap_base += mm->delta_mmap;
2395+#endif
2396+
2397 mm->get_unmapped_area = arch_get_unmapped_area;
2398 mm->unmap_area = arch_unmap_area;
2399 } else {
2400 mm->mmap_base = mmap_base();
2401+
2402+#ifdef CONFIG_PAX_RANDMMAP
2403+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2404+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2405+#endif
2406+
2407 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2408 mm->unmap_area = arch_unmap_area_topdown;
2409 }
2410diff -urNp linux-3.0.4/arch/powerpc/mm/slice.c linux-3.0.4/arch/powerpc/mm/slice.c
2411--- linux-3.0.4/arch/powerpc/mm/slice.c 2011-07-21 22:17:23.000000000 -0400
2412+++ linux-3.0.4/arch/powerpc/mm/slice.c 2011-08-23 21:47:55.000000000 -0400
2413@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
2414 if ((mm->task_size - len) < addr)
2415 return 0;
2416 vma = find_vma(mm, addr);
2417- return (!vma || (addr + len) <= vma->vm_start);
2418+ return check_heap_stack_gap(vma, addr, len);
2419 }
2420
2421 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2422@@ -256,7 +256,7 @@ full_search:
2423 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2424 continue;
2425 }
2426- if (!vma || addr + len <= vma->vm_start) {
2427+ if (check_heap_stack_gap(vma, addr, len)) {
2428 /*
2429 * Remember the place where we stopped the search:
2430 */
2431@@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
2432 }
2433 }
2434
2435- addr = mm->mmap_base;
2436- while (addr > len) {
2437+ if (mm->mmap_base < len)
2438+ addr = -ENOMEM;
2439+ else
2440+ addr = mm->mmap_base - len;
2441+
2442+ while (!IS_ERR_VALUE(addr)) {
2443 /* Go down by chunk size */
2444- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2445+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
2446
2447 /* Check for hit with different page size */
2448 mask = slice_range_to_mask(addr, len);
2449@@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
2450 * return with success:
2451 */
2452 vma = find_vma(mm, addr);
2453- if (!vma || (addr + len) <= vma->vm_start) {
2454+ if (check_heap_stack_gap(vma, addr, len)) {
2455 /* remember the address as a hint for next time */
2456 if (use_cache)
2457 mm->free_area_cache = addr;
2458@@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
2459 mm->cached_hole_size = vma->vm_start - addr;
2460
2461 /* try just below the current vma->vm_start */
2462- addr = vma->vm_start;
2463+ addr = skip_heap_stack_gap(vma, len);
2464 }
2465
2466 /*
2467@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
2468 if (fixed && addr > (mm->task_size - len))
2469 return -EINVAL;
2470
2471+#ifdef CONFIG_PAX_RANDMMAP
2472+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2473+ addr = 0;
2474+#endif
2475+
2476 /* If hint, make sure it matches our alignment restrictions */
2477 if (!fixed && addr) {
2478 addr = _ALIGN_UP(addr, 1ul << pshift);
2479diff -urNp linux-3.0.4/arch/s390/include/asm/elf.h linux-3.0.4/arch/s390/include/asm/elf.h
2480--- linux-3.0.4/arch/s390/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
2481+++ linux-3.0.4/arch/s390/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
2482@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2483 the loader. We need to make sure that it is out of the way of the program
2484 that it will "exec", and that there is sufficient room for the brk. */
2485
2486-extern unsigned long randomize_et_dyn(unsigned long base);
2487-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2488+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2489+
2490+#ifdef CONFIG_PAX_ASLR
2491+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2492+
2493+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2494+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2495+#endif
2496
2497 /* This yields a mask that user programs can use to figure out what
2498 instruction set this CPU supports. */
2499@@ -210,7 +216,4 @@ struct linux_binprm;
2500 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2501 int arch_setup_additional_pages(struct linux_binprm *, int);
2502
2503-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2504-#define arch_randomize_brk arch_randomize_brk
2505-
2506 #endif
2507diff -urNp linux-3.0.4/arch/s390/include/asm/system.h linux-3.0.4/arch/s390/include/asm/system.h
2508--- linux-3.0.4/arch/s390/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
2509+++ linux-3.0.4/arch/s390/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
2510@@ -255,7 +255,7 @@ extern void (*_machine_restart)(char *co
2511 extern void (*_machine_halt)(void);
2512 extern void (*_machine_power_off)(void);
2513
2514-extern unsigned long arch_align_stack(unsigned long sp);
2515+#define arch_align_stack(x) ((x) & ~0xfUL)
2516
2517 static inline int tprot(unsigned long addr)
2518 {
2519diff -urNp linux-3.0.4/arch/s390/include/asm/uaccess.h linux-3.0.4/arch/s390/include/asm/uaccess.h
2520--- linux-3.0.4/arch/s390/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
2521+++ linux-3.0.4/arch/s390/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
2522@@ -235,6 +235,10 @@ static inline unsigned long __must_check
2523 copy_to_user(void __user *to, const void *from, unsigned long n)
2524 {
2525 might_fault();
2526+
2527+ if ((long)n < 0)
2528+ return n;
2529+
2530 if (access_ok(VERIFY_WRITE, to, n))
2531 n = __copy_to_user(to, from, n);
2532 return n;
2533@@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void
2534 static inline unsigned long __must_check
2535 __copy_from_user(void *to, const void __user *from, unsigned long n)
2536 {
2537+ if ((long)n < 0)
2538+ return n;
2539+
2540 if (__builtin_constant_p(n) && (n <= 256))
2541 return uaccess.copy_from_user_small(n, from, to);
2542 else
2543@@ -294,6 +301,10 @@ copy_from_user(void *to, const void __us
2544 unsigned int sz = __compiletime_object_size(to);
2545
2546 might_fault();
2547+
2548+ if ((long)n < 0)
2549+ return n;
2550+
2551 if (unlikely(sz != -1 && sz < n)) {
2552 copy_from_user_overflow();
2553 return n;
2554diff -urNp linux-3.0.4/arch/s390/kernel/module.c linux-3.0.4/arch/s390/kernel/module.c
2555--- linux-3.0.4/arch/s390/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
2556+++ linux-3.0.4/arch/s390/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
2557@@ -168,11 +168,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
2558
2559 /* Increase core size by size of got & plt and set start
2560 offsets for got and plt. */
2561- me->core_size = ALIGN(me->core_size, 4);
2562- me->arch.got_offset = me->core_size;
2563- me->core_size += me->arch.got_size;
2564- me->arch.plt_offset = me->core_size;
2565- me->core_size += me->arch.plt_size;
2566+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
2567+ me->arch.got_offset = me->core_size_rw;
2568+ me->core_size_rw += me->arch.got_size;
2569+ me->arch.plt_offset = me->core_size_rx;
2570+ me->core_size_rx += me->arch.plt_size;
2571 return 0;
2572 }
2573
2574@@ -258,7 +258,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2575 if (info->got_initialized == 0) {
2576 Elf_Addr *gotent;
2577
2578- gotent = me->module_core + me->arch.got_offset +
2579+ gotent = me->module_core_rw + me->arch.got_offset +
2580 info->got_offset;
2581 *gotent = val;
2582 info->got_initialized = 1;
2583@@ -282,7 +282,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2584 else if (r_type == R_390_GOTENT ||
2585 r_type == R_390_GOTPLTENT)
2586 *(unsigned int *) loc =
2587- (val + (Elf_Addr) me->module_core - loc) >> 1;
2588+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
2589 else if (r_type == R_390_GOT64 ||
2590 r_type == R_390_GOTPLT64)
2591 *(unsigned long *) loc = val;
2592@@ -296,7 +296,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2593 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
2594 if (info->plt_initialized == 0) {
2595 unsigned int *ip;
2596- ip = me->module_core + me->arch.plt_offset +
2597+ ip = me->module_core_rx + me->arch.plt_offset +
2598 info->plt_offset;
2599 #ifndef CONFIG_64BIT
2600 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
2601@@ -321,7 +321,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2602 val - loc + 0xffffUL < 0x1ffffeUL) ||
2603 (r_type == R_390_PLT32DBL &&
2604 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
2605- val = (Elf_Addr) me->module_core +
2606+ val = (Elf_Addr) me->module_core_rx +
2607 me->arch.plt_offset +
2608 info->plt_offset;
2609 val += rela->r_addend - loc;
2610@@ -343,7 +343,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2611 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
2612 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
2613 val = val + rela->r_addend -
2614- ((Elf_Addr) me->module_core + me->arch.got_offset);
2615+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
2616 if (r_type == R_390_GOTOFF16)
2617 *(unsigned short *) loc = val;
2618 else if (r_type == R_390_GOTOFF32)
2619@@ -353,7 +353,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2620 break;
2621 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
2622 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
2623- val = (Elf_Addr) me->module_core + me->arch.got_offset +
2624+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
2625 rela->r_addend - loc;
2626 if (r_type == R_390_GOTPC)
2627 *(unsigned int *) loc = val;
2628diff -urNp linux-3.0.4/arch/s390/kernel/process.c linux-3.0.4/arch/s390/kernel/process.c
2629--- linux-3.0.4/arch/s390/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2630+++ linux-3.0.4/arch/s390/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
2631@@ -319,39 +319,3 @@ unsigned long get_wchan(struct task_stru
2632 }
2633 return 0;
2634 }
2635-
2636-unsigned long arch_align_stack(unsigned long sp)
2637-{
2638- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2639- sp -= get_random_int() & ~PAGE_MASK;
2640- return sp & ~0xf;
2641-}
2642-
2643-static inline unsigned long brk_rnd(void)
2644-{
2645- /* 8MB for 32bit, 1GB for 64bit */
2646- if (is_32bit_task())
2647- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
2648- else
2649- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
2650-}
2651-
2652-unsigned long arch_randomize_brk(struct mm_struct *mm)
2653-{
2654- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
2655-
2656- if (ret < mm->brk)
2657- return mm->brk;
2658- return ret;
2659-}
2660-
2661-unsigned long randomize_et_dyn(unsigned long base)
2662-{
2663- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2664-
2665- if (!(current->flags & PF_RANDOMIZE))
2666- return base;
2667- if (ret < base)
2668- return base;
2669- return ret;
2670-}
2671diff -urNp linux-3.0.4/arch/s390/kernel/setup.c linux-3.0.4/arch/s390/kernel/setup.c
2672--- linux-3.0.4/arch/s390/kernel/setup.c 2011-07-21 22:17:23.000000000 -0400
2673+++ linux-3.0.4/arch/s390/kernel/setup.c 2011-08-23 21:47:55.000000000 -0400
2674@@ -271,7 +271,7 @@ static int __init early_parse_mem(char *
2675 }
2676 early_param("mem", early_parse_mem);
2677
2678-unsigned int user_mode = HOME_SPACE_MODE;
2679+unsigned int user_mode = SECONDARY_SPACE_MODE;
2680 EXPORT_SYMBOL_GPL(user_mode);
2681
2682 static int set_amode_and_uaccess(unsigned long user_amode,
2683diff -urNp linux-3.0.4/arch/s390/mm/mmap.c linux-3.0.4/arch/s390/mm/mmap.c
2684--- linux-3.0.4/arch/s390/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
2685+++ linux-3.0.4/arch/s390/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
2686@@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_str
2687 */
2688 if (mmap_is_legacy()) {
2689 mm->mmap_base = TASK_UNMAPPED_BASE;
2690+
2691+#ifdef CONFIG_PAX_RANDMMAP
2692+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2693+ mm->mmap_base += mm->delta_mmap;
2694+#endif
2695+
2696 mm->get_unmapped_area = arch_get_unmapped_area;
2697 mm->unmap_area = arch_unmap_area;
2698 } else {
2699 mm->mmap_base = mmap_base();
2700+
2701+#ifdef CONFIG_PAX_RANDMMAP
2702+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2703+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2704+#endif
2705+
2706 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2707 mm->unmap_area = arch_unmap_area_topdown;
2708 }
2709@@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_str
2710 */
2711 if (mmap_is_legacy()) {
2712 mm->mmap_base = TASK_UNMAPPED_BASE;
2713+
2714+#ifdef CONFIG_PAX_RANDMMAP
2715+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2716+ mm->mmap_base += mm->delta_mmap;
2717+#endif
2718+
2719 mm->get_unmapped_area = s390_get_unmapped_area;
2720 mm->unmap_area = arch_unmap_area;
2721 } else {
2722 mm->mmap_base = mmap_base();
2723+
2724+#ifdef CONFIG_PAX_RANDMMAP
2725+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2726+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2727+#endif
2728+
2729 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
2730 mm->unmap_area = arch_unmap_area_topdown;
2731 }
2732diff -urNp linux-3.0.4/arch/score/include/asm/system.h linux-3.0.4/arch/score/include/asm/system.h
2733--- linux-3.0.4/arch/score/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
2734+++ linux-3.0.4/arch/score/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
2735@@ -17,7 +17,7 @@ do { \
2736 #define finish_arch_switch(prev) do {} while (0)
2737
2738 typedef void (*vi_handler_t)(void);
2739-extern unsigned long arch_align_stack(unsigned long sp);
2740+#define arch_align_stack(x) (x)
2741
2742 #define mb() barrier()
2743 #define rmb() barrier()
2744diff -urNp linux-3.0.4/arch/score/kernel/process.c linux-3.0.4/arch/score/kernel/process.c
2745--- linux-3.0.4/arch/score/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2746+++ linux-3.0.4/arch/score/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
2747@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
2748
2749 return task_pt_regs(task)->cp0_epc;
2750 }
2751-
2752-unsigned long arch_align_stack(unsigned long sp)
2753-{
2754- return sp;
2755-}
2756diff -urNp linux-3.0.4/arch/sh/mm/mmap.c linux-3.0.4/arch/sh/mm/mmap.c
2757--- linux-3.0.4/arch/sh/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
2758+++ linux-3.0.4/arch/sh/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
2759@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
2760 addr = PAGE_ALIGN(addr);
2761
2762 vma = find_vma(mm, addr);
2763- if (TASK_SIZE - len >= addr &&
2764- (!vma || addr + len <= vma->vm_start))
2765+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2766 return addr;
2767 }
2768
2769@@ -106,7 +105,7 @@ full_search:
2770 }
2771 return -ENOMEM;
2772 }
2773- if (likely(!vma || addr + len <= vma->vm_start)) {
2774+ if (likely(check_heap_stack_gap(vma, addr, len))) {
2775 /*
2776 * Remember the place where we stopped the search:
2777 */
2778@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
2779 addr = PAGE_ALIGN(addr);
2780
2781 vma = find_vma(mm, addr);
2782- if (TASK_SIZE - len >= addr &&
2783- (!vma || addr + len <= vma->vm_start))
2784+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2785 return addr;
2786 }
2787
2788@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
2789 /* make sure it can fit in the remaining address space */
2790 if (likely(addr > len)) {
2791 vma = find_vma(mm, addr-len);
2792- if (!vma || addr <= vma->vm_start) {
2793+ if (check_heap_stack_gap(vma, addr - len, len)) {
2794 /* remember the address as a hint for next time */
2795 return (mm->free_area_cache = addr-len);
2796 }
2797@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
2798 if (unlikely(mm->mmap_base < len))
2799 goto bottomup;
2800
2801- addr = mm->mmap_base-len;
2802- if (do_colour_align)
2803- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2804+ addr = mm->mmap_base - len;
2805
2806 do {
2807+ if (do_colour_align)
2808+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2809 /*
2810 * Lookup failure means no vma is above this address,
2811 * else if new region fits below vma->vm_start,
2812 * return with success:
2813 */
2814 vma = find_vma(mm, addr);
2815- if (likely(!vma || addr+len <= vma->vm_start)) {
2816+ if (likely(check_heap_stack_gap(vma, addr, len))) {
2817 /* remember the address as a hint for next time */
2818 return (mm->free_area_cache = addr);
2819 }
2820@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
2821 mm->cached_hole_size = vma->vm_start - addr;
2822
2823 /* try just below the current vma->vm_start */
2824- addr = vma->vm_start-len;
2825- if (do_colour_align)
2826- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2827- } while (likely(len < vma->vm_start));
2828+ addr = skip_heap_stack_gap(vma, len);
2829+ } while (!IS_ERR_VALUE(addr));
2830
2831 bottomup:
2832 /*
2833diff -urNp linux-3.0.4/arch/sparc/include/asm/atomic_64.h linux-3.0.4/arch/sparc/include/asm/atomic_64.h
2834--- linux-3.0.4/arch/sparc/include/asm/atomic_64.h 2011-07-21 22:17:23.000000000 -0400
2835+++ linux-3.0.4/arch/sparc/include/asm/atomic_64.h 2011-08-23 21:48:14.000000000 -0400
2836@@ -14,18 +14,40 @@
2837 #define ATOMIC64_INIT(i) { (i) }
2838
2839 #define atomic_read(v) (*(volatile int *)&(v)->counter)
2840+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
2841+{
2842+ return v->counter;
2843+}
2844 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
2845+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
2846+{
2847+ return v->counter;
2848+}
2849
2850 #define atomic_set(v, i) (((v)->counter) = i)
2851+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
2852+{
2853+ v->counter = i;
2854+}
2855 #define atomic64_set(v, i) (((v)->counter) = i)
2856+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
2857+{
2858+ v->counter = i;
2859+}
2860
2861 extern void atomic_add(int, atomic_t *);
2862+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
2863 extern void atomic64_add(long, atomic64_t *);
2864+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
2865 extern void atomic_sub(int, atomic_t *);
2866+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
2867 extern void atomic64_sub(long, atomic64_t *);
2868+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
2869
2870 extern int atomic_add_ret(int, atomic_t *);
2871+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
2872 extern long atomic64_add_ret(long, atomic64_t *);
2873+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
2874 extern int atomic_sub_ret(int, atomic_t *);
2875 extern long atomic64_sub_ret(long, atomic64_t *);
2876
2877@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
2878 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
2879
2880 #define atomic_inc_return(v) atomic_add_ret(1, v)
2881+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
2882+{
2883+ return atomic_add_ret_unchecked(1, v);
2884+}
2885 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
2886+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
2887+{
2888+ return atomic64_add_ret_unchecked(1, v);
2889+}
2890
2891 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
2892 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
2893
2894 #define atomic_add_return(i, v) atomic_add_ret(i, v)
2895+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
2896+{
2897+ return atomic_add_ret_unchecked(i, v);
2898+}
2899 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
2900+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
2901+{
2902+ return atomic64_add_ret_unchecked(i, v);
2903+}
2904
2905 /*
2906 * atomic_inc_and_test - increment and test
2907@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomi
2908 * other cases.
2909 */
2910 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
2911+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
2912+{
2913+ return atomic_inc_return_unchecked(v) == 0;
2914+}
2915 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
2916
2917 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
2918@@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomi
2919 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
2920
2921 #define atomic_inc(v) atomic_add(1, v)
2922+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
2923+{
2924+ atomic_add_unchecked(1, v);
2925+}
2926 #define atomic64_inc(v) atomic64_add(1, v)
2927+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
2928+{
2929+ atomic64_add_unchecked(1, v);
2930+}
2931
2932 #define atomic_dec(v) atomic_sub(1, v)
2933+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
2934+{
2935+ atomic_sub_unchecked(1, v);
2936+}
2937 #define atomic64_dec(v) atomic64_sub(1, v)
2938+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
2939+{
2940+ atomic64_sub_unchecked(1, v);
2941+}
2942
2943 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
2944 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
2945
2946 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
2947+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
2948+{
2949+ return cmpxchg(&v->counter, old, new);
2950+}
2951 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
2952+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
2953+{
2954+ return xchg(&v->counter, new);
2955+}
2956
2957 static inline int atomic_add_unless(atomic_t *v, int a, int u)
2958 {
2959- int c, old;
2960+ int c, old, new;
2961 c = atomic_read(v);
2962 for (;;) {
2963- if (unlikely(c == (u)))
2964+ if (unlikely(c == u))
2965 break;
2966- old = atomic_cmpxchg((v), c, c + (a));
2967+
2968+ asm volatile("addcc %2, %0, %0\n"
2969+
2970+#ifdef CONFIG_PAX_REFCOUNT
2971+ "tvs %%icc, 6\n"
2972+#endif
2973+
2974+ : "=r" (new)
2975+ : "0" (c), "ir" (a)
2976+ : "cc");
2977+
2978+ old = atomic_cmpxchg(v, c, new);
2979 if (likely(old == c))
2980 break;
2981 c = old;
2982 }
2983- return c != (u);
2984+ return c != u;
2985 }
2986
2987 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
2988@@ -90,20 +167,35 @@ static inline int atomic_add_unless(atom
2989 #define atomic64_cmpxchg(v, o, n) \
2990 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
2991 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
2992+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
2993+{
2994+ return xchg(&v->counter, new);
2995+}
2996
2997 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
2998 {
2999- long c, old;
3000+ long c, old, new;
3001 c = atomic64_read(v);
3002 for (;;) {
3003- if (unlikely(c == (u)))
3004+ if (unlikely(c == u))
3005 break;
3006- old = atomic64_cmpxchg((v), c, c + (a));
3007+
3008+ asm volatile("addcc %2, %0, %0\n"
3009+
3010+#ifdef CONFIG_PAX_REFCOUNT
3011+ "tvs %%xcc, 6\n"
3012+#endif
3013+
3014+ : "=r" (new)
3015+ : "0" (c), "ir" (a)
3016+ : "cc");
3017+
3018+ old = atomic64_cmpxchg(v, c, new);
3019 if (likely(old == c))
3020 break;
3021 c = old;
3022 }
3023- return c != (u);
3024+ return c != u;
3025 }
3026
3027 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3028diff -urNp linux-3.0.4/arch/sparc/include/asm/cache.h linux-3.0.4/arch/sparc/include/asm/cache.h
3029--- linux-3.0.4/arch/sparc/include/asm/cache.h 2011-07-21 22:17:23.000000000 -0400
3030+++ linux-3.0.4/arch/sparc/include/asm/cache.h 2011-08-23 21:47:55.000000000 -0400
3031@@ -10,7 +10,7 @@
3032 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3033
3034 #define L1_CACHE_SHIFT 5
3035-#define L1_CACHE_BYTES 32
3036+#define L1_CACHE_BYTES 32UL
3037
3038 #ifdef CONFIG_SPARC32
3039 #define SMP_CACHE_BYTES_SHIFT 5
3040diff -urNp linux-3.0.4/arch/sparc/include/asm/elf_32.h linux-3.0.4/arch/sparc/include/asm/elf_32.h
3041--- linux-3.0.4/arch/sparc/include/asm/elf_32.h 2011-07-21 22:17:23.000000000 -0400
3042+++ linux-3.0.4/arch/sparc/include/asm/elf_32.h 2011-08-23 21:47:55.000000000 -0400
3043@@ -114,6 +114,13 @@ typedef struct {
3044
3045 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3046
3047+#ifdef CONFIG_PAX_ASLR
3048+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3049+
3050+#define PAX_DELTA_MMAP_LEN 16
3051+#define PAX_DELTA_STACK_LEN 16
3052+#endif
3053+
3054 /* This yields a mask that user programs can use to figure out what
3055 instruction set this cpu supports. This can NOT be done in userspace
3056 on Sparc. */
3057diff -urNp linux-3.0.4/arch/sparc/include/asm/elf_64.h linux-3.0.4/arch/sparc/include/asm/elf_64.h
3058--- linux-3.0.4/arch/sparc/include/asm/elf_64.h 2011-09-02 18:11:21.000000000 -0400
3059+++ linux-3.0.4/arch/sparc/include/asm/elf_64.h 2011-08-23 21:47:55.000000000 -0400
3060@@ -180,6 +180,13 @@ typedef struct {
3061 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3062 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3063
3064+#ifdef CONFIG_PAX_ASLR
3065+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3066+
3067+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3068+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3069+#endif
3070+
3071 extern unsigned long sparc64_elf_hwcap;
3072 #define ELF_HWCAP sparc64_elf_hwcap
3073
3074diff -urNp linux-3.0.4/arch/sparc/include/asm/pgtable_32.h linux-3.0.4/arch/sparc/include/asm/pgtable_32.h
3075--- linux-3.0.4/arch/sparc/include/asm/pgtable_32.h 2011-07-21 22:17:23.000000000 -0400
3076+++ linux-3.0.4/arch/sparc/include/asm/pgtable_32.h 2011-08-23 21:47:55.000000000 -0400
3077@@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3078 BTFIXUPDEF_INT(page_none)
3079 BTFIXUPDEF_INT(page_copy)
3080 BTFIXUPDEF_INT(page_readonly)
3081+
3082+#ifdef CONFIG_PAX_PAGEEXEC
3083+BTFIXUPDEF_INT(page_shared_noexec)
3084+BTFIXUPDEF_INT(page_copy_noexec)
3085+BTFIXUPDEF_INT(page_readonly_noexec)
3086+#endif
3087+
3088 BTFIXUPDEF_INT(page_kernel)
3089
3090 #define PMD_SHIFT SUN4C_PMD_SHIFT
3091@@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3092 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3093 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3094
3095+#ifdef CONFIG_PAX_PAGEEXEC
3096+extern pgprot_t PAGE_SHARED_NOEXEC;
3097+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3098+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3099+#else
3100+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3101+# define PAGE_COPY_NOEXEC PAGE_COPY
3102+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3103+#endif
3104+
3105 extern unsigned long page_kernel;
3106
3107 #ifdef MODULE
3108diff -urNp linux-3.0.4/arch/sparc/include/asm/pgtsrmmu.h linux-3.0.4/arch/sparc/include/asm/pgtsrmmu.h
3109--- linux-3.0.4/arch/sparc/include/asm/pgtsrmmu.h 2011-07-21 22:17:23.000000000 -0400
3110+++ linux-3.0.4/arch/sparc/include/asm/pgtsrmmu.h 2011-08-23 21:47:55.000000000 -0400
3111@@ -115,6 +115,13 @@
3112 SRMMU_EXEC | SRMMU_REF)
3113 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3114 SRMMU_EXEC | SRMMU_REF)
3115+
3116+#ifdef CONFIG_PAX_PAGEEXEC
3117+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3118+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3119+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3120+#endif
3121+
3122 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3123 SRMMU_DIRTY | SRMMU_REF)
3124
3125diff -urNp linux-3.0.4/arch/sparc/include/asm/spinlock_64.h linux-3.0.4/arch/sparc/include/asm/spinlock_64.h
3126--- linux-3.0.4/arch/sparc/include/asm/spinlock_64.h 2011-07-21 22:17:23.000000000 -0400
3127+++ linux-3.0.4/arch/sparc/include/asm/spinlock_64.h 2011-08-23 21:47:55.000000000 -0400
3128@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(
3129
3130 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3131
3132-static void inline arch_read_lock(arch_rwlock_t *lock)
3133+static inline void arch_read_lock(arch_rwlock_t *lock)
3134 {
3135 unsigned long tmp1, tmp2;
3136
3137 __asm__ __volatile__ (
3138 "1: ldsw [%2], %0\n"
3139 " brlz,pn %0, 2f\n"
3140-"4: add %0, 1, %1\n"
3141+"4: addcc %0, 1, %1\n"
3142+
3143+#ifdef CONFIG_PAX_REFCOUNT
3144+" tvs %%icc, 6\n"
3145+#endif
3146+
3147 " cas [%2], %0, %1\n"
3148 " cmp %0, %1\n"
3149 " bne,pn %%icc, 1b\n"
3150@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_r
3151 " .previous"
3152 : "=&r" (tmp1), "=&r" (tmp2)
3153 : "r" (lock)
3154- : "memory");
3155+ : "memory", "cc");
3156 }
3157
3158-static int inline arch_read_trylock(arch_rwlock_t *lock)
3159+static inline int arch_read_trylock(arch_rwlock_t *lock)
3160 {
3161 int tmp1, tmp2;
3162
3163@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch
3164 "1: ldsw [%2], %0\n"
3165 " brlz,a,pn %0, 2f\n"
3166 " mov 0, %0\n"
3167-" add %0, 1, %1\n"
3168+" addcc %0, 1, %1\n"
3169+
3170+#ifdef CONFIG_PAX_REFCOUNT
3171+" tvs %%icc, 6\n"
3172+#endif
3173+
3174 " cas [%2], %0, %1\n"
3175 " cmp %0, %1\n"
3176 " bne,pn %%icc, 1b\n"
3177@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch
3178 return tmp1;
3179 }
3180
3181-static void inline arch_read_unlock(arch_rwlock_t *lock)
3182+static inline void arch_read_unlock(arch_rwlock_t *lock)
3183 {
3184 unsigned long tmp1, tmp2;
3185
3186 __asm__ __volatile__(
3187 "1: lduw [%2], %0\n"
3188-" sub %0, 1, %1\n"
3189+" subcc %0, 1, %1\n"
3190+
3191+#ifdef CONFIG_PAX_REFCOUNT
3192+" tvs %%icc, 6\n"
3193+#endif
3194+
3195 " cas [%2], %0, %1\n"
3196 " cmp %0, %1\n"
3197 " bne,pn %%xcc, 1b\n"
3198@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch
3199 : "memory");
3200 }
3201
3202-static void inline arch_write_lock(arch_rwlock_t *lock)
3203+static inline void arch_write_lock(arch_rwlock_t *lock)
3204 {
3205 unsigned long mask, tmp1, tmp2;
3206
3207@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_
3208 : "memory");
3209 }
3210
3211-static void inline arch_write_unlock(arch_rwlock_t *lock)
3212+static inline void arch_write_unlock(arch_rwlock_t *lock)
3213 {
3214 __asm__ __volatile__(
3215 " stw %%g0, [%0]"
3216@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arc
3217 : "memory");
3218 }
3219
3220-static int inline arch_write_trylock(arch_rwlock_t *lock)
3221+static inline int arch_write_trylock(arch_rwlock_t *lock)
3222 {
3223 unsigned long mask, tmp1, tmp2, result;
3224
3225diff -urNp linux-3.0.4/arch/sparc/include/asm/thread_info_32.h linux-3.0.4/arch/sparc/include/asm/thread_info_32.h
3226--- linux-3.0.4/arch/sparc/include/asm/thread_info_32.h 2011-07-21 22:17:23.000000000 -0400
3227+++ linux-3.0.4/arch/sparc/include/asm/thread_info_32.h 2011-08-23 21:47:55.000000000 -0400
3228@@ -50,6 +50,8 @@ struct thread_info {
3229 unsigned long w_saved;
3230
3231 struct restart_block restart_block;
3232+
3233+ unsigned long lowest_stack;
3234 };
3235
3236 /*
3237diff -urNp linux-3.0.4/arch/sparc/include/asm/thread_info_64.h linux-3.0.4/arch/sparc/include/asm/thread_info_64.h
3238--- linux-3.0.4/arch/sparc/include/asm/thread_info_64.h 2011-07-21 22:17:23.000000000 -0400
3239+++ linux-3.0.4/arch/sparc/include/asm/thread_info_64.h 2011-08-23 21:47:55.000000000 -0400
3240@@ -63,6 +63,8 @@ struct thread_info {
3241 struct pt_regs *kern_una_regs;
3242 unsigned int kern_una_insn;
3243
3244+ unsigned long lowest_stack;
3245+
3246 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3247 };
3248
3249diff -urNp linux-3.0.4/arch/sparc/include/asm/uaccess_32.h linux-3.0.4/arch/sparc/include/asm/uaccess_32.h
3250--- linux-3.0.4/arch/sparc/include/asm/uaccess_32.h 2011-07-21 22:17:23.000000000 -0400
3251+++ linux-3.0.4/arch/sparc/include/asm/uaccess_32.h 2011-08-23 21:47:55.000000000 -0400
3252@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
3253
3254 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3255 {
3256- if (n && __access_ok((unsigned long) to, n))
3257+ if ((long)n < 0)
3258+ return n;
3259+
3260+ if (n && __access_ok((unsigned long) to, n)) {
3261+ if (!__builtin_constant_p(n))
3262+ check_object_size(from, n, true);
3263 return __copy_user(to, (__force void __user *) from, n);
3264- else
3265+ } else
3266 return n;
3267 }
3268
3269 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3270 {
3271+ if ((long)n < 0)
3272+ return n;
3273+
3274+ if (!__builtin_constant_p(n))
3275+ check_object_size(from, n, true);
3276+
3277 return __copy_user(to, (__force void __user *) from, n);
3278 }
3279
3280 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3281 {
3282- if (n && __access_ok((unsigned long) from, n))
3283+ if ((long)n < 0)
3284+ return n;
3285+
3286+ if (n && __access_ok((unsigned long) from, n)) {
3287+ if (!__builtin_constant_p(n))
3288+ check_object_size(to, n, false);
3289 return __copy_user((__force void __user *) to, from, n);
3290- else
3291+ } else
3292 return n;
3293 }
3294
3295 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3296 {
3297+ if ((long)n < 0)
3298+ return n;
3299+
3300 return __copy_user((__force void __user *) to, from, n);
3301 }
3302
3303diff -urNp linux-3.0.4/arch/sparc/include/asm/uaccess_64.h linux-3.0.4/arch/sparc/include/asm/uaccess_64.h
3304--- linux-3.0.4/arch/sparc/include/asm/uaccess_64.h 2011-07-21 22:17:23.000000000 -0400
3305+++ linux-3.0.4/arch/sparc/include/asm/uaccess_64.h 2011-08-23 21:47:55.000000000 -0400
3306@@ -10,6 +10,7 @@
3307 #include <linux/compiler.h>
3308 #include <linux/string.h>
3309 #include <linux/thread_info.h>
3310+#include <linux/kernel.h>
3311 #include <asm/asi.h>
3312 #include <asm/system.h>
3313 #include <asm/spitfire.h>
3314@@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixu
3315 static inline unsigned long __must_check
3316 copy_from_user(void *to, const void __user *from, unsigned long size)
3317 {
3318- unsigned long ret = ___copy_from_user(to, from, size);
3319+ unsigned long ret;
3320
3321+ if ((long)size < 0 || size > INT_MAX)
3322+ return size;
3323+
3324+ if (!__builtin_constant_p(size))
3325+ check_object_size(to, size, false);
3326+
3327+ ret = ___copy_from_user(to, from, size);
3328 if (unlikely(ret))
3329 ret = copy_from_user_fixup(to, from, size);
3330
3331@@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(
3332 static inline unsigned long __must_check
3333 copy_to_user(void __user *to, const void *from, unsigned long size)
3334 {
3335- unsigned long ret = ___copy_to_user(to, from, size);
3336+ unsigned long ret;
3337+
3338+ if ((long)size < 0 || size > INT_MAX)
3339+ return size;
3340+
3341+ if (!__builtin_constant_p(size))
3342+ check_object_size(from, size, true);
3343
3344+ ret = ___copy_to_user(to, from, size);
3345 if (unlikely(ret))
3346 ret = copy_to_user_fixup(to, from, size);
3347 return ret;
3348diff -urNp linux-3.0.4/arch/sparc/include/asm/uaccess.h linux-3.0.4/arch/sparc/include/asm/uaccess.h
3349--- linux-3.0.4/arch/sparc/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
3350+++ linux-3.0.4/arch/sparc/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
3351@@ -1,5 +1,13 @@
3352 #ifndef ___ASM_SPARC_UACCESS_H
3353 #define ___ASM_SPARC_UACCESS_H
3354+
3355+#ifdef __KERNEL__
3356+#ifndef __ASSEMBLY__
3357+#include <linux/types.h>
3358+extern void check_object_size(const void *ptr, unsigned long n, bool to);
3359+#endif
3360+#endif
3361+
3362 #if defined(__sparc__) && defined(__arch64__)
3363 #include <asm/uaccess_64.h>
3364 #else
3365diff -urNp linux-3.0.4/arch/sparc/kernel/Makefile linux-3.0.4/arch/sparc/kernel/Makefile
3366--- linux-3.0.4/arch/sparc/kernel/Makefile 2011-07-21 22:17:23.000000000 -0400
3367+++ linux-3.0.4/arch/sparc/kernel/Makefile 2011-08-23 21:47:55.000000000 -0400
3368@@ -3,7 +3,7 @@
3369 #
3370
3371 asflags-y := -ansi
3372-ccflags-y := -Werror
3373+#ccflags-y := -Werror
3374
3375 extra-y := head_$(BITS).o
3376 extra-y += init_task.o
3377diff -urNp linux-3.0.4/arch/sparc/kernel/process_32.c linux-3.0.4/arch/sparc/kernel/process_32.c
3378--- linux-3.0.4/arch/sparc/kernel/process_32.c 2011-07-21 22:17:23.000000000 -0400
3379+++ linux-3.0.4/arch/sparc/kernel/process_32.c 2011-08-23 21:48:14.000000000 -0400
3380@@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
3381 rw->ins[4], rw->ins[5],
3382 rw->ins[6],
3383 rw->ins[7]);
3384- printk("%pS\n", (void *) rw->ins[7]);
3385+ printk("%pA\n", (void *) rw->ins[7]);
3386 rw = (struct reg_window32 *) rw->ins[6];
3387 }
3388 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3389@@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
3390
3391 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3392 r->psr, r->pc, r->npc, r->y, print_tainted());
3393- printk("PC: <%pS>\n", (void *) r->pc);
3394+ printk("PC: <%pA>\n", (void *) r->pc);
3395 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3396 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3397 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3398 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3399 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3400 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3401- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3402+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3403
3404 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3405 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3406@@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk,
3407 rw = (struct reg_window32 *) fp;
3408 pc = rw->ins[7];
3409 printk("[%08lx : ", pc);
3410- printk("%pS ] ", (void *) pc);
3411+ printk("%pA ] ", (void *) pc);
3412 fp = rw->ins[6];
3413 } while (++count < 16);
3414 printk("\n");
3415diff -urNp linux-3.0.4/arch/sparc/kernel/process_64.c linux-3.0.4/arch/sparc/kernel/process_64.c
3416--- linux-3.0.4/arch/sparc/kernel/process_64.c 2011-07-21 22:17:23.000000000 -0400
3417+++ linux-3.0.4/arch/sparc/kernel/process_64.c 2011-08-23 21:48:14.000000000 -0400
3418@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
3419 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3420 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3421 if (regs->tstate & TSTATE_PRIV)
3422- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3423+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3424 }
3425
3426 void show_regs(struct pt_regs *regs)
3427 {
3428 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3429 regs->tpc, regs->tnpc, regs->y, print_tainted());
3430- printk("TPC: <%pS>\n", (void *) regs->tpc);
3431+ printk("TPC: <%pA>\n", (void *) regs->tpc);
3432 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3433 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3434 regs->u_regs[3]);
3435@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3436 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3437 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3438 regs->u_regs[15]);
3439- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3440+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3441 show_regwindow(regs);
3442 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3443 }
3444@@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void
3445 ((tp && tp->task) ? tp->task->pid : -1));
3446
3447 if (gp->tstate & TSTATE_PRIV) {
3448- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3449+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3450 (void *) gp->tpc,
3451 (void *) gp->o7,
3452 (void *) gp->i7,
3453diff -urNp linux-3.0.4/arch/sparc/kernel/sys_sparc_32.c linux-3.0.4/arch/sparc/kernel/sys_sparc_32.c
3454--- linux-3.0.4/arch/sparc/kernel/sys_sparc_32.c 2011-07-21 22:17:23.000000000 -0400
3455+++ linux-3.0.4/arch/sparc/kernel/sys_sparc_32.c 2011-08-23 21:47:55.000000000 -0400
3456@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(str
3457 if (ARCH_SUN4C && len > 0x20000000)
3458 return -ENOMEM;
3459 if (!addr)
3460- addr = TASK_UNMAPPED_BASE;
3461+ addr = current->mm->mmap_base;
3462
3463 if (flags & MAP_SHARED)
3464 addr = COLOUR_ALIGN(addr);
3465@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(str
3466 }
3467 if (TASK_SIZE - PAGE_SIZE - len < addr)
3468 return -ENOMEM;
3469- if (!vmm || addr + len <= vmm->vm_start)
3470+ if (check_heap_stack_gap(vmm, addr, len))
3471 return addr;
3472 addr = vmm->vm_end;
3473 if (flags & MAP_SHARED)
3474diff -urNp linux-3.0.4/arch/sparc/kernel/sys_sparc_64.c linux-3.0.4/arch/sparc/kernel/sys_sparc_64.c
3475--- linux-3.0.4/arch/sparc/kernel/sys_sparc_64.c 2011-07-21 22:17:23.000000000 -0400
3476+++ linux-3.0.4/arch/sparc/kernel/sys_sparc_64.c 2011-08-23 21:47:55.000000000 -0400
3477@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str
3478 /* We do not accept a shared mapping if it would violate
3479 * cache aliasing constraints.
3480 */
3481- if ((flags & MAP_SHARED) &&
3482+ if ((filp || (flags & MAP_SHARED)) &&
3483 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3484 return -EINVAL;
3485 return addr;
3486@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(str
3487 if (filp || (flags & MAP_SHARED))
3488 do_color_align = 1;
3489
3490+#ifdef CONFIG_PAX_RANDMMAP
3491+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3492+#endif
3493+
3494 if (addr) {
3495 if (do_color_align)
3496 addr = COLOUR_ALIGN(addr, pgoff);
3497@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(str
3498 addr = PAGE_ALIGN(addr);
3499
3500 vma = find_vma(mm, addr);
3501- if (task_size - len >= addr &&
3502- (!vma || addr + len <= vma->vm_start))
3503+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3504 return addr;
3505 }
3506
3507 if (len > mm->cached_hole_size) {
3508- start_addr = addr = mm->free_area_cache;
3509+ start_addr = addr = mm->free_area_cache;
3510 } else {
3511- start_addr = addr = TASK_UNMAPPED_BASE;
3512+ start_addr = addr = mm->mmap_base;
3513 mm->cached_hole_size = 0;
3514 }
3515
3516@@ -174,14 +177,14 @@ full_search:
3517 vma = find_vma(mm, VA_EXCLUDE_END);
3518 }
3519 if (unlikely(task_size < addr)) {
3520- if (start_addr != TASK_UNMAPPED_BASE) {
3521- start_addr = addr = TASK_UNMAPPED_BASE;
3522+ if (start_addr != mm->mmap_base) {
3523+ start_addr = addr = mm->mmap_base;
3524 mm->cached_hole_size = 0;
3525 goto full_search;
3526 }
3527 return -ENOMEM;
3528 }
3529- if (likely(!vma || addr + len <= vma->vm_start)) {
3530+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3531 /*
3532 * Remember the place where we stopped the search:
3533 */
3534@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct fi
3535 /* We do not accept a shared mapping if it would violate
3536 * cache aliasing constraints.
3537 */
3538- if ((flags & MAP_SHARED) &&
3539+ if ((filp || (flags & MAP_SHARED)) &&
3540 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3541 return -EINVAL;
3542 return addr;
3543@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct fi
3544 addr = PAGE_ALIGN(addr);
3545
3546 vma = find_vma(mm, addr);
3547- if (task_size - len >= addr &&
3548- (!vma || addr + len <= vma->vm_start))
3549+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3550 return addr;
3551 }
3552
3553@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct fi
3554 /* make sure it can fit in the remaining address space */
3555 if (likely(addr > len)) {
3556 vma = find_vma(mm, addr-len);
3557- if (!vma || addr <= vma->vm_start) {
3558+ if (check_heap_stack_gap(vma, addr - len, len)) {
3559 /* remember the address as a hint for next time */
3560 return (mm->free_area_cache = addr-len);
3561 }
3562@@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct fi
3563 if (unlikely(mm->mmap_base < len))
3564 goto bottomup;
3565
3566- addr = mm->mmap_base-len;
3567- if (do_color_align)
3568- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3569+ addr = mm->mmap_base - len;
3570
3571 do {
3572+ if (do_color_align)
3573+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3574 /*
3575 * Lookup failure means no vma is above this address,
3576 * else if new region fits below vma->vm_start,
3577 * return with success:
3578 */
3579 vma = find_vma(mm, addr);
3580- if (likely(!vma || addr+len <= vma->vm_start)) {
3581+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3582 /* remember the address as a hint for next time */
3583 return (mm->free_area_cache = addr);
3584 }
3585@@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct fi
3586 mm->cached_hole_size = vma->vm_start - addr;
3587
3588 /* try just below the current vma->vm_start */
3589- addr = vma->vm_start-len;
3590- if (do_color_align)
3591- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3592- } while (likely(len < vma->vm_start));
3593+ addr = skip_heap_stack_gap(vma, len);
3594+ } while (!IS_ERR_VALUE(addr));
3595
3596 bottomup:
3597 /*
3598@@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_str
3599 gap == RLIM_INFINITY ||
3600 sysctl_legacy_va_layout) {
3601 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
3602+
3603+#ifdef CONFIG_PAX_RANDMMAP
3604+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3605+ mm->mmap_base += mm->delta_mmap;
3606+#endif
3607+
3608 mm->get_unmapped_area = arch_get_unmapped_area;
3609 mm->unmap_area = arch_unmap_area;
3610 } else {
3611@@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_str
3612 gap = (task_size / 6 * 5);
3613
3614 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
3615+
3616+#ifdef CONFIG_PAX_RANDMMAP
3617+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3618+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3619+#endif
3620+
3621 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3622 mm->unmap_area = arch_unmap_area_topdown;
3623 }
3624diff -urNp linux-3.0.4/arch/sparc/kernel/traps_32.c linux-3.0.4/arch/sparc/kernel/traps_32.c
3625--- linux-3.0.4/arch/sparc/kernel/traps_32.c 2011-07-21 22:17:23.000000000 -0400
3626+++ linux-3.0.4/arch/sparc/kernel/traps_32.c 2011-08-23 21:48:14.000000000 -0400
3627@@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
3628 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
3629 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
3630
3631+extern void gr_handle_kernel_exploit(void);
3632+
3633 void die_if_kernel(char *str, struct pt_regs *regs)
3634 {
3635 static int die_counter;
3636@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
3637 count++ < 30 &&
3638 (((unsigned long) rw) >= PAGE_OFFSET) &&
3639 !(((unsigned long) rw) & 0x7)) {
3640- printk("Caller[%08lx]: %pS\n", rw->ins[7],
3641+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
3642 (void *) rw->ins[7]);
3643 rw = (struct reg_window32 *)rw->ins[6];
3644 }
3645 }
3646 printk("Instruction DUMP:");
3647 instruction_dump ((unsigned long *) regs->pc);
3648- if(regs->psr & PSR_PS)
3649+ if(regs->psr & PSR_PS) {
3650+ gr_handle_kernel_exploit();
3651 do_exit(SIGKILL);
3652+ }
3653 do_exit(SIGSEGV);
3654 }
3655
3656diff -urNp linux-3.0.4/arch/sparc/kernel/traps_64.c linux-3.0.4/arch/sparc/kernel/traps_64.c
3657--- linux-3.0.4/arch/sparc/kernel/traps_64.c 2011-07-21 22:17:23.000000000 -0400
3658+++ linux-3.0.4/arch/sparc/kernel/traps_64.c 2011-08-23 21:48:14.000000000 -0400
3659@@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_
3660 i + 1,
3661 p->trapstack[i].tstate, p->trapstack[i].tpc,
3662 p->trapstack[i].tnpc, p->trapstack[i].tt);
3663- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
3664+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
3665 }
3666 }
3667
3668@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long
3669
3670 lvl -= 0x100;
3671 if (regs->tstate & TSTATE_PRIV) {
3672+
3673+#ifdef CONFIG_PAX_REFCOUNT
3674+ if (lvl == 6)
3675+ pax_report_refcount_overflow(regs);
3676+#endif
3677+
3678 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
3679 die_if_kernel(buffer, regs);
3680 }
3681@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long
3682 void bad_trap_tl1(struct pt_regs *regs, long lvl)
3683 {
3684 char buffer[32];
3685-
3686+
3687 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
3688 0, lvl, SIGTRAP) == NOTIFY_STOP)
3689 return;
3690
3691+#ifdef CONFIG_PAX_REFCOUNT
3692+ if (lvl == 6)
3693+ pax_report_refcount_overflow(regs);
3694+#endif
3695+
3696 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
3697
3698 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
3699@@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt
3700 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
3701 printk("%s" "ERROR(%d): ",
3702 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
3703- printk("TPC<%pS>\n", (void *) regs->tpc);
3704+ printk("TPC<%pA>\n", (void *) regs->tpc);
3705 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
3706 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
3707 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
3708@@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type,
3709 smp_processor_id(),
3710 (type & 0x1) ? 'I' : 'D',
3711 regs->tpc);
3712- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
3713+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
3714 panic("Irrecoverable Cheetah+ parity error.");
3715 }
3716
3717@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type,
3718 smp_processor_id(),
3719 (type & 0x1) ? 'I' : 'D',
3720 regs->tpc);
3721- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
3722+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
3723 }
3724
3725 struct sun4v_error_entry {
3726@@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_r
3727
3728 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
3729 regs->tpc, tl);
3730- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
3731+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
3732 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3733- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
3734+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
3735 (void *) regs->u_regs[UREG_I7]);
3736 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
3737 "pte[%lx] error[%lx]\n",
3738@@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_r
3739
3740 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
3741 regs->tpc, tl);
3742- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
3743+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
3744 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3745- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
3746+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
3747 (void *) regs->u_regs[UREG_I7]);
3748 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
3749 "pte[%lx] error[%lx]\n",
3750@@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk,
3751 fp = (unsigned long)sf->fp + STACK_BIAS;
3752 }
3753
3754- printk(" [%016lx] %pS\n", pc, (void *) pc);
3755+ printk(" [%016lx] %pA\n", pc, (void *) pc);
3756 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3757 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
3758 int index = tsk->curr_ret_stack;
3759 if (tsk->ret_stack && index >= graph) {
3760 pc = tsk->ret_stack[index - graph].ret;
3761- printk(" [%016lx] %pS\n", pc, (void *) pc);
3762+ printk(" [%016lx] %pA\n", pc, (void *) pc);
3763 graph++;
3764 }
3765 }
3766@@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_
3767 return (struct reg_window *) (fp + STACK_BIAS);
3768 }
3769
3770+extern void gr_handle_kernel_exploit(void);
3771+
3772 void die_if_kernel(char *str, struct pt_regs *regs)
3773 {
3774 static int die_counter;
3775@@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_
3776 while (rw &&
3777 count++ < 30 &&
3778 kstack_valid(tp, (unsigned long) rw)) {
3779- printk("Caller[%016lx]: %pS\n", rw->ins[7],
3780+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
3781 (void *) rw->ins[7]);
3782
3783 rw = kernel_stack_up(rw);
3784@@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_
3785 }
3786 user_instruction_dump ((unsigned int __user *) regs->tpc);
3787 }
3788- if (regs->tstate & TSTATE_PRIV)
3789+ if (regs->tstate & TSTATE_PRIV) {
3790+ gr_handle_kernel_exploit();
3791 do_exit(SIGKILL);
3792+ }
3793 do_exit(SIGSEGV);
3794 }
3795 EXPORT_SYMBOL(die_if_kernel);
3796diff -urNp linux-3.0.4/arch/sparc/kernel/unaligned_64.c linux-3.0.4/arch/sparc/kernel/unaligned_64.c
3797--- linux-3.0.4/arch/sparc/kernel/unaligned_64.c 2011-09-02 18:11:21.000000000 -0400
3798+++ linux-3.0.4/arch/sparc/kernel/unaligned_64.c 2011-08-23 21:48:14.000000000 -0400
3799@@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs
3800 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
3801
3802 if (__ratelimit(&ratelimit)) {
3803- printk("Kernel unaligned access at TPC[%lx] %pS\n",
3804+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
3805 regs->tpc, (void *) regs->tpc);
3806 }
3807 }
3808diff -urNp linux-3.0.4/arch/sparc/lib/atomic_64.S linux-3.0.4/arch/sparc/lib/atomic_64.S
3809--- linux-3.0.4/arch/sparc/lib/atomic_64.S 2011-07-21 22:17:23.000000000 -0400
3810+++ linux-3.0.4/arch/sparc/lib/atomic_64.S 2011-08-23 21:47:55.000000000 -0400
3811@@ -18,7 +18,12 @@
3812 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
3813 BACKOFF_SETUP(%o2)
3814 1: lduw [%o1], %g1
3815- add %g1, %o0, %g7
3816+ addcc %g1, %o0, %g7
3817+
3818+#ifdef CONFIG_PAX_REFCOUNT
3819+ tvs %icc, 6
3820+#endif
3821+
3822 cas [%o1], %g1, %g7
3823 cmp %g1, %g7
3824 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3825@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
3826 2: BACKOFF_SPIN(%o2, %o3, 1b)
3827 .size atomic_add, .-atomic_add
3828
3829+ .globl atomic_add_unchecked
3830+ .type atomic_add_unchecked,#function
3831+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3832+ BACKOFF_SETUP(%o2)
3833+1: lduw [%o1], %g1
3834+ add %g1, %o0, %g7
3835+ cas [%o1], %g1, %g7
3836+ cmp %g1, %g7
3837+ bne,pn %icc, 2f
3838+ nop
3839+ retl
3840+ nop
3841+2: BACKOFF_SPIN(%o2, %o3, 1b)
3842+ .size atomic_add_unchecked, .-atomic_add_unchecked
3843+
3844 .globl atomic_sub
3845 .type atomic_sub,#function
3846 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3847 BACKOFF_SETUP(%o2)
3848 1: lduw [%o1], %g1
3849- sub %g1, %o0, %g7
3850+ subcc %g1, %o0, %g7
3851+
3852+#ifdef CONFIG_PAX_REFCOUNT
3853+ tvs %icc, 6
3854+#endif
3855+
3856 cas [%o1], %g1, %g7
3857 cmp %g1, %g7
3858 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3859@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
3860 2: BACKOFF_SPIN(%o2, %o3, 1b)
3861 .size atomic_sub, .-atomic_sub
3862
3863+ .globl atomic_sub_unchecked
3864+ .type atomic_sub_unchecked,#function
3865+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3866+ BACKOFF_SETUP(%o2)
3867+1: lduw [%o1], %g1
3868+ sub %g1, %o0, %g7
3869+ cas [%o1], %g1, %g7
3870+ cmp %g1, %g7
3871+ bne,pn %icc, 2f
3872+ nop
3873+ retl
3874+ nop
3875+2: BACKOFF_SPIN(%o2, %o3, 1b)
3876+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
3877+
3878 .globl atomic_add_ret
3879 .type atomic_add_ret,#function
3880 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3881 BACKOFF_SETUP(%o2)
3882 1: lduw [%o1], %g1
3883- add %g1, %o0, %g7
3884+ addcc %g1, %o0, %g7
3885+
3886+#ifdef CONFIG_PAX_REFCOUNT
3887+ tvs %icc, 6
3888+#endif
3889+
3890 cas [%o1], %g1, %g7
3891 cmp %g1, %g7
3892 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3893@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1
3894 2: BACKOFF_SPIN(%o2, %o3, 1b)
3895 .size atomic_add_ret, .-atomic_add_ret
3896
3897+ .globl atomic_add_ret_unchecked
3898+ .type atomic_add_ret_unchecked,#function
3899+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3900+ BACKOFF_SETUP(%o2)
3901+1: lduw [%o1], %g1
3902+ addcc %g1, %o0, %g7
3903+ cas [%o1], %g1, %g7
3904+ cmp %g1, %g7
3905+ bne,pn %icc, 2f
3906+ add %g7, %o0, %g7
3907+ sra %g7, 0, %o0
3908+ retl
3909+ nop
3910+2: BACKOFF_SPIN(%o2, %o3, 1b)
3911+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
3912+
3913 .globl atomic_sub_ret
3914 .type atomic_sub_ret,#function
3915 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
3916 BACKOFF_SETUP(%o2)
3917 1: lduw [%o1], %g1
3918- sub %g1, %o0, %g7
3919+ subcc %g1, %o0, %g7
3920+
3921+#ifdef CONFIG_PAX_REFCOUNT
3922+ tvs %icc, 6
3923+#endif
3924+
3925 cas [%o1], %g1, %g7
3926 cmp %g1, %g7
3927 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3928@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
3929 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
3930 BACKOFF_SETUP(%o2)
3931 1: ldx [%o1], %g1
3932- add %g1, %o0, %g7
3933+ addcc %g1, %o0, %g7
3934+
3935+#ifdef CONFIG_PAX_REFCOUNT
3936+ tvs %xcc, 6
3937+#endif
3938+
3939 casx [%o1], %g1, %g7
3940 cmp %g1, %g7
3941 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3942@@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 =
3943 2: BACKOFF_SPIN(%o2, %o3, 1b)
3944 .size atomic64_add, .-atomic64_add
3945
3946+ .globl atomic64_add_unchecked
3947+ .type atomic64_add_unchecked,#function
3948+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3949+ BACKOFF_SETUP(%o2)
3950+1: ldx [%o1], %g1
3951+ addcc %g1, %o0, %g7
3952+ casx [%o1], %g1, %g7
3953+ cmp %g1, %g7
3954+ bne,pn %xcc, 2f
3955+ nop
3956+ retl
3957+ nop
3958+2: BACKOFF_SPIN(%o2, %o3, 1b)
3959+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
3960+
3961 .globl atomic64_sub
3962 .type atomic64_sub,#function
3963 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3964 BACKOFF_SETUP(%o2)
3965 1: ldx [%o1], %g1
3966- sub %g1, %o0, %g7
3967+ subcc %g1, %o0, %g7
3968+
3969+#ifdef CONFIG_PAX_REFCOUNT
3970+ tvs %xcc, 6
3971+#endif
3972+
3973 casx [%o1], %g1, %g7
3974 cmp %g1, %g7
3975 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3976@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
3977 2: BACKOFF_SPIN(%o2, %o3, 1b)
3978 .size atomic64_sub, .-atomic64_sub
3979
3980+ .globl atomic64_sub_unchecked
3981+ .type atomic64_sub_unchecked,#function
3982+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3983+ BACKOFF_SETUP(%o2)
3984+1: ldx [%o1], %g1
3985+ subcc %g1, %o0, %g7
3986+ casx [%o1], %g1, %g7
3987+ cmp %g1, %g7
3988+ bne,pn %xcc, 2f
3989+ nop
3990+ retl
3991+ nop
3992+2: BACKOFF_SPIN(%o2, %o3, 1b)
3993+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
3994+
3995 .globl atomic64_add_ret
3996 .type atomic64_add_ret,#function
3997 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3998 BACKOFF_SETUP(%o2)
3999 1: ldx [%o1], %g1
4000- add %g1, %o0, %g7
4001+ addcc %g1, %o0, %g7
4002+
4003+#ifdef CONFIG_PAX_REFCOUNT
4004+ tvs %xcc, 6
4005+#endif
4006+
4007 casx [%o1], %g1, %g7
4008 cmp %g1, %g7
4009 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4010@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4011 2: BACKOFF_SPIN(%o2, %o3, 1b)
4012 .size atomic64_add_ret, .-atomic64_add_ret
4013
4014+ .globl atomic64_add_ret_unchecked
4015+ .type atomic64_add_ret_unchecked,#function
4016+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4017+ BACKOFF_SETUP(%o2)
4018+1: ldx [%o1], %g1
4019+ addcc %g1, %o0, %g7
4020+ casx [%o1], %g1, %g7
4021+ cmp %g1, %g7
4022+ bne,pn %xcc, 2f
4023+ add %g7, %o0, %g7
4024+ mov %g7, %o0
4025+ retl
4026+ nop
4027+2: BACKOFF_SPIN(%o2, %o3, 1b)
4028+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4029+
4030 .globl atomic64_sub_ret
4031 .type atomic64_sub_ret,#function
4032 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4033 BACKOFF_SETUP(%o2)
4034 1: ldx [%o1], %g1
4035- sub %g1, %o0, %g7
4036+ subcc %g1, %o0, %g7
4037+
4038+#ifdef CONFIG_PAX_REFCOUNT
4039+ tvs %xcc, 6
4040+#endif
4041+
4042 casx [%o1], %g1, %g7
4043 cmp %g1, %g7
4044 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4045diff -urNp linux-3.0.4/arch/sparc/lib/ksyms.c linux-3.0.4/arch/sparc/lib/ksyms.c
4046--- linux-3.0.4/arch/sparc/lib/ksyms.c 2011-07-21 22:17:23.000000000 -0400
4047+++ linux-3.0.4/arch/sparc/lib/ksyms.c 2011-08-23 21:48:14.000000000 -0400
4048@@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4049
4050 /* Atomic counter implementation. */
4051 EXPORT_SYMBOL(atomic_add);
4052+EXPORT_SYMBOL(atomic_add_unchecked);
4053 EXPORT_SYMBOL(atomic_add_ret);
4054+EXPORT_SYMBOL(atomic_add_ret_unchecked);
4055 EXPORT_SYMBOL(atomic_sub);
4056+EXPORT_SYMBOL(atomic_sub_unchecked);
4057 EXPORT_SYMBOL(atomic_sub_ret);
4058 EXPORT_SYMBOL(atomic64_add);
4059+EXPORT_SYMBOL(atomic64_add_unchecked);
4060 EXPORT_SYMBOL(atomic64_add_ret);
4061+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4062 EXPORT_SYMBOL(atomic64_sub);
4063+EXPORT_SYMBOL(atomic64_sub_unchecked);
4064 EXPORT_SYMBOL(atomic64_sub_ret);
4065
4066 /* Atomic bit operations. */
4067diff -urNp linux-3.0.4/arch/sparc/lib/Makefile linux-3.0.4/arch/sparc/lib/Makefile
4068--- linux-3.0.4/arch/sparc/lib/Makefile 2011-09-02 18:11:21.000000000 -0400
4069+++ linux-3.0.4/arch/sparc/lib/Makefile 2011-08-23 21:47:55.000000000 -0400
4070@@ -2,7 +2,7 @@
4071 #
4072
4073 asflags-y := -ansi -DST_DIV0=0x02
4074-ccflags-y := -Werror
4075+#ccflags-y := -Werror
4076
4077 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4078 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4079diff -urNp linux-3.0.4/arch/sparc/Makefile linux-3.0.4/arch/sparc/Makefile
4080--- linux-3.0.4/arch/sparc/Makefile 2011-07-21 22:17:23.000000000 -0400
4081+++ linux-3.0.4/arch/sparc/Makefile 2011-08-23 21:48:14.000000000 -0400
4082@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
4083 # Export what is needed by arch/sparc/boot/Makefile
4084 export VMLINUX_INIT VMLINUX_MAIN
4085 VMLINUX_INIT := $(head-y) $(init-y)
4086-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4087+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4088 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4089 VMLINUX_MAIN += $(drivers-y) $(net-y)
4090
4091diff -urNp linux-3.0.4/arch/sparc/mm/fault_32.c linux-3.0.4/arch/sparc/mm/fault_32.c
4092--- linux-3.0.4/arch/sparc/mm/fault_32.c 2011-07-21 22:17:23.000000000 -0400
4093+++ linux-3.0.4/arch/sparc/mm/fault_32.c 2011-08-23 21:47:55.000000000 -0400
4094@@ -22,6 +22,9 @@
4095 #include <linux/interrupt.h>
4096 #include <linux/module.h>
4097 #include <linux/kdebug.h>
4098+#include <linux/slab.h>
4099+#include <linux/pagemap.h>
4100+#include <linux/compiler.h>
4101
4102 #include <asm/system.h>
4103 #include <asm/page.h>
4104@@ -209,6 +212,268 @@ static unsigned long compute_si_addr(str
4105 return safe_compute_effective_address(regs, insn);
4106 }
4107
4108+#ifdef CONFIG_PAX_PAGEEXEC
4109+#ifdef CONFIG_PAX_DLRESOLVE
4110+static void pax_emuplt_close(struct vm_area_struct *vma)
4111+{
4112+ vma->vm_mm->call_dl_resolve = 0UL;
4113+}
4114+
4115+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4116+{
4117+ unsigned int *kaddr;
4118+
4119+ vmf->page = alloc_page(GFP_HIGHUSER);
4120+ if (!vmf->page)
4121+ return VM_FAULT_OOM;
4122+
4123+ kaddr = kmap(vmf->page);
4124+ memset(kaddr, 0, PAGE_SIZE);
4125+ kaddr[0] = 0x9DE3BFA8U; /* save */
4126+ flush_dcache_page(vmf->page);
4127+ kunmap(vmf->page);
4128+ return VM_FAULT_MAJOR;
4129+}
4130+
4131+static const struct vm_operations_struct pax_vm_ops = {
4132+ .close = pax_emuplt_close,
4133+ .fault = pax_emuplt_fault
4134+};
4135+
4136+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4137+{
4138+ int ret;
4139+
4140+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4141+ vma->vm_mm = current->mm;
4142+ vma->vm_start = addr;
4143+ vma->vm_end = addr + PAGE_SIZE;
4144+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4145+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4146+ vma->vm_ops = &pax_vm_ops;
4147+
4148+ ret = insert_vm_struct(current->mm, vma);
4149+ if (ret)
4150+ return ret;
4151+
4152+ ++current->mm->total_vm;
4153+ return 0;
4154+}
4155+#endif
4156+
4157+/*
4158+ * PaX: decide what to do with offenders (regs->pc = fault address)
4159+ *
4160+ * returns 1 when task should be killed
4161+ * 2 when patched PLT trampoline was detected
4162+ * 3 when unpatched PLT trampoline was detected
4163+ */
4164+static int pax_handle_fetch_fault(struct pt_regs *regs)
4165+{
4166+
4167+#ifdef CONFIG_PAX_EMUPLT
4168+ int err;
4169+
4170+ do { /* PaX: patched PLT emulation #1 */
4171+ unsigned int sethi1, sethi2, jmpl;
4172+
4173+ err = get_user(sethi1, (unsigned int *)regs->pc);
4174+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4175+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4176+
4177+ if (err)
4178+ break;
4179+
4180+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4181+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4182+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4183+ {
4184+ unsigned int addr;
4185+
4186+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4187+ addr = regs->u_regs[UREG_G1];
4188+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4189+ regs->pc = addr;
4190+ regs->npc = addr+4;
4191+ return 2;
4192+ }
4193+ } while (0);
4194+
4195+ { /* PaX: patched PLT emulation #2 */
4196+ unsigned int ba;
4197+
4198+ err = get_user(ba, (unsigned int *)regs->pc);
4199+
4200+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4201+ unsigned int addr;
4202+
4203+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4204+ regs->pc = addr;
4205+ regs->npc = addr+4;
4206+ return 2;
4207+ }
4208+ }
4209+
4210+ do { /* PaX: patched PLT emulation #3 */
4211+ unsigned int sethi, jmpl, nop;
4212+
4213+ err = get_user(sethi, (unsigned int *)regs->pc);
4214+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4215+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4216+
4217+ if (err)
4218+ break;
4219+
4220+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4221+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4222+ nop == 0x01000000U)
4223+ {
4224+ unsigned int addr;
4225+
4226+ addr = (sethi & 0x003FFFFFU) << 10;
4227+ regs->u_regs[UREG_G1] = addr;
4228+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4229+ regs->pc = addr;
4230+ regs->npc = addr+4;
4231+ return 2;
4232+ }
4233+ } while (0);
4234+
4235+ do { /* PaX: unpatched PLT emulation step 1 */
4236+ unsigned int sethi, ba, nop;
4237+
4238+ err = get_user(sethi, (unsigned int *)regs->pc);
4239+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
4240+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4241+
4242+ if (err)
4243+ break;
4244+
4245+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4246+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4247+ nop == 0x01000000U)
4248+ {
4249+ unsigned int addr, save, call;
4250+
4251+ if ((ba & 0xFFC00000U) == 0x30800000U)
4252+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4253+ else
4254+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4255+
4256+ err = get_user(save, (unsigned int *)addr);
4257+ err |= get_user(call, (unsigned int *)(addr+4));
4258+ err |= get_user(nop, (unsigned int *)(addr+8));
4259+ if (err)
4260+ break;
4261+
4262+#ifdef CONFIG_PAX_DLRESOLVE
4263+ if (save == 0x9DE3BFA8U &&
4264+ (call & 0xC0000000U) == 0x40000000U &&
4265+ nop == 0x01000000U)
4266+ {
4267+ struct vm_area_struct *vma;
4268+ unsigned long call_dl_resolve;
4269+
4270+ down_read(&current->mm->mmap_sem);
4271+ call_dl_resolve = current->mm->call_dl_resolve;
4272+ up_read(&current->mm->mmap_sem);
4273+ if (likely(call_dl_resolve))
4274+ goto emulate;
4275+
4276+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4277+
4278+ down_write(&current->mm->mmap_sem);
4279+ if (current->mm->call_dl_resolve) {
4280+ call_dl_resolve = current->mm->call_dl_resolve;
4281+ up_write(&current->mm->mmap_sem);
4282+ if (vma)
4283+ kmem_cache_free(vm_area_cachep, vma);
4284+ goto emulate;
4285+ }
4286+
4287+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4288+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4289+ up_write(&current->mm->mmap_sem);
4290+ if (vma)
4291+ kmem_cache_free(vm_area_cachep, vma);
4292+ return 1;
4293+ }
4294+
4295+ if (pax_insert_vma(vma, call_dl_resolve)) {
4296+ up_write(&current->mm->mmap_sem);
4297+ kmem_cache_free(vm_area_cachep, vma);
4298+ return 1;
4299+ }
4300+
4301+ current->mm->call_dl_resolve = call_dl_resolve;
4302+ up_write(&current->mm->mmap_sem);
4303+
4304+emulate:
4305+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4306+ regs->pc = call_dl_resolve;
4307+ regs->npc = addr+4;
4308+ return 3;
4309+ }
4310+#endif
4311+
4312+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4313+ if ((save & 0xFFC00000U) == 0x05000000U &&
4314+ (call & 0xFFFFE000U) == 0x85C0A000U &&
4315+ nop == 0x01000000U)
4316+ {
4317+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4318+ regs->u_regs[UREG_G2] = addr + 4;
4319+ addr = (save & 0x003FFFFFU) << 10;
4320+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4321+ regs->pc = addr;
4322+ regs->npc = addr+4;
4323+ return 3;
4324+ }
4325+ }
4326+ } while (0);
4327+
4328+ do { /* PaX: unpatched PLT emulation step 2 */
4329+ unsigned int save, call, nop;
4330+
4331+ err = get_user(save, (unsigned int *)(regs->pc-4));
4332+ err |= get_user(call, (unsigned int *)regs->pc);
4333+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
4334+ if (err)
4335+ break;
4336+
4337+ if (save == 0x9DE3BFA8U &&
4338+ (call & 0xC0000000U) == 0x40000000U &&
4339+ nop == 0x01000000U)
4340+ {
4341+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4342+
4343+ regs->u_regs[UREG_RETPC] = regs->pc;
4344+ regs->pc = dl_resolve;
4345+ regs->npc = dl_resolve+4;
4346+ return 3;
4347+ }
4348+ } while (0);
4349+#endif
4350+
4351+ return 1;
4352+}
4353+
4354+void pax_report_insns(void *pc, void *sp)
4355+{
4356+ unsigned long i;
4357+
4358+ printk(KERN_ERR "PAX: bytes at PC: ");
4359+ for (i = 0; i < 8; i++) {
4360+ unsigned int c;
4361+ if (get_user(c, (unsigned int *)pc+i))
4362+ printk(KERN_CONT "???????? ");
4363+ else
4364+ printk(KERN_CONT "%08x ", c);
4365+ }
4366+ printk("\n");
4367+}
4368+#endif
4369+
4370 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4371 int text_fault)
4372 {
4373@@ -281,6 +546,24 @@ good_area:
4374 if(!(vma->vm_flags & VM_WRITE))
4375 goto bad_area;
4376 } else {
4377+
4378+#ifdef CONFIG_PAX_PAGEEXEC
4379+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4380+ up_read(&mm->mmap_sem);
4381+ switch (pax_handle_fetch_fault(regs)) {
4382+
4383+#ifdef CONFIG_PAX_EMUPLT
4384+ case 2:
4385+ case 3:
4386+ return;
4387+#endif
4388+
4389+ }
4390+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4391+ do_group_exit(SIGKILL);
4392+ }
4393+#endif
4394+
4395 /* Allow reads even for write-only mappings */
4396 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4397 goto bad_area;
4398diff -urNp linux-3.0.4/arch/sparc/mm/fault_64.c linux-3.0.4/arch/sparc/mm/fault_64.c
4399--- linux-3.0.4/arch/sparc/mm/fault_64.c 2011-07-21 22:17:23.000000000 -0400
4400+++ linux-3.0.4/arch/sparc/mm/fault_64.c 2011-08-23 21:48:14.000000000 -0400
4401@@ -21,6 +21,9 @@
4402 #include <linux/kprobes.h>
4403 #include <linux/kdebug.h>
4404 #include <linux/percpu.h>
4405+#include <linux/slab.h>
4406+#include <linux/pagemap.h>
4407+#include <linux/compiler.h>
4408
4409 #include <asm/page.h>
4410 #include <asm/pgtable.h>
4411@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(stru
4412 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4413 regs->tpc);
4414 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4415- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4416+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4417 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4418 dump_stack();
4419 unhandled_fault(regs->tpc, current, regs);
4420@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32b
4421 show_regs(regs);
4422 }
4423
4424+#ifdef CONFIG_PAX_PAGEEXEC
4425+#ifdef CONFIG_PAX_DLRESOLVE
4426+static void pax_emuplt_close(struct vm_area_struct *vma)
4427+{
4428+ vma->vm_mm->call_dl_resolve = 0UL;
4429+}
4430+
4431+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4432+{
4433+ unsigned int *kaddr;
4434+
4435+ vmf->page = alloc_page(GFP_HIGHUSER);
4436+ if (!vmf->page)
4437+ return VM_FAULT_OOM;
4438+
4439+ kaddr = kmap(vmf->page);
4440+ memset(kaddr, 0, PAGE_SIZE);
4441+ kaddr[0] = 0x9DE3BFA8U; /* save */
4442+ flush_dcache_page(vmf->page);
4443+ kunmap(vmf->page);
4444+ return VM_FAULT_MAJOR;
4445+}
4446+
4447+static const struct vm_operations_struct pax_vm_ops = {
4448+ .close = pax_emuplt_close,
4449+ .fault = pax_emuplt_fault
4450+};
4451+
4452+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4453+{
4454+ int ret;
4455+
4456+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4457+ vma->vm_mm = current->mm;
4458+ vma->vm_start = addr;
4459+ vma->vm_end = addr + PAGE_SIZE;
4460+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4461+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4462+ vma->vm_ops = &pax_vm_ops;
4463+
4464+ ret = insert_vm_struct(current->mm, vma);
4465+ if (ret)
4466+ return ret;
4467+
4468+ ++current->mm->total_vm;
4469+ return 0;
4470+}
4471+#endif
4472+
4473+/*
4474+ * PaX: decide what to do with offenders (regs->tpc = fault address)
4475+ *
4476+ * returns 1 when task should be killed
4477+ * 2 when patched PLT trampoline was detected
4478+ * 3 when unpatched PLT trampoline was detected
4479+ */
4480+static int pax_handle_fetch_fault(struct pt_regs *regs)
4481+{
4482+
4483+#ifdef CONFIG_PAX_EMUPLT
4484+ int err;
4485+
4486+ do { /* PaX: patched PLT emulation #1 */
4487+ unsigned int sethi1, sethi2, jmpl;
4488+
4489+ err = get_user(sethi1, (unsigned int *)regs->tpc);
4490+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4491+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4492+
4493+ if (err)
4494+ break;
4495+
4496+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4497+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4498+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4499+ {
4500+ unsigned long addr;
4501+
4502+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4503+ addr = regs->u_regs[UREG_G1];
4504+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4505+
4506+ if (test_thread_flag(TIF_32BIT))
4507+ addr &= 0xFFFFFFFFUL;
4508+
4509+ regs->tpc = addr;
4510+ regs->tnpc = addr+4;
4511+ return 2;
4512+ }
4513+ } while (0);
4514+
4515+ { /* PaX: patched PLT emulation #2 */
4516+ unsigned int ba;
4517+
4518+ err = get_user(ba, (unsigned int *)regs->tpc);
4519+
4520+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4521+ unsigned long addr;
4522+
4523+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4524+
4525+ if (test_thread_flag(TIF_32BIT))
4526+ addr &= 0xFFFFFFFFUL;
4527+
4528+ regs->tpc = addr;
4529+ regs->tnpc = addr+4;
4530+ return 2;
4531+ }
4532+ }
4533+
4534+ do { /* PaX: patched PLT emulation #3 */
4535+ unsigned int sethi, jmpl, nop;
4536+
4537+ err = get_user(sethi, (unsigned int *)regs->tpc);
4538+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
4539+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4540+
4541+ if (err)
4542+ break;
4543+
4544+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4545+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4546+ nop == 0x01000000U)
4547+ {
4548+ unsigned long addr;
4549+
4550+ addr = (sethi & 0x003FFFFFU) << 10;
4551+ regs->u_regs[UREG_G1] = addr;
4552+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4553+
4554+ if (test_thread_flag(TIF_32BIT))
4555+ addr &= 0xFFFFFFFFUL;
4556+
4557+ regs->tpc = addr;
4558+ regs->tnpc = addr+4;
4559+ return 2;
4560+ }
4561+ } while (0);
4562+
4563+ do { /* PaX: patched PLT emulation #4 */
4564+ unsigned int sethi, mov1, call, mov2;
4565+
4566+ err = get_user(sethi, (unsigned int *)regs->tpc);
4567+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
4568+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
4569+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
4570+
4571+ if (err)
4572+ break;
4573+
4574+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4575+ mov1 == 0x8210000FU &&
4576+ (call & 0xC0000000U) == 0x40000000U &&
4577+ mov2 == 0x9E100001U)
4578+ {
4579+ unsigned long addr;
4580+
4581+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
4582+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4583+
4584+ if (test_thread_flag(TIF_32BIT))
4585+ addr &= 0xFFFFFFFFUL;
4586+
4587+ regs->tpc = addr;
4588+ regs->tnpc = addr+4;
4589+ return 2;
4590+ }
4591+ } while (0);
4592+
4593+ do { /* PaX: patched PLT emulation #5 */
4594+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
4595+
4596+ err = get_user(sethi, (unsigned int *)regs->tpc);
4597+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4598+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4599+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
4600+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
4601+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
4602+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
4603+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
4604+
4605+ if (err)
4606+ break;
4607+
4608+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4609+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
4610+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4611+ (or1 & 0xFFFFE000U) == 0x82106000U &&
4612+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
4613+ sllx == 0x83287020U &&
4614+ jmpl == 0x81C04005U &&
4615+ nop == 0x01000000U)
4616+ {
4617+ unsigned long addr;
4618+
4619+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4620+ regs->u_regs[UREG_G1] <<= 32;
4621+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4622+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4623+ regs->tpc = addr;
4624+ regs->tnpc = addr+4;
4625+ return 2;
4626+ }
4627+ } while (0);
4628+
4629+ do { /* PaX: patched PLT emulation #6 */
4630+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
4631+
4632+ err = get_user(sethi, (unsigned int *)regs->tpc);
4633+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4634+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4635+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
4636+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
4637+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
4638+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
4639+
4640+ if (err)
4641+ break;
4642+
4643+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4644+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
4645+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4646+ sllx == 0x83287020U &&
4647+ (or & 0xFFFFE000U) == 0x8A116000U &&
4648+ jmpl == 0x81C04005U &&
4649+ nop == 0x01000000U)
4650+ {
4651+ unsigned long addr;
4652+
4653+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
4654+ regs->u_regs[UREG_G1] <<= 32;
4655+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
4656+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4657+ regs->tpc = addr;
4658+ regs->tnpc = addr+4;
4659+ return 2;
4660+ }
4661+ } while (0);
4662+
4663+ do { /* PaX: unpatched PLT emulation step 1 */
4664+ unsigned int sethi, ba, nop;
4665+
4666+ err = get_user(sethi, (unsigned int *)regs->tpc);
4667+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4668+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4669+
4670+ if (err)
4671+ break;
4672+
4673+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4674+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4675+ nop == 0x01000000U)
4676+ {
4677+ unsigned long addr;
4678+ unsigned int save, call;
4679+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
4680+
4681+ if ((ba & 0xFFC00000U) == 0x30800000U)
4682+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4683+ else
4684+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4685+
4686+ if (test_thread_flag(TIF_32BIT))
4687+ addr &= 0xFFFFFFFFUL;
4688+
4689+ err = get_user(save, (unsigned int *)addr);
4690+ err |= get_user(call, (unsigned int *)(addr+4));
4691+ err |= get_user(nop, (unsigned int *)(addr+8));
4692+ if (err)
4693+ break;
4694+
4695+#ifdef CONFIG_PAX_DLRESOLVE
4696+ if (save == 0x9DE3BFA8U &&
4697+ (call & 0xC0000000U) == 0x40000000U &&
4698+ nop == 0x01000000U)
4699+ {
4700+ struct vm_area_struct *vma;
4701+ unsigned long call_dl_resolve;
4702+
4703+ down_read(&current->mm->mmap_sem);
4704+ call_dl_resolve = current->mm->call_dl_resolve;
4705+ up_read(&current->mm->mmap_sem);
4706+ if (likely(call_dl_resolve))
4707+ goto emulate;
4708+
4709+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4710+
4711+ down_write(&current->mm->mmap_sem);
4712+ if (current->mm->call_dl_resolve) {
4713+ call_dl_resolve = current->mm->call_dl_resolve;
4714+ up_write(&current->mm->mmap_sem);
4715+ if (vma)
4716+ kmem_cache_free(vm_area_cachep, vma);
4717+ goto emulate;
4718+ }
4719+
4720+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4721+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4722+ up_write(&current->mm->mmap_sem);
4723+ if (vma)
4724+ kmem_cache_free(vm_area_cachep, vma);
4725+ return 1;
4726+ }
4727+
4728+ if (pax_insert_vma(vma, call_dl_resolve)) {
4729+ up_write(&current->mm->mmap_sem);
4730+ kmem_cache_free(vm_area_cachep, vma);
4731+ return 1;
4732+ }
4733+
4734+ current->mm->call_dl_resolve = call_dl_resolve;
4735+ up_write(&current->mm->mmap_sem);
4736+
4737+emulate:
4738+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4739+ regs->tpc = call_dl_resolve;
4740+ regs->tnpc = addr+4;
4741+ return 3;
4742+ }
4743+#endif
4744+
4745+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4746+ if ((save & 0xFFC00000U) == 0x05000000U &&
4747+ (call & 0xFFFFE000U) == 0x85C0A000U &&
4748+ nop == 0x01000000U)
4749+ {
4750+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4751+ regs->u_regs[UREG_G2] = addr + 4;
4752+ addr = (save & 0x003FFFFFU) << 10;
4753+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4754+
4755+ if (test_thread_flag(TIF_32BIT))
4756+ addr &= 0xFFFFFFFFUL;
4757+
4758+ regs->tpc = addr;
4759+ regs->tnpc = addr+4;
4760+ return 3;
4761+ }
4762+
4763+ /* PaX: 64-bit PLT stub */
4764+ err = get_user(sethi1, (unsigned int *)addr);
4765+ err |= get_user(sethi2, (unsigned int *)(addr+4));
4766+ err |= get_user(or1, (unsigned int *)(addr+8));
4767+ err |= get_user(or2, (unsigned int *)(addr+12));
4768+ err |= get_user(sllx, (unsigned int *)(addr+16));
4769+ err |= get_user(add, (unsigned int *)(addr+20));
4770+ err |= get_user(jmpl, (unsigned int *)(addr+24));
4771+ err |= get_user(nop, (unsigned int *)(addr+28));
4772+ if (err)
4773+ break;
4774+
4775+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
4776+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4777+ (or1 & 0xFFFFE000U) == 0x88112000U &&
4778+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
4779+ sllx == 0x89293020U &&
4780+ add == 0x8A010005U &&
4781+ jmpl == 0x89C14000U &&
4782+ nop == 0x01000000U)
4783+ {
4784+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4785+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4786+ regs->u_regs[UREG_G4] <<= 32;
4787+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4788+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
4789+ regs->u_regs[UREG_G4] = addr + 24;
4790+ addr = regs->u_regs[UREG_G5];
4791+ regs->tpc = addr;
4792+ regs->tnpc = addr+4;
4793+ return 3;
4794+ }
4795+ }
4796+ } while (0);
4797+
4798+#ifdef CONFIG_PAX_DLRESOLVE
4799+ do { /* PaX: unpatched PLT emulation step 2 */
4800+ unsigned int save, call, nop;
4801+
4802+ err = get_user(save, (unsigned int *)(regs->tpc-4));
4803+ err |= get_user(call, (unsigned int *)regs->tpc);
4804+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
4805+ if (err)
4806+ break;
4807+
4808+ if (save == 0x9DE3BFA8U &&
4809+ (call & 0xC0000000U) == 0x40000000U &&
4810+ nop == 0x01000000U)
4811+ {
4812+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4813+
4814+ if (test_thread_flag(TIF_32BIT))
4815+ dl_resolve &= 0xFFFFFFFFUL;
4816+
4817+ regs->u_regs[UREG_RETPC] = regs->tpc;
4818+ regs->tpc = dl_resolve;
4819+ regs->tnpc = dl_resolve+4;
4820+ return 3;
4821+ }
4822+ } while (0);
4823+#endif
4824+
4825+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
4826+ unsigned int sethi, ba, nop;
4827+
4828+ err = get_user(sethi, (unsigned int *)regs->tpc);
4829+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4830+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4831+
4832+ if (err)
4833+ break;
4834+
4835+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4836+ (ba & 0xFFF00000U) == 0x30600000U &&
4837+ nop == 0x01000000U)
4838+ {
4839+ unsigned long addr;
4840+
4841+ addr = (sethi & 0x003FFFFFU) << 10;
4842+ regs->u_regs[UREG_G1] = addr;
4843+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4844+
4845+ if (test_thread_flag(TIF_32BIT))
4846+ addr &= 0xFFFFFFFFUL;
4847+
4848+ regs->tpc = addr;
4849+ regs->tnpc = addr+4;
4850+ return 2;
4851+ }
4852+ } while (0);
4853+
4854+#endif
4855+
4856+ return 1;
4857+}
4858+
4859+void pax_report_insns(void *pc, void *sp)
4860+{
4861+ unsigned long i;
4862+
4863+ printk(KERN_ERR "PAX: bytes at PC: ");
4864+ for (i = 0; i < 8; i++) {
4865+ unsigned int c;
4866+ if (get_user(c, (unsigned int *)pc+i))
4867+ printk(KERN_CONT "???????? ");
4868+ else
4869+ printk(KERN_CONT "%08x ", c);
4870+ }
4871+ printk("\n");
4872+}
4873+#endif
4874+
4875 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
4876 {
4877 struct mm_struct *mm = current->mm;
4878@@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fau
4879 if (!vma)
4880 goto bad_area;
4881
4882+#ifdef CONFIG_PAX_PAGEEXEC
4883+ /* PaX: detect ITLB misses on non-exec pages */
4884+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
4885+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
4886+ {
4887+ if (address != regs->tpc)
4888+ goto good_area;
4889+
4890+ up_read(&mm->mmap_sem);
4891+ switch (pax_handle_fetch_fault(regs)) {
4892+
4893+#ifdef CONFIG_PAX_EMUPLT
4894+ case 2:
4895+ case 3:
4896+ return;
4897+#endif
4898+
4899+ }
4900+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
4901+ do_group_exit(SIGKILL);
4902+ }
4903+#endif
4904+
4905 /* Pure DTLB misses do not tell us whether the fault causing
4906 * load/store/atomic was a write or not, it only says that there
4907 * was no match. So in such a case we (carefully) read the
4908diff -urNp linux-3.0.4/arch/sparc/mm/hugetlbpage.c linux-3.0.4/arch/sparc/mm/hugetlbpage.c
4909--- linux-3.0.4/arch/sparc/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
4910+++ linux-3.0.4/arch/sparc/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
4911@@ -68,7 +68,7 @@ full_search:
4912 }
4913 return -ENOMEM;
4914 }
4915- if (likely(!vma || addr + len <= vma->vm_start)) {
4916+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4917 /*
4918 * Remember the place where we stopped the search:
4919 */
4920@@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct
4921 /* make sure it can fit in the remaining address space */
4922 if (likely(addr > len)) {
4923 vma = find_vma(mm, addr-len);
4924- if (!vma || addr <= vma->vm_start) {
4925+ if (check_heap_stack_gap(vma, addr - len, len)) {
4926 /* remember the address as a hint for next time */
4927 return (mm->free_area_cache = addr-len);
4928 }
4929@@ -116,16 +116,17 @@ hugetlb_get_unmapped_area_topdown(struct
4930 if (unlikely(mm->mmap_base < len))
4931 goto bottomup;
4932
4933- addr = (mm->mmap_base-len) & HPAGE_MASK;
4934+ addr = mm->mmap_base - len;
4935
4936 do {
4937+ addr &= HPAGE_MASK;
4938 /*
4939 * Lookup failure means no vma is above this address,
4940 * else if new region fits below vma->vm_start,
4941 * return with success:
4942 */
4943 vma = find_vma(mm, addr);
4944- if (likely(!vma || addr+len <= vma->vm_start)) {
4945+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4946 /* remember the address as a hint for next time */
4947 return (mm->free_area_cache = addr);
4948 }
4949@@ -135,8 +136,8 @@ hugetlb_get_unmapped_area_topdown(struct
4950 mm->cached_hole_size = vma->vm_start - addr;
4951
4952 /* try just below the current vma->vm_start */
4953- addr = (vma->vm_start-len) & HPAGE_MASK;
4954- } while (likely(len < vma->vm_start));
4955+ addr = skip_heap_stack_gap(vma, len);
4956+ } while (!IS_ERR_VALUE(addr));
4957
4958 bottomup:
4959 /*
4960@@ -182,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *f
4961 if (addr) {
4962 addr = ALIGN(addr, HPAGE_SIZE);
4963 vma = find_vma(mm, addr);
4964- if (task_size - len >= addr &&
4965- (!vma || addr + len <= vma->vm_start))
4966+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4967 return addr;
4968 }
4969 if (mm->get_unmapped_area == arch_get_unmapped_area)
4970diff -urNp linux-3.0.4/arch/sparc/mm/init_32.c linux-3.0.4/arch/sparc/mm/init_32.c
4971--- linux-3.0.4/arch/sparc/mm/init_32.c 2011-07-21 22:17:23.000000000 -0400
4972+++ linux-3.0.4/arch/sparc/mm/init_32.c 2011-08-23 21:47:55.000000000 -0400
4973@@ -316,6 +316,9 @@ extern void device_scan(void);
4974 pgprot_t PAGE_SHARED __read_mostly;
4975 EXPORT_SYMBOL(PAGE_SHARED);
4976
4977+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
4978+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
4979+
4980 void __init paging_init(void)
4981 {
4982 switch(sparc_cpu_model) {
4983@@ -344,17 +347,17 @@ void __init paging_init(void)
4984
4985 /* Initialize the protection map with non-constant, MMU dependent values. */
4986 protection_map[0] = PAGE_NONE;
4987- protection_map[1] = PAGE_READONLY;
4988- protection_map[2] = PAGE_COPY;
4989- protection_map[3] = PAGE_COPY;
4990+ protection_map[1] = PAGE_READONLY_NOEXEC;
4991+ protection_map[2] = PAGE_COPY_NOEXEC;
4992+ protection_map[3] = PAGE_COPY_NOEXEC;
4993 protection_map[4] = PAGE_READONLY;
4994 protection_map[5] = PAGE_READONLY;
4995 protection_map[6] = PAGE_COPY;
4996 protection_map[7] = PAGE_COPY;
4997 protection_map[8] = PAGE_NONE;
4998- protection_map[9] = PAGE_READONLY;
4999- protection_map[10] = PAGE_SHARED;
5000- protection_map[11] = PAGE_SHARED;
5001+ protection_map[9] = PAGE_READONLY_NOEXEC;
5002+ protection_map[10] = PAGE_SHARED_NOEXEC;
5003+ protection_map[11] = PAGE_SHARED_NOEXEC;
5004 protection_map[12] = PAGE_READONLY;
5005 protection_map[13] = PAGE_READONLY;
5006 protection_map[14] = PAGE_SHARED;
5007diff -urNp linux-3.0.4/arch/sparc/mm/Makefile linux-3.0.4/arch/sparc/mm/Makefile
5008--- linux-3.0.4/arch/sparc/mm/Makefile 2011-07-21 22:17:23.000000000 -0400
5009+++ linux-3.0.4/arch/sparc/mm/Makefile 2011-08-23 21:47:55.000000000 -0400
5010@@ -2,7 +2,7 @@
5011 #
5012
5013 asflags-y := -ansi
5014-ccflags-y := -Werror
5015+#ccflags-y := -Werror
5016
5017 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5018 obj-y += fault_$(BITS).o
5019diff -urNp linux-3.0.4/arch/sparc/mm/srmmu.c linux-3.0.4/arch/sparc/mm/srmmu.c
5020--- linux-3.0.4/arch/sparc/mm/srmmu.c 2011-07-21 22:17:23.000000000 -0400
5021+++ linux-3.0.4/arch/sparc/mm/srmmu.c 2011-08-23 21:47:55.000000000 -0400
5022@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5023 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5024 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5025 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5026+
5027+#ifdef CONFIG_PAX_PAGEEXEC
5028+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5029+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5030+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5031+#endif
5032+
5033 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5034 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5035
5036diff -urNp linux-3.0.4/arch/um/include/asm/kmap_types.h linux-3.0.4/arch/um/include/asm/kmap_types.h
5037--- linux-3.0.4/arch/um/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
5038+++ linux-3.0.4/arch/um/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
5039@@ -23,6 +23,7 @@ enum km_type {
5040 KM_IRQ1,
5041 KM_SOFTIRQ0,
5042 KM_SOFTIRQ1,
5043+ KM_CLEARPAGE,
5044 KM_TYPE_NR
5045 };
5046
5047diff -urNp linux-3.0.4/arch/um/include/asm/page.h linux-3.0.4/arch/um/include/asm/page.h
5048--- linux-3.0.4/arch/um/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
5049+++ linux-3.0.4/arch/um/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
5050@@ -14,6 +14,9 @@
5051 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5052 #define PAGE_MASK (~(PAGE_SIZE-1))
5053
5054+#define ktla_ktva(addr) (addr)
5055+#define ktva_ktla(addr) (addr)
5056+
5057 #ifndef __ASSEMBLY__
5058
5059 struct page;
5060diff -urNp linux-3.0.4/arch/um/kernel/process.c linux-3.0.4/arch/um/kernel/process.c
5061--- linux-3.0.4/arch/um/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
5062+++ linux-3.0.4/arch/um/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
5063@@ -404,22 +404,6 @@ int singlestepping(void * t)
5064 return 2;
5065 }
5066
5067-/*
5068- * Only x86 and x86_64 have an arch_align_stack().
5069- * All other arches have "#define arch_align_stack(x) (x)"
5070- * in their asm/system.h
5071- * As this is included in UML from asm-um/system-generic.h,
5072- * we can use it to behave as the subarch does.
5073- */
5074-#ifndef arch_align_stack
5075-unsigned long arch_align_stack(unsigned long sp)
5076-{
5077- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5078- sp -= get_random_int() % 8192;
5079- return sp & ~0xf;
5080-}
5081-#endif
5082-
5083 unsigned long get_wchan(struct task_struct *p)
5084 {
5085 unsigned long stack_page, sp, ip;
5086diff -urNp linux-3.0.4/arch/um/sys-i386/syscalls.c linux-3.0.4/arch/um/sys-i386/syscalls.c
5087--- linux-3.0.4/arch/um/sys-i386/syscalls.c 2011-07-21 22:17:23.000000000 -0400
5088+++ linux-3.0.4/arch/um/sys-i386/syscalls.c 2011-08-23 21:47:55.000000000 -0400
5089@@ -11,6 +11,21 @@
5090 #include "asm/uaccess.h"
5091 #include "asm/unistd.h"
5092
5093+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5094+{
5095+ unsigned long pax_task_size = TASK_SIZE;
5096+
5097+#ifdef CONFIG_PAX_SEGMEXEC
5098+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5099+ pax_task_size = SEGMEXEC_TASK_SIZE;
5100+#endif
5101+
5102+ if (len > pax_task_size || addr > pax_task_size - len)
5103+ return -EINVAL;
5104+
5105+ return 0;
5106+}
5107+
5108 /*
5109 * The prototype on i386 is:
5110 *
5111diff -urNp linux-3.0.4/arch/x86/boot/bitops.h linux-3.0.4/arch/x86/boot/bitops.h
5112--- linux-3.0.4/arch/x86/boot/bitops.h 2011-07-21 22:17:23.000000000 -0400
5113+++ linux-3.0.4/arch/x86/boot/bitops.h 2011-08-23 21:47:55.000000000 -0400
5114@@ -26,7 +26,7 @@ static inline int variable_test_bit(int
5115 u8 v;
5116 const u32 *p = (const u32 *)addr;
5117
5118- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5119+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5120 return v;
5121 }
5122
5123@@ -37,7 +37,7 @@ static inline int variable_test_bit(int
5124
5125 static inline void set_bit(int nr, void *addr)
5126 {
5127- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5128+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5129 }
5130
5131 #endif /* BOOT_BITOPS_H */
5132diff -urNp linux-3.0.4/arch/x86/boot/boot.h linux-3.0.4/arch/x86/boot/boot.h
5133--- linux-3.0.4/arch/x86/boot/boot.h 2011-07-21 22:17:23.000000000 -0400
5134+++ linux-3.0.4/arch/x86/boot/boot.h 2011-08-23 21:47:55.000000000 -0400
5135@@ -85,7 +85,7 @@ static inline void io_delay(void)
5136 static inline u16 ds(void)
5137 {
5138 u16 seg;
5139- asm("movw %%ds,%0" : "=rm" (seg));
5140+ asm volatile("movw %%ds,%0" : "=rm" (seg));
5141 return seg;
5142 }
5143
5144@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t
5145 static inline int memcmp(const void *s1, const void *s2, size_t len)
5146 {
5147 u8 diff;
5148- asm("repe; cmpsb; setnz %0"
5149+ asm volatile("repe; cmpsb; setnz %0"
5150 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5151 return diff;
5152 }
5153diff -urNp linux-3.0.4/arch/x86/boot/compressed/head_32.S linux-3.0.4/arch/x86/boot/compressed/head_32.S
5154--- linux-3.0.4/arch/x86/boot/compressed/head_32.S 2011-07-21 22:17:23.000000000 -0400
5155+++ linux-3.0.4/arch/x86/boot/compressed/head_32.S 2011-08-23 21:47:55.000000000 -0400
5156@@ -76,7 +76,7 @@ ENTRY(startup_32)
5157 notl %eax
5158 andl %eax, %ebx
5159 #else
5160- movl $LOAD_PHYSICAL_ADDR, %ebx
5161+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5162 #endif
5163
5164 /* Target address to relocate to for decompression */
5165@@ -162,7 +162,7 @@ relocated:
5166 * and where it was actually loaded.
5167 */
5168 movl %ebp, %ebx
5169- subl $LOAD_PHYSICAL_ADDR, %ebx
5170+ subl $____LOAD_PHYSICAL_ADDR, %ebx
5171 jz 2f /* Nothing to be done if loaded at compiled addr. */
5172 /*
5173 * Process relocations.
5174@@ -170,8 +170,7 @@ relocated:
5175
5176 1: subl $4, %edi
5177 movl (%edi), %ecx
5178- testl %ecx, %ecx
5179- jz 2f
5180+ jecxz 2f
5181 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5182 jmp 1b
5183 2:
5184diff -urNp linux-3.0.4/arch/x86/boot/compressed/head_64.S linux-3.0.4/arch/x86/boot/compressed/head_64.S
5185--- linux-3.0.4/arch/x86/boot/compressed/head_64.S 2011-07-21 22:17:23.000000000 -0400
5186+++ linux-3.0.4/arch/x86/boot/compressed/head_64.S 2011-08-23 21:47:55.000000000 -0400
5187@@ -91,7 +91,7 @@ ENTRY(startup_32)
5188 notl %eax
5189 andl %eax, %ebx
5190 #else
5191- movl $LOAD_PHYSICAL_ADDR, %ebx
5192+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5193 #endif
5194
5195 /* Target address to relocate to for decompression */
5196@@ -233,7 +233,7 @@ ENTRY(startup_64)
5197 notq %rax
5198 andq %rax, %rbp
5199 #else
5200- movq $LOAD_PHYSICAL_ADDR, %rbp
5201+ movq $____LOAD_PHYSICAL_ADDR, %rbp
5202 #endif
5203
5204 /* Target address to relocate to for decompression */
5205diff -urNp linux-3.0.4/arch/x86/boot/compressed/Makefile linux-3.0.4/arch/x86/boot/compressed/Makefile
5206--- linux-3.0.4/arch/x86/boot/compressed/Makefile 2011-07-21 22:17:23.000000000 -0400
5207+++ linux-3.0.4/arch/x86/boot/compressed/Makefile 2011-08-23 21:47:55.000000000 -0400
5208@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
5209 KBUILD_CFLAGS += $(cflags-y)
5210 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5211 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5212+ifdef CONSTIFY_PLUGIN
5213+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5214+endif
5215
5216 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5217 GCOV_PROFILE := n
5218diff -urNp linux-3.0.4/arch/x86/boot/compressed/misc.c linux-3.0.4/arch/x86/boot/compressed/misc.c
5219--- linux-3.0.4/arch/x86/boot/compressed/misc.c 2011-07-21 22:17:23.000000000 -0400
5220+++ linux-3.0.4/arch/x86/boot/compressed/misc.c 2011-08-23 21:47:55.000000000 -0400
5221@@ -310,7 +310,7 @@ static void parse_elf(void *output)
5222 case PT_LOAD:
5223 #ifdef CONFIG_RELOCATABLE
5224 dest = output;
5225- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5226+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5227 #else
5228 dest = (void *)(phdr->p_paddr);
5229 #endif
5230@@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *
5231 error("Destination address too large");
5232 #endif
5233 #ifndef CONFIG_RELOCATABLE
5234- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5235+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5236 error("Wrong destination address");
5237 #endif
5238
5239diff -urNp linux-3.0.4/arch/x86/boot/compressed/relocs.c linux-3.0.4/arch/x86/boot/compressed/relocs.c
5240--- linux-3.0.4/arch/x86/boot/compressed/relocs.c 2011-07-21 22:17:23.000000000 -0400
5241+++ linux-3.0.4/arch/x86/boot/compressed/relocs.c 2011-08-23 21:47:55.000000000 -0400
5242@@ -13,8 +13,11 @@
5243
5244 static void die(char *fmt, ...);
5245
5246+#include "../../../../include/generated/autoconf.h"
5247+
5248 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5249 static Elf32_Ehdr ehdr;
5250+static Elf32_Phdr *phdr;
5251 static unsigned long reloc_count, reloc_idx;
5252 static unsigned long *relocs;
5253
5254@@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5255 }
5256 }
5257
5258+static void read_phdrs(FILE *fp)
5259+{
5260+ unsigned int i;
5261+
5262+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5263+ if (!phdr) {
5264+ die("Unable to allocate %d program headers\n",
5265+ ehdr.e_phnum);
5266+ }
5267+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5268+ die("Seek to %d failed: %s\n",
5269+ ehdr.e_phoff, strerror(errno));
5270+ }
5271+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5272+ die("Cannot read ELF program headers: %s\n",
5273+ strerror(errno));
5274+ }
5275+ for(i = 0; i < ehdr.e_phnum; i++) {
5276+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5277+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5278+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5279+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5280+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5281+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5282+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5283+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5284+ }
5285+
5286+}
5287+
5288 static void read_shdrs(FILE *fp)
5289 {
5290- int i;
5291+ unsigned int i;
5292 Elf32_Shdr shdr;
5293
5294 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5295@@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5296
5297 static void read_strtabs(FILE *fp)
5298 {
5299- int i;
5300+ unsigned int i;
5301 for (i = 0; i < ehdr.e_shnum; i++) {
5302 struct section *sec = &secs[i];
5303 if (sec->shdr.sh_type != SHT_STRTAB) {
5304@@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5305
5306 static void read_symtabs(FILE *fp)
5307 {
5308- int i,j;
5309+ unsigned int i,j;
5310 for (i = 0; i < ehdr.e_shnum; i++) {
5311 struct section *sec = &secs[i];
5312 if (sec->shdr.sh_type != SHT_SYMTAB) {
5313@@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5314
5315 static void read_relocs(FILE *fp)
5316 {
5317- int i,j;
5318+ unsigned int i,j;
5319+ uint32_t base;
5320+
5321 for (i = 0; i < ehdr.e_shnum; i++) {
5322 struct section *sec = &secs[i];
5323 if (sec->shdr.sh_type != SHT_REL) {
5324@@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5325 die("Cannot read symbol table: %s\n",
5326 strerror(errno));
5327 }
5328+ base = 0;
5329+ for (j = 0; j < ehdr.e_phnum; j++) {
5330+ if (phdr[j].p_type != PT_LOAD )
5331+ continue;
5332+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5333+ continue;
5334+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
5335+ break;
5336+ }
5337 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
5338 Elf32_Rel *rel = &sec->reltab[j];
5339- rel->r_offset = elf32_to_cpu(rel->r_offset);
5340+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
5341 rel->r_info = elf32_to_cpu(rel->r_info);
5342 }
5343 }
5344@@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
5345
5346 static void print_absolute_symbols(void)
5347 {
5348- int i;
5349+ unsigned int i;
5350 printf("Absolute symbols\n");
5351 printf(" Num: Value Size Type Bind Visibility Name\n");
5352 for (i = 0; i < ehdr.e_shnum; i++) {
5353 struct section *sec = &secs[i];
5354 char *sym_strtab;
5355 Elf32_Sym *sh_symtab;
5356- int j;
5357+ unsigned int j;
5358
5359 if (sec->shdr.sh_type != SHT_SYMTAB) {
5360 continue;
5361@@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
5362
5363 static void print_absolute_relocs(void)
5364 {
5365- int i, printed = 0;
5366+ unsigned int i, printed = 0;
5367
5368 for (i = 0; i < ehdr.e_shnum; i++) {
5369 struct section *sec = &secs[i];
5370 struct section *sec_applies, *sec_symtab;
5371 char *sym_strtab;
5372 Elf32_Sym *sh_symtab;
5373- int j;
5374+ unsigned int j;
5375 if (sec->shdr.sh_type != SHT_REL) {
5376 continue;
5377 }
5378@@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
5379
5380 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
5381 {
5382- int i;
5383+ unsigned int i;
5384 /* Walk through the relocations */
5385 for (i = 0; i < ehdr.e_shnum; i++) {
5386 char *sym_strtab;
5387 Elf32_Sym *sh_symtab;
5388 struct section *sec_applies, *sec_symtab;
5389- int j;
5390+ unsigned int j;
5391 struct section *sec = &secs[i];
5392
5393 if (sec->shdr.sh_type != SHT_REL) {
5394@@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(El
5395 !is_rel_reloc(sym_name(sym_strtab, sym))) {
5396 continue;
5397 }
5398+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
5399+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
5400+ continue;
5401+
5402+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
5403+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
5404+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
5405+ continue;
5406+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
5407+ continue;
5408+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
5409+ continue;
5410+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
5411+ continue;
5412+#endif
5413+
5414 switch (r_type) {
5415 case R_386_NONE:
5416 case R_386_PC32:
5417@@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, co
5418
5419 static void emit_relocs(int as_text)
5420 {
5421- int i;
5422+ unsigned int i;
5423 /* Count how many relocations I have and allocate space for them. */
5424 reloc_count = 0;
5425 walk_relocs(count_reloc);
5426@@ -665,6 +725,7 @@ int main(int argc, char **argv)
5427 fname, strerror(errno));
5428 }
5429 read_ehdr(fp);
5430+ read_phdrs(fp);
5431 read_shdrs(fp);
5432 read_strtabs(fp);
5433 read_symtabs(fp);
5434diff -urNp linux-3.0.4/arch/x86/boot/cpucheck.c linux-3.0.4/arch/x86/boot/cpucheck.c
5435--- linux-3.0.4/arch/x86/boot/cpucheck.c 2011-07-21 22:17:23.000000000 -0400
5436+++ linux-3.0.4/arch/x86/boot/cpucheck.c 2011-08-23 21:47:55.000000000 -0400
5437@@ -74,7 +74,7 @@ static int has_fpu(void)
5438 u16 fcw = -1, fsw = -1;
5439 u32 cr0;
5440
5441- asm("movl %%cr0,%0" : "=r" (cr0));
5442+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
5443 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
5444 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
5445 asm volatile("movl %0,%%cr0" : : "r" (cr0));
5446@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
5447 {
5448 u32 f0, f1;
5449
5450- asm("pushfl ; "
5451+ asm volatile("pushfl ; "
5452 "pushfl ; "
5453 "popl %0 ; "
5454 "movl %0,%1 ; "
5455@@ -115,7 +115,7 @@ static void get_flags(void)
5456 set_bit(X86_FEATURE_FPU, cpu.flags);
5457
5458 if (has_eflag(X86_EFLAGS_ID)) {
5459- asm("cpuid"
5460+ asm volatile("cpuid"
5461 : "=a" (max_intel_level),
5462 "=b" (cpu_vendor[0]),
5463 "=d" (cpu_vendor[1]),
5464@@ -124,7 +124,7 @@ static void get_flags(void)
5465
5466 if (max_intel_level >= 0x00000001 &&
5467 max_intel_level <= 0x0000ffff) {
5468- asm("cpuid"
5469+ asm volatile("cpuid"
5470 : "=a" (tfms),
5471 "=c" (cpu.flags[4]),
5472 "=d" (cpu.flags[0])
5473@@ -136,7 +136,7 @@ static void get_flags(void)
5474 cpu.model += ((tfms >> 16) & 0xf) << 4;
5475 }
5476
5477- asm("cpuid"
5478+ asm volatile("cpuid"
5479 : "=a" (max_amd_level)
5480 : "a" (0x80000000)
5481 : "ebx", "ecx", "edx");
5482@@ -144,7 +144,7 @@ static void get_flags(void)
5483 if (max_amd_level >= 0x80000001 &&
5484 max_amd_level <= 0x8000ffff) {
5485 u32 eax = 0x80000001;
5486- asm("cpuid"
5487+ asm volatile("cpuid"
5488 : "+a" (eax),
5489 "=c" (cpu.flags[6]),
5490 "=d" (cpu.flags[1])
5491@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5492 u32 ecx = MSR_K7_HWCR;
5493 u32 eax, edx;
5494
5495- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5496+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5497 eax &= ~(1 << 15);
5498- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5499+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5500
5501 get_flags(); /* Make sure it really did something */
5502 err = check_flags();
5503@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5504 u32 ecx = MSR_VIA_FCR;
5505 u32 eax, edx;
5506
5507- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5508+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5509 eax |= (1<<1)|(1<<7);
5510- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5511+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5512
5513 set_bit(X86_FEATURE_CX8, cpu.flags);
5514 err = check_flags();
5515@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
5516 u32 eax, edx;
5517 u32 level = 1;
5518
5519- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5520- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5521- asm("cpuid"
5522+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5523+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5524+ asm volatile("cpuid"
5525 : "+a" (level), "=d" (cpu.flags[0])
5526 : : "ecx", "ebx");
5527- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5528+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5529
5530 err = check_flags();
5531 }
5532diff -urNp linux-3.0.4/arch/x86/boot/header.S linux-3.0.4/arch/x86/boot/header.S
5533--- linux-3.0.4/arch/x86/boot/header.S 2011-07-21 22:17:23.000000000 -0400
5534+++ linux-3.0.4/arch/x86/boot/header.S 2011-08-23 21:47:55.000000000 -0400
5535@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
5536 # single linked list of
5537 # struct setup_data
5538
5539-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
5540+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
5541
5542 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
5543 #define VO_INIT_SIZE (VO__end - VO__text)
5544diff -urNp linux-3.0.4/arch/x86/boot/Makefile linux-3.0.4/arch/x86/boot/Makefile
5545--- linux-3.0.4/arch/x86/boot/Makefile 2011-07-21 22:17:23.000000000 -0400
5546+++ linux-3.0.4/arch/x86/boot/Makefile 2011-08-23 21:47:55.000000000 -0400
5547@@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
5548 $(call cc-option, -fno-stack-protector) \
5549 $(call cc-option, -mpreferred-stack-boundary=2)
5550 KBUILD_CFLAGS += $(call cc-option, -m32)
5551+ifdef CONSTIFY_PLUGIN
5552+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5553+endif
5554 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5555 GCOV_PROFILE := n
5556
5557diff -urNp linux-3.0.4/arch/x86/boot/memory.c linux-3.0.4/arch/x86/boot/memory.c
5558--- linux-3.0.4/arch/x86/boot/memory.c 2011-07-21 22:17:23.000000000 -0400
5559+++ linux-3.0.4/arch/x86/boot/memory.c 2011-08-23 21:47:55.000000000 -0400
5560@@ -19,7 +19,7 @@
5561
5562 static int detect_memory_e820(void)
5563 {
5564- int count = 0;
5565+ unsigned int count = 0;
5566 struct biosregs ireg, oreg;
5567 struct e820entry *desc = boot_params.e820_map;
5568 static struct e820entry buf; /* static so it is zeroed */
5569diff -urNp linux-3.0.4/arch/x86/boot/video.c linux-3.0.4/arch/x86/boot/video.c
5570--- linux-3.0.4/arch/x86/boot/video.c 2011-07-21 22:17:23.000000000 -0400
5571+++ linux-3.0.4/arch/x86/boot/video.c 2011-08-23 21:47:55.000000000 -0400
5572@@ -96,7 +96,7 @@ static void store_mode_params(void)
5573 static unsigned int get_entry(void)
5574 {
5575 char entry_buf[4];
5576- int i, len = 0;
5577+ unsigned int i, len = 0;
5578 int key;
5579 unsigned int v;
5580
5581diff -urNp linux-3.0.4/arch/x86/boot/video-vesa.c linux-3.0.4/arch/x86/boot/video-vesa.c
5582--- linux-3.0.4/arch/x86/boot/video-vesa.c 2011-07-21 22:17:23.000000000 -0400
5583+++ linux-3.0.4/arch/x86/boot/video-vesa.c 2011-08-23 21:47:55.000000000 -0400
5584@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
5585
5586 boot_params.screen_info.vesapm_seg = oreg.es;
5587 boot_params.screen_info.vesapm_off = oreg.di;
5588+ boot_params.screen_info.vesapm_size = oreg.cx;
5589 }
5590
5591 /*
5592diff -urNp linux-3.0.4/arch/x86/crypto/aes-x86_64-asm_64.S linux-3.0.4/arch/x86/crypto/aes-x86_64-asm_64.S
5593--- linux-3.0.4/arch/x86/crypto/aes-x86_64-asm_64.S 2011-07-21 22:17:23.000000000 -0400
5594+++ linux-3.0.4/arch/x86/crypto/aes-x86_64-asm_64.S 2011-10-06 04:17:55.000000000 -0400
5595@@ -8,6 +8,8 @@
5596 * including this sentence is retained in full.
5597 */
5598
5599+#include <asm/alternative-asm.h>
5600+
5601 .extern crypto_ft_tab
5602 .extern crypto_it_tab
5603 .extern crypto_fl_tab
5604@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
5605 je B192; \
5606 leaq 32(r9),r9;
5607
5608+#define ret pax_force_retaddr; ret
5609+
5610 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
5611 movq r1,r2; \
5612 movq r3,r4; \
5613diff -urNp linux-3.0.4/arch/x86/crypto/salsa20-x86_64-asm_64.S linux-3.0.4/arch/x86/crypto/salsa20-x86_64-asm_64.S
5614--- linux-3.0.4/arch/x86/crypto/salsa20-x86_64-asm_64.S 2011-07-21 22:17:23.000000000 -0400
5615+++ linux-3.0.4/arch/x86/crypto/salsa20-x86_64-asm_64.S 2011-10-06 04:17:55.000000000 -0400
5616@@ -1,3 +1,5 @@
5617+#include <asm/alternative-asm.h>
5618+
5619 # enter ECRYPT_encrypt_bytes
5620 .text
5621 .p2align 5
5622@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
5623 add %r11,%rsp
5624 mov %rdi,%rax
5625 mov %rsi,%rdx
5626+ pax_force_retaddr
5627 ret
5628 # bytesatleast65:
5629 ._bytesatleast65:
5630@@ -891,6 +894,7 @@ ECRYPT_keysetup:
5631 add %r11,%rsp
5632 mov %rdi,%rax
5633 mov %rsi,%rdx
5634+ pax_force_retaddr
5635 ret
5636 # enter ECRYPT_ivsetup
5637 .text
5638@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
5639 add %r11,%rsp
5640 mov %rdi,%rax
5641 mov %rsi,%rdx
5642+ pax_force_retaddr
5643 ret
5644diff -urNp linux-3.0.4/arch/x86/crypto/twofish-x86_64-asm_64.S linux-3.0.4/arch/x86/crypto/twofish-x86_64-asm_64.S
5645--- linux-3.0.4/arch/x86/crypto/twofish-x86_64-asm_64.S 2011-07-21 22:17:23.000000000 -0400
5646+++ linux-3.0.4/arch/x86/crypto/twofish-x86_64-asm_64.S 2011-10-06 04:17:55.000000000 -0400
5647@@ -21,6 +21,7 @@
5648 .text
5649
5650 #include <asm/asm-offsets.h>
5651+#include <asm/alternative-asm.h>
5652
5653 #define a_offset 0
5654 #define b_offset 4
5655@@ -269,6 +270,7 @@ twofish_enc_blk:
5656
5657 popq R1
5658 movq $1,%rax
5659+ pax_force_retaddr
5660 ret
5661
5662 twofish_dec_blk:
5663@@ -321,4 +323,5 @@ twofish_dec_blk:
5664
5665 popq R1
5666 movq $1,%rax
5667+ pax_force_retaddr
5668 ret
5669diff -urNp linux-3.0.4/arch/x86/ia32/ia32_aout.c linux-3.0.4/arch/x86/ia32/ia32_aout.c
5670--- linux-3.0.4/arch/x86/ia32/ia32_aout.c 2011-07-21 22:17:23.000000000 -0400
5671+++ linux-3.0.4/arch/x86/ia32/ia32_aout.c 2011-08-23 21:48:14.000000000 -0400
5672@@ -162,6 +162,8 @@ static int aout_core_dump(long signr, st
5673 unsigned long dump_start, dump_size;
5674 struct user32 dump;
5675
5676+ memset(&dump, 0, sizeof(dump));
5677+
5678 fs = get_fs();
5679 set_fs(KERNEL_DS);
5680 has_dumped = 1;
5681diff -urNp linux-3.0.4/arch/x86/ia32/ia32entry.S linux-3.0.4/arch/x86/ia32/ia32entry.S
5682--- linux-3.0.4/arch/x86/ia32/ia32entry.S 2011-07-21 22:17:23.000000000 -0400
5683+++ linux-3.0.4/arch/x86/ia32/ia32entry.S 2011-08-25 17:36:37.000000000 -0400
5684@@ -13,6 +13,7 @@
5685 #include <asm/thread_info.h>
5686 #include <asm/segment.h>
5687 #include <asm/irqflags.h>
5688+#include <asm/pgtable.h>
5689 #include <linux/linkage.h>
5690
5691 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
5692@@ -95,6 +96,29 @@ ENTRY(native_irq_enable_sysexit)
5693 ENDPROC(native_irq_enable_sysexit)
5694 #endif
5695
5696+ .macro pax_enter_kernel_user
5697+#ifdef CONFIG_PAX_MEMORY_UDEREF
5698+ call pax_enter_kernel_user
5699+#endif
5700+ .endm
5701+
5702+ .macro pax_exit_kernel_user
5703+#ifdef CONFIG_PAX_MEMORY_UDEREF
5704+ call pax_exit_kernel_user
5705+#endif
5706+#ifdef CONFIG_PAX_RANDKSTACK
5707+ pushq %rax
5708+ call pax_randomize_kstack
5709+ popq %rax
5710+#endif
5711+ .endm
5712+
5713+ .macro pax_erase_kstack
5714+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
5715+ call pax_erase_kstack
5716+#endif
5717+ .endm
5718+
5719 /*
5720 * 32bit SYSENTER instruction entry.
5721 *
5722@@ -121,7 +145,7 @@ ENTRY(ia32_sysenter_target)
5723 CFI_REGISTER rsp,rbp
5724 SWAPGS_UNSAFE_STACK
5725 movq PER_CPU_VAR(kernel_stack), %rsp
5726- addq $(KERNEL_STACK_OFFSET),%rsp
5727+ pax_enter_kernel_user
5728 /*
5729 * No need to follow this irqs on/off section: the syscall
5730 * disabled irqs, here we enable it straight after entry:
5731@@ -134,7 +158,8 @@ ENTRY(ia32_sysenter_target)
5732 CFI_REL_OFFSET rsp,0
5733 pushfq_cfi
5734 /*CFI_REL_OFFSET rflags,0*/
5735- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
5736+ GET_THREAD_INFO(%r10)
5737+ movl TI_sysenter_return(%r10), %r10d
5738 CFI_REGISTER rip,r10
5739 pushq_cfi $__USER32_CS
5740 /*CFI_REL_OFFSET cs,0*/
5741@@ -146,6 +171,12 @@ ENTRY(ia32_sysenter_target)
5742 SAVE_ARGS 0,0,1
5743 /* no need to do an access_ok check here because rbp has been
5744 32bit zero extended */
5745+
5746+#ifdef CONFIG_PAX_MEMORY_UDEREF
5747+ mov $PAX_USER_SHADOW_BASE,%r10
5748+ add %r10,%rbp
5749+#endif
5750+
5751 1: movl (%rbp),%ebp
5752 .section __ex_table,"a"
5753 .quad 1b,ia32_badarg
5754@@ -168,6 +199,8 @@ sysenter_dispatch:
5755 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5756 jnz sysexit_audit
5757 sysexit_from_sys_call:
5758+ pax_exit_kernel_user
5759+ pax_erase_kstack
5760 andl $~TS_COMPAT,TI_status(%r10)
5761 /* clear IF, that popfq doesn't enable interrupts early */
5762 andl $~0x200,EFLAGS-R11(%rsp)
5763@@ -194,6 +227,9 @@ sysexit_from_sys_call:
5764 movl %eax,%esi /* 2nd arg: syscall number */
5765 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
5766 call audit_syscall_entry
5767+
5768+ pax_erase_kstack
5769+
5770 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
5771 cmpq $(IA32_NR_syscalls-1),%rax
5772 ja ia32_badsys
5773@@ -246,6 +282,9 @@ sysenter_tracesys:
5774 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
5775 movq %rsp,%rdi /* &pt_regs -> arg1 */
5776 call syscall_trace_enter
5777+
5778+ pax_erase_kstack
5779+
5780 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5781 RESTORE_REST
5782 cmpq $(IA32_NR_syscalls-1),%rax
5783@@ -277,19 +316,24 @@ ENDPROC(ia32_sysenter_target)
5784 ENTRY(ia32_cstar_target)
5785 CFI_STARTPROC32 simple
5786 CFI_SIGNAL_FRAME
5787- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
5788+ CFI_DEF_CFA rsp,0
5789 CFI_REGISTER rip,rcx
5790 /*CFI_REGISTER rflags,r11*/
5791 SWAPGS_UNSAFE_STACK
5792 movl %esp,%r8d
5793 CFI_REGISTER rsp,r8
5794 movq PER_CPU_VAR(kernel_stack),%rsp
5795+
5796+#ifdef CONFIG_PAX_MEMORY_UDEREF
5797+ pax_enter_kernel_user
5798+#endif
5799+
5800 /*
5801 * No need to follow this irqs on/off section: the syscall
5802 * disabled irqs and here we enable it straight after entry:
5803 */
5804 ENABLE_INTERRUPTS(CLBR_NONE)
5805- SAVE_ARGS 8,1,1
5806+ SAVE_ARGS 8*6,1,1
5807 movl %eax,%eax /* zero extension */
5808 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
5809 movq %rcx,RIP-ARGOFFSET(%rsp)
5810@@ -305,6 +349,12 @@ ENTRY(ia32_cstar_target)
5811 /* no need to do an access_ok check here because r8 has been
5812 32bit zero extended */
5813 /* hardware stack frame is complete now */
5814+
5815+#ifdef CONFIG_PAX_MEMORY_UDEREF
5816+ mov $PAX_USER_SHADOW_BASE,%r10
5817+ add %r10,%r8
5818+#endif
5819+
5820 1: movl (%r8),%r9d
5821 .section __ex_table,"a"
5822 .quad 1b,ia32_badarg
5823@@ -327,6 +377,8 @@ cstar_dispatch:
5824 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5825 jnz sysretl_audit
5826 sysretl_from_sys_call:
5827+ pax_exit_kernel_user
5828+ pax_erase_kstack
5829 andl $~TS_COMPAT,TI_status(%r10)
5830 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
5831 movl RIP-ARGOFFSET(%rsp),%ecx
5832@@ -364,6 +416,9 @@ cstar_tracesys:
5833 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5834 movq %rsp,%rdi /* &pt_regs -> arg1 */
5835 call syscall_trace_enter
5836+
5837+ pax_erase_kstack
5838+
5839 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
5840 RESTORE_REST
5841 xchgl %ebp,%r9d
5842@@ -409,6 +464,7 @@ ENTRY(ia32_syscall)
5843 CFI_REL_OFFSET rip,RIP-RIP
5844 PARAVIRT_ADJUST_EXCEPTION_FRAME
5845 SWAPGS
5846+ pax_enter_kernel_user
5847 /*
5848 * No need to follow this irqs on/off section: the syscall
5849 * disabled irqs and here we enable it straight after entry:
5850@@ -441,6 +497,9 @@ ia32_tracesys:
5851 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5852 movq %rsp,%rdi /* &pt_regs -> arg1 */
5853 call syscall_trace_enter
5854+
5855+ pax_erase_kstack
5856+
5857 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5858 RESTORE_REST
5859 cmpq $(IA32_NR_syscalls-1),%rax
5860diff -urNp linux-3.0.4/arch/x86/ia32/ia32_signal.c linux-3.0.4/arch/x86/ia32/ia32_signal.c
5861--- linux-3.0.4/arch/x86/ia32/ia32_signal.c 2011-07-21 22:17:23.000000000 -0400
5862+++ linux-3.0.4/arch/x86/ia32/ia32_signal.c 2011-10-06 04:17:55.000000000 -0400
5863@@ -167,7 +167,7 @@ asmlinkage long sys32_sigaltstack(const
5864 }
5865 seg = get_fs();
5866 set_fs(KERNEL_DS);
5867- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
5868+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
5869 set_fs(seg);
5870 if (ret >= 0 && uoss_ptr) {
5871 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
5872@@ -374,7 +374,7 @@ static int ia32_setup_sigcontext(struct
5873 */
5874 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
5875 size_t frame_size,
5876- void **fpstate)
5877+ void __user **fpstate)
5878 {
5879 unsigned long sp;
5880
5881@@ -395,7 +395,7 @@ static void __user *get_sigframe(struct
5882
5883 if (used_math()) {
5884 sp = sp - sig_xstate_ia32_size;
5885- *fpstate = (struct _fpstate_ia32 *) sp;
5886+ *fpstate = (struct _fpstate_ia32 __user *) sp;
5887 if (save_i387_xstate_ia32(*fpstate) < 0)
5888 return (void __user *) -1L;
5889 }
5890@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
5891 sp -= frame_size;
5892 /* Align the stack pointer according to the i386 ABI,
5893 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
5894- sp = ((sp + 4) & -16ul) - 4;
5895+ sp = ((sp - 12) & -16ul) - 4;
5896 return (void __user *) sp;
5897 }
5898
5899@@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
5900 * These are actually not used anymore, but left because some
5901 * gdb versions depend on them as a marker.
5902 */
5903- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5904+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
5905 } put_user_catch(err);
5906
5907 if (err)
5908@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
5909 0xb8,
5910 __NR_ia32_rt_sigreturn,
5911 0x80cd,
5912- 0,
5913+ 0
5914 };
5915
5916 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
5917@@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
5918
5919 if (ka->sa.sa_flags & SA_RESTORER)
5920 restorer = ka->sa.sa_restorer;
5921+ else if (current->mm->context.vdso)
5922+ /* Return stub is in 32bit vsyscall page */
5923+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
5924 else
5925- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
5926- rt_sigreturn);
5927+ restorer = &frame->retcode;
5928 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
5929
5930 /*
5931 * Not actually used anymore, but left because some gdb
5932 * versions need it.
5933 */
5934- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5935+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
5936 } put_user_catch(err);
5937
5938 if (err)
5939diff -urNp linux-3.0.4/arch/x86/ia32/sys_ia32.c linux-3.0.4/arch/x86/ia32/sys_ia32.c
5940--- linux-3.0.4/arch/x86/ia32/sys_ia32.c 2011-07-21 22:17:23.000000000 -0400
5941+++ linux-3.0.4/arch/x86/ia32/sys_ia32.c 2011-10-06 04:17:55.000000000 -0400
5942@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsign
5943 */
5944 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
5945 {
5946- typeof(ubuf->st_uid) uid = 0;
5947- typeof(ubuf->st_gid) gid = 0;
5948+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
5949+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
5950 SET_UID(uid, stat->uid);
5951 SET_GID(gid, stat->gid);
5952 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
5953@@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int
5954 }
5955 set_fs(KERNEL_DS);
5956 ret = sys_rt_sigprocmask(how,
5957- set ? (sigset_t __user *)&s : NULL,
5958- oset ? (sigset_t __user *)&s : NULL,
5959+ set ? (sigset_t __force_user *)&s : NULL,
5960+ oset ? (sigset_t __force_user *)&s : NULL,
5961 sigsetsize);
5962 set_fs(old_fs);
5963 if (ret)
5964@@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int
5965 return alarm_setitimer(seconds);
5966 }
5967
5968-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
5969+asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
5970 int options)
5971 {
5972 return compat_sys_wait4(pid, stat_addr, options, NULL);
5973@@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_inter
5974 mm_segment_t old_fs = get_fs();
5975
5976 set_fs(KERNEL_DS);
5977- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
5978+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
5979 set_fs(old_fs);
5980 if (put_compat_timespec(&t, interval))
5981 return -EFAULT;
5982@@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(comp
5983 mm_segment_t old_fs = get_fs();
5984
5985 set_fs(KERNEL_DS);
5986- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
5987+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
5988 set_fs(old_fs);
5989 if (!ret) {
5990 switch (_NSIG_WORDS) {
5991@@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(in
5992 if (copy_siginfo_from_user32(&info, uinfo))
5993 return -EFAULT;
5994 set_fs(KERNEL_DS);
5995- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
5996+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
5997 set_fs(old_fs);
5998 return ret;
5999 }
6000@@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_f
6001 return -EFAULT;
6002
6003 set_fs(KERNEL_DS);
6004- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
6005+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
6006 count);
6007 set_fs(old_fs);
6008
6009diff -urNp linux-3.0.4/arch/x86/include/asm/alternative-asm.h linux-3.0.4/arch/x86/include/asm/alternative-asm.h
6010--- linux-3.0.4/arch/x86/include/asm/alternative-asm.h 2011-07-21 22:17:23.000000000 -0400
6011+++ linux-3.0.4/arch/x86/include/asm/alternative-asm.h 2011-10-07 19:07:23.000000000 -0400
6012@@ -15,6 +15,20 @@
6013 .endm
6014 #endif
6015
6016+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
6017+ .macro pax_force_retaddr rip=0
6018+ btsq $63,\rip(%rsp)
6019+ .endm
6020+ .macro pax_force_fptr ptr
6021+ btsq $63,\ptr
6022+ .endm
6023+#else
6024+ .macro pax_force_retaddr rip=0
6025+ .endm
6026+ .macro pax_force_fptr ptr
6027+ .endm
6028+#endif
6029+
6030 .macro altinstruction_entry orig alt feature orig_len alt_len
6031 .align 8
6032 .quad \orig
6033diff -urNp linux-3.0.4/arch/x86/include/asm/alternative.h linux-3.0.4/arch/x86/include/asm/alternative.h
6034--- linux-3.0.4/arch/x86/include/asm/alternative.h 2011-07-21 22:17:23.000000000 -0400
6035+++ linux-3.0.4/arch/x86/include/asm/alternative.h 2011-08-23 21:47:55.000000000 -0400
6036@@ -93,7 +93,7 @@ static inline int alternatives_text_rese
6037 ".section .discard,\"aw\",@progbits\n" \
6038 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
6039 ".previous\n" \
6040- ".section .altinstr_replacement, \"ax\"\n" \
6041+ ".section .altinstr_replacement, \"a\"\n" \
6042 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
6043 ".previous"
6044
6045diff -urNp linux-3.0.4/arch/x86/include/asm/apic.h linux-3.0.4/arch/x86/include/asm/apic.h
6046--- linux-3.0.4/arch/x86/include/asm/apic.h 2011-07-21 22:17:23.000000000 -0400
6047+++ linux-3.0.4/arch/x86/include/asm/apic.h 2011-08-23 21:48:14.000000000 -0400
6048@@ -45,7 +45,7 @@ static inline void generic_apic_probe(vo
6049
6050 #ifdef CONFIG_X86_LOCAL_APIC
6051
6052-extern unsigned int apic_verbosity;
6053+extern int apic_verbosity;
6054 extern int local_apic_timer_c2_ok;
6055
6056 extern int disable_apic;
6057diff -urNp linux-3.0.4/arch/x86/include/asm/apm.h linux-3.0.4/arch/x86/include/asm/apm.h
6058--- linux-3.0.4/arch/x86/include/asm/apm.h 2011-07-21 22:17:23.000000000 -0400
6059+++ linux-3.0.4/arch/x86/include/asm/apm.h 2011-08-23 21:47:55.000000000 -0400
6060@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
6061 __asm__ __volatile__(APM_DO_ZERO_SEGS
6062 "pushl %%edi\n\t"
6063 "pushl %%ebp\n\t"
6064- "lcall *%%cs:apm_bios_entry\n\t"
6065+ "lcall *%%ss:apm_bios_entry\n\t"
6066 "setc %%al\n\t"
6067 "popl %%ebp\n\t"
6068 "popl %%edi\n\t"
6069@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
6070 __asm__ __volatile__(APM_DO_ZERO_SEGS
6071 "pushl %%edi\n\t"
6072 "pushl %%ebp\n\t"
6073- "lcall *%%cs:apm_bios_entry\n\t"
6074+ "lcall *%%ss:apm_bios_entry\n\t"
6075 "setc %%bl\n\t"
6076 "popl %%ebp\n\t"
6077 "popl %%edi\n\t"
6078diff -urNp linux-3.0.4/arch/x86/include/asm/atomic64_32.h linux-3.0.4/arch/x86/include/asm/atomic64_32.h
6079--- linux-3.0.4/arch/x86/include/asm/atomic64_32.h 2011-07-21 22:17:23.000000000 -0400
6080+++ linux-3.0.4/arch/x86/include/asm/atomic64_32.h 2011-08-23 21:47:55.000000000 -0400
6081@@ -12,6 +12,14 @@ typedef struct {
6082 u64 __aligned(8) counter;
6083 } atomic64_t;
6084
6085+#ifdef CONFIG_PAX_REFCOUNT
6086+typedef struct {
6087+ u64 __aligned(8) counter;
6088+} atomic64_unchecked_t;
6089+#else
6090+typedef atomic64_t atomic64_unchecked_t;
6091+#endif
6092+
6093 #define ATOMIC64_INIT(val) { (val) }
6094
6095 #ifdef CONFIG_X86_CMPXCHG64
6096@@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg
6097 }
6098
6099 /**
6100+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
6101+ * @p: pointer to type atomic64_unchecked_t
6102+ * @o: expected value
6103+ * @n: new value
6104+ *
6105+ * Atomically sets @v to @n if it was equal to @o and returns
6106+ * the old value.
6107+ */
6108+
6109+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
6110+{
6111+ return cmpxchg64(&v->counter, o, n);
6112+}
6113+
6114+/**
6115 * atomic64_xchg - xchg atomic64 variable
6116 * @v: pointer to type atomic64_t
6117 * @n: value to assign
6118@@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64
6119 }
6120
6121 /**
6122+ * atomic64_set_unchecked - set atomic64 variable
6123+ * @v: pointer to type atomic64_unchecked_t
6124+ * @n: value to assign
6125+ *
6126+ * Atomically sets the value of @v to @n.
6127+ */
6128+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
6129+{
6130+ unsigned high = (unsigned)(i >> 32);
6131+ unsigned low = (unsigned)i;
6132+ asm volatile(ATOMIC64_ALTERNATIVE(set)
6133+ : "+b" (low), "+c" (high)
6134+ : "S" (v)
6135+ : "eax", "edx", "memory"
6136+ );
6137+}
6138+
6139+/**
6140 * atomic64_read - read atomic64 variable
6141 * @v: pointer to type atomic64_t
6142 *
6143@@ -93,6 +134,22 @@ static inline long long atomic64_read(at
6144 }
6145
6146 /**
6147+ * atomic64_read_unchecked - read atomic64 variable
6148+ * @v: pointer to type atomic64_unchecked_t
6149+ *
6150+ * Atomically reads the value of @v and returns it.
6151+ */
6152+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
6153+{
6154+ long long r;
6155+ asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
6156+ : "=A" (r), "+c" (v)
6157+ : : "memory"
6158+ );
6159+ return r;
6160+ }
6161+
6162+/**
6163 * atomic64_add_return - add and return
6164 * @i: integer value to add
6165 * @v: pointer to type atomic64_t
6166@@ -108,6 +165,22 @@ static inline long long atomic64_add_ret
6167 return i;
6168 }
6169
6170+/**
6171+ * atomic64_add_return_unchecked - add and return
6172+ * @i: integer value to add
6173+ * @v: pointer to type atomic64_unchecked_t
6174+ *
6175+ * Atomically adds @i to @v and returns @i + *@v
6176+ */
6177+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
6178+{
6179+ asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
6180+ : "+A" (i), "+c" (v)
6181+ : : "memory"
6182+ );
6183+ return i;
6184+}
6185+
6186 /*
6187 * Other variants with different arithmetic operators:
6188 */
6189@@ -131,6 +204,17 @@ static inline long long atomic64_inc_ret
6190 return a;
6191 }
6192
6193+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6194+{
6195+ long long a;
6196+ asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
6197+ : "=A" (a)
6198+ : "S" (v)
6199+ : "memory", "ecx"
6200+ );
6201+ return a;
6202+}
6203+
6204 static inline long long atomic64_dec_return(atomic64_t *v)
6205 {
6206 long long a;
6207@@ -159,6 +243,22 @@ static inline long long atomic64_add(lon
6208 }
6209
6210 /**
6211+ * atomic64_add_unchecked - add integer to atomic64 variable
6212+ * @i: integer value to add
6213+ * @v: pointer to type atomic64_unchecked_t
6214+ *
6215+ * Atomically adds @i to @v.
6216+ */
6217+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
6218+{
6219+ asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
6220+ : "+A" (i), "+c" (v)
6221+ : : "memory"
6222+ );
6223+ return i;
6224+}
6225+
6226+/**
6227 * atomic64_sub - subtract the atomic64 variable
6228 * @i: integer value to subtract
6229 * @v: pointer to type atomic64_t
6230diff -urNp linux-3.0.4/arch/x86/include/asm/atomic64_64.h linux-3.0.4/arch/x86/include/asm/atomic64_64.h
6231--- linux-3.0.4/arch/x86/include/asm/atomic64_64.h 2011-07-21 22:17:23.000000000 -0400
6232+++ linux-3.0.4/arch/x86/include/asm/atomic64_64.h 2011-08-23 21:47:55.000000000 -0400
6233@@ -18,7 +18,19 @@
6234 */
6235 static inline long atomic64_read(const atomic64_t *v)
6236 {
6237- return (*(volatile long *)&(v)->counter);
6238+ return (*(volatile const long *)&(v)->counter);
6239+}
6240+
6241+/**
6242+ * atomic64_read_unchecked - read atomic64 variable
6243+ * @v: pointer of type atomic64_unchecked_t
6244+ *
6245+ * Atomically reads the value of @v.
6246+ * Doesn't imply a read memory barrier.
6247+ */
6248+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6249+{
6250+ return (*(volatile const long *)&(v)->counter);
6251 }
6252
6253 /**
6254@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64
6255 }
6256
6257 /**
6258+ * atomic64_set_unchecked - set atomic64 variable
6259+ * @v: pointer to type atomic64_unchecked_t
6260+ * @i: required value
6261+ *
6262+ * Atomically sets the value of @v to @i.
6263+ */
6264+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6265+{
6266+ v->counter = i;
6267+}
6268+
6269+/**
6270 * atomic64_add - add integer to atomic64 variable
6271 * @i: integer value to add
6272 * @v: pointer to type atomic64_t
6273@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64
6274 */
6275 static inline void atomic64_add(long i, atomic64_t *v)
6276 {
6277+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
6278+
6279+#ifdef CONFIG_PAX_REFCOUNT
6280+ "jno 0f\n"
6281+ LOCK_PREFIX "subq %1,%0\n"
6282+ "int $4\n0:\n"
6283+ _ASM_EXTABLE(0b, 0b)
6284+#endif
6285+
6286+ : "=m" (v->counter)
6287+ : "er" (i), "m" (v->counter));
6288+}
6289+
6290+/**
6291+ * atomic64_add_unchecked - add integer to atomic64 variable
6292+ * @i: integer value to add
6293+ * @v: pointer to type atomic64_unchecked_t
6294+ *
6295+ * Atomically adds @i to @v.
6296+ */
6297+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6298+{
6299 asm volatile(LOCK_PREFIX "addq %1,%0"
6300 : "=m" (v->counter)
6301 : "er" (i), "m" (v->counter));
6302@@ -56,7 +102,29 @@ static inline void atomic64_add(long i,
6303 */
6304 static inline void atomic64_sub(long i, atomic64_t *v)
6305 {
6306- asm volatile(LOCK_PREFIX "subq %1,%0"
6307+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
6308+
6309+#ifdef CONFIG_PAX_REFCOUNT
6310+ "jno 0f\n"
6311+ LOCK_PREFIX "addq %1,%0\n"
6312+ "int $4\n0:\n"
6313+ _ASM_EXTABLE(0b, 0b)
6314+#endif
6315+
6316+ : "=m" (v->counter)
6317+ : "er" (i), "m" (v->counter));
6318+}
6319+
6320+/**
6321+ * atomic64_sub_unchecked - subtract the atomic64 variable
6322+ * @i: integer value to subtract
6323+ * @v: pointer to type atomic64_unchecked_t
6324+ *
6325+ * Atomically subtracts @i from @v.
6326+ */
6327+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6328+{
6329+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
6330 : "=m" (v->counter)
6331 : "er" (i), "m" (v->counter));
6332 }
6333@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(
6334 {
6335 unsigned char c;
6336
6337- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
6338+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
6339+
6340+#ifdef CONFIG_PAX_REFCOUNT
6341+ "jno 0f\n"
6342+ LOCK_PREFIX "addq %2,%0\n"
6343+ "int $4\n0:\n"
6344+ _ASM_EXTABLE(0b, 0b)
6345+#endif
6346+
6347+ "sete %1\n"
6348 : "=m" (v->counter), "=qm" (c)
6349 : "er" (i), "m" (v->counter) : "memory");
6350 return c;
6351@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(
6352 */
6353 static inline void atomic64_inc(atomic64_t *v)
6354 {
6355+ asm volatile(LOCK_PREFIX "incq %0\n"
6356+
6357+#ifdef CONFIG_PAX_REFCOUNT
6358+ "jno 0f\n"
6359+ LOCK_PREFIX "decq %0\n"
6360+ "int $4\n0:\n"
6361+ _ASM_EXTABLE(0b, 0b)
6362+#endif
6363+
6364+ : "=m" (v->counter)
6365+ : "m" (v->counter));
6366+}
6367+
6368+/**
6369+ * atomic64_inc_unchecked - increment atomic64 variable
6370+ * @v: pointer to type atomic64_unchecked_t
6371+ *
6372+ * Atomically increments @v by 1.
6373+ */
6374+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
6375+{
6376 asm volatile(LOCK_PREFIX "incq %0"
6377 : "=m" (v->counter)
6378 : "m" (v->counter));
6379@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64
6380 */
6381 static inline void atomic64_dec(atomic64_t *v)
6382 {
6383- asm volatile(LOCK_PREFIX "decq %0"
6384+ asm volatile(LOCK_PREFIX "decq %0\n"
6385+
6386+#ifdef CONFIG_PAX_REFCOUNT
6387+ "jno 0f\n"
6388+ LOCK_PREFIX "incq %0\n"
6389+ "int $4\n0:\n"
6390+ _ASM_EXTABLE(0b, 0b)
6391+#endif
6392+
6393+ : "=m" (v->counter)
6394+ : "m" (v->counter));
6395+}
6396+
6397+/**
6398+ * atomic64_dec_unchecked - decrement atomic64 variable
6399+ * @v: pointer to type atomic64_t
6400+ *
6401+ * Atomically decrements @v by 1.
6402+ */
6403+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
6404+{
6405+ asm volatile(LOCK_PREFIX "decq %0\n"
6406 : "=m" (v->counter)
6407 : "m" (v->counter));
6408 }
6409@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(
6410 {
6411 unsigned char c;
6412
6413- asm volatile(LOCK_PREFIX "decq %0; sete %1"
6414+ asm volatile(LOCK_PREFIX "decq %0\n"
6415+
6416+#ifdef CONFIG_PAX_REFCOUNT
6417+ "jno 0f\n"
6418+ LOCK_PREFIX "incq %0\n"
6419+ "int $4\n0:\n"
6420+ _ASM_EXTABLE(0b, 0b)
6421+#endif
6422+
6423+ "sete %1\n"
6424 : "=m" (v->counter), "=qm" (c)
6425 : "m" (v->counter) : "memory");
6426 return c != 0;
6427@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(
6428 {
6429 unsigned char c;
6430
6431- asm volatile(LOCK_PREFIX "incq %0; sete %1"
6432+ asm volatile(LOCK_PREFIX "incq %0\n"
6433+
6434+#ifdef CONFIG_PAX_REFCOUNT
6435+ "jno 0f\n"
6436+ LOCK_PREFIX "decq %0\n"
6437+ "int $4\n0:\n"
6438+ _ASM_EXTABLE(0b, 0b)
6439+#endif
6440+
6441+ "sete %1\n"
6442 : "=m" (v->counter), "=qm" (c)
6443 : "m" (v->counter) : "memory");
6444 return c != 0;
6445@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(
6446 {
6447 unsigned char c;
6448
6449- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
6450+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
6451+
6452+#ifdef CONFIG_PAX_REFCOUNT
6453+ "jno 0f\n"
6454+ LOCK_PREFIX "subq %2,%0\n"
6455+ "int $4\n0:\n"
6456+ _ASM_EXTABLE(0b, 0b)
6457+#endif
6458+
6459+ "sets %1\n"
6460 : "=m" (v->counter), "=qm" (c)
6461 : "er" (i), "m" (v->counter) : "memory");
6462 return c;
6463@@ -171,7 +317,31 @@ static inline int atomic64_add_negative(
6464 static inline long atomic64_add_return(long i, atomic64_t *v)
6465 {
6466 long __i = i;
6467- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
6468+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
6469+
6470+#ifdef CONFIG_PAX_REFCOUNT
6471+ "jno 0f\n"
6472+ "movq %0, %1\n"
6473+ "int $4\n0:\n"
6474+ _ASM_EXTABLE(0b, 0b)
6475+#endif
6476+
6477+ : "+r" (i), "+m" (v->counter)
6478+ : : "memory");
6479+ return i + __i;
6480+}
6481+
6482+/**
6483+ * atomic64_add_return_unchecked - add and return
6484+ * @i: integer value to add
6485+ * @v: pointer to type atomic64_unchecked_t
6486+ *
6487+ * Atomically adds @i to @v and returns @i + @v
6488+ */
6489+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6490+{
6491+ long __i = i;
6492+ asm volatile(LOCK_PREFIX "xaddq %0, %1"
6493 : "+r" (i), "+m" (v->counter)
6494 : : "memory");
6495 return i + __i;
6496@@ -183,6 +353,10 @@ static inline long atomic64_sub_return(l
6497 }
6498
6499 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
6500+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6501+{
6502+ return atomic64_add_return_unchecked(1, v);
6503+}
6504 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
6505
6506 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6507@@ -190,6 +364,11 @@ static inline long atomic64_cmpxchg(atom
6508 return cmpxchg(&v->counter, old, new);
6509 }
6510
6511+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
6512+{
6513+ return cmpxchg(&v->counter, old, new);
6514+}
6515+
6516 static inline long atomic64_xchg(atomic64_t *v, long new)
6517 {
6518 return xchg(&v->counter, new);
6519@@ -206,17 +385,30 @@ static inline long atomic64_xchg(atomic6
6520 */
6521 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
6522 {
6523- long c, old;
6524+ long c, old, new;
6525 c = atomic64_read(v);
6526 for (;;) {
6527- if (unlikely(c == (u)))
6528+ if (unlikely(c == u))
6529 break;
6530- old = atomic64_cmpxchg((v), c, c + (a));
6531+
6532+ asm volatile("add %2,%0\n"
6533+
6534+#ifdef CONFIG_PAX_REFCOUNT
6535+ "jno 0f\n"
6536+ "sub %2,%0\n"
6537+ "int $4\n0:\n"
6538+ _ASM_EXTABLE(0b, 0b)
6539+#endif
6540+
6541+ : "=r" (new)
6542+ : "0" (c), "ir" (a));
6543+
6544+ old = atomic64_cmpxchg(v, c, new);
6545 if (likely(old == c))
6546 break;
6547 c = old;
6548 }
6549- return c != (u);
6550+ return c != u;
6551 }
6552
6553 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
6554diff -urNp linux-3.0.4/arch/x86/include/asm/atomic.h linux-3.0.4/arch/x86/include/asm/atomic.h
6555--- linux-3.0.4/arch/x86/include/asm/atomic.h 2011-07-21 22:17:23.000000000 -0400
6556+++ linux-3.0.4/arch/x86/include/asm/atomic.h 2011-08-23 21:47:55.000000000 -0400
6557@@ -22,7 +22,18 @@
6558 */
6559 static inline int atomic_read(const atomic_t *v)
6560 {
6561- return (*(volatile int *)&(v)->counter);
6562+ return (*(volatile const int *)&(v)->counter);
6563+}
6564+
6565+/**
6566+ * atomic_read_unchecked - read atomic variable
6567+ * @v: pointer of type atomic_unchecked_t
6568+ *
6569+ * Atomically reads the value of @v.
6570+ */
6571+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6572+{
6573+ return (*(volatile const int *)&(v)->counter);
6574 }
6575
6576 /**
6577@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *
6578 }
6579
6580 /**
6581+ * atomic_set_unchecked - set atomic variable
6582+ * @v: pointer of type atomic_unchecked_t
6583+ * @i: required value
6584+ *
6585+ * Atomically sets the value of @v to @i.
6586+ */
6587+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6588+{
6589+ v->counter = i;
6590+}
6591+
6592+/**
6593 * atomic_add - add integer to atomic variable
6594 * @i: integer value to add
6595 * @v: pointer of type atomic_t
6596@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *
6597 */
6598 static inline void atomic_add(int i, atomic_t *v)
6599 {
6600- asm volatile(LOCK_PREFIX "addl %1,%0"
6601+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6602+
6603+#ifdef CONFIG_PAX_REFCOUNT
6604+ "jno 0f\n"
6605+ LOCK_PREFIX "subl %1,%0\n"
6606+ "int $4\n0:\n"
6607+ _ASM_EXTABLE(0b, 0b)
6608+#endif
6609+
6610+ : "+m" (v->counter)
6611+ : "ir" (i));
6612+}
6613+
6614+/**
6615+ * atomic_add_unchecked - add integer to atomic variable
6616+ * @i: integer value to add
6617+ * @v: pointer of type atomic_unchecked_t
6618+ *
6619+ * Atomically adds @i to @v.
6620+ */
6621+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6622+{
6623+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6624 : "+m" (v->counter)
6625 : "ir" (i));
6626 }
6627@@ -60,7 +105,29 @@ static inline void atomic_add(int i, ato
6628 */
6629 static inline void atomic_sub(int i, atomic_t *v)
6630 {
6631- asm volatile(LOCK_PREFIX "subl %1,%0"
6632+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
6633+
6634+#ifdef CONFIG_PAX_REFCOUNT
6635+ "jno 0f\n"
6636+ LOCK_PREFIX "addl %1,%0\n"
6637+ "int $4\n0:\n"
6638+ _ASM_EXTABLE(0b, 0b)
6639+#endif
6640+
6641+ : "+m" (v->counter)
6642+ : "ir" (i));
6643+}
6644+
6645+/**
6646+ * atomic_sub_unchecked - subtract integer from atomic variable
6647+ * @i: integer value to subtract
6648+ * @v: pointer of type atomic_unchecked_t
6649+ *
6650+ * Atomically subtracts @i from @v.
6651+ */
6652+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6653+{
6654+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
6655 : "+m" (v->counter)
6656 : "ir" (i));
6657 }
6658@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(in
6659 {
6660 unsigned char c;
6661
6662- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6663+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
6664+
6665+#ifdef CONFIG_PAX_REFCOUNT
6666+ "jno 0f\n"
6667+ LOCK_PREFIX "addl %2,%0\n"
6668+ "int $4\n0:\n"
6669+ _ASM_EXTABLE(0b, 0b)
6670+#endif
6671+
6672+ "sete %1\n"
6673 : "+m" (v->counter), "=qm" (c)
6674 : "ir" (i) : "memory");
6675 return c;
6676@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(in
6677 */
6678 static inline void atomic_inc(atomic_t *v)
6679 {
6680- asm volatile(LOCK_PREFIX "incl %0"
6681+ asm volatile(LOCK_PREFIX "incl %0\n"
6682+
6683+#ifdef CONFIG_PAX_REFCOUNT
6684+ "jno 0f\n"
6685+ LOCK_PREFIX "decl %0\n"
6686+ "int $4\n0:\n"
6687+ _ASM_EXTABLE(0b, 0b)
6688+#endif
6689+
6690+ : "+m" (v->counter));
6691+}
6692+
6693+/**
6694+ * atomic_inc_unchecked - increment atomic variable
6695+ * @v: pointer of type atomic_unchecked_t
6696+ *
6697+ * Atomically increments @v by 1.
6698+ */
6699+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6700+{
6701+ asm volatile(LOCK_PREFIX "incl %0\n"
6702 : "+m" (v->counter));
6703 }
6704
6705@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *
6706 */
6707 static inline void atomic_dec(atomic_t *v)
6708 {
6709- asm volatile(LOCK_PREFIX "decl %0"
6710+ asm volatile(LOCK_PREFIX "decl %0\n"
6711+
6712+#ifdef CONFIG_PAX_REFCOUNT
6713+ "jno 0f\n"
6714+ LOCK_PREFIX "incl %0\n"
6715+ "int $4\n0:\n"
6716+ _ASM_EXTABLE(0b, 0b)
6717+#endif
6718+
6719+ : "+m" (v->counter));
6720+}
6721+
6722+/**
6723+ * atomic_dec_unchecked - decrement atomic variable
6724+ * @v: pointer of type atomic_unchecked_t
6725+ *
6726+ * Atomically decrements @v by 1.
6727+ */
6728+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
6729+{
6730+ asm volatile(LOCK_PREFIX "decl %0\n"
6731 : "+m" (v->counter));
6732 }
6733
6734@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(at
6735 {
6736 unsigned char c;
6737
6738- asm volatile(LOCK_PREFIX "decl %0; sete %1"
6739+ asm volatile(LOCK_PREFIX "decl %0\n"
6740+
6741+#ifdef CONFIG_PAX_REFCOUNT
6742+ "jno 0f\n"
6743+ LOCK_PREFIX "incl %0\n"
6744+ "int $4\n0:\n"
6745+ _ASM_EXTABLE(0b, 0b)
6746+#endif
6747+
6748+ "sete %1\n"
6749 : "+m" (v->counter), "=qm" (c)
6750 : : "memory");
6751 return c != 0;
6752@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(at
6753 {
6754 unsigned char c;
6755
6756- asm volatile(LOCK_PREFIX "incl %0; sete %1"
6757+ asm volatile(LOCK_PREFIX "incl %0\n"
6758+
6759+#ifdef CONFIG_PAX_REFCOUNT
6760+ "jno 0f\n"
6761+ LOCK_PREFIX "decl %0\n"
6762+ "int $4\n0:\n"
6763+ _ASM_EXTABLE(0b, 0b)
6764+#endif
6765+
6766+ "sete %1\n"
6767+ : "+m" (v->counter), "=qm" (c)
6768+ : : "memory");
6769+ return c != 0;
6770+}
6771+
6772+/**
6773+ * atomic_inc_and_test_unchecked - increment and test
6774+ * @v: pointer of type atomic_unchecked_t
6775+ *
6776+ * Atomically increments @v by 1
6777+ * and returns true if the result is zero, or false for all
6778+ * other cases.
6779+ */
6780+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6781+{
6782+ unsigned char c;
6783+
6784+ asm volatile(LOCK_PREFIX "incl %0\n"
6785+ "sete %1\n"
6786 : "+m" (v->counter), "=qm" (c)
6787 : : "memory");
6788 return c != 0;
6789@@ -157,7 +310,16 @@ static inline int atomic_add_negative(in
6790 {
6791 unsigned char c;
6792
6793- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
6794+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
6795+
6796+#ifdef CONFIG_PAX_REFCOUNT
6797+ "jno 0f\n"
6798+ LOCK_PREFIX "subl %2,%0\n"
6799+ "int $4\n0:\n"
6800+ _ASM_EXTABLE(0b, 0b)
6801+#endif
6802+
6803+ "sets %1\n"
6804 : "+m" (v->counter), "=qm" (c)
6805 : "ir" (i) : "memory");
6806 return c;
6807@@ -180,6 +342,46 @@ static inline int atomic_add_return(int
6808 #endif
6809 /* Modern 486+ processor */
6810 __i = i;
6811+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
6812+
6813+#ifdef CONFIG_PAX_REFCOUNT
6814+ "jno 0f\n"
6815+ "movl %0, %1\n"
6816+ "int $4\n0:\n"
6817+ _ASM_EXTABLE(0b, 0b)
6818+#endif
6819+
6820+ : "+r" (i), "+m" (v->counter)
6821+ : : "memory");
6822+ return i + __i;
6823+
6824+#ifdef CONFIG_M386
6825+no_xadd: /* Legacy 386 processor */
6826+ local_irq_save(flags);
6827+ __i = atomic_read(v);
6828+ atomic_set(v, i + __i);
6829+ local_irq_restore(flags);
6830+ return i + __i;
6831+#endif
6832+}
6833+
6834+/**
6835+ * atomic_add_return_unchecked - add integer and return
6836+ * @v: pointer of type atomic_unchecked_t
6837+ * @i: integer value to add
6838+ *
6839+ * Atomically adds @i to @v and returns @i + @v
6840+ */
6841+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6842+{
6843+ int __i;
6844+#ifdef CONFIG_M386
6845+ unsigned long flags;
6846+ if (unlikely(boot_cpu_data.x86 <= 3))
6847+ goto no_xadd;
6848+#endif
6849+ /* Modern 486+ processor */
6850+ __i = i;
6851 asm volatile(LOCK_PREFIX "xaddl %0, %1"
6852 : "+r" (i), "+m" (v->counter)
6853 : : "memory");
6854@@ -208,6 +410,10 @@ static inline int atomic_sub_return(int
6855 }
6856
6857 #define atomic_inc_return(v) (atomic_add_return(1, v))
6858+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6859+{
6860+ return atomic_add_return_unchecked(1, v);
6861+}
6862 #define atomic_dec_return(v) (atomic_sub_return(1, v))
6863
6864 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6865@@ -215,11 +421,21 @@ static inline int atomic_cmpxchg(atomic_
6866 return cmpxchg(&v->counter, old, new);
6867 }
6868
6869+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
6870+{
6871+ return cmpxchg(&v->counter, old, new);
6872+}
6873+
6874 static inline int atomic_xchg(atomic_t *v, int new)
6875 {
6876 return xchg(&v->counter, new);
6877 }
6878
6879+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6880+{
6881+ return xchg(&v->counter, new);
6882+}
6883+
6884 /**
6885 * atomic_add_unless - add unless the number is already a given value
6886 * @v: pointer of type atomic_t
6887@@ -231,21 +447,77 @@ static inline int atomic_xchg(atomic_t *
6888 */
6889 static inline int atomic_add_unless(atomic_t *v, int a, int u)
6890 {
6891- int c, old;
6892+ int c, old, new;
6893 c = atomic_read(v);
6894 for (;;) {
6895- if (unlikely(c == (u)))
6896+ if (unlikely(c == u))
6897 break;
6898- old = atomic_cmpxchg((v), c, c + (a));
6899+
6900+ asm volatile("addl %2,%0\n"
6901+
6902+#ifdef CONFIG_PAX_REFCOUNT
6903+ "jno 0f\n"
6904+ "subl %2,%0\n"
6905+ "int $4\n0:\n"
6906+ _ASM_EXTABLE(0b, 0b)
6907+#endif
6908+
6909+ : "=r" (new)
6910+ : "0" (c), "ir" (a));
6911+
6912+ old = atomic_cmpxchg(v, c, new);
6913 if (likely(old == c))
6914 break;
6915 c = old;
6916 }
6917- return c != (u);
6918+ return c != u;
6919 }
6920
6921 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
6922
6923+/**
6924+ * atomic_inc_not_zero_hint - increment if not null
6925+ * @v: pointer of type atomic_t
6926+ * @hint: probable value of the atomic before the increment
6927+ *
6928+ * This version of atomic_inc_not_zero() gives a hint of probable
6929+ * value of the atomic. This helps processor to not read the memory
6930+ * before doing the atomic read/modify/write cycle, lowering
6931+ * number of bus transactions on some arches.
6932+ *
6933+ * Returns: 0 if increment was not done, 1 otherwise.
6934+ */
6935+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
6936+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
6937+{
6938+ int val, c = hint, new;
6939+
6940+ /* sanity test, should be removed by compiler if hint is a constant */
6941+ if (!hint)
6942+ return atomic_inc_not_zero(v);
6943+
6944+ do {
6945+ asm volatile("incl %0\n"
6946+
6947+#ifdef CONFIG_PAX_REFCOUNT
6948+ "jno 0f\n"
6949+ "decl %0\n"
6950+ "int $4\n0:\n"
6951+ _ASM_EXTABLE(0b, 0b)
6952+#endif
6953+
6954+ : "=r" (new)
6955+ : "0" (c));
6956+
6957+ val = atomic_cmpxchg(v, c, new);
6958+ if (val == c)
6959+ return 1;
6960+ c = val;
6961+ } while (c);
6962+
6963+ return 0;
6964+}
6965+
6966 /*
6967 * atomic_dec_if_positive - decrement by 1 if old value positive
6968 * @v: pointer of type atomic_t
6969diff -urNp linux-3.0.4/arch/x86/include/asm/bitops.h linux-3.0.4/arch/x86/include/asm/bitops.h
6970--- linux-3.0.4/arch/x86/include/asm/bitops.h 2011-07-21 22:17:23.000000000 -0400
6971+++ linux-3.0.4/arch/x86/include/asm/bitops.h 2011-08-23 21:47:55.000000000 -0400
6972@@ -38,7 +38,7 @@
6973 * a mask operation on a byte.
6974 */
6975 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
6976-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
6977+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
6978 #define CONST_MASK(nr) (1 << ((nr) & 7))
6979
6980 /**
6981diff -urNp linux-3.0.4/arch/x86/include/asm/boot.h linux-3.0.4/arch/x86/include/asm/boot.h
6982--- linux-3.0.4/arch/x86/include/asm/boot.h 2011-07-21 22:17:23.000000000 -0400
6983+++ linux-3.0.4/arch/x86/include/asm/boot.h 2011-08-23 21:47:55.000000000 -0400
6984@@ -11,10 +11,15 @@
6985 #include <asm/pgtable_types.h>
6986
6987 /* Physical address where kernel should be loaded. */
6988-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6989+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6990 + (CONFIG_PHYSICAL_ALIGN - 1)) \
6991 & ~(CONFIG_PHYSICAL_ALIGN - 1))
6992
6993+#ifndef __ASSEMBLY__
6994+extern unsigned char __LOAD_PHYSICAL_ADDR[];
6995+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
6996+#endif
6997+
6998 /* Minimum kernel alignment, as a power of two */
6999 #ifdef CONFIG_X86_64
7000 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
7001diff -urNp linux-3.0.4/arch/x86/include/asm/cacheflush.h linux-3.0.4/arch/x86/include/asm/cacheflush.h
7002--- linux-3.0.4/arch/x86/include/asm/cacheflush.h 2011-07-21 22:17:23.000000000 -0400
7003+++ linux-3.0.4/arch/x86/include/asm/cacheflush.h 2011-08-23 21:47:55.000000000 -0400
7004@@ -26,7 +26,7 @@ static inline unsigned long get_page_mem
7005 unsigned long pg_flags = pg->flags & _PGMT_MASK;
7006
7007 if (pg_flags == _PGMT_DEFAULT)
7008- return -1;
7009+ return ~0UL;
7010 else if (pg_flags == _PGMT_WC)
7011 return _PAGE_CACHE_WC;
7012 else if (pg_flags == _PGMT_UC_MINUS)
7013diff -urNp linux-3.0.4/arch/x86/include/asm/cache.h linux-3.0.4/arch/x86/include/asm/cache.h
7014--- linux-3.0.4/arch/x86/include/asm/cache.h 2011-07-21 22:17:23.000000000 -0400
7015+++ linux-3.0.4/arch/x86/include/asm/cache.h 2011-08-23 21:47:55.000000000 -0400
7016@@ -5,12 +5,13 @@
7017
7018 /* L1 cache line size */
7019 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
7020-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7021+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7022
7023 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
7024+#define __read_only __attribute__((__section__(".data..read_only")))
7025
7026 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
7027-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
7028+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
7029
7030 #ifdef CONFIG_X86_VSMP
7031 #ifdef CONFIG_SMP
7032diff -urNp linux-3.0.4/arch/x86/include/asm/checksum_32.h linux-3.0.4/arch/x86/include/asm/checksum_32.h
7033--- linux-3.0.4/arch/x86/include/asm/checksum_32.h 2011-07-21 22:17:23.000000000 -0400
7034+++ linux-3.0.4/arch/x86/include/asm/checksum_32.h 2011-08-23 21:47:55.000000000 -0400
7035@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
7036 int len, __wsum sum,
7037 int *src_err_ptr, int *dst_err_ptr);
7038
7039+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
7040+ int len, __wsum sum,
7041+ int *src_err_ptr, int *dst_err_ptr);
7042+
7043+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
7044+ int len, __wsum sum,
7045+ int *src_err_ptr, int *dst_err_ptr);
7046+
7047 /*
7048 * Note: when you get a NULL pointer exception here this means someone
7049 * passed in an incorrect kernel address to one of these functions.
7050@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
7051 int *err_ptr)
7052 {
7053 might_sleep();
7054- return csum_partial_copy_generic((__force void *)src, dst,
7055+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
7056 len, sum, err_ptr, NULL);
7057 }
7058
7059@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
7060 {
7061 might_sleep();
7062 if (access_ok(VERIFY_WRITE, dst, len))
7063- return csum_partial_copy_generic(src, (__force void *)dst,
7064+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
7065 len, sum, NULL, err_ptr);
7066
7067 if (len)
7068diff -urNp linux-3.0.4/arch/x86/include/asm/cpufeature.h linux-3.0.4/arch/x86/include/asm/cpufeature.h
7069--- linux-3.0.4/arch/x86/include/asm/cpufeature.h 2011-07-21 22:17:23.000000000 -0400
7070+++ linux-3.0.4/arch/x86/include/asm/cpufeature.h 2011-08-23 21:47:55.000000000 -0400
7071@@ -358,7 +358,7 @@ static __always_inline __pure bool __sta
7072 ".section .discard,\"aw\",@progbits\n"
7073 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
7074 ".previous\n"
7075- ".section .altinstr_replacement,\"ax\"\n"
7076+ ".section .altinstr_replacement,\"a\"\n"
7077 "3: movb $1,%0\n"
7078 "4:\n"
7079 ".previous\n"
7080diff -urNp linux-3.0.4/arch/x86/include/asm/desc_defs.h linux-3.0.4/arch/x86/include/asm/desc_defs.h
7081--- linux-3.0.4/arch/x86/include/asm/desc_defs.h 2011-07-21 22:17:23.000000000 -0400
7082+++ linux-3.0.4/arch/x86/include/asm/desc_defs.h 2011-08-23 21:47:55.000000000 -0400
7083@@ -31,6 +31,12 @@ struct desc_struct {
7084 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
7085 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
7086 };
7087+ struct {
7088+ u16 offset_low;
7089+ u16 seg;
7090+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
7091+ unsigned offset_high: 16;
7092+ } gate;
7093 };
7094 } __attribute__((packed));
7095
7096diff -urNp linux-3.0.4/arch/x86/include/asm/desc.h linux-3.0.4/arch/x86/include/asm/desc.h
7097--- linux-3.0.4/arch/x86/include/asm/desc.h 2011-07-21 22:17:23.000000000 -0400
7098+++ linux-3.0.4/arch/x86/include/asm/desc.h 2011-08-23 21:47:55.000000000 -0400
7099@@ -4,6 +4,7 @@
7100 #include <asm/desc_defs.h>
7101 #include <asm/ldt.h>
7102 #include <asm/mmu.h>
7103+#include <asm/pgtable.h>
7104
7105 #include <linux/smp.h>
7106
7107@@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_
7108
7109 desc->type = (info->read_exec_only ^ 1) << 1;
7110 desc->type |= info->contents << 2;
7111+ desc->type |= info->seg_not_present ^ 1;
7112
7113 desc->s = 1;
7114 desc->dpl = 0x3;
7115@@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_
7116 }
7117
7118 extern struct desc_ptr idt_descr;
7119-extern gate_desc idt_table[];
7120-
7121-struct gdt_page {
7122- struct desc_struct gdt[GDT_ENTRIES];
7123-} __attribute__((aligned(PAGE_SIZE)));
7124-
7125-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
7126+extern gate_desc idt_table[256];
7127
7128+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
7129 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
7130 {
7131- return per_cpu(gdt_page, cpu).gdt;
7132+ return cpu_gdt_table[cpu];
7133 }
7134
7135 #ifdef CONFIG_X86_64
7136@@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *
7137 unsigned long base, unsigned dpl, unsigned flags,
7138 unsigned short seg)
7139 {
7140- gate->a = (seg << 16) | (base & 0xffff);
7141- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
7142+ gate->gate.offset_low = base;
7143+ gate->gate.seg = seg;
7144+ gate->gate.reserved = 0;
7145+ gate->gate.type = type;
7146+ gate->gate.s = 0;
7147+ gate->gate.dpl = dpl;
7148+ gate->gate.p = 1;
7149+ gate->gate.offset_high = base >> 16;
7150 }
7151
7152 #endif
7153@@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(str
7154
7155 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
7156 {
7157+ pax_open_kernel();
7158 memcpy(&idt[entry], gate, sizeof(*gate));
7159+ pax_close_kernel();
7160 }
7161
7162 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
7163 {
7164+ pax_open_kernel();
7165 memcpy(&ldt[entry], desc, 8);
7166+ pax_close_kernel();
7167 }
7168
7169 static inline void
7170@@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struc
7171 default: size = sizeof(*gdt); break;
7172 }
7173
7174+ pax_open_kernel();
7175 memcpy(&gdt[entry], desc, size);
7176+ pax_close_kernel();
7177 }
7178
7179 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
7180@@ -207,7 +216,9 @@ static inline void native_set_ldt(const
7181
7182 static inline void native_load_tr_desc(void)
7183 {
7184+ pax_open_kernel();
7185 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
7186+ pax_close_kernel();
7187 }
7188
7189 static inline void native_load_gdt(const struct desc_ptr *dtr)
7190@@ -244,8 +255,10 @@ static inline void native_load_tls(struc
7191 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
7192 unsigned int i;
7193
7194+ pax_open_kernel();
7195 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
7196 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
7197+ pax_close_kernel();
7198 }
7199
7200 #define _LDT_empty(info) \
7201@@ -307,7 +320,7 @@ static inline void set_desc_limit(struct
7202 desc->limit = (limit >> 16) & 0xf;
7203 }
7204
7205-static inline void _set_gate(int gate, unsigned type, void *addr,
7206+static inline void _set_gate(int gate, unsigned type, const void *addr,
7207 unsigned dpl, unsigned ist, unsigned seg)
7208 {
7209 gate_desc s;
7210@@ -326,7 +339,7 @@ static inline void _set_gate(int gate, u
7211 * Pentium F0 0F bugfix can have resulted in the mapped
7212 * IDT being write-protected.
7213 */
7214-static inline void set_intr_gate(unsigned int n, void *addr)
7215+static inline void set_intr_gate(unsigned int n, const void *addr)
7216 {
7217 BUG_ON((unsigned)n > 0xFF);
7218 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
7219@@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsig
7220 /*
7221 * This routine sets up an interrupt gate at directory privilege level 3.
7222 */
7223-static inline void set_system_intr_gate(unsigned int n, void *addr)
7224+static inline void set_system_intr_gate(unsigned int n, const void *addr)
7225 {
7226 BUG_ON((unsigned)n > 0xFF);
7227 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
7228 }
7229
7230-static inline void set_system_trap_gate(unsigned int n, void *addr)
7231+static inline void set_system_trap_gate(unsigned int n, const void *addr)
7232 {
7233 BUG_ON((unsigned)n > 0xFF);
7234 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
7235 }
7236
7237-static inline void set_trap_gate(unsigned int n, void *addr)
7238+static inline void set_trap_gate(unsigned int n, const void *addr)
7239 {
7240 BUG_ON((unsigned)n > 0xFF);
7241 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
7242@@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigne
7243 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
7244 {
7245 BUG_ON((unsigned)n > 0xFF);
7246- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
7247+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
7248 }
7249
7250-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
7251+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
7252 {
7253 BUG_ON((unsigned)n > 0xFF);
7254 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
7255 }
7256
7257-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
7258+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
7259 {
7260 BUG_ON((unsigned)n > 0xFF);
7261 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
7262 }
7263
7264+#ifdef CONFIG_X86_32
7265+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
7266+{
7267+ struct desc_struct d;
7268+
7269+ if (likely(limit))
7270+ limit = (limit - 1UL) >> PAGE_SHIFT;
7271+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
7272+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
7273+}
7274+#endif
7275+
7276 #endif /* _ASM_X86_DESC_H */
7277diff -urNp linux-3.0.4/arch/x86/include/asm/e820.h linux-3.0.4/arch/x86/include/asm/e820.h
7278--- linux-3.0.4/arch/x86/include/asm/e820.h 2011-07-21 22:17:23.000000000 -0400
7279+++ linux-3.0.4/arch/x86/include/asm/e820.h 2011-08-23 21:47:55.000000000 -0400
7280@@ -69,7 +69,7 @@ struct e820map {
7281 #define ISA_START_ADDRESS 0xa0000
7282 #define ISA_END_ADDRESS 0x100000
7283
7284-#define BIOS_BEGIN 0x000a0000
7285+#define BIOS_BEGIN 0x000c0000
7286 #define BIOS_END 0x00100000
7287
7288 #define BIOS_ROM_BASE 0xffe00000
7289diff -urNp linux-3.0.4/arch/x86/include/asm/elf.h linux-3.0.4/arch/x86/include/asm/elf.h
7290--- linux-3.0.4/arch/x86/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
7291+++ linux-3.0.4/arch/x86/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
7292@@ -237,7 +237,25 @@ extern int force_personality32;
7293 the loader. We need to make sure that it is out of the way of the program
7294 that it will "exec", and that there is sufficient room for the brk. */
7295
7296+#ifdef CONFIG_PAX_SEGMEXEC
7297+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
7298+#else
7299 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
7300+#endif
7301+
7302+#ifdef CONFIG_PAX_ASLR
7303+#ifdef CONFIG_X86_32
7304+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
7305+
7306+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7307+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7308+#else
7309+#define PAX_ELF_ET_DYN_BASE 0x400000UL
7310+
7311+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7312+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7313+#endif
7314+#endif
7315
7316 /* This yields a mask that user programs can use to figure out what
7317 instruction set this CPU supports. This could be done in user space,
7318@@ -290,9 +308,7 @@ do { \
7319
7320 #define ARCH_DLINFO \
7321 do { \
7322- if (vdso_enabled) \
7323- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
7324- (unsigned long)current->mm->context.vdso); \
7325+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
7326 } while (0)
7327
7328 #define AT_SYSINFO 32
7329@@ -303,7 +319,7 @@ do { \
7330
7331 #endif /* !CONFIG_X86_32 */
7332
7333-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
7334+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
7335
7336 #define VDSO_ENTRY \
7337 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
7338@@ -317,7 +333,4 @@ extern int arch_setup_additional_pages(s
7339 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
7340 #define compat_arch_setup_additional_pages syscall32_setup_pages
7341
7342-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7343-#define arch_randomize_brk arch_randomize_brk
7344-
7345 #endif /* _ASM_X86_ELF_H */
7346diff -urNp linux-3.0.4/arch/x86/include/asm/emergency-restart.h linux-3.0.4/arch/x86/include/asm/emergency-restart.h
7347--- linux-3.0.4/arch/x86/include/asm/emergency-restart.h 2011-07-21 22:17:23.000000000 -0400
7348+++ linux-3.0.4/arch/x86/include/asm/emergency-restart.h 2011-08-23 21:47:55.000000000 -0400
7349@@ -15,6 +15,6 @@ enum reboot_type {
7350
7351 extern enum reboot_type reboot_type;
7352
7353-extern void machine_emergency_restart(void);
7354+extern void machine_emergency_restart(void) __noreturn;
7355
7356 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
7357diff -urNp linux-3.0.4/arch/x86/include/asm/futex.h linux-3.0.4/arch/x86/include/asm/futex.h
7358--- linux-3.0.4/arch/x86/include/asm/futex.h 2011-07-21 22:17:23.000000000 -0400
7359+++ linux-3.0.4/arch/x86/include/asm/futex.h 2011-10-06 04:17:55.000000000 -0400
7360@@ -12,16 +12,18 @@
7361 #include <asm/system.h>
7362
7363 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
7364+ typecheck(u32 __user *, uaddr); \
7365 asm volatile("1:\t" insn "\n" \
7366 "2:\t.section .fixup,\"ax\"\n" \
7367 "3:\tmov\t%3, %1\n" \
7368 "\tjmp\t2b\n" \
7369 "\t.previous\n" \
7370 _ASM_EXTABLE(1b, 3b) \
7371- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
7372+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
7373 : "i" (-EFAULT), "0" (oparg), "1" (0))
7374
7375 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
7376+ typecheck(u32 __user *, uaddr); \
7377 asm volatile("1:\tmovl %2, %0\n" \
7378 "\tmovl\t%0, %3\n" \
7379 "\t" insn "\n" \
7380@@ -34,7 +36,7 @@
7381 _ASM_EXTABLE(1b, 4b) \
7382 _ASM_EXTABLE(2b, 4b) \
7383 : "=&a" (oldval), "=&r" (ret), \
7384- "+m" (*uaddr), "=&r" (tem) \
7385+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
7386 : "r" (oparg), "i" (-EFAULT), "1" (0))
7387
7388 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
7389@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
7390
7391 switch (op) {
7392 case FUTEX_OP_SET:
7393- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
7394+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
7395 break;
7396 case FUTEX_OP_ADD:
7397- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
7398+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
7399 uaddr, oparg);
7400 break;
7401 case FUTEX_OP_OR:
7402@@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_i
7403 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
7404 return -EFAULT;
7405
7406- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
7407+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
7408 "2:\t.section .fixup, \"ax\"\n"
7409 "3:\tmov %3, %0\n"
7410 "\tjmp 2b\n"
7411 "\t.previous\n"
7412 _ASM_EXTABLE(1b, 3b)
7413- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
7414+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
7415 : "i" (-EFAULT), "r" (newval), "1" (oldval)
7416 : "memory"
7417 );
7418diff -urNp linux-3.0.4/arch/x86/include/asm/hw_irq.h linux-3.0.4/arch/x86/include/asm/hw_irq.h
7419--- linux-3.0.4/arch/x86/include/asm/hw_irq.h 2011-07-21 22:17:23.000000000 -0400
7420+++ linux-3.0.4/arch/x86/include/asm/hw_irq.h 2011-08-23 21:47:55.000000000 -0400
7421@@ -137,8 +137,8 @@ extern void setup_ioapic_dest(void);
7422 extern void enable_IO_APIC(void);
7423
7424 /* Statistics */
7425-extern atomic_t irq_err_count;
7426-extern atomic_t irq_mis_count;
7427+extern atomic_unchecked_t irq_err_count;
7428+extern atomic_unchecked_t irq_mis_count;
7429
7430 /* EISA */
7431 extern void eisa_set_level_irq(unsigned int irq);
7432diff -urNp linux-3.0.4/arch/x86/include/asm/i387.h linux-3.0.4/arch/x86/include/asm/i387.h
7433--- linux-3.0.4/arch/x86/include/asm/i387.h 2011-07-21 22:17:23.000000000 -0400
7434+++ linux-3.0.4/arch/x86/include/asm/i387.h 2011-10-06 04:17:55.000000000 -0400
7435@@ -92,6 +92,11 @@ static inline int fxrstor_checking(struc
7436 {
7437 int err;
7438
7439+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7440+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7441+ fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
7442+#endif
7443+
7444 /* See comment in fxsave() below. */
7445 #ifdef CONFIG_AS_FXSAVEQ
7446 asm volatile("1: fxrstorq %[fx]\n\t"
7447@@ -121,6 +126,11 @@ static inline int fxsave_user(struct i38
7448 {
7449 int err;
7450
7451+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7452+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7453+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
7454+#endif
7455+
7456 /*
7457 * Clear the bytes not touched by the fxsave and reserved
7458 * for the SW usage.
7459@@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu
7460 #endif /* CONFIG_X86_64 */
7461
7462 /* We need a safe address that is cheap to find and that is already
7463- in L1 during context switch. The best choices are unfortunately
7464- different for UP and SMP */
7465-#ifdef CONFIG_SMP
7466-#define safe_address (__per_cpu_offset[0])
7467-#else
7468-#define safe_address (kstat_cpu(0).cpustat.user)
7469-#endif
7470+ in L1 during context switch. */
7471+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
7472
7473 /*
7474 * These must be called with preempt disabled
7475@@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void
7476 struct thread_info *me = current_thread_info();
7477 preempt_disable();
7478 if (me->status & TS_USEDFPU)
7479- __save_init_fpu(me->task);
7480+ __save_init_fpu(current);
7481 else
7482 clts();
7483 }
7484diff -urNp linux-3.0.4/arch/x86/include/asm/io.h linux-3.0.4/arch/x86/include/asm/io.h
7485--- linux-3.0.4/arch/x86/include/asm/io.h 2011-07-21 22:17:23.000000000 -0400
7486+++ linux-3.0.4/arch/x86/include/asm/io.h 2011-08-23 21:47:55.000000000 -0400
7487@@ -196,6 +196,17 @@ extern void set_iounmap_nonlazy(void);
7488
7489 #include <linux/vmalloc.h>
7490
7491+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
7492+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
7493+{
7494+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7495+}
7496+
7497+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
7498+{
7499+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7500+}
7501+
7502 /*
7503 * Convert a virtual cached pointer to an uncached pointer
7504 */
7505diff -urNp linux-3.0.4/arch/x86/include/asm/irqflags.h linux-3.0.4/arch/x86/include/asm/irqflags.h
7506--- linux-3.0.4/arch/x86/include/asm/irqflags.h 2011-07-21 22:17:23.000000000 -0400
7507+++ linux-3.0.4/arch/x86/include/asm/irqflags.h 2011-08-23 21:47:55.000000000 -0400
7508@@ -140,6 +140,11 @@ static inline unsigned long arch_local_i
7509 sti; \
7510 sysexit
7511
7512+#define GET_CR0_INTO_RDI mov %cr0, %rdi
7513+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
7514+#define GET_CR3_INTO_RDI mov %cr3, %rdi
7515+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
7516+
7517 #else
7518 #define INTERRUPT_RETURN iret
7519 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
7520diff -urNp linux-3.0.4/arch/x86/include/asm/kprobes.h linux-3.0.4/arch/x86/include/asm/kprobes.h
7521--- linux-3.0.4/arch/x86/include/asm/kprobes.h 2011-07-21 22:17:23.000000000 -0400
7522+++ linux-3.0.4/arch/x86/include/asm/kprobes.h 2011-08-23 21:47:55.000000000 -0400
7523@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
7524 #define RELATIVEJUMP_SIZE 5
7525 #define RELATIVECALL_OPCODE 0xe8
7526 #define RELATIVE_ADDR_SIZE 4
7527-#define MAX_STACK_SIZE 64
7528-#define MIN_STACK_SIZE(ADDR) \
7529- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
7530- THREAD_SIZE - (unsigned long)(ADDR))) \
7531- ? (MAX_STACK_SIZE) \
7532- : (((unsigned long)current_thread_info()) + \
7533- THREAD_SIZE - (unsigned long)(ADDR)))
7534+#define MAX_STACK_SIZE 64UL
7535+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
7536
7537 #define flush_insn_slot(p) do { } while (0)
7538
7539diff -urNp linux-3.0.4/arch/x86/include/asm/kvm_host.h linux-3.0.4/arch/x86/include/asm/kvm_host.h
7540--- linux-3.0.4/arch/x86/include/asm/kvm_host.h 2011-07-21 22:17:23.000000000 -0400
7541+++ linux-3.0.4/arch/x86/include/asm/kvm_host.h 2011-08-26 19:49:56.000000000 -0400
7542@@ -441,7 +441,7 @@ struct kvm_arch {
7543 unsigned int n_used_mmu_pages;
7544 unsigned int n_requested_mmu_pages;
7545 unsigned int n_max_mmu_pages;
7546- atomic_t invlpg_counter;
7547+ atomic_unchecked_t invlpg_counter;
7548 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
7549 /*
7550 * Hash table of struct kvm_mmu_page.
7551@@ -619,7 +619,7 @@ struct kvm_x86_ops {
7552 enum x86_intercept_stage stage);
7553
7554 const struct trace_print_flags *exit_reasons_str;
7555-};
7556+} __do_const;
7557
7558 struct kvm_arch_async_pf {
7559 u32 token;
7560diff -urNp linux-3.0.4/arch/x86/include/asm/local.h linux-3.0.4/arch/x86/include/asm/local.h
7561--- linux-3.0.4/arch/x86/include/asm/local.h 2011-07-21 22:17:23.000000000 -0400
7562+++ linux-3.0.4/arch/x86/include/asm/local.h 2011-08-23 21:47:55.000000000 -0400
7563@@ -18,26 +18,58 @@ typedef struct {
7564
7565 static inline void local_inc(local_t *l)
7566 {
7567- asm volatile(_ASM_INC "%0"
7568+ asm volatile(_ASM_INC "%0\n"
7569+
7570+#ifdef CONFIG_PAX_REFCOUNT
7571+ "jno 0f\n"
7572+ _ASM_DEC "%0\n"
7573+ "int $4\n0:\n"
7574+ _ASM_EXTABLE(0b, 0b)
7575+#endif
7576+
7577 : "+m" (l->a.counter));
7578 }
7579
7580 static inline void local_dec(local_t *l)
7581 {
7582- asm volatile(_ASM_DEC "%0"
7583+ asm volatile(_ASM_DEC "%0\n"
7584+
7585+#ifdef CONFIG_PAX_REFCOUNT
7586+ "jno 0f\n"
7587+ _ASM_INC "%0\n"
7588+ "int $4\n0:\n"
7589+ _ASM_EXTABLE(0b, 0b)
7590+#endif
7591+
7592 : "+m" (l->a.counter));
7593 }
7594
7595 static inline void local_add(long i, local_t *l)
7596 {
7597- asm volatile(_ASM_ADD "%1,%0"
7598+ asm volatile(_ASM_ADD "%1,%0\n"
7599+
7600+#ifdef CONFIG_PAX_REFCOUNT
7601+ "jno 0f\n"
7602+ _ASM_SUB "%1,%0\n"
7603+ "int $4\n0:\n"
7604+ _ASM_EXTABLE(0b, 0b)
7605+#endif
7606+
7607 : "+m" (l->a.counter)
7608 : "ir" (i));
7609 }
7610
7611 static inline void local_sub(long i, local_t *l)
7612 {
7613- asm volatile(_ASM_SUB "%1,%0"
7614+ asm volatile(_ASM_SUB "%1,%0\n"
7615+
7616+#ifdef CONFIG_PAX_REFCOUNT
7617+ "jno 0f\n"
7618+ _ASM_ADD "%1,%0\n"
7619+ "int $4\n0:\n"
7620+ _ASM_EXTABLE(0b, 0b)
7621+#endif
7622+
7623 : "+m" (l->a.counter)
7624 : "ir" (i));
7625 }
7626@@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
7627 {
7628 unsigned char c;
7629
7630- asm volatile(_ASM_SUB "%2,%0; sete %1"
7631+ asm volatile(_ASM_SUB "%2,%0\n"
7632+
7633+#ifdef CONFIG_PAX_REFCOUNT
7634+ "jno 0f\n"
7635+ _ASM_ADD "%2,%0\n"
7636+ "int $4\n0:\n"
7637+ _ASM_EXTABLE(0b, 0b)
7638+#endif
7639+
7640+ "sete %1\n"
7641 : "+m" (l->a.counter), "=qm" (c)
7642 : "ir" (i) : "memory");
7643 return c;
7644@@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
7645 {
7646 unsigned char c;
7647
7648- asm volatile(_ASM_DEC "%0; sete %1"
7649+ asm volatile(_ASM_DEC "%0\n"
7650+
7651+#ifdef CONFIG_PAX_REFCOUNT
7652+ "jno 0f\n"
7653+ _ASM_INC "%0\n"
7654+ "int $4\n0:\n"
7655+ _ASM_EXTABLE(0b, 0b)
7656+#endif
7657+
7658+ "sete %1\n"
7659 : "+m" (l->a.counter), "=qm" (c)
7660 : : "memory");
7661 return c != 0;
7662@@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
7663 {
7664 unsigned char c;
7665
7666- asm volatile(_ASM_INC "%0; sete %1"
7667+ asm volatile(_ASM_INC "%0\n"
7668+
7669+#ifdef CONFIG_PAX_REFCOUNT
7670+ "jno 0f\n"
7671+ _ASM_DEC "%0\n"
7672+ "int $4\n0:\n"
7673+ _ASM_EXTABLE(0b, 0b)
7674+#endif
7675+
7676+ "sete %1\n"
7677 : "+m" (l->a.counter), "=qm" (c)
7678 : : "memory");
7679 return c != 0;
7680@@ -110,7 +169,16 @@ static inline int local_add_negative(lon
7681 {
7682 unsigned char c;
7683
7684- asm volatile(_ASM_ADD "%2,%0; sets %1"
7685+ asm volatile(_ASM_ADD "%2,%0\n"
7686+
7687+#ifdef CONFIG_PAX_REFCOUNT
7688+ "jno 0f\n"
7689+ _ASM_SUB "%2,%0\n"
7690+ "int $4\n0:\n"
7691+ _ASM_EXTABLE(0b, 0b)
7692+#endif
7693+
7694+ "sets %1\n"
7695 : "+m" (l->a.counter), "=qm" (c)
7696 : "ir" (i) : "memory");
7697 return c;
7698@@ -133,7 +201,15 @@ static inline long local_add_return(long
7699 #endif
7700 /* Modern 486+ processor */
7701 __i = i;
7702- asm volatile(_ASM_XADD "%0, %1;"
7703+ asm volatile(_ASM_XADD "%0, %1\n"
7704+
7705+#ifdef CONFIG_PAX_REFCOUNT
7706+ "jno 0f\n"
7707+ _ASM_MOV "%0,%1\n"
7708+ "int $4\n0:\n"
7709+ _ASM_EXTABLE(0b, 0b)
7710+#endif
7711+
7712 : "+r" (i), "+m" (l->a.counter)
7713 : : "memory");
7714 return i + __i;
7715diff -urNp linux-3.0.4/arch/x86/include/asm/mman.h linux-3.0.4/arch/x86/include/asm/mman.h
7716--- linux-3.0.4/arch/x86/include/asm/mman.h 2011-07-21 22:17:23.000000000 -0400
7717+++ linux-3.0.4/arch/x86/include/asm/mman.h 2011-08-23 21:47:55.000000000 -0400
7718@@ -5,4 +5,14 @@
7719
7720 #include <asm-generic/mman.h>
7721
7722+#ifdef __KERNEL__
7723+#ifndef __ASSEMBLY__
7724+#ifdef CONFIG_X86_32
7725+#define arch_mmap_check i386_mmap_check
7726+int i386_mmap_check(unsigned long addr, unsigned long len,
7727+ unsigned long flags);
7728+#endif
7729+#endif
7730+#endif
7731+
7732 #endif /* _ASM_X86_MMAN_H */
7733diff -urNp linux-3.0.4/arch/x86/include/asm/mmu_context.h linux-3.0.4/arch/x86/include/asm/mmu_context.h
7734--- linux-3.0.4/arch/x86/include/asm/mmu_context.h 2011-07-21 22:17:23.000000000 -0400
7735+++ linux-3.0.4/arch/x86/include/asm/mmu_context.h 2011-08-23 21:48:14.000000000 -0400
7736@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *m
7737
7738 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
7739 {
7740+
7741+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7742+ unsigned int i;
7743+ pgd_t *pgd;
7744+
7745+ pax_open_kernel();
7746+ pgd = get_cpu_pgd(smp_processor_id());
7747+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
7748+ set_pgd_batched(pgd+i, native_make_pgd(0));
7749+ pax_close_kernel();
7750+#endif
7751+
7752 #ifdef CONFIG_SMP
7753 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
7754 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
7755@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_s
7756 struct task_struct *tsk)
7757 {
7758 unsigned cpu = smp_processor_id();
7759+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7760+ int tlbstate = TLBSTATE_OK;
7761+#endif
7762
7763 if (likely(prev != next)) {
7764 #ifdef CONFIG_SMP
7765+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7766+ tlbstate = percpu_read(cpu_tlbstate.state);
7767+#endif
7768 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7769 percpu_write(cpu_tlbstate.active_mm, next);
7770 #endif
7771 cpumask_set_cpu(cpu, mm_cpumask(next));
7772
7773 /* Re-load page tables */
7774+#ifdef CONFIG_PAX_PER_CPU_PGD
7775+ pax_open_kernel();
7776+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7777+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7778+ pax_close_kernel();
7779+ load_cr3(get_cpu_pgd(cpu));
7780+#else
7781 load_cr3(next->pgd);
7782+#endif
7783
7784 /* stop flush ipis for the previous mm */
7785 cpumask_clear_cpu(cpu, mm_cpumask(prev));
7786@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_s
7787 */
7788 if (unlikely(prev->context.ldt != next->context.ldt))
7789 load_LDT_nolock(&next->context);
7790- }
7791+
7792+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7793+ if (!(__supported_pte_mask & _PAGE_NX)) {
7794+ smp_mb__before_clear_bit();
7795+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
7796+ smp_mb__after_clear_bit();
7797+ cpu_set(cpu, next->context.cpu_user_cs_mask);
7798+ }
7799+#endif
7800+
7801+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7802+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
7803+ prev->context.user_cs_limit != next->context.user_cs_limit))
7804+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7805 #ifdef CONFIG_SMP
7806+ else if (unlikely(tlbstate != TLBSTATE_OK))
7807+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7808+#endif
7809+#endif
7810+
7811+ }
7812 else {
7813+
7814+#ifdef CONFIG_PAX_PER_CPU_PGD
7815+ pax_open_kernel();
7816+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7817+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7818+ pax_close_kernel();
7819+ load_cr3(get_cpu_pgd(cpu));
7820+#endif
7821+
7822+#ifdef CONFIG_SMP
7823 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7824 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
7825
7826@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_s
7827 * tlb flush IPI delivery. We must reload CR3
7828 * to make sure to use no freed page tables.
7829 */
7830+
7831+#ifndef CONFIG_PAX_PER_CPU_PGD
7832 load_cr3(next->pgd);
7833+#endif
7834+
7835 load_LDT_nolock(&next->context);
7836+
7837+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
7838+ if (!(__supported_pte_mask & _PAGE_NX))
7839+ cpu_set(cpu, next->context.cpu_user_cs_mask);
7840+#endif
7841+
7842+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7843+#ifdef CONFIG_PAX_PAGEEXEC
7844+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
7845+#endif
7846+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7847+#endif
7848+
7849 }
7850- }
7851 #endif
7852+ }
7853 }
7854
7855 #define activate_mm(prev, next) \
7856diff -urNp linux-3.0.4/arch/x86/include/asm/mmu.h linux-3.0.4/arch/x86/include/asm/mmu.h
7857--- linux-3.0.4/arch/x86/include/asm/mmu.h 2011-07-21 22:17:23.000000000 -0400
7858+++ linux-3.0.4/arch/x86/include/asm/mmu.h 2011-08-23 21:47:55.000000000 -0400
7859@@ -9,7 +9,7 @@
7860 * we put the segment information here.
7861 */
7862 typedef struct {
7863- void *ldt;
7864+ struct desc_struct *ldt;
7865 int size;
7866
7867 #ifdef CONFIG_X86_64
7868@@ -18,7 +18,19 @@ typedef struct {
7869 #endif
7870
7871 struct mutex lock;
7872- void *vdso;
7873+ unsigned long vdso;
7874+
7875+#ifdef CONFIG_X86_32
7876+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
7877+ unsigned long user_cs_base;
7878+ unsigned long user_cs_limit;
7879+
7880+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7881+ cpumask_t cpu_user_cs_mask;
7882+#endif
7883+
7884+#endif
7885+#endif
7886 } mm_context_t;
7887
7888 #ifdef CONFIG_SMP
7889diff -urNp linux-3.0.4/arch/x86/include/asm/module.h linux-3.0.4/arch/x86/include/asm/module.h
7890--- linux-3.0.4/arch/x86/include/asm/module.h 2011-07-21 22:17:23.000000000 -0400
7891+++ linux-3.0.4/arch/x86/include/asm/module.h 2011-10-07 19:24:31.000000000 -0400
7892@@ -5,6 +5,7 @@
7893
7894 #ifdef CONFIG_X86_64
7895 /* X86_64 does not define MODULE_PROC_FAMILY */
7896+#define MODULE_PROC_FAMILY ""
7897 #elif defined CONFIG_M386
7898 #define MODULE_PROC_FAMILY "386 "
7899 #elif defined CONFIG_M486
7900@@ -59,8 +60,18 @@
7901 #error unknown processor family
7902 #endif
7903
7904-#ifdef CONFIG_X86_32
7905-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
7906+#ifdef CONFIG_PAX_KERNEXEC
7907+#define MODULE_PAX_KERNEXEC "KERNEXEC "
7908+#else
7909+#define MODULE_PAX_KERNEXEC ""
7910 #endif
7911
7912+#ifdef CONFIG_PAX_MEMORY_UDEREF
7913+#define MODULE_PAX_UDEREF "UDEREF "
7914+#else
7915+#define MODULE_PAX_UDEREF ""
7916+#endif
7917+
7918+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
7919+
7920 #endif /* _ASM_X86_MODULE_H */
7921diff -urNp linux-3.0.4/arch/x86/include/asm/page_64_types.h linux-3.0.4/arch/x86/include/asm/page_64_types.h
7922--- linux-3.0.4/arch/x86/include/asm/page_64_types.h 2011-07-21 22:17:23.000000000 -0400
7923+++ linux-3.0.4/arch/x86/include/asm/page_64_types.h 2011-08-23 21:47:55.000000000 -0400
7924@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
7925
7926 /* duplicated to the one in bootmem.h */
7927 extern unsigned long max_pfn;
7928-extern unsigned long phys_base;
7929+extern const unsigned long phys_base;
7930
7931 extern unsigned long __phys_addr(unsigned long);
7932 #define __phys_reloc_hide(x) (x)
7933diff -urNp linux-3.0.4/arch/x86/include/asm/paravirt.h linux-3.0.4/arch/x86/include/asm/paravirt.h
7934--- linux-3.0.4/arch/x86/include/asm/paravirt.h 2011-07-21 22:17:23.000000000 -0400
7935+++ linux-3.0.4/arch/x86/include/asm/paravirt.h 2011-08-23 21:47:55.000000000 -0400
7936@@ -658,6 +658,18 @@ static inline void set_pgd(pgd_t *pgdp,
7937 val);
7938 }
7939
7940+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
7941+{
7942+ pgdval_t val = native_pgd_val(pgd);
7943+
7944+ if (sizeof(pgdval_t) > sizeof(long))
7945+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
7946+ val, (u64)val >> 32);
7947+ else
7948+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
7949+ val);
7950+}
7951+
7952 static inline void pgd_clear(pgd_t *pgdp)
7953 {
7954 set_pgd(pgdp, __pgd(0));
7955@@ -739,6 +751,21 @@ static inline void __set_fixmap(unsigned
7956 pv_mmu_ops.set_fixmap(idx, phys, flags);
7957 }
7958
7959+#ifdef CONFIG_PAX_KERNEXEC
7960+static inline unsigned long pax_open_kernel(void)
7961+{
7962+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
7963+}
7964+
7965+static inline unsigned long pax_close_kernel(void)
7966+{
7967+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
7968+}
7969+#else
7970+static inline unsigned long pax_open_kernel(void) { return 0; }
7971+static inline unsigned long pax_close_kernel(void) { return 0; }
7972+#endif
7973+
7974 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
7975
7976 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
7977@@ -955,7 +982,7 @@ extern void default_banner(void);
7978
7979 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
7980 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
7981-#define PARA_INDIRECT(addr) *%cs:addr
7982+#define PARA_INDIRECT(addr) *%ss:addr
7983 #endif
7984
7985 #define INTERRUPT_RETURN \
7986@@ -1032,6 +1059,21 @@ extern void default_banner(void);
7987 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
7988 CLBR_NONE, \
7989 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
7990+
7991+#define GET_CR0_INTO_RDI \
7992+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
7993+ mov %rax,%rdi
7994+
7995+#define SET_RDI_INTO_CR0 \
7996+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
7997+
7998+#define GET_CR3_INTO_RDI \
7999+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
8000+ mov %rax,%rdi
8001+
8002+#define SET_RDI_INTO_CR3 \
8003+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
8004+
8005 #endif /* CONFIG_X86_32 */
8006
8007 #endif /* __ASSEMBLY__ */
8008diff -urNp linux-3.0.4/arch/x86/include/asm/paravirt_types.h linux-3.0.4/arch/x86/include/asm/paravirt_types.h
8009--- linux-3.0.4/arch/x86/include/asm/paravirt_types.h 2011-07-21 22:17:23.000000000 -0400
8010+++ linux-3.0.4/arch/x86/include/asm/paravirt_types.h 2011-08-23 21:47:55.000000000 -0400
8011@@ -78,19 +78,19 @@ struct pv_init_ops {
8012 */
8013 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
8014 unsigned long addr, unsigned len);
8015-};
8016+} __no_const;
8017
8018
8019 struct pv_lazy_ops {
8020 /* Set deferred update mode, used for batching operations. */
8021 void (*enter)(void);
8022 void (*leave)(void);
8023-};
8024+} __no_const;
8025
8026 struct pv_time_ops {
8027 unsigned long long (*sched_clock)(void);
8028 unsigned long (*get_tsc_khz)(void);
8029-};
8030+} __no_const;
8031
8032 struct pv_cpu_ops {
8033 /* hooks for various privileged instructions */
8034@@ -186,7 +186,7 @@ struct pv_cpu_ops {
8035
8036 void (*start_context_switch)(struct task_struct *prev);
8037 void (*end_context_switch)(struct task_struct *next);
8038-};
8039+} __no_const;
8040
8041 struct pv_irq_ops {
8042 /*
8043@@ -217,7 +217,7 @@ struct pv_apic_ops {
8044 unsigned long start_eip,
8045 unsigned long start_esp);
8046 #endif
8047-};
8048+} __no_const;
8049
8050 struct pv_mmu_ops {
8051 unsigned long (*read_cr2)(void);
8052@@ -306,6 +306,7 @@ struct pv_mmu_ops {
8053 struct paravirt_callee_save make_pud;
8054
8055 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
8056+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
8057 #endif /* PAGETABLE_LEVELS == 4 */
8058 #endif /* PAGETABLE_LEVELS >= 3 */
8059
8060@@ -317,6 +318,12 @@ struct pv_mmu_ops {
8061 an mfn. We can tell which is which from the index. */
8062 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
8063 phys_addr_t phys, pgprot_t flags);
8064+
8065+#ifdef CONFIG_PAX_KERNEXEC
8066+ unsigned long (*pax_open_kernel)(void);
8067+ unsigned long (*pax_close_kernel)(void);
8068+#endif
8069+
8070 };
8071
8072 struct arch_spinlock;
8073@@ -327,7 +334,7 @@ struct pv_lock_ops {
8074 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
8075 int (*spin_trylock)(struct arch_spinlock *lock);
8076 void (*spin_unlock)(struct arch_spinlock *lock);
8077-};
8078+} __no_const;
8079
8080 /* This contains all the paravirt structures: we get a convenient
8081 * number for each function using the offset which we use to indicate
8082diff -urNp linux-3.0.4/arch/x86/include/asm/pgalloc.h linux-3.0.4/arch/x86/include/asm/pgalloc.h
8083--- linux-3.0.4/arch/x86/include/asm/pgalloc.h 2011-07-21 22:17:23.000000000 -0400
8084+++ linux-3.0.4/arch/x86/include/asm/pgalloc.h 2011-08-23 21:47:55.000000000 -0400
8085@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
8086 pmd_t *pmd, pte_t *pte)
8087 {
8088 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
8089+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
8090+}
8091+
8092+static inline void pmd_populate_user(struct mm_struct *mm,
8093+ pmd_t *pmd, pte_t *pte)
8094+{
8095+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
8096 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
8097 }
8098
8099diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable-2level.h linux-3.0.4/arch/x86/include/asm/pgtable-2level.h
8100--- linux-3.0.4/arch/x86/include/asm/pgtable-2level.h 2011-07-21 22:17:23.000000000 -0400
8101+++ linux-3.0.4/arch/x86/include/asm/pgtable-2level.h 2011-08-23 21:47:55.000000000 -0400
8102@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
8103
8104 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8105 {
8106+ pax_open_kernel();
8107 *pmdp = pmd;
8108+ pax_close_kernel();
8109 }
8110
8111 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
8112diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_32.h linux-3.0.4/arch/x86/include/asm/pgtable_32.h
8113--- linux-3.0.4/arch/x86/include/asm/pgtable_32.h 2011-07-21 22:17:23.000000000 -0400
8114+++ linux-3.0.4/arch/x86/include/asm/pgtable_32.h 2011-08-23 21:47:55.000000000 -0400
8115@@ -25,9 +25,6 @@
8116 struct mm_struct;
8117 struct vm_area_struct;
8118
8119-extern pgd_t swapper_pg_dir[1024];
8120-extern pgd_t initial_page_table[1024];
8121-
8122 static inline void pgtable_cache_init(void) { }
8123 static inline void check_pgt_cache(void) { }
8124 void paging_init(void);
8125@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, u
8126 # include <asm/pgtable-2level.h>
8127 #endif
8128
8129+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
8130+extern pgd_t initial_page_table[PTRS_PER_PGD];
8131+#ifdef CONFIG_X86_PAE
8132+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
8133+#endif
8134+
8135 #if defined(CONFIG_HIGHPTE)
8136 #define pte_offset_map(dir, address) \
8137 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
8138@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, u
8139 /* Clear a kernel PTE and flush it from the TLB */
8140 #define kpte_clear_flush(ptep, vaddr) \
8141 do { \
8142+ pax_open_kernel(); \
8143 pte_clear(&init_mm, (vaddr), (ptep)); \
8144+ pax_close_kernel(); \
8145 __flush_tlb_one((vaddr)); \
8146 } while (0)
8147
8148@@ -74,6 +79,9 @@ do { \
8149
8150 #endif /* !__ASSEMBLY__ */
8151
8152+#define HAVE_ARCH_UNMAPPED_AREA
8153+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
8154+
8155 /*
8156 * kern_addr_valid() is (1) for FLATMEM and (0) for
8157 * SPARSEMEM and DISCONTIGMEM
8158diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_32_types.h linux-3.0.4/arch/x86/include/asm/pgtable_32_types.h
8159--- linux-3.0.4/arch/x86/include/asm/pgtable_32_types.h 2011-07-21 22:17:23.000000000 -0400
8160+++ linux-3.0.4/arch/x86/include/asm/pgtable_32_types.h 2011-08-23 21:47:55.000000000 -0400
8161@@ -8,7 +8,7 @@
8162 */
8163 #ifdef CONFIG_X86_PAE
8164 # include <asm/pgtable-3level_types.h>
8165-# define PMD_SIZE (1UL << PMD_SHIFT)
8166+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
8167 # define PMD_MASK (~(PMD_SIZE - 1))
8168 #else
8169 # include <asm/pgtable-2level_types.h>
8170@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
8171 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
8172 #endif
8173
8174+#ifdef CONFIG_PAX_KERNEXEC
8175+#ifndef __ASSEMBLY__
8176+extern unsigned char MODULES_EXEC_VADDR[];
8177+extern unsigned char MODULES_EXEC_END[];
8178+#endif
8179+#include <asm/boot.h>
8180+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
8181+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
8182+#else
8183+#define ktla_ktva(addr) (addr)
8184+#define ktva_ktla(addr) (addr)
8185+#endif
8186+
8187 #define MODULES_VADDR VMALLOC_START
8188 #define MODULES_END VMALLOC_END
8189 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
8190diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable-3level.h linux-3.0.4/arch/x86/include/asm/pgtable-3level.h
8191--- linux-3.0.4/arch/x86/include/asm/pgtable-3level.h 2011-07-21 22:17:23.000000000 -0400
8192+++ linux-3.0.4/arch/x86/include/asm/pgtable-3level.h 2011-08-23 21:47:55.000000000 -0400
8193@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
8194
8195 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8196 {
8197+ pax_open_kernel();
8198 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
8199+ pax_close_kernel();
8200 }
8201
8202 static inline void native_set_pud(pud_t *pudp, pud_t pud)
8203 {
8204+ pax_open_kernel();
8205 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
8206+ pax_close_kernel();
8207 }
8208
8209 /*
8210diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_64.h linux-3.0.4/arch/x86/include/asm/pgtable_64.h
8211--- linux-3.0.4/arch/x86/include/asm/pgtable_64.h 2011-07-21 22:17:23.000000000 -0400
8212+++ linux-3.0.4/arch/x86/include/asm/pgtable_64.h 2011-08-23 21:47:55.000000000 -0400
8213@@ -16,10 +16,13 @@
8214
8215 extern pud_t level3_kernel_pgt[512];
8216 extern pud_t level3_ident_pgt[512];
8217+extern pud_t level3_vmalloc_pgt[512];
8218+extern pud_t level3_vmemmap_pgt[512];
8219+extern pud_t level2_vmemmap_pgt[512];
8220 extern pmd_t level2_kernel_pgt[512];
8221 extern pmd_t level2_fixmap_pgt[512];
8222-extern pmd_t level2_ident_pgt[512];
8223-extern pgd_t init_level4_pgt[];
8224+extern pmd_t level2_ident_pgt[512*2];
8225+extern pgd_t init_level4_pgt[512];
8226
8227 #define swapper_pg_dir init_level4_pgt
8228
8229@@ -61,7 +64,9 @@ static inline void native_set_pte_atomic
8230
8231 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8232 {
8233+ pax_open_kernel();
8234 *pmdp = pmd;
8235+ pax_close_kernel();
8236 }
8237
8238 static inline void native_pmd_clear(pmd_t *pmd)
8239@@ -107,6 +112,13 @@ static inline void native_pud_clear(pud_
8240
8241 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
8242 {
8243+ pax_open_kernel();
8244+ *pgdp = pgd;
8245+ pax_close_kernel();
8246+}
8247+
8248+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
8249+{
8250 *pgdp = pgd;
8251 }
8252
8253diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_64_types.h linux-3.0.4/arch/x86/include/asm/pgtable_64_types.h
8254--- linux-3.0.4/arch/x86/include/asm/pgtable_64_types.h 2011-07-21 22:17:23.000000000 -0400
8255+++ linux-3.0.4/arch/x86/include/asm/pgtable_64_types.h 2011-08-23 21:47:55.000000000 -0400
8256@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
8257 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
8258 #define MODULES_END _AC(0xffffffffff000000, UL)
8259 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
8260+#define MODULES_EXEC_VADDR MODULES_VADDR
8261+#define MODULES_EXEC_END MODULES_END
8262+
8263+#define ktla_ktva(addr) (addr)
8264+#define ktva_ktla(addr) (addr)
8265
8266 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
8267diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable.h linux-3.0.4/arch/x86/include/asm/pgtable.h
8268--- linux-3.0.4/arch/x86/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
8269+++ linux-3.0.4/arch/x86/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
8270@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm
8271
8272 #ifndef __PAGETABLE_PUD_FOLDED
8273 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
8274+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
8275 #define pgd_clear(pgd) native_pgd_clear(pgd)
8276 #endif
8277
8278@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm
8279
8280 #define arch_end_context_switch(prev) do {} while(0)
8281
8282+#define pax_open_kernel() native_pax_open_kernel()
8283+#define pax_close_kernel() native_pax_close_kernel()
8284 #endif /* CONFIG_PARAVIRT */
8285
8286+#define __HAVE_ARCH_PAX_OPEN_KERNEL
8287+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
8288+
8289+#ifdef CONFIG_PAX_KERNEXEC
8290+static inline unsigned long native_pax_open_kernel(void)
8291+{
8292+ unsigned long cr0;
8293+
8294+ preempt_disable();
8295+ barrier();
8296+ cr0 = read_cr0() ^ X86_CR0_WP;
8297+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
8298+ write_cr0(cr0);
8299+ return cr0 ^ X86_CR0_WP;
8300+}
8301+
8302+static inline unsigned long native_pax_close_kernel(void)
8303+{
8304+ unsigned long cr0;
8305+
8306+ cr0 = read_cr0() ^ X86_CR0_WP;
8307+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
8308+ write_cr0(cr0);
8309+ barrier();
8310+ preempt_enable_no_resched();
8311+ return cr0 ^ X86_CR0_WP;
8312+}
8313+#else
8314+static inline unsigned long native_pax_open_kernel(void) { return 0; }
8315+static inline unsigned long native_pax_close_kernel(void) { return 0; }
8316+#endif
8317+
8318 /*
8319 * The following only work if pte_present() is true.
8320 * Undefined behaviour if not..
8321 */
8322+static inline int pte_user(pte_t pte)
8323+{
8324+ return pte_val(pte) & _PAGE_USER;
8325+}
8326+
8327 static inline int pte_dirty(pte_t pte)
8328 {
8329 return pte_flags(pte) & _PAGE_DIRTY;
8330@@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t
8331 return pte_clear_flags(pte, _PAGE_RW);
8332 }
8333
8334+static inline pte_t pte_mkread(pte_t pte)
8335+{
8336+ return __pte(pte_val(pte) | _PAGE_USER);
8337+}
8338+
8339 static inline pte_t pte_mkexec(pte_t pte)
8340 {
8341- return pte_clear_flags(pte, _PAGE_NX);
8342+#ifdef CONFIG_X86_PAE
8343+ if (__supported_pte_mask & _PAGE_NX)
8344+ return pte_clear_flags(pte, _PAGE_NX);
8345+ else
8346+#endif
8347+ return pte_set_flags(pte, _PAGE_USER);
8348+}
8349+
8350+static inline pte_t pte_exprotect(pte_t pte)
8351+{
8352+#ifdef CONFIG_X86_PAE
8353+ if (__supported_pte_mask & _PAGE_NX)
8354+ return pte_set_flags(pte, _PAGE_NX);
8355+ else
8356+#endif
8357+ return pte_clear_flags(pte, _PAGE_USER);
8358 }
8359
8360 static inline pte_t pte_mkdirty(pte_t pte)
8361@@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long
8362 #endif
8363
8364 #ifndef __ASSEMBLY__
8365+
8366+#ifdef CONFIG_PAX_PER_CPU_PGD
8367+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
8368+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
8369+{
8370+ return cpu_pgd[cpu];
8371+}
8372+#endif
8373+
8374 #include <linux/mm_types.h>
8375
8376 static inline int pte_none(pte_t pte)
8377@@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *p
8378
8379 static inline int pgd_bad(pgd_t pgd)
8380 {
8381- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
8382+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
8383 }
8384
8385 static inline int pgd_none(pgd_t pgd)
8386@@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
8387 * pgd_offset() returns a (pgd_t *)
8388 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
8389 */
8390-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
8391+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
8392+
8393+#ifdef CONFIG_PAX_PER_CPU_PGD
8394+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
8395+#endif
8396+
8397 /*
8398 * a shortcut which implies the use of the kernel's pgd, instead
8399 * of a process's
8400@@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
8401 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
8402 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
8403
8404+#ifdef CONFIG_X86_32
8405+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
8406+#else
8407+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
8408+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
8409+
8410+#ifdef CONFIG_PAX_MEMORY_UDEREF
8411+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
8412+#else
8413+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
8414+#endif
8415+
8416+#endif
8417+
8418 #ifndef __ASSEMBLY__
8419
8420 extern int direct_gbpages;
8421@@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(st
8422 * dst and src can be on the same page, but the range must not overlap,
8423 * and must not cross a page boundary.
8424 */
8425-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
8426+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
8427 {
8428- memcpy(dst, src, count * sizeof(pgd_t));
8429+ pax_open_kernel();
8430+ while (count--)
8431+ *dst++ = *src++;
8432+ pax_close_kernel();
8433 }
8434
8435+#ifdef CONFIG_PAX_PER_CPU_PGD
8436+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8437+#endif
8438+
8439+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8440+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8441+#else
8442+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
8443+#endif
8444
8445 #include <asm-generic/pgtable.h>
8446 #endif /* __ASSEMBLY__ */
8447diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_types.h linux-3.0.4/arch/x86/include/asm/pgtable_types.h
8448--- linux-3.0.4/arch/x86/include/asm/pgtable_types.h 2011-07-21 22:17:23.000000000 -0400
8449+++ linux-3.0.4/arch/x86/include/asm/pgtable_types.h 2011-08-23 21:47:55.000000000 -0400
8450@@ -16,13 +16,12 @@
8451 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
8452 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
8453 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
8454-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
8455+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
8456 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
8457 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
8458 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
8459-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
8460-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
8461-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
8462+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
8463+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
8464 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
8465
8466 /* If _PAGE_BIT_PRESENT is clear, we use these: */
8467@@ -40,7 +39,6 @@
8468 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
8469 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
8470 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
8471-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
8472 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
8473 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
8474 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
8475@@ -57,8 +55,10 @@
8476
8477 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
8478 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
8479-#else
8480+#elif defined(CONFIG_KMEMCHECK)
8481 #define _PAGE_NX (_AT(pteval_t, 0))
8482+#else
8483+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
8484 #endif
8485
8486 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
8487@@ -96,6 +96,9 @@
8488 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
8489 _PAGE_ACCESSED)
8490
8491+#define PAGE_READONLY_NOEXEC PAGE_READONLY
8492+#define PAGE_SHARED_NOEXEC PAGE_SHARED
8493+
8494 #define __PAGE_KERNEL_EXEC \
8495 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
8496 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
8497@@ -106,8 +109,8 @@
8498 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
8499 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
8500 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
8501-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
8502-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
8503+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
8504+#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
8505 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
8506 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
8507 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
8508@@ -166,8 +169,8 @@
8509 * bits are combined, this will alow user to access the high address mapped
8510 * VDSO in the presence of CONFIG_COMPAT_VDSO
8511 */
8512-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
8513-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
8514+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8515+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8516 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
8517 #endif
8518
8519@@ -205,7 +208,17 @@ static inline pgdval_t pgd_flags(pgd_t p
8520 {
8521 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
8522 }
8523+#endif
8524
8525+#if PAGETABLE_LEVELS == 3
8526+#include <asm-generic/pgtable-nopud.h>
8527+#endif
8528+
8529+#if PAGETABLE_LEVELS == 2
8530+#include <asm-generic/pgtable-nopmd.h>
8531+#endif
8532+
8533+#ifndef __ASSEMBLY__
8534 #if PAGETABLE_LEVELS > 3
8535 typedef struct { pudval_t pud; } pud_t;
8536
8537@@ -219,8 +232,6 @@ static inline pudval_t native_pud_val(pu
8538 return pud.pud;
8539 }
8540 #else
8541-#include <asm-generic/pgtable-nopud.h>
8542-
8543 static inline pudval_t native_pud_val(pud_t pud)
8544 {
8545 return native_pgd_val(pud.pgd);
8546@@ -240,8 +251,6 @@ static inline pmdval_t native_pmd_val(pm
8547 return pmd.pmd;
8548 }
8549 #else
8550-#include <asm-generic/pgtable-nopmd.h>
8551-
8552 static inline pmdval_t native_pmd_val(pmd_t pmd)
8553 {
8554 return native_pgd_val(pmd.pud.pgd);
8555@@ -281,7 +290,6 @@ typedef struct page *pgtable_t;
8556
8557 extern pteval_t __supported_pte_mask;
8558 extern void set_nx(void);
8559-extern int nx_enabled;
8560
8561 #define pgprot_writecombine pgprot_writecombine
8562 extern pgprot_t pgprot_writecombine(pgprot_t prot);
8563diff -urNp linux-3.0.4/arch/x86/include/asm/processor.h linux-3.0.4/arch/x86/include/asm/processor.h
8564--- linux-3.0.4/arch/x86/include/asm/processor.h 2011-07-21 22:17:23.000000000 -0400
8565+++ linux-3.0.4/arch/x86/include/asm/processor.h 2011-08-23 21:47:55.000000000 -0400
8566@@ -266,7 +266,7 @@ struct tss_struct {
8567
8568 } ____cacheline_aligned;
8569
8570-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
8571+extern struct tss_struct init_tss[NR_CPUS];
8572
8573 /*
8574 * Save the original ist values for checking stack pointers during debugging
8575@@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(co
8576 */
8577 #define TASK_SIZE PAGE_OFFSET
8578 #define TASK_SIZE_MAX TASK_SIZE
8579+
8580+#ifdef CONFIG_PAX_SEGMEXEC
8581+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
8582+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
8583+#else
8584 #define STACK_TOP TASK_SIZE
8585-#define STACK_TOP_MAX STACK_TOP
8586+#endif
8587+
8588+#define STACK_TOP_MAX TASK_SIZE
8589
8590 #define INIT_THREAD { \
8591- .sp0 = sizeof(init_stack) + (long)&init_stack, \
8592+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8593 .vm86_info = NULL, \
8594 .sysenter_cs = __KERNEL_CS, \
8595 .io_bitmap_ptr = NULL, \
8596@@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(co
8597 */
8598 #define INIT_TSS { \
8599 .x86_tss = { \
8600- .sp0 = sizeof(init_stack) + (long)&init_stack, \
8601+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8602 .ss0 = __KERNEL_DS, \
8603 .ss1 = __KERNEL_CS, \
8604 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
8605@@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(co
8606 extern unsigned long thread_saved_pc(struct task_struct *tsk);
8607
8608 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
8609-#define KSTK_TOP(info) \
8610-({ \
8611- unsigned long *__ptr = (unsigned long *)(info); \
8612- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
8613-})
8614+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
8615
8616 /*
8617 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
8618@@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(str
8619 #define task_pt_regs(task) \
8620 ({ \
8621 struct pt_regs *__regs__; \
8622- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
8623+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
8624 __regs__ - 1; \
8625 })
8626
8627@@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(str
8628 /*
8629 * User space process size. 47bits minus one guard page.
8630 */
8631-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
8632+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
8633
8634 /* This decides where the kernel will search for a free chunk of vm
8635 * space during mmap's.
8636 */
8637 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
8638- 0xc0000000 : 0xFFFFe000)
8639+ 0xc0000000 : 0xFFFFf000)
8640
8641 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
8642 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
8643@@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(str
8644 #define STACK_TOP_MAX TASK_SIZE_MAX
8645
8646 #define INIT_THREAD { \
8647- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8648+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8649 }
8650
8651 #define INIT_TSS { \
8652- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8653+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8654 }
8655
8656 /*
8657@@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs
8658 */
8659 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
8660
8661+#ifdef CONFIG_PAX_SEGMEXEC
8662+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
8663+#endif
8664+
8665 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
8666
8667 /* Get/set a process' ability to use the timestamp counter instruction */
8668diff -urNp linux-3.0.4/arch/x86/include/asm/ptrace.h linux-3.0.4/arch/x86/include/asm/ptrace.h
8669--- linux-3.0.4/arch/x86/include/asm/ptrace.h 2011-07-21 22:17:23.000000000 -0400
8670+++ linux-3.0.4/arch/x86/include/asm/ptrace.h 2011-08-23 21:47:55.000000000 -0400
8671@@ -153,28 +153,29 @@ static inline unsigned long regs_return_
8672 }
8673
8674 /*
8675- * user_mode_vm(regs) determines whether a register set came from user mode.
8676+ * user_mode(regs) determines whether a register set came from user mode.
8677 * This is true if V8086 mode was enabled OR if the register set was from
8678 * protected mode with RPL-3 CS value. This tricky test checks that with
8679 * one comparison. Many places in the kernel can bypass this full check
8680- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
8681+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
8682+ * be used.
8683 */
8684-static inline int user_mode(struct pt_regs *regs)
8685+static inline int user_mode_novm(struct pt_regs *regs)
8686 {
8687 #ifdef CONFIG_X86_32
8688 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
8689 #else
8690- return !!(regs->cs & 3);
8691+ return !!(regs->cs & SEGMENT_RPL_MASK);
8692 #endif
8693 }
8694
8695-static inline int user_mode_vm(struct pt_regs *regs)
8696+static inline int user_mode(struct pt_regs *regs)
8697 {
8698 #ifdef CONFIG_X86_32
8699 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
8700 USER_RPL;
8701 #else
8702- return user_mode(regs);
8703+ return user_mode_novm(regs);
8704 #endif
8705 }
8706
8707diff -urNp linux-3.0.4/arch/x86/include/asm/reboot.h linux-3.0.4/arch/x86/include/asm/reboot.h
8708--- linux-3.0.4/arch/x86/include/asm/reboot.h 2011-07-21 22:17:23.000000000 -0400
8709+++ linux-3.0.4/arch/x86/include/asm/reboot.h 2011-08-23 21:47:55.000000000 -0400
8710@@ -6,19 +6,19 @@
8711 struct pt_regs;
8712
8713 struct machine_ops {
8714- void (*restart)(char *cmd);
8715- void (*halt)(void);
8716- void (*power_off)(void);
8717+ void (* __noreturn restart)(char *cmd);
8718+ void (* __noreturn halt)(void);
8719+ void (* __noreturn power_off)(void);
8720 void (*shutdown)(void);
8721 void (*crash_shutdown)(struct pt_regs *);
8722- void (*emergency_restart)(void);
8723-};
8724+ void (* __noreturn emergency_restart)(void);
8725+} __no_const;
8726
8727 extern struct machine_ops machine_ops;
8728
8729 void native_machine_crash_shutdown(struct pt_regs *regs);
8730 void native_machine_shutdown(void);
8731-void machine_real_restart(unsigned int type);
8732+void machine_real_restart(unsigned int type) __noreturn;
8733 /* These must match dispatch_table in reboot_32.S */
8734 #define MRR_BIOS 0
8735 #define MRR_APM 1
8736diff -urNp linux-3.0.4/arch/x86/include/asm/rwsem.h linux-3.0.4/arch/x86/include/asm/rwsem.h
8737--- linux-3.0.4/arch/x86/include/asm/rwsem.h 2011-07-21 22:17:23.000000000 -0400
8738+++ linux-3.0.4/arch/x86/include/asm/rwsem.h 2011-08-23 21:47:55.000000000 -0400
8739@@ -64,6 +64,14 @@ static inline void __down_read(struct rw
8740 {
8741 asm volatile("# beginning down_read\n\t"
8742 LOCK_PREFIX _ASM_INC "(%1)\n\t"
8743+
8744+#ifdef CONFIG_PAX_REFCOUNT
8745+ "jno 0f\n"
8746+ LOCK_PREFIX _ASM_DEC "(%1)\n"
8747+ "int $4\n0:\n"
8748+ _ASM_EXTABLE(0b, 0b)
8749+#endif
8750+
8751 /* adds 0x00000001 */
8752 " jns 1f\n"
8753 " call call_rwsem_down_read_failed\n"
8754@@ -85,6 +93,14 @@ static inline int __down_read_trylock(st
8755 "1:\n\t"
8756 " mov %1,%2\n\t"
8757 " add %3,%2\n\t"
8758+
8759+#ifdef CONFIG_PAX_REFCOUNT
8760+ "jno 0f\n"
8761+ "sub %3,%2\n"
8762+ "int $4\n0:\n"
8763+ _ASM_EXTABLE(0b, 0b)
8764+#endif
8765+
8766 " jle 2f\n\t"
8767 LOCK_PREFIX " cmpxchg %2,%0\n\t"
8768 " jnz 1b\n\t"
8769@@ -104,6 +120,14 @@ static inline void __down_write_nested(s
8770 long tmp;
8771 asm volatile("# beginning down_write\n\t"
8772 LOCK_PREFIX " xadd %1,(%2)\n\t"
8773+
8774+#ifdef CONFIG_PAX_REFCOUNT
8775+ "jno 0f\n"
8776+ "mov %1,(%2)\n"
8777+ "int $4\n0:\n"
8778+ _ASM_EXTABLE(0b, 0b)
8779+#endif
8780+
8781 /* adds 0xffff0001, returns the old value */
8782 " test %1,%1\n\t"
8783 /* was the count 0 before? */
8784@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_s
8785 long tmp;
8786 asm volatile("# beginning __up_read\n\t"
8787 LOCK_PREFIX " xadd %1,(%2)\n\t"
8788+
8789+#ifdef CONFIG_PAX_REFCOUNT
8790+ "jno 0f\n"
8791+ "mov %1,(%2)\n"
8792+ "int $4\n0:\n"
8793+ _ASM_EXTABLE(0b, 0b)
8794+#endif
8795+
8796 /* subtracts 1, returns the old value */
8797 " jns 1f\n\t"
8798 " call call_rwsem_wake\n" /* expects old value in %edx */
8799@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_
8800 long tmp;
8801 asm volatile("# beginning __up_write\n\t"
8802 LOCK_PREFIX " xadd %1,(%2)\n\t"
8803+
8804+#ifdef CONFIG_PAX_REFCOUNT
8805+ "jno 0f\n"
8806+ "mov %1,(%2)\n"
8807+ "int $4\n0:\n"
8808+ _ASM_EXTABLE(0b, 0b)
8809+#endif
8810+
8811 /* subtracts 0xffff0001, returns the old value */
8812 " jns 1f\n\t"
8813 " call call_rwsem_wake\n" /* expects old value in %edx */
8814@@ -176,6 +216,14 @@ static inline void __downgrade_write(str
8815 {
8816 asm volatile("# beginning __downgrade_write\n\t"
8817 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
8818+
8819+#ifdef CONFIG_PAX_REFCOUNT
8820+ "jno 0f\n"
8821+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
8822+ "int $4\n0:\n"
8823+ _ASM_EXTABLE(0b, 0b)
8824+#endif
8825+
8826 /*
8827 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
8828 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
8829@@ -194,7 +242,15 @@ static inline void __downgrade_write(str
8830 */
8831 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
8832 {
8833- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
8834+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
8835+
8836+#ifdef CONFIG_PAX_REFCOUNT
8837+ "jno 0f\n"
8838+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
8839+ "int $4\n0:\n"
8840+ _ASM_EXTABLE(0b, 0b)
8841+#endif
8842+
8843 : "+m" (sem->count)
8844 : "er" (delta));
8845 }
8846@@ -206,7 +262,15 @@ static inline long rwsem_atomic_update(l
8847 {
8848 long tmp = delta;
8849
8850- asm volatile(LOCK_PREFIX "xadd %0,%1"
8851+ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
8852+
8853+#ifdef CONFIG_PAX_REFCOUNT
8854+ "jno 0f\n"
8855+ "mov %0,%1\n"
8856+ "int $4\n0:\n"
8857+ _ASM_EXTABLE(0b, 0b)
8858+#endif
8859+
8860 : "+r" (tmp), "+m" (sem->count)
8861 : : "memory");
8862
8863diff -urNp linux-3.0.4/arch/x86/include/asm/segment.h linux-3.0.4/arch/x86/include/asm/segment.h
8864--- linux-3.0.4/arch/x86/include/asm/segment.h 2011-07-21 22:17:23.000000000 -0400
8865+++ linux-3.0.4/arch/x86/include/asm/segment.h 2011-09-17 00:53:42.000000000 -0400
8866@@ -64,10 +64,15 @@
8867 * 26 - ESPFIX small SS
8868 * 27 - per-cpu [ offset to per-cpu data area ]
8869 * 28 - stack_canary-20 [ for stack protector ]
8870- * 29 - unused
8871- * 30 - unused
8872+ * 29 - PCI BIOS CS
8873+ * 30 - PCI BIOS DS
8874 * 31 - TSS for double fault handler
8875 */
8876+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
8877+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
8878+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
8879+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
8880+
8881 #define GDT_ENTRY_TLS_MIN 6
8882 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
8883
8884@@ -79,6 +84,8 @@
8885
8886 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
8887
8888+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
8889+
8890 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
8891
8892 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
8893@@ -104,6 +111,12 @@
8894 #define __KERNEL_STACK_CANARY 0
8895 #endif
8896
8897+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
8898+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
8899+
8900+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
8901+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
8902+
8903 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
8904
8905 /*
8906@@ -141,7 +154,7 @@
8907 */
8908
8909 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
8910-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
8911+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
8912
8913
8914 #else
8915@@ -165,6 +178,8 @@
8916 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
8917 #define __USER32_DS __USER_DS
8918
8919+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
8920+
8921 #define GDT_ENTRY_TSS 8 /* needs two entries */
8922 #define GDT_ENTRY_LDT 10 /* needs two entries */
8923 #define GDT_ENTRY_TLS_MIN 12
8924@@ -185,6 +200,7 @@
8925 #endif
8926
8927 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
8928+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
8929 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
8930 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
8931 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
8932diff -urNp linux-3.0.4/arch/x86/include/asm/smp.h linux-3.0.4/arch/x86/include/asm/smp.h
8933--- linux-3.0.4/arch/x86/include/asm/smp.h 2011-07-21 22:17:23.000000000 -0400
8934+++ linux-3.0.4/arch/x86/include/asm/smp.h 2011-08-23 21:47:55.000000000 -0400
8935@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_
8936 /* cpus sharing the last level cache: */
8937 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
8938 DECLARE_PER_CPU(u16, cpu_llc_id);
8939-DECLARE_PER_CPU(int, cpu_number);
8940+DECLARE_PER_CPU(unsigned int, cpu_number);
8941
8942 static inline struct cpumask *cpu_sibling_mask(int cpu)
8943 {
8944@@ -77,7 +77,7 @@ struct smp_ops {
8945
8946 void (*send_call_func_ipi)(const struct cpumask *mask);
8947 void (*send_call_func_single_ipi)(int cpu);
8948-};
8949+} __no_const;
8950
8951 /* Globals due to paravirt */
8952 extern void set_cpu_sibling_map(int cpu);
8953@@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitd
8954 extern int safe_smp_processor_id(void);
8955
8956 #elif defined(CONFIG_X86_64_SMP)
8957-#define raw_smp_processor_id() (percpu_read(cpu_number))
8958-
8959-#define stack_smp_processor_id() \
8960-({ \
8961- struct thread_info *ti; \
8962- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
8963- ti->cpu; \
8964-})
8965+#define raw_smp_processor_id() (percpu_read(cpu_number))
8966+#define stack_smp_processor_id() raw_smp_processor_id()
8967 #define safe_smp_processor_id() smp_processor_id()
8968
8969 #endif
8970diff -urNp linux-3.0.4/arch/x86/include/asm/spinlock.h linux-3.0.4/arch/x86/include/asm/spinlock.h
8971--- linux-3.0.4/arch/x86/include/asm/spinlock.h 2011-07-21 22:17:23.000000000 -0400
8972+++ linux-3.0.4/arch/x86/include/asm/spinlock.h 2011-08-23 21:47:55.000000000 -0400
8973@@ -249,6 +249,14 @@ static inline int arch_write_can_lock(ar
8974 static inline void arch_read_lock(arch_rwlock_t *rw)
8975 {
8976 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
8977+
8978+#ifdef CONFIG_PAX_REFCOUNT
8979+ "jno 0f\n"
8980+ LOCK_PREFIX " addl $1,(%0)\n"
8981+ "int $4\n0:\n"
8982+ _ASM_EXTABLE(0b, 0b)
8983+#endif
8984+
8985 "jns 1f\n"
8986 "call __read_lock_failed\n\t"
8987 "1:\n"
8988@@ -258,6 +266,14 @@ static inline void arch_read_lock(arch_r
8989 static inline void arch_write_lock(arch_rwlock_t *rw)
8990 {
8991 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
8992+
8993+#ifdef CONFIG_PAX_REFCOUNT
8994+ "jno 0f\n"
8995+ LOCK_PREFIX " addl %1,(%0)\n"
8996+ "int $4\n0:\n"
8997+ _ASM_EXTABLE(0b, 0b)
8998+#endif
8999+
9000 "jz 1f\n"
9001 "call __write_lock_failed\n\t"
9002 "1:\n"
9003@@ -286,12 +302,29 @@ static inline int arch_write_trylock(arc
9004
9005 static inline void arch_read_unlock(arch_rwlock_t *rw)
9006 {
9007- asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
9008+ asm volatile(LOCK_PREFIX "incl %0\n"
9009+
9010+#ifdef CONFIG_PAX_REFCOUNT
9011+ "jno 0f\n"
9012+ LOCK_PREFIX "decl %0\n"
9013+ "int $4\n0:\n"
9014+ _ASM_EXTABLE(0b, 0b)
9015+#endif
9016+
9017+ :"+m" (rw->lock) : : "memory");
9018 }
9019
9020 static inline void arch_write_unlock(arch_rwlock_t *rw)
9021 {
9022- asm volatile(LOCK_PREFIX "addl %1, %0"
9023+ asm volatile(LOCK_PREFIX "addl %1, %0\n"
9024+
9025+#ifdef CONFIG_PAX_REFCOUNT
9026+ "jno 0f\n"
9027+ LOCK_PREFIX "subl %1, %0\n"
9028+ "int $4\n0:\n"
9029+ _ASM_EXTABLE(0b, 0b)
9030+#endif
9031+
9032 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
9033 }
9034
9035diff -urNp linux-3.0.4/arch/x86/include/asm/stackprotector.h linux-3.0.4/arch/x86/include/asm/stackprotector.h
9036--- linux-3.0.4/arch/x86/include/asm/stackprotector.h 2011-07-21 22:17:23.000000000 -0400
9037+++ linux-3.0.4/arch/x86/include/asm/stackprotector.h 2011-08-23 21:47:55.000000000 -0400
9038@@ -48,7 +48,7 @@
9039 * head_32 for boot CPU and setup_per_cpu_areas() for others.
9040 */
9041 #define GDT_STACK_CANARY_INIT \
9042- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
9043+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
9044
9045 /*
9046 * Initialize the stackprotector canary value.
9047@@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
9048
9049 static inline void load_stack_canary_segment(void)
9050 {
9051-#ifdef CONFIG_X86_32
9052+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
9053 asm volatile ("mov %0, %%gs" : : "r" (0));
9054 #endif
9055 }
9056diff -urNp linux-3.0.4/arch/x86/include/asm/stacktrace.h linux-3.0.4/arch/x86/include/asm/stacktrace.h
9057--- linux-3.0.4/arch/x86/include/asm/stacktrace.h 2011-07-21 22:17:23.000000000 -0400
9058+++ linux-3.0.4/arch/x86/include/asm/stacktrace.h 2011-08-23 21:47:55.000000000 -0400
9059@@ -11,28 +11,20 @@
9060
9061 extern int kstack_depth_to_print;
9062
9063-struct thread_info;
9064+struct task_struct;
9065 struct stacktrace_ops;
9066
9067-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
9068- unsigned long *stack,
9069- unsigned long bp,
9070- const struct stacktrace_ops *ops,
9071- void *data,
9072- unsigned long *end,
9073- int *graph);
9074-
9075-extern unsigned long
9076-print_context_stack(struct thread_info *tinfo,
9077- unsigned long *stack, unsigned long bp,
9078- const struct stacktrace_ops *ops, void *data,
9079- unsigned long *end, int *graph);
9080-
9081-extern unsigned long
9082-print_context_stack_bp(struct thread_info *tinfo,
9083- unsigned long *stack, unsigned long bp,
9084- const struct stacktrace_ops *ops, void *data,
9085- unsigned long *end, int *graph);
9086+typedef unsigned long walk_stack_t(struct task_struct *task,
9087+ void *stack_start,
9088+ unsigned long *stack,
9089+ unsigned long bp,
9090+ const struct stacktrace_ops *ops,
9091+ void *data,
9092+ unsigned long *end,
9093+ int *graph);
9094+
9095+extern walk_stack_t print_context_stack;
9096+extern walk_stack_t print_context_stack_bp;
9097
9098 /* Generic stack tracer with callbacks */
9099
9100@@ -40,7 +32,7 @@ struct stacktrace_ops {
9101 void (*address)(void *data, unsigned long address, int reliable);
9102 /* On negative return stop dumping */
9103 int (*stack)(void *data, char *name);
9104- walk_stack_t walk_stack;
9105+ walk_stack_t *walk_stack;
9106 };
9107
9108 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
9109diff -urNp linux-3.0.4/arch/x86/include/asm/sys_ia32.h linux-3.0.4/arch/x86/include/asm/sys_ia32.h
9110--- linux-3.0.4/arch/x86/include/asm/sys_ia32.h 2011-07-21 22:17:23.000000000 -0400
9111+++ linux-3.0.4/arch/x86/include/asm/sys_ia32.h 2011-10-06 04:17:55.000000000 -0400
9112@@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int
9113 compat_sigset_t __user *, unsigned int);
9114 asmlinkage long sys32_alarm(unsigned int);
9115
9116-asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
9117+asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
9118 asmlinkage long sys32_sysfs(int, u32, u32);
9119
9120 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
9121diff -urNp linux-3.0.4/arch/x86/include/asm/system.h linux-3.0.4/arch/x86/include/asm/system.h
9122--- linux-3.0.4/arch/x86/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
9123+++ linux-3.0.4/arch/x86/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
9124@@ -129,7 +129,7 @@ do { \
9125 "call __switch_to\n\t" \
9126 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
9127 __switch_canary \
9128- "movq %P[thread_info](%%rsi),%%r8\n\t" \
9129+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
9130 "movq %%rax,%%rdi\n\t" \
9131 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
9132 "jnz ret_from_fork\n\t" \
9133@@ -140,7 +140,7 @@ do { \
9134 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
9135 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
9136 [_tif_fork] "i" (_TIF_FORK), \
9137- [thread_info] "i" (offsetof(struct task_struct, stack)), \
9138+ [thread_info] "m" (current_tinfo), \
9139 [current_task] "m" (current_task) \
9140 __switch_canary_iparam \
9141 : "memory", "cc" __EXTRA_CLOBBER)
9142@@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
9143 {
9144 unsigned long __limit;
9145 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
9146- return __limit + 1;
9147+ return __limit;
9148 }
9149
9150 static inline void native_clts(void)
9151@@ -397,12 +397,12 @@ void enable_hlt(void);
9152
9153 void cpu_idle_wait(void);
9154
9155-extern unsigned long arch_align_stack(unsigned long sp);
9156+#define arch_align_stack(x) ((x) & ~0xfUL)
9157 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
9158
9159 void default_idle(void);
9160
9161-void stop_this_cpu(void *dummy);
9162+void stop_this_cpu(void *dummy) __noreturn;
9163
9164 /*
9165 * Force strict CPU ordering.
9166diff -urNp linux-3.0.4/arch/x86/include/asm/thread_info.h linux-3.0.4/arch/x86/include/asm/thread_info.h
9167--- linux-3.0.4/arch/x86/include/asm/thread_info.h 2011-07-21 22:17:23.000000000 -0400
9168+++ linux-3.0.4/arch/x86/include/asm/thread_info.h 2011-08-23 21:47:55.000000000 -0400
9169@@ -10,6 +10,7 @@
9170 #include <linux/compiler.h>
9171 #include <asm/page.h>
9172 #include <asm/types.h>
9173+#include <asm/percpu.h>
9174
9175 /*
9176 * low level task data that entry.S needs immediate access to
9177@@ -24,7 +25,6 @@ struct exec_domain;
9178 #include <asm/atomic.h>
9179
9180 struct thread_info {
9181- struct task_struct *task; /* main task structure */
9182 struct exec_domain *exec_domain; /* execution domain */
9183 __u32 flags; /* low level flags */
9184 __u32 status; /* thread synchronous flags */
9185@@ -34,18 +34,12 @@ struct thread_info {
9186 mm_segment_t addr_limit;
9187 struct restart_block restart_block;
9188 void __user *sysenter_return;
9189-#ifdef CONFIG_X86_32
9190- unsigned long previous_esp; /* ESP of the previous stack in
9191- case of nested (IRQ) stacks
9192- */
9193- __u8 supervisor_stack[0];
9194-#endif
9195+ unsigned long lowest_stack;
9196 int uaccess_err;
9197 };
9198
9199-#define INIT_THREAD_INFO(tsk) \
9200+#define INIT_THREAD_INFO \
9201 { \
9202- .task = &tsk, \
9203 .exec_domain = &default_exec_domain, \
9204 .flags = 0, \
9205 .cpu = 0, \
9206@@ -56,7 +50,7 @@ struct thread_info {
9207 }, \
9208 }
9209
9210-#define init_thread_info (init_thread_union.thread_info)
9211+#define init_thread_info (init_thread_union.stack)
9212 #define init_stack (init_thread_union.stack)
9213
9214 #else /* !__ASSEMBLY__ */
9215@@ -170,6 +164,23 @@ struct thread_info {
9216 ret; \
9217 })
9218
9219+#ifdef __ASSEMBLY__
9220+/* how to get the thread information struct from ASM */
9221+#define GET_THREAD_INFO(reg) \
9222+ mov PER_CPU_VAR(current_tinfo), reg
9223+
9224+/* use this one if reg already contains %esp */
9225+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
9226+#else
9227+/* how to get the thread information struct from C */
9228+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
9229+
9230+static __always_inline struct thread_info *current_thread_info(void)
9231+{
9232+ return percpu_read_stable(current_tinfo);
9233+}
9234+#endif
9235+
9236 #ifdef CONFIG_X86_32
9237
9238 #define STACK_WARN (THREAD_SIZE/8)
9239@@ -180,35 +191,13 @@ struct thread_info {
9240 */
9241 #ifndef __ASSEMBLY__
9242
9243-
9244 /* how to get the current stack pointer from C */
9245 register unsigned long current_stack_pointer asm("esp") __used;
9246
9247-/* how to get the thread information struct from C */
9248-static inline struct thread_info *current_thread_info(void)
9249-{
9250- return (struct thread_info *)
9251- (current_stack_pointer & ~(THREAD_SIZE - 1));
9252-}
9253-
9254-#else /* !__ASSEMBLY__ */
9255-
9256-/* how to get the thread information struct from ASM */
9257-#define GET_THREAD_INFO(reg) \
9258- movl $-THREAD_SIZE, reg; \
9259- andl %esp, reg
9260-
9261-/* use this one if reg already contains %esp */
9262-#define GET_THREAD_INFO_WITH_ESP(reg) \
9263- andl $-THREAD_SIZE, reg
9264-
9265 #endif
9266
9267 #else /* X86_32 */
9268
9269-#include <asm/percpu.h>
9270-#define KERNEL_STACK_OFFSET (5*8)
9271-
9272 /*
9273 * macros/functions for gaining access to the thread information structure
9274 * preempt_count needs to be 1 initially, until the scheduler is functional.
9275@@ -216,21 +205,8 @@ static inline struct thread_info *curren
9276 #ifndef __ASSEMBLY__
9277 DECLARE_PER_CPU(unsigned long, kernel_stack);
9278
9279-static inline struct thread_info *current_thread_info(void)
9280-{
9281- struct thread_info *ti;
9282- ti = (void *)(percpu_read_stable(kernel_stack) +
9283- KERNEL_STACK_OFFSET - THREAD_SIZE);
9284- return ti;
9285-}
9286-
9287-#else /* !__ASSEMBLY__ */
9288-
9289-/* how to get the thread information struct from ASM */
9290-#define GET_THREAD_INFO(reg) \
9291- movq PER_CPU_VAR(kernel_stack),reg ; \
9292- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
9293-
9294+/* how to get the current stack pointer from C */
9295+register unsigned long current_stack_pointer asm("rsp") __used;
9296 #endif
9297
9298 #endif /* !X86_32 */
9299@@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
9300 extern void free_thread_info(struct thread_info *ti);
9301 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
9302 #define arch_task_cache_init arch_task_cache_init
9303+
9304+#define __HAVE_THREAD_FUNCTIONS
9305+#define task_thread_info(task) (&(task)->tinfo)
9306+#define task_stack_page(task) ((task)->stack)
9307+#define setup_thread_stack(p, org) do {} while (0)
9308+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
9309+
9310+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
9311+extern struct task_struct *alloc_task_struct_node(int node);
9312+extern void free_task_struct(struct task_struct *);
9313+
9314 #endif
9315 #endif /* _ASM_X86_THREAD_INFO_H */
9316diff -urNp linux-3.0.4/arch/x86/include/asm/uaccess_32.h linux-3.0.4/arch/x86/include/asm/uaccess_32.h
9317--- linux-3.0.4/arch/x86/include/asm/uaccess_32.h 2011-07-21 22:17:23.000000000 -0400
9318+++ linux-3.0.4/arch/x86/include/asm/uaccess_32.h 2011-08-23 21:48:14.000000000 -0400
9319@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_u
9320 static __always_inline unsigned long __must_check
9321 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
9322 {
9323+ pax_track_stack();
9324+
9325+ if ((long)n < 0)
9326+ return n;
9327+
9328 if (__builtin_constant_p(n)) {
9329 unsigned long ret;
9330
9331@@ -61,6 +66,8 @@ __copy_to_user_inatomic(void __user *to,
9332 return ret;
9333 }
9334 }
9335+ if (!__builtin_constant_p(n))
9336+ check_object_size(from, n, true);
9337 return __copy_to_user_ll(to, from, n);
9338 }
9339
9340@@ -82,12 +89,16 @@ static __always_inline unsigned long __m
9341 __copy_to_user(void __user *to, const void *from, unsigned long n)
9342 {
9343 might_fault();
9344+
9345 return __copy_to_user_inatomic(to, from, n);
9346 }
9347
9348 static __always_inline unsigned long
9349 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
9350 {
9351+ if ((long)n < 0)
9352+ return n;
9353+
9354 /* Avoid zeroing the tail if the copy fails..
9355 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
9356 * but as the zeroing behaviour is only significant when n is not
9357@@ -137,6 +148,12 @@ static __always_inline unsigned long
9358 __copy_from_user(void *to, const void __user *from, unsigned long n)
9359 {
9360 might_fault();
9361+
9362+ pax_track_stack();
9363+
9364+ if ((long)n < 0)
9365+ return n;
9366+
9367 if (__builtin_constant_p(n)) {
9368 unsigned long ret;
9369
9370@@ -152,6 +169,8 @@ __copy_from_user(void *to, const void __
9371 return ret;
9372 }
9373 }
9374+ if (!__builtin_constant_p(n))
9375+ check_object_size(to, n, false);
9376 return __copy_from_user_ll(to, from, n);
9377 }
9378
9379@@ -159,6 +178,10 @@ static __always_inline unsigned long __c
9380 const void __user *from, unsigned long n)
9381 {
9382 might_fault();
9383+
9384+ if ((long)n < 0)
9385+ return n;
9386+
9387 if (__builtin_constant_p(n)) {
9388 unsigned long ret;
9389
9390@@ -181,15 +204,19 @@ static __always_inline unsigned long
9391 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
9392 unsigned long n)
9393 {
9394- return __copy_from_user_ll_nocache_nozero(to, from, n);
9395-}
9396+ if ((long)n < 0)
9397+ return n;
9398
9399-unsigned long __must_check copy_to_user(void __user *to,
9400- const void *from, unsigned long n);
9401-unsigned long __must_check _copy_from_user(void *to,
9402- const void __user *from,
9403- unsigned long n);
9404+ return __copy_from_user_ll_nocache_nozero(to, from, n);
9405+}
9406
9407+extern void copy_to_user_overflow(void)
9408+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9409+ __compiletime_error("copy_to_user() buffer size is not provably correct")
9410+#else
9411+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
9412+#endif
9413+;
9414
9415 extern void copy_from_user_overflow(void)
9416 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9417@@ -199,17 +226,61 @@ extern void copy_from_user_overflow(void
9418 #endif
9419 ;
9420
9421-static inline unsigned long __must_check copy_from_user(void *to,
9422- const void __user *from,
9423- unsigned long n)
9424+/**
9425+ * copy_to_user: - Copy a block of data into user space.
9426+ * @to: Destination address, in user space.
9427+ * @from: Source address, in kernel space.
9428+ * @n: Number of bytes to copy.
9429+ *
9430+ * Context: User context only. This function may sleep.
9431+ *
9432+ * Copy data from kernel space to user space.
9433+ *
9434+ * Returns number of bytes that could not be copied.
9435+ * On success, this will be zero.
9436+ */
9437+static inline unsigned long __must_check
9438+copy_to_user(void __user *to, const void *from, unsigned long n)
9439+{
9440+ int sz = __compiletime_object_size(from);
9441+
9442+ if (unlikely(sz != -1 && sz < n))
9443+ copy_to_user_overflow();
9444+ else if (access_ok(VERIFY_WRITE, to, n))
9445+ n = __copy_to_user(to, from, n);
9446+ return n;
9447+}
9448+
9449+/**
9450+ * copy_from_user: - Copy a block of data from user space.
9451+ * @to: Destination address, in kernel space.
9452+ * @from: Source address, in user space.
9453+ * @n: Number of bytes to copy.
9454+ *
9455+ * Context: User context only. This function may sleep.
9456+ *
9457+ * Copy data from user space to kernel space.
9458+ *
9459+ * Returns number of bytes that could not be copied.
9460+ * On success, this will be zero.
9461+ *
9462+ * If some data could not be copied, this function will pad the copied
9463+ * data to the requested size using zero bytes.
9464+ */
9465+static inline unsigned long __must_check
9466+copy_from_user(void *to, const void __user *from, unsigned long n)
9467 {
9468 int sz = __compiletime_object_size(to);
9469
9470- if (likely(sz == -1 || sz >= n))
9471- n = _copy_from_user(to, from, n);
9472- else
9473+ if (unlikely(sz != -1 && sz < n))
9474 copy_from_user_overflow();
9475-
9476+ else if (access_ok(VERIFY_READ, from, n))
9477+ n = __copy_from_user(to, from, n);
9478+ else if ((long)n > 0) {
9479+ if (!__builtin_constant_p(n))
9480+ check_object_size(to, n, false);
9481+ memset(to, 0, n);
9482+ }
9483 return n;
9484 }
9485
9486diff -urNp linux-3.0.4/arch/x86/include/asm/uaccess_64.h linux-3.0.4/arch/x86/include/asm/uaccess_64.h
9487--- linux-3.0.4/arch/x86/include/asm/uaccess_64.h 2011-07-21 22:17:23.000000000 -0400
9488+++ linux-3.0.4/arch/x86/include/asm/uaccess_64.h 2011-10-06 04:17:55.000000000 -0400
9489@@ -10,6 +10,9 @@
9490 #include <asm/alternative.h>
9491 #include <asm/cpufeature.h>
9492 #include <asm/page.h>
9493+#include <asm/pgtable.h>
9494+
9495+#define set_fs(x) (current_thread_info()->addr_limit = (x))
9496
9497 /*
9498 * Copy To/From Userspace
9499@@ -36,26 +39,26 @@ copy_user_generic(void *to, const void *
9500 return ret;
9501 }
9502
9503-__must_check unsigned long
9504-_copy_to_user(void __user *to, const void *from, unsigned len);
9505-__must_check unsigned long
9506-_copy_from_user(void *to, const void __user *from, unsigned len);
9507+static __always_inline __must_check unsigned long
9508+__copy_to_user(void __user *to, const void *from, unsigned len);
9509+static __always_inline __must_check unsigned long
9510+__copy_from_user(void *to, const void __user *from, unsigned len);
9511 __must_check unsigned long
9512 copy_in_user(void __user *to, const void __user *from, unsigned len);
9513
9514 static inline unsigned long __must_check copy_from_user(void *to,
9515 const void __user *from,
9516- unsigned long n)
9517+ unsigned n)
9518 {
9519- int sz = __compiletime_object_size(to);
9520-
9521 might_fault();
9522- if (likely(sz == -1 || sz >= n))
9523- n = _copy_from_user(to, from, n);
9524-#ifdef CONFIG_DEBUG_VM
9525- else
9526- WARN(1, "Buffer overflow detected!\n");
9527-#endif
9528+
9529+ if (access_ok(VERIFY_READ, from, n))
9530+ n = __copy_from_user(to, from, n);
9531+ else if ((int)n > 0) {
9532+ if (!__builtin_constant_p(n))
9533+ check_object_size(to, n, false);
9534+ memset(to, 0, n);
9535+ }
9536 return n;
9537 }
9538
9539@@ -64,110 +67,198 @@ int copy_to_user(void __user *dst, const
9540 {
9541 might_fault();
9542
9543- return _copy_to_user(dst, src, size);
9544+ if (access_ok(VERIFY_WRITE, dst, size))
9545+ size = __copy_to_user(dst, src, size);
9546+ return size;
9547 }
9548
9549 static __always_inline __must_check
9550-int __copy_from_user(void *dst, const void __user *src, unsigned size)
9551+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
9552 {
9553- int ret = 0;
9554+ int sz = __compiletime_object_size(dst);
9555+ unsigned ret = 0;
9556
9557 might_fault();
9558- if (!__builtin_constant_p(size))
9559- return copy_user_generic(dst, (__force void *)src, size);
9560+
9561+ pax_track_stack();
9562+
9563+ if ((int)size < 0)
9564+ return size;
9565+
9566+#ifdef CONFIG_PAX_MEMORY_UDEREF
9567+ if (!__access_ok(VERIFY_READ, src, size))
9568+ return size;
9569+#endif
9570+
9571+ if (unlikely(sz != -1 && sz < size)) {
9572+#ifdef CONFIG_DEBUG_VM
9573+ WARN(1, "Buffer overflow detected!\n");
9574+#endif
9575+ return size;
9576+ }
9577+
9578+ if (!__builtin_constant_p(size)) {
9579+ check_object_size(dst, size, false);
9580+
9581+#ifdef CONFIG_PAX_MEMORY_UDEREF
9582+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9583+ src += PAX_USER_SHADOW_BASE;
9584+#endif
9585+
9586+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
9587+ }
9588 switch (size) {
9589- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
9590+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
9591 ret, "b", "b", "=q", 1);
9592 return ret;
9593- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
9594+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
9595 ret, "w", "w", "=r", 2);
9596 return ret;
9597- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
9598+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
9599 ret, "l", "k", "=r", 4);
9600 return ret;
9601- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
9602+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9603 ret, "q", "", "=r", 8);
9604 return ret;
9605 case 10:
9606- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9607+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9608 ret, "q", "", "=r", 10);
9609 if (unlikely(ret))
9610 return ret;
9611 __get_user_asm(*(u16 *)(8 + (char *)dst),
9612- (u16 __user *)(8 + (char __user *)src),
9613+ (const u16 __user *)(8 + (const char __user *)src),
9614 ret, "w", "w", "=r", 2);
9615 return ret;
9616 case 16:
9617- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9618+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9619 ret, "q", "", "=r", 16);
9620 if (unlikely(ret))
9621 return ret;
9622 __get_user_asm(*(u64 *)(8 + (char *)dst),
9623- (u64 __user *)(8 + (char __user *)src),
9624+ (const u64 __user *)(8 + (const char __user *)src),
9625 ret, "q", "", "=r", 8);
9626 return ret;
9627 default:
9628- return copy_user_generic(dst, (__force void *)src, size);
9629+
9630+#ifdef CONFIG_PAX_MEMORY_UDEREF
9631+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9632+ src += PAX_USER_SHADOW_BASE;
9633+#endif
9634+
9635+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
9636 }
9637 }
9638
9639 static __always_inline __must_check
9640-int __copy_to_user(void __user *dst, const void *src, unsigned size)
9641+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
9642 {
9643- int ret = 0;
9644+ int sz = __compiletime_object_size(src);
9645+ unsigned ret = 0;
9646
9647 might_fault();
9648- if (!__builtin_constant_p(size))
9649- return copy_user_generic((__force void *)dst, src, size);
9650+
9651+ pax_track_stack();
9652+
9653+ if ((int)size < 0)
9654+ return size;
9655+
9656+#ifdef CONFIG_PAX_MEMORY_UDEREF
9657+ if (!__access_ok(VERIFY_WRITE, dst, size))
9658+ return size;
9659+#endif
9660+
9661+ if (unlikely(sz != -1 && sz < size)) {
9662+#ifdef CONFIG_DEBUG_VM
9663+ WARN(1, "Buffer overflow detected!\n");
9664+#endif
9665+ return size;
9666+ }
9667+
9668+ if (!__builtin_constant_p(size)) {
9669+ check_object_size(src, size, true);
9670+
9671+#ifdef CONFIG_PAX_MEMORY_UDEREF
9672+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9673+ dst += PAX_USER_SHADOW_BASE;
9674+#endif
9675+
9676+ return copy_user_generic((__force_kernel void *)dst, src, size);
9677+ }
9678 switch (size) {
9679- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
9680+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
9681 ret, "b", "b", "iq", 1);
9682 return ret;
9683- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
9684+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
9685 ret, "w", "w", "ir", 2);
9686 return ret;
9687- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
9688+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
9689 ret, "l", "k", "ir", 4);
9690 return ret;
9691- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
9692+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9693 ret, "q", "", "er", 8);
9694 return ret;
9695 case 10:
9696- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9697+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9698 ret, "q", "", "er", 10);
9699 if (unlikely(ret))
9700 return ret;
9701 asm("":::"memory");
9702- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
9703+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
9704 ret, "w", "w", "ir", 2);
9705 return ret;
9706 case 16:
9707- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9708+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9709 ret, "q", "", "er", 16);
9710 if (unlikely(ret))
9711 return ret;
9712 asm("":::"memory");
9713- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
9714+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
9715 ret, "q", "", "er", 8);
9716 return ret;
9717 default:
9718- return copy_user_generic((__force void *)dst, src, size);
9719+
9720+#ifdef CONFIG_PAX_MEMORY_UDEREF
9721+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9722+ dst += PAX_USER_SHADOW_BASE;
9723+#endif
9724+
9725+ return copy_user_generic((__force_kernel void *)dst, src, size);
9726 }
9727 }
9728
9729 static __always_inline __must_check
9730-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9731+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9732 {
9733- int ret = 0;
9734+ unsigned ret = 0;
9735
9736 might_fault();
9737- if (!__builtin_constant_p(size))
9738- return copy_user_generic((__force void *)dst,
9739- (__force void *)src, size);
9740+
9741+ if ((int)size < 0)
9742+ return size;
9743+
9744+#ifdef CONFIG_PAX_MEMORY_UDEREF
9745+ if (!__access_ok(VERIFY_READ, src, size))
9746+ return size;
9747+ if (!__access_ok(VERIFY_WRITE, dst, size))
9748+ return size;
9749+#endif
9750+
9751+ if (!__builtin_constant_p(size)) {
9752+
9753+#ifdef CONFIG_PAX_MEMORY_UDEREF
9754+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9755+ src += PAX_USER_SHADOW_BASE;
9756+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9757+ dst += PAX_USER_SHADOW_BASE;
9758+#endif
9759+
9760+ return copy_user_generic((__force_kernel void *)dst,
9761+ (__force_kernel const void *)src, size);
9762+ }
9763 switch (size) {
9764 case 1: {
9765 u8 tmp;
9766- __get_user_asm(tmp, (u8 __user *)src,
9767+ __get_user_asm(tmp, (const u8 __user *)src,
9768 ret, "b", "b", "=q", 1);
9769 if (likely(!ret))
9770 __put_user_asm(tmp, (u8 __user *)dst,
9771@@ -176,7 +267,7 @@ int __copy_in_user(void __user *dst, con
9772 }
9773 case 2: {
9774 u16 tmp;
9775- __get_user_asm(tmp, (u16 __user *)src,
9776+ __get_user_asm(tmp, (const u16 __user *)src,
9777 ret, "w", "w", "=r", 2);
9778 if (likely(!ret))
9779 __put_user_asm(tmp, (u16 __user *)dst,
9780@@ -186,7 +277,7 @@ int __copy_in_user(void __user *dst, con
9781
9782 case 4: {
9783 u32 tmp;
9784- __get_user_asm(tmp, (u32 __user *)src,
9785+ __get_user_asm(tmp, (const u32 __user *)src,
9786 ret, "l", "k", "=r", 4);
9787 if (likely(!ret))
9788 __put_user_asm(tmp, (u32 __user *)dst,
9789@@ -195,7 +286,7 @@ int __copy_in_user(void __user *dst, con
9790 }
9791 case 8: {
9792 u64 tmp;
9793- __get_user_asm(tmp, (u64 __user *)src,
9794+ __get_user_asm(tmp, (const u64 __user *)src,
9795 ret, "q", "", "=r", 8);
9796 if (likely(!ret))
9797 __put_user_asm(tmp, (u64 __user *)dst,
9798@@ -203,8 +294,16 @@ int __copy_in_user(void __user *dst, con
9799 return ret;
9800 }
9801 default:
9802- return copy_user_generic((__force void *)dst,
9803- (__force void *)src, size);
9804+
9805+#ifdef CONFIG_PAX_MEMORY_UDEREF
9806+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9807+ src += PAX_USER_SHADOW_BASE;
9808+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9809+ dst += PAX_USER_SHADOW_BASE;
9810+#endif
9811+
9812+ return copy_user_generic((__force_kernel void *)dst,
9813+ (__force_kernel const void *)src, size);
9814 }
9815 }
9816
9817@@ -221,33 +320,72 @@ __must_check unsigned long __clear_user(
9818 static __must_check __always_inline int
9819 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
9820 {
9821- return copy_user_generic(dst, (__force const void *)src, size);
9822+ pax_track_stack();
9823+
9824+ if ((int)size < 0)
9825+ return size;
9826+
9827+#ifdef CONFIG_PAX_MEMORY_UDEREF
9828+ if (!__access_ok(VERIFY_READ, src, size))
9829+ return size;
9830+
9831+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9832+ src += PAX_USER_SHADOW_BASE;
9833+#endif
9834+
9835+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
9836 }
9837
9838-static __must_check __always_inline int
9839+static __must_check __always_inline unsigned long
9840 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
9841 {
9842- return copy_user_generic((__force void *)dst, src, size);
9843+ if ((int)size < 0)
9844+ return size;
9845+
9846+#ifdef CONFIG_PAX_MEMORY_UDEREF
9847+ if (!__access_ok(VERIFY_WRITE, dst, size))
9848+ return size;
9849+
9850+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9851+ dst += PAX_USER_SHADOW_BASE;
9852+#endif
9853+
9854+ return copy_user_generic((__force_kernel void *)dst, src, size);
9855 }
9856
9857-extern long __copy_user_nocache(void *dst, const void __user *src,
9858+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
9859 unsigned size, int zerorest);
9860
9861-static inline int
9862-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9863+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9864 {
9865 might_sleep();
9866+
9867+ if ((int)size < 0)
9868+ return size;
9869+
9870+#ifdef CONFIG_PAX_MEMORY_UDEREF
9871+ if (!__access_ok(VERIFY_READ, src, size))
9872+ return size;
9873+#endif
9874+
9875 return __copy_user_nocache(dst, src, size, 1);
9876 }
9877
9878-static inline int
9879-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9880+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9881 unsigned size)
9882 {
9883+ if ((int)size < 0)
9884+ return size;
9885+
9886+#ifdef CONFIG_PAX_MEMORY_UDEREF
9887+ if (!__access_ok(VERIFY_READ, src, size))
9888+ return size;
9889+#endif
9890+
9891 return __copy_user_nocache(dst, src, size, 0);
9892 }
9893
9894-unsigned long
9895-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
9896+extern unsigned long
9897+copy_user_handle_tail(char __user *to, char __user *from, unsigned len, unsigned zerorest);
9898
9899 #endif /* _ASM_X86_UACCESS_64_H */
9900diff -urNp linux-3.0.4/arch/x86/include/asm/uaccess.h linux-3.0.4/arch/x86/include/asm/uaccess.h
9901--- linux-3.0.4/arch/x86/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
9902+++ linux-3.0.4/arch/x86/include/asm/uaccess.h 2011-10-06 04:17:55.000000000 -0400
9903@@ -7,12 +7,15 @@
9904 #include <linux/compiler.h>
9905 #include <linux/thread_info.h>
9906 #include <linux/string.h>
9907+#include <linux/sched.h>
9908 #include <asm/asm.h>
9909 #include <asm/page.h>
9910
9911 #define VERIFY_READ 0
9912 #define VERIFY_WRITE 1
9913
9914+extern void check_object_size(const void *ptr, unsigned long n, bool to);
9915+
9916 /*
9917 * The fs value determines whether argument validity checking should be
9918 * performed or not. If get_fs() == USER_DS, checking is performed, with
9919@@ -28,7 +31,12 @@
9920
9921 #define get_ds() (KERNEL_DS)
9922 #define get_fs() (current_thread_info()->addr_limit)
9923+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9924+void __set_fs(mm_segment_t x);
9925+void set_fs(mm_segment_t x);
9926+#else
9927 #define set_fs(x) (current_thread_info()->addr_limit = (x))
9928+#endif
9929
9930 #define segment_eq(a, b) ((a).seg == (b).seg)
9931
9932@@ -76,7 +84,33 @@
9933 * checks that the pointer is in the user space range - after calling
9934 * this function, memory access functions may still return -EFAULT.
9935 */
9936-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9937+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9938+#define access_ok(type, addr, size) \
9939+({ \
9940+ long __size = size; \
9941+ unsigned long __addr = (unsigned long)addr; \
9942+ unsigned long __addr_ao = __addr & PAGE_MASK; \
9943+ unsigned long __end_ao = __addr + __size - 1; \
9944+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
9945+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
9946+ while(__addr_ao <= __end_ao) { \
9947+ char __c_ao; \
9948+ __addr_ao += PAGE_SIZE; \
9949+ if (__size > PAGE_SIZE) \
9950+ cond_resched(); \
9951+ if (__get_user(__c_ao, (char __user *)__addr)) \
9952+ break; \
9953+ if (type != VERIFY_WRITE) { \
9954+ __addr = __addr_ao; \
9955+ continue; \
9956+ } \
9957+ if (__put_user(__c_ao, (char __user *)__addr)) \
9958+ break; \
9959+ __addr = __addr_ao; \
9960+ } \
9961+ } \
9962+ __ret_ao; \
9963+})
9964
9965 /*
9966 * The exception table consists of pairs of addresses: the first is the
9967@@ -182,12 +216,20 @@ extern int __get_user_bad(void);
9968 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
9969 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
9970
9971-
9972+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9973+#define __copyuser_seg "gs;"
9974+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
9975+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
9976+#else
9977+#define __copyuser_seg
9978+#define __COPYUSER_SET_ES
9979+#define __COPYUSER_RESTORE_ES
9980+#endif
9981
9982 #ifdef CONFIG_X86_32
9983 #define __put_user_asm_u64(x, addr, err, errret) \
9984- asm volatile("1: movl %%eax,0(%2)\n" \
9985- "2: movl %%edx,4(%2)\n" \
9986+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
9987+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
9988 "3:\n" \
9989 ".section .fixup,\"ax\"\n" \
9990 "4: movl %3,%0\n" \
9991@@ -199,8 +241,8 @@ extern int __get_user_bad(void);
9992 : "A" (x), "r" (addr), "i" (errret), "0" (err))
9993
9994 #define __put_user_asm_ex_u64(x, addr) \
9995- asm volatile("1: movl %%eax,0(%1)\n" \
9996- "2: movl %%edx,4(%1)\n" \
9997+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
9998+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
9999 "3:\n" \
10000 _ASM_EXTABLE(1b, 2b - 1b) \
10001 _ASM_EXTABLE(2b, 3b - 2b) \
10002@@ -252,7 +294,7 @@ extern void __put_user_8(void);
10003 __typeof__(*(ptr)) __pu_val; \
10004 __chk_user_ptr(ptr); \
10005 might_fault(); \
10006- __pu_val = x; \
10007+ __pu_val = (x); \
10008 switch (sizeof(*(ptr))) { \
10009 case 1: \
10010 __put_user_x(1, __pu_val, ptr, __ret_pu); \
10011@@ -373,7 +415,7 @@ do { \
10012 } while (0)
10013
10014 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10015- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
10016+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
10017 "2:\n" \
10018 ".section .fixup,\"ax\"\n" \
10019 "3: mov %3,%0\n" \
10020@@ -381,7 +423,7 @@ do { \
10021 " jmp 2b\n" \
10022 ".previous\n" \
10023 _ASM_EXTABLE(1b, 3b) \
10024- : "=r" (err), ltype(x) \
10025+ : "=r" (err), ltype (x) \
10026 : "m" (__m(addr)), "i" (errret), "0" (err))
10027
10028 #define __get_user_size_ex(x, ptr, size) \
10029@@ -406,7 +448,7 @@ do { \
10030 } while (0)
10031
10032 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
10033- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
10034+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
10035 "2:\n" \
10036 _ASM_EXTABLE(1b, 2b - 1b) \
10037 : ltype(x) : "m" (__m(addr)))
10038@@ -423,13 +465,24 @@ do { \
10039 int __gu_err; \
10040 unsigned long __gu_val; \
10041 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
10042- (x) = (__force __typeof__(*(ptr)))__gu_val; \
10043+ (x) = (__typeof__(*(ptr)))__gu_val; \
10044 __gu_err; \
10045 })
10046
10047 /* FIXME: this hack is definitely wrong -AK */
10048 struct __large_struct { unsigned long buf[100]; };
10049-#define __m(x) (*(struct __large_struct __user *)(x))
10050+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10051+#define ____m(x) \
10052+({ \
10053+ unsigned long ____x = (unsigned long)(x); \
10054+ if (____x < PAX_USER_SHADOW_BASE) \
10055+ ____x += PAX_USER_SHADOW_BASE; \
10056+ (void __user *)____x; \
10057+})
10058+#else
10059+#define ____m(x) (x)
10060+#endif
10061+#define __m(x) (*(struct __large_struct __user *)____m(x))
10062
10063 /*
10064 * Tell gcc we read from memory instead of writing: this is because
10065@@ -437,7 +490,7 @@ struct __large_struct { unsigned long bu
10066 * aliasing issues.
10067 */
10068 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10069- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
10070+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
10071 "2:\n" \
10072 ".section .fixup,\"ax\"\n" \
10073 "3: mov %3,%0\n" \
10074@@ -445,10 +498,10 @@ struct __large_struct { unsigned long bu
10075 ".previous\n" \
10076 _ASM_EXTABLE(1b, 3b) \
10077 : "=r"(err) \
10078- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
10079+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
10080
10081 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
10082- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
10083+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
10084 "2:\n" \
10085 _ASM_EXTABLE(1b, 2b - 1b) \
10086 : : ltype(x), "m" (__m(addr)))
10087@@ -487,8 +540,12 @@ struct __large_struct { unsigned long bu
10088 * On error, the variable @x is set to zero.
10089 */
10090
10091+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10092+#define __get_user(x, ptr) get_user((x), (ptr))
10093+#else
10094 #define __get_user(x, ptr) \
10095 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
10096+#endif
10097
10098 /**
10099 * __put_user: - Write a simple value into user space, with less checking.
10100@@ -510,8 +567,12 @@ struct __large_struct { unsigned long bu
10101 * Returns zero on success, or -EFAULT on error.
10102 */
10103
10104+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10105+#define __put_user(x, ptr) put_user((x), (ptr))
10106+#else
10107 #define __put_user(x, ptr) \
10108 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
10109+#endif
10110
10111 #define __get_user_unaligned __get_user
10112 #define __put_user_unaligned __put_user
10113@@ -529,7 +590,7 @@ struct __large_struct { unsigned long bu
10114 #define get_user_ex(x, ptr) do { \
10115 unsigned long __gue_val; \
10116 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
10117- (x) = (__force __typeof__(*(ptr)))__gue_val; \
10118+ (x) = (__typeof__(*(ptr)))__gue_val; \
10119 } while (0)
10120
10121 #ifdef CONFIG_X86_WP_WORKS_OK
10122diff -urNp linux-3.0.4/arch/x86/include/asm/vdso.h linux-3.0.4/arch/x86/include/asm/vdso.h
10123--- linux-3.0.4/arch/x86/include/asm/vdso.h 2011-07-21 22:17:23.000000000 -0400
10124+++ linux-3.0.4/arch/x86/include/asm/vdso.h 2011-10-06 04:17:55.000000000 -0400
10125@@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
10126 #define VDSO32_SYMBOL(base, name) \
10127 ({ \
10128 extern const char VDSO32_##name[]; \
10129- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
10130+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
10131 })
10132 #endif
10133
10134diff -urNp linux-3.0.4/arch/x86/include/asm/x86_init.h linux-3.0.4/arch/x86/include/asm/x86_init.h
10135--- linux-3.0.4/arch/x86/include/asm/x86_init.h 2011-07-21 22:17:23.000000000 -0400
10136+++ linux-3.0.4/arch/x86/include/asm/x86_init.h 2011-08-23 21:47:55.000000000 -0400
10137@@ -28,7 +28,7 @@ struct x86_init_mpparse {
10138 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
10139 void (*find_smp_config)(void);
10140 void (*get_smp_config)(unsigned int early);
10141-};
10142+} __no_const;
10143
10144 /**
10145 * struct x86_init_resources - platform specific resource related ops
10146@@ -42,7 +42,7 @@ struct x86_init_resources {
10147 void (*probe_roms)(void);
10148 void (*reserve_resources)(void);
10149 char *(*memory_setup)(void);
10150-};
10151+} __no_const;
10152
10153 /**
10154 * struct x86_init_irqs - platform specific interrupt setup
10155@@ -55,7 +55,7 @@ struct x86_init_irqs {
10156 void (*pre_vector_init)(void);
10157 void (*intr_init)(void);
10158 void (*trap_init)(void);
10159-};
10160+} __no_const;
10161
10162 /**
10163 * struct x86_init_oem - oem platform specific customizing functions
10164@@ -65,7 +65,7 @@ struct x86_init_irqs {
10165 struct x86_init_oem {
10166 void (*arch_setup)(void);
10167 void (*banner)(void);
10168-};
10169+} __no_const;
10170
10171 /**
10172 * struct x86_init_mapping - platform specific initial kernel pagetable setup
10173@@ -76,7 +76,7 @@ struct x86_init_oem {
10174 */
10175 struct x86_init_mapping {
10176 void (*pagetable_reserve)(u64 start, u64 end);
10177-};
10178+} __no_const;
10179
10180 /**
10181 * struct x86_init_paging - platform specific paging functions
10182@@ -86,7 +86,7 @@ struct x86_init_mapping {
10183 struct x86_init_paging {
10184 void (*pagetable_setup_start)(pgd_t *base);
10185 void (*pagetable_setup_done)(pgd_t *base);
10186-};
10187+} __no_const;
10188
10189 /**
10190 * struct x86_init_timers - platform specific timer setup
10191@@ -101,7 +101,7 @@ struct x86_init_timers {
10192 void (*tsc_pre_init)(void);
10193 void (*timer_init)(void);
10194 void (*wallclock_init)(void);
10195-};
10196+} __no_const;
10197
10198 /**
10199 * struct x86_init_iommu - platform specific iommu setup
10200@@ -109,7 +109,7 @@ struct x86_init_timers {
10201 */
10202 struct x86_init_iommu {
10203 int (*iommu_init)(void);
10204-};
10205+} __no_const;
10206
10207 /**
10208 * struct x86_init_pci - platform specific pci init functions
10209@@ -123,7 +123,7 @@ struct x86_init_pci {
10210 int (*init)(void);
10211 void (*init_irq)(void);
10212 void (*fixup_irqs)(void);
10213-};
10214+} __no_const;
10215
10216 /**
10217 * struct x86_init_ops - functions for platform specific setup
10218@@ -139,7 +139,7 @@ struct x86_init_ops {
10219 struct x86_init_timers timers;
10220 struct x86_init_iommu iommu;
10221 struct x86_init_pci pci;
10222-};
10223+} __no_const;
10224
10225 /**
10226 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
10227@@ -147,7 +147,7 @@ struct x86_init_ops {
10228 */
10229 struct x86_cpuinit_ops {
10230 void (*setup_percpu_clockev)(void);
10231-};
10232+} __no_const;
10233
10234 /**
10235 * struct x86_platform_ops - platform specific runtime functions
10236@@ -166,7 +166,7 @@ struct x86_platform_ops {
10237 bool (*is_untracked_pat_range)(u64 start, u64 end);
10238 void (*nmi_init)(void);
10239 int (*i8042_detect)(void);
10240-};
10241+} __no_const;
10242
10243 struct pci_dev;
10244
10245@@ -174,7 +174,7 @@ struct x86_msi_ops {
10246 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
10247 void (*teardown_msi_irq)(unsigned int irq);
10248 void (*teardown_msi_irqs)(struct pci_dev *dev);
10249-};
10250+} __no_const;
10251
10252 extern struct x86_init_ops x86_init;
10253 extern struct x86_cpuinit_ops x86_cpuinit;
10254diff -urNp linux-3.0.4/arch/x86/include/asm/xsave.h linux-3.0.4/arch/x86/include/asm/xsave.h
10255--- linux-3.0.4/arch/x86/include/asm/xsave.h 2011-07-21 22:17:23.000000000 -0400
10256+++ linux-3.0.4/arch/x86/include/asm/xsave.h 2011-10-06 04:17:55.000000000 -0400
10257@@ -65,6 +65,11 @@ static inline int xsave_user(struct xsav
10258 {
10259 int err;
10260
10261+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10262+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
10263+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
10264+#endif
10265+
10266 /*
10267 * Clear the xsave header first, so that reserved fields are
10268 * initialized to zero.
10269@@ -96,10 +101,15 @@ static inline int xsave_user(struct xsav
10270 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
10271 {
10272 int err;
10273- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
10274+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
10275 u32 lmask = mask;
10276 u32 hmask = mask >> 32;
10277
10278+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10279+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
10280+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
10281+#endif
10282+
10283 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
10284 "2:\n"
10285 ".section .fixup,\"ax\"\n"
10286diff -urNp linux-3.0.4/arch/x86/Kconfig linux-3.0.4/arch/x86/Kconfig
10287--- linux-3.0.4/arch/x86/Kconfig 2011-07-21 22:17:23.000000000 -0400
10288+++ linux-3.0.4/arch/x86/Kconfig 2011-09-17 00:58:36.000000000 -0400
10289@@ -229,7 +229,7 @@ config X86_HT
10290
10291 config X86_32_LAZY_GS
10292 def_bool y
10293- depends on X86_32 && !CC_STACKPROTECTOR
10294+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10295
10296 config ARCH_HWEIGHT_CFLAGS
10297 string
10298@@ -1018,7 +1018,7 @@ choice
10299
10300 config NOHIGHMEM
10301 bool "off"
10302- depends on !X86_NUMAQ
10303+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10304 ---help---
10305 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10306 However, the address space of 32-bit x86 processors is only 4
10307@@ -1055,7 +1055,7 @@ config NOHIGHMEM
10308
10309 config HIGHMEM4G
10310 bool "4GB"
10311- depends on !X86_NUMAQ
10312+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10313 ---help---
10314 Select this if you have a 32-bit processor and between 1 and 4
10315 gigabytes of physical RAM.
10316@@ -1109,7 +1109,7 @@ config PAGE_OFFSET
10317 hex
10318 default 0xB0000000 if VMSPLIT_3G_OPT
10319 default 0x80000000 if VMSPLIT_2G
10320- default 0x78000000 if VMSPLIT_2G_OPT
10321+ default 0x70000000 if VMSPLIT_2G_OPT
10322 default 0x40000000 if VMSPLIT_1G
10323 default 0xC0000000
10324 depends on X86_32
10325@@ -1483,6 +1483,7 @@ config SECCOMP
10326
10327 config CC_STACKPROTECTOR
10328 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
10329+ depends on X86_64 || !PAX_MEMORY_UDEREF
10330 ---help---
10331 This option turns on the -fstack-protector GCC feature. This
10332 feature puts, at the beginning of functions, a canary value on
10333@@ -1540,6 +1541,7 @@ config KEXEC_JUMP
10334 config PHYSICAL_START
10335 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
10336 default "0x1000000"
10337+ range 0x400000 0x40000000
10338 ---help---
10339 This gives the physical address where the kernel is loaded.
10340
10341@@ -1603,6 +1605,7 @@ config X86_NEED_RELOCS
10342 config PHYSICAL_ALIGN
10343 hex "Alignment value to which kernel should be aligned" if X86_32
10344 default "0x1000000"
10345+ range 0x400000 0x1000000 if PAX_KERNEXEC
10346 range 0x2000 0x1000000
10347 ---help---
10348 This value puts the alignment restrictions on physical address
10349@@ -1634,9 +1637,10 @@ config HOTPLUG_CPU
10350 Say N if you want to disable CPU hotplug.
10351
10352 config COMPAT_VDSO
10353- def_bool y
10354+ def_bool n
10355 prompt "Compat VDSO support"
10356 depends on X86_32 || IA32_EMULATION
10357+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
10358 ---help---
10359 Map the 32-bit VDSO to the predictable old-style address too.
10360
10361diff -urNp linux-3.0.4/arch/x86/Kconfig.cpu linux-3.0.4/arch/x86/Kconfig.cpu
10362--- linux-3.0.4/arch/x86/Kconfig.cpu 2011-07-21 22:17:23.000000000 -0400
10363+++ linux-3.0.4/arch/x86/Kconfig.cpu 2011-08-23 21:47:55.000000000 -0400
10364@@ -338,7 +338,7 @@ config X86_PPRO_FENCE
10365
10366 config X86_F00F_BUG
10367 def_bool y
10368- depends on M586MMX || M586TSC || M586 || M486 || M386
10369+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
10370
10371 config X86_INVD_BUG
10372 def_bool y
10373@@ -362,7 +362,7 @@ config X86_POPAD_OK
10374
10375 config X86_ALIGNMENT_16
10376 def_bool y
10377- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10378+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10379
10380 config X86_INTEL_USERCOPY
10381 def_bool y
10382@@ -408,7 +408,7 @@ config X86_CMPXCHG64
10383 # generates cmov.
10384 config X86_CMOV
10385 def_bool y
10386- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10387+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10388
10389 config X86_MINIMUM_CPU_FAMILY
10390 int
10391diff -urNp linux-3.0.4/arch/x86/Kconfig.debug linux-3.0.4/arch/x86/Kconfig.debug
10392--- linux-3.0.4/arch/x86/Kconfig.debug 2011-07-21 22:17:23.000000000 -0400
10393+++ linux-3.0.4/arch/x86/Kconfig.debug 2011-08-23 21:47:55.000000000 -0400
10394@@ -81,7 +81,7 @@ config X86_PTDUMP
10395 config DEBUG_RODATA
10396 bool "Write protect kernel read-only data structures"
10397 default y
10398- depends on DEBUG_KERNEL
10399+ depends on DEBUG_KERNEL && BROKEN
10400 ---help---
10401 Mark the kernel read-only data as write-protected in the pagetables,
10402 in order to catch accidental (and incorrect) writes to such const
10403@@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
10404
10405 config DEBUG_SET_MODULE_RONX
10406 bool "Set loadable kernel module data as NX and text as RO"
10407- depends on MODULES
10408+ depends on MODULES && BROKEN
10409 ---help---
10410 This option helps catch unintended modifications to loadable
10411 kernel module's text and read-only data. It also prevents execution
10412diff -urNp linux-3.0.4/arch/x86/kernel/acpi/realmode/Makefile linux-3.0.4/arch/x86/kernel/acpi/realmode/Makefile
10413--- linux-3.0.4/arch/x86/kernel/acpi/realmode/Makefile 2011-07-21 22:17:23.000000000 -0400
10414+++ linux-3.0.4/arch/x86/kernel/acpi/realmode/Makefile 2011-08-23 21:47:55.000000000 -0400
10415@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
10416 $(call cc-option, -fno-stack-protector) \
10417 $(call cc-option, -mpreferred-stack-boundary=2)
10418 KBUILD_CFLAGS += $(call cc-option, -m32)
10419+ifdef CONSTIFY_PLUGIN
10420+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
10421+endif
10422 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10423 GCOV_PROFILE := n
10424
10425diff -urNp linux-3.0.4/arch/x86/kernel/acpi/realmode/wakeup.S linux-3.0.4/arch/x86/kernel/acpi/realmode/wakeup.S
10426--- linux-3.0.4/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-21 22:17:23.000000000 -0400
10427+++ linux-3.0.4/arch/x86/kernel/acpi/realmode/wakeup.S 2011-08-23 21:48:14.000000000 -0400
10428@@ -108,6 +108,9 @@ wakeup_code:
10429 /* Do any other stuff... */
10430
10431 #ifndef CONFIG_64BIT
10432+ /* Recheck NX bit overrides (64bit path does this in trampoline */
10433+ call verify_cpu
10434+
10435 /* This could also be done in C code... */
10436 movl pmode_cr3, %eax
10437 movl %eax, %cr3
10438@@ -131,6 +134,7 @@ wakeup_code:
10439 movl pmode_cr0, %eax
10440 movl %eax, %cr0
10441 jmp pmode_return
10442+# include "../../verify_cpu.S"
10443 #else
10444 pushw $0
10445 pushw trampoline_segment
10446diff -urNp linux-3.0.4/arch/x86/kernel/acpi/sleep.c linux-3.0.4/arch/x86/kernel/acpi/sleep.c
10447--- linux-3.0.4/arch/x86/kernel/acpi/sleep.c 2011-07-21 22:17:23.000000000 -0400
10448+++ linux-3.0.4/arch/x86/kernel/acpi/sleep.c 2011-08-23 21:47:55.000000000 -0400
10449@@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
10450 header->trampoline_segment = trampoline_address() >> 4;
10451 #ifdef CONFIG_SMP
10452 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
10453+
10454+ pax_open_kernel();
10455 early_gdt_descr.address =
10456 (unsigned long)get_cpu_gdt_table(smp_processor_id());
10457+ pax_close_kernel();
10458+
10459 initial_gs = per_cpu_offset(smp_processor_id());
10460 #endif
10461 initial_code = (unsigned long)wakeup_long64;
10462diff -urNp linux-3.0.4/arch/x86/kernel/acpi/wakeup_32.S linux-3.0.4/arch/x86/kernel/acpi/wakeup_32.S
10463--- linux-3.0.4/arch/x86/kernel/acpi/wakeup_32.S 2011-07-21 22:17:23.000000000 -0400
10464+++ linux-3.0.4/arch/x86/kernel/acpi/wakeup_32.S 2011-08-23 21:47:55.000000000 -0400
10465@@ -30,13 +30,11 @@ wakeup_pmode_return:
10466 # and restore the stack ... but you need gdt for this to work
10467 movl saved_context_esp, %esp
10468
10469- movl %cs:saved_magic, %eax
10470- cmpl $0x12345678, %eax
10471+ cmpl $0x12345678, saved_magic
10472 jne bogus_magic
10473
10474 # jump to place where we left off
10475- movl saved_eip, %eax
10476- jmp *%eax
10477+ jmp *(saved_eip)
10478
10479 bogus_magic:
10480 jmp bogus_magic
10481diff -urNp linux-3.0.4/arch/x86/kernel/alternative.c linux-3.0.4/arch/x86/kernel/alternative.c
10482--- linux-3.0.4/arch/x86/kernel/alternative.c 2011-07-21 22:17:23.000000000 -0400
10483+++ linux-3.0.4/arch/x86/kernel/alternative.c 2011-08-23 21:47:55.000000000 -0400
10484@@ -313,7 +313,7 @@ static void alternatives_smp_lock(const
10485 if (!*poff || ptr < text || ptr >= text_end)
10486 continue;
10487 /* turn DS segment override prefix into lock prefix */
10488- if (*ptr == 0x3e)
10489+ if (*ktla_ktva(ptr) == 0x3e)
10490 text_poke(ptr, ((unsigned char []){0xf0}), 1);
10491 };
10492 mutex_unlock(&text_mutex);
10493@@ -334,7 +334,7 @@ static void alternatives_smp_unlock(cons
10494 if (!*poff || ptr < text || ptr >= text_end)
10495 continue;
10496 /* turn lock prefix into DS segment override prefix */
10497- if (*ptr == 0xf0)
10498+ if (*ktla_ktva(ptr) == 0xf0)
10499 text_poke(ptr, ((unsigned char []){0x3E}), 1);
10500 };
10501 mutex_unlock(&text_mutex);
10502@@ -503,7 +503,7 @@ void __init_or_module apply_paravirt(str
10503
10504 BUG_ON(p->len > MAX_PATCH_LEN);
10505 /* prep the buffer with the original instructions */
10506- memcpy(insnbuf, p->instr, p->len);
10507+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
10508 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
10509 (unsigned long)p->instr, p->len);
10510
10511@@ -571,7 +571,7 @@ void __init alternative_instructions(voi
10512 if (smp_alt_once)
10513 free_init_pages("SMP alternatives",
10514 (unsigned long)__smp_locks,
10515- (unsigned long)__smp_locks_end);
10516+ PAGE_ALIGN((unsigned long)__smp_locks_end));
10517
10518 restart_nmi();
10519 }
10520@@ -588,13 +588,17 @@ void __init alternative_instructions(voi
10521 * instructions. And on the local CPU you need to be protected again NMI or MCE
10522 * handlers seeing an inconsistent instruction while you patch.
10523 */
10524-void *__init_or_module text_poke_early(void *addr, const void *opcode,
10525+void *__kprobes text_poke_early(void *addr, const void *opcode,
10526 size_t len)
10527 {
10528 unsigned long flags;
10529 local_irq_save(flags);
10530- memcpy(addr, opcode, len);
10531+
10532+ pax_open_kernel();
10533+ memcpy(ktla_ktva(addr), opcode, len);
10534 sync_core();
10535+ pax_close_kernel();
10536+
10537 local_irq_restore(flags);
10538 /* Could also do a CLFLUSH here to speed up CPU recovery; but
10539 that causes hangs on some VIA CPUs. */
10540@@ -616,36 +620,22 @@ void *__init_or_module text_poke_early(v
10541 */
10542 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
10543 {
10544- unsigned long flags;
10545- char *vaddr;
10546+ unsigned char *vaddr = ktla_ktva(addr);
10547 struct page *pages[2];
10548- int i;
10549+ size_t i;
10550
10551 if (!core_kernel_text((unsigned long)addr)) {
10552- pages[0] = vmalloc_to_page(addr);
10553- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
10554+ pages[0] = vmalloc_to_page(vaddr);
10555+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
10556 } else {
10557- pages[0] = virt_to_page(addr);
10558+ pages[0] = virt_to_page(vaddr);
10559 WARN_ON(!PageReserved(pages[0]));
10560- pages[1] = virt_to_page(addr + PAGE_SIZE);
10561+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
10562 }
10563 BUG_ON(!pages[0]);
10564- local_irq_save(flags);
10565- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
10566- if (pages[1])
10567- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
10568- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
10569- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
10570- clear_fixmap(FIX_TEXT_POKE0);
10571- if (pages[1])
10572- clear_fixmap(FIX_TEXT_POKE1);
10573- local_flush_tlb();
10574- sync_core();
10575- /* Could also do a CLFLUSH here to speed up CPU recovery; but
10576- that causes hangs on some VIA CPUs. */
10577+ text_poke_early(addr, opcode, len);
10578 for (i = 0; i < len; i++)
10579- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
10580- local_irq_restore(flags);
10581+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
10582 return addr;
10583 }
10584
10585diff -urNp linux-3.0.4/arch/x86/kernel/apic/apic.c linux-3.0.4/arch/x86/kernel/apic/apic.c
10586--- linux-3.0.4/arch/x86/kernel/apic/apic.c 2011-07-21 22:17:23.000000000 -0400
10587+++ linux-3.0.4/arch/x86/kernel/apic/apic.c 2011-08-23 21:48:14.000000000 -0400
10588@@ -173,7 +173,7 @@ int first_system_vector = 0xfe;
10589 /*
10590 * Debug level, exported for io_apic.c
10591 */
10592-unsigned int apic_verbosity;
10593+int apic_verbosity;
10594
10595 int pic_mode;
10596
10597@@ -1834,7 +1834,7 @@ void smp_error_interrupt(struct pt_regs
10598 apic_write(APIC_ESR, 0);
10599 v1 = apic_read(APIC_ESR);
10600 ack_APIC_irq();
10601- atomic_inc(&irq_err_count);
10602+ atomic_inc_unchecked(&irq_err_count);
10603
10604 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
10605 smp_processor_id(), v0 , v1);
10606@@ -2190,6 +2190,8 @@ static int __cpuinit apic_cluster_num(vo
10607 u16 *bios_cpu_apicid;
10608 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
10609
10610+ pax_track_stack();
10611+
10612 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
10613 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
10614
10615diff -urNp linux-3.0.4/arch/x86/kernel/apic/io_apic.c linux-3.0.4/arch/x86/kernel/apic/io_apic.c
10616--- linux-3.0.4/arch/x86/kernel/apic/io_apic.c 2011-07-21 22:17:23.000000000 -0400
10617+++ linux-3.0.4/arch/x86/kernel/apic/io_apic.c 2011-08-23 21:47:55.000000000 -0400
10618@@ -1028,7 +1028,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
10619 }
10620 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
10621
10622-void lock_vector_lock(void)
10623+void lock_vector_lock(void) __acquires(vector_lock)
10624 {
10625 /* Used to the online set of cpus does not change
10626 * during assign_irq_vector.
10627@@ -1036,7 +1036,7 @@ void lock_vector_lock(void)
10628 raw_spin_lock(&vector_lock);
10629 }
10630
10631-void unlock_vector_lock(void)
10632+void unlock_vector_lock(void) __releases(vector_lock)
10633 {
10634 raw_spin_unlock(&vector_lock);
10635 }
10636@@ -2364,7 +2364,7 @@ static void ack_apic_edge(struct irq_dat
10637 ack_APIC_irq();
10638 }
10639
10640-atomic_t irq_mis_count;
10641+atomic_unchecked_t irq_mis_count;
10642
10643 /*
10644 * IO-APIC versions below 0x20 don't support EOI register.
10645@@ -2472,7 +2472,7 @@ static void ack_apic_level(struct irq_da
10646 * at the cpu.
10647 */
10648 if (!(v & (1 << (i & 0x1f)))) {
10649- atomic_inc(&irq_mis_count);
10650+ atomic_inc_unchecked(&irq_mis_count);
10651
10652 eoi_ioapic_irq(irq, cfg);
10653 }
10654diff -urNp linux-3.0.4/arch/x86/kernel/apm_32.c linux-3.0.4/arch/x86/kernel/apm_32.c
10655--- linux-3.0.4/arch/x86/kernel/apm_32.c 2011-07-21 22:17:23.000000000 -0400
10656+++ linux-3.0.4/arch/x86/kernel/apm_32.c 2011-08-23 21:47:55.000000000 -0400
10657@@ -413,7 +413,7 @@ static DEFINE_MUTEX(apm_mutex);
10658 * This is for buggy BIOS's that refer to (real mode) segment 0x40
10659 * even though they are called in protected mode.
10660 */
10661-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
10662+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
10663 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
10664
10665 static const char driver_version[] = "1.16ac"; /* no spaces */
10666@@ -591,7 +591,10 @@ static long __apm_bios_call(void *_call)
10667 BUG_ON(cpu != 0);
10668 gdt = get_cpu_gdt_table(cpu);
10669 save_desc_40 = gdt[0x40 / 8];
10670+
10671+ pax_open_kernel();
10672 gdt[0x40 / 8] = bad_bios_desc;
10673+ pax_close_kernel();
10674
10675 apm_irq_save(flags);
10676 APM_DO_SAVE_SEGS;
10677@@ -600,7 +603,11 @@ static long __apm_bios_call(void *_call)
10678 &call->esi);
10679 APM_DO_RESTORE_SEGS;
10680 apm_irq_restore(flags);
10681+
10682+ pax_open_kernel();
10683 gdt[0x40 / 8] = save_desc_40;
10684+ pax_close_kernel();
10685+
10686 put_cpu();
10687
10688 return call->eax & 0xff;
10689@@ -667,7 +674,10 @@ static long __apm_bios_call_simple(void
10690 BUG_ON(cpu != 0);
10691 gdt = get_cpu_gdt_table(cpu);
10692 save_desc_40 = gdt[0x40 / 8];
10693+
10694+ pax_open_kernel();
10695 gdt[0x40 / 8] = bad_bios_desc;
10696+ pax_close_kernel();
10697
10698 apm_irq_save(flags);
10699 APM_DO_SAVE_SEGS;
10700@@ -675,7 +685,11 @@ static long __apm_bios_call_simple(void
10701 &call->eax);
10702 APM_DO_RESTORE_SEGS;
10703 apm_irq_restore(flags);
10704+
10705+ pax_open_kernel();
10706 gdt[0x40 / 8] = save_desc_40;
10707+ pax_close_kernel();
10708+
10709 put_cpu();
10710 return error;
10711 }
10712@@ -2349,12 +2363,15 @@ static int __init apm_init(void)
10713 * code to that CPU.
10714 */
10715 gdt = get_cpu_gdt_table(0);
10716+
10717+ pax_open_kernel();
10718 set_desc_base(&gdt[APM_CS >> 3],
10719 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
10720 set_desc_base(&gdt[APM_CS_16 >> 3],
10721 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
10722 set_desc_base(&gdt[APM_DS >> 3],
10723 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
10724+ pax_close_kernel();
10725
10726 proc_create("apm", 0, NULL, &apm_file_ops);
10727
10728diff -urNp linux-3.0.4/arch/x86/kernel/asm-offsets_64.c linux-3.0.4/arch/x86/kernel/asm-offsets_64.c
10729--- linux-3.0.4/arch/x86/kernel/asm-offsets_64.c 2011-07-21 22:17:23.000000000 -0400
10730+++ linux-3.0.4/arch/x86/kernel/asm-offsets_64.c 2011-08-23 21:47:55.000000000 -0400
10731@@ -69,6 +69,7 @@ int main(void)
10732 BLANK();
10733 #undef ENTRY
10734
10735+ DEFINE(TSS_size, sizeof(struct tss_struct));
10736 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
10737 BLANK();
10738
10739diff -urNp linux-3.0.4/arch/x86/kernel/asm-offsets.c linux-3.0.4/arch/x86/kernel/asm-offsets.c
10740--- linux-3.0.4/arch/x86/kernel/asm-offsets.c 2011-07-21 22:17:23.000000000 -0400
10741+++ linux-3.0.4/arch/x86/kernel/asm-offsets.c 2011-08-23 21:47:55.000000000 -0400
10742@@ -33,6 +33,8 @@ void common(void) {
10743 OFFSET(TI_status, thread_info, status);
10744 OFFSET(TI_addr_limit, thread_info, addr_limit);
10745 OFFSET(TI_preempt_count, thread_info, preempt_count);
10746+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
10747+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
10748
10749 BLANK();
10750 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
10751@@ -53,8 +55,26 @@ void common(void) {
10752 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
10753 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
10754 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
10755+
10756+#ifdef CONFIG_PAX_KERNEXEC
10757+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
10758+#endif
10759+
10760+#ifdef CONFIG_PAX_MEMORY_UDEREF
10761+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
10762+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
10763+#ifdef CONFIG_X86_64
10764+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
10765+#endif
10766 #endif
10767
10768+#endif
10769+
10770+ BLANK();
10771+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
10772+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
10773+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
10774+
10775 #ifdef CONFIG_XEN
10776 BLANK();
10777 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
10778diff -urNp linux-3.0.4/arch/x86/kernel/cpu/amd.c linux-3.0.4/arch/x86/kernel/cpu/amd.c
10779--- linux-3.0.4/arch/x86/kernel/cpu/amd.c 2011-07-21 22:17:23.000000000 -0400
10780+++ linux-3.0.4/arch/x86/kernel/cpu/amd.c 2011-08-23 21:47:55.000000000 -0400
10781@@ -647,7 +647,7 @@ static unsigned int __cpuinit amd_size_c
10782 unsigned int size)
10783 {
10784 /* AMD errata T13 (order #21922) */
10785- if ((c->x86 == 6)) {
10786+ if (c->x86 == 6) {
10787 /* Duron Rev A0 */
10788 if (c->x86_model == 3 && c->x86_mask == 0)
10789 size = 64;
10790diff -urNp linux-3.0.4/arch/x86/kernel/cpu/common.c linux-3.0.4/arch/x86/kernel/cpu/common.c
10791--- linux-3.0.4/arch/x86/kernel/cpu/common.c 2011-07-21 22:17:23.000000000 -0400
10792+++ linux-3.0.4/arch/x86/kernel/cpu/common.c 2011-08-23 21:47:55.000000000 -0400
10793@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
10794
10795 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
10796
10797-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
10798-#ifdef CONFIG_X86_64
10799- /*
10800- * We need valid kernel segments for data and code in long mode too
10801- * IRET will check the segment types kkeil 2000/10/28
10802- * Also sysret mandates a special GDT layout
10803- *
10804- * TLS descriptors are currently at a different place compared to i386.
10805- * Hopefully nobody expects them at a fixed place (Wine?)
10806- */
10807- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
10808- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
10809- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
10810- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
10811- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
10812- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
10813-#else
10814- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
10815- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10816- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
10817- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
10818- /*
10819- * Segments used for calling PnP BIOS have byte granularity.
10820- * They code segments and data segments have fixed 64k limits,
10821- * the transfer segment sizes are set at run time.
10822- */
10823- /* 32-bit code */
10824- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10825- /* 16-bit code */
10826- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10827- /* 16-bit data */
10828- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
10829- /* 16-bit data */
10830- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
10831- /* 16-bit data */
10832- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
10833- /*
10834- * The APM segments have byte granularity and their bases
10835- * are set at run time. All have 64k limits.
10836- */
10837- /* 32-bit code */
10838- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10839- /* 16-bit code */
10840- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10841- /* data */
10842- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
10843-
10844- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10845- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10846- GDT_STACK_CANARY_INIT
10847-#endif
10848-} };
10849-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
10850-
10851 static int __init x86_xsave_setup(char *s)
10852 {
10853 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
10854@@ -371,7 +317,7 @@ void switch_to_new_gdt(int cpu)
10855 {
10856 struct desc_ptr gdt_descr;
10857
10858- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
10859+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
10860 gdt_descr.size = GDT_SIZE - 1;
10861 load_gdt(&gdt_descr);
10862 /* Reload the per-cpu base */
10863@@ -840,6 +786,10 @@ static void __cpuinit identify_cpu(struc
10864 /* Filter out anything that depends on CPUID levels we don't have */
10865 filter_cpuid_features(c, true);
10866
10867+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
10868+ setup_clear_cpu_cap(X86_FEATURE_SEP);
10869+#endif
10870+
10871 /* If the model name is still unset, do table lookup. */
10872 if (!c->x86_model_id[0]) {
10873 const char *p;
10874@@ -1019,6 +969,9 @@ static __init int setup_disablecpuid(cha
10875 }
10876 __setup("clearcpuid=", setup_disablecpuid);
10877
10878+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
10879+EXPORT_PER_CPU_SYMBOL(current_tinfo);
10880+
10881 #ifdef CONFIG_X86_64
10882 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
10883
10884@@ -1034,7 +987,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
10885 EXPORT_PER_CPU_SYMBOL(current_task);
10886
10887 DEFINE_PER_CPU(unsigned long, kernel_stack) =
10888- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
10889+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
10890 EXPORT_PER_CPU_SYMBOL(kernel_stack);
10891
10892 DEFINE_PER_CPU(char *, irq_stack_ptr) =
10893@@ -1099,7 +1052,7 @@ struct pt_regs * __cpuinit idle_regs(str
10894 {
10895 memset(regs, 0, sizeof(struct pt_regs));
10896 regs->fs = __KERNEL_PERCPU;
10897- regs->gs = __KERNEL_STACK_CANARY;
10898+ savesegment(gs, regs->gs);
10899
10900 return regs;
10901 }
10902@@ -1154,7 +1107,7 @@ void __cpuinit cpu_init(void)
10903 int i;
10904
10905 cpu = stack_smp_processor_id();
10906- t = &per_cpu(init_tss, cpu);
10907+ t = init_tss + cpu;
10908 oist = &per_cpu(orig_ist, cpu);
10909
10910 #ifdef CONFIG_NUMA
10911@@ -1180,7 +1133,7 @@ void __cpuinit cpu_init(void)
10912 switch_to_new_gdt(cpu);
10913 loadsegment(fs, 0);
10914
10915- load_idt((const struct desc_ptr *)&idt_descr);
10916+ load_idt(&idt_descr);
10917
10918 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
10919 syscall_init();
10920@@ -1189,7 +1142,6 @@ void __cpuinit cpu_init(void)
10921 wrmsrl(MSR_KERNEL_GS_BASE, 0);
10922 barrier();
10923
10924- x86_configure_nx();
10925 if (cpu != 0)
10926 enable_x2apic();
10927
10928@@ -1243,7 +1195,7 @@ void __cpuinit cpu_init(void)
10929 {
10930 int cpu = smp_processor_id();
10931 struct task_struct *curr = current;
10932- struct tss_struct *t = &per_cpu(init_tss, cpu);
10933+ struct tss_struct *t = init_tss + cpu;
10934 struct thread_struct *thread = &curr->thread;
10935
10936 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
10937diff -urNp linux-3.0.4/arch/x86/kernel/cpu/intel.c linux-3.0.4/arch/x86/kernel/cpu/intel.c
10938--- linux-3.0.4/arch/x86/kernel/cpu/intel.c 2011-09-02 18:11:26.000000000 -0400
10939+++ linux-3.0.4/arch/x86/kernel/cpu/intel.c 2011-08-29 23:30:14.000000000 -0400
10940@@ -172,7 +172,7 @@ static void __cpuinit trap_init_f00f_bug
10941 * Update the IDT descriptor and reload the IDT so that
10942 * it uses the read-only mapped virtual address.
10943 */
10944- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
10945+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
10946 load_idt(&idt_descr);
10947 }
10948 #endif
10949diff -urNp linux-3.0.4/arch/x86/kernel/cpu/Makefile linux-3.0.4/arch/x86/kernel/cpu/Makefile
10950--- linux-3.0.4/arch/x86/kernel/cpu/Makefile 2011-07-21 22:17:23.000000000 -0400
10951+++ linux-3.0.4/arch/x86/kernel/cpu/Makefile 2011-08-23 21:47:55.000000000 -0400
10952@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
10953 CFLAGS_REMOVE_perf_event.o = -pg
10954 endif
10955
10956-# Make sure load_percpu_segment has no stackprotector
10957-nostackp := $(call cc-option, -fno-stack-protector)
10958-CFLAGS_common.o := $(nostackp)
10959-
10960 obj-y := intel_cacheinfo.o scattered.o topology.o
10961 obj-y += proc.o capflags.o powerflags.o common.o
10962 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
10963diff -urNp linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce.c linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce.c
10964--- linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce.c 2011-07-21 22:17:23.000000000 -0400
10965+++ linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce.c 2011-08-23 21:47:55.000000000 -0400
10966@@ -46,6 +46,7 @@
10967 #include <asm/ipi.h>
10968 #include <asm/mce.h>
10969 #include <asm/msr.h>
10970+#include <asm/local.h>
10971
10972 #include "mce-internal.h"
10973
10974@@ -208,7 +209,7 @@ static void print_mce(struct mce *m)
10975 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
10976 m->cs, m->ip);
10977
10978- if (m->cs == __KERNEL_CS)
10979+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
10980 print_symbol("{%s}", m->ip);
10981 pr_cont("\n");
10982 }
10983@@ -236,10 +237,10 @@ static void print_mce(struct mce *m)
10984
10985 #define PANIC_TIMEOUT 5 /* 5 seconds */
10986
10987-static atomic_t mce_paniced;
10988+static atomic_unchecked_t mce_paniced;
10989
10990 static int fake_panic;
10991-static atomic_t mce_fake_paniced;
10992+static atomic_unchecked_t mce_fake_paniced;
10993
10994 /* Panic in progress. Enable interrupts and wait for final IPI */
10995 static void wait_for_panic(void)
10996@@ -263,7 +264,7 @@ static void mce_panic(char *msg, struct
10997 /*
10998 * Make sure only one CPU runs in machine check panic
10999 */
11000- if (atomic_inc_return(&mce_paniced) > 1)
11001+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
11002 wait_for_panic();
11003 barrier();
11004
11005@@ -271,7 +272,7 @@ static void mce_panic(char *msg, struct
11006 console_verbose();
11007 } else {
11008 /* Don't log too much for fake panic */
11009- if (atomic_inc_return(&mce_fake_paniced) > 1)
11010+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
11011 return;
11012 }
11013 /* First print corrected ones that are still unlogged */
11014@@ -638,7 +639,7 @@ static int mce_timed_out(u64 *t)
11015 * might have been modified by someone else.
11016 */
11017 rmb();
11018- if (atomic_read(&mce_paniced))
11019+ if (atomic_read_unchecked(&mce_paniced))
11020 wait_for_panic();
11021 if (!monarch_timeout)
11022 goto out;
11023@@ -1452,14 +1453,14 @@ void __cpuinit mcheck_cpu_init(struct cp
11024 */
11025
11026 static DEFINE_SPINLOCK(mce_state_lock);
11027-static int open_count; /* #times opened */
11028+static local_t open_count; /* #times opened */
11029 static int open_exclu; /* already open exclusive? */
11030
11031 static int mce_open(struct inode *inode, struct file *file)
11032 {
11033 spin_lock(&mce_state_lock);
11034
11035- if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
11036+ if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
11037 spin_unlock(&mce_state_lock);
11038
11039 return -EBUSY;
11040@@ -1467,7 +1468,7 @@ static int mce_open(struct inode *inode,
11041
11042 if (file->f_flags & O_EXCL)
11043 open_exclu = 1;
11044- open_count++;
11045+ local_inc(&open_count);
11046
11047 spin_unlock(&mce_state_lock);
11048
11049@@ -1478,7 +1479,7 @@ static int mce_release(struct inode *ino
11050 {
11051 spin_lock(&mce_state_lock);
11052
11053- open_count--;
11054+ local_dec(&open_count);
11055 open_exclu = 0;
11056
11057 spin_unlock(&mce_state_lock);
11058@@ -2163,7 +2164,7 @@ struct dentry *mce_get_debugfs_dir(void)
11059 static void mce_reset(void)
11060 {
11061 cpu_missing = 0;
11062- atomic_set(&mce_fake_paniced, 0);
11063+ atomic_set_unchecked(&mce_fake_paniced, 0);
11064 atomic_set(&mce_executing, 0);
11065 atomic_set(&mce_callin, 0);
11066 atomic_set(&global_nwo, 0);
11067diff -urNp linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce-inject.c
11068--- linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-07-21 22:17:23.000000000 -0400
11069+++ linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-08-23 21:47:55.000000000 -0400
11070@@ -215,7 +215,9 @@ static int inject_init(void)
11071 if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
11072 return -ENOMEM;
11073 printk(KERN_INFO "Machine check injector initialized\n");
11074- mce_chrdev_ops.write = mce_write;
11075+ pax_open_kernel();
11076+ *(void **)&mce_chrdev_ops.write = mce_write;
11077+ pax_close_kernel();
11078 register_die_notifier(&mce_raise_nb);
11079 return 0;
11080 }
11081diff -urNp linux-3.0.4/arch/x86/kernel/cpu/mtrr/main.c linux-3.0.4/arch/x86/kernel/cpu/mtrr/main.c
11082--- linux-3.0.4/arch/x86/kernel/cpu/mtrr/main.c 2011-09-02 18:11:26.000000000 -0400
11083+++ linux-3.0.4/arch/x86/kernel/cpu/mtrr/main.c 2011-08-29 23:26:21.000000000 -0400
11084@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
11085 u64 size_or_mask, size_and_mask;
11086 static bool mtrr_aps_delayed_init;
11087
11088-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
11089+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
11090
11091 const struct mtrr_ops *mtrr_if;
11092
11093diff -urNp linux-3.0.4/arch/x86/kernel/cpu/mtrr/mtrr.h linux-3.0.4/arch/x86/kernel/cpu/mtrr/mtrr.h
11094--- linux-3.0.4/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-07-21 22:17:23.000000000 -0400
11095+++ linux-3.0.4/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-08-26 19:49:56.000000000 -0400
11096@@ -25,7 +25,7 @@ struct mtrr_ops {
11097 int (*validate_add_page)(unsigned long base, unsigned long size,
11098 unsigned int type);
11099 int (*have_wrcomb)(void);
11100-};
11101+} __do_const;
11102
11103 extern int generic_get_free_region(unsigned long base, unsigned long size,
11104 int replace_reg);
11105diff -urNp linux-3.0.4/arch/x86/kernel/cpu/perf_event.c linux-3.0.4/arch/x86/kernel/cpu/perf_event.c
11106--- linux-3.0.4/arch/x86/kernel/cpu/perf_event.c 2011-07-21 22:17:23.000000000 -0400
11107+++ linux-3.0.4/arch/x86/kernel/cpu/perf_event.c 2011-10-06 04:17:55.000000000 -0400
11108@@ -781,6 +781,8 @@ static int x86_schedule_events(struct cp
11109 int i, j, w, wmax, num = 0;
11110 struct hw_perf_event *hwc;
11111
11112+ pax_track_stack();
11113+
11114 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
11115
11116 for (i = 0; i < n; i++) {
11117@@ -1872,7 +1874,7 @@ perf_callchain_user(struct perf_callchai
11118 break;
11119
11120 perf_callchain_store(entry, frame.return_address);
11121- fp = frame.next_frame;
11122+ fp = (const void __force_user *)frame.next_frame;
11123 }
11124 }
11125
11126diff -urNp linux-3.0.4/arch/x86/kernel/crash.c linux-3.0.4/arch/x86/kernel/crash.c
11127--- linux-3.0.4/arch/x86/kernel/crash.c 2011-07-21 22:17:23.000000000 -0400
11128+++ linux-3.0.4/arch/x86/kernel/crash.c 2011-08-23 21:47:55.000000000 -0400
11129@@ -42,7 +42,7 @@ static void kdump_nmi_callback(int cpu,
11130 regs = args->regs;
11131
11132 #ifdef CONFIG_X86_32
11133- if (!user_mode_vm(regs)) {
11134+ if (!user_mode(regs)) {
11135 crash_fixup_ss_esp(&fixed_regs, regs);
11136 regs = &fixed_regs;
11137 }
11138diff -urNp linux-3.0.4/arch/x86/kernel/doublefault_32.c linux-3.0.4/arch/x86/kernel/doublefault_32.c
11139--- linux-3.0.4/arch/x86/kernel/doublefault_32.c 2011-07-21 22:17:23.000000000 -0400
11140+++ linux-3.0.4/arch/x86/kernel/doublefault_32.c 2011-08-23 21:47:55.000000000 -0400
11141@@ -11,7 +11,7 @@
11142
11143 #define DOUBLEFAULT_STACKSIZE (1024)
11144 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
11145-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
11146+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
11147
11148 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
11149
11150@@ -21,7 +21,7 @@ static void doublefault_fn(void)
11151 unsigned long gdt, tss;
11152
11153 store_gdt(&gdt_desc);
11154- gdt = gdt_desc.address;
11155+ gdt = (unsigned long)gdt_desc.address;
11156
11157 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
11158
11159@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
11160 /* 0x2 bit is always set */
11161 .flags = X86_EFLAGS_SF | 0x2,
11162 .sp = STACK_START,
11163- .es = __USER_DS,
11164+ .es = __KERNEL_DS,
11165 .cs = __KERNEL_CS,
11166 .ss = __KERNEL_DS,
11167- .ds = __USER_DS,
11168+ .ds = __KERNEL_DS,
11169 .fs = __KERNEL_PERCPU,
11170
11171 .__cr3 = __pa_nodebug(swapper_pg_dir),
11172diff -urNp linux-3.0.4/arch/x86/kernel/dumpstack_32.c linux-3.0.4/arch/x86/kernel/dumpstack_32.c
11173--- linux-3.0.4/arch/x86/kernel/dumpstack_32.c 2011-07-21 22:17:23.000000000 -0400
11174+++ linux-3.0.4/arch/x86/kernel/dumpstack_32.c 2011-08-23 21:47:55.000000000 -0400
11175@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task
11176 bp = stack_frame(task, regs);
11177
11178 for (;;) {
11179- struct thread_info *context;
11180+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11181
11182- context = (struct thread_info *)
11183- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
11184- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
11185+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11186
11187- stack = (unsigned long *)context->previous_esp;
11188- if (!stack)
11189+ if (stack_start == task_stack_page(task))
11190 break;
11191+ stack = *(unsigned long **)stack_start;
11192 if (ops->stack(data, "IRQ") < 0)
11193 break;
11194 touch_nmi_watchdog();
11195@@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs
11196 * When in-kernel, we also print out the stack and code at the
11197 * time of the fault..
11198 */
11199- if (!user_mode_vm(regs)) {
11200+ if (!user_mode(regs)) {
11201 unsigned int code_prologue = code_bytes * 43 / 64;
11202 unsigned int code_len = code_bytes;
11203 unsigned char c;
11204 u8 *ip;
11205+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
11206
11207 printk(KERN_EMERG "Stack:\n");
11208 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
11209
11210 printk(KERN_EMERG "Code: ");
11211
11212- ip = (u8 *)regs->ip - code_prologue;
11213+ ip = (u8 *)regs->ip - code_prologue + cs_base;
11214 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
11215 /* try starting at IP */
11216- ip = (u8 *)regs->ip;
11217+ ip = (u8 *)regs->ip + cs_base;
11218 code_len = code_len - code_prologue + 1;
11219 }
11220 for (i = 0; i < code_len; i++, ip++) {
11221@@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs
11222 printk(" Bad EIP value.");
11223 break;
11224 }
11225- if (ip == (u8 *)regs->ip)
11226+ if (ip == (u8 *)regs->ip + cs_base)
11227 printk("<%02x> ", c);
11228 else
11229 printk("%02x ", c);
11230@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
11231 {
11232 unsigned short ud2;
11233
11234+ ip = ktla_ktva(ip);
11235 if (ip < PAGE_OFFSET)
11236 return 0;
11237 if (probe_kernel_address((unsigned short *)ip, ud2))
11238diff -urNp linux-3.0.4/arch/x86/kernel/dumpstack_64.c linux-3.0.4/arch/x86/kernel/dumpstack_64.c
11239--- linux-3.0.4/arch/x86/kernel/dumpstack_64.c 2011-07-21 22:17:23.000000000 -0400
11240+++ linux-3.0.4/arch/x86/kernel/dumpstack_64.c 2011-08-23 21:47:55.000000000 -0400
11241@@ -147,9 +147,9 @@ void dump_trace(struct task_struct *task
11242 unsigned long *irq_stack_end =
11243 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
11244 unsigned used = 0;
11245- struct thread_info *tinfo;
11246 int graph = 0;
11247 unsigned long dummy;
11248+ void *stack_start;
11249
11250 if (!task)
11251 task = current;
11252@@ -167,10 +167,10 @@ void dump_trace(struct task_struct *task
11253 * current stack address. If the stacks consist of nested
11254 * exceptions
11255 */
11256- tinfo = task_thread_info(task);
11257 for (;;) {
11258 char *id;
11259 unsigned long *estack_end;
11260+
11261 estack_end = in_exception_stack(cpu, (unsigned long)stack,
11262 &used, &id);
11263
11264@@ -178,7 +178,7 @@ void dump_trace(struct task_struct *task
11265 if (ops->stack(data, id) < 0)
11266 break;
11267
11268- bp = ops->walk_stack(tinfo, stack, bp, ops,
11269+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
11270 data, estack_end, &graph);
11271 ops->stack(data, "<EOE>");
11272 /*
11273@@ -197,7 +197,7 @@ void dump_trace(struct task_struct *task
11274 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
11275 if (ops->stack(data, "IRQ") < 0)
11276 break;
11277- bp = ops->walk_stack(tinfo, stack, bp,
11278+ bp = ops->walk_stack(task, irq_stack, stack, bp,
11279 ops, data, irq_stack_end, &graph);
11280 /*
11281 * We link to the next stack (which would be
11282@@ -218,7 +218,8 @@ void dump_trace(struct task_struct *task
11283 /*
11284 * This handles the process stack:
11285 */
11286- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
11287+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11288+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11289 put_cpu();
11290 }
11291 EXPORT_SYMBOL(dump_trace);
11292diff -urNp linux-3.0.4/arch/x86/kernel/dumpstack.c linux-3.0.4/arch/x86/kernel/dumpstack.c
11293--- linux-3.0.4/arch/x86/kernel/dumpstack.c 2011-07-21 22:17:23.000000000 -0400
11294+++ linux-3.0.4/arch/x86/kernel/dumpstack.c 2011-08-23 21:48:14.000000000 -0400
11295@@ -2,6 +2,9 @@
11296 * Copyright (C) 1991, 1992 Linus Torvalds
11297 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
11298 */
11299+#ifdef CONFIG_GRKERNSEC_HIDESYM
11300+#define __INCLUDED_BY_HIDESYM 1
11301+#endif
11302 #include <linux/kallsyms.h>
11303 #include <linux/kprobes.h>
11304 #include <linux/uaccess.h>
11305@@ -35,9 +38,8 @@ void printk_address(unsigned long addres
11306 static void
11307 print_ftrace_graph_addr(unsigned long addr, void *data,
11308 const struct stacktrace_ops *ops,
11309- struct thread_info *tinfo, int *graph)
11310+ struct task_struct *task, int *graph)
11311 {
11312- struct task_struct *task = tinfo->task;
11313 unsigned long ret_addr;
11314 int index = task->curr_ret_stack;
11315
11316@@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long ad
11317 static inline void
11318 print_ftrace_graph_addr(unsigned long addr, void *data,
11319 const struct stacktrace_ops *ops,
11320- struct thread_info *tinfo, int *graph)
11321+ struct task_struct *task, int *graph)
11322 { }
11323 #endif
11324
11325@@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long ad
11326 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
11327 */
11328
11329-static inline int valid_stack_ptr(struct thread_info *tinfo,
11330- void *p, unsigned int size, void *end)
11331+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
11332 {
11333- void *t = tinfo;
11334 if (end) {
11335 if (p < end && p >= (end-THREAD_SIZE))
11336 return 1;
11337@@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct
11338 }
11339
11340 unsigned long
11341-print_context_stack(struct thread_info *tinfo,
11342+print_context_stack(struct task_struct *task, void *stack_start,
11343 unsigned long *stack, unsigned long bp,
11344 const struct stacktrace_ops *ops, void *data,
11345 unsigned long *end, int *graph)
11346 {
11347 struct stack_frame *frame = (struct stack_frame *)bp;
11348
11349- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
11350+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
11351 unsigned long addr;
11352
11353 addr = *stack;
11354@@ -102,7 +102,7 @@ print_context_stack(struct thread_info *
11355 } else {
11356 ops->address(data, addr, 0);
11357 }
11358- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11359+ print_ftrace_graph_addr(addr, data, ops, task, graph);
11360 }
11361 stack++;
11362 }
11363@@ -111,7 +111,7 @@ print_context_stack(struct thread_info *
11364 EXPORT_SYMBOL_GPL(print_context_stack);
11365
11366 unsigned long
11367-print_context_stack_bp(struct thread_info *tinfo,
11368+print_context_stack_bp(struct task_struct *task, void *stack_start,
11369 unsigned long *stack, unsigned long bp,
11370 const struct stacktrace_ops *ops, void *data,
11371 unsigned long *end, int *graph)
11372@@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_inf
11373 struct stack_frame *frame = (struct stack_frame *)bp;
11374 unsigned long *ret_addr = &frame->return_address;
11375
11376- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
11377+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
11378 unsigned long addr = *ret_addr;
11379
11380 if (!__kernel_text_address(addr))
11381@@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_inf
11382 ops->address(data, addr, 1);
11383 frame = frame->next_frame;
11384 ret_addr = &frame->return_address;
11385- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11386+ print_ftrace_graph_addr(addr, data, ops, task, graph);
11387 }
11388
11389 return (unsigned long)frame;
11390@@ -186,7 +186,7 @@ void dump_stack(void)
11391
11392 bp = stack_frame(current, NULL);
11393 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
11394- current->pid, current->comm, print_tainted(),
11395+ task_pid_nr(current), current->comm, print_tainted(),
11396 init_utsname()->release,
11397 (int)strcspn(init_utsname()->version, " "),
11398 init_utsname()->version);
11399@@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
11400 }
11401 EXPORT_SYMBOL_GPL(oops_begin);
11402
11403+extern void gr_handle_kernel_exploit(void);
11404+
11405 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
11406 {
11407 if (regs && kexec_should_crash(current))
11408@@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long fl
11409 panic("Fatal exception in interrupt");
11410 if (panic_on_oops)
11411 panic("Fatal exception");
11412- do_exit(signr);
11413+
11414+ gr_handle_kernel_exploit();
11415+
11416+ do_group_exit(signr);
11417 }
11418
11419 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
11420@@ -269,7 +274,7 @@ int __kprobes __die(const char *str, str
11421
11422 show_registers(regs);
11423 #ifdef CONFIG_X86_32
11424- if (user_mode_vm(regs)) {
11425+ if (user_mode(regs)) {
11426 sp = regs->sp;
11427 ss = regs->ss & 0xffff;
11428 } else {
11429@@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs
11430 unsigned long flags = oops_begin();
11431 int sig = SIGSEGV;
11432
11433- if (!user_mode_vm(regs))
11434+ if (!user_mode(regs))
11435 report_bug(regs->ip, regs);
11436
11437 if (__die(str, regs, err))
11438diff -urNp linux-3.0.4/arch/x86/kernel/early_printk.c linux-3.0.4/arch/x86/kernel/early_printk.c
11439--- linux-3.0.4/arch/x86/kernel/early_printk.c 2011-07-21 22:17:23.000000000 -0400
11440+++ linux-3.0.4/arch/x86/kernel/early_printk.c 2011-08-23 21:48:14.000000000 -0400
11441@@ -7,6 +7,7 @@
11442 #include <linux/pci_regs.h>
11443 #include <linux/pci_ids.h>
11444 #include <linux/errno.h>
11445+#include <linux/sched.h>
11446 #include <asm/io.h>
11447 #include <asm/processor.h>
11448 #include <asm/fcntl.h>
11449@@ -179,6 +180,8 @@ asmlinkage void early_printk(const char
11450 int n;
11451 va_list ap;
11452
11453+ pax_track_stack();
11454+
11455 va_start(ap, fmt);
11456 n = vscnprintf(buf, sizeof(buf), fmt, ap);
11457 early_console->write(early_console, buf, n);
11458diff -urNp linux-3.0.4/arch/x86/kernel/entry_32.S linux-3.0.4/arch/x86/kernel/entry_32.S
11459--- linux-3.0.4/arch/x86/kernel/entry_32.S 2011-07-21 22:17:23.000000000 -0400
11460+++ linux-3.0.4/arch/x86/kernel/entry_32.S 2011-08-30 18:23:52.000000000 -0400
11461@@ -185,13 +185,146 @@
11462 /*CFI_REL_OFFSET gs, PT_GS*/
11463 .endm
11464 .macro SET_KERNEL_GS reg
11465+
11466+#ifdef CONFIG_CC_STACKPROTECTOR
11467 movl $(__KERNEL_STACK_CANARY), \reg
11468+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
11469+ movl $(__USER_DS), \reg
11470+#else
11471+ xorl \reg, \reg
11472+#endif
11473+
11474 movl \reg, %gs
11475 .endm
11476
11477 #endif /* CONFIG_X86_32_LAZY_GS */
11478
11479-.macro SAVE_ALL
11480+.macro pax_enter_kernel
11481+#ifdef CONFIG_PAX_KERNEXEC
11482+ call pax_enter_kernel
11483+#endif
11484+.endm
11485+
11486+.macro pax_exit_kernel
11487+#ifdef CONFIG_PAX_KERNEXEC
11488+ call pax_exit_kernel
11489+#endif
11490+.endm
11491+
11492+#ifdef CONFIG_PAX_KERNEXEC
11493+ENTRY(pax_enter_kernel)
11494+#ifdef CONFIG_PARAVIRT
11495+ pushl %eax
11496+ pushl %ecx
11497+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
11498+ mov %eax, %esi
11499+#else
11500+ mov %cr0, %esi
11501+#endif
11502+ bts $16, %esi
11503+ jnc 1f
11504+ mov %cs, %esi
11505+ cmp $__KERNEL_CS, %esi
11506+ jz 3f
11507+ ljmp $__KERNEL_CS, $3f
11508+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
11509+2:
11510+#ifdef CONFIG_PARAVIRT
11511+ mov %esi, %eax
11512+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11513+#else
11514+ mov %esi, %cr0
11515+#endif
11516+3:
11517+#ifdef CONFIG_PARAVIRT
11518+ popl %ecx
11519+ popl %eax
11520+#endif
11521+ ret
11522+ENDPROC(pax_enter_kernel)
11523+
11524+ENTRY(pax_exit_kernel)
11525+#ifdef CONFIG_PARAVIRT
11526+ pushl %eax
11527+ pushl %ecx
11528+#endif
11529+ mov %cs, %esi
11530+ cmp $__KERNEXEC_KERNEL_CS, %esi
11531+ jnz 2f
11532+#ifdef CONFIG_PARAVIRT
11533+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
11534+ mov %eax, %esi
11535+#else
11536+ mov %cr0, %esi
11537+#endif
11538+ btr $16, %esi
11539+ ljmp $__KERNEL_CS, $1f
11540+1:
11541+#ifdef CONFIG_PARAVIRT
11542+ mov %esi, %eax
11543+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
11544+#else
11545+ mov %esi, %cr0
11546+#endif
11547+2:
11548+#ifdef CONFIG_PARAVIRT
11549+ popl %ecx
11550+ popl %eax
11551+#endif
11552+ ret
11553+ENDPROC(pax_exit_kernel)
11554+#endif
11555+
11556+.macro pax_erase_kstack
11557+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11558+ call pax_erase_kstack
11559+#endif
11560+.endm
11561+
11562+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11563+/*
11564+ * ebp: thread_info
11565+ * ecx, edx: can be clobbered
11566+ */
11567+ENTRY(pax_erase_kstack)
11568+ pushl %edi
11569+ pushl %eax
11570+
11571+ mov TI_lowest_stack(%ebp), %edi
11572+ mov $-0xBEEF, %eax
11573+ std
11574+
11575+1: mov %edi, %ecx
11576+ and $THREAD_SIZE_asm - 1, %ecx
11577+ shr $2, %ecx
11578+ repne scasl
11579+ jecxz 2f
11580+
11581+ cmp $2*16, %ecx
11582+ jc 2f
11583+
11584+ mov $2*16, %ecx
11585+ repe scasl
11586+ jecxz 2f
11587+ jne 1b
11588+
11589+2: cld
11590+ mov %esp, %ecx
11591+ sub %edi, %ecx
11592+ shr $2, %ecx
11593+ rep stosl
11594+
11595+ mov TI_task_thread_sp0(%ebp), %edi
11596+ sub $128, %edi
11597+ mov %edi, TI_lowest_stack(%ebp)
11598+
11599+ popl %eax
11600+ popl %edi
11601+ ret
11602+ENDPROC(pax_erase_kstack)
11603+#endif
11604+
11605+.macro __SAVE_ALL _DS
11606 cld
11607 PUSH_GS
11608 pushl_cfi %fs
11609@@ -214,7 +347,7 @@
11610 CFI_REL_OFFSET ecx, 0
11611 pushl_cfi %ebx
11612 CFI_REL_OFFSET ebx, 0
11613- movl $(__USER_DS), %edx
11614+ movl $\_DS, %edx
11615 movl %edx, %ds
11616 movl %edx, %es
11617 movl $(__KERNEL_PERCPU), %edx
11618@@ -222,6 +355,15 @@
11619 SET_KERNEL_GS %edx
11620 .endm
11621
11622+.macro SAVE_ALL
11623+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
11624+ __SAVE_ALL __KERNEL_DS
11625+ pax_enter_kernel
11626+#else
11627+ __SAVE_ALL __USER_DS
11628+#endif
11629+.endm
11630+
11631 .macro RESTORE_INT_REGS
11632 popl_cfi %ebx
11633 CFI_RESTORE ebx
11634@@ -332,7 +474,15 @@ check_userspace:
11635 movb PT_CS(%esp), %al
11636 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
11637 cmpl $USER_RPL, %eax
11638+
11639+#ifdef CONFIG_PAX_KERNEXEC
11640+ jae resume_userspace
11641+
11642+ PAX_EXIT_KERNEL
11643+ jmp resume_kernel
11644+#else
11645 jb resume_kernel # not returning to v8086 or userspace
11646+#endif
11647
11648 ENTRY(resume_userspace)
11649 LOCKDEP_SYS_EXIT
11650@@ -344,7 +494,7 @@ ENTRY(resume_userspace)
11651 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
11652 # int/exception return?
11653 jne work_pending
11654- jmp restore_all
11655+ jmp restore_all_pax
11656 END(ret_from_exception)
11657
11658 #ifdef CONFIG_PREEMPT
11659@@ -394,23 +544,34 @@ sysenter_past_esp:
11660 /*CFI_REL_OFFSET cs, 0*/
11661 /*
11662 * Push current_thread_info()->sysenter_return to the stack.
11663- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
11664- * pushed above; +8 corresponds to copy_thread's esp0 setting.
11665 */
11666- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
11667+ pushl_cfi $0
11668 CFI_REL_OFFSET eip, 0
11669
11670 pushl_cfi %eax
11671 SAVE_ALL
11672+ GET_THREAD_INFO(%ebp)
11673+ movl TI_sysenter_return(%ebp),%ebp
11674+ movl %ebp,PT_EIP(%esp)
11675 ENABLE_INTERRUPTS(CLBR_NONE)
11676
11677 /*
11678 * Load the potential sixth argument from user stack.
11679 * Careful about security.
11680 */
11681+ movl PT_OLDESP(%esp),%ebp
11682+
11683+#ifdef CONFIG_PAX_MEMORY_UDEREF
11684+ mov PT_OLDSS(%esp),%ds
11685+1: movl %ds:(%ebp),%ebp
11686+ push %ss
11687+ pop %ds
11688+#else
11689 cmpl $__PAGE_OFFSET-3,%ebp
11690 jae syscall_fault
11691 1: movl (%ebp),%ebp
11692+#endif
11693+
11694 movl %ebp,PT_EBP(%esp)
11695 .section __ex_table,"a"
11696 .align 4
11697@@ -433,12 +594,24 @@ sysenter_do_call:
11698 testl $_TIF_ALLWORK_MASK, %ecx
11699 jne sysexit_audit
11700 sysenter_exit:
11701+
11702+#ifdef CONFIG_PAX_RANDKSTACK
11703+ pushl_cfi %eax
11704+ movl %esp, %eax
11705+ call pax_randomize_kstack
11706+ popl_cfi %eax
11707+#endif
11708+
11709+ pax_erase_kstack
11710+
11711 /* if something modifies registers it must also disable sysexit */
11712 movl PT_EIP(%esp), %edx
11713 movl PT_OLDESP(%esp), %ecx
11714 xorl %ebp,%ebp
11715 TRACE_IRQS_ON
11716 1: mov PT_FS(%esp), %fs
11717+2: mov PT_DS(%esp), %ds
11718+3: mov PT_ES(%esp), %es
11719 PTGS_TO_GS
11720 ENABLE_INTERRUPTS_SYSEXIT
11721
11722@@ -455,6 +628,9 @@ sysenter_audit:
11723 movl %eax,%edx /* 2nd arg: syscall number */
11724 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
11725 call audit_syscall_entry
11726+
11727+ pax_erase_kstack
11728+
11729 pushl_cfi %ebx
11730 movl PT_EAX(%esp),%eax /* reload syscall number */
11731 jmp sysenter_do_call
11732@@ -481,11 +657,17 @@ sysexit_audit:
11733
11734 CFI_ENDPROC
11735 .pushsection .fixup,"ax"
11736-2: movl $0,PT_FS(%esp)
11737+4: movl $0,PT_FS(%esp)
11738+ jmp 1b
11739+5: movl $0,PT_DS(%esp)
11740+ jmp 1b
11741+6: movl $0,PT_ES(%esp)
11742 jmp 1b
11743 .section __ex_table,"a"
11744 .align 4
11745- .long 1b,2b
11746+ .long 1b,4b
11747+ .long 2b,5b
11748+ .long 3b,6b
11749 .popsection
11750 PTGS_TO_GS_EX
11751 ENDPROC(ia32_sysenter_target)
11752@@ -518,6 +700,15 @@ syscall_exit:
11753 testl $_TIF_ALLWORK_MASK, %ecx # current->work
11754 jne syscall_exit_work
11755
11756+restore_all_pax:
11757+
11758+#ifdef CONFIG_PAX_RANDKSTACK
11759+ movl %esp, %eax
11760+ call pax_randomize_kstack
11761+#endif
11762+
11763+ pax_erase_kstack
11764+
11765 restore_all:
11766 TRACE_IRQS_IRET
11767 restore_all_notrace:
11768@@ -577,14 +768,34 @@ ldt_ss:
11769 * compensating for the offset by changing to the ESPFIX segment with
11770 * a base address that matches for the difference.
11771 */
11772-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
11773+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
11774 mov %esp, %edx /* load kernel esp */
11775 mov PT_OLDESP(%esp), %eax /* load userspace esp */
11776 mov %dx, %ax /* eax: new kernel esp */
11777 sub %eax, %edx /* offset (low word is 0) */
11778+#ifdef CONFIG_SMP
11779+ movl PER_CPU_VAR(cpu_number), %ebx
11780+ shll $PAGE_SHIFT_asm, %ebx
11781+ addl $cpu_gdt_table, %ebx
11782+#else
11783+ movl $cpu_gdt_table, %ebx
11784+#endif
11785 shr $16, %edx
11786- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
11787- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
11788+
11789+#ifdef CONFIG_PAX_KERNEXEC
11790+ mov %cr0, %esi
11791+ btr $16, %esi
11792+ mov %esi, %cr0
11793+#endif
11794+
11795+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
11796+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
11797+
11798+#ifdef CONFIG_PAX_KERNEXEC
11799+ bts $16, %esi
11800+ mov %esi, %cr0
11801+#endif
11802+
11803 pushl_cfi $__ESPFIX_SS
11804 pushl_cfi %eax /* new kernel esp */
11805 /* Disable interrupts, but do not irqtrace this section: we
11806@@ -613,29 +824,23 @@ work_resched:
11807 movl TI_flags(%ebp), %ecx
11808 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
11809 # than syscall tracing?
11810- jz restore_all
11811+ jz restore_all_pax
11812 testb $_TIF_NEED_RESCHED, %cl
11813 jnz work_resched
11814
11815 work_notifysig: # deal with pending signals and
11816 # notify-resume requests
11817+ movl %esp, %eax
11818 #ifdef CONFIG_VM86
11819 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
11820- movl %esp, %eax
11821- jne work_notifysig_v86 # returning to kernel-space or
11822+ jz 1f # returning to kernel-space or
11823 # vm86-space
11824- xorl %edx, %edx
11825- call do_notify_resume
11826- jmp resume_userspace_sig
11827
11828- ALIGN
11829-work_notifysig_v86:
11830 pushl_cfi %ecx # save ti_flags for do_notify_resume
11831 call save_v86_state # %eax contains pt_regs pointer
11832 popl_cfi %ecx
11833 movl %eax, %esp
11834-#else
11835- movl %esp, %eax
11836+1:
11837 #endif
11838 xorl %edx, %edx
11839 call do_notify_resume
11840@@ -648,6 +853,9 @@ syscall_trace_entry:
11841 movl $-ENOSYS,PT_EAX(%esp)
11842 movl %esp, %eax
11843 call syscall_trace_enter
11844+
11845+ pax_erase_kstack
11846+
11847 /* What it returned is what we'll actually use. */
11848 cmpl $(nr_syscalls), %eax
11849 jnae syscall_call
11850@@ -670,6 +878,10 @@ END(syscall_exit_work)
11851
11852 RING0_INT_FRAME # can't unwind into user space anyway
11853 syscall_fault:
11854+#ifdef CONFIG_PAX_MEMORY_UDEREF
11855+ push %ss
11856+ pop %ds
11857+#endif
11858 GET_THREAD_INFO(%ebp)
11859 movl $-EFAULT,PT_EAX(%esp)
11860 jmp resume_userspace
11861@@ -752,6 +964,36 @@ ptregs_clone:
11862 CFI_ENDPROC
11863 ENDPROC(ptregs_clone)
11864
11865+ ALIGN;
11866+ENTRY(kernel_execve)
11867+ CFI_STARTPROC
11868+ pushl_cfi %ebp
11869+ sub $PT_OLDSS+4,%esp
11870+ pushl_cfi %edi
11871+ pushl_cfi %ecx
11872+ pushl_cfi %eax
11873+ lea 3*4(%esp),%edi
11874+ mov $PT_OLDSS/4+1,%ecx
11875+ xorl %eax,%eax
11876+ rep stosl
11877+ popl_cfi %eax
11878+ popl_cfi %ecx
11879+ popl_cfi %edi
11880+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
11881+ pushl_cfi %esp
11882+ call sys_execve
11883+ add $4,%esp
11884+ CFI_ADJUST_CFA_OFFSET -4
11885+ GET_THREAD_INFO(%ebp)
11886+ test %eax,%eax
11887+ jz syscall_exit
11888+ add $PT_OLDSS+4,%esp
11889+ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
11890+ popl_cfi %ebp
11891+ ret
11892+ CFI_ENDPROC
11893+ENDPROC(kernel_execve)
11894+
11895 .macro FIXUP_ESPFIX_STACK
11896 /*
11897 * Switch back for ESPFIX stack to the normal zerobased stack
11898@@ -761,8 +1003,15 @@ ENDPROC(ptregs_clone)
11899 * normal stack and adjusts ESP with the matching offset.
11900 */
11901 /* fixup the stack */
11902- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
11903- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
11904+#ifdef CONFIG_SMP
11905+ movl PER_CPU_VAR(cpu_number), %ebx
11906+ shll $PAGE_SHIFT_asm, %ebx
11907+ addl $cpu_gdt_table, %ebx
11908+#else
11909+ movl $cpu_gdt_table, %ebx
11910+#endif
11911+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
11912+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
11913 shl $16, %eax
11914 addl %esp, %eax /* the adjusted stack pointer */
11915 pushl_cfi $__KERNEL_DS
11916@@ -1213,7 +1462,6 @@ return_to_handler:
11917 jmp *%ecx
11918 #endif
11919
11920-.section .rodata,"a"
11921 #include "syscall_table_32.S"
11922
11923 syscall_table_size=(.-sys_call_table)
11924@@ -1259,9 +1507,12 @@ error_code:
11925 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
11926 REG_TO_PTGS %ecx
11927 SET_KERNEL_GS %ecx
11928- movl $(__USER_DS), %ecx
11929+ movl $(__KERNEL_DS), %ecx
11930 movl %ecx, %ds
11931 movl %ecx, %es
11932+
11933+ pax_enter_kernel
11934+
11935 TRACE_IRQS_OFF
11936 movl %esp,%eax # pt_regs pointer
11937 call *%edi
11938@@ -1346,6 +1597,9 @@ nmi_stack_correct:
11939 xorl %edx,%edx # zero error code
11940 movl %esp,%eax # pt_regs pointer
11941 call do_nmi
11942+
11943+ pax_exit_kernel
11944+
11945 jmp restore_all_notrace
11946 CFI_ENDPROC
11947
11948@@ -1382,6 +1636,9 @@ nmi_espfix_stack:
11949 FIXUP_ESPFIX_STACK # %eax == %esp
11950 xorl %edx,%edx # zero error code
11951 call do_nmi
11952+
11953+ pax_exit_kernel
11954+
11955 RESTORE_REGS
11956 lss 12+4(%esp), %esp # back to espfix stack
11957 CFI_ADJUST_CFA_OFFSET -24
11958diff -urNp linux-3.0.4/arch/x86/kernel/entry_64.S linux-3.0.4/arch/x86/kernel/entry_64.S
11959--- linux-3.0.4/arch/x86/kernel/entry_64.S 2011-07-21 22:17:23.000000000 -0400
11960+++ linux-3.0.4/arch/x86/kernel/entry_64.S 2011-10-07 19:07:23.000000000 -0400
11961@@ -53,6 +53,8 @@
11962 #include <asm/paravirt.h>
11963 #include <asm/ftrace.h>
11964 #include <asm/percpu.h>
11965+#include <asm/pgtable.h>
11966+#include <asm/alternative-asm.h>
11967
11968 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
11969 #include <linux/elf-em.h>
11970@@ -66,6 +68,7 @@
11971 #ifdef CONFIG_FUNCTION_TRACER
11972 #ifdef CONFIG_DYNAMIC_FTRACE
11973 ENTRY(mcount)
11974+ pax_force_retaddr
11975 retq
11976 END(mcount)
11977
11978@@ -90,6 +93,7 @@ GLOBAL(ftrace_graph_call)
11979 #endif
11980
11981 GLOBAL(ftrace_stub)
11982+ pax_force_retaddr
11983 retq
11984 END(ftrace_caller)
11985
11986@@ -110,6 +114,7 @@ ENTRY(mcount)
11987 #endif
11988
11989 GLOBAL(ftrace_stub)
11990+ pax_force_retaddr
11991 retq
11992
11993 trace:
11994@@ -119,6 +124,7 @@ trace:
11995 movq 8(%rbp), %rsi
11996 subq $MCOUNT_INSN_SIZE, %rdi
11997
11998+ pax_force_fptr ftrace_trace_function
11999 call *ftrace_trace_function
12000
12001 MCOUNT_RESTORE_FRAME
12002@@ -144,6 +150,7 @@ ENTRY(ftrace_graph_caller)
12003
12004 MCOUNT_RESTORE_FRAME
12005
12006+ pax_force_retaddr
12007 retq
12008 END(ftrace_graph_caller)
12009
12010@@ -161,6 +168,7 @@ GLOBAL(return_to_handler)
12011 movq 8(%rsp), %rdx
12012 movq (%rsp), %rax
12013 addq $24, %rsp
12014+ pax_force_fptr %rdi
12015 jmp *%rdi
12016 #endif
12017
12018@@ -176,6 +184,269 @@ ENTRY(native_usergs_sysret64)
12019 ENDPROC(native_usergs_sysret64)
12020 #endif /* CONFIG_PARAVIRT */
12021
12022+ .macro ljmpq sel, off
12023+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
12024+ .byte 0x48; ljmp *1234f(%rip)
12025+ .pushsection .rodata
12026+ .align 16
12027+ 1234: .quad \off; .word \sel
12028+ .popsection
12029+#else
12030+ pushq $\sel
12031+ pushq $\off
12032+ lretq
12033+#endif
12034+ .endm
12035+
12036+ .macro pax_enter_kernel
12037+#ifdef CONFIG_PAX_KERNEXEC
12038+ call pax_enter_kernel
12039+#endif
12040+ .endm
12041+
12042+ .macro pax_exit_kernel
12043+#ifdef CONFIG_PAX_KERNEXEC
12044+ call pax_exit_kernel
12045+#endif
12046+ .endm
12047+
12048+#ifdef CONFIG_PAX_KERNEXEC
12049+ENTRY(pax_enter_kernel)
12050+ pushq %rdi
12051+
12052+#ifdef CONFIG_PARAVIRT
12053+ PV_SAVE_REGS(CLBR_RDI)
12054+#endif
12055+
12056+ GET_CR0_INTO_RDI
12057+ bts $16,%rdi
12058+ jnc 1f
12059+ mov %cs,%edi
12060+ cmp $__KERNEL_CS,%edi
12061+ jz 3f
12062+ ljmpq __KERNEL_CS,3f
12063+1: ljmpq __KERNEXEC_KERNEL_CS,2f
12064+2: SET_RDI_INTO_CR0
12065+3:
12066+
12067+#ifdef CONFIG_PARAVIRT
12068+ PV_RESTORE_REGS(CLBR_RDI)
12069+#endif
12070+
12071+ popq %rdi
12072+ pax_force_retaddr
12073+ retq
12074+ENDPROC(pax_enter_kernel)
12075+
12076+ENTRY(pax_exit_kernel)
12077+ pushq %rdi
12078+
12079+#ifdef CONFIG_PARAVIRT
12080+ PV_SAVE_REGS(CLBR_RDI)
12081+#endif
12082+
12083+ mov %cs,%rdi
12084+ cmp $__KERNEXEC_KERNEL_CS,%edi
12085+ jnz 2f
12086+ GET_CR0_INTO_RDI
12087+ btr $16,%rdi
12088+ ljmpq __KERNEL_CS,1f
12089+1: SET_RDI_INTO_CR0
12090+2:
12091+
12092+#ifdef CONFIG_PARAVIRT
12093+ PV_RESTORE_REGS(CLBR_RDI);
12094+#endif
12095+
12096+ popq %rdi
12097+ pax_force_retaddr
12098+ retq
12099+ENDPROC(pax_exit_kernel)
12100+#endif
12101+
12102+ .macro pax_enter_kernel_user
12103+#ifdef CONFIG_PAX_MEMORY_UDEREF
12104+ call pax_enter_kernel_user
12105+#endif
12106+ .endm
12107+
12108+ .macro pax_exit_kernel_user
12109+#ifdef CONFIG_PAX_MEMORY_UDEREF
12110+ call pax_exit_kernel_user
12111+#endif
12112+#ifdef CONFIG_PAX_RANDKSTACK
12113+ push %rax
12114+ call pax_randomize_kstack
12115+ pop %rax
12116+#endif
12117+ .endm
12118+
12119+#ifdef CONFIG_PAX_MEMORY_UDEREF
12120+ENTRY(pax_enter_kernel_user)
12121+ pushq %rdi
12122+ pushq %rbx
12123+
12124+#ifdef CONFIG_PARAVIRT
12125+ PV_SAVE_REGS(CLBR_RDI)
12126+#endif
12127+
12128+ GET_CR3_INTO_RDI
12129+ mov %rdi,%rbx
12130+ add $__START_KERNEL_map,%rbx
12131+ sub phys_base(%rip),%rbx
12132+
12133+#ifdef CONFIG_PARAVIRT
12134+ pushq %rdi
12135+ cmpl $0, pv_info+PARAVIRT_enabled
12136+ jz 1f
12137+ i = 0
12138+ .rept USER_PGD_PTRS
12139+ mov i*8(%rbx),%rsi
12140+ mov $0,%sil
12141+ lea i*8(%rbx),%rdi
12142+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
12143+ i = i + 1
12144+ .endr
12145+ jmp 2f
12146+1:
12147+#endif
12148+
12149+ i = 0
12150+ .rept USER_PGD_PTRS
12151+ movb $0,i*8(%rbx)
12152+ i = i + 1
12153+ .endr
12154+
12155+#ifdef CONFIG_PARAVIRT
12156+2: popq %rdi
12157+#endif
12158+ SET_RDI_INTO_CR3
12159+
12160+#ifdef CONFIG_PAX_KERNEXEC
12161+ GET_CR0_INTO_RDI
12162+ bts $16,%rdi
12163+ SET_RDI_INTO_CR0
12164+#endif
12165+
12166+#ifdef CONFIG_PARAVIRT
12167+ PV_RESTORE_REGS(CLBR_RDI)
12168+#endif
12169+
12170+ popq %rbx
12171+ popq %rdi
12172+ pax_force_retaddr
12173+ retq
12174+ENDPROC(pax_enter_kernel_user)
12175+
12176+ENTRY(pax_exit_kernel_user)
12177+ push %rdi
12178+
12179+#ifdef CONFIG_PARAVIRT
12180+ pushq %rbx
12181+ PV_SAVE_REGS(CLBR_RDI)
12182+#endif
12183+
12184+#ifdef CONFIG_PAX_KERNEXEC
12185+ GET_CR0_INTO_RDI
12186+ btr $16,%rdi
12187+ SET_RDI_INTO_CR0
12188+#endif
12189+
12190+ GET_CR3_INTO_RDI
12191+ add $__START_KERNEL_map,%rdi
12192+ sub phys_base(%rip),%rdi
12193+
12194+#ifdef CONFIG_PARAVIRT
12195+ cmpl $0, pv_info+PARAVIRT_enabled
12196+ jz 1f
12197+ mov %rdi,%rbx
12198+ i = 0
12199+ .rept USER_PGD_PTRS
12200+ mov i*8(%rbx),%rsi
12201+ mov $0x67,%sil
12202+ lea i*8(%rbx),%rdi
12203+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
12204+ i = i + 1
12205+ .endr
12206+ jmp 2f
12207+1:
12208+#endif
12209+
12210+ i = 0
12211+ .rept USER_PGD_PTRS
12212+ movb $0x67,i*8(%rdi)
12213+ i = i + 1
12214+ .endr
12215+
12216+#ifdef CONFIG_PARAVIRT
12217+2: PV_RESTORE_REGS(CLBR_RDI)
12218+ popq %rbx
12219+#endif
12220+
12221+ popq %rdi
12222+ pax_force_retaddr
12223+ retq
12224+ENDPROC(pax_exit_kernel_user)
12225+#endif
12226+
12227+ .macro pax_erase_kstack
12228+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12229+ call pax_erase_kstack
12230+#endif
12231+ .endm
12232+
12233+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12234+/*
12235+ * r10: thread_info
12236+ * rcx, rdx: can be clobbered
12237+ */
12238+ENTRY(pax_erase_kstack)
12239+ pushq %rdi
12240+ pushq %rax
12241+ pushq %r10
12242+
12243+ GET_THREAD_INFO(%r10)
12244+ mov TI_lowest_stack(%r10), %rdi
12245+ mov $-0xBEEF, %rax
12246+ std
12247+
12248+1: mov %edi, %ecx
12249+ and $THREAD_SIZE_asm - 1, %ecx
12250+ shr $3, %ecx
12251+ repne scasq
12252+ jecxz 2f
12253+
12254+ cmp $2*8, %ecx
12255+ jc 2f
12256+
12257+ mov $2*8, %ecx
12258+ repe scasq
12259+ jecxz 2f
12260+ jne 1b
12261+
12262+2: cld
12263+ mov %esp, %ecx
12264+ sub %edi, %ecx
12265+
12266+ cmp $THREAD_SIZE_asm, %rcx
12267+ jb 3f
12268+ ud2
12269+3:
12270+
12271+ shr $3, %ecx
12272+ rep stosq
12273+
12274+ mov TI_task_thread_sp0(%r10), %rdi
12275+ sub $256, %rdi
12276+ mov %rdi, TI_lowest_stack(%r10)
12277+
12278+ popq %r10
12279+ popq %rax
12280+ popq %rdi
12281+ pax_force_retaddr
12282+ ret
12283+ENDPROC(pax_erase_kstack)
12284+#endif
12285
12286 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
12287 #ifdef CONFIG_TRACE_IRQFLAGS
12288@@ -318,7 +589,7 @@ ENTRY(save_args)
12289 leaq -RBP+8(%rsp),%rdi /* arg1 for handler */
12290 movq_cfi rbp, 8 /* push %rbp */
12291 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
12292- testl $3, CS(%rdi)
12293+ testb $3, CS(%rdi)
12294 je 1f
12295 SWAPGS
12296 /*
12297@@ -338,6 +609,7 @@ ENTRY(save_args)
12298 * We entered an interrupt context - irqs are off:
12299 */
12300 2: TRACE_IRQS_OFF
12301+ pax_force_retaddr
12302 ret
12303 CFI_ENDPROC
12304 END(save_args)
12305@@ -354,6 +626,7 @@ ENTRY(save_rest)
12306 movq_cfi r15, R15+16
12307 movq %r11, 8(%rsp) /* return address */
12308 FIXUP_TOP_OF_STACK %r11, 16
12309+ pax_force_retaddr
12310 ret
12311 CFI_ENDPROC
12312 END(save_rest)
12313@@ -385,7 +658,8 @@ ENTRY(save_paranoid)
12314 js 1f /* negative -> in kernel */
12315 SWAPGS
12316 xorl %ebx,%ebx
12317-1: ret
12318+1: pax_force_retaddr
12319+ ret
12320 CFI_ENDPROC
12321 END(save_paranoid)
12322 .popsection
12323@@ -409,7 +683,7 @@ ENTRY(ret_from_fork)
12324
12325 RESTORE_REST
12326
12327- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12328+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12329 je int_ret_from_sys_call
12330
12331 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
12332@@ -455,7 +729,7 @@ END(ret_from_fork)
12333 ENTRY(system_call)
12334 CFI_STARTPROC simple
12335 CFI_SIGNAL_FRAME
12336- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
12337+ CFI_DEF_CFA rsp,0
12338 CFI_REGISTER rip,rcx
12339 /*CFI_REGISTER rflags,r11*/
12340 SWAPGS_UNSAFE_STACK
12341@@ -468,12 +742,13 @@ ENTRY(system_call_after_swapgs)
12342
12343 movq %rsp,PER_CPU_VAR(old_rsp)
12344 movq PER_CPU_VAR(kernel_stack),%rsp
12345+ pax_enter_kernel_user
12346 /*
12347 * No need to follow this irqs off/on section - it's straight
12348 * and short:
12349 */
12350 ENABLE_INTERRUPTS(CLBR_NONE)
12351- SAVE_ARGS 8,1
12352+ SAVE_ARGS 8*6,1
12353 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
12354 movq %rcx,RIP-ARGOFFSET(%rsp)
12355 CFI_REL_OFFSET rip,RIP-ARGOFFSET
12356@@ -502,6 +777,8 @@ sysret_check:
12357 andl %edi,%edx
12358 jnz sysret_careful
12359 CFI_REMEMBER_STATE
12360+ pax_exit_kernel_user
12361+ pax_erase_kstack
12362 /*
12363 * sysretq will re-enable interrupts:
12364 */
12365@@ -560,6 +837,9 @@ auditsys:
12366 movq %rax,%rsi /* 2nd arg: syscall number */
12367 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
12368 call audit_syscall_entry
12369+
12370+ pax_erase_kstack
12371+
12372 LOAD_ARGS 0 /* reload call-clobbered registers */
12373 jmp system_call_fastpath
12374
12375@@ -590,6 +870,9 @@ tracesys:
12376 FIXUP_TOP_OF_STACK %rdi
12377 movq %rsp,%rdi
12378 call syscall_trace_enter
12379+
12380+ pax_erase_kstack
12381+
12382 /*
12383 * Reload arg registers from stack in case ptrace changed them.
12384 * We don't reload %rax because syscall_trace_enter() returned
12385@@ -611,7 +894,7 @@ tracesys:
12386 GLOBAL(int_ret_from_sys_call)
12387 DISABLE_INTERRUPTS(CLBR_NONE)
12388 TRACE_IRQS_OFF
12389- testl $3,CS-ARGOFFSET(%rsp)
12390+ testb $3,CS-ARGOFFSET(%rsp)
12391 je retint_restore_args
12392 movl $_TIF_ALLWORK_MASK,%edi
12393 /* edi: mask to check */
12394@@ -702,6 +985,7 @@ ENTRY(ptregscall_common)
12395 movq_cfi_restore R12+8, r12
12396 movq_cfi_restore RBP+8, rbp
12397 movq_cfi_restore RBX+8, rbx
12398+ pax_force_retaddr
12399 ret $REST_SKIP /* pop extended registers */
12400 CFI_ENDPROC
12401 END(ptregscall_common)
12402@@ -793,6 +1077,16 @@ END(interrupt)
12403 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
12404 call save_args
12405 PARTIAL_FRAME 0
12406+#ifdef CONFIG_PAX_MEMORY_UDEREF
12407+ testb $3, CS(%rdi)
12408+ jnz 1f
12409+ pax_enter_kernel
12410+ jmp 2f
12411+1: pax_enter_kernel_user
12412+2:
12413+#else
12414+ pax_enter_kernel
12415+#endif
12416 call \func
12417 .endm
12418
12419@@ -825,7 +1119,7 @@ ret_from_intr:
12420 CFI_ADJUST_CFA_OFFSET -8
12421 exit_intr:
12422 GET_THREAD_INFO(%rcx)
12423- testl $3,CS-ARGOFFSET(%rsp)
12424+ testb $3,CS-ARGOFFSET(%rsp)
12425 je retint_kernel
12426
12427 /* Interrupt came from user space */
12428@@ -847,12 +1141,16 @@ retint_swapgs: /* return to user-space
12429 * The iretq could re-enable interrupts:
12430 */
12431 DISABLE_INTERRUPTS(CLBR_ANY)
12432+ pax_exit_kernel_user
12433+ pax_erase_kstack
12434 TRACE_IRQS_IRETQ
12435 SWAPGS
12436 jmp restore_args
12437
12438 retint_restore_args: /* return to kernel space */
12439 DISABLE_INTERRUPTS(CLBR_ANY)
12440+ pax_exit_kernel
12441+ pax_force_retaddr RIP-ARGOFFSET
12442 /*
12443 * The iretq could re-enable interrupts:
12444 */
12445@@ -1027,6 +1325,16 @@ ENTRY(\sym)
12446 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12447 call error_entry
12448 DEFAULT_FRAME 0
12449+#ifdef CONFIG_PAX_MEMORY_UDEREF
12450+ testb $3, CS(%rsp)
12451+ jnz 1f
12452+ pax_enter_kernel
12453+ jmp 2f
12454+1: pax_enter_kernel_user
12455+2:
12456+#else
12457+ pax_enter_kernel
12458+#endif
12459 movq %rsp,%rdi /* pt_regs pointer */
12460 xorl %esi,%esi /* no error code */
12461 call \do_sym
12462@@ -1044,6 +1352,16 @@ ENTRY(\sym)
12463 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12464 call save_paranoid
12465 TRACE_IRQS_OFF
12466+#ifdef CONFIG_PAX_MEMORY_UDEREF
12467+ testb $3, CS(%rsp)
12468+ jnz 1f
12469+ pax_enter_kernel
12470+ jmp 2f
12471+1: pax_enter_kernel_user
12472+2:
12473+#else
12474+ pax_enter_kernel
12475+#endif
12476 movq %rsp,%rdi /* pt_regs pointer */
12477 xorl %esi,%esi /* no error code */
12478 call \do_sym
12479@@ -1052,7 +1370,7 @@ ENTRY(\sym)
12480 END(\sym)
12481 .endm
12482
12483-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
12484+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
12485 .macro paranoidzeroentry_ist sym do_sym ist
12486 ENTRY(\sym)
12487 INTR_FRAME
12488@@ -1062,8 +1380,24 @@ ENTRY(\sym)
12489 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12490 call save_paranoid
12491 TRACE_IRQS_OFF
12492+#ifdef CONFIG_PAX_MEMORY_UDEREF
12493+ testb $3, CS(%rsp)
12494+ jnz 1f
12495+ pax_enter_kernel
12496+ jmp 2f
12497+1: pax_enter_kernel_user
12498+2:
12499+#else
12500+ pax_enter_kernel
12501+#endif
12502 movq %rsp,%rdi /* pt_regs pointer */
12503 xorl %esi,%esi /* no error code */
12504+#ifdef CONFIG_SMP
12505+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
12506+ lea init_tss(%r12), %r12
12507+#else
12508+ lea init_tss(%rip), %r12
12509+#endif
12510 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12511 call \do_sym
12512 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12513@@ -1080,6 +1414,16 @@ ENTRY(\sym)
12514 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12515 call error_entry
12516 DEFAULT_FRAME 0
12517+#ifdef CONFIG_PAX_MEMORY_UDEREF
12518+ testb $3, CS(%rsp)
12519+ jnz 1f
12520+ pax_enter_kernel
12521+ jmp 2f
12522+1: pax_enter_kernel_user
12523+2:
12524+#else
12525+ pax_enter_kernel
12526+#endif
12527 movq %rsp,%rdi /* pt_regs pointer */
12528 movq ORIG_RAX(%rsp),%rsi /* get error code */
12529 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12530@@ -1099,6 +1443,16 @@ ENTRY(\sym)
12531 call save_paranoid
12532 DEFAULT_FRAME 0
12533 TRACE_IRQS_OFF
12534+#ifdef CONFIG_PAX_MEMORY_UDEREF
12535+ testb $3, CS(%rsp)
12536+ jnz 1f
12537+ pax_enter_kernel
12538+ jmp 2f
12539+1: pax_enter_kernel_user
12540+2:
12541+#else
12542+ pax_enter_kernel
12543+#endif
12544 movq %rsp,%rdi /* pt_regs pointer */
12545 movq ORIG_RAX(%rsp),%rsi /* get error code */
12546 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12547@@ -1134,6 +1488,7 @@ gs_change:
12548 2: mfence /* workaround */
12549 SWAPGS
12550 popfq_cfi
12551+ pax_force_retaddr
12552 ret
12553 CFI_ENDPROC
12554 END(native_load_gs_index)
12555@@ -1158,6 +1513,7 @@ ENTRY(kernel_thread_helper)
12556 * Here we are in the child and the registers are set as they were
12557 * at kernel_thread() invocation in the parent.
12558 */
12559+ pax_force_fptr %rsi
12560 call *%rsi
12561 # exit
12562 mov %eax, %edi
12563@@ -1213,6 +1569,7 @@ ENTRY(call_softirq)
12564 CFI_DEF_CFA_REGISTER rsp
12565 CFI_ADJUST_CFA_OFFSET -8
12566 decl PER_CPU_VAR(irq_count)
12567+ pax_force_retaddr
12568 ret
12569 CFI_ENDPROC
12570 END(call_softirq)
12571@@ -1361,16 +1718,31 @@ ENTRY(paranoid_exit)
12572 TRACE_IRQS_OFF
12573 testl %ebx,%ebx /* swapgs needed? */
12574 jnz paranoid_restore
12575- testl $3,CS(%rsp)
12576+ testb $3,CS(%rsp)
12577 jnz paranoid_userspace
12578+#ifdef CONFIG_PAX_MEMORY_UDEREF
12579+ pax_exit_kernel
12580+ TRACE_IRQS_IRETQ 0
12581+ SWAPGS_UNSAFE_STACK
12582+ RESTORE_ALL 8
12583+ pax_force_retaddr
12584+ jmp irq_return
12585+#endif
12586 paranoid_swapgs:
12587+#ifdef CONFIG_PAX_MEMORY_UDEREF
12588+ pax_exit_kernel_user
12589+#else
12590+ pax_exit_kernel
12591+#endif
12592 TRACE_IRQS_IRETQ 0
12593 SWAPGS_UNSAFE_STACK
12594 RESTORE_ALL 8
12595 jmp irq_return
12596 paranoid_restore:
12597+ pax_exit_kernel
12598 TRACE_IRQS_IRETQ 0
12599 RESTORE_ALL 8
12600+ pax_force_retaddr
12601 jmp irq_return
12602 paranoid_userspace:
12603 GET_THREAD_INFO(%rcx)
12604@@ -1426,12 +1798,13 @@ ENTRY(error_entry)
12605 movq_cfi r14, R14+8
12606 movq_cfi r15, R15+8
12607 xorl %ebx,%ebx
12608- testl $3,CS+8(%rsp)
12609+ testb $3,CS+8(%rsp)
12610 je error_kernelspace
12611 error_swapgs:
12612 SWAPGS
12613 error_sti:
12614 TRACE_IRQS_OFF
12615+ pax_force_retaddr
12616 ret
12617
12618 /*
12619@@ -1490,6 +1863,16 @@ ENTRY(nmi)
12620 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12621 call save_paranoid
12622 DEFAULT_FRAME 0
12623+#ifdef CONFIG_PAX_MEMORY_UDEREF
12624+ testb $3, CS(%rsp)
12625+ jnz 1f
12626+ pax_enter_kernel
12627+ jmp 2f
12628+1: pax_enter_kernel_user
12629+2:
12630+#else
12631+ pax_enter_kernel
12632+#endif
12633 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
12634 movq %rsp,%rdi
12635 movq $-1,%rsi
12636@@ -1500,12 +1883,28 @@ ENTRY(nmi)
12637 DISABLE_INTERRUPTS(CLBR_NONE)
12638 testl %ebx,%ebx /* swapgs needed? */
12639 jnz nmi_restore
12640- testl $3,CS(%rsp)
12641+ testb $3,CS(%rsp)
12642 jnz nmi_userspace
12643+#ifdef CONFIG_PAX_MEMORY_UDEREF
12644+ pax_exit_kernel
12645+ SWAPGS_UNSAFE_STACK
12646+ RESTORE_ALL 8
12647+ pax_force_retaddr
12648+ jmp irq_return
12649+#endif
12650 nmi_swapgs:
12651+#ifdef CONFIG_PAX_MEMORY_UDEREF
12652+ pax_exit_kernel_user
12653+#else
12654+ pax_exit_kernel
12655+#endif
12656 SWAPGS_UNSAFE_STACK
12657+ RESTORE_ALL 8
12658+ jmp irq_return
12659 nmi_restore:
12660+ pax_exit_kernel
12661 RESTORE_ALL 8
12662+ pax_force_retaddr
12663 jmp irq_return
12664 nmi_userspace:
12665 GET_THREAD_INFO(%rcx)
12666diff -urNp linux-3.0.4/arch/x86/kernel/ftrace.c linux-3.0.4/arch/x86/kernel/ftrace.c
12667--- linux-3.0.4/arch/x86/kernel/ftrace.c 2011-07-21 22:17:23.000000000 -0400
12668+++ linux-3.0.4/arch/x86/kernel/ftrace.c 2011-08-23 21:47:55.000000000 -0400
12669@@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the
12670 static const void *mod_code_newcode; /* holds the text to write to the IP */
12671
12672 static unsigned nmi_wait_count;
12673-static atomic_t nmi_update_count = ATOMIC_INIT(0);
12674+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
12675
12676 int ftrace_arch_read_dyn_info(char *buf, int size)
12677 {
12678@@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf,
12679
12680 r = snprintf(buf, size, "%u %u",
12681 nmi_wait_count,
12682- atomic_read(&nmi_update_count));
12683+ atomic_read_unchecked(&nmi_update_count));
12684 return r;
12685 }
12686
12687@@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
12688
12689 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
12690 smp_rmb();
12691+ pax_open_kernel();
12692 ftrace_mod_code();
12693- atomic_inc(&nmi_update_count);
12694+ pax_close_kernel();
12695+ atomic_inc_unchecked(&nmi_update_count);
12696 }
12697 /* Must have previous changes seen before executions */
12698 smp_mb();
12699@@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, uns
12700 {
12701 unsigned char replaced[MCOUNT_INSN_SIZE];
12702
12703+ ip = ktla_ktva(ip);
12704+
12705 /*
12706 * Note: Due to modules and __init, code can
12707 * disappear and change, we need to protect against faulting
12708@@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_fun
12709 unsigned char old[MCOUNT_INSN_SIZE], *new;
12710 int ret;
12711
12712- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
12713+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
12714 new = ftrace_call_replace(ip, (unsigned long)func);
12715 ret = ftrace_modify_code(ip, old, new);
12716
12717@@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long
12718 {
12719 unsigned char code[MCOUNT_INSN_SIZE];
12720
12721+ ip = ktla_ktva(ip);
12722+
12723 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
12724 return -EFAULT;
12725
12726diff -urNp linux-3.0.4/arch/x86/kernel/head32.c linux-3.0.4/arch/x86/kernel/head32.c
12727--- linux-3.0.4/arch/x86/kernel/head32.c 2011-07-21 22:17:23.000000000 -0400
12728+++ linux-3.0.4/arch/x86/kernel/head32.c 2011-08-23 21:47:55.000000000 -0400
12729@@ -19,6 +19,7 @@
12730 #include <asm/io_apic.h>
12731 #include <asm/bios_ebda.h>
12732 #include <asm/tlbflush.h>
12733+#include <asm/boot.h>
12734
12735 static void __init i386_default_early_setup(void)
12736 {
12737@@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
12738 {
12739 memblock_init();
12740
12741- memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12742+ memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12743
12744 #ifdef CONFIG_BLK_DEV_INITRD
12745 /* Reserve INITRD */
12746diff -urNp linux-3.0.4/arch/x86/kernel/head_32.S linux-3.0.4/arch/x86/kernel/head_32.S
12747--- linux-3.0.4/arch/x86/kernel/head_32.S 2011-07-21 22:17:23.000000000 -0400
12748+++ linux-3.0.4/arch/x86/kernel/head_32.S 2011-08-23 21:47:55.000000000 -0400
12749@@ -25,6 +25,12 @@
12750 /* Physical address */
12751 #define pa(X) ((X) - __PAGE_OFFSET)
12752
12753+#ifdef CONFIG_PAX_KERNEXEC
12754+#define ta(X) (X)
12755+#else
12756+#define ta(X) ((X) - __PAGE_OFFSET)
12757+#endif
12758+
12759 /*
12760 * References to members of the new_cpu_data structure.
12761 */
12762@@ -54,11 +60,7 @@
12763 * and small than max_low_pfn, otherwise will waste some page table entries
12764 */
12765
12766-#if PTRS_PER_PMD > 1
12767-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
12768-#else
12769-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
12770-#endif
12771+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
12772
12773 /* Number of possible pages in the lowmem region */
12774 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
12775@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
12776 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12777
12778 /*
12779+ * Real beginning of normal "text" segment
12780+ */
12781+ENTRY(stext)
12782+ENTRY(_stext)
12783+
12784+/*
12785 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
12786 * %esi points to the real-mode code as a 32-bit pointer.
12787 * CS and DS must be 4 GB flat segments, but we don't depend on
12788@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12789 * can.
12790 */
12791 __HEAD
12792+
12793+#ifdef CONFIG_PAX_KERNEXEC
12794+ jmp startup_32
12795+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
12796+.fill PAGE_SIZE-5,1,0xcc
12797+#endif
12798+
12799 ENTRY(startup_32)
12800 movl pa(stack_start),%ecx
12801
12802@@ -105,6 +120,57 @@ ENTRY(startup_32)
12803 2:
12804 leal -__PAGE_OFFSET(%ecx),%esp
12805
12806+#ifdef CONFIG_SMP
12807+ movl $pa(cpu_gdt_table),%edi
12808+ movl $__per_cpu_load,%eax
12809+ movw %ax,__KERNEL_PERCPU + 2(%edi)
12810+ rorl $16,%eax
12811+ movb %al,__KERNEL_PERCPU + 4(%edi)
12812+ movb %ah,__KERNEL_PERCPU + 7(%edi)
12813+ movl $__per_cpu_end - 1,%eax
12814+ subl $__per_cpu_start,%eax
12815+ movw %ax,__KERNEL_PERCPU + 0(%edi)
12816+#endif
12817+
12818+#ifdef CONFIG_PAX_MEMORY_UDEREF
12819+ movl $NR_CPUS,%ecx
12820+ movl $pa(cpu_gdt_table),%edi
12821+1:
12822+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
12823+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
12824+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
12825+ addl $PAGE_SIZE_asm,%edi
12826+ loop 1b
12827+#endif
12828+
12829+#ifdef CONFIG_PAX_KERNEXEC
12830+ movl $pa(boot_gdt),%edi
12831+ movl $__LOAD_PHYSICAL_ADDR,%eax
12832+ movw %ax,__BOOT_CS + 2(%edi)
12833+ rorl $16,%eax
12834+ movb %al,__BOOT_CS + 4(%edi)
12835+ movb %ah,__BOOT_CS + 7(%edi)
12836+ rorl $16,%eax
12837+
12838+ ljmp $(__BOOT_CS),$1f
12839+1:
12840+
12841+ movl $NR_CPUS,%ecx
12842+ movl $pa(cpu_gdt_table),%edi
12843+ addl $__PAGE_OFFSET,%eax
12844+1:
12845+ movw %ax,__KERNEL_CS + 2(%edi)
12846+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
12847+ rorl $16,%eax
12848+ movb %al,__KERNEL_CS + 4(%edi)
12849+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
12850+ movb %ah,__KERNEL_CS + 7(%edi)
12851+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
12852+ rorl $16,%eax
12853+ addl $PAGE_SIZE_asm,%edi
12854+ loop 1b
12855+#endif
12856+
12857 /*
12858 * Clear BSS first so that there are no surprises...
12859 */
12860@@ -195,8 +261,11 @@ ENTRY(startup_32)
12861 movl %eax, pa(max_pfn_mapped)
12862
12863 /* Do early initialization of the fixmap area */
12864- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12865- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
12866+#ifdef CONFIG_COMPAT_VDSO
12867+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
12868+#else
12869+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
12870+#endif
12871 #else /* Not PAE */
12872
12873 page_pde_offset = (__PAGE_OFFSET >> 20);
12874@@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12875 movl %eax, pa(max_pfn_mapped)
12876
12877 /* Do early initialization of the fixmap area */
12878- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12879- movl %eax,pa(initial_page_table+0xffc)
12880+#ifdef CONFIG_COMPAT_VDSO
12881+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
12882+#else
12883+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
12884+#endif
12885 #endif
12886
12887 #ifdef CONFIG_PARAVIRT
12888@@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12889 cmpl $num_subarch_entries, %eax
12890 jae bad_subarch
12891
12892- movl pa(subarch_entries)(,%eax,4), %eax
12893- subl $__PAGE_OFFSET, %eax
12894- jmp *%eax
12895+ jmp *pa(subarch_entries)(,%eax,4)
12896
12897 bad_subarch:
12898 WEAK(lguest_entry)
12899@@ -255,10 +325,10 @@ WEAK(xen_entry)
12900 __INITDATA
12901
12902 subarch_entries:
12903- .long default_entry /* normal x86/PC */
12904- .long lguest_entry /* lguest hypervisor */
12905- .long xen_entry /* Xen hypervisor */
12906- .long default_entry /* Moorestown MID */
12907+ .long ta(default_entry) /* normal x86/PC */
12908+ .long ta(lguest_entry) /* lguest hypervisor */
12909+ .long ta(xen_entry) /* Xen hypervisor */
12910+ .long ta(default_entry) /* Moorestown MID */
12911 num_subarch_entries = (. - subarch_entries) / 4
12912 .previous
12913 #else
12914@@ -312,6 +382,7 @@ default_entry:
12915 orl %edx,%eax
12916 movl %eax,%cr4
12917
12918+#ifdef CONFIG_X86_PAE
12919 testb $X86_CR4_PAE, %al # check if PAE is enabled
12920 jz 6f
12921
12922@@ -340,6 +411,9 @@ default_entry:
12923 /* Make changes effective */
12924 wrmsr
12925
12926+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
12927+#endif
12928+
12929 6:
12930
12931 /*
12932@@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
12933 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
12934 movl %eax,%ss # after changing gdt.
12935
12936- movl $(__USER_DS),%eax # DS/ES contains default USER segment
12937+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
12938 movl %eax,%ds
12939 movl %eax,%es
12940
12941@@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
12942 */
12943 cmpb $0,ready
12944 jne 1f
12945- movl $gdt_page,%eax
12946+ movl $cpu_gdt_table,%eax
12947 movl $stack_canary,%ecx
12948+#ifdef CONFIG_SMP
12949+ addl $__per_cpu_load,%ecx
12950+#endif
12951 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
12952 shrl $16, %ecx
12953 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
12954 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
12955 1:
12956-#endif
12957 movl $(__KERNEL_STACK_CANARY),%eax
12958+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12959+ movl $(__USER_DS),%eax
12960+#else
12961+ xorl %eax,%eax
12962+#endif
12963 movl %eax,%gs
12964
12965 xorl %eax,%eax # Clear LDT
12966@@ -558,22 +639,22 @@ early_page_fault:
12967 jmp early_fault
12968
12969 early_fault:
12970- cld
12971 #ifdef CONFIG_PRINTK
12972+ cmpl $1,%ss:early_recursion_flag
12973+ je hlt_loop
12974+ incl %ss:early_recursion_flag
12975+ cld
12976 pusha
12977 movl $(__KERNEL_DS),%eax
12978 movl %eax,%ds
12979 movl %eax,%es
12980- cmpl $2,early_recursion_flag
12981- je hlt_loop
12982- incl early_recursion_flag
12983 movl %cr2,%eax
12984 pushl %eax
12985 pushl %edx /* trapno */
12986 pushl $fault_msg
12987 call printk
12988+; call dump_stack
12989 #endif
12990- call dump_stack
12991 hlt_loop:
12992 hlt
12993 jmp hlt_loop
12994@@ -581,8 +662,11 @@ hlt_loop:
12995 /* This is the default interrupt "handler" :-) */
12996 ALIGN
12997 ignore_int:
12998- cld
12999 #ifdef CONFIG_PRINTK
13000+ cmpl $2,%ss:early_recursion_flag
13001+ je hlt_loop
13002+ incl %ss:early_recursion_flag
13003+ cld
13004 pushl %eax
13005 pushl %ecx
13006 pushl %edx
13007@@ -591,9 +675,6 @@ ignore_int:
13008 movl $(__KERNEL_DS),%eax
13009 movl %eax,%ds
13010 movl %eax,%es
13011- cmpl $2,early_recursion_flag
13012- je hlt_loop
13013- incl early_recursion_flag
13014 pushl 16(%esp)
13015 pushl 24(%esp)
13016 pushl 32(%esp)
13017@@ -622,29 +703,43 @@ ENTRY(initial_code)
13018 /*
13019 * BSS section
13020 */
13021-__PAGE_ALIGNED_BSS
13022- .align PAGE_SIZE
13023 #ifdef CONFIG_X86_PAE
13024+.section .initial_pg_pmd,"a",@progbits
13025 initial_pg_pmd:
13026 .fill 1024*KPMDS,4,0
13027 #else
13028+.section .initial_page_table,"a",@progbits
13029 ENTRY(initial_page_table)
13030 .fill 1024,4,0
13031 #endif
13032+.section .initial_pg_fixmap,"a",@progbits
13033 initial_pg_fixmap:
13034 .fill 1024,4,0
13035+.section .empty_zero_page,"a",@progbits
13036 ENTRY(empty_zero_page)
13037 .fill 4096,1,0
13038+.section .swapper_pg_dir,"a",@progbits
13039 ENTRY(swapper_pg_dir)
13040+#ifdef CONFIG_X86_PAE
13041+ .fill 4,8,0
13042+#else
13043 .fill 1024,4,0
13044+#endif
13045+
13046+/*
13047+ * The IDT has to be page-aligned to simplify the Pentium
13048+ * F0 0F bug workaround.. We have a special link segment
13049+ * for this.
13050+ */
13051+.section .idt,"a",@progbits
13052+ENTRY(idt_table)
13053+ .fill 256,8,0
13054
13055 /*
13056 * This starts the data section.
13057 */
13058 #ifdef CONFIG_X86_PAE
13059-__PAGE_ALIGNED_DATA
13060- /* Page-aligned for the benefit of paravirt? */
13061- .align PAGE_SIZE
13062+.section .initial_page_table,"a",@progbits
13063 ENTRY(initial_page_table)
13064 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
13065 # if KPMDS == 3
13066@@ -663,18 +758,27 @@ ENTRY(initial_page_table)
13067 # error "Kernel PMDs should be 1, 2 or 3"
13068 # endif
13069 .align PAGE_SIZE /* needs to be page-sized too */
13070+
13071+#ifdef CONFIG_PAX_PER_CPU_PGD
13072+ENTRY(cpu_pgd)
13073+ .rept NR_CPUS
13074+ .fill 4,8,0
13075+ .endr
13076+#endif
13077+
13078 #endif
13079
13080 .data
13081 .balign 4
13082 ENTRY(stack_start)
13083- .long init_thread_union+THREAD_SIZE
13084+ .long init_thread_union+THREAD_SIZE-8
13085+
13086+ready: .byte 0
13087
13088+.section .rodata,"a",@progbits
13089 early_recursion_flag:
13090 .long 0
13091
13092-ready: .byte 0
13093-
13094 int_msg:
13095 .asciz "Unknown interrupt or fault at: %p %p %p\n"
13096
13097@@ -707,7 +811,7 @@ fault_msg:
13098 .word 0 # 32 bit align gdt_desc.address
13099 boot_gdt_descr:
13100 .word __BOOT_DS+7
13101- .long boot_gdt - __PAGE_OFFSET
13102+ .long pa(boot_gdt)
13103
13104 .word 0 # 32-bit align idt_desc.address
13105 idt_descr:
13106@@ -718,7 +822,7 @@ idt_descr:
13107 .word 0 # 32 bit align gdt_desc.address
13108 ENTRY(early_gdt_descr)
13109 .word GDT_ENTRIES*8-1
13110- .long gdt_page /* Overwritten for secondary CPUs */
13111+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
13112
13113 /*
13114 * The boot_gdt must mirror the equivalent in setup.S and is
13115@@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
13116 .align L1_CACHE_BYTES
13117 ENTRY(boot_gdt)
13118 .fill GDT_ENTRY_BOOT_CS,8,0
13119- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
13120- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
13121+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
13122+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
13123+
13124+ .align PAGE_SIZE_asm
13125+ENTRY(cpu_gdt_table)
13126+ .rept NR_CPUS
13127+ .quad 0x0000000000000000 /* NULL descriptor */
13128+ .quad 0x0000000000000000 /* 0x0b reserved */
13129+ .quad 0x0000000000000000 /* 0x13 reserved */
13130+ .quad 0x0000000000000000 /* 0x1b reserved */
13131+
13132+#ifdef CONFIG_PAX_KERNEXEC
13133+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
13134+#else
13135+ .quad 0x0000000000000000 /* 0x20 unused */
13136+#endif
13137+
13138+ .quad 0x0000000000000000 /* 0x28 unused */
13139+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
13140+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
13141+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
13142+ .quad 0x0000000000000000 /* 0x4b reserved */
13143+ .quad 0x0000000000000000 /* 0x53 reserved */
13144+ .quad 0x0000000000000000 /* 0x5b reserved */
13145+
13146+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
13147+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
13148+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
13149+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
13150+
13151+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
13152+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
13153+
13154+ /*
13155+ * Segments used for calling PnP BIOS have byte granularity.
13156+ * The code segments and data segments have fixed 64k limits,
13157+ * the transfer segment sizes are set at run time.
13158+ */
13159+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
13160+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
13161+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
13162+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
13163+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
13164+
13165+ /*
13166+ * The APM segments have byte granularity and their bases
13167+ * are set at run time. All have 64k limits.
13168+ */
13169+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
13170+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
13171+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
13172+
13173+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
13174+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
13175+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
13176+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
13177+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
13178+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
13179+
13180+ /* Be sure this is zeroed to avoid false validations in Xen */
13181+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
13182+ .endr
13183diff -urNp linux-3.0.4/arch/x86/kernel/head_64.S linux-3.0.4/arch/x86/kernel/head_64.S
13184--- linux-3.0.4/arch/x86/kernel/head_64.S 2011-07-21 22:17:23.000000000 -0400
13185+++ linux-3.0.4/arch/x86/kernel/head_64.S 2011-08-23 21:47:55.000000000 -0400
13186@@ -19,6 +19,7 @@
13187 #include <asm/cache.h>
13188 #include <asm/processor-flags.h>
13189 #include <asm/percpu.h>
13190+#include <asm/cpufeature.h>
13191
13192 #ifdef CONFIG_PARAVIRT
13193 #include <asm/asm-offsets.h>
13194@@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
13195 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
13196 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
13197 L3_START_KERNEL = pud_index(__START_KERNEL_map)
13198+L4_VMALLOC_START = pgd_index(VMALLOC_START)
13199+L3_VMALLOC_START = pud_index(VMALLOC_START)
13200+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
13201+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
13202
13203 .text
13204 __HEAD
13205@@ -85,35 +90,22 @@ startup_64:
13206 */
13207 addq %rbp, init_level4_pgt + 0(%rip)
13208 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
13209+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
13210+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
13211 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
13212
13213 addq %rbp, level3_ident_pgt + 0(%rip)
13214+#ifndef CONFIG_XEN
13215+ addq %rbp, level3_ident_pgt + 8(%rip)
13216+#endif
13217
13218- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
13219- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
13220+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
13221
13222- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
13223+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
13224+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
13225
13226- /* Add an Identity mapping if I am above 1G */
13227- leaq _text(%rip), %rdi
13228- andq $PMD_PAGE_MASK, %rdi
13229-
13230- movq %rdi, %rax
13231- shrq $PUD_SHIFT, %rax
13232- andq $(PTRS_PER_PUD - 1), %rax
13233- jz ident_complete
13234-
13235- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
13236- leaq level3_ident_pgt(%rip), %rbx
13237- movq %rdx, 0(%rbx, %rax, 8)
13238-
13239- movq %rdi, %rax
13240- shrq $PMD_SHIFT, %rax
13241- andq $(PTRS_PER_PMD - 1), %rax
13242- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
13243- leaq level2_spare_pgt(%rip), %rbx
13244- movq %rdx, 0(%rbx, %rax, 8)
13245-ident_complete:
13246+ addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
13247+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
13248
13249 /*
13250 * Fixup the kernel text+data virtual addresses. Note that
13251@@ -160,8 +152,8 @@ ENTRY(secondary_startup_64)
13252 * after the boot processor executes this code.
13253 */
13254
13255- /* Enable PAE mode and PGE */
13256- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
13257+ /* Enable PAE mode and PSE/PGE */
13258+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
13259 movq %rax, %cr4
13260
13261 /* Setup early boot stage 4 level pagetables. */
13262@@ -183,9 +175,14 @@ ENTRY(secondary_startup_64)
13263 movl $MSR_EFER, %ecx
13264 rdmsr
13265 btsl $_EFER_SCE, %eax /* Enable System Call */
13266- btl $20,%edi /* No Execute supported? */
13267+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
13268 jnc 1f
13269 btsl $_EFER_NX, %eax
13270+ leaq init_level4_pgt(%rip), %rdi
13271+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
13272+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
13273+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
13274+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
13275 1: wrmsr /* Make changes effective */
13276
13277 /* Setup cr0 */
13278@@ -269,7 +266,7 @@ ENTRY(secondary_startup_64)
13279 bad_address:
13280 jmp bad_address
13281
13282- .section ".init.text","ax"
13283+ __INIT
13284 #ifdef CONFIG_EARLY_PRINTK
13285 .globl early_idt_handlers
13286 early_idt_handlers:
13287@@ -314,18 +311,23 @@ ENTRY(early_idt_handler)
13288 #endif /* EARLY_PRINTK */
13289 1: hlt
13290 jmp 1b
13291+ .previous
13292
13293 #ifdef CONFIG_EARLY_PRINTK
13294+ __INITDATA
13295 early_recursion_flag:
13296 .long 0
13297+ .previous
13298
13299+ .section .rodata,"a",@progbits
13300 early_idt_msg:
13301 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
13302 early_idt_ripmsg:
13303 .asciz "RIP %s\n"
13304-#endif /* CONFIG_EARLY_PRINTK */
13305 .previous
13306+#endif /* CONFIG_EARLY_PRINTK */
13307
13308+ .section .rodata,"a",@progbits
13309 #define NEXT_PAGE(name) \
13310 .balign PAGE_SIZE; \
13311 ENTRY(name)
13312@@ -338,7 +340,6 @@ ENTRY(name)
13313 i = i + 1 ; \
13314 .endr
13315
13316- .data
13317 /*
13318 * This default setting generates an ident mapping at address 0x100000
13319 * and a mapping for the kernel that precisely maps virtual address
13320@@ -349,13 +350,36 @@ NEXT_PAGE(init_level4_pgt)
13321 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13322 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
13323 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13324+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
13325+ .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
13326+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
13327+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13328 .org init_level4_pgt + L4_START_KERNEL*8, 0
13329 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
13330 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
13331
13332+#ifdef CONFIG_PAX_PER_CPU_PGD
13333+NEXT_PAGE(cpu_pgd)
13334+ .rept NR_CPUS
13335+ .fill 512,8,0
13336+ .endr
13337+#endif
13338+
13339 NEXT_PAGE(level3_ident_pgt)
13340 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13341+#ifdef CONFIG_XEN
13342 .fill 511,8,0
13343+#else
13344+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
13345+ .fill 510,8,0
13346+#endif
13347+
13348+NEXT_PAGE(level3_vmalloc_pgt)
13349+ .fill 512,8,0
13350+
13351+NEXT_PAGE(level3_vmemmap_pgt)
13352+ .fill L3_VMEMMAP_START,8,0
13353+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13354
13355 NEXT_PAGE(level3_kernel_pgt)
13356 .fill L3_START_KERNEL,8,0
13357@@ -363,20 +387,23 @@ NEXT_PAGE(level3_kernel_pgt)
13358 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
13359 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13360
13361+NEXT_PAGE(level2_vmemmap_pgt)
13362+ .fill 512,8,0
13363+
13364 NEXT_PAGE(level2_fixmap_pgt)
13365- .fill 506,8,0
13366- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13367- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
13368- .fill 5,8,0
13369+ .fill 507,8,0
13370+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
13371+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
13372+ .fill 4,8,0
13373
13374-NEXT_PAGE(level1_fixmap_pgt)
13375+NEXT_PAGE(level1_vsyscall_pgt)
13376 .fill 512,8,0
13377
13378-NEXT_PAGE(level2_ident_pgt)
13379- /* Since I easily can, map the first 1G.
13380+ /* Since I easily can, map the first 2G.
13381 * Don't set NX because code runs from these pages.
13382 */
13383- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
13384+NEXT_PAGE(level2_ident_pgt)
13385+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
13386
13387 NEXT_PAGE(level2_kernel_pgt)
13388 /*
13389@@ -389,33 +416,55 @@ NEXT_PAGE(level2_kernel_pgt)
13390 * If you want to increase this then increase MODULES_VADDR
13391 * too.)
13392 */
13393- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
13394- KERNEL_IMAGE_SIZE/PMD_SIZE)
13395-
13396-NEXT_PAGE(level2_spare_pgt)
13397- .fill 512, 8, 0
13398+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
13399
13400 #undef PMDS
13401 #undef NEXT_PAGE
13402
13403- .data
13404+ .align PAGE_SIZE
13405+ENTRY(cpu_gdt_table)
13406+ .rept NR_CPUS
13407+ .quad 0x0000000000000000 /* NULL descriptor */
13408+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
13409+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
13410+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
13411+ .quad 0x00cffb000000ffff /* __USER32_CS */
13412+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
13413+ .quad 0x00affb000000ffff /* __USER_CS */
13414+
13415+#ifdef CONFIG_PAX_KERNEXEC
13416+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
13417+#else
13418+ .quad 0x0 /* unused */
13419+#endif
13420+
13421+ .quad 0,0 /* TSS */
13422+ .quad 0,0 /* LDT */
13423+ .quad 0,0,0 /* three TLS descriptors */
13424+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
13425+ /* asm/segment.h:GDT_ENTRIES must match this */
13426+
13427+ /* zero the remaining page */
13428+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
13429+ .endr
13430+
13431 .align 16
13432 .globl early_gdt_descr
13433 early_gdt_descr:
13434 .word GDT_ENTRIES*8-1
13435 early_gdt_descr_base:
13436- .quad INIT_PER_CPU_VAR(gdt_page)
13437+ .quad cpu_gdt_table
13438
13439 ENTRY(phys_base)
13440 /* This must match the first entry in level2_kernel_pgt */
13441 .quad 0x0000000000000000
13442
13443 #include "../../x86/xen/xen-head.S"
13444-
13445- .section .bss, "aw", @nobits
13446+
13447+ .section .rodata,"a",@progbits
13448 .align L1_CACHE_BYTES
13449 ENTRY(idt_table)
13450- .skip IDT_ENTRIES * 16
13451+ .fill 512,8,0
13452
13453 __PAGE_ALIGNED_BSS
13454 .align PAGE_SIZE
13455diff -urNp linux-3.0.4/arch/x86/kernel/i386_ksyms_32.c linux-3.0.4/arch/x86/kernel/i386_ksyms_32.c
13456--- linux-3.0.4/arch/x86/kernel/i386_ksyms_32.c 2011-07-21 22:17:23.000000000 -0400
13457+++ linux-3.0.4/arch/x86/kernel/i386_ksyms_32.c 2011-08-23 21:47:55.000000000 -0400
13458@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
13459 EXPORT_SYMBOL(cmpxchg8b_emu);
13460 #endif
13461
13462+EXPORT_SYMBOL_GPL(cpu_gdt_table);
13463+
13464 /* Networking helper routines. */
13465 EXPORT_SYMBOL(csum_partial_copy_generic);
13466+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
13467+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
13468
13469 EXPORT_SYMBOL(__get_user_1);
13470 EXPORT_SYMBOL(__get_user_2);
13471@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
13472
13473 EXPORT_SYMBOL(csum_partial);
13474 EXPORT_SYMBOL(empty_zero_page);
13475+
13476+#ifdef CONFIG_PAX_KERNEXEC
13477+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
13478+#endif
13479diff -urNp linux-3.0.4/arch/x86/kernel/i8259.c linux-3.0.4/arch/x86/kernel/i8259.c
13480--- linux-3.0.4/arch/x86/kernel/i8259.c 2011-07-21 22:17:23.000000000 -0400
13481+++ linux-3.0.4/arch/x86/kernel/i8259.c 2011-08-23 21:47:55.000000000 -0400
13482@@ -210,7 +210,7 @@ spurious_8259A_irq:
13483 "spurious 8259A interrupt: IRQ%d.\n", irq);
13484 spurious_irq_mask |= irqmask;
13485 }
13486- atomic_inc(&irq_err_count);
13487+ atomic_inc_unchecked(&irq_err_count);
13488 /*
13489 * Theoretically we do not have to handle this IRQ,
13490 * but in Linux this does not cause problems and is
13491diff -urNp linux-3.0.4/arch/x86/kernel/init_task.c linux-3.0.4/arch/x86/kernel/init_task.c
13492--- linux-3.0.4/arch/x86/kernel/init_task.c 2011-07-21 22:17:23.000000000 -0400
13493+++ linux-3.0.4/arch/x86/kernel/init_task.c 2011-08-23 21:47:55.000000000 -0400
13494@@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
13495 * way process stacks are handled. This is done by having a special
13496 * "init_task" linker map entry..
13497 */
13498-union thread_union init_thread_union __init_task_data =
13499- { INIT_THREAD_INFO(init_task) };
13500+union thread_union init_thread_union __init_task_data;
13501
13502 /*
13503 * Initial task structure.
13504@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
13505 * section. Since TSS's are completely CPU-local, we want them
13506 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
13507 */
13508-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
13509-
13510+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
13511+EXPORT_SYMBOL(init_tss);
13512diff -urNp linux-3.0.4/arch/x86/kernel/ioport.c linux-3.0.4/arch/x86/kernel/ioport.c
13513--- linux-3.0.4/arch/x86/kernel/ioport.c 2011-07-21 22:17:23.000000000 -0400
13514+++ linux-3.0.4/arch/x86/kernel/ioport.c 2011-08-23 21:48:14.000000000 -0400
13515@@ -6,6 +6,7 @@
13516 #include <linux/sched.h>
13517 #include <linux/kernel.h>
13518 #include <linux/capability.h>
13519+#include <linux/security.h>
13520 #include <linux/errno.h>
13521 #include <linux/types.h>
13522 #include <linux/ioport.h>
13523@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long
13524
13525 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
13526 return -EINVAL;
13527+#ifdef CONFIG_GRKERNSEC_IO
13528+ if (turn_on && grsec_disable_privio) {
13529+ gr_handle_ioperm();
13530+ return -EPERM;
13531+ }
13532+#endif
13533 if (turn_on && !capable(CAP_SYS_RAWIO))
13534 return -EPERM;
13535
13536@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long
13537 * because the ->io_bitmap_max value must match the bitmap
13538 * contents:
13539 */
13540- tss = &per_cpu(init_tss, get_cpu());
13541+ tss = init_tss + get_cpu();
13542
13543 if (turn_on)
13544 bitmap_clear(t->io_bitmap_ptr, from, num);
13545@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct
13546 return -EINVAL;
13547 /* Trying to gain more privileges? */
13548 if (level > old) {
13549+#ifdef CONFIG_GRKERNSEC_IO
13550+ if (grsec_disable_privio) {
13551+ gr_handle_iopl();
13552+ return -EPERM;
13553+ }
13554+#endif
13555 if (!capable(CAP_SYS_RAWIO))
13556 return -EPERM;
13557 }
13558diff -urNp linux-3.0.4/arch/x86/kernel/irq_32.c linux-3.0.4/arch/x86/kernel/irq_32.c
13559--- linux-3.0.4/arch/x86/kernel/irq_32.c 2011-07-21 22:17:23.000000000 -0400
13560+++ linux-3.0.4/arch/x86/kernel/irq_32.c 2011-08-23 21:47:55.000000000 -0400
13561@@ -36,7 +36,7 @@ static int check_stack_overflow(void)
13562 __asm__ __volatile__("andl %%esp,%0" :
13563 "=r" (sp) : "0" (THREAD_SIZE - 1));
13564
13565- return sp < (sizeof(struct thread_info) + STACK_WARN);
13566+ return sp < STACK_WARN;
13567 }
13568
13569 static void print_stack_overflow(void)
13570@@ -54,8 +54,8 @@ static inline void print_stack_overflow(
13571 * per-CPU IRQ handling contexts (thread information and stack)
13572 */
13573 union irq_ctx {
13574- struct thread_info tinfo;
13575- u32 stack[THREAD_SIZE/sizeof(u32)];
13576+ unsigned long previous_esp;
13577+ u32 stack[THREAD_SIZE/sizeof(u32)];
13578 } __attribute__((aligned(THREAD_SIZE)));
13579
13580 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
13581@@ -75,10 +75,9 @@ static void call_on_stack(void *func, vo
13582 static inline int
13583 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
13584 {
13585- union irq_ctx *curctx, *irqctx;
13586+ union irq_ctx *irqctx;
13587 u32 *isp, arg1, arg2;
13588
13589- curctx = (union irq_ctx *) current_thread_info();
13590 irqctx = __this_cpu_read(hardirq_ctx);
13591
13592 /*
13593@@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struc
13594 * handler) we can't do that and just have to keep using the
13595 * current stack (which is the irq stack already after all)
13596 */
13597- if (unlikely(curctx == irqctx))
13598+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
13599 return 0;
13600
13601 /* build the stack frame on the IRQ stack */
13602- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13603- irqctx->tinfo.task = curctx->tinfo.task;
13604- irqctx->tinfo.previous_esp = current_stack_pointer;
13605+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13606+ irqctx->previous_esp = current_stack_pointer;
13607
13608- /*
13609- * Copy the softirq bits in preempt_count so that the
13610- * softirq checks work in the hardirq context.
13611- */
13612- irqctx->tinfo.preempt_count =
13613- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
13614- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
13615+#ifdef CONFIG_PAX_MEMORY_UDEREF
13616+ __set_fs(MAKE_MM_SEG(0));
13617+#endif
13618
13619 if (unlikely(overflow))
13620 call_on_stack(print_stack_overflow, isp);
13621@@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struc
13622 : "0" (irq), "1" (desc), "2" (isp),
13623 "D" (desc->handle_irq)
13624 : "memory", "cc", "ecx");
13625+
13626+#ifdef CONFIG_PAX_MEMORY_UDEREF
13627+ __set_fs(current_thread_info()->addr_limit);
13628+#endif
13629+
13630 return 1;
13631 }
13632
13633@@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struc
13634 */
13635 void __cpuinit irq_ctx_init(int cpu)
13636 {
13637- union irq_ctx *irqctx;
13638-
13639 if (per_cpu(hardirq_ctx, cpu))
13640 return;
13641
13642- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13643- THREAD_FLAGS,
13644- THREAD_ORDER));
13645- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13646- irqctx->tinfo.cpu = cpu;
13647- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
13648- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13649-
13650- per_cpu(hardirq_ctx, cpu) = irqctx;
13651-
13652- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13653- THREAD_FLAGS,
13654- THREAD_ORDER));
13655- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13656- irqctx->tinfo.cpu = cpu;
13657- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13658-
13659- per_cpu(softirq_ctx, cpu) = irqctx;
13660+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13661+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13662
13663 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
13664 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
13665@@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
13666 asmlinkage void do_softirq(void)
13667 {
13668 unsigned long flags;
13669- struct thread_info *curctx;
13670 union irq_ctx *irqctx;
13671 u32 *isp;
13672
13673@@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
13674 local_irq_save(flags);
13675
13676 if (local_softirq_pending()) {
13677- curctx = current_thread_info();
13678 irqctx = __this_cpu_read(softirq_ctx);
13679- irqctx->tinfo.task = curctx->task;
13680- irqctx->tinfo.previous_esp = current_stack_pointer;
13681+ irqctx->previous_esp = current_stack_pointer;
13682
13683 /* build the stack frame on the softirq stack */
13684- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13685+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13686+
13687+#ifdef CONFIG_PAX_MEMORY_UDEREF
13688+ __set_fs(MAKE_MM_SEG(0));
13689+#endif
13690
13691 call_on_stack(__do_softirq, isp);
13692+
13693+#ifdef CONFIG_PAX_MEMORY_UDEREF
13694+ __set_fs(current_thread_info()->addr_limit);
13695+#endif
13696+
13697 /*
13698 * Shouldn't happen, we returned above if in_interrupt():
13699 */
13700diff -urNp linux-3.0.4/arch/x86/kernel/irq.c linux-3.0.4/arch/x86/kernel/irq.c
13701--- linux-3.0.4/arch/x86/kernel/irq.c 2011-07-21 22:17:23.000000000 -0400
13702+++ linux-3.0.4/arch/x86/kernel/irq.c 2011-08-23 21:47:55.000000000 -0400
13703@@ -17,7 +17,7 @@
13704 #include <asm/mce.h>
13705 #include <asm/hw_irq.h>
13706
13707-atomic_t irq_err_count;
13708+atomic_unchecked_t irq_err_count;
13709
13710 /* Function pointer for generic interrupt vector handling */
13711 void (*x86_platform_ipi_callback)(void) = NULL;
13712@@ -116,9 +116,9 @@ int arch_show_interrupts(struct seq_file
13713 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
13714 seq_printf(p, " Machine check polls\n");
13715 #endif
13716- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
13717+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
13718 #if defined(CONFIG_X86_IO_APIC)
13719- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
13720+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
13721 #endif
13722 return 0;
13723 }
13724@@ -158,10 +158,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
13725
13726 u64 arch_irq_stat(void)
13727 {
13728- u64 sum = atomic_read(&irq_err_count);
13729+ u64 sum = atomic_read_unchecked(&irq_err_count);
13730
13731 #ifdef CONFIG_X86_IO_APIC
13732- sum += atomic_read(&irq_mis_count);
13733+ sum += atomic_read_unchecked(&irq_mis_count);
13734 #endif
13735 return sum;
13736 }
13737diff -urNp linux-3.0.4/arch/x86/kernel/kgdb.c linux-3.0.4/arch/x86/kernel/kgdb.c
13738--- linux-3.0.4/arch/x86/kernel/kgdb.c 2011-07-21 22:17:23.000000000 -0400
13739+++ linux-3.0.4/arch/x86/kernel/kgdb.c 2011-08-23 21:47:55.000000000 -0400
13740@@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem,
13741 #ifdef CONFIG_X86_32
13742 switch (regno) {
13743 case GDB_SS:
13744- if (!user_mode_vm(regs))
13745+ if (!user_mode(regs))
13746 *(unsigned long *)mem = __KERNEL_DS;
13747 break;
13748 case GDB_SP:
13749- if (!user_mode_vm(regs))
13750+ if (!user_mode(regs))
13751 *(unsigned long *)mem = kernel_stack_pointer(regs);
13752 break;
13753 case GDB_GS:
13754@@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vec
13755 case 'k':
13756 /* clear the trace bit */
13757 linux_regs->flags &= ~X86_EFLAGS_TF;
13758- atomic_set(&kgdb_cpu_doing_single_step, -1);
13759+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
13760
13761 /* set the trace bit if we're stepping */
13762 if (remcomInBuffer[0] == 's') {
13763 linux_regs->flags |= X86_EFLAGS_TF;
13764- atomic_set(&kgdb_cpu_doing_single_step,
13765+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
13766 raw_smp_processor_id());
13767 }
13768
13769@@ -534,7 +534,7 @@ static int __kgdb_notify(struct die_args
13770 return NOTIFY_DONE;
13771
13772 case DIE_DEBUG:
13773- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
13774+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
13775 if (user_mode(regs))
13776 return single_step_cont(regs, args);
13777 break;
13778diff -urNp linux-3.0.4/arch/x86/kernel/kprobes.c linux-3.0.4/arch/x86/kernel/kprobes.c
13779--- linux-3.0.4/arch/x86/kernel/kprobes.c 2011-07-21 22:17:23.000000000 -0400
13780+++ linux-3.0.4/arch/x86/kernel/kprobes.c 2011-08-23 21:47:55.000000000 -0400
13781@@ -115,8 +115,11 @@ static void __kprobes __synthesize_relat
13782 } __attribute__((packed)) *insn;
13783
13784 insn = (struct __arch_relative_insn *)from;
13785+
13786+ pax_open_kernel();
13787 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
13788 insn->op = op;
13789+ pax_close_kernel();
13790 }
13791
13792 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
13793@@ -153,7 +156,7 @@ static int __kprobes can_boost(kprobe_op
13794 kprobe_opcode_t opcode;
13795 kprobe_opcode_t *orig_opcodes = opcodes;
13796
13797- if (search_exception_tables((unsigned long)opcodes))
13798+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
13799 return 0; /* Page fault may occur on this address. */
13800
13801 retry:
13802@@ -314,7 +317,9 @@ static int __kprobes __copy_instruction(
13803 }
13804 }
13805 insn_get_length(&insn);
13806+ pax_open_kernel();
13807 memcpy(dest, insn.kaddr, insn.length);
13808+ pax_close_kernel();
13809
13810 #ifdef CONFIG_X86_64
13811 if (insn_rip_relative(&insn)) {
13812@@ -338,7 +343,9 @@ static int __kprobes __copy_instruction(
13813 (u8 *) dest;
13814 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
13815 disp = (u8 *) dest + insn_offset_displacement(&insn);
13816+ pax_open_kernel();
13817 *(s32 *) disp = (s32) newdisp;
13818+ pax_close_kernel();
13819 }
13820 #endif
13821 return insn.length;
13822@@ -352,12 +359,12 @@ static void __kprobes arch_copy_kprobe(s
13823 */
13824 __copy_instruction(p->ainsn.insn, p->addr, 0);
13825
13826- if (can_boost(p->addr))
13827+ if (can_boost(ktla_ktva(p->addr)))
13828 p->ainsn.boostable = 0;
13829 else
13830 p->ainsn.boostable = -1;
13831
13832- p->opcode = *p->addr;
13833+ p->opcode = *(ktla_ktva(p->addr));
13834 }
13835
13836 int __kprobes arch_prepare_kprobe(struct kprobe *p)
13837@@ -474,7 +481,7 @@ static void __kprobes setup_singlestep(s
13838 * nor set current_kprobe, because it doesn't use single
13839 * stepping.
13840 */
13841- regs->ip = (unsigned long)p->ainsn.insn;
13842+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13843 preempt_enable_no_resched();
13844 return;
13845 }
13846@@ -493,7 +500,7 @@ static void __kprobes setup_singlestep(s
13847 if (p->opcode == BREAKPOINT_INSTRUCTION)
13848 regs->ip = (unsigned long)p->addr;
13849 else
13850- regs->ip = (unsigned long)p->ainsn.insn;
13851+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13852 }
13853
13854 /*
13855@@ -572,7 +579,7 @@ static int __kprobes kprobe_handler(stru
13856 setup_singlestep(p, regs, kcb, 0);
13857 return 1;
13858 }
13859- } else if (*addr != BREAKPOINT_INSTRUCTION) {
13860+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
13861 /*
13862 * The breakpoint instruction was removed right
13863 * after we hit it. Another cpu has removed
13864@@ -817,7 +824,7 @@ static void __kprobes resume_execution(s
13865 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
13866 {
13867 unsigned long *tos = stack_addr(regs);
13868- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
13869+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
13870 unsigned long orig_ip = (unsigned long)p->addr;
13871 kprobe_opcode_t *insn = p->ainsn.insn;
13872
13873@@ -999,7 +1006,7 @@ int __kprobes kprobe_exceptions_notify(s
13874 struct die_args *args = data;
13875 int ret = NOTIFY_DONE;
13876
13877- if (args->regs && user_mode_vm(args->regs))
13878+ if (args->regs && user_mode(args->regs))
13879 return ret;
13880
13881 switch (val) {
13882@@ -1381,7 +1388,7 @@ int __kprobes arch_prepare_optimized_kpr
13883 * Verify if the address gap is in 2GB range, because this uses
13884 * a relative jump.
13885 */
13886- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
13887+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
13888 if (abs(rel) > 0x7fffffff)
13889 return -ERANGE;
13890
13891@@ -1402,11 +1409,11 @@ int __kprobes arch_prepare_optimized_kpr
13892 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
13893
13894 /* Set probe function call */
13895- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
13896+ synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
13897
13898 /* Set returning jmp instruction at the tail of out-of-line buffer */
13899 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
13900- (u8 *)op->kp.addr + op->optinsn.size);
13901+ (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
13902
13903 flush_icache_range((unsigned long) buf,
13904 (unsigned long) buf + TMPL_END_IDX +
13905@@ -1428,7 +1435,7 @@ static void __kprobes setup_optimize_kpr
13906 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
13907
13908 /* Backup instructions which will be replaced by jump address */
13909- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
13910+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
13911 RELATIVE_ADDR_SIZE);
13912
13913 insn_buf[0] = RELATIVEJUMP_OPCODE;
13914diff -urNp linux-3.0.4/arch/x86/kernel/kvm.c linux-3.0.4/arch/x86/kernel/kvm.c
13915--- linux-3.0.4/arch/x86/kernel/kvm.c 2011-07-21 22:17:23.000000000 -0400
13916+++ linux-3.0.4/arch/x86/kernel/kvm.c 2011-08-24 18:10:12.000000000 -0400
13917@@ -426,6 +426,7 @@ static void __init paravirt_ops_setup(vo
13918 pv_mmu_ops.set_pud = kvm_set_pud;
13919 #if PAGETABLE_LEVELS == 4
13920 pv_mmu_ops.set_pgd = kvm_set_pgd;
13921+ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
13922 #endif
13923 #endif
13924 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
13925diff -urNp linux-3.0.4/arch/x86/kernel/ldt.c linux-3.0.4/arch/x86/kernel/ldt.c
13926--- linux-3.0.4/arch/x86/kernel/ldt.c 2011-07-21 22:17:23.000000000 -0400
13927+++ linux-3.0.4/arch/x86/kernel/ldt.c 2011-08-23 21:47:55.000000000 -0400
13928@@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, i
13929 if (reload) {
13930 #ifdef CONFIG_SMP
13931 preempt_disable();
13932- load_LDT(pc);
13933+ load_LDT_nolock(pc);
13934 if (!cpumask_equal(mm_cpumask(current->mm),
13935 cpumask_of(smp_processor_id())))
13936 smp_call_function(flush_ldt, current->mm, 1);
13937 preempt_enable();
13938 #else
13939- load_LDT(pc);
13940+ load_LDT_nolock(pc);
13941 #endif
13942 }
13943 if (oldsize) {
13944@@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t
13945 return err;
13946
13947 for (i = 0; i < old->size; i++)
13948- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
13949+ write_ldt_entry(new->ldt, i, old->ldt + i);
13950 return 0;
13951 }
13952
13953@@ -116,6 +116,24 @@ int init_new_context(struct task_struct
13954 retval = copy_ldt(&mm->context, &old_mm->context);
13955 mutex_unlock(&old_mm->context.lock);
13956 }
13957+
13958+ if (tsk == current) {
13959+ mm->context.vdso = 0;
13960+
13961+#ifdef CONFIG_X86_32
13962+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
13963+ mm->context.user_cs_base = 0UL;
13964+ mm->context.user_cs_limit = ~0UL;
13965+
13966+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
13967+ cpus_clear(mm->context.cpu_user_cs_mask);
13968+#endif
13969+
13970+#endif
13971+#endif
13972+
13973+ }
13974+
13975 return retval;
13976 }
13977
13978@@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, u
13979 }
13980 }
13981
13982+#ifdef CONFIG_PAX_SEGMEXEC
13983+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
13984+ error = -EINVAL;
13985+ goto out_unlock;
13986+ }
13987+#endif
13988+
13989 fill_ldt(&ldt, &ldt_info);
13990 if (oldmode)
13991 ldt.avl = 0;
13992diff -urNp linux-3.0.4/arch/x86/kernel/machine_kexec_32.c linux-3.0.4/arch/x86/kernel/machine_kexec_32.c
13993--- linux-3.0.4/arch/x86/kernel/machine_kexec_32.c 2011-07-21 22:17:23.000000000 -0400
13994+++ linux-3.0.4/arch/x86/kernel/machine_kexec_32.c 2011-08-23 21:47:55.000000000 -0400
13995@@ -27,7 +27,7 @@
13996 #include <asm/cacheflush.h>
13997 #include <asm/debugreg.h>
13998
13999-static void set_idt(void *newidt, __u16 limit)
14000+static void set_idt(struct desc_struct *newidt, __u16 limit)
14001 {
14002 struct desc_ptr curidt;
14003
14004@@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16
14005 }
14006
14007
14008-static void set_gdt(void *newgdt, __u16 limit)
14009+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
14010 {
14011 struct desc_ptr curgdt;
14012
14013@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
14014 }
14015
14016 control_page = page_address(image->control_code_page);
14017- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
14018+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
14019
14020 relocate_kernel_ptr = control_page;
14021 page_list[PA_CONTROL_PAGE] = __pa(control_page);
14022diff -urNp linux-3.0.4/arch/x86/kernel/microcode_intel.c linux-3.0.4/arch/x86/kernel/microcode_intel.c
14023--- linux-3.0.4/arch/x86/kernel/microcode_intel.c 2011-07-21 22:17:23.000000000 -0400
14024+++ linux-3.0.4/arch/x86/kernel/microcode_intel.c 2011-10-06 04:17:55.000000000 -0400
14025@@ -440,13 +440,13 @@ static enum ucode_state request_microcod
14026
14027 static int get_ucode_user(void *to, const void *from, size_t n)
14028 {
14029- return copy_from_user(to, from, n);
14030+ return copy_from_user(to, (const void __force_user *)from, n);
14031 }
14032
14033 static enum ucode_state
14034 request_microcode_user(int cpu, const void __user *buf, size_t size)
14035 {
14036- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
14037+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
14038 }
14039
14040 static void microcode_fini_cpu(int cpu)
14041diff -urNp linux-3.0.4/arch/x86/kernel/module.c linux-3.0.4/arch/x86/kernel/module.c
14042--- linux-3.0.4/arch/x86/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
14043+++ linux-3.0.4/arch/x86/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
14044@@ -36,21 +36,66 @@
14045 #define DEBUGP(fmt...)
14046 #endif
14047
14048-void *module_alloc(unsigned long size)
14049+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
14050 {
14051 if (PAGE_ALIGN(size) > MODULES_LEN)
14052 return NULL;
14053 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
14054- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
14055+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
14056 -1, __builtin_return_address(0));
14057 }
14058
14059+void *module_alloc(unsigned long size)
14060+{
14061+
14062+#ifdef CONFIG_PAX_KERNEXEC
14063+ return __module_alloc(size, PAGE_KERNEL);
14064+#else
14065+ return __module_alloc(size, PAGE_KERNEL_EXEC);
14066+#endif
14067+
14068+}
14069+
14070 /* Free memory returned from module_alloc */
14071 void module_free(struct module *mod, void *module_region)
14072 {
14073 vfree(module_region);
14074 }
14075
14076+#ifdef CONFIG_PAX_KERNEXEC
14077+#ifdef CONFIG_X86_32
14078+void *module_alloc_exec(unsigned long size)
14079+{
14080+ struct vm_struct *area;
14081+
14082+ if (size == 0)
14083+ return NULL;
14084+
14085+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
14086+ return area ? area->addr : NULL;
14087+}
14088+EXPORT_SYMBOL(module_alloc_exec);
14089+
14090+void module_free_exec(struct module *mod, void *module_region)
14091+{
14092+ vunmap(module_region);
14093+}
14094+EXPORT_SYMBOL(module_free_exec);
14095+#else
14096+void module_free_exec(struct module *mod, void *module_region)
14097+{
14098+ module_free(mod, module_region);
14099+}
14100+EXPORT_SYMBOL(module_free_exec);
14101+
14102+void *module_alloc_exec(unsigned long size)
14103+{
14104+ return __module_alloc(size, PAGE_KERNEL_RX);
14105+}
14106+EXPORT_SYMBOL(module_alloc_exec);
14107+#endif
14108+#endif
14109+
14110 /* We don't need anything special. */
14111 int module_frob_arch_sections(Elf_Ehdr *hdr,
14112 Elf_Shdr *sechdrs,
14113@@ -70,14 +115,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
14114 unsigned int i;
14115 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
14116 Elf32_Sym *sym;
14117- uint32_t *location;
14118+ uint32_t *plocation, location;
14119
14120 DEBUGP("Applying relocate section %u to %u\n", relsec,
14121 sechdrs[relsec].sh_info);
14122 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
14123 /* This is where to make the change */
14124- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
14125- + rel[i].r_offset;
14126+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
14127+ location = (uint32_t)plocation;
14128+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
14129+ plocation = ktla_ktva((void *)plocation);
14130 /* This is the symbol it is referring to. Note that all
14131 undefined symbols have been resolved. */
14132 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
14133@@ -86,11 +133,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
14134 switch (ELF32_R_TYPE(rel[i].r_info)) {
14135 case R_386_32:
14136 /* We add the value into the location given */
14137- *location += sym->st_value;
14138+ pax_open_kernel();
14139+ *plocation += sym->st_value;
14140+ pax_close_kernel();
14141 break;
14142 case R_386_PC32:
14143 /* Add the value, subtract its postition */
14144- *location += sym->st_value - (uint32_t)location;
14145+ pax_open_kernel();
14146+ *plocation += sym->st_value - location;
14147+ pax_close_kernel();
14148 break;
14149 default:
14150 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
14151@@ -146,21 +197,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
14152 case R_X86_64_NONE:
14153 break;
14154 case R_X86_64_64:
14155+ pax_open_kernel();
14156 *(u64 *)loc = val;
14157+ pax_close_kernel();
14158 break;
14159 case R_X86_64_32:
14160+ pax_open_kernel();
14161 *(u32 *)loc = val;
14162+ pax_close_kernel();
14163 if (val != *(u32 *)loc)
14164 goto overflow;
14165 break;
14166 case R_X86_64_32S:
14167+ pax_open_kernel();
14168 *(s32 *)loc = val;
14169+ pax_close_kernel();
14170 if ((s64)val != *(s32 *)loc)
14171 goto overflow;
14172 break;
14173 case R_X86_64_PC32:
14174 val -= (u64)loc;
14175+ pax_open_kernel();
14176 *(u32 *)loc = val;
14177+ pax_close_kernel();
14178+
14179 #if 0
14180 if ((s64)val != *(s32 *)loc)
14181 goto overflow;
14182diff -urNp linux-3.0.4/arch/x86/kernel/paravirt.c linux-3.0.4/arch/x86/kernel/paravirt.c
14183--- linux-3.0.4/arch/x86/kernel/paravirt.c 2011-07-21 22:17:23.000000000 -0400
14184+++ linux-3.0.4/arch/x86/kernel/paravirt.c 2011-08-23 21:48:14.000000000 -0400
14185@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
14186 {
14187 return x;
14188 }
14189+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
14190+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
14191+#endif
14192
14193 void __init default_banner(void)
14194 {
14195@@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
14196 * corresponding structure. */
14197 static void *get_call_destination(u8 type)
14198 {
14199- struct paravirt_patch_template tmpl = {
14200+ const struct paravirt_patch_template tmpl = {
14201 .pv_init_ops = pv_init_ops,
14202 .pv_time_ops = pv_time_ops,
14203 .pv_cpu_ops = pv_cpu_ops,
14204@@ -133,6 +136,9 @@ static void *get_call_destination(u8 typ
14205 .pv_lock_ops = pv_lock_ops,
14206 #endif
14207 };
14208+
14209+ pax_track_stack();
14210+
14211 return *((void **)&tmpl + type);
14212 }
14213
14214@@ -145,15 +151,19 @@ unsigned paravirt_patch_default(u8 type,
14215 if (opfunc == NULL)
14216 /* If there's no function, patch it with a ud2a (BUG) */
14217 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
14218- else if (opfunc == _paravirt_nop)
14219+ else if (opfunc == (void *)_paravirt_nop)
14220 /* If the operation is a nop, then nop the callsite */
14221 ret = paravirt_patch_nop();
14222
14223 /* identity functions just return their single argument */
14224- else if (opfunc == _paravirt_ident_32)
14225+ else if (opfunc == (void *)_paravirt_ident_32)
14226 ret = paravirt_patch_ident_32(insnbuf, len);
14227- else if (opfunc == _paravirt_ident_64)
14228+ else if (opfunc == (void *)_paravirt_ident_64)
14229 ret = paravirt_patch_ident_64(insnbuf, len);
14230+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
14231+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
14232+ ret = paravirt_patch_ident_64(insnbuf, len);
14233+#endif
14234
14235 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
14236 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
14237@@ -178,7 +188,7 @@ unsigned paravirt_patch_insns(void *insn
14238 if (insn_len > len || start == NULL)
14239 insn_len = len;
14240 else
14241- memcpy(insnbuf, start, insn_len);
14242+ memcpy(insnbuf, ktla_ktva(start), insn_len);
14243
14244 return insn_len;
14245 }
14246@@ -294,22 +304,22 @@ void arch_flush_lazy_mmu_mode(void)
14247 preempt_enable();
14248 }
14249
14250-struct pv_info pv_info = {
14251+struct pv_info pv_info __read_only = {
14252 .name = "bare hardware",
14253 .paravirt_enabled = 0,
14254 .kernel_rpl = 0,
14255 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
14256 };
14257
14258-struct pv_init_ops pv_init_ops = {
14259+struct pv_init_ops pv_init_ops __read_only = {
14260 .patch = native_patch,
14261 };
14262
14263-struct pv_time_ops pv_time_ops = {
14264+struct pv_time_ops pv_time_ops __read_only = {
14265 .sched_clock = native_sched_clock,
14266 };
14267
14268-struct pv_irq_ops pv_irq_ops = {
14269+struct pv_irq_ops pv_irq_ops __read_only = {
14270 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
14271 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
14272 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
14273@@ -321,7 +331,7 @@ struct pv_irq_ops pv_irq_ops = {
14274 #endif
14275 };
14276
14277-struct pv_cpu_ops pv_cpu_ops = {
14278+struct pv_cpu_ops pv_cpu_ops __read_only = {
14279 .cpuid = native_cpuid,
14280 .get_debugreg = native_get_debugreg,
14281 .set_debugreg = native_set_debugreg,
14282@@ -382,21 +392,26 @@ struct pv_cpu_ops pv_cpu_ops = {
14283 .end_context_switch = paravirt_nop,
14284 };
14285
14286-struct pv_apic_ops pv_apic_ops = {
14287+struct pv_apic_ops pv_apic_ops __read_only = {
14288 #ifdef CONFIG_X86_LOCAL_APIC
14289 .startup_ipi_hook = paravirt_nop,
14290 #endif
14291 };
14292
14293-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
14294+#ifdef CONFIG_X86_32
14295+#ifdef CONFIG_X86_PAE
14296+/* 64-bit pagetable entries */
14297+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
14298+#else
14299 /* 32-bit pagetable entries */
14300 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
14301+#endif
14302 #else
14303 /* 64-bit pagetable entries */
14304 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
14305 #endif
14306
14307-struct pv_mmu_ops pv_mmu_ops = {
14308+struct pv_mmu_ops pv_mmu_ops __read_only = {
14309
14310 .read_cr2 = native_read_cr2,
14311 .write_cr2 = native_write_cr2,
14312@@ -446,6 +461,7 @@ struct pv_mmu_ops pv_mmu_ops = {
14313 .make_pud = PTE_IDENT,
14314
14315 .set_pgd = native_set_pgd,
14316+ .set_pgd_batched = native_set_pgd_batched,
14317 #endif
14318 #endif /* PAGETABLE_LEVELS >= 3 */
14319
14320@@ -465,6 +481,12 @@ struct pv_mmu_ops pv_mmu_ops = {
14321 },
14322
14323 .set_fixmap = native_set_fixmap,
14324+
14325+#ifdef CONFIG_PAX_KERNEXEC
14326+ .pax_open_kernel = native_pax_open_kernel,
14327+ .pax_close_kernel = native_pax_close_kernel,
14328+#endif
14329+
14330 };
14331
14332 EXPORT_SYMBOL_GPL(pv_time_ops);
14333diff -urNp linux-3.0.4/arch/x86/kernel/paravirt-spinlocks.c linux-3.0.4/arch/x86/kernel/paravirt-spinlocks.c
14334--- linux-3.0.4/arch/x86/kernel/paravirt-spinlocks.c 2011-07-21 22:17:23.000000000 -0400
14335+++ linux-3.0.4/arch/x86/kernel/paravirt-spinlocks.c 2011-08-23 21:47:55.000000000 -0400
14336@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t
14337 arch_spin_lock(lock);
14338 }
14339
14340-struct pv_lock_ops pv_lock_ops = {
14341+struct pv_lock_ops pv_lock_ops __read_only = {
14342 #ifdef CONFIG_SMP
14343 .spin_is_locked = __ticket_spin_is_locked,
14344 .spin_is_contended = __ticket_spin_is_contended,
14345diff -urNp linux-3.0.4/arch/x86/kernel/pci-iommu_table.c linux-3.0.4/arch/x86/kernel/pci-iommu_table.c
14346--- linux-3.0.4/arch/x86/kernel/pci-iommu_table.c 2011-07-21 22:17:23.000000000 -0400
14347+++ linux-3.0.4/arch/x86/kernel/pci-iommu_table.c 2011-08-23 21:48:14.000000000 -0400
14348@@ -2,7 +2,7 @@
14349 #include <asm/iommu_table.h>
14350 #include <linux/string.h>
14351 #include <linux/kallsyms.h>
14352-
14353+#include <linux/sched.h>
14354
14355 #define DEBUG 1
14356
14357@@ -51,6 +51,8 @@ void __init check_iommu_entries(struct i
14358 {
14359 struct iommu_table_entry *p, *q, *x;
14360
14361+ pax_track_stack();
14362+
14363 /* Simple cyclic dependency checker. */
14364 for (p = start; p < finish; p++) {
14365 q = find_dependents_of(start, finish, p);
14366diff -urNp linux-3.0.4/arch/x86/kernel/process_32.c linux-3.0.4/arch/x86/kernel/process_32.c
14367--- linux-3.0.4/arch/x86/kernel/process_32.c 2011-07-21 22:17:23.000000000 -0400
14368+++ linux-3.0.4/arch/x86/kernel/process_32.c 2011-08-23 21:47:55.000000000 -0400
14369@@ -65,6 +65,7 @@ asmlinkage void ret_from_fork(void) __as
14370 unsigned long thread_saved_pc(struct task_struct *tsk)
14371 {
14372 return ((unsigned long *)tsk->thread.sp)[3];
14373+//XXX return tsk->thread.eip;
14374 }
14375
14376 #ifndef CONFIG_SMP
14377@@ -126,15 +127,14 @@ void __show_regs(struct pt_regs *regs, i
14378 unsigned long sp;
14379 unsigned short ss, gs;
14380
14381- if (user_mode_vm(regs)) {
14382+ if (user_mode(regs)) {
14383 sp = regs->sp;
14384 ss = regs->ss & 0xffff;
14385- gs = get_user_gs(regs);
14386 } else {
14387 sp = kernel_stack_pointer(regs);
14388 savesegment(ss, ss);
14389- savesegment(gs, gs);
14390 }
14391+ gs = get_user_gs(regs);
14392
14393 show_regs_common();
14394
14395@@ -196,13 +196,14 @@ int copy_thread(unsigned long clone_flag
14396 struct task_struct *tsk;
14397 int err;
14398
14399- childregs = task_pt_regs(p);
14400+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
14401 *childregs = *regs;
14402 childregs->ax = 0;
14403 childregs->sp = sp;
14404
14405 p->thread.sp = (unsigned long) childregs;
14406 p->thread.sp0 = (unsigned long) (childregs+1);
14407+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14408
14409 p->thread.ip = (unsigned long) ret_from_fork;
14410
14411@@ -292,7 +293,7 @@ __switch_to(struct task_struct *prev_p,
14412 struct thread_struct *prev = &prev_p->thread,
14413 *next = &next_p->thread;
14414 int cpu = smp_processor_id();
14415- struct tss_struct *tss = &per_cpu(init_tss, cpu);
14416+ struct tss_struct *tss = init_tss + cpu;
14417 bool preload_fpu;
14418
14419 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
14420@@ -327,6 +328,10 @@ __switch_to(struct task_struct *prev_p,
14421 */
14422 lazy_save_gs(prev->gs);
14423
14424+#ifdef CONFIG_PAX_MEMORY_UDEREF
14425+ __set_fs(task_thread_info(next_p)->addr_limit);
14426+#endif
14427+
14428 /*
14429 * Load the per-thread Thread-Local Storage descriptor.
14430 */
14431@@ -362,6 +367,9 @@ __switch_to(struct task_struct *prev_p,
14432 */
14433 arch_end_context_switch(next_p);
14434
14435+ percpu_write(current_task, next_p);
14436+ percpu_write(current_tinfo, &next_p->tinfo);
14437+
14438 if (preload_fpu)
14439 __math_state_restore();
14440
14441@@ -371,8 +379,6 @@ __switch_to(struct task_struct *prev_p,
14442 if (prev->gs | next->gs)
14443 lazy_load_gs(next->gs);
14444
14445- percpu_write(current_task, next_p);
14446-
14447 return prev_p;
14448 }
14449
14450@@ -402,4 +408,3 @@ unsigned long get_wchan(struct task_stru
14451 } while (count++ < 16);
14452 return 0;
14453 }
14454-
14455diff -urNp linux-3.0.4/arch/x86/kernel/process_64.c linux-3.0.4/arch/x86/kernel/process_64.c
14456--- linux-3.0.4/arch/x86/kernel/process_64.c 2011-07-21 22:17:23.000000000 -0400
14457+++ linux-3.0.4/arch/x86/kernel/process_64.c 2011-08-23 21:47:55.000000000 -0400
14458@@ -87,7 +87,7 @@ static void __exit_idle(void)
14459 void exit_idle(void)
14460 {
14461 /* idle loop has pid 0 */
14462- if (current->pid)
14463+ if (task_pid_nr(current))
14464 return;
14465 __exit_idle();
14466 }
14467@@ -260,8 +260,7 @@ int copy_thread(unsigned long clone_flag
14468 struct pt_regs *childregs;
14469 struct task_struct *me = current;
14470
14471- childregs = ((struct pt_regs *)
14472- (THREAD_SIZE + task_stack_page(p))) - 1;
14473+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
14474 *childregs = *regs;
14475
14476 childregs->ax = 0;
14477@@ -273,6 +272,7 @@ int copy_thread(unsigned long clone_flag
14478 p->thread.sp = (unsigned long) childregs;
14479 p->thread.sp0 = (unsigned long) (childregs+1);
14480 p->thread.usersp = me->thread.usersp;
14481+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14482
14483 set_tsk_thread_flag(p, TIF_FORK);
14484
14485@@ -375,7 +375,7 @@ __switch_to(struct task_struct *prev_p,
14486 struct thread_struct *prev = &prev_p->thread;
14487 struct thread_struct *next = &next_p->thread;
14488 int cpu = smp_processor_id();
14489- struct tss_struct *tss = &per_cpu(init_tss, cpu);
14490+ struct tss_struct *tss = init_tss + cpu;
14491 unsigned fsindex, gsindex;
14492 bool preload_fpu;
14493
14494@@ -471,10 +471,9 @@ __switch_to(struct task_struct *prev_p,
14495 prev->usersp = percpu_read(old_rsp);
14496 percpu_write(old_rsp, next->usersp);
14497 percpu_write(current_task, next_p);
14498+ percpu_write(current_tinfo, &next_p->tinfo);
14499
14500- percpu_write(kernel_stack,
14501- (unsigned long)task_stack_page(next_p) +
14502- THREAD_SIZE - KERNEL_STACK_OFFSET);
14503+ percpu_write(kernel_stack, next->sp0);
14504
14505 /*
14506 * Now maybe reload the debug registers and handle I/O bitmaps
14507@@ -536,12 +535,11 @@ unsigned long get_wchan(struct task_stru
14508 if (!p || p == current || p->state == TASK_RUNNING)
14509 return 0;
14510 stack = (unsigned long)task_stack_page(p);
14511- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
14512+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
14513 return 0;
14514 fp = *(u64 *)(p->thread.sp);
14515 do {
14516- if (fp < (unsigned long)stack ||
14517- fp >= (unsigned long)stack+THREAD_SIZE)
14518+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
14519 return 0;
14520 ip = *(u64 *)(fp+8);
14521 if (!in_sched_functions(ip))
14522diff -urNp linux-3.0.4/arch/x86/kernel/process.c linux-3.0.4/arch/x86/kernel/process.c
14523--- linux-3.0.4/arch/x86/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
14524+++ linux-3.0.4/arch/x86/kernel/process.c 2011-08-30 18:23:52.000000000 -0400
14525@@ -48,16 +48,33 @@ void free_thread_xstate(struct task_stru
14526
14527 void free_thread_info(struct thread_info *ti)
14528 {
14529- free_thread_xstate(ti->task);
14530 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
14531 }
14532
14533+static struct kmem_cache *task_struct_cachep;
14534+
14535 void arch_task_cache_init(void)
14536 {
14537- task_xstate_cachep =
14538- kmem_cache_create("task_xstate", xstate_size,
14539+ /* create a slab on which task_structs can be allocated */
14540+ task_struct_cachep =
14541+ kmem_cache_create("task_struct", sizeof(struct task_struct),
14542+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
14543+
14544+ task_xstate_cachep =
14545+ kmem_cache_create("task_xstate", xstate_size,
14546 __alignof__(union thread_xstate),
14547- SLAB_PANIC | SLAB_NOTRACK, NULL);
14548+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
14549+}
14550+
14551+struct task_struct *alloc_task_struct_node(int node)
14552+{
14553+ return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
14554+}
14555+
14556+void free_task_struct(struct task_struct *task)
14557+{
14558+ free_thread_xstate(task);
14559+ kmem_cache_free(task_struct_cachep, task);
14560 }
14561
14562 /*
14563@@ -70,7 +87,7 @@ void exit_thread(void)
14564 unsigned long *bp = t->io_bitmap_ptr;
14565
14566 if (bp) {
14567- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
14568+ struct tss_struct *tss = init_tss + get_cpu();
14569
14570 t->io_bitmap_ptr = NULL;
14571 clear_thread_flag(TIF_IO_BITMAP);
14572@@ -106,7 +123,7 @@ void show_regs_common(void)
14573
14574 printk(KERN_CONT "\n");
14575 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
14576- current->pid, current->comm, print_tainted(),
14577+ task_pid_nr(current), current->comm, print_tainted(),
14578 init_utsname()->release,
14579 (int)strcspn(init_utsname()->version, " "),
14580 init_utsname()->version);
14581@@ -120,6 +137,9 @@ void flush_thread(void)
14582 {
14583 struct task_struct *tsk = current;
14584
14585+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
14586+ loadsegment(gs, 0);
14587+#endif
14588 flush_ptrace_hw_breakpoint(tsk);
14589 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
14590 /*
14591@@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), voi
14592 regs.di = (unsigned long) arg;
14593
14594 #ifdef CONFIG_X86_32
14595- regs.ds = __USER_DS;
14596- regs.es = __USER_DS;
14597+ regs.ds = __KERNEL_DS;
14598+ regs.es = __KERNEL_DS;
14599 regs.fs = __KERNEL_PERCPU;
14600- regs.gs = __KERNEL_STACK_CANARY;
14601+ savesegment(gs, regs.gs);
14602 #else
14603 regs.ss = __KERNEL_DS;
14604 #endif
14605@@ -403,7 +423,7 @@ void default_idle(void)
14606 EXPORT_SYMBOL(default_idle);
14607 #endif
14608
14609-void stop_this_cpu(void *dummy)
14610+__noreturn void stop_this_cpu(void *dummy)
14611 {
14612 local_irq_disable();
14613 /*
14614@@ -668,16 +688,37 @@ static int __init idle_setup(char *str)
14615 }
14616 early_param("idle", idle_setup);
14617
14618-unsigned long arch_align_stack(unsigned long sp)
14619+#ifdef CONFIG_PAX_RANDKSTACK
14620+void pax_randomize_kstack(struct pt_regs *regs)
14621 {
14622- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
14623- sp -= get_random_int() % 8192;
14624- return sp & ~0xf;
14625-}
14626+ struct thread_struct *thread = &current->thread;
14627+ unsigned long time;
14628
14629-unsigned long arch_randomize_brk(struct mm_struct *mm)
14630-{
14631- unsigned long range_end = mm->brk + 0x02000000;
14632- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
14633-}
14634+ if (!randomize_va_space)
14635+ return;
14636+
14637+ if (v8086_mode(regs))
14638+ return;
14639
14640+ rdtscl(time);
14641+
14642+ /* P4 seems to return a 0 LSB, ignore it */
14643+#ifdef CONFIG_MPENTIUM4
14644+ time &= 0x3EUL;
14645+ time <<= 2;
14646+#elif defined(CONFIG_X86_64)
14647+ time &= 0xFUL;
14648+ time <<= 4;
14649+#else
14650+ time &= 0x1FUL;
14651+ time <<= 3;
14652+#endif
14653+
14654+ thread->sp0 ^= time;
14655+ load_sp0(init_tss + smp_processor_id(), thread);
14656+
14657+#ifdef CONFIG_X86_64
14658+ percpu_write(kernel_stack, thread->sp0);
14659+#endif
14660+}
14661+#endif
14662diff -urNp linux-3.0.4/arch/x86/kernel/ptrace.c linux-3.0.4/arch/x86/kernel/ptrace.c
14663--- linux-3.0.4/arch/x86/kernel/ptrace.c 2011-07-21 22:17:23.000000000 -0400
14664+++ linux-3.0.4/arch/x86/kernel/ptrace.c 2011-08-23 21:47:55.000000000 -0400
14665@@ -821,7 +821,7 @@ long arch_ptrace(struct task_struct *chi
14666 unsigned long addr, unsigned long data)
14667 {
14668 int ret;
14669- unsigned long __user *datap = (unsigned long __user *)data;
14670+ unsigned long __user *datap = (__force unsigned long __user *)data;
14671
14672 switch (request) {
14673 /* read the word at location addr in the USER area. */
14674@@ -906,14 +906,14 @@ long arch_ptrace(struct task_struct *chi
14675 if ((int) addr < 0)
14676 return -EIO;
14677 ret = do_get_thread_area(child, addr,
14678- (struct user_desc __user *)data);
14679+ (__force struct user_desc __user *) data);
14680 break;
14681
14682 case PTRACE_SET_THREAD_AREA:
14683 if ((int) addr < 0)
14684 return -EIO;
14685 ret = do_set_thread_area(child, addr,
14686- (struct user_desc __user *)data, 0);
14687+ (__force struct user_desc __user *) data, 0);
14688 break;
14689 #endif
14690
14691@@ -1330,7 +1330,7 @@ static void fill_sigtrap_info(struct tas
14692 memset(info, 0, sizeof(*info));
14693 info->si_signo = SIGTRAP;
14694 info->si_code = si_code;
14695- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
14696+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
14697 }
14698
14699 void user_single_step_siginfo(struct task_struct *tsk,
14700diff -urNp linux-3.0.4/arch/x86/kernel/pvclock.c linux-3.0.4/arch/x86/kernel/pvclock.c
14701--- linux-3.0.4/arch/x86/kernel/pvclock.c 2011-07-21 22:17:23.000000000 -0400
14702+++ linux-3.0.4/arch/x86/kernel/pvclock.c 2011-08-23 21:47:55.000000000 -0400
14703@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvc
14704 return pv_tsc_khz;
14705 }
14706
14707-static atomic64_t last_value = ATOMIC64_INIT(0);
14708+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
14709
14710 void pvclock_resume(void)
14711 {
14712- atomic64_set(&last_value, 0);
14713+ atomic64_set_unchecked(&last_value, 0);
14714 }
14715
14716 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
14717@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct
14718 * updating at the same time, and one of them could be slightly behind,
14719 * making the assumption that last_value always go forward fail to hold.
14720 */
14721- last = atomic64_read(&last_value);
14722+ last = atomic64_read_unchecked(&last_value);
14723 do {
14724 if (ret < last)
14725 return last;
14726- last = atomic64_cmpxchg(&last_value, last, ret);
14727+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
14728 } while (unlikely(last != ret));
14729
14730 return ret;
14731diff -urNp linux-3.0.4/arch/x86/kernel/reboot.c linux-3.0.4/arch/x86/kernel/reboot.c
14732--- linux-3.0.4/arch/x86/kernel/reboot.c 2011-07-21 22:17:23.000000000 -0400
14733+++ linux-3.0.4/arch/x86/kernel/reboot.c 2011-08-23 21:47:55.000000000 -0400
14734@@ -35,7 +35,7 @@ void (*pm_power_off)(void);
14735 EXPORT_SYMBOL(pm_power_off);
14736
14737 static const struct desc_ptr no_idt = {};
14738-static int reboot_mode;
14739+static unsigned short reboot_mode;
14740 enum reboot_type reboot_type = BOOT_ACPI;
14741 int reboot_force;
14742
14743@@ -315,13 +315,17 @@ core_initcall(reboot_init);
14744 extern const unsigned char machine_real_restart_asm[];
14745 extern const u64 machine_real_restart_gdt[3];
14746
14747-void machine_real_restart(unsigned int type)
14748+__noreturn void machine_real_restart(unsigned int type)
14749 {
14750 void *restart_va;
14751 unsigned long restart_pa;
14752- void (*restart_lowmem)(unsigned int);
14753+ void (* __noreturn restart_lowmem)(unsigned int);
14754 u64 *lowmem_gdt;
14755
14756+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14757+ struct desc_struct *gdt;
14758+#endif
14759+
14760 local_irq_disable();
14761
14762 /* Write zero to CMOS register number 0x0f, which the BIOS POST
14763@@ -347,14 +351,14 @@ void machine_real_restart(unsigned int t
14764 boot)". This seems like a fairly standard thing that gets set by
14765 REBOOT.COM programs, and the previous reset routine did this
14766 too. */
14767- *((unsigned short *)0x472) = reboot_mode;
14768+ *(unsigned short *)(__va(0x472)) = reboot_mode;
14769
14770 /* Patch the GDT in the low memory trampoline */
14771 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
14772
14773 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
14774 restart_pa = virt_to_phys(restart_va);
14775- restart_lowmem = (void (*)(unsigned int))restart_pa;
14776+ restart_lowmem = (void *)restart_pa;
14777
14778 /* GDT[0]: GDT self-pointer */
14779 lowmem_gdt[0] =
14780@@ -365,7 +369,33 @@ void machine_real_restart(unsigned int t
14781 GDT_ENTRY(0x009b, restart_pa, 0xffff);
14782
14783 /* Jump to the identity-mapped low memory code */
14784+
14785+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14786+ gdt = get_cpu_gdt_table(smp_processor_id());
14787+ pax_open_kernel();
14788+#ifdef CONFIG_PAX_MEMORY_UDEREF
14789+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
14790+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
14791+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
14792+#endif
14793+#ifdef CONFIG_PAX_KERNEXEC
14794+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
14795+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
14796+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
14797+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
14798+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
14799+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
14800+#endif
14801+ pax_close_kernel();
14802+#endif
14803+
14804+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14805+ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
14806+ unreachable();
14807+#else
14808 restart_lowmem(type);
14809+#endif
14810+
14811 }
14812 #ifdef CONFIG_APM_MODULE
14813 EXPORT_SYMBOL(machine_real_restart);
14814@@ -523,7 +553,7 @@ void __attribute__((weak)) mach_reboot_f
14815 * try to force a triple fault and then cycle between hitting the keyboard
14816 * controller and doing that
14817 */
14818-static void native_machine_emergency_restart(void)
14819+__noreturn static void native_machine_emergency_restart(void)
14820 {
14821 int i;
14822 int attempt = 0;
14823@@ -647,13 +677,13 @@ void native_machine_shutdown(void)
14824 #endif
14825 }
14826
14827-static void __machine_emergency_restart(int emergency)
14828+static __noreturn void __machine_emergency_restart(int emergency)
14829 {
14830 reboot_emergency = emergency;
14831 machine_ops.emergency_restart();
14832 }
14833
14834-static void native_machine_restart(char *__unused)
14835+static __noreturn void native_machine_restart(char *__unused)
14836 {
14837 printk("machine restart\n");
14838
14839@@ -662,7 +692,7 @@ static void native_machine_restart(char
14840 __machine_emergency_restart(0);
14841 }
14842
14843-static void native_machine_halt(void)
14844+static __noreturn void native_machine_halt(void)
14845 {
14846 /* stop other cpus and apics */
14847 machine_shutdown();
14848@@ -673,7 +703,7 @@ static void native_machine_halt(void)
14849 stop_this_cpu(NULL);
14850 }
14851
14852-static void native_machine_power_off(void)
14853+__noreturn static void native_machine_power_off(void)
14854 {
14855 if (pm_power_off) {
14856 if (!reboot_force)
14857@@ -682,6 +712,7 @@ static void native_machine_power_off(voi
14858 }
14859 /* a fallback in case there is no PM info available */
14860 tboot_shutdown(TB_SHUTDOWN_HALT);
14861+ unreachable();
14862 }
14863
14864 struct machine_ops machine_ops = {
14865diff -urNp linux-3.0.4/arch/x86/kernel/setup.c linux-3.0.4/arch/x86/kernel/setup.c
14866--- linux-3.0.4/arch/x86/kernel/setup.c 2011-07-21 22:17:23.000000000 -0400
14867+++ linux-3.0.4/arch/x86/kernel/setup.c 2011-10-06 04:17:55.000000000 -0400
14868@@ -447,7 +447,7 @@ static void __init parse_setup_data(void
14869
14870 switch (data->type) {
14871 case SETUP_E820_EXT:
14872- parse_e820_ext(data);
14873+ parse_e820_ext((struct setup_data __force_kernel *)data);
14874 break;
14875 case SETUP_DTB:
14876 add_dtb(pa_data);
14877@@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
14878 * area (640->1Mb) as ram even though it is not.
14879 * take them out.
14880 */
14881- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
14882+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
14883 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
14884 }
14885
14886@@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
14887
14888 if (!boot_params.hdr.root_flags)
14889 root_mountflags &= ~MS_RDONLY;
14890- init_mm.start_code = (unsigned long) _text;
14891- init_mm.end_code = (unsigned long) _etext;
14892+ init_mm.start_code = ktla_ktva((unsigned long) _text);
14893+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
14894 init_mm.end_data = (unsigned long) _edata;
14895 init_mm.brk = _brk_end;
14896
14897- code_resource.start = virt_to_phys(_text);
14898- code_resource.end = virt_to_phys(_etext)-1;
14899- data_resource.start = virt_to_phys(_etext);
14900+ code_resource.start = virt_to_phys(ktla_ktva(_text));
14901+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
14902+ data_resource.start = virt_to_phys(_sdata);
14903 data_resource.end = virt_to_phys(_edata)-1;
14904 bss_resource.start = virt_to_phys(&__bss_start);
14905 bss_resource.end = virt_to_phys(&__bss_stop)-1;
14906diff -urNp linux-3.0.4/arch/x86/kernel/setup_percpu.c linux-3.0.4/arch/x86/kernel/setup_percpu.c
14907--- linux-3.0.4/arch/x86/kernel/setup_percpu.c 2011-07-21 22:17:23.000000000 -0400
14908+++ linux-3.0.4/arch/x86/kernel/setup_percpu.c 2011-08-23 21:47:55.000000000 -0400
14909@@ -21,19 +21,17 @@
14910 #include <asm/cpu.h>
14911 #include <asm/stackprotector.h>
14912
14913-DEFINE_PER_CPU(int, cpu_number);
14914+#ifdef CONFIG_SMP
14915+DEFINE_PER_CPU(unsigned int, cpu_number);
14916 EXPORT_PER_CPU_SYMBOL(cpu_number);
14917+#endif
14918
14919-#ifdef CONFIG_X86_64
14920 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
14921-#else
14922-#define BOOT_PERCPU_OFFSET 0
14923-#endif
14924
14925 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
14926 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
14927
14928-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
14929+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
14930 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
14931 };
14932 EXPORT_SYMBOL(__per_cpu_offset);
14933@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(
14934 {
14935 #ifdef CONFIG_X86_32
14936 struct desc_struct gdt;
14937+ unsigned long base = per_cpu_offset(cpu);
14938
14939- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
14940- 0x2 | DESCTYPE_S, 0x8);
14941- gdt.s = 1;
14942+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
14943+ 0x83 | DESCTYPE_S, 0xC);
14944 write_gdt_entry(get_cpu_gdt_table(cpu),
14945 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
14946 #endif
14947@@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
14948 /* alrighty, percpu areas up and running */
14949 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
14950 for_each_possible_cpu(cpu) {
14951+#ifdef CONFIG_CC_STACKPROTECTOR
14952+#ifdef CONFIG_X86_32
14953+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
14954+#endif
14955+#endif
14956 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
14957 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
14958 per_cpu(cpu_number, cpu) = cpu;
14959@@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
14960 */
14961 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
14962 #endif
14963+#ifdef CONFIG_CC_STACKPROTECTOR
14964+#ifdef CONFIG_X86_32
14965+ if (!cpu)
14966+ per_cpu(stack_canary.canary, cpu) = canary;
14967+#endif
14968+#endif
14969 /*
14970 * Up to this point, the boot CPU has been using .init.data
14971 * area. Reload any changed state for the boot CPU.
14972diff -urNp linux-3.0.4/arch/x86/kernel/signal.c linux-3.0.4/arch/x86/kernel/signal.c
14973--- linux-3.0.4/arch/x86/kernel/signal.c 2011-07-21 22:17:23.000000000 -0400
14974+++ linux-3.0.4/arch/x86/kernel/signal.c 2011-08-23 21:48:14.000000000 -0400
14975@@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsi
14976 * Align the stack pointer according to the i386 ABI,
14977 * i.e. so that on function entry ((sp + 4) & 15) == 0.
14978 */
14979- sp = ((sp + 4) & -16ul) - 4;
14980+ sp = ((sp - 12) & -16ul) - 4;
14981 #else /* !CONFIG_X86_32 */
14982 sp = round_down(sp, 16) - 8;
14983 #endif
14984@@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, str
14985 * Return an always-bogus address instead so we will die with SIGSEGV.
14986 */
14987 if (onsigstack && !likely(on_sig_stack(sp)))
14988- return (void __user *)-1L;
14989+ return (__force void __user *)-1L;
14990
14991 /* save i387 state */
14992 if (used_math() && save_i387_xstate(*fpstate) < 0)
14993- return (void __user *)-1L;
14994+ return (__force void __user *)-1L;
14995
14996 return (void __user *)sp;
14997 }
14998@@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigactio
14999 }
15000
15001 if (current->mm->context.vdso)
15002- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
15003+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
15004 else
15005- restorer = &frame->retcode;
15006+ restorer = (void __user *)&frame->retcode;
15007 if (ka->sa.sa_flags & SA_RESTORER)
15008 restorer = ka->sa.sa_restorer;
15009
15010@@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigactio
15011 * reasons and because gdb uses it as a signature to notice
15012 * signal handler stack frames.
15013 */
15014- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
15015+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
15016
15017 if (err)
15018 return -EFAULT;
15019@@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, str
15020 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
15021
15022 /* Set up to return from userspace. */
15023- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
15024+ if (current->mm->context.vdso)
15025+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
15026+ else
15027+ restorer = (void __user *)&frame->retcode;
15028 if (ka->sa.sa_flags & SA_RESTORER)
15029 restorer = ka->sa.sa_restorer;
15030 put_user_ex(restorer, &frame->pretcode);
15031@@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, str
15032 * reasons and because gdb uses it as a signature to notice
15033 * signal handler stack frames.
15034 */
15035- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
15036+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
15037 } put_user_catch(err);
15038
15039 if (err)
15040@@ -769,6 +772,8 @@ static void do_signal(struct pt_regs *re
15041 int signr;
15042 sigset_t *oldset;
15043
15044+ pax_track_stack();
15045+
15046 /*
15047 * We want the common case to go fast, which is why we may in certain
15048 * cases get here from kernel mode. Just return without doing anything
15049@@ -776,7 +781,7 @@ static void do_signal(struct pt_regs *re
15050 * X86_32: vm86 regs switched out by assembly code before reaching
15051 * here, so testing against kernel CS suffices.
15052 */
15053- if (!user_mode(regs))
15054+ if (!user_mode_novm(regs))
15055 return;
15056
15057 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
15058diff -urNp linux-3.0.4/arch/x86/kernel/smpboot.c linux-3.0.4/arch/x86/kernel/smpboot.c
15059--- linux-3.0.4/arch/x86/kernel/smpboot.c 2011-07-21 22:17:23.000000000 -0400
15060+++ linux-3.0.4/arch/x86/kernel/smpboot.c 2011-08-23 21:47:55.000000000 -0400
15061@@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int api
15062 set_idle_for_cpu(cpu, c_idle.idle);
15063 do_rest:
15064 per_cpu(current_task, cpu) = c_idle.idle;
15065+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
15066 #ifdef CONFIG_X86_32
15067 /* Stack for startup_32 can be just as for start_secondary onwards */
15068 irq_ctx_init(cpu);
15069 #else
15070 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
15071 initial_gs = per_cpu_offset(cpu);
15072- per_cpu(kernel_stack, cpu) =
15073- (unsigned long)task_stack_page(c_idle.idle) -
15074- KERNEL_STACK_OFFSET + THREAD_SIZE;
15075+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
15076 #endif
15077+
15078+ pax_open_kernel();
15079 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
15080+ pax_close_kernel();
15081+
15082 initial_code = (unsigned long)start_secondary;
15083 stack_start = c_idle.idle->thread.sp;
15084
15085@@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int
15086
15087 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
15088
15089+#ifdef CONFIG_PAX_PER_CPU_PGD
15090+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
15091+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
15092+ KERNEL_PGD_PTRS);
15093+#endif
15094+
15095 err = do_boot_cpu(apicid, cpu);
15096 if (err) {
15097 pr_debug("do_boot_cpu failed %d\n", err);
15098diff -urNp linux-3.0.4/arch/x86/kernel/step.c linux-3.0.4/arch/x86/kernel/step.c
15099--- linux-3.0.4/arch/x86/kernel/step.c 2011-07-21 22:17:23.000000000 -0400
15100+++ linux-3.0.4/arch/x86/kernel/step.c 2011-08-23 21:47:55.000000000 -0400
15101@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
15102 struct desc_struct *desc;
15103 unsigned long base;
15104
15105- seg &= ~7UL;
15106+ seg >>= 3;
15107
15108 mutex_lock(&child->mm->context.lock);
15109- if (unlikely((seg >> 3) >= child->mm->context.size))
15110+ if (unlikely(seg >= child->mm->context.size))
15111 addr = -1L; /* bogus selector, access would fault */
15112 else {
15113 desc = child->mm->context.ldt + seg;
15114@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
15115 addr += base;
15116 }
15117 mutex_unlock(&child->mm->context.lock);
15118- }
15119+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
15120+ addr = ktla_ktva(addr);
15121
15122 return addr;
15123 }
15124@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
15125 unsigned char opcode[15];
15126 unsigned long addr = convert_ip_to_linear(child, regs);
15127
15128+ if (addr == -EINVAL)
15129+ return 0;
15130+
15131 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
15132 for (i = 0; i < copied; i++) {
15133 switch (opcode[i]) {
15134@@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
15135
15136 #ifdef CONFIG_X86_64
15137 case 0x40 ... 0x4f:
15138- if (regs->cs != __USER_CS)
15139+ if ((regs->cs & 0xffff) != __USER_CS)
15140 /* 32-bit mode: register increment */
15141 return 0;
15142 /* 64-bit mode: REX prefix */
15143diff -urNp linux-3.0.4/arch/x86/kernel/syscall_table_32.S linux-3.0.4/arch/x86/kernel/syscall_table_32.S
15144--- linux-3.0.4/arch/x86/kernel/syscall_table_32.S 2011-07-21 22:17:23.000000000 -0400
15145+++ linux-3.0.4/arch/x86/kernel/syscall_table_32.S 2011-08-23 21:47:55.000000000 -0400
15146@@ -1,3 +1,4 @@
15147+.section .rodata,"a",@progbits
15148 ENTRY(sys_call_table)
15149 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
15150 .long sys_exit
15151diff -urNp linux-3.0.4/arch/x86/kernel/sys_i386_32.c linux-3.0.4/arch/x86/kernel/sys_i386_32.c
15152--- linux-3.0.4/arch/x86/kernel/sys_i386_32.c 2011-07-21 22:17:23.000000000 -0400
15153+++ linux-3.0.4/arch/x86/kernel/sys_i386_32.c 2011-08-23 21:47:55.000000000 -0400
15154@@ -24,17 +24,224 @@
15155
15156 #include <asm/syscalls.h>
15157
15158-/*
15159- * Do a system call from kernel instead of calling sys_execve so we
15160- * end up with proper pt_regs.
15161- */
15162-int kernel_execve(const char *filename,
15163- const char *const argv[],
15164- const char *const envp[])
15165+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
15166 {
15167- long __res;
15168- asm volatile ("int $0x80"
15169- : "=a" (__res)
15170- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
15171- return __res;
15172+ unsigned long pax_task_size = TASK_SIZE;
15173+
15174+#ifdef CONFIG_PAX_SEGMEXEC
15175+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
15176+ pax_task_size = SEGMEXEC_TASK_SIZE;
15177+#endif
15178+
15179+ if (len > pax_task_size || addr > pax_task_size - len)
15180+ return -EINVAL;
15181+
15182+ return 0;
15183+}
15184+
15185+unsigned long
15186+arch_get_unmapped_area(struct file *filp, unsigned long addr,
15187+ unsigned long len, unsigned long pgoff, unsigned long flags)
15188+{
15189+ struct mm_struct *mm = current->mm;
15190+ struct vm_area_struct *vma;
15191+ unsigned long start_addr, pax_task_size = TASK_SIZE;
15192+
15193+#ifdef CONFIG_PAX_SEGMEXEC
15194+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
15195+ pax_task_size = SEGMEXEC_TASK_SIZE;
15196+#endif
15197+
15198+ pax_task_size -= PAGE_SIZE;
15199+
15200+ if (len > pax_task_size)
15201+ return -ENOMEM;
15202+
15203+ if (flags & MAP_FIXED)
15204+ return addr;
15205+
15206+#ifdef CONFIG_PAX_RANDMMAP
15207+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15208+#endif
15209+
15210+ if (addr) {
15211+ addr = PAGE_ALIGN(addr);
15212+ if (pax_task_size - len >= addr) {
15213+ vma = find_vma(mm, addr);
15214+ if (check_heap_stack_gap(vma, addr, len))
15215+ return addr;
15216+ }
15217+ }
15218+ if (len > mm->cached_hole_size) {
15219+ start_addr = addr = mm->free_area_cache;
15220+ } else {
15221+ start_addr = addr = mm->mmap_base;
15222+ mm->cached_hole_size = 0;
15223+ }
15224+
15225+#ifdef CONFIG_PAX_PAGEEXEC
15226+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
15227+ start_addr = 0x00110000UL;
15228+
15229+#ifdef CONFIG_PAX_RANDMMAP
15230+ if (mm->pax_flags & MF_PAX_RANDMMAP)
15231+ start_addr += mm->delta_mmap & 0x03FFF000UL;
15232+#endif
15233+
15234+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
15235+ start_addr = addr = mm->mmap_base;
15236+ else
15237+ addr = start_addr;
15238+ }
15239+#endif
15240+
15241+full_search:
15242+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
15243+ /* At this point: (!vma || addr < vma->vm_end). */
15244+ if (pax_task_size - len < addr) {
15245+ /*
15246+ * Start a new search - just in case we missed
15247+ * some holes.
15248+ */
15249+ if (start_addr != mm->mmap_base) {
15250+ start_addr = addr = mm->mmap_base;
15251+ mm->cached_hole_size = 0;
15252+ goto full_search;
15253+ }
15254+ return -ENOMEM;
15255+ }
15256+ if (check_heap_stack_gap(vma, addr, len))
15257+ break;
15258+ if (addr + mm->cached_hole_size < vma->vm_start)
15259+ mm->cached_hole_size = vma->vm_start - addr;
15260+ addr = vma->vm_end;
15261+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
15262+ start_addr = addr = mm->mmap_base;
15263+ mm->cached_hole_size = 0;
15264+ goto full_search;
15265+ }
15266+ }
15267+
15268+ /*
15269+ * Remember the place where we stopped the search:
15270+ */
15271+ mm->free_area_cache = addr + len;
15272+ return addr;
15273+}
15274+
15275+unsigned long
15276+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
15277+ const unsigned long len, const unsigned long pgoff,
15278+ const unsigned long flags)
15279+{
15280+ struct vm_area_struct *vma;
15281+ struct mm_struct *mm = current->mm;
15282+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
15283+
15284+#ifdef CONFIG_PAX_SEGMEXEC
15285+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
15286+ pax_task_size = SEGMEXEC_TASK_SIZE;
15287+#endif
15288+
15289+ pax_task_size -= PAGE_SIZE;
15290+
15291+ /* requested length too big for entire address space */
15292+ if (len > pax_task_size)
15293+ return -ENOMEM;
15294+
15295+ if (flags & MAP_FIXED)
15296+ return addr;
15297+
15298+#ifdef CONFIG_PAX_PAGEEXEC
15299+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
15300+ goto bottomup;
15301+#endif
15302+
15303+#ifdef CONFIG_PAX_RANDMMAP
15304+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15305+#endif
15306+
15307+ /* requesting a specific address */
15308+ if (addr) {
15309+ addr = PAGE_ALIGN(addr);
15310+ if (pax_task_size - len >= addr) {
15311+ vma = find_vma(mm, addr);
15312+ if (check_heap_stack_gap(vma, addr, len))
15313+ return addr;
15314+ }
15315+ }
15316+
15317+ /* check if free_area_cache is useful for us */
15318+ if (len <= mm->cached_hole_size) {
15319+ mm->cached_hole_size = 0;
15320+ mm->free_area_cache = mm->mmap_base;
15321+ }
15322+
15323+ /* either no address requested or can't fit in requested address hole */
15324+ addr = mm->free_area_cache;
15325+
15326+ /* make sure it can fit in the remaining address space */
15327+ if (addr > len) {
15328+ vma = find_vma(mm, addr-len);
15329+ if (check_heap_stack_gap(vma, addr - len, len))
15330+ /* remember the address as a hint for next time */
15331+ return (mm->free_area_cache = addr-len);
15332+ }
15333+
15334+ if (mm->mmap_base < len)
15335+ goto bottomup;
15336+
15337+ addr = mm->mmap_base-len;
15338+
15339+ do {
15340+ /*
15341+ * Lookup failure means no vma is above this address,
15342+ * else if new region fits below vma->vm_start,
15343+ * return with success:
15344+ */
15345+ vma = find_vma(mm, addr);
15346+ if (check_heap_stack_gap(vma, addr, len))
15347+ /* remember the address as a hint for next time */
15348+ return (mm->free_area_cache = addr);
15349+
15350+ /* remember the largest hole we saw so far */
15351+ if (addr + mm->cached_hole_size < vma->vm_start)
15352+ mm->cached_hole_size = vma->vm_start - addr;
15353+
15354+ /* try just below the current vma->vm_start */
15355+ addr = skip_heap_stack_gap(vma, len);
15356+ } while (!IS_ERR_VALUE(addr));
15357+
15358+bottomup:
15359+ /*
15360+ * A failed mmap() very likely causes application failure,
15361+ * so fall back to the bottom-up function here. This scenario
15362+ * can happen with large stack limits and large mmap()
15363+ * allocations.
15364+ */
15365+
15366+#ifdef CONFIG_PAX_SEGMEXEC
15367+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
15368+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
15369+ else
15370+#endif
15371+
15372+ mm->mmap_base = TASK_UNMAPPED_BASE;
15373+
15374+#ifdef CONFIG_PAX_RANDMMAP
15375+ if (mm->pax_flags & MF_PAX_RANDMMAP)
15376+ mm->mmap_base += mm->delta_mmap;
15377+#endif
15378+
15379+ mm->free_area_cache = mm->mmap_base;
15380+ mm->cached_hole_size = ~0UL;
15381+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15382+ /*
15383+ * Restore the topdown base:
15384+ */
15385+ mm->mmap_base = base;
15386+ mm->free_area_cache = base;
15387+ mm->cached_hole_size = ~0UL;
15388+
15389+ return addr;
15390 }
15391diff -urNp linux-3.0.4/arch/x86/kernel/sys_x86_64.c linux-3.0.4/arch/x86/kernel/sys_x86_64.c
15392--- linux-3.0.4/arch/x86/kernel/sys_x86_64.c 2011-07-21 22:17:23.000000000 -0400
15393+++ linux-3.0.4/arch/x86/kernel/sys_x86_64.c 2011-08-23 21:47:55.000000000 -0400
15394@@ -32,8 +32,8 @@ out:
15395 return error;
15396 }
15397
15398-static void find_start_end(unsigned long flags, unsigned long *begin,
15399- unsigned long *end)
15400+static void find_start_end(struct mm_struct *mm, unsigned long flags,
15401+ unsigned long *begin, unsigned long *end)
15402 {
15403 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
15404 unsigned long new_begin;
15405@@ -52,7 +52,7 @@ static void find_start_end(unsigned long
15406 *begin = new_begin;
15407 }
15408 } else {
15409- *begin = TASK_UNMAPPED_BASE;
15410+ *begin = mm->mmap_base;
15411 *end = TASK_SIZE;
15412 }
15413 }
15414@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
15415 if (flags & MAP_FIXED)
15416 return addr;
15417
15418- find_start_end(flags, &begin, &end);
15419+ find_start_end(mm, flags, &begin, &end);
15420
15421 if (len > end)
15422 return -ENOMEM;
15423
15424+#ifdef CONFIG_PAX_RANDMMAP
15425+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15426+#endif
15427+
15428 if (addr) {
15429 addr = PAGE_ALIGN(addr);
15430 vma = find_vma(mm, addr);
15431- if (end - len >= addr &&
15432- (!vma || addr + len <= vma->vm_start))
15433+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
15434 return addr;
15435 }
15436 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
15437@@ -106,7 +109,7 @@ full_search:
15438 }
15439 return -ENOMEM;
15440 }
15441- if (!vma || addr + len <= vma->vm_start) {
15442+ if (check_heap_stack_gap(vma, addr, len)) {
15443 /*
15444 * Remember the place where we stopped the search:
15445 */
15446@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
15447 {
15448 struct vm_area_struct *vma;
15449 struct mm_struct *mm = current->mm;
15450- unsigned long addr = addr0;
15451+ unsigned long base = mm->mmap_base, addr = addr0;
15452
15453 /* requested length too big for entire address space */
15454 if (len > TASK_SIZE)
15455@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
15456 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
15457 goto bottomup;
15458
15459+#ifdef CONFIG_PAX_RANDMMAP
15460+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15461+#endif
15462+
15463 /* requesting a specific address */
15464 if (addr) {
15465 addr = PAGE_ALIGN(addr);
15466- vma = find_vma(mm, addr);
15467- if (TASK_SIZE - len >= addr &&
15468- (!vma || addr + len <= vma->vm_start))
15469- return addr;
15470+ if (TASK_SIZE - len >= addr) {
15471+ vma = find_vma(mm, addr);
15472+ if (check_heap_stack_gap(vma, addr, len))
15473+ return addr;
15474+ }
15475 }
15476
15477 /* check if free_area_cache is useful for us */
15478@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
15479 /* make sure it can fit in the remaining address space */
15480 if (addr > len) {
15481 vma = find_vma(mm, addr-len);
15482- if (!vma || addr <= vma->vm_start)
15483+ if (check_heap_stack_gap(vma, addr - len, len))
15484 /* remember the address as a hint for next time */
15485 return mm->free_area_cache = addr-len;
15486 }
15487@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
15488 * return with success:
15489 */
15490 vma = find_vma(mm, addr);
15491- if (!vma || addr+len <= vma->vm_start)
15492+ if (check_heap_stack_gap(vma, addr, len))
15493 /* remember the address as a hint for next time */
15494 return mm->free_area_cache = addr;
15495
15496@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
15497 mm->cached_hole_size = vma->vm_start - addr;
15498
15499 /* try just below the current vma->vm_start */
15500- addr = vma->vm_start-len;
15501- } while (len < vma->vm_start);
15502+ addr = skip_heap_stack_gap(vma, len);
15503+ } while (!IS_ERR_VALUE(addr));
15504
15505 bottomup:
15506 /*
15507@@ -198,13 +206,21 @@ bottomup:
15508 * can happen with large stack limits and large mmap()
15509 * allocations.
15510 */
15511+ mm->mmap_base = TASK_UNMAPPED_BASE;
15512+
15513+#ifdef CONFIG_PAX_RANDMMAP
15514+ if (mm->pax_flags & MF_PAX_RANDMMAP)
15515+ mm->mmap_base += mm->delta_mmap;
15516+#endif
15517+
15518+ mm->free_area_cache = mm->mmap_base;
15519 mm->cached_hole_size = ~0UL;
15520- mm->free_area_cache = TASK_UNMAPPED_BASE;
15521 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15522 /*
15523 * Restore the topdown base:
15524 */
15525- mm->free_area_cache = mm->mmap_base;
15526+ mm->mmap_base = base;
15527+ mm->free_area_cache = base;
15528 mm->cached_hole_size = ~0UL;
15529
15530 return addr;
15531diff -urNp linux-3.0.4/arch/x86/kernel/tboot.c linux-3.0.4/arch/x86/kernel/tboot.c
15532--- linux-3.0.4/arch/x86/kernel/tboot.c 2011-07-21 22:17:23.000000000 -0400
15533+++ linux-3.0.4/arch/x86/kernel/tboot.c 2011-08-23 21:47:55.000000000 -0400
15534@@ -217,7 +217,7 @@ static int tboot_setup_sleep(void)
15535
15536 void tboot_shutdown(u32 shutdown_type)
15537 {
15538- void (*shutdown)(void);
15539+ void (* __noreturn shutdown)(void);
15540
15541 if (!tboot_enabled())
15542 return;
15543@@ -239,7 +239,7 @@ void tboot_shutdown(u32 shutdown_type)
15544
15545 switch_to_tboot_pt();
15546
15547- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
15548+ shutdown = (void *)tboot->shutdown_entry;
15549 shutdown();
15550
15551 /* should not reach here */
15552@@ -296,7 +296,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
15553 tboot_shutdown(acpi_shutdown_map[sleep_state]);
15554 }
15555
15556-static atomic_t ap_wfs_count;
15557+static atomic_unchecked_t ap_wfs_count;
15558
15559 static int tboot_wait_for_aps(int num_aps)
15560 {
15561@@ -320,9 +320,9 @@ static int __cpuinit tboot_cpu_callback(
15562 {
15563 switch (action) {
15564 case CPU_DYING:
15565- atomic_inc(&ap_wfs_count);
15566+ atomic_inc_unchecked(&ap_wfs_count);
15567 if (num_online_cpus() == 1)
15568- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
15569+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
15570 return NOTIFY_BAD;
15571 break;
15572 }
15573@@ -341,7 +341,7 @@ static __init int tboot_late_init(void)
15574
15575 tboot_create_trampoline();
15576
15577- atomic_set(&ap_wfs_count, 0);
15578+ atomic_set_unchecked(&ap_wfs_count, 0);
15579 register_hotcpu_notifier(&tboot_cpu_notifier);
15580 return 0;
15581 }
15582diff -urNp linux-3.0.4/arch/x86/kernel/time.c linux-3.0.4/arch/x86/kernel/time.c
15583--- linux-3.0.4/arch/x86/kernel/time.c 2011-07-21 22:17:23.000000000 -0400
15584+++ linux-3.0.4/arch/x86/kernel/time.c 2011-08-23 21:47:55.000000000 -0400
15585@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs
15586 {
15587 unsigned long pc = instruction_pointer(regs);
15588
15589- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
15590+ if (!user_mode(regs) && in_lock_functions(pc)) {
15591 #ifdef CONFIG_FRAME_POINTER
15592- return *(unsigned long *)(regs->bp + sizeof(long));
15593+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
15594 #else
15595 unsigned long *sp =
15596 (unsigned long *)kernel_stack_pointer(regs);
15597@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
15598 * or above a saved flags. Eflags has bits 22-31 zero,
15599 * kernel addresses don't.
15600 */
15601+
15602+#ifdef CONFIG_PAX_KERNEXEC
15603+ return ktla_ktva(sp[0]);
15604+#else
15605 if (sp[0] >> 22)
15606 return sp[0];
15607 if (sp[1] >> 22)
15608 return sp[1];
15609 #endif
15610+
15611+#endif
15612 }
15613 return pc;
15614 }
15615diff -urNp linux-3.0.4/arch/x86/kernel/tls.c linux-3.0.4/arch/x86/kernel/tls.c
15616--- linux-3.0.4/arch/x86/kernel/tls.c 2011-07-21 22:17:23.000000000 -0400
15617+++ linux-3.0.4/arch/x86/kernel/tls.c 2011-08-23 21:47:55.000000000 -0400
15618@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
15619 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
15620 return -EINVAL;
15621
15622+#ifdef CONFIG_PAX_SEGMEXEC
15623+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
15624+ return -EINVAL;
15625+#endif
15626+
15627 set_tls_desc(p, idx, &info, 1);
15628
15629 return 0;
15630diff -urNp linux-3.0.4/arch/x86/kernel/trampoline_32.S linux-3.0.4/arch/x86/kernel/trampoline_32.S
15631--- linux-3.0.4/arch/x86/kernel/trampoline_32.S 2011-07-21 22:17:23.000000000 -0400
15632+++ linux-3.0.4/arch/x86/kernel/trampoline_32.S 2011-08-23 21:47:55.000000000 -0400
15633@@ -32,6 +32,12 @@
15634 #include <asm/segment.h>
15635 #include <asm/page_types.h>
15636
15637+#ifdef CONFIG_PAX_KERNEXEC
15638+#define ta(X) (X)
15639+#else
15640+#define ta(X) ((X) - __PAGE_OFFSET)
15641+#endif
15642+
15643 #ifdef CONFIG_SMP
15644
15645 .section ".x86_trampoline","a"
15646@@ -62,7 +68,7 @@ r_base = .
15647 inc %ax # protected mode (PE) bit
15648 lmsw %ax # into protected mode
15649 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
15650- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
15651+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
15652
15653 # These need to be in the same 64K segment as the above;
15654 # hence we don't use the boot_gdt_descr defined in head.S
15655diff -urNp linux-3.0.4/arch/x86/kernel/trampoline_64.S linux-3.0.4/arch/x86/kernel/trampoline_64.S
15656--- linux-3.0.4/arch/x86/kernel/trampoline_64.S 2011-07-21 22:17:23.000000000 -0400
15657+++ linux-3.0.4/arch/x86/kernel/trampoline_64.S 2011-08-23 21:47:55.000000000 -0400
15658@@ -90,7 +90,7 @@ startup_32:
15659 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
15660 movl %eax, %ds
15661
15662- movl $X86_CR4_PAE, %eax
15663+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15664 movl %eax, %cr4 # Enable PAE mode
15665
15666 # Setup trampoline 4 level pagetables
15667@@ -138,7 +138,7 @@ tidt:
15668 # so the kernel can live anywhere
15669 .balign 4
15670 tgdt:
15671- .short tgdt_end - tgdt # gdt limit
15672+ .short tgdt_end - tgdt - 1 # gdt limit
15673 .long tgdt - r_base
15674 .short 0
15675 .quad 0x00cf9b000000ffff # __KERNEL32_CS
15676diff -urNp linux-3.0.4/arch/x86/kernel/traps.c linux-3.0.4/arch/x86/kernel/traps.c
15677--- linux-3.0.4/arch/x86/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
15678+++ linux-3.0.4/arch/x86/kernel/traps.c 2011-08-23 21:47:55.000000000 -0400
15679@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
15680
15681 /* Do we ignore FPU interrupts ? */
15682 char ignore_fpu_irq;
15683-
15684-/*
15685- * The IDT has to be page-aligned to simplify the Pentium
15686- * F0 0F bug workaround.
15687- */
15688-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
15689 #endif
15690
15691 DECLARE_BITMAP(used_vectors, NR_VECTORS);
15692@@ -117,13 +111,13 @@ static inline void preempt_conditional_c
15693 }
15694
15695 static void __kprobes
15696-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
15697+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
15698 long error_code, siginfo_t *info)
15699 {
15700 struct task_struct *tsk = current;
15701
15702 #ifdef CONFIG_X86_32
15703- if (regs->flags & X86_VM_MASK) {
15704+ if (v8086_mode(regs)) {
15705 /*
15706 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
15707 * On nmi (interrupt 2), do_trap should not be called.
15708@@ -134,7 +128,7 @@ do_trap(int trapnr, int signr, char *str
15709 }
15710 #endif
15711
15712- if (!user_mode(regs))
15713+ if (!user_mode_novm(regs))
15714 goto kernel_trap;
15715
15716 #ifdef CONFIG_X86_32
15717@@ -157,7 +151,7 @@ trap_signal:
15718 printk_ratelimit()) {
15719 printk(KERN_INFO
15720 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
15721- tsk->comm, tsk->pid, str,
15722+ tsk->comm, task_pid_nr(tsk), str,
15723 regs->ip, regs->sp, error_code);
15724 print_vma_addr(" in ", regs->ip);
15725 printk("\n");
15726@@ -174,8 +168,20 @@ kernel_trap:
15727 if (!fixup_exception(regs)) {
15728 tsk->thread.error_code = error_code;
15729 tsk->thread.trap_no = trapnr;
15730+
15731+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15732+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
15733+ str = "PAX: suspicious stack segment fault";
15734+#endif
15735+
15736 die(str, regs, error_code);
15737 }
15738+
15739+#ifdef CONFIG_PAX_REFCOUNT
15740+ if (trapnr == 4)
15741+ pax_report_refcount_overflow(regs);
15742+#endif
15743+
15744 return;
15745
15746 #ifdef CONFIG_X86_32
15747@@ -264,14 +270,30 @@ do_general_protection(struct pt_regs *re
15748 conditional_sti(regs);
15749
15750 #ifdef CONFIG_X86_32
15751- if (regs->flags & X86_VM_MASK)
15752+ if (v8086_mode(regs))
15753 goto gp_in_vm86;
15754 #endif
15755
15756 tsk = current;
15757- if (!user_mode(regs))
15758+ if (!user_mode_novm(regs))
15759 goto gp_in_kernel;
15760
15761+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
15762+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
15763+ struct mm_struct *mm = tsk->mm;
15764+ unsigned long limit;
15765+
15766+ down_write(&mm->mmap_sem);
15767+ limit = mm->context.user_cs_limit;
15768+ if (limit < TASK_SIZE) {
15769+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
15770+ up_write(&mm->mmap_sem);
15771+ return;
15772+ }
15773+ up_write(&mm->mmap_sem);
15774+ }
15775+#endif
15776+
15777 tsk->thread.error_code = error_code;
15778 tsk->thread.trap_no = 13;
15779
15780@@ -304,6 +326,13 @@ gp_in_kernel:
15781 if (notify_die(DIE_GPF, "general protection fault", regs,
15782 error_code, 13, SIGSEGV) == NOTIFY_STOP)
15783 return;
15784+
15785+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15786+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
15787+ die("PAX: suspicious general protection fault", regs, error_code);
15788+ else
15789+#endif
15790+
15791 die("general protection fault", regs, error_code);
15792 }
15793
15794@@ -433,6 +462,17 @@ static notrace __kprobes void default_do
15795 dotraplinkage notrace __kprobes void
15796 do_nmi(struct pt_regs *regs, long error_code)
15797 {
15798+
15799+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15800+ if (!user_mode(regs)) {
15801+ unsigned long cs = regs->cs & 0xFFFF;
15802+ unsigned long ip = ktva_ktla(regs->ip);
15803+
15804+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
15805+ regs->ip = ip;
15806+ }
15807+#endif
15808+
15809 nmi_enter();
15810
15811 inc_irq_stat(__nmi_count);
15812@@ -569,7 +609,7 @@ dotraplinkage void __kprobes do_debug(st
15813 /* It's safe to allow irq's after DR6 has been saved */
15814 preempt_conditional_sti(regs);
15815
15816- if (regs->flags & X86_VM_MASK) {
15817+ if (v8086_mode(regs)) {
15818 handle_vm86_trap((struct kernel_vm86_regs *) regs,
15819 error_code, 1);
15820 preempt_conditional_cli(regs);
15821@@ -583,7 +623,7 @@ dotraplinkage void __kprobes do_debug(st
15822 * We already checked v86 mode above, so we can check for kernel mode
15823 * by just checking the CPL of CS.
15824 */
15825- if ((dr6 & DR_STEP) && !user_mode(regs)) {
15826+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
15827 tsk->thread.debugreg6 &= ~DR_STEP;
15828 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
15829 regs->flags &= ~X86_EFLAGS_TF;
15830@@ -612,7 +652,7 @@ void math_error(struct pt_regs *regs, in
15831 return;
15832 conditional_sti(regs);
15833
15834- if (!user_mode_vm(regs))
15835+ if (!user_mode(regs))
15836 {
15837 if (!fixup_exception(regs)) {
15838 task->thread.error_code = error_code;
15839@@ -723,7 +763,7 @@ asmlinkage void __attribute__((weak)) sm
15840 void __math_state_restore(void)
15841 {
15842 struct thread_info *thread = current_thread_info();
15843- struct task_struct *tsk = thread->task;
15844+ struct task_struct *tsk = current;
15845
15846 /*
15847 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
15848@@ -750,8 +790,7 @@ void __math_state_restore(void)
15849 */
15850 asmlinkage void math_state_restore(void)
15851 {
15852- struct thread_info *thread = current_thread_info();
15853- struct task_struct *tsk = thread->task;
15854+ struct task_struct *tsk = current;
15855
15856 if (!tsk_used_math(tsk)) {
15857 local_irq_enable();
15858diff -urNp linux-3.0.4/arch/x86/kernel/verify_cpu.S linux-3.0.4/arch/x86/kernel/verify_cpu.S
15859--- linux-3.0.4/arch/x86/kernel/verify_cpu.S 2011-07-21 22:17:23.000000000 -0400
15860+++ linux-3.0.4/arch/x86/kernel/verify_cpu.S 2011-08-23 21:48:14.000000000 -0400
15861@@ -20,6 +20,7 @@
15862 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
15863 * arch/x86/kernel/trampoline_64.S: secondary processor verification
15864 * arch/x86/kernel/head_32.S: processor startup
15865+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
15866 *
15867 * verify_cpu, returns the status of longmode and SSE in register %eax.
15868 * 0: Success 1: Failure
15869diff -urNp linux-3.0.4/arch/x86/kernel/vm86_32.c linux-3.0.4/arch/x86/kernel/vm86_32.c
15870--- linux-3.0.4/arch/x86/kernel/vm86_32.c 2011-07-21 22:17:23.000000000 -0400
15871+++ linux-3.0.4/arch/x86/kernel/vm86_32.c 2011-08-23 21:48:14.000000000 -0400
15872@@ -41,6 +41,7 @@
15873 #include <linux/ptrace.h>
15874 #include <linux/audit.h>
15875 #include <linux/stddef.h>
15876+#include <linux/grsecurity.h>
15877
15878 #include <asm/uaccess.h>
15879 #include <asm/io.h>
15880@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
15881 do_exit(SIGSEGV);
15882 }
15883
15884- tss = &per_cpu(init_tss, get_cpu());
15885+ tss = init_tss + get_cpu();
15886 current->thread.sp0 = current->thread.saved_sp0;
15887 current->thread.sysenter_cs = __KERNEL_CS;
15888 load_sp0(tss, &current->thread);
15889@@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __use
15890 struct task_struct *tsk;
15891 int tmp, ret = -EPERM;
15892
15893+#ifdef CONFIG_GRKERNSEC_VM86
15894+ if (!capable(CAP_SYS_RAWIO)) {
15895+ gr_handle_vm86();
15896+ goto out;
15897+ }
15898+#endif
15899+
15900 tsk = current;
15901 if (tsk->thread.saved_sp0)
15902 goto out;
15903@@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned
15904 int tmp, ret;
15905 struct vm86plus_struct __user *v86;
15906
15907+#ifdef CONFIG_GRKERNSEC_VM86
15908+ if (!capable(CAP_SYS_RAWIO)) {
15909+ gr_handle_vm86();
15910+ ret = -EPERM;
15911+ goto out;
15912+ }
15913+#endif
15914+
15915 tsk = current;
15916 switch (cmd) {
15917 case VM86_REQUEST_IRQ:
15918@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
15919 tsk->thread.saved_fs = info->regs32->fs;
15920 tsk->thread.saved_gs = get_user_gs(info->regs32);
15921
15922- tss = &per_cpu(init_tss, get_cpu());
15923+ tss = init_tss + get_cpu();
15924 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
15925 if (cpu_has_sep)
15926 tsk->thread.sysenter_cs = 0;
15927@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
15928 goto cannot_handle;
15929 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
15930 goto cannot_handle;
15931- intr_ptr = (unsigned long __user *) (i << 2);
15932+ intr_ptr = (__force unsigned long __user *) (i << 2);
15933 if (get_user(segoffs, intr_ptr))
15934 goto cannot_handle;
15935 if ((segoffs >> 16) == BIOSSEG)
15936diff -urNp linux-3.0.4/arch/x86/kernel/vmlinux.lds.S linux-3.0.4/arch/x86/kernel/vmlinux.lds.S
15937--- linux-3.0.4/arch/x86/kernel/vmlinux.lds.S 2011-07-21 22:17:23.000000000 -0400
15938+++ linux-3.0.4/arch/x86/kernel/vmlinux.lds.S 2011-08-23 21:47:55.000000000 -0400
15939@@ -26,6 +26,13 @@
15940 #include <asm/page_types.h>
15941 #include <asm/cache.h>
15942 #include <asm/boot.h>
15943+#include <asm/segment.h>
15944+
15945+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15946+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
15947+#else
15948+#define __KERNEL_TEXT_OFFSET 0
15949+#endif
15950
15951 #undef i386 /* in case the preprocessor is a 32bit one */
15952
15953@@ -69,31 +76,46 @@ jiffies_64 = jiffies;
15954
15955 PHDRS {
15956 text PT_LOAD FLAGS(5); /* R_E */
15957+#ifdef CONFIG_X86_32
15958+ module PT_LOAD FLAGS(5); /* R_E */
15959+#endif
15960+#ifdef CONFIG_XEN
15961+ rodata PT_LOAD FLAGS(5); /* R_E */
15962+#else
15963+ rodata PT_LOAD FLAGS(4); /* R__ */
15964+#endif
15965 data PT_LOAD FLAGS(6); /* RW_ */
15966 #ifdef CONFIG_X86_64
15967 user PT_LOAD FLAGS(5); /* R_E */
15968+#endif
15969+ init.begin PT_LOAD FLAGS(6); /* RW_ */
15970 #ifdef CONFIG_SMP
15971 percpu PT_LOAD FLAGS(6); /* RW_ */
15972 #endif
15973+ text.init PT_LOAD FLAGS(5); /* R_E */
15974+ text.exit PT_LOAD FLAGS(5); /* R_E */
15975 init PT_LOAD FLAGS(7); /* RWE */
15976-#endif
15977 note PT_NOTE FLAGS(0); /* ___ */
15978 }
15979
15980 SECTIONS
15981 {
15982 #ifdef CONFIG_X86_32
15983- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
15984- phys_startup_32 = startup_32 - LOAD_OFFSET;
15985+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
15986 #else
15987- . = __START_KERNEL;
15988- phys_startup_64 = startup_64 - LOAD_OFFSET;
15989+ . = __START_KERNEL;
15990 #endif
15991
15992 /* Text and read-only data */
15993- .text : AT(ADDR(.text) - LOAD_OFFSET) {
15994- _text = .;
15995+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
15996 /* bootstrapping code */
15997+#ifdef CONFIG_X86_32
15998+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15999+#else
16000+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
16001+#endif
16002+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
16003+ _text = .;
16004 HEAD_TEXT
16005 #ifdef CONFIG_X86_32
16006 . = ALIGN(PAGE_SIZE);
16007@@ -109,13 +131,47 @@ SECTIONS
16008 IRQENTRY_TEXT
16009 *(.fixup)
16010 *(.gnu.warning)
16011- /* End of text section */
16012- _etext = .;
16013 } :text = 0x9090
16014
16015- NOTES :text :note
16016+ . += __KERNEL_TEXT_OFFSET;
16017+
16018+#ifdef CONFIG_X86_32
16019+ . = ALIGN(PAGE_SIZE);
16020+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
16021+
16022+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
16023+ MODULES_EXEC_VADDR = .;
16024+ BYTE(0)
16025+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
16026+ . = ALIGN(HPAGE_SIZE);
16027+ MODULES_EXEC_END = . - 1;
16028+#endif
16029+
16030+ } :module
16031+#endif
16032+
16033+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
16034+ /* End of text section */
16035+ _etext = . - __KERNEL_TEXT_OFFSET;
16036+ }
16037+
16038+#ifdef CONFIG_X86_32
16039+ . = ALIGN(PAGE_SIZE);
16040+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
16041+ *(.idt)
16042+ . = ALIGN(PAGE_SIZE);
16043+ *(.empty_zero_page)
16044+ *(.initial_pg_fixmap)
16045+ *(.initial_pg_pmd)
16046+ *(.initial_page_table)
16047+ *(.swapper_pg_dir)
16048+ } :rodata
16049+#endif
16050+
16051+ . = ALIGN(PAGE_SIZE);
16052+ NOTES :rodata :note
16053
16054- EXCEPTION_TABLE(16) :text = 0x9090
16055+ EXCEPTION_TABLE(16) :rodata
16056
16057 #if defined(CONFIG_DEBUG_RODATA)
16058 /* .text should occupy whole number of pages */
16059@@ -127,16 +183,20 @@ SECTIONS
16060
16061 /* Data */
16062 .data : AT(ADDR(.data) - LOAD_OFFSET) {
16063+
16064+#ifdef CONFIG_PAX_KERNEXEC
16065+ . = ALIGN(HPAGE_SIZE);
16066+#else
16067+ . = ALIGN(PAGE_SIZE);
16068+#endif
16069+
16070 /* Start of data section */
16071 _sdata = .;
16072
16073 /* init_task */
16074 INIT_TASK_DATA(THREAD_SIZE)
16075
16076-#ifdef CONFIG_X86_32
16077- /* 32 bit has nosave before _edata */
16078 NOSAVE_DATA
16079-#endif
16080
16081 PAGE_ALIGNED_DATA(PAGE_SIZE)
16082
16083@@ -208,12 +268,19 @@ SECTIONS
16084 #endif /* CONFIG_X86_64 */
16085
16086 /* Init code and data - will be freed after init */
16087- . = ALIGN(PAGE_SIZE);
16088 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
16089+ BYTE(0)
16090+
16091+#ifdef CONFIG_PAX_KERNEXEC
16092+ . = ALIGN(HPAGE_SIZE);
16093+#else
16094+ . = ALIGN(PAGE_SIZE);
16095+#endif
16096+
16097 __init_begin = .; /* paired with __init_end */
16098- }
16099+ } :init.begin
16100
16101-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
16102+#ifdef CONFIG_SMP
16103 /*
16104 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
16105 * output PHDR, so the next output section - .init.text - should
16106@@ -222,12 +289,27 @@ SECTIONS
16107 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
16108 #endif
16109
16110- INIT_TEXT_SECTION(PAGE_SIZE)
16111-#ifdef CONFIG_X86_64
16112- :init
16113-#endif
16114+ . = ALIGN(PAGE_SIZE);
16115+ init_begin = .;
16116+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
16117+ VMLINUX_SYMBOL(_sinittext) = .;
16118+ INIT_TEXT
16119+ VMLINUX_SYMBOL(_einittext) = .;
16120+ . = ALIGN(PAGE_SIZE);
16121+ } :text.init
16122
16123- INIT_DATA_SECTION(16)
16124+ /*
16125+ * .exit.text is discard at runtime, not link time, to deal with
16126+ * references from .altinstructions and .eh_frame
16127+ */
16128+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
16129+ EXIT_TEXT
16130+ . = ALIGN(16);
16131+ } :text.exit
16132+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
16133+
16134+ . = ALIGN(PAGE_SIZE);
16135+ INIT_DATA_SECTION(16) :init
16136
16137 /*
16138 * Code and data for a variety of lowlevel trampolines, to be
16139@@ -301,19 +383,12 @@ SECTIONS
16140 }
16141
16142 . = ALIGN(8);
16143- /*
16144- * .exit.text is discard at runtime, not link time, to deal with
16145- * references from .altinstructions and .eh_frame
16146- */
16147- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
16148- EXIT_TEXT
16149- }
16150
16151 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
16152 EXIT_DATA
16153 }
16154
16155-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
16156+#ifndef CONFIG_SMP
16157 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
16158 #endif
16159
16160@@ -332,16 +407,10 @@ SECTIONS
16161 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
16162 __smp_locks = .;
16163 *(.smp_locks)
16164- . = ALIGN(PAGE_SIZE);
16165 __smp_locks_end = .;
16166+ . = ALIGN(PAGE_SIZE);
16167 }
16168
16169-#ifdef CONFIG_X86_64
16170- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
16171- NOSAVE_DATA
16172- }
16173-#endif
16174-
16175 /* BSS */
16176 . = ALIGN(PAGE_SIZE);
16177 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
16178@@ -357,6 +426,7 @@ SECTIONS
16179 __brk_base = .;
16180 . += 64 * 1024; /* 64k alignment slop space */
16181 *(.brk_reservation) /* areas brk users have reserved */
16182+ . = ALIGN(HPAGE_SIZE);
16183 __brk_limit = .;
16184 }
16185
16186@@ -383,13 +453,12 @@ SECTIONS
16187 * for the boot processor.
16188 */
16189 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
16190-INIT_PER_CPU(gdt_page);
16191 INIT_PER_CPU(irq_stack_union);
16192
16193 /*
16194 * Build-time check on the image size:
16195 */
16196-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
16197+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
16198 "kernel image bigger than KERNEL_IMAGE_SIZE");
16199
16200 #ifdef CONFIG_SMP
16201diff -urNp linux-3.0.4/arch/x86/kernel/vsyscall_64.c linux-3.0.4/arch/x86/kernel/vsyscall_64.c
16202--- linux-3.0.4/arch/x86/kernel/vsyscall_64.c 2011-07-21 22:17:23.000000000 -0400
16203+++ linux-3.0.4/arch/x86/kernel/vsyscall_64.c 2011-08-23 21:47:55.000000000 -0400
16204@@ -53,7 +53,7 @@ DEFINE_VVAR(int, vgetcpu_mode);
16205 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
16206 {
16207 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
16208- .sysctl_enabled = 1,
16209+ .sysctl_enabled = 0,
16210 };
16211
16212 void update_vsyscall_tz(void)
16213@@ -231,7 +231,7 @@ static long __vsyscall(3) venosys_1(void
16214 static ctl_table kernel_table2[] = {
16215 { .procname = "vsyscall64",
16216 .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int),
16217- .mode = 0644,
16218+ .mode = 0444,
16219 .proc_handler = proc_dointvec },
16220 {}
16221 };
16222diff -urNp linux-3.0.4/arch/x86/kernel/x8664_ksyms_64.c linux-3.0.4/arch/x86/kernel/x8664_ksyms_64.c
16223--- linux-3.0.4/arch/x86/kernel/x8664_ksyms_64.c 2011-07-21 22:17:23.000000000 -0400
16224+++ linux-3.0.4/arch/x86/kernel/x8664_ksyms_64.c 2011-08-23 21:47:55.000000000 -0400
16225@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
16226 EXPORT_SYMBOL(copy_user_generic_string);
16227 EXPORT_SYMBOL(copy_user_generic_unrolled);
16228 EXPORT_SYMBOL(__copy_user_nocache);
16229-EXPORT_SYMBOL(_copy_from_user);
16230-EXPORT_SYMBOL(_copy_to_user);
16231
16232 EXPORT_SYMBOL(copy_page);
16233 EXPORT_SYMBOL(clear_page);
16234diff -urNp linux-3.0.4/arch/x86/kernel/xsave.c linux-3.0.4/arch/x86/kernel/xsave.c
16235--- linux-3.0.4/arch/x86/kernel/xsave.c 2011-07-21 22:17:23.000000000 -0400
16236+++ linux-3.0.4/arch/x86/kernel/xsave.c 2011-10-06 04:17:55.000000000 -0400
16237@@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_
16238 fx_sw_user->xstate_size > fx_sw_user->extended_size)
16239 return -EINVAL;
16240
16241- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
16242+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
16243 fx_sw_user->extended_size -
16244 FP_XSTATE_MAGIC2_SIZE));
16245 if (err)
16246@@ -267,7 +267,7 @@ fx_only:
16247 * the other extended state.
16248 */
16249 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
16250- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
16251+ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
16252 }
16253
16254 /*
16255@@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf
16256 if (use_xsave())
16257 err = restore_user_xstate(buf);
16258 else
16259- err = fxrstor_checking((__force struct i387_fxsave_struct *)
16260+ err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
16261 buf);
16262 if (unlikely(err)) {
16263 /*
16264diff -urNp linux-3.0.4/arch/x86/kvm/emulate.c linux-3.0.4/arch/x86/kvm/emulate.c
16265--- linux-3.0.4/arch/x86/kvm/emulate.c 2011-07-21 22:17:23.000000000 -0400
16266+++ linux-3.0.4/arch/x86/kvm/emulate.c 2011-08-23 21:47:55.000000000 -0400
16267@@ -96,7 +96,7 @@
16268 #define Src2ImmByte (2<<29)
16269 #define Src2One (3<<29)
16270 #define Src2Imm (4<<29)
16271-#define Src2Mask (7<<29)
16272+#define Src2Mask (7U<<29)
16273
16274 #define X2(x...) x, x
16275 #define X3(x...) X2(x), x
16276@@ -207,6 +207,7 @@ struct gprefix {
16277
16278 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
16279 do { \
16280+ unsigned long _tmp; \
16281 __asm__ __volatile__ ( \
16282 _PRE_EFLAGS("0", "4", "2") \
16283 _op _suffix " %"_x"3,%1; " \
16284@@ -220,8 +221,6 @@ struct gprefix {
16285 /* Raw emulation: instruction has two explicit operands. */
16286 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
16287 do { \
16288- unsigned long _tmp; \
16289- \
16290 switch ((_dst).bytes) { \
16291 case 2: \
16292 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
16293@@ -237,7 +236,6 @@ struct gprefix {
16294
16295 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
16296 do { \
16297- unsigned long _tmp; \
16298 switch ((_dst).bytes) { \
16299 case 1: \
16300 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
16301diff -urNp linux-3.0.4/arch/x86/kvm/lapic.c linux-3.0.4/arch/x86/kvm/lapic.c
16302--- linux-3.0.4/arch/x86/kvm/lapic.c 2011-07-21 22:17:23.000000000 -0400
16303+++ linux-3.0.4/arch/x86/kvm/lapic.c 2011-08-23 21:47:55.000000000 -0400
16304@@ -53,7 +53,7 @@
16305 #define APIC_BUS_CYCLE_NS 1
16306
16307 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
16308-#define apic_debug(fmt, arg...)
16309+#define apic_debug(fmt, arg...) do {} while (0)
16310
16311 #define APIC_LVT_NUM 6
16312 /* 14 is the version for Xeon and Pentium 8.4.8*/
16313diff -urNp linux-3.0.4/arch/x86/kvm/mmu.c linux-3.0.4/arch/x86/kvm/mmu.c
16314--- linux-3.0.4/arch/x86/kvm/mmu.c 2011-07-21 22:17:23.000000000 -0400
16315+++ linux-3.0.4/arch/x86/kvm/mmu.c 2011-08-23 21:47:55.000000000 -0400
16316@@ -3238,7 +3238,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
16317
16318 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
16319
16320- invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
16321+ invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
16322
16323 /*
16324 * Assume that the pte write on a page table of the same type
16325@@ -3270,7 +3270,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
16326 }
16327
16328 spin_lock(&vcpu->kvm->mmu_lock);
16329- if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
16330+ if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
16331 gentry = 0;
16332 kvm_mmu_free_some_pages(vcpu);
16333 ++vcpu->kvm->stat.mmu_pte_write;
16334diff -urNp linux-3.0.4/arch/x86/kvm/paging_tmpl.h linux-3.0.4/arch/x86/kvm/paging_tmpl.h
16335--- linux-3.0.4/arch/x86/kvm/paging_tmpl.h 2011-07-21 22:17:23.000000000 -0400
16336+++ linux-3.0.4/arch/x86/kvm/paging_tmpl.h 2011-10-06 04:17:55.000000000 -0400
16337@@ -182,7 +182,7 @@ walk:
16338 break;
16339 }
16340
16341- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
16342+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
16343 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte)))) {
16344 present = false;
16345 break;
16346@@ -583,6 +583,8 @@ static int FNAME(page_fault)(struct kvm_
16347 unsigned long mmu_seq;
16348 bool map_writable;
16349
16350+ pax_track_stack();
16351+
16352 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
16353
16354 r = mmu_topup_memory_caches(vcpu);
16355@@ -703,7 +705,7 @@ static void FNAME(invlpg)(struct kvm_vcp
16356 if (need_flush)
16357 kvm_flush_remote_tlbs(vcpu->kvm);
16358
16359- atomic_inc(&vcpu->kvm->arch.invlpg_counter);
16360+ atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
16361
16362 spin_unlock(&vcpu->kvm->mmu_lock);
16363
16364diff -urNp linux-3.0.4/arch/x86/kvm/svm.c linux-3.0.4/arch/x86/kvm/svm.c
16365--- linux-3.0.4/arch/x86/kvm/svm.c 2011-07-21 22:17:23.000000000 -0400
16366+++ linux-3.0.4/arch/x86/kvm/svm.c 2011-08-23 21:47:55.000000000 -0400
16367@@ -3377,7 +3377,11 @@ static void reload_tss(struct kvm_vcpu *
16368 int cpu = raw_smp_processor_id();
16369
16370 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
16371+
16372+ pax_open_kernel();
16373 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
16374+ pax_close_kernel();
16375+
16376 load_TR_desc();
16377 }
16378
16379@@ -3755,6 +3759,10 @@ static void svm_vcpu_run(struct kvm_vcpu
16380 #endif
16381 #endif
16382
16383+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16384+ __set_fs(current_thread_info()->addr_limit);
16385+#endif
16386+
16387 reload_tss(vcpu);
16388
16389 local_irq_disable();
16390diff -urNp linux-3.0.4/arch/x86/kvm/vmx.c linux-3.0.4/arch/x86/kvm/vmx.c
16391--- linux-3.0.4/arch/x86/kvm/vmx.c 2011-07-21 22:17:23.000000000 -0400
16392+++ linux-3.0.4/arch/x86/kvm/vmx.c 2011-08-23 21:47:55.000000000 -0400
16393@@ -797,7 +797,11 @@ static void reload_tss(void)
16394 struct desc_struct *descs;
16395
16396 descs = (void *)gdt->address;
16397+
16398+ pax_open_kernel();
16399 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
16400+ pax_close_kernel();
16401+
16402 load_TR_desc();
16403 }
16404
16405@@ -1747,8 +1751,11 @@ static __init int hardware_setup(void)
16406 if (!cpu_has_vmx_flexpriority())
16407 flexpriority_enabled = 0;
16408
16409- if (!cpu_has_vmx_tpr_shadow())
16410- kvm_x86_ops->update_cr8_intercept = NULL;
16411+ if (!cpu_has_vmx_tpr_shadow()) {
16412+ pax_open_kernel();
16413+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
16414+ pax_close_kernel();
16415+ }
16416
16417 if (enable_ept && !cpu_has_vmx_ept_2m_page())
16418 kvm_disable_largepages();
16419@@ -2814,7 +2821,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
16420 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
16421
16422 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
16423- vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
16424+ vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
16425 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
16426 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
16427 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
16428@@ -4211,6 +4218,12 @@ static void __noclone vmx_vcpu_run(struc
16429 "jmp .Lkvm_vmx_return \n\t"
16430 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
16431 ".Lkvm_vmx_return: "
16432+
16433+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16434+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
16435+ ".Lkvm_vmx_return2: "
16436+#endif
16437+
16438 /* Save guest registers, load host registers, keep flags */
16439 "mov %0, %c[wordsize](%%"R"sp) \n\t"
16440 "pop %0 \n\t"
16441@@ -4259,6 +4272,11 @@ static void __noclone vmx_vcpu_run(struc
16442 #endif
16443 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
16444 [wordsize]"i"(sizeof(ulong))
16445+
16446+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16447+ ,[cs]"i"(__KERNEL_CS)
16448+#endif
16449+
16450 : "cc", "memory"
16451 , R"ax", R"bx", R"di", R"si"
16452 #ifdef CONFIG_X86_64
16453@@ -4276,7 +4294,16 @@ static void __noclone vmx_vcpu_run(struc
16454
16455 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
16456
16457- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
16458+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
16459+
16460+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16461+ loadsegment(fs, __KERNEL_PERCPU);
16462+#endif
16463+
16464+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16465+ __set_fs(current_thread_info()->addr_limit);
16466+#endif
16467+
16468 vmx->launched = 1;
16469
16470 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
16471diff -urNp linux-3.0.4/arch/x86/kvm/x86.c linux-3.0.4/arch/x86/kvm/x86.c
16472--- linux-3.0.4/arch/x86/kvm/x86.c 2011-07-21 22:17:23.000000000 -0400
16473+++ linux-3.0.4/arch/x86/kvm/x86.c 2011-10-06 04:17:55.000000000 -0400
16474@@ -1313,8 +1313,8 @@ static int xen_hvm_config(struct kvm_vcp
16475 {
16476 struct kvm *kvm = vcpu->kvm;
16477 int lm = is_long_mode(vcpu);
16478- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
16479- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
16480+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
16481+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
16482 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
16483 : kvm->arch.xen_hvm_config.blob_size_32;
16484 u32 page_num = data & ~PAGE_MASK;
16485@@ -2057,6 +2057,8 @@ long kvm_arch_dev_ioctl(struct file *fil
16486 if (n < msr_list.nmsrs)
16487 goto out;
16488 r = -EFAULT;
16489+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
16490+ goto out;
16491 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
16492 num_msrs_to_save * sizeof(u32)))
16493 goto out;
16494@@ -2229,15 +2231,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
16495 struct kvm_cpuid2 *cpuid,
16496 struct kvm_cpuid_entry2 __user *entries)
16497 {
16498- int r;
16499+ int r, i;
16500
16501 r = -E2BIG;
16502 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
16503 goto out;
16504 r = -EFAULT;
16505- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
16506- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16507+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16508 goto out;
16509+ for (i = 0; i < cpuid->nent; ++i) {
16510+ struct kvm_cpuid_entry2 cpuid_entry;
16511+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
16512+ goto out;
16513+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
16514+ }
16515 vcpu->arch.cpuid_nent = cpuid->nent;
16516 kvm_apic_set_version(vcpu);
16517 kvm_x86_ops->cpuid_update(vcpu);
16518@@ -2252,15 +2259,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
16519 struct kvm_cpuid2 *cpuid,
16520 struct kvm_cpuid_entry2 __user *entries)
16521 {
16522- int r;
16523+ int r, i;
16524
16525 r = -E2BIG;
16526 if (cpuid->nent < vcpu->arch.cpuid_nent)
16527 goto out;
16528 r = -EFAULT;
16529- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
16530- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16531+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16532 goto out;
16533+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
16534+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
16535+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
16536+ goto out;
16537+ }
16538 return 0;
16539
16540 out:
16541@@ -2579,7 +2590,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
16542 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
16543 struct kvm_interrupt *irq)
16544 {
16545- if (irq->irq < 0 || irq->irq >= 256)
16546+ if (irq->irq >= 256)
16547 return -EINVAL;
16548 if (irqchip_in_kernel(vcpu->kvm))
16549 return -ENXIO;
16550@@ -4878,7 +4889,7 @@ void kvm_after_handle_nmi(struct kvm_vcp
16551 }
16552 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
16553
16554-int kvm_arch_init(void *opaque)
16555+int kvm_arch_init(const void *opaque)
16556 {
16557 int r;
16558 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
16559diff -urNp linux-3.0.4/arch/x86/lguest/boot.c linux-3.0.4/arch/x86/lguest/boot.c
16560--- linux-3.0.4/arch/x86/lguest/boot.c 2011-07-21 22:17:23.000000000 -0400
16561+++ linux-3.0.4/arch/x86/lguest/boot.c 2011-08-23 21:47:55.000000000 -0400
16562@@ -1176,9 +1176,10 @@ static __init int early_put_chars(u32 vt
16563 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
16564 * Launcher to reboot us.
16565 */
16566-static void lguest_restart(char *reason)
16567+static __noreturn void lguest_restart(char *reason)
16568 {
16569 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
16570+ BUG();
16571 }
16572
16573 /*G:050
16574diff -urNp linux-3.0.4/arch/x86/lib/atomic64_32.c linux-3.0.4/arch/x86/lib/atomic64_32.c
16575--- linux-3.0.4/arch/x86/lib/atomic64_32.c 2011-07-21 22:17:23.000000000 -0400
16576+++ linux-3.0.4/arch/x86/lib/atomic64_32.c 2011-08-23 21:47:55.000000000 -0400
16577@@ -8,18 +8,30 @@
16578
16579 long long atomic64_read_cx8(long long, const atomic64_t *v);
16580 EXPORT_SYMBOL(atomic64_read_cx8);
16581+long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16582+EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
16583 long long atomic64_set_cx8(long long, const atomic64_t *v);
16584 EXPORT_SYMBOL(atomic64_set_cx8);
16585+long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16586+EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
16587 long long atomic64_xchg_cx8(long long, unsigned high);
16588 EXPORT_SYMBOL(atomic64_xchg_cx8);
16589 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
16590 EXPORT_SYMBOL(atomic64_add_return_cx8);
16591+long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16592+EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
16593 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
16594 EXPORT_SYMBOL(atomic64_sub_return_cx8);
16595+long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16596+EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
16597 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
16598 EXPORT_SYMBOL(atomic64_inc_return_cx8);
16599+long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16600+EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
16601 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
16602 EXPORT_SYMBOL(atomic64_dec_return_cx8);
16603+long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16604+EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
16605 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
16606 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
16607 int atomic64_inc_not_zero_cx8(atomic64_t *v);
16608@@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
16609 #ifndef CONFIG_X86_CMPXCHG64
16610 long long atomic64_read_386(long long, const atomic64_t *v);
16611 EXPORT_SYMBOL(atomic64_read_386);
16612+long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
16613+EXPORT_SYMBOL(atomic64_read_unchecked_386);
16614 long long atomic64_set_386(long long, const atomic64_t *v);
16615 EXPORT_SYMBOL(atomic64_set_386);
16616+long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
16617+EXPORT_SYMBOL(atomic64_set_unchecked_386);
16618 long long atomic64_xchg_386(long long, unsigned high);
16619 EXPORT_SYMBOL(atomic64_xchg_386);
16620 long long atomic64_add_return_386(long long a, atomic64_t *v);
16621 EXPORT_SYMBOL(atomic64_add_return_386);
16622+long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16623+EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
16624 long long atomic64_sub_return_386(long long a, atomic64_t *v);
16625 EXPORT_SYMBOL(atomic64_sub_return_386);
16626+long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16627+EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
16628 long long atomic64_inc_return_386(long long a, atomic64_t *v);
16629 EXPORT_SYMBOL(atomic64_inc_return_386);
16630+long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16631+EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
16632 long long atomic64_dec_return_386(long long a, atomic64_t *v);
16633 EXPORT_SYMBOL(atomic64_dec_return_386);
16634+long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16635+EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
16636 long long atomic64_add_386(long long a, atomic64_t *v);
16637 EXPORT_SYMBOL(atomic64_add_386);
16638+long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
16639+EXPORT_SYMBOL(atomic64_add_unchecked_386);
16640 long long atomic64_sub_386(long long a, atomic64_t *v);
16641 EXPORT_SYMBOL(atomic64_sub_386);
16642+long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
16643+EXPORT_SYMBOL(atomic64_sub_unchecked_386);
16644 long long atomic64_inc_386(long long a, atomic64_t *v);
16645 EXPORT_SYMBOL(atomic64_inc_386);
16646+long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
16647+EXPORT_SYMBOL(atomic64_inc_unchecked_386);
16648 long long atomic64_dec_386(long long a, atomic64_t *v);
16649 EXPORT_SYMBOL(atomic64_dec_386);
16650+long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
16651+EXPORT_SYMBOL(atomic64_dec_unchecked_386);
16652 long long atomic64_dec_if_positive_386(atomic64_t *v);
16653 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
16654 int atomic64_inc_not_zero_386(atomic64_t *v);
16655diff -urNp linux-3.0.4/arch/x86/lib/atomic64_386_32.S linux-3.0.4/arch/x86/lib/atomic64_386_32.S
16656--- linux-3.0.4/arch/x86/lib/atomic64_386_32.S 2011-07-21 22:17:23.000000000 -0400
16657+++ linux-3.0.4/arch/x86/lib/atomic64_386_32.S 2011-08-23 21:47:55.000000000 -0400
16658@@ -48,6 +48,10 @@ BEGIN(read)
16659 movl (v), %eax
16660 movl 4(v), %edx
16661 RET_ENDP
16662+BEGIN(read_unchecked)
16663+ movl (v), %eax
16664+ movl 4(v), %edx
16665+RET_ENDP
16666 #undef v
16667
16668 #define v %esi
16669@@ -55,6 +59,10 @@ BEGIN(set)
16670 movl %ebx, (v)
16671 movl %ecx, 4(v)
16672 RET_ENDP
16673+BEGIN(set_unchecked)
16674+ movl %ebx, (v)
16675+ movl %ecx, 4(v)
16676+RET_ENDP
16677 #undef v
16678
16679 #define v %esi
16680@@ -70,6 +78,20 @@ RET_ENDP
16681 BEGIN(add)
16682 addl %eax, (v)
16683 adcl %edx, 4(v)
16684+
16685+#ifdef CONFIG_PAX_REFCOUNT
16686+ jno 0f
16687+ subl %eax, (v)
16688+ sbbl %edx, 4(v)
16689+ int $4
16690+0:
16691+ _ASM_EXTABLE(0b, 0b)
16692+#endif
16693+
16694+RET_ENDP
16695+BEGIN(add_unchecked)
16696+ addl %eax, (v)
16697+ adcl %edx, 4(v)
16698 RET_ENDP
16699 #undef v
16700
16701@@ -77,6 +99,24 @@ RET_ENDP
16702 BEGIN(add_return)
16703 addl (v), %eax
16704 adcl 4(v), %edx
16705+
16706+#ifdef CONFIG_PAX_REFCOUNT
16707+ into
16708+1234:
16709+ _ASM_EXTABLE(1234b, 2f)
16710+#endif
16711+
16712+ movl %eax, (v)
16713+ movl %edx, 4(v)
16714+
16715+#ifdef CONFIG_PAX_REFCOUNT
16716+2:
16717+#endif
16718+
16719+RET_ENDP
16720+BEGIN(add_return_unchecked)
16721+ addl (v), %eax
16722+ adcl 4(v), %edx
16723 movl %eax, (v)
16724 movl %edx, 4(v)
16725 RET_ENDP
16726@@ -86,6 +126,20 @@ RET_ENDP
16727 BEGIN(sub)
16728 subl %eax, (v)
16729 sbbl %edx, 4(v)
16730+
16731+#ifdef CONFIG_PAX_REFCOUNT
16732+ jno 0f
16733+ addl %eax, (v)
16734+ adcl %edx, 4(v)
16735+ int $4
16736+0:
16737+ _ASM_EXTABLE(0b, 0b)
16738+#endif
16739+
16740+RET_ENDP
16741+BEGIN(sub_unchecked)
16742+ subl %eax, (v)
16743+ sbbl %edx, 4(v)
16744 RET_ENDP
16745 #undef v
16746
16747@@ -96,6 +150,27 @@ BEGIN(sub_return)
16748 sbbl $0, %edx
16749 addl (v), %eax
16750 adcl 4(v), %edx
16751+
16752+#ifdef CONFIG_PAX_REFCOUNT
16753+ into
16754+1234:
16755+ _ASM_EXTABLE(1234b, 2f)
16756+#endif
16757+
16758+ movl %eax, (v)
16759+ movl %edx, 4(v)
16760+
16761+#ifdef CONFIG_PAX_REFCOUNT
16762+2:
16763+#endif
16764+
16765+RET_ENDP
16766+BEGIN(sub_return_unchecked)
16767+ negl %edx
16768+ negl %eax
16769+ sbbl $0, %edx
16770+ addl (v), %eax
16771+ adcl 4(v), %edx
16772 movl %eax, (v)
16773 movl %edx, 4(v)
16774 RET_ENDP
16775@@ -105,6 +180,20 @@ RET_ENDP
16776 BEGIN(inc)
16777 addl $1, (v)
16778 adcl $0, 4(v)
16779+
16780+#ifdef CONFIG_PAX_REFCOUNT
16781+ jno 0f
16782+ subl $1, (v)
16783+ sbbl $0, 4(v)
16784+ int $4
16785+0:
16786+ _ASM_EXTABLE(0b, 0b)
16787+#endif
16788+
16789+RET_ENDP
16790+BEGIN(inc_unchecked)
16791+ addl $1, (v)
16792+ adcl $0, 4(v)
16793 RET_ENDP
16794 #undef v
16795
16796@@ -114,6 +203,26 @@ BEGIN(inc_return)
16797 movl 4(v), %edx
16798 addl $1, %eax
16799 adcl $0, %edx
16800+
16801+#ifdef CONFIG_PAX_REFCOUNT
16802+ into
16803+1234:
16804+ _ASM_EXTABLE(1234b, 2f)
16805+#endif
16806+
16807+ movl %eax, (v)
16808+ movl %edx, 4(v)
16809+
16810+#ifdef CONFIG_PAX_REFCOUNT
16811+2:
16812+#endif
16813+
16814+RET_ENDP
16815+BEGIN(inc_return_unchecked)
16816+ movl (v), %eax
16817+ movl 4(v), %edx
16818+ addl $1, %eax
16819+ adcl $0, %edx
16820 movl %eax, (v)
16821 movl %edx, 4(v)
16822 RET_ENDP
16823@@ -123,6 +232,20 @@ RET_ENDP
16824 BEGIN(dec)
16825 subl $1, (v)
16826 sbbl $0, 4(v)
16827+
16828+#ifdef CONFIG_PAX_REFCOUNT
16829+ jno 0f
16830+ addl $1, (v)
16831+ adcl $0, 4(v)
16832+ int $4
16833+0:
16834+ _ASM_EXTABLE(0b, 0b)
16835+#endif
16836+
16837+RET_ENDP
16838+BEGIN(dec_unchecked)
16839+ subl $1, (v)
16840+ sbbl $0, 4(v)
16841 RET_ENDP
16842 #undef v
16843
16844@@ -132,6 +255,26 @@ BEGIN(dec_return)
16845 movl 4(v), %edx
16846 subl $1, %eax
16847 sbbl $0, %edx
16848+
16849+#ifdef CONFIG_PAX_REFCOUNT
16850+ into
16851+1234:
16852+ _ASM_EXTABLE(1234b, 2f)
16853+#endif
16854+
16855+ movl %eax, (v)
16856+ movl %edx, 4(v)
16857+
16858+#ifdef CONFIG_PAX_REFCOUNT
16859+2:
16860+#endif
16861+
16862+RET_ENDP
16863+BEGIN(dec_return_unchecked)
16864+ movl (v), %eax
16865+ movl 4(v), %edx
16866+ subl $1, %eax
16867+ sbbl $0, %edx
16868 movl %eax, (v)
16869 movl %edx, 4(v)
16870 RET_ENDP
16871@@ -143,6 +286,13 @@ BEGIN(add_unless)
16872 adcl %edx, %edi
16873 addl (v), %eax
16874 adcl 4(v), %edx
16875+
16876+#ifdef CONFIG_PAX_REFCOUNT
16877+ into
16878+1234:
16879+ _ASM_EXTABLE(1234b, 2f)
16880+#endif
16881+
16882 cmpl %eax, %esi
16883 je 3f
16884 1:
16885@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
16886 1:
16887 addl $1, %eax
16888 adcl $0, %edx
16889+
16890+#ifdef CONFIG_PAX_REFCOUNT
16891+ into
16892+1234:
16893+ _ASM_EXTABLE(1234b, 2f)
16894+#endif
16895+
16896 movl %eax, (v)
16897 movl %edx, 4(v)
16898 movl $1, %eax
16899@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
16900 movl 4(v), %edx
16901 subl $1, %eax
16902 sbbl $0, %edx
16903+
16904+#ifdef CONFIG_PAX_REFCOUNT
16905+ into
16906+1234:
16907+ _ASM_EXTABLE(1234b, 1f)
16908+#endif
16909+
16910 js 1f
16911 movl %eax, (v)
16912 movl %edx, 4(v)
16913diff -urNp linux-3.0.4/arch/x86/lib/atomic64_cx8_32.S linux-3.0.4/arch/x86/lib/atomic64_cx8_32.S
16914--- linux-3.0.4/arch/x86/lib/atomic64_cx8_32.S 2011-07-21 22:17:23.000000000 -0400
16915+++ linux-3.0.4/arch/x86/lib/atomic64_cx8_32.S 2011-10-06 04:17:55.000000000 -0400
16916@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
16917 CFI_STARTPROC
16918
16919 read64 %ecx
16920+ pax_force_retaddr
16921 ret
16922 CFI_ENDPROC
16923 ENDPROC(atomic64_read_cx8)
16924
16925+ENTRY(atomic64_read_unchecked_cx8)
16926+ CFI_STARTPROC
16927+
16928+ read64 %ecx
16929+ pax_force_retaddr
16930+ ret
16931+ CFI_ENDPROC
16932+ENDPROC(atomic64_read_unchecked_cx8)
16933+
16934 ENTRY(atomic64_set_cx8)
16935 CFI_STARTPROC
16936
16937@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
16938 cmpxchg8b (%esi)
16939 jne 1b
16940
16941+ pax_force_retaddr
16942 ret
16943 CFI_ENDPROC
16944 ENDPROC(atomic64_set_cx8)
16945
16946+ENTRY(atomic64_set_unchecked_cx8)
16947+ CFI_STARTPROC
16948+
16949+1:
16950+/* we don't need LOCK_PREFIX since aligned 64-bit writes
16951+ * are atomic on 586 and newer */
16952+ cmpxchg8b (%esi)
16953+ jne 1b
16954+
16955+ pax_force_retaddr
16956+ ret
16957+ CFI_ENDPROC
16958+ENDPROC(atomic64_set_unchecked_cx8)
16959+
16960 ENTRY(atomic64_xchg_cx8)
16961 CFI_STARTPROC
16962
16963@@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
16964 cmpxchg8b (%esi)
16965 jne 1b
16966
16967+ pax_force_retaddr
16968 ret
16969 CFI_ENDPROC
16970 ENDPROC(atomic64_xchg_cx8)
16971
16972-.macro addsub_return func ins insc
16973-ENTRY(atomic64_\func\()_return_cx8)
16974+.macro addsub_return func ins insc unchecked=""
16975+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
16976 CFI_STARTPROC
16977 SAVE ebp
16978 SAVE ebx
16979@@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
16980 movl %edx, %ecx
16981 \ins\()l %esi, %ebx
16982 \insc\()l %edi, %ecx
16983+
16984+.ifb \unchecked
16985+#ifdef CONFIG_PAX_REFCOUNT
16986+ into
16987+2:
16988+ _ASM_EXTABLE(2b, 3f)
16989+#endif
16990+.endif
16991+
16992 LOCK_PREFIX
16993 cmpxchg8b (%ebp)
16994 jne 1b
16995-
16996-10:
16997 movl %ebx, %eax
16998 movl %ecx, %edx
16999+
17000+.ifb \unchecked
17001+#ifdef CONFIG_PAX_REFCOUNT
17002+3:
17003+#endif
17004+.endif
17005+
17006 RESTORE edi
17007 RESTORE esi
17008 RESTORE ebx
17009 RESTORE ebp
17010+ pax_force_retaddr
17011 ret
17012 CFI_ENDPROC
17013-ENDPROC(atomic64_\func\()_return_cx8)
17014+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
17015 .endm
17016
17017 addsub_return add add adc
17018 addsub_return sub sub sbb
17019+addsub_return add add adc _unchecked
17020+addsub_return sub sub sbb _unchecked
17021
17022-.macro incdec_return func ins insc
17023-ENTRY(atomic64_\func\()_return_cx8)
17024+.macro incdec_return func ins insc unchecked
17025+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
17026 CFI_STARTPROC
17027 SAVE ebx
17028
17029@@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
17030 movl %edx, %ecx
17031 \ins\()l $1, %ebx
17032 \insc\()l $0, %ecx
17033+
17034+.ifb \unchecked
17035+#ifdef CONFIG_PAX_REFCOUNT
17036+ into
17037+2:
17038+ _ASM_EXTABLE(2b, 3f)
17039+#endif
17040+.endif
17041+
17042 LOCK_PREFIX
17043 cmpxchg8b (%esi)
17044 jne 1b
17045
17046-10:
17047 movl %ebx, %eax
17048 movl %ecx, %edx
17049+
17050+.ifb \unchecked
17051+#ifdef CONFIG_PAX_REFCOUNT
17052+3:
17053+#endif
17054+.endif
17055+
17056 RESTORE ebx
17057+ pax_force_retaddr
17058 ret
17059 CFI_ENDPROC
17060-ENDPROC(atomic64_\func\()_return_cx8)
17061+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
17062 .endm
17063
17064 incdec_return inc add adc
17065 incdec_return dec sub sbb
17066+incdec_return inc add adc _unchecked
17067+incdec_return dec sub sbb _unchecked
17068
17069 ENTRY(atomic64_dec_if_positive_cx8)
17070 CFI_STARTPROC
17071@@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
17072 movl %edx, %ecx
17073 subl $1, %ebx
17074 sbb $0, %ecx
17075+
17076+#ifdef CONFIG_PAX_REFCOUNT
17077+ into
17078+1234:
17079+ _ASM_EXTABLE(1234b, 2f)
17080+#endif
17081+
17082 js 2f
17083 LOCK_PREFIX
17084 cmpxchg8b (%esi)
17085@@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
17086 movl %ebx, %eax
17087 movl %ecx, %edx
17088 RESTORE ebx
17089+ pax_force_retaddr
17090 ret
17091 CFI_ENDPROC
17092 ENDPROC(atomic64_dec_if_positive_cx8)
17093@@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
17094 movl %edx, %ecx
17095 addl %esi, %ebx
17096 adcl %edi, %ecx
17097+
17098+#ifdef CONFIG_PAX_REFCOUNT
17099+ into
17100+1234:
17101+ _ASM_EXTABLE(1234b, 3f)
17102+#endif
17103+
17104 LOCK_PREFIX
17105 cmpxchg8b (%ebp)
17106 jne 1b
17107@@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
17108 CFI_ADJUST_CFA_OFFSET -8
17109 RESTORE ebx
17110 RESTORE ebp
17111+ pax_force_retaddr
17112 ret
17113 4:
17114 cmpl %edx, 4(%esp)
17115@@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
17116 movl %edx, %ecx
17117 addl $1, %ebx
17118 adcl $0, %ecx
17119+
17120+#ifdef CONFIG_PAX_REFCOUNT
17121+ into
17122+1234:
17123+ _ASM_EXTABLE(1234b, 3f)
17124+#endif
17125+
17126 LOCK_PREFIX
17127 cmpxchg8b (%esi)
17128 jne 1b
17129@@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
17130 movl $1, %eax
17131 3:
17132 RESTORE ebx
17133+ pax_force_retaddr
17134 ret
17135 4:
17136 testl %edx, %edx
17137diff -urNp linux-3.0.4/arch/x86/lib/checksum_32.S linux-3.0.4/arch/x86/lib/checksum_32.S
17138--- linux-3.0.4/arch/x86/lib/checksum_32.S 2011-07-21 22:17:23.000000000 -0400
17139+++ linux-3.0.4/arch/x86/lib/checksum_32.S 2011-08-23 21:47:55.000000000 -0400
17140@@ -28,7 +28,8 @@
17141 #include <linux/linkage.h>
17142 #include <asm/dwarf2.h>
17143 #include <asm/errno.h>
17144-
17145+#include <asm/segment.h>
17146+
17147 /*
17148 * computes a partial checksum, e.g. for TCP/UDP fragments
17149 */
17150@@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (
17151
17152 #define ARGBASE 16
17153 #define FP 12
17154-
17155-ENTRY(csum_partial_copy_generic)
17156+
17157+ENTRY(csum_partial_copy_generic_to_user)
17158 CFI_STARTPROC
17159+
17160+#ifdef CONFIG_PAX_MEMORY_UDEREF
17161+ pushl_cfi %gs
17162+ popl_cfi %es
17163+ jmp csum_partial_copy_generic
17164+#endif
17165+
17166+ENTRY(csum_partial_copy_generic_from_user)
17167+
17168+#ifdef CONFIG_PAX_MEMORY_UDEREF
17169+ pushl_cfi %gs
17170+ popl_cfi %ds
17171+#endif
17172+
17173+ENTRY(csum_partial_copy_generic)
17174 subl $4,%esp
17175 CFI_ADJUST_CFA_OFFSET 4
17176 pushl_cfi %edi
17177@@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
17178 jmp 4f
17179 SRC(1: movw (%esi), %bx )
17180 addl $2, %esi
17181-DST( movw %bx, (%edi) )
17182+DST( movw %bx, %es:(%edi) )
17183 addl $2, %edi
17184 addw %bx, %ax
17185 adcl $0, %eax
17186@@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
17187 SRC(1: movl (%esi), %ebx )
17188 SRC( movl 4(%esi), %edx )
17189 adcl %ebx, %eax
17190-DST( movl %ebx, (%edi) )
17191+DST( movl %ebx, %es:(%edi) )
17192 adcl %edx, %eax
17193-DST( movl %edx, 4(%edi) )
17194+DST( movl %edx, %es:4(%edi) )
17195
17196 SRC( movl 8(%esi), %ebx )
17197 SRC( movl 12(%esi), %edx )
17198 adcl %ebx, %eax
17199-DST( movl %ebx, 8(%edi) )
17200+DST( movl %ebx, %es:8(%edi) )
17201 adcl %edx, %eax
17202-DST( movl %edx, 12(%edi) )
17203+DST( movl %edx, %es:12(%edi) )
17204
17205 SRC( movl 16(%esi), %ebx )
17206 SRC( movl 20(%esi), %edx )
17207 adcl %ebx, %eax
17208-DST( movl %ebx, 16(%edi) )
17209+DST( movl %ebx, %es:16(%edi) )
17210 adcl %edx, %eax
17211-DST( movl %edx, 20(%edi) )
17212+DST( movl %edx, %es:20(%edi) )
17213
17214 SRC( movl 24(%esi), %ebx )
17215 SRC( movl 28(%esi), %edx )
17216 adcl %ebx, %eax
17217-DST( movl %ebx, 24(%edi) )
17218+DST( movl %ebx, %es:24(%edi) )
17219 adcl %edx, %eax
17220-DST( movl %edx, 28(%edi) )
17221+DST( movl %edx, %es:28(%edi) )
17222
17223 lea 32(%esi), %esi
17224 lea 32(%edi), %edi
17225@@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
17226 shrl $2, %edx # This clears CF
17227 SRC(3: movl (%esi), %ebx )
17228 adcl %ebx, %eax
17229-DST( movl %ebx, (%edi) )
17230+DST( movl %ebx, %es:(%edi) )
17231 lea 4(%esi), %esi
17232 lea 4(%edi), %edi
17233 dec %edx
17234@@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
17235 jb 5f
17236 SRC( movw (%esi), %cx )
17237 leal 2(%esi), %esi
17238-DST( movw %cx, (%edi) )
17239+DST( movw %cx, %es:(%edi) )
17240 leal 2(%edi), %edi
17241 je 6f
17242 shll $16,%ecx
17243 SRC(5: movb (%esi), %cl )
17244-DST( movb %cl, (%edi) )
17245+DST( movb %cl, %es:(%edi) )
17246 6: addl %ecx, %eax
17247 adcl $0, %eax
17248 7:
17249@@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
17250
17251 6001:
17252 movl ARGBASE+20(%esp), %ebx # src_err_ptr
17253- movl $-EFAULT, (%ebx)
17254+ movl $-EFAULT, %ss:(%ebx)
17255
17256 # zero the complete destination - computing the rest
17257 # is too much work
17258@@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
17259
17260 6002:
17261 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
17262- movl $-EFAULT,(%ebx)
17263+ movl $-EFAULT,%ss:(%ebx)
17264 jmp 5000b
17265
17266 .previous
17267
17268+ pushl_cfi %ss
17269+ popl_cfi %ds
17270+ pushl_cfi %ss
17271+ popl_cfi %es
17272 popl_cfi %ebx
17273 CFI_RESTORE ebx
17274 popl_cfi %esi
17275@@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
17276 popl_cfi %ecx # equivalent to addl $4,%esp
17277 ret
17278 CFI_ENDPROC
17279-ENDPROC(csum_partial_copy_generic)
17280+ENDPROC(csum_partial_copy_generic_to_user)
17281
17282 #else
17283
17284 /* Version for PentiumII/PPro */
17285
17286 #define ROUND1(x) \
17287+ nop; nop; nop; \
17288 SRC(movl x(%esi), %ebx ) ; \
17289 addl %ebx, %eax ; \
17290- DST(movl %ebx, x(%edi) ) ;
17291+ DST(movl %ebx, %es:x(%edi)) ;
17292
17293 #define ROUND(x) \
17294+ nop; nop; nop; \
17295 SRC(movl x(%esi), %ebx ) ; \
17296 adcl %ebx, %eax ; \
17297- DST(movl %ebx, x(%edi) ) ;
17298+ DST(movl %ebx, %es:x(%edi)) ;
17299
17300 #define ARGBASE 12
17301-
17302-ENTRY(csum_partial_copy_generic)
17303+
17304+ENTRY(csum_partial_copy_generic_to_user)
17305 CFI_STARTPROC
17306+
17307+#ifdef CONFIG_PAX_MEMORY_UDEREF
17308+ pushl_cfi %gs
17309+ popl_cfi %es
17310+ jmp csum_partial_copy_generic
17311+#endif
17312+
17313+ENTRY(csum_partial_copy_generic_from_user)
17314+
17315+#ifdef CONFIG_PAX_MEMORY_UDEREF
17316+ pushl_cfi %gs
17317+ popl_cfi %ds
17318+#endif
17319+
17320+ENTRY(csum_partial_copy_generic)
17321 pushl_cfi %ebx
17322 CFI_REL_OFFSET ebx, 0
17323 pushl_cfi %edi
17324@@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
17325 subl %ebx, %edi
17326 lea -1(%esi),%edx
17327 andl $-32,%edx
17328- lea 3f(%ebx,%ebx), %ebx
17329+ lea 3f(%ebx,%ebx,2), %ebx
17330 testl %esi, %esi
17331 jmp *%ebx
17332 1: addl $64,%esi
17333@@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
17334 jb 5f
17335 SRC( movw (%esi), %dx )
17336 leal 2(%esi), %esi
17337-DST( movw %dx, (%edi) )
17338+DST( movw %dx, %es:(%edi) )
17339 leal 2(%edi), %edi
17340 je 6f
17341 shll $16,%edx
17342 5:
17343 SRC( movb (%esi), %dl )
17344-DST( movb %dl, (%edi) )
17345+DST( movb %dl, %es:(%edi) )
17346 6: addl %edx, %eax
17347 adcl $0, %eax
17348 7:
17349 .section .fixup, "ax"
17350 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
17351- movl $-EFAULT, (%ebx)
17352+ movl $-EFAULT, %ss:(%ebx)
17353 # zero the complete destination (computing the rest is too much work)
17354 movl ARGBASE+8(%esp),%edi # dst
17355 movl ARGBASE+12(%esp),%ecx # len
17356@@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
17357 rep; stosb
17358 jmp 7b
17359 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
17360- movl $-EFAULT, (%ebx)
17361+ movl $-EFAULT, %ss:(%ebx)
17362 jmp 7b
17363 .previous
17364
17365+#ifdef CONFIG_PAX_MEMORY_UDEREF
17366+ pushl_cfi %ss
17367+ popl_cfi %ds
17368+ pushl_cfi %ss
17369+ popl_cfi %es
17370+#endif
17371+
17372 popl_cfi %esi
17373 CFI_RESTORE esi
17374 popl_cfi %edi
17375@@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
17376 CFI_RESTORE ebx
17377 ret
17378 CFI_ENDPROC
17379-ENDPROC(csum_partial_copy_generic)
17380+ENDPROC(csum_partial_copy_generic_to_user)
17381
17382 #undef ROUND
17383 #undef ROUND1
17384diff -urNp linux-3.0.4/arch/x86/lib/clear_page_64.S linux-3.0.4/arch/x86/lib/clear_page_64.S
17385--- linux-3.0.4/arch/x86/lib/clear_page_64.S 2011-07-21 22:17:23.000000000 -0400
17386+++ linux-3.0.4/arch/x86/lib/clear_page_64.S 2011-10-06 04:17:55.000000000 -0400
17387@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
17388 movl $4096/8,%ecx
17389 xorl %eax,%eax
17390 rep stosq
17391+ pax_force_retaddr
17392 ret
17393 CFI_ENDPROC
17394 ENDPROC(clear_page_c)
17395@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
17396 movl $4096,%ecx
17397 xorl %eax,%eax
17398 rep stosb
17399+ pax_force_retaddr
17400 ret
17401 CFI_ENDPROC
17402 ENDPROC(clear_page_c_e)
17403@@ -43,6 +45,7 @@ ENTRY(clear_page)
17404 leaq 64(%rdi),%rdi
17405 jnz .Lloop
17406 nop
17407+ pax_force_retaddr
17408 ret
17409 CFI_ENDPROC
17410 .Lclear_page_end:
17411@@ -58,7 +61,7 @@ ENDPROC(clear_page)
17412
17413 #include <asm/cpufeature.h>
17414
17415- .section .altinstr_replacement,"ax"
17416+ .section .altinstr_replacement,"a"
17417 1: .byte 0xeb /* jmp <disp8> */
17418 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
17419 2: .byte 0xeb /* jmp <disp8> */
17420diff -urNp linux-3.0.4/arch/x86/lib/cmpxchg16b_emu.S linux-3.0.4/arch/x86/lib/cmpxchg16b_emu.S
17421--- linux-3.0.4/arch/x86/lib/cmpxchg16b_emu.S 2011-07-21 22:17:23.000000000 -0400
17422+++ linux-3.0.4/arch/x86/lib/cmpxchg16b_emu.S 2011-10-07 19:07:28.000000000 -0400
17423@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
17424
17425 popf
17426 mov $1, %al
17427+ pax_force_retaddr
17428 ret
17429
17430 not_same:
17431 popf
17432 xor %al,%al
17433+ pax_force_retaddr
17434 ret
17435
17436 CFI_ENDPROC
17437diff -urNp linux-3.0.4/arch/x86/lib/copy_page_64.S linux-3.0.4/arch/x86/lib/copy_page_64.S
17438--- linux-3.0.4/arch/x86/lib/copy_page_64.S 2011-07-21 22:17:23.000000000 -0400
17439+++ linux-3.0.4/arch/x86/lib/copy_page_64.S 2011-10-06 04:17:55.000000000 -0400
17440@@ -2,12 +2,14 @@
17441
17442 #include <linux/linkage.h>
17443 #include <asm/dwarf2.h>
17444+#include <asm/alternative-asm.h>
17445
17446 ALIGN
17447 copy_page_c:
17448 CFI_STARTPROC
17449 movl $4096/8,%ecx
17450 rep movsq
17451+ pax_force_retaddr
17452 ret
17453 CFI_ENDPROC
17454 ENDPROC(copy_page_c)
17455@@ -94,6 +96,7 @@ ENTRY(copy_page)
17456 CFI_RESTORE r13
17457 addq $3*8,%rsp
17458 CFI_ADJUST_CFA_OFFSET -3*8
17459+ pax_force_retaddr
17460 ret
17461 .Lcopy_page_end:
17462 CFI_ENDPROC
17463@@ -104,7 +107,7 @@ ENDPROC(copy_page)
17464
17465 #include <asm/cpufeature.h>
17466
17467- .section .altinstr_replacement,"ax"
17468+ .section .altinstr_replacement,"a"
17469 1: .byte 0xeb /* jmp <disp8> */
17470 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
17471 2:
17472diff -urNp linux-3.0.4/arch/x86/lib/copy_user_64.S linux-3.0.4/arch/x86/lib/copy_user_64.S
17473--- linux-3.0.4/arch/x86/lib/copy_user_64.S 2011-07-21 22:17:23.000000000 -0400
17474+++ linux-3.0.4/arch/x86/lib/copy_user_64.S 2011-10-06 04:17:55.000000000 -0400
17475@@ -16,6 +16,7 @@
17476 #include <asm/thread_info.h>
17477 #include <asm/cpufeature.h>
17478 #include <asm/alternative-asm.h>
17479+#include <asm/pgtable.h>
17480
17481 /*
17482 * By placing feature2 after feature1 in altinstructions section, we logically
17483@@ -29,7 +30,7 @@
17484 .byte 0xe9 /* 32bit jump */
17485 .long \orig-1f /* by default jump to orig */
17486 1:
17487- .section .altinstr_replacement,"ax"
17488+ .section .altinstr_replacement,"a"
17489 2: .byte 0xe9 /* near jump with 32bit immediate */
17490 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
17491 3: .byte 0xe9 /* near jump with 32bit immediate */
17492@@ -71,47 +72,20 @@
17493 #endif
17494 .endm
17495
17496-/* Standard copy_to_user with segment limit checking */
17497-ENTRY(_copy_to_user)
17498- CFI_STARTPROC
17499- GET_THREAD_INFO(%rax)
17500- movq %rdi,%rcx
17501- addq %rdx,%rcx
17502- jc bad_to_user
17503- cmpq TI_addr_limit(%rax),%rcx
17504- ja bad_to_user
17505- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
17506- copy_user_generic_unrolled,copy_user_generic_string, \
17507- copy_user_enhanced_fast_string
17508- CFI_ENDPROC
17509-ENDPROC(_copy_to_user)
17510-
17511-/* Standard copy_from_user with segment limit checking */
17512-ENTRY(_copy_from_user)
17513- CFI_STARTPROC
17514- GET_THREAD_INFO(%rax)
17515- movq %rsi,%rcx
17516- addq %rdx,%rcx
17517- jc bad_from_user
17518- cmpq TI_addr_limit(%rax),%rcx
17519- ja bad_from_user
17520- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
17521- copy_user_generic_unrolled,copy_user_generic_string, \
17522- copy_user_enhanced_fast_string
17523- CFI_ENDPROC
17524-ENDPROC(_copy_from_user)
17525-
17526 .section .fixup,"ax"
17527 /* must zero dest */
17528 ENTRY(bad_from_user)
17529 bad_from_user:
17530 CFI_STARTPROC
17531+ testl %edx,%edx
17532+ js bad_to_user
17533 movl %edx,%ecx
17534 xorl %eax,%eax
17535 rep
17536 stosb
17537 bad_to_user:
17538 movl %edx,%eax
17539+ pax_force_retaddr
17540 ret
17541 CFI_ENDPROC
17542 ENDPROC(bad_from_user)
17543@@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
17544 decl %ecx
17545 jnz 21b
17546 23: xor %eax,%eax
17547+ pax_force_retaddr
17548 ret
17549
17550 .section .fixup,"ax"
17551@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
17552 3: rep
17553 movsb
17554 4: xorl %eax,%eax
17555+ pax_force_retaddr
17556 ret
17557
17558 .section .fixup,"ax"
17559@@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
17560 1: rep
17561 movsb
17562 2: xorl %eax,%eax
17563+ pax_force_retaddr
17564 ret
17565
17566 .section .fixup,"ax"
17567diff -urNp linux-3.0.4/arch/x86/lib/copy_user_nocache_64.S linux-3.0.4/arch/x86/lib/copy_user_nocache_64.S
17568--- linux-3.0.4/arch/x86/lib/copy_user_nocache_64.S 2011-07-21 22:17:23.000000000 -0400
17569+++ linux-3.0.4/arch/x86/lib/copy_user_nocache_64.S 2011-10-06 04:17:55.000000000 -0400
17570@@ -8,12 +8,14 @@
17571
17572 #include <linux/linkage.h>
17573 #include <asm/dwarf2.h>
17574+#include <asm/alternative-asm.h>
17575
17576 #define FIX_ALIGNMENT 1
17577
17578 #include <asm/current.h>
17579 #include <asm/asm-offsets.h>
17580 #include <asm/thread_info.h>
17581+#include <asm/pgtable.h>
17582
17583 .macro ALIGN_DESTINATION
17584 #ifdef FIX_ALIGNMENT
17585@@ -50,6 +52,15 @@
17586 */
17587 ENTRY(__copy_user_nocache)
17588 CFI_STARTPROC
17589+
17590+#ifdef CONFIG_PAX_MEMORY_UDEREF
17591+ mov $PAX_USER_SHADOW_BASE,%rcx
17592+ cmp %rcx,%rsi
17593+ jae 1f
17594+ add %rcx,%rsi
17595+1:
17596+#endif
17597+
17598 cmpl $8,%edx
17599 jb 20f /* less then 8 bytes, go to byte copy loop */
17600 ALIGN_DESTINATION
17601@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
17602 jnz 21b
17603 23: xorl %eax,%eax
17604 sfence
17605+ pax_force_retaddr
17606 ret
17607
17608 .section .fixup,"ax"
17609diff -urNp linux-3.0.4/arch/x86/lib/csum-copy_64.S linux-3.0.4/arch/x86/lib/csum-copy_64.S
17610--- linux-3.0.4/arch/x86/lib/csum-copy_64.S 2011-07-21 22:17:23.000000000 -0400
17611+++ linux-3.0.4/arch/x86/lib/csum-copy_64.S 2011-10-06 04:17:55.000000000 -0400
17612@@ -8,6 +8,7 @@
17613 #include <linux/linkage.h>
17614 #include <asm/dwarf2.h>
17615 #include <asm/errno.h>
17616+#include <asm/alternative-asm.h>
17617
17618 /*
17619 * Checksum copy with exception handling.
17620@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
17621 CFI_RESTORE rbp
17622 addq $7*8, %rsp
17623 CFI_ADJUST_CFA_OFFSET -7*8
17624+ pax_force_retaddr
17625 ret
17626 CFI_RESTORE_STATE
17627
17628diff -urNp linux-3.0.4/arch/x86/lib/csum-wrappers_64.c linux-3.0.4/arch/x86/lib/csum-wrappers_64.c
17629--- linux-3.0.4/arch/x86/lib/csum-wrappers_64.c 2011-07-21 22:17:23.000000000 -0400
17630+++ linux-3.0.4/arch/x86/lib/csum-wrappers_64.c 2011-10-06 04:17:55.000000000 -0400
17631@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void _
17632 len -= 2;
17633 }
17634 }
17635- isum = csum_partial_copy_generic((__force const void *)src,
17636+
17637+#ifdef CONFIG_PAX_MEMORY_UDEREF
17638+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
17639+ src += PAX_USER_SHADOW_BASE;
17640+#endif
17641+
17642+ isum = csum_partial_copy_generic((const void __force_kernel *)src,
17643 dst, len, isum, errp, NULL);
17644 if (unlikely(*errp))
17645 goto out_err;
17646@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *sr
17647 }
17648
17649 *errp = 0;
17650- return csum_partial_copy_generic(src, (void __force *)dst,
17651+
17652+#ifdef CONFIG_PAX_MEMORY_UDEREF
17653+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
17654+ dst += PAX_USER_SHADOW_BASE;
17655+#endif
17656+
17657+ return csum_partial_copy_generic(src, (void __force_kernel *)dst,
17658 len, isum, NULL, errp);
17659 }
17660 EXPORT_SYMBOL(csum_partial_copy_to_user);
17661diff -urNp linux-3.0.4/arch/x86/lib/getuser.S linux-3.0.4/arch/x86/lib/getuser.S
17662--- linux-3.0.4/arch/x86/lib/getuser.S 2011-07-21 22:17:23.000000000 -0400
17663+++ linux-3.0.4/arch/x86/lib/getuser.S 2011-10-07 19:07:23.000000000 -0400
17664@@ -33,15 +33,38 @@
17665 #include <asm/asm-offsets.h>
17666 #include <asm/thread_info.h>
17667 #include <asm/asm.h>
17668+#include <asm/segment.h>
17669+#include <asm/pgtable.h>
17670+#include <asm/alternative-asm.h>
17671+
17672+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17673+#define __copyuser_seg gs;
17674+#else
17675+#define __copyuser_seg
17676+#endif
17677
17678 .text
17679 ENTRY(__get_user_1)
17680 CFI_STARTPROC
17681+
17682+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17683 GET_THREAD_INFO(%_ASM_DX)
17684 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17685 jae bad_get_user
17686-1: movzb (%_ASM_AX),%edx
17687+
17688+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17689+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17690+ cmp %_ASM_DX,%_ASM_AX
17691+ jae 1234f
17692+ add %_ASM_DX,%_ASM_AX
17693+1234:
17694+#endif
17695+
17696+#endif
17697+
17698+1: __copyuser_seg movzb (%_ASM_AX),%edx
17699 xor %eax,%eax
17700+ pax_force_retaddr
17701 ret
17702 CFI_ENDPROC
17703 ENDPROC(__get_user_1)
17704@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
17705 ENTRY(__get_user_2)
17706 CFI_STARTPROC
17707 add $1,%_ASM_AX
17708+
17709+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17710 jc bad_get_user
17711 GET_THREAD_INFO(%_ASM_DX)
17712 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17713 jae bad_get_user
17714-2: movzwl -1(%_ASM_AX),%edx
17715+
17716+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17717+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17718+ cmp %_ASM_DX,%_ASM_AX
17719+ jae 1234f
17720+ add %_ASM_DX,%_ASM_AX
17721+1234:
17722+#endif
17723+
17724+#endif
17725+
17726+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
17727 xor %eax,%eax
17728+ pax_force_retaddr
17729 ret
17730 CFI_ENDPROC
17731 ENDPROC(__get_user_2)
17732@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
17733 ENTRY(__get_user_4)
17734 CFI_STARTPROC
17735 add $3,%_ASM_AX
17736+
17737+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17738 jc bad_get_user
17739 GET_THREAD_INFO(%_ASM_DX)
17740 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17741 jae bad_get_user
17742-3: mov -3(%_ASM_AX),%edx
17743+
17744+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17745+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17746+ cmp %_ASM_DX,%_ASM_AX
17747+ jae 1234f
17748+ add %_ASM_DX,%_ASM_AX
17749+1234:
17750+#endif
17751+
17752+#endif
17753+
17754+3: __copyuser_seg mov -3(%_ASM_AX),%edx
17755 xor %eax,%eax
17756+ pax_force_retaddr
17757 ret
17758 CFI_ENDPROC
17759 ENDPROC(__get_user_4)
17760@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
17761 GET_THREAD_INFO(%_ASM_DX)
17762 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17763 jae bad_get_user
17764+
17765+#ifdef CONFIG_PAX_MEMORY_UDEREF
17766+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17767+ cmp %_ASM_DX,%_ASM_AX
17768+ jae 1234f
17769+ add %_ASM_DX,%_ASM_AX
17770+1234:
17771+#endif
17772+
17773 4: movq -7(%_ASM_AX),%_ASM_DX
17774 xor %eax,%eax
17775+ pax_force_retaddr
17776 ret
17777 CFI_ENDPROC
17778 ENDPROC(__get_user_8)
17779@@ -91,6 +152,7 @@ bad_get_user:
17780 CFI_STARTPROC
17781 xor %edx,%edx
17782 mov $(-EFAULT),%_ASM_AX
17783+ pax_force_retaddr
17784 ret
17785 CFI_ENDPROC
17786 END(bad_get_user)
17787diff -urNp linux-3.0.4/arch/x86/lib/insn.c linux-3.0.4/arch/x86/lib/insn.c
17788--- linux-3.0.4/arch/x86/lib/insn.c 2011-07-21 22:17:23.000000000 -0400
17789+++ linux-3.0.4/arch/x86/lib/insn.c 2011-08-23 21:47:55.000000000 -0400
17790@@ -21,6 +21,11 @@
17791 #include <linux/string.h>
17792 #include <asm/inat.h>
17793 #include <asm/insn.h>
17794+#ifdef __KERNEL__
17795+#include <asm/pgtable_types.h>
17796+#else
17797+#define ktla_ktva(addr) addr
17798+#endif
17799
17800 #define get_next(t, insn) \
17801 ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
17802@@ -40,8 +45,8 @@
17803 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
17804 {
17805 memset(insn, 0, sizeof(*insn));
17806- insn->kaddr = kaddr;
17807- insn->next_byte = kaddr;
17808+ insn->kaddr = ktla_ktva(kaddr);
17809+ insn->next_byte = ktla_ktva(kaddr);
17810 insn->x86_64 = x86_64 ? 1 : 0;
17811 insn->opnd_bytes = 4;
17812 if (x86_64)
17813diff -urNp linux-3.0.4/arch/x86/lib/iomap_copy_64.S linux-3.0.4/arch/x86/lib/iomap_copy_64.S
17814--- linux-3.0.4/arch/x86/lib/iomap_copy_64.S 2011-07-21 22:17:23.000000000 -0400
17815+++ linux-3.0.4/arch/x86/lib/iomap_copy_64.S 2011-10-06 04:17:55.000000000 -0400
17816@@ -17,6 +17,7 @@
17817
17818 #include <linux/linkage.h>
17819 #include <asm/dwarf2.h>
17820+#include <asm/alternative-asm.h>
17821
17822 /*
17823 * override generic version in lib/iomap_copy.c
17824@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
17825 CFI_STARTPROC
17826 movl %edx,%ecx
17827 rep movsd
17828+ pax_force_retaddr
17829 ret
17830 CFI_ENDPROC
17831 ENDPROC(__iowrite32_copy)
17832diff -urNp linux-3.0.4/arch/x86/lib/memcpy_64.S linux-3.0.4/arch/x86/lib/memcpy_64.S
17833--- linux-3.0.4/arch/x86/lib/memcpy_64.S 2011-07-21 22:17:23.000000000 -0400
17834+++ linux-3.0.4/arch/x86/lib/memcpy_64.S 2011-10-06 04:17:55.000000000 -0400
17835@@ -34,6 +34,7 @@
17836 rep movsq
17837 movl %edx, %ecx
17838 rep movsb
17839+ pax_force_retaddr
17840 ret
17841 .Lmemcpy_e:
17842 .previous
17843@@ -51,6 +52,7 @@
17844
17845 movl %edx, %ecx
17846 rep movsb
17847+ pax_force_retaddr
17848 ret
17849 .Lmemcpy_e_e:
17850 .previous
17851@@ -141,6 +143,7 @@ ENTRY(memcpy)
17852 movq %r9, 1*8(%rdi)
17853 movq %r10, -2*8(%rdi, %rdx)
17854 movq %r11, -1*8(%rdi, %rdx)
17855+ pax_force_retaddr
17856 retq
17857 .p2align 4
17858 .Lless_16bytes:
17859@@ -153,6 +156,7 @@ ENTRY(memcpy)
17860 movq -1*8(%rsi, %rdx), %r9
17861 movq %r8, 0*8(%rdi)
17862 movq %r9, -1*8(%rdi, %rdx)
17863+ pax_force_retaddr
17864 retq
17865 .p2align 4
17866 .Lless_8bytes:
17867@@ -166,6 +170,7 @@ ENTRY(memcpy)
17868 movl -4(%rsi, %rdx), %r8d
17869 movl %ecx, (%rdi)
17870 movl %r8d, -4(%rdi, %rdx)
17871+ pax_force_retaddr
17872 retq
17873 .p2align 4
17874 .Lless_3bytes:
17875@@ -183,6 +188,7 @@ ENTRY(memcpy)
17876 jnz .Lloop_1
17877
17878 .Lend:
17879+ pax_force_retaddr
17880 retq
17881 CFI_ENDPROC
17882 ENDPROC(memcpy)
17883diff -urNp linux-3.0.4/arch/x86/lib/memmove_64.S linux-3.0.4/arch/x86/lib/memmove_64.S
17884--- linux-3.0.4/arch/x86/lib/memmove_64.S 2011-07-21 22:17:23.000000000 -0400
17885+++ linux-3.0.4/arch/x86/lib/memmove_64.S 2011-10-06 04:17:55.000000000 -0400
17886@@ -9,6 +9,7 @@
17887 #include <linux/linkage.h>
17888 #include <asm/dwarf2.h>
17889 #include <asm/cpufeature.h>
17890+#include <asm/alternative-asm.h>
17891
17892 #undef memmove
17893
17894@@ -201,6 +202,7 @@ ENTRY(memmove)
17895 movb (%rsi), %r11b
17896 movb %r11b, (%rdi)
17897 13:
17898+ pax_force_retaddr
17899 retq
17900 CFI_ENDPROC
17901
17902@@ -209,6 +211,7 @@ ENTRY(memmove)
17903 /* Forward moving data. */
17904 movq %rdx, %rcx
17905 rep movsb
17906+ pax_force_retaddr
17907 retq
17908 .Lmemmove_end_forward_efs:
17909 .previous
17910diff -urNp linux-3.0.4/arch/x86/lib/memset_64.S linux-3.0.4/arch/x86/lib/memset_64.S
17911--- linux-3.0.4/arch/x86/lib/memset_64.S 2011-07-21 22:17:23.000000000 -0400
17912+++ linux-3.0.4/arch/x86/lib/memset_64.S 2011-10-06 04:17:55.000000000 -0400
17913@@ -31,6 +31,7 @@
17914 movl %r8d,%ecx
17915 rep stosb
17916 movq %r9,%rax
17917+ pax_force_retaddr
17918 ret
17919 .Lmemset_e:
17920 .previous
17921@@ -53,6 +54,7 @@
17922 movl %edx,%ecx
17923 rep stosb
17924 movq %r9,%rax
17925+ pax_force_retaddr
17926 ret
17927 .Lmemset_e_e:
17928 .previous
17929@@ -121,6 +123,7 @@ ENTRY(__memset)
17930
17931 .Lende:
17932 movq %r10,%rax
17933+ pax_force_retaddr
17934 ret
17935
17936 CFI_RESTORE_STATE
17937diff -urNp linux-3.0.4/arch/x86/lib/mmx_32.c linux-3.0.4/arch/x86/lib/mmx_32.c
17938--- linux-3.0.4/arch/x86/lib/mmx_32.c 2011-07-21 22:17:23.000000000 -0400
17939+++ linux-3.0.4/arch/x86/lib/mmx_32.c 2011-08-23 21:47:55.000000000 -0400
17940@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
17941 {
17942 void *p;
17943 int i;
17944+ unsigned long cr0;
17945
17946 if (unlikely(in_interrupt()))
17947 return __memcpy(to, from, len);
17948@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
17949 kernel_fpu_begin();
17950
17951 __asm__ __volatile__ (
17952- "1: prefetch (%0)\n" /* This set is 28 bytes */
17953- " prefetch 64(%0)\n"
17954- " prefetch 128(%0)\n"
17955- " prefetch 192(%0)\n"
17956- " prefetch 256(%0)\n"
17957+ "1: prefetch (%1)\n" /* This set is 28 bytes */
17958+ " prefetch 64(%1)\n"
17959+ " prefetch 128(%1)\n"
17960+ " prefetch 192(%1)\n"
17961+ " prefetch 256(%1)\n"
17962 "2: \n"
17963 ".section .fixup, \"ax\"\n"
17964- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17965+ "3: \n"
17966+
17967+#ifdef CONFIG_PAX_KERNEXEC
17968+ " movl %%cr0, %0\n"
17969+ " movl %0, %%eax\n"
17970+ " andl $0xFFFEFFFF, %%eax\n"
17971+ " movl %%eax, %%cr0\n"
17972+#endif
17973+
17974+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17975+
17976+#ifdef CONFIG_PAX_KERNEXEC
17977+ " movl %0, %%cr0\n"
17978+#endif
17979+
17980 " jmp 2b\n"
17981 ".previous\n"
17982 _ASM_EXTABLE(1b, 3b)
17983- : : "r" (from));
17984+ : "=&r" (cr0) : "r" (from) : "ax");
17985
17986 for ( ; i > 5; i--) {
17987 __asm__ __volatile__ (
17988- "1: prefetch 320(%0)\n"
17989- "2: movq (%0), %%mm0\n"
17990- " movq 8(%0), %%mm1\n"
17991- " movq 16(%0), %%mm2\n"
17992- " movq 24(%0), %%mm3\n"
17993- " movq %%mm0, (%1)\n"
17994- " movq %%mm1, 8(%1)\n"
17995- " movq %%mm2, 16(%1)\n"
17996- " movq %%mm3, 24(%1)\n"
17997- " movq 32(%0), %%mm0\n"
17998- " movq 40(%0), %%mm1\n"
17999- " movq 48(%0), %%mm2\n"
18000- " movq 56(%0), %%mm3\n"
18001- " movq %%mm0, 32(%1)\n"
18002- " movq %%mm1, 40(%1)\n"
18003- " movq %%mm2, 48(%1)\n"
18004- " movq %%mm3, 56(%1)\n"
18005+ "1: prefetch 320(%1)\n"
18006+ "2: movq (%1), %%mm0\n"
18007+ " movq 8(%1), %%mm1\n"
18008+ " movq 16(%1), %%mm2\n"
18009+ " movq 24(%1), %%mm3\n"
18010+ " movq %%mm0, (%2)\n"
18011+ " movq %%mm1, 8(%2)\n"
18012+ " movq %%mm2, 16(%2)\n"
18013+ " movq %%mm3, 24(%2)\n"
18014+ " movq 32(%1), %%mm0\n"
18015+ " movq 40(%1), %%mm1\n"
18016+ " movq 48(%1), %%mm2\n"
18017+ " movq 56(%1), %%mm3\n"
18018+ " movq %%mm0, 32(%2)\n"
18019+ " movq %%mm1, 40(%2)\n"
18020+ " movq %%mm2, 48(%2)\n"
18021+ " movq %%mm3, 56(%2)\n"
18022 ".section .fixup, \"ax\"\n"
18023- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18024+ "3:\n"
18025+
18026+#ifdef CONFIG_PAX_KERNEXEC
18027+ " movl %%cr0, %0\n"
18028+ " movl %0, %%eax\n"
18029+ " andl $0xFFFEFFFF, %%eax\n"
18030+ " movl %%eax, %%cr0\n"
18031+#endif
18032+
18033+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18034+
18035+#ifdef CONFIG_PAX_KERNEXEC
18036+ " movl %0, %%cr0\n"
18037+#endif
18038+
18039 " jmp 2b\n"
18040 ".previous\n"
18041 _ASM_EXTABLE(1b, 3b)
18042- : : "r" (from), "r" (to) : "memory");
18043+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18044
18045 from += 64;
18046 to += 64;
18047@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
18048 static void fast_copy_page(void *to, void *from)
18049 {
18050 int i;
18051+ unsigned long cr0;
18052
18053 kernel_fpu_begin();
18054
18055@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
18056 * but that is for later. -AV
18057 */
18058 __asm__ __volatile__(
18059- "1: prefetch (%0)\n"
18060- " prefetch 64(%0)\n"
18061- " prefetch 128(%0)\n"
18062- " prefetch 192(%0)\n"
18063- " prefetch 256(%0)\n"
18064+ "1: prefetch (%1)\n"
18065+ " prefetch 64(%1)\n"
18066+ " prefetch 128(%1)\n"
18067+ " prefetch 192(%1)\n"
18068+ " prefetch 256(%1)\n"
18069 "2: \n"
18070 ".section .fixup, \"ax\"\n"
18071- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18072+ "3: \n"
18073+
18074+#ifdef CONFIG_PAX_KERNEXEC
18075+ " movl %%cr0, %0\n"
18076+ " movl %0, %%eax\n"
18077+ " andl $0xFFFEFFFF, %%eax\n"
18078+ " movl %%eax, %%cr0\n"
18079+#endif
18080+
18081+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18082+
18083+#ifdef CONFIG_PAX_KERNEXEC
18084+ " movl %0, %%cr0\n"
18085+#endif
18086+
18087 " jmp 2b\n"
18088 ".previous\n"
18089- _ASM_EXTABLE(1b, 3b) : : "r" (from));
18090+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
18091
18092 for (i = 0; i < (4096-320)/64; i++) {
18093 __asm__ __volatile__ (
18094- "1: prefetch 320(%0)\n"
18095- "2: movq (%0), %%mm0\n"
18096- " movntq %%mm0, (%1)\n"
18097- " movq 8(%0), %%mm1\n"
18098- " movntq %%mm1, 8(%1)\n"
18099- " movq 16(%0), %%mm2\n"
18100- " movntq %%mm2, 16(%1)\n"
18101- " movq 24(%0), %%mm3\n"
18102- " movntq %%mm3, 24(%1)\n"
18103- " movq 32(%0), %%mm4\n"
18104- " movntq %%mm4, 32(%1)\n"
18105- " movq 40(%0), %%mm5\n"
18106- " movntq %%mm5, 40(%1)\n"
18107- " movq 48(%0), %%mm6\n"
18108- " movntq %%mm6, 48(%1)\n"
18109- " movq 56(%0), %%mm7\n"
18110- " movntq %%mm7, 56(%1)\n"
18111+ "1: prefetch 320(%1)\n"
18112+ "2: movq (%1), %%mm0\n"
18113+ " movntq %%mm0, (%2)\n"
18114+ " movq 8(%1), %%mm1\n"
18115+ " movntq %%mm1, 8(%2)\n"
18116+ " movq 16(%1), %%mm2\n"
18117+ " movntq %%mm2, 16(%2)\n"
18118+ " movq 24(%1), %%mm3\n"
18119+ " movntq %%mm3, 24(%2)\n"
18120+ " movq 32(%1), %%mm4\n"
18121+ " movntq %%mm4, 32(%2)\n"
18122+ " movq 40(%1), %%mm5\n"
18123+ " movntq %%mm5, 40(%2)\n"
18124+ " movq 48(%1), %%mm6\n"
18125+ " movntq %%mm6, 48(%2)\n"
18126+ " movq 56(%1), %%mm7\n"
18127+ " movntq %%mm7, 56(%2)\n"
18128 ".section .fixup, \"ax\"\n"
18129- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18130+ "3:\n"
18131+
18132+#ifdef CONFIG_PAX_KERNEXEC
18133+ " movl %%cr0, %0\n"
18134+ " movl %0, %%eax\n"
18135+ " andl $0xFFFEFFFF, %%eax\n"
18136+ " movl %%eax, %%cr0\n"
18137+#endif
18138+
18139+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18140+
18141+#ifdef CONFIG_PAX_KERNEXEC
18142+ " movl %0, %%cr0\n"
18143+#endif
18144+
18145 " jmp 2b\n"
18146 ".previous\n"
18147- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
18148+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18149
18150 from += 64;
18151 to += 64;
18152@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
18153 static void fast_copy_page(void *to, void *from)
18154 {
18155 int i;
18156+ unsigned long cr0;
18157
18158 kernel_fpu_begin();
18159
18160 __asm__ __volatile__ (
18161- "1: prefetch (%0)\n"
18162- " prefetch 64(%0)\n"
18163- " prefetch 128(%0)\n"
18164- " prefetch 192(%0)\n"
18165- " prefetch 256(%0)\n"
18166+ "1: prefetch (%1)\n"
18167+ " prefetch 64(%1)\n"
18168+ " prefetch 128(%1)\n"
18169+ " prefetch 192(%1)\n"
18170+ " prefetch 256(%1)\n"
18171 "2: \n"
18172 ".section .fixup, \"ax\"\n"
18173- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18174+ "3: \n"
18175+
18176+#ifdef CONFIG_PAX_KERNEXEC
18177+ " movl %%cr0, %0\n"
18178+ " movl %0, %%eax\n"
18179+ " andl $0xFFFEFFFF, %%eax\n"
18180+ " movl %%eax, %%cr0\n"
18181+#endif
18182+
18183+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18184+
18185+#ifdef CONFIG_PAX_KERNEXEC
18186+ " movl %0, %%cr0\n"
18187+#endif
18188+
18189 " jmp 2b\n"
18190 ".previous\n"
18191- _ASM_EXTABLE(1b, 3b) : : "r" (from));
18192+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
18193
18194 for (i = 0; i < 4096/64; i++) {
18195 __asm__ __volatile__ (
18196- "1: prefetch 320(%0)\n"
18197- "2: movq (%0), %%mm0\n"
18198- " movq 8(%0), %%mm1\n"
18199- " movq 16(%0), %%mm2\n"
18200- " movq 24(%0), %%mm3\n"
18201- " movq %%mm0, (%1)\n"
18202- " movq %%mm1, 8(%1)\n"
18203- " movq %%mm2, 16(%1)\n"
18204- " movq %%mm3, 24(%1)\n"
18205- " movq 32(%0), %%mm0\n"
18206- " movq 40(%0), %%mm1\n"
18207- " movq 48(%0), %%mm2\n"
18208- " movq 56(%0), %%mm3\n"
18209- " movq %%mm0, 32(%1)\n"
18210- " movq %%mm1, 40(%1)\n"
18211- " movq %%mm2, 48(%1)\n"
18212- " movq %%mm3, 56(%1)\n"
18213+ "1: prefetch 320(%1)\n"
18214+ "2: movq (%1), %%mm0\n"
18215+ " movq 8(%1), %%mm1\n"
18216+ " movq 16(%1), %%mm2\n"
18217+ " movq 24(%1), %%mm3\n"
18218+ " movq %%mm0, (%2)\n"
18219+ " movq %%mm1, 8(%2)\n"
18220+ " movq %%mm2, 16(%2)\n"
18221+ " movq %%mm3, 24(%2)\n"
18222+ " movq 32(%1), %%mm0\n"
18223+ " movq 40(%1), %%mm1\n"
18224+ " movq 48(%1), %%mm2\n"
18225+ " movq 56(%1), %%mm3\n"
18226+ " movq %%mm0, 32(%2)\n"
18227+ " movq %%mm1, 40(%2)\n"
18228+ " movq %%mm2, 48(%2)\n"
18229+ " movq %%mm3, 56(%2)\n"
18230 ".section .fixup, \"ax\"\n"
18231- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18232+ "3:\n"
18233+
18234+#ifdef CONFIG_PAX_KERNEXEC
18235+ " movl %%cr0, %0\n"
18236+ " movl %0, %%eax\n"
18237+ " andl $0xFFFEFFFF, %%eax\n"
18238+ " movl %%eax, %%cr0\n"
18239+#endif
18240+
18241+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18242+
18243+#ifdef CONFIG_PAX_KERNEXEC
18244+ " movl %0, %%cr0\n"
18245+#endif
18246+
18247 " jmp 2b\n"
18248 ".previous\n"
18249 _ASM_EXTABLE(1b, 3b)
18250- : : "r" (from), "r" (to) : "memory");
18251+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18252
18253 from += 64;
18254 to += 64;
18255diff -urNp linux-3.0.4/arch/x86/lib/msr-reg.S linux-3.0.4/arch/x86/lib/msr-reg.S
18256--- linux-3.0.4/arch/x86/lib/msr-reg.S 2011-07-21 22:17:23.000000000 -0400
18257+++ linux-3.0.4/arch/x86/lib/msr-reg.S 2011-10-07 19:07:28.000000000 -0400
18258@@ -3,6 +3,7 @@
18259 #include <asm/dwarf2.h>
18260 #include <asm/asm.h>
18261 #include <asm/msr.h>
18262+#include <asm/alternative-asm.h>
18263
18264 #ifdef CONFIG_X86_64
18265 /*
18266@@ -37,6 +38,7 @@ ENTRY(native_\op\()_safe_regs)
18267 movl %edi, 28(%r10)
18268 popq_cfi %rbp
18269 popq_cfi %rbx
18270+ pax_force_retaddr
18271 ret
18272 3:
18273 CFI_RESTORE_STATE
18274diff -urNp linux-3.0.4/arch/x86/lib/putuser.S linux-3.0.4/arch/x86/lib/putuser.S
18275--- linux-3.0.4/arch/x86/lib/putuser.S 2011-07-21 22:17:23.000000000 -0400
18276+++ linux-3.0.4/arch/x86/lib/putuser.S 2011-10-07 19:07:23.000000000 -0400
18277@@ -15,7 +15,9 @@
18278 #include <asm/thread_info.h>
18279 #include <asm/errno.h>
18280 #include <asm/asm.h>
18281-
18282+#include <asm/segment.h>
18283+#include <asm/pgtable.h>
18284+#include <asm/alternative-asm.h>
18285
18286 /*
18287 * __put_user_X
18288@@ -29,52 +31,119 @@
18289 * as they get called from within inline assembly.
18290 */
18291
18292-#define ENTER CFI_STARTPROC ; \
18293- GET_THREAD_INFO(%_ASM_BX)
18294-#define EXIT ret ; \
18295+#define ENTER CFI_STARTPROC
18296+#define EXIT pax_force_retaddr; ret ; \
18297 CFI_ENDPROC
18298
18299+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18300+#define _DEST %_ASM_CX,%_ASM_BX
18301+#else
18302+#define _DEST %_ASM_CX
18303+#endif
18304+
18305+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18306+#define __copyuser_seg gs;
18307+#else
18308+#define __copyuser_seg
18309+#endif
18310+
18311 .text
18312 ENTRY(__put_user_1)
18313 ENTER
18314+
18315+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18316+ GET_THREAD_INFO(%_ASM_BX)
18317 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
18318 jae bad_put_user
18319-1: movb %al,(%_ASM_CX)
18320+
18321+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18322+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18323+ cmp %_ASM_BX,%_ASM_CX
18324+ jb 1234f
18325+ xor %ebx,%ebx
18326+1234:
18327+#endif
18328+
18329+#endif
18330+
18331+1: __copyuser_seg movb %al,(_DEST)
18332 xor %eax,%eax
18333 EXIT
18334 ENDPROC(__put_user_1)
18335
18336 ENTRY(__put_user_2)
18337 ENTER
18338+
18339+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18340+ GET_THREAD_INFO(%_ASM_BX)
18341 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18342 sub $1,%_ASM_BX
18343 cmp %_ASM_BX,%_ASM_CX
18344 jae bad_put_user
18345-2: movw %ax,(%_ASM_CX)
18346+
18347+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18348+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18349+ cmp %_ASM_BX,%_ASM_CX
18350+ jb 1234f
18351+ xor %ebx,%ebx
18352+1234:
18353+#endif
18354+
18355+#endif
18356+
18357+2: __copyuser_seg movw %ax,(_DEST)
18358 xor %eax,%eax
18359 EXIT
18360 ENDPROC(__put_user_2)
18361
18362 ENTRY(__put_user_4)
18363 ENTER
18364+
18365+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18366+ GET_THREAD_INFO(%_ASM_BX)
18367 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18368 sub $3,%_ASM_BX
18369 cmp %_ASM_BX,%_ASM_CX
18370 jae bad_put_user
18371-3: movl %eax,(%_ASM_CX)
18372+
18373+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18374+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18375+ cmp %_ASM_BX,%_ASM_CX
18376+ jb 1234f
18377+ xor %ebx,%ebx
18378+1234:
18379+#endif
18380+
18381+#endif
18382+
18383+3: __copyuser_seg movl %eax,(_DEST)
18384 xor %eax,%eax
18385 EXIT
18386 ENDPROC(__put_user_4)
18387
18388 ENTRY(__put_user_8)
18389 ENTER
18390+
18391+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18392+ GET_THREAD_INFO(%_ASM_BX)
18393 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18394 sub $7,%_ASM_BX
18395 cmp %_ASM_BX,%_ASM_CX
18396 jae bad_put_user
18397-4: mov %_ASM_AX,(%_ASM_CX)
18398+
18399+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18400+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18401+ cmp %_ASM_BX,%_ASM_CX
18402+ jb 1234f
18403+ xor %ebx,%ebx
18404+1234:
18405+#endif
18406+
18407+#endif
18408+
18409+4: __copyuser_seg mov %_ASM_AX,(_DEST)
18410 #ifdef CONFIG_X86_32
18411-5: movl %edx,4(%_ASM_CX)
18412+5: __copyuser_seg movl %edx,4(_DEST)
18413 #endif
18414 xor %eax,%eax
18415 EXIT
18416diff -urNp linux-3.0.4/arch/x86/lib/rwlock_64.S linux-3.0.4/arch/x86/lib/rwlock_64.S
18417--- linux-3.0.4/arch/x86/lib/rwlock_64.S 2011-07-21 22:17:23.000000000 -0400
18418+++ linux-3.0.4/arch/x86/lib/rwlock_64.S 2011-10-06 04:17:55.000000000 -0400
18419@@ -17,6 +17,7 @@ ENTRY(__write_lock_failed)
18420 LOCK_PREFIX
18421 subl $RW_LOCK_BIAS,(%rdi)
18422 jnz __write_lock_failed
18423+ pax_force_retaddr
18424 ret
18425 CFI_ENDPROC
18426 END(__write_lock_failed)
18427@@ -33,6 +34,7 @@ ENTRY(__read_lock_failed)
18428 LOCK_PREFIX
18429 decl (%rdi)
18430 js __read_lock_failed
18431+ pax_force_retaddr
18432 ret
18433 CFI_ENDPROC
18434 END(__read_lock_failed)
18435diff -urNp linux-3.0.4/arch/x86/lib/rwsem_64.S linux-3.0.4/arch/x86/lib/rwsem_64.S
18436--- linux-3.0.4/arch/x86/lib/rwsem_64.S 2011-07-21 22:17:23.000000000 -0400
18437+++ linux-3.0.4/arch/x86/lib/rwsem_64.S 2011-10-07 10:46:47.000000000 -0400
18438@@ -51,6 +51,7 @@ ENTRY(call_rwsem_down_read_failed)
18439 popq_cfi %rdx
18440 CFI_RESTORE rdx
18441 restore_common_regs
18442+ pax_force_retaddr
18443 ret
18444 CFI_ENDPROC
18445 ENDPROC(call_rwsem_down_read_failed)
18446@@ -61,6 +62,7 @@ ENTRY(call_rwsem_down_write_failed)
18447 movq %rax,%rdi
18448 call rwsem_down_write_failed
18449 restore_common_regs
18450+ pax_force_retaddr
18451 ret
18452 CFI_ENDPROC
18453 ENDPROC(call_rwsem_down_write_failed)
18454@@ -73,7 +75,8 @@ ENTRY(call_rwsem_wake)
18455 movq %rax,%rdi
18456 call rwsem_wake
18457 restore_common_regs
18458-1: ret
18459+1: pax_force_retaddr
18460+ ret
18461 CFI_ENDPROC
18462 ENDPROC(call_rwsem_wake)
18463
18464@@ -88,6 +91,7 @@ ENTRY(call_rwsem_downgrade_wake)
18465 popq_cfi %rdx
18466 CFI_RESTORE rdx
18467 restore_common_regs
18468+ pax_force_retaddr
18469 ret
18470 CFI_ENDPROC
18471 ENDPROC(call_rwsem_downgrade_wake)
18472diff -urNp linux-3.0.4/arch/x86/lib/thunk_64.S linux-3.0.4/arch/x86/lib/thunk_64.S
18473--- linux-3.0.4/arch/x86/lib/thunk_64.S 2011-07-21 22:17:23.000000000 -0400
18474+++ linux-3.0.4/arch/x86/lib/thunk_64.S 2011-10-06 04:17:55.000000000 -0400
18475@@ -10,7 +10,8 @@
18476 #include <asm/dwarf2.h>
18477 #include <asm/calling.h>
18478 #include <asm/rwlock.h>
18479-
18480+ #include <asm/alternative-asm.h>
18481+
18482 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
18483 .macro thunk name,func
18484 .globl \name
18485@@ -50,5 +51,6 @@
18486 SAVE_ARGS
18487 restore:
18488 RESTORE_ARGS
18489- ret
18490+ pax_force_retaddr
18491+ ret
18492 CFI_ENDPROC
18493diff -urNp linux-3.0.4/arch/x86/lib/usercopy_32.c linux-3.0.4/arch/x86/lib/usercopy_32.c
18494--- linux-3.0.4/arch/x86/lib/usercopy_32.c 2011-07-21 22:17:23.000000000 -0400
18495+++ linux-3.0.4/arch/x86/lib/usercopy_32.c 2011-08-23 21:47:55.000000000 -0400
18496@@ -43,7 +43,7 @@ do { \
18497 __asm__ __volatile__( \
18498 " testl %1,%1\n" \
18499 " jz 2f\n" \
18500- "0: lodsb\n" \
18501+ "0: "__copyuser_seg"lodsb\n" \
18502 " stosb\n" \
18503 " testb %%al,%%al\n" \
18504 " jz 1f\n" \
18505@@ -128,10 +128,12 @@ do { \
18506 int __d0; \
18507 might_fault(); \
18508 __asm__ __volatile__( \
18509+ __COPYUSER_SET_ES \
18510 "0: rep; stosl\n" \
18511 " movl %2,%0\n" \
18512 "1: rep; stosb\n" \
18513 "2:\n" \
18514+ __COPYUSER_RESTORE_ES \
18515 ".section .fixup,\"ax\"\n" \
18516 "3: lea 0(%2,%0,4),%0\n" \
18517 " jmp 2b\n" \
18518@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
18519 might_fault();
18520
18521 __asm__ __volatile__(
18522+ __COPYUSER_SET_ES
18523 " testl %0, %0\n"
18524 " jz 3f\n"
18525 " andl %0,%%ecx\n"
18526@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
18527 " subl %%ecx,%0\n"
18528 " addl %0,%%eax\n"
18529 "1:\n"
18530+ __COPYUSER_RESTORE_ES
18531 ".section .fixup,\"ax\"\n"
18532 "2: xorl %%eax,%%eax\n"
18533 " jmp 1b\n"
18534@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
18535
18536 #ifdef CONFIG_X86_INTEL_USERCOPY
18537 static unsigned long
18538-__copy_user_intel(void __user *to, const void *from, unsigned long size)
18539+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
18540 {
18541 int d0, d1;
18542 __asm__ __volatile__(
18543@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
18544 " .align 2,0x90\n"
18545 "3: movl 0(%4), %%eax\n"
18546 "4: movl 4(%4), %%edx\n"
18547- "5: movl %%eax, 0(%3)\n"
18548- "6: movl %%edx, 4(%3)\n"
18549+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
18550+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
18551 "7: movl 8(%4), %%eax\n"
18552 "8: movl 12(%4),%%edx\n"
18553- "9: movl %%eax, 8(%3)\n"
18554- "10: movl %%edx, 12(%3)\n"
18555+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
18556+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
18557 "11: movl 16(%4), %%eax\n"
18558 "12: movl 20(%4), %%edx\n"
18559- "13: movl %%eax, 16(%3)\n"
18560- "14: movl %%edx, 20(%3)\n"
18561+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
18562+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
18563 "15: movl 24(%4), %%eax\n"
18564 "16: movl 28(%4), %%edx\n"
18565- "17: movl %%eax, 24(%3)\n"
18566- "18: movl %%edx, 28(%3)\n"
18567+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
18568+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
18569 "19: movl 32(%4), %%eax\n"
18570 "20: movl 36(%4), %%edx\n"
18571- "21: movl %%eax, 32(%3)\n"
18572- "22: movl %%edx, 36(%3)\n"
18573+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
18574+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
18575 "23: movl 40(%4), %%eax\n"
18576 "24: movl 44(%4), %%edx\n"
18577- "25: movl %%eax, 40(%3)\n"
18578- "26: movl %%edx, 44(%3)\n"
18579+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
18580+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
18581 "27: movl 48(%4), %%eax\n"
18582 "28: movl 52(%4), %%edx\n"
18583- "29: movl %%eax, 48(%3)\n"
18584- "30: movl %%edx, 52(%3)\n"
18585+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
18586+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
18587 "31: movl 56(%4), %%eax\n"
18588 "32: movl 60(%4), %%edx\n"
18589- "33: movl %%eax, 56(%3)\n"
18590- "34: movl %%edx, 60(%3)\n"
18591+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
18592+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
18593 " addl $-64, %0\n"
18594 " addl $64, %4\n"
18595 " addl $64, %3\n"
18596@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
18597 " shrl $2, %0\n"
18598 " andl $3, %%eax\n"
18599 " cld\n"
18600+ __COPYUSER_SET_ES
18601 "99: rep; movsl\n"
18602 "36: movl %%eax, %0\n"
18603 "37: rep; movsb\n"
18604 "100:\n"
18605+ __COPYUSER_RESTORE_ES
18606+ ".section .fixup,\"ax\"\n"
18607+ "101: lea 0(%%eax,%0,4),%0\n"
18608+ " jmp 100b\n"
18609+ ".previous\n"
18610+ ".section __ex_table,\"a\"\n"
18611+ " .align 4\n"
18612+ " .long 1b,100b\n"
18613+ " .long 2b,100b\n"
18614+ " .long 3b,100b\n"
18615+ " .long 4b,100b\n"
18616+ " .long 5b,100b\n"
18617+ " .long 6b,100b\n"
18618+ " .long 7b,100b\n"
18619+ " .long 8b,100b\n"
18620+ " .long 9b,100b\n"
18621+ " .long 10b,100b\n"
18622+ " .long 11b,100b\n"
18623+ " .long 12b,100b\n"
18624+ " .long 13b,100b\n"
18625+ " .long 14b,100b\n"
18626+ " .long 15b,100b\n"
18627+ " .long 16b,100b\n"
18628+ " .long 17b,100b\n"
18629+ " .long 18b,100b\n"
18630+ " .long 19b,100b\n"
18631+ " .long 20b,100b\n"
18632+ " .long 21b,100b\n"
18633+ " .long 22b,100b\n"
18634+ " .long 23b,100b\n"
18635+ " .long 24b,100b\n"
18636+ " .long 25b,100b\n"
18637+ " .long 26b,100b\n"
18638+ " .long 27b,100b\n"
18639+ " .long 28b,100b\n"
18640+ " .long 29b,100b\n"
18641+ " .long 30b,100b\n"
18642+ " .long 31b,100b\n"
18643+ " .long 32b,100b\n"
18644+ " .long 33b,100b\n"
18645+ " .long 34b,100b\n"
18646+ " .long 35b,100b\n"
18647+ " .long 36b,100b\n"
18648+ " .long 37b,100b\n"
18649+ " .long 99b,101b\n"
18650+ ".previous"
18651+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
18652+ : "1"(to), "2"(from), "0"(size)
18653+ : "eax", "edx", "memory");
18654+ return size;
18655+}
18656+
18657+static unsigned long
18658+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
18659+{
18660+ int d0, d1;
18661+ __asm__ __volatile__(
18662+ " .align 2,0x90\n"
18663+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
18664+ " cmpl $67, %0\n"
18665+ " jbe 3f\n"
18666+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
18667+ " .align 2,0x90\n"
18668+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
18669+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
18670+ "5: movl %%eax, 0(%3)\n"
18671+ "6: movl %%edx, 4(%3)\n"
18672+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
18673+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
18674+ "9: movl %%eax, 8(%3)\n"
18675+ "10: movl %%edx, 12(%3)\n"
18676+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
18677+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
18678+ "13: movl %%eax, 16(%3)\n"
18679+ "14: movl %%edx, 20(%3)\n"
18680+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
18681+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
18682+ "17: movl %%eax, 24(%3)\n"
18683+ "18: movl %%edx, 28(%3)\n"
18684+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
18685+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
18686+ "21: movl %%eax, 32(%3)\n"
18687+ "22: movl %%edx, 36(%3)\n"
18688+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
18689+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
18690+ "25: movl %%eax, 40(%3)\n"
18691+ "26: movl %%edx, 44(%3)\n"
18692+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
18693+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
18694+ "29: movl %%eax, 48(%3)\n"
18695+ "30: movl %%edx, 52(%3)\n"
18696+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
18697+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
18698+ "33: movl %%eax, 56(%3)\n"
18699+ "34: movl %%edx, 60(%3)\n"
18700+ " addl $-64, %0\n"
18701+ " addl $64, %4\n"
18702+ " addl $64, %3\n"
18703+ " cmpl $63, %0\n"
18704+ " ja 1b\n"
18705+ "35: movl %0, %%eax\n"
18706+ " shrl $2, %0\n"
18707+ " andl $3, %%eax\n"
18708+ " cld\n"
18709+ "99: rep; "__copyuser_seg" movsl\n"
18710+ "36: movl %%eax, %0\n"
18711+ "37: rep; "__copyuser_seg" movsb\n"
18712+ "100:\n"
18713 ".section .fixup,\"ax\"\n"
18714 "101: lea 0(%%eax,%0,4),%0\n"
18715 " jmp 100b\n"
18716@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
18717 int d0, d1;
18718 __asm__ __volatile__(
18719 " .align 2,0x90\n"
18720- "0: movl 32(%4), %%eax\n"
18721+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18722 " cmpl $67, %0\n"
18723 " jbe 2f\n"
18724- "1: movl 64(%4), %%eax\n"
18725+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18726 " .align 2,0x90\n"
18727- "2: movl 0(%4), %%eax\n"
18728- "21: movl 4(%4), %%edx\n"
18729+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18730+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18731 " movl %%eax, 0(%3)\n"
18732 " movl %%edx, 4(%3)\n"
18733- "3: movl 8(%4), %%eax\n"
18734- "31: movl 12(%4),%%edx\n"
18735+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18736+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18737 " movl %%eax, 8(%3)\n"
18738 " movl %%edx, 12(%3)\n"
18739- "4: movl 16(%4), %%eax\n"
18740- "41: movl 20(%4), %%edx\n"
18741+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18742+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18743 " movl %%eax, 16(%3)\n"
18744 " movl %%edx, 20(%3)\n"
18745- "10: movl 24(%4), %%eax\n"
18746- "51: movl 28(%4), %%edx\n"
18747+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18748+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18749 " movl %%eax, 24(%3)\n"
18750 " movl %%edx, 28(%3)\n"
18751- "11: movl 32(%4), %%eax\n"
18752- "61: movl 36(%4), %%edx\n"
18753+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18754+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18755 " movl %%eax, 32(%3)\n"
18756 " movl %%edx, 36(%3)\n"
18757- "12: movl 40(%4), %%eax\n"
18758- "71: movl 44(%4), %%edx\n"
18759+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18760+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18761 " movl %%eax, 40(%3)\n"
18762 " movl %%edx, 44(%3)\n"
18763- "13: movl 48(%4), %%eax\n"
18764- "81: movl 52(%4), %%edx\n"
18765+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18766+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18767 " movl %%eax, 48(%3)\n"
18768 " movl %%edx, 52(%3)\n"
18769- "14: movl 56(%4), %%eax\n"
18770- "91: movl 60(%4), %%edx\n"
18771+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18772+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18773 " movl %%eax, 56(%3)\n"
18774 " movl %%edx, 60(%3)\n"
18775 " addl $-64, %0\n"
18776@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
18777 " shrl $2, %0\n"
18778 " andl $3, %%eax\n"
18779 " cld\n"
18780- "6: rep; movsl\n"
18781+ "6: rep; "__copyuser_seg" movsl\n"
18782 " movl %%eax,%0\n"
18783- "7: rep; movsb\n"
18784+ "7: rep; "__copyuser_seg" movsb\n"
18785 "8:\n"
18786 ".section .fixup,\"ax\"\n"
18787 "9: lea 0(%%eax,%0,4),%0\n"
18788@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
18789
18790 __asm__ __volatile__(
18791 " .align 2,0x90\n"
18792- "0: movl 32(%4), %%eax\n"
18793+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18794 " cmpl $67, %0\n"
18795 " jbe 2f\n"
18796- "1: movl 64(%4), %%eax\n"
18797+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18798 " .align 2,0x90\n"
18799- "2: movl 0(%4), %%eax\n"
18800- "21: movl 4(%4), %%edx\n"
18801+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18802+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18803 " movnti %%eax, 0(%3)\n"
18804 " movnti %%edx, 4(%3)\n"
18805- "3: movl 8(%4), %%eax\n"
18806- "31: movl 12(%4),%%edx\n"
18807+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18808+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18809 " movnti %%eax, 8(%3)\n"
18810 " movnti %%edx, 12(%3)\n"
18811- "4: movl 16(%4), %%eax\n"
18812- "41: movl 20(%4), %%edx\n"
18813+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18814+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18815 " movnti %%eax, 16(%3)\n"
18816 " movnti %%edx, 20(%3)\n"
18817- "10: movl 24(%4), %%eax\n"
18818- "51: movl 28(%4), %%edx\n"
18819+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18820+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18821 " movnti %%eax, 24(%3)\n"
18822 " movnti %%edx, 28(%3)\n"
18823- "11: movl 32(%4), %%eax\n"
18824- "61: movl 36(%4), %%edx\n"
18825+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18826+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18827 " movnti %%eax, 32(%3)\n"
18828 " movnti %%edx, 36(%3)\n"
18829- "12: movl 40(%4), %%eax\n"
18830- "71: movl 44(%4), %%edx\n"
18831+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18832+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18833 " movnti %%eax, 40(%3)\n"
18834 " movnti %%edx, 44(%3)\n"
18835- "13: movl 48(%4), %%eax\n"
18836- "81: movl 52(%4), %%edx\n"
18837+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18838+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18839 " movnti %%eax, 48(%3)\n"
18840 " movnti %%edx, 52(%3)\n"
18841- "14: movl 56(%4), %%eax\n"
18842- "91: movl 60(%4), %%edx\n"
18843+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18844+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18845 " movnti %%eax, 56(%3)\n"
18846 " movnti %%edx, 60(%3)\n"
18847 " addl $-64, %0\n"
18848@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
18849 " shrl $2, %0\n"
18850 " andl $3, %%eax\n"
18851 " cld\n"
18852- "6: rep; movsl\n"
18853+ "6: rep; "__copyuser_seg" movsl\n"
18854 " movl %%eax,%0\n"
18855- "7: rep; movsb\n"
18856+ "7: rep; "__copyuser_seg" movsb\n"
18857 "8:\n"
18858 ".section .fixup,\"ax\"\n"
18859 "9: lea 0(%%eax,%0,4),%0\n"
18860@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
18861
18862 __asm__ __volatile__(
18863 " .align 2,0x90\n"
18864- "0: movl 32(%4), %%eax\n"
18865+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18866 " cmpl $67, %0\n"
18867 " jbe 2f\n"
18868- "1: movl 64(%4), %%eax\n"
18869+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18870 " .align 2,0x90\n"
18871- "2: movl 0(%4), %%eax\n"
18872- "21: movl 4(%4), %%edx\n"
18873+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18874+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18875 " movnti %%eax, 0(%3)\n"
18876 " movnti %%edx, 4(%3)\n"
18877- "3: movl 8(%4), %%eax\n"
18878- "31: movl 12(%4),%%edx\n"
18879+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18880+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18881 " movnti %%eax, 8(%3)\n"
18882 " movnti %%edx, 12(%3)\n"
18883- "4: movl 16(%4), %%eax\n"
18884- "41: movl 20(%4), %%edx\n"
18885+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18886+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18887 " movnti %%eax, 16(%3)\n"
18888 " movnti %%edx, 20(%3)\n"
18889- "10: movl 24(%4), %%eax\n"
18890- "51: movl 28(%4), %%edx\n"
18891+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18892+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18893 " movnti %%eax, 24(%3)\n"
18894 " movnti %%edx, 28(%3)\n"
18895- "11: movl 32(%4), %%eax\n"
18896- "61: movl 36(%4), %%edx\n"
18897+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18898+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18899 " movnti %%eax, 32(%3)\n"
18900 " movnti %%edx, 36(%3)\n"
18901- "12: movl 40(%4), %%eax\n"
18902- "71: movl 44(%4), %%edx\n"
18903+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18904+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18905 " movnti %%eax, 40(%3)\n"
18906 " movnti %%edx, 44(%3)\n"
18907- "13: movl 48(%4), %%eax\n"
18908- "81: movl 52(%4), %%edx\n"
18909+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18910+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18911 " movnti %%eax, 48(%3)\n"
18912 " movnti %%edx, 52(%3)\n"
18913- "14: movl 56(%4), %%eax\n"
18914- "91: movl 60(%4), %%edx\n"
18915+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18916+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18917 " movnti %%eax, 56(%3)\n"
18918 " movnti %%edx, 60(%3)\n"
18919 " addl $-64, %0\n"
18920@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
18921 " shrl $2, %0\n"
18922 " andl $3, %%eax\n"
18923 " cld\n"
18924- "6: rep; movsl\n"
18925+ "6: rep; "__copyuser_seg" movsl\n"
18926 " movl %%eax,%0\n"
18927- "7: rep; movsb\n"
18928+ "7: rep; "__copyuser_seg" movsb\n"
18929 "8:\n"
18930 ".section .fixup,\"ax\"\n"
18931 "9: lea 0(%%eax,%0,4),%0\n"
18932@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
18933 */
18934 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
18935 unsigned long size);
18936-unsigned long __copy_user_intel(void __user *to, const void *from,
18937+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
18938+ unsigned long size);
18939+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
18940 unsigned long size);
18941 unsigned long __copy_user_zeroing_intel_nocache(void *to,
18942 const void __user *from, unsigned long size);
18943 #endif /* CONFIG_X86_INTEL_USERCOPY */
18944
18945 /* Generic arbitrary sized copy. */
18946-#define __copy_user(to, from, size) \
18947+#define __copy_user(to, from, size, prefix, set, restore) \
18948 do { \
18949 int __d0, __d1, __d2; \
18950 __asm__ __volatile__( \
18951+ set \
18952 " cmp $7,%0\n" \
18953 " jbe 1f\n" \
18954 " movl %1,%0\n" \
18955 " negl %0\n" \
18956 " andl $7,%0\n" \
18957 " subl %0,%3\n" \
18958- "4: rep; movsb\n" \
18959+ "4: rep; "prefix"movsb\n" \
18960 " movl %3,%0\n" \
18961 " shrl $2,%0\n" \
18962 " andl $3,%3\n" \
18963 " .align 2,0x90\n" \
18964- "0: rep; movsl\n" \
18965+ "0: rep; "prefix"movsl\n" \
18966 " movl %3,%0\n" \
18967- "1: rep; movsb\n" \
18968+ "1: rep; "prefix"movsb\n" \
18969 "2:\n" \
18970+ restore \
18971 ".section .fixup,\"ax\"\n" \
18972 "5: addl %3,%0\n" \
18973 " jmp 2b\n" \
18974@@ -682,14 +799,14 @@ do { \
18975 " negl %0\n" \
18976 " andl $7,%0\n" \
18977 " subl %0,%3\n" \
18978- "4: rep; movsb\n" \
18979+ "4: rep; "__copyuser_seg"movsb\n" \
18980 " movl %3,%0\n" \
18981 " shrl $2,%0\n" \
18982 " andl $3,%3\n" \
18983 " .align 2,0x90\n" \
18984- "0: rep; movsl\n" \
18985+ "0: rep; "__copyuser_seg"movsl\n" \
18986 " movl %3,%0\n" \
18987- "1: rep; movsb\n" \
18988+ "1: rep; "__copyuser_seg"movsb\n" \
18989 "2:\n" \
18990 ".section .fixup,\"ax\"\n" \
18991 "5: addl %3,%0\n" \
18992@@ -775,9 +892,9 @@ survive:
18993 }
18994 #endif
18995 if (movsl_is_ok(to, from, n))
18996- __copy_user(to, from, n);
18997+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
18998 else
18999- n = __copy_user_intel(to, from, n);
19000+ n = __generic_copy_to_user_intel(to, from, n);
19001 return n;
19002 }
19003 EXPORT_SYMBOL(__copy_to_user_ll);
19004@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
19005 unsigned long n)
19006 {
19007 if (movsl_is_ok(to, from, n))
19008- __copy_user(to, from, n);
19009+ __copy_user(to, from, n, __copyuser_seg, "", "");
19010 else
19011- n = __copy_user_intel((void __user *)to,
19012- (const void *)from, n);
19013+ n = __generic_copy_from_user_intel(to, from, n);
19014 return n;
19015 }
19016 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
19017@@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocach
19018 if (n > 64 && cpu_has_xmm2)
19019 n = __copy_user_intel_nocache(to, from, n);
19020 else
19021- __copy_user(to, from, n);
19022+ __copy_user(to, from, n, __copyuser_seg, "", "");
19023 #else
19024- __copy_user(to, from, n);
19025+ __copy_user(to, from, n, __copyuser_seg, "", "");
19026 #endif
19027 return n;
19028 }
19029 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
19030
19031-/**
19032- * copy_to_user: - Copy a block of data into user space.
19033- * @to: Destination address, in user space.
19034- * @from: Source address, in kernel space.
19035- * @n: Number of bytes to copy.
19036- *
19037- * Context: User context only. This function may sleep.
19038- *
19039- * Copy data from kernel space to user space.
19040- *
19041- * Returns number of bytes that could not be copied.
19042- * On success, this will be zero.
19043- */
19044-unsigned long
19045-copy_to_user(void __user *to, const void *from, unsigned long n)
19046+void copy_from_user_overflow(void)
19047 {
19048- if (access_ok(VERIFY_WRITE, to, n))
19049- n = __copy_to_user(to, from, n);
19050- return n;
19051+ WARN(1, "Buffer overflow detected!\n");
19052 }
19053-EXPORT_SYMBOL(copy_to_user);
19054+EXPORT_SYMBOL(copy_from_user_overflow);
19055
19056-/**
19057- * copy_from_user: - Copy a block of data from user space.
19058- * @to: Destination address, in kernel space.
19059- * @from: Source address, in user space.
19060- * @n: Number of bytes to copy.
19061- *
19062- * Context: User context only. This function may sleep.
19063- *
19064- * Copy data from user space to kernel space.
19065- *
19066- * Returns number of bytes that could not be copied.
19067- * On success, this will be zero.
19068- *
19069- * If some data could not be copied, this function will pad the copied
19070- * data to the requested size using zero bytes.
19071- */
19072-unsigned long
19073-_copy_from_user(void *to, const void __user *from, unsigned long n)
19074+void copy_to_user_overflow(void)
19075 {
19076- if (access_ok(VERIFY_READ, from, n))
19077- n = __copy_from_user(to, from, n);
19078- else
19079- memset(to, 0, n);
19080- return n;
19081+ WARN(1, "Buffer overflow detected!\n");
19082 }
19083-EXPORT_SYMBOL(_copy_from_user);
19084+EXPORT_SYMBOL(copy_to_user_overflow);
19085
19086-void copy_from_user_overflow(void)
19087+#ifdef CONFIG_PAX_MEMORY_UDEREF
19088+void __set_fs(mm_segment_t x)
19089 {
19090- WARN(1, "Buffer overflow detected!\n");
19091+ switch (x.seg) {
19092+ case 0:
19093+ loadsegment(gs, 0);
19094+ break;
19095+ case TASK_SIZE_MAX:
19096+ loadsegment(gs, __USER_DS);
19097+ break;
19098+ case -1UL:
19099+ loadsegment(gs, __KERNEL_DS);
19100+ break;
19101+ default:
19102+ BUG();
19103+ }
19104+ return;
19105 }
19106-EXPORT_SYMBOL(copy_from_user_overflow);
19107+EXPORT_SYMBOL(__set_fs);
19108+
19109+void set_fs(mm_segment_t x)
19110+{
19111+ current_thread_info()->addr_limit = x;
19112+ __set_fs(x);
19113+}
19114+EXPORT_SYMBOL(set_fs);
19115+#endif
19116diff -urNp linux-3.0.4/arch/x86/lib/usercopy_64.c linux-3.0.4/arch/x86/lib/usercopy_64.c
19117--- linux-3.0.4/arch/x86/lib/usercopy_64.c 2011-07-21 22:17:23.000000000 -0400
19118+++ linux-3.0.4/arch/x86/lib/usercopy_64.c 2011-10-06 04:17:55.000000000 -0400
19119@@ -42,6 +42,12 @@ long
19120 __strncpy_from_user(char *dst, const char __user *src, long count)
19121 {
19122 long res;
19123+
19124+#ifdef CONFIG_PAX_MEMORY_UDEREF
19125+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19126+ src += PAX_USER_SHADOW_BASE;
19127+#endif
19128+
19129 __do_strncpy_from_user(dst, src, count, res);
19130 return res;
19131 }
19132@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
19133 {
19134 long __d0;
19135 might_fault();
19136+
19137+#ifdef CONFIG_PAX_MEMORY_UDEREF
19138+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
19139+ addr += PAX_USER_SHADOW_BASE;
19140+#endif
19141+
19142 /* no memory constraint because it doesn't change any memory gcc knows
19143 about */
19144 asm volatile(
19145@@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
19146
19147 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
19148 {
19149- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
19150- return copy_user_generic((__force void *)to, (__force void *)from, len);
19151- }
19152- return len;
19153+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
19154+
19155+#ifdef CONFIG_PAX_MEMORY_UDEREF
19156+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
19157+ to += PAX_USER_SHADOW_BASE;
19158+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
19159+ from += PAX_USER_SHADOW_BASE;
19160+#endif
19161+
19162+ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
19163+ }
19164+ return len;
19165 }
19166 EXPORT_SYMBOL(copy_in_user);
19167
19168@@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
19169 * it is not necessary to optimize tail handling.
19170 */
19171 unsigned long
19172-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
19173+copy_user_handle_tail(char __user *to, char __user *from, unsigned len, unsigned zerorest)
19174 {
19175 char c;
19176 unsigned zero_len;
19177diff -urNp linux-3.0.4/arch/x86/Makefile linux-3.0.4/arch/x86/Makefile
19178--- linux-3.0.4/arch/x86/Makefile 2011-07-21 22:17:23.000000000 -0400
19179+++ linux-3.0.4/arch/x86/Makefile 2011-08-23 21:48:14.000000000 -0400
19180@@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
19181 else
19182 BITS := 64
19183 UTS_MACHINE := x86_64
19184+ biarch := $(call cc-option,-m64)
19185 CHECKFLAGS += -D__x86_64__ -m64
19186
19187 KBUILD_AFLAGS += -m64
19188@@ -195,3 +196,12 @@ define archhelp
19189 echo ' FDARGS="..." arguments for the booted kernel'
19190 echo ' FDINITRD=file initrd for the booted kernel'
19191 endef
19192+
19193+define OLD_LD
19194+
19195+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
19196+*** Please upgrade your binutils to 2.18 or newer
19197+endef
19198+
19199+archprepare:
19200+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
19201diff -urNp linux-3.0.4/arch/x86/mm/extable.c linux-3.0.4/arch/x86/mm/extable.c
19202--- linux-3.0.4/arch/x86/mm/extable.c 2011-07-21 22:17:23.000000000 -0400
19203+++ linux-3.0.4/arch/x86/mm/extable.c 2011-08-23 21:47:55.000000000 -0400
19204@@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs
19205 const struct exception_table_entry *fixup;
19206
19207 #ifdef CONFIG_PNPBIOS
19208- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
19209+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
19210 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
19211 extern u32 pnp_bios_is_utter_crap;
19212 pnp_bios_is_utter_crap = 1;
19213diff -urNp linux-3.0.4/arch/x86/mm/fault.c linux-3.0.4/arch/x86/mm/fault.c
19214--- linux-3.0.4/arch/x86/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
19215+++ linux-3.0.4/arch/x86/mm/fault.c 2011-10-06 04:17:55.000000000 -0400
19216@@ -13,10 +13,18 @@
19217 #include <linux/perf_event.h> /* perf_sw_event */
19218 #include <linux/hugetlb.h> /* hstate_index_to_shift */
19219 #include <linux/prefetch.h> /* prefetchw */
19220+#include <linux/unistd.h>
19221+#include <linux/compiler.h>
19222
19223 #include <asm/traps.h> /* dotraplinkage, ... */
19224 #include <asm/pgalloc.h> /* pgd_*(), ... */
19225 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
19226+#include <asm/vsyscall.h>
19227+#include <asm/tlbflush.h>
19228+
19229+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19230+#include <asm/stacktrace.h>
19231+#endif
19232
19233 /*
19234 * Page fault error code bits:
19235@@ -54,7 +62,7 @@ static inline int __kprobes notify_page_
19236 int ret = 0;
19237
19238 /* kprobe_running() needs smp_processor_id() */
19239- if (kprobes_built_in() && !user_mode_vm(regs)) {
19240+ if (kprobes_built_in() && !user_mode(regs)) {
19241 preempt_disable();
19242 if (kprobe_running() && kprobe_fault_handler(regs, 14))
19243 ret = 1;
19244@@ -115,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *re
19245 return !instr_lo || (instr_lo>>1) == 1;
19246 case 0x00:
19247 /* Prefetch instruction is 0x0F0D or 0x0F18 */
19248- if (probe_kernel_address(instr, opcode))
19249+ if (user_mode(regs)) {
19250+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
19251+ return 0;
19252+ } else if (probe_kernel_address(instr, opcode))
19253 return 0;
19254
19255 *prefetch = (instr_lo == 0xF) &&
19256@@ -149,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsign
19257 while (instr < max_instr) {
19258 unsigned char opcode;
19259
19260- if (probe_kernel_address(instr, opcode))
19261+ if (user_mode(regs)) {
19262+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
19263+ break;
19264+ } else if (probe_kernel_address(instr, opcode))
19265 break;
19266
19267 instr++;
19268@@ -180,6 +194,30 @@ force_sig_info_fault(int si_signo, int s
19269 force_sig_info(si_signo, &info, tsk);
19270 }
19271
19272+#ifdef CONFIG_PAX_EMUTRAMP
19273+static int pax_handle_fetch_fault(struct pt_regs *regs);
19274+#endif
19275+
19276+#ifdef CONFIG_PAX_PAGEEXEC
19277+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
19278+{
19279+ pgd_t *pgd;
19280+ pud_t *pud;
19281+ pmd_t *pmd;
19282+
19283+ pgd = pgd_offset(mm, address);
19284+ if (!pgd_present(*pgd))
19285+ return NULL;
19286+ pud = pud_offset(pgd, address);
19287+ if (!pud_present(*pud))
19288+ return NULL;
19289+ pmd = pmd_offset(pud, address);
19290+ if (!pmd_present(*pmd))
19291+ return NULL;
19292+ return pmd;
19293+}
19294+#endif
19295+
19296 DEFINE_SPINLOCK(pgd_lock);
19297 LIST_HEAD(pgd_list);
19298
19299@@ -230,10 +268,22 @@ void vmalloc_sync_all(void)
19300 for (address = VMALLOC_START & PMD_MASK;
19301 address >= TASK_SIZE && address < FIXADDR_TOP;
19302 address += PMD_SIZE) {
19303+
19304+#ifdef CONFIG_PAX_PER_CPU_PGD
19305+ unsigned long cpu;
19306+#else
19307 struct page *page;
19308+#endif
19309
19310 spin_lock(&pgd_lock);
19311+
19312+#ifdef CONFIG_PAX_PER_CPU_PGD
19313+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
19314+ pgd_t *pgd = get_cpu_pgd(cpu);
19315+ pmd_t *ret;
19316+#else
19317 list_for_each_entry(page, &pgd_list, lru) {
19318+ pgd_t *pgd = page_address(page);
19319 spinlock_t *pgt_lock;
19320 pmd_t *ret;
19321
19322@@ -241,8 +291,13 @@ void vmalloc_sync_all(void)
19323 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
19324
19325 spin_lock(pgt_lock);
19326- ret = vmalloc_sync_one(page_address(page), address);
19327+#endif
19328+
19329+ ret = vmalloc_sync_one(pgd, address);
19330+
19331+#ifndef CONFIG_PAX_PER_CPU_PGD
19332 spin_unlock(pgt_lock);
19333+#endif
19334
19335 if (!ret)
19336 break;
19337@@ -276,6 +331,11 @@ static noinline __kprobes int vmalloc_fa
19338 * an interrupt in the middle of a task switch..
19339 */
19340 pgd_paddr = read_cr3();
19341+
19342+#ifdef CONFIG_PAX_PER_CPU_PGD
19343+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
19344+#endif
19345+
19346 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
19347 if (!pmd_k)
19348 return -1;
19349@@ -371,7 +431,14 @@ static noinline __kprobes int vmalloc_fa
19350 * happen within a race in page table update. In the later
19351 * case just flush:
19352 */
19353+
19354+#ifdef CONFIG_PAX_PER_CPU_PGD
19355+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
19356+ pgd = pgd_offset_cpu(smp_processor_id(), address);
19357+#else
19358 pgd = pgd_offset(current->active_mm, address);
19359+#endif
19360+
19361 pgd_ref = pgd_offset_k(address);
19362 if (pgd_none(*pgd_ref))
19363 return -1;
19364@@ -533,7 +600,7 @@ static int is_errata93(struct pt_regs *r
19365 static int is_errata100(struct pt_regs *regs, unsigned long address)
19366 {
19367 #ifdef CONFIG_X86_64
19368- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
19369+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
19370 return 1;
19371 #endif
19372 return 0;
19373@@ -560,7 +627,7 @@ static int is_f00f_bug(struct pt_regs *r
19374 }
19375
19376 static const char nx_warning[] = KERN_CRIT
19377-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
19378+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
19379
19380 static void
19381 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
19382@@ -569,14 +636,25 @@ show_fault_oops(struct pt_regs *regs, un
19383 if (!oops_may_print())
19384 return;
19385
19386- if (error_code & PF_INSTR) {
19387+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
19388 unsigned int level;
19389
19390 pte_t *pte = lookup_address(address, &level);
19391
19392 if (pte && pte_present(*pte) && !pte_exec(*pte))
19393- printk(nx_warning, current_uid());
19394+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
19395+ }
19396+
19397+#ifdef CONFIG_PAX_KERNEXEC
19398+ if (init_mm.start_code <= address && address < init_mm.end_code) {
19399+ if (current->signal->curr_ip)
19400+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
19401+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
19402+ else
19403+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
19404+ current->comm, task_pid_nr(current), current_uid(), current_euid());
19405 }
19406+#endif
19407
19408 printk(KERN_ALERT "BUG: unable to handle kernel ");
19409 if (address < PAGE_SIZE)
19410@@ -702,6 +780,66 @@ __bad_area_nosemaphore(struct pt_regs *r
19411 unsigned long address, int si_code)
19412 {
19413 struct task_struct *tsk = current;
19414+#if defined(CONFIG_X86_64) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19415+ struct mm_struct *mm = tsk->mm;
19416+#endif
19417+
19418+#ifdef CONFIG_X86_64
19419+ if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
19420+ if (regs->ip == VSYSCALL_ADDR(__NR_vgettimeofday) ||
19421+ regs->ip == VSYSCALL_ADDR(__NR_vtime) ||
19422+ regs->ip == VSYSCALL_ADDR(__NR_vgetcpu)) {
19423+ regs->ip += mm->context.vdso - PAGE_SIZE - VSYSCALL_START;
19424+ return;
19425+ }
19426+ }
19427+#endif
19428+
19429+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19430+ if (mm && (error_code & PF_USER)) {
19431+ unsigned long ip = regs->ip;
19432+
19433+ if (v8086_mode(regs))
19434+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
19435+
19436+ /*
19437+ * It's possible to have interrupts off here:
19438+ */
19439+ local_irq_enable();
19440+
19441+#ifdef CONFIG_PAX_PAGEEXEC
19442+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
19443+ (((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
19444+
19445+#ifdef CONFIG_PAX_EMUTRAMP
19446+ switch (pax_handle_fetch_fault(regs)) {
19447+ case 2:
19448+ return;
19449+ }
19450+#endif
19451+
19452+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
19453+ do_group_exit(SIGKILL);
19454+ }
19455+#endif
19456+
19457+#ifdef CONFIG_PAX_SEGMEXEC
19458+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
19459+
19460+#ifdef CONFIG_PAX_EMUTRAMP
19461+ switch (pax_handle_fetch_fault(regs)) {
19462+ case 2:
19463+ return;
19464+ }
19465+#endif
19466+
19467+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
19468+ do_group_exit(SIGKILL);
19469+ }
19470+#endif
19471+
19472+ }
19473+#endif
19474
19475 /* User mode accesses just cause a SIGSEGV */
19476 if (error_code & PF_USER) {
19477@@ -871,6 +1009,99 @@ static int spurious_fault_check(unsigned
19478 return 1;
19479 }
19480
19481+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
19482+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
19483+{
19484+ pte_t *pte;
19485+ pmd_t *pmd;
19486+ spinlock_t *ptl;
19487+ unsigned char pte_mask;
19488+
19489+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
19490+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
19491+ return 0;
19492+
19493+ /* PaX: it's our fault, let's handle it if we can */
19494+
19495+ /* PaX: take a look at read faults before acquiring any locks */
19496+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
19497+ /* instruction fetch attempt from a protected page in user mode */
19498+ up_read(&mm->mmap_sem);
19499+
19500+#ifdef CONFIG_PAX_EMUTRAMP
19501+ switch (pax_handle_fetch_fault(regs)) {
19502+ case 2:
19503+ return 1;
19504+ }
19505+#endif
19506+
19507+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
19508+ do_group_exit(SIGKILL);
19509+ }
19510+
19511+ pmd = pax_get_pmd(mm, address);
19512+ if (unlikely(!pmd))
19513+ return 0;
19514+
19515+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
19516+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
19517+ pte_unmap_unlock(pte, ptl);
19518+ return 0;
19519+ }
19520+
19521+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
19522+ /* write attempt to a protected page in user mode */
19523+ pte_unmap_unlock(pte, ptl);
19524+ return 0;
19525+ }
19526+
19527+#ifdef CONFIG_SMP
19528+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
19529+#else
19530+ if (likely(address > get_limit(regs->cs)))
19531+#endif
19532+ {
19533+ set_pte(pte, pte_mkread(*pte));
19534+ __flush_tlb_one(address);
19535+ pte_unmap_unlock(pte, ptl);
19536+ up_read(&mm->mmap_sem);
19537+ return 1;
19538+ }
19539+
19540+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
19541+
19542+ /*
19543+ * PaX: fill DTLB with user rights and retry
19544+ */
19545+ __asm__ __volatile__ (
19546+ "orb %2,(%1)\n"
19547+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
19548+/*
19549+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
19550+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
19551+ * page fault when examined during a TLB load attempt. this is true not only
19552+ * for PTEs holding a non-present entry but also present entries that will
19553+ * raise a page fault (such as those set up by PaX, or the copy-on-write
19554+ * mechanism). in effect it means that we do *not* need to flush the TLBs
19555+ * for our target pages since their PTEs are simply not in the TLBs at all.
19556+
19557+ * the best thing in omitting it is that we gain around 15-20% speed in the
19558+ * fast path of the page fault handler and can get rid of tracing since we
19559+ * can no longer flush unintended entries.
19560+ */
19561+ "invlpg (%0)\n"
19562+#endif
19563+ __copyuser_seg"testb $0,(%0)\n"
19564+ "xorb %3,(%1)\n"
19565+ :
19566+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
19567+ : "memory", "cc");
19568+ pte_unmap_unlock(pte, ptl);
19569+ up_read(&mm->mmap_sem);
19570+ return 1;
19571+}
19572+#endif
19573+
19574 /*
19575 * Handle a spurious fault caused by a stale TLB entry.
19576 *
19577@@ -943,6 +1174,9 @@ int show_unhandled_signals = 1;
19578 static inline int
19579 access_error(unsigned long error_code, struct vm_area_struct *vma)
19580 {
19581+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
19582+ return 1;
19583+
19584 if (error_code & PF_WRITE) {
19585 /* write, present and write, not present: */
19586 if (unlikely(!(vma->vm_flags & VM_WRITE)))
19587@@ -976,19 +1210,33 @@ do_page_fault(struct pt_regs *regs, unsi
19588 {
19589 struct vm_area_struct *vma;
19590 struct task_struct *tsk;
19591- unsigned long address;
19592 struct mm_struct *mm;
19593 int fault;
19594 int write = error_code & PF_WRITE;
19595 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
19596 (write ? FAULT_FLAG_WRITE : 0);
19597
19598+ /* Get the faulting address: */
19599+ unsigned long address = read_cr2();
19600+
19601+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19602+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
19603+ if (!search_exception_tables(regs->ip)) {
19604+ bad_area_nosemaphore(regs, error_code, address);
19605+ return;
19606+ }
19607+ if (address < PAX_USER_SHADOW_BASE) {
19608+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
19609+ printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
19610+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
19611+ } else
19612+ address -= PAX_USER_SHADOW_BASE;
19613+ }
19614+#endif
19615+
19616 tsk = current;
19617 mm = tsk->mm;
19618
19619- /* Get the faulting address: */
19620- address = read_cr2();
19621-
19622 /*
19623 * Detect and handle instructions that would cause a page fault for
19624 * both a tracked kernel page and a userspace page.
19625@@ -1048,7 +1296,7 @@ do_page_fault(struct pt_regs *regs, unsi
19626 * User-mode registers count as a user access even for any
19627 * potential system fault or CPU buglet:
19628 */
19629- if (user_mode_vm(regs)) {
19630+ if (user_mode(regs)) {
19631 local_irq_enable();
19632 error_code |= PF_USER;
19633 } else {
19634@@ -1103,6 +1351,11 @@ retry:
19635 might_sleep();
19636 }
19637
19638+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
19639+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
19640+ return;
19641+#endif
19642+
19643 vma = find_vma(mm, address);
19644 if (unlikely(!vma)) {
19645 bad_area(regs, error_code, address);
19646@@ -1114,18 +1367,24 @@ retry:
19647 bad_area(regs, error_code, address);
19648 return;
19649 }
19650- if (error_code & PF_USER) {
19651- /*
19652- * Accessing the stack below %sp is always a bug.
19653- * The large cushion allows instructions like enter
19654- * and pusha to work. ("enter $65535, $31" pushes
19655- * 32 pointers and then decrements %sp by 65535.)
19656- */
19657- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
19658- bad_area(regs, error_code, address);
19659- return;
19660- }
19661+ /*
19662+ * Accessing the stack below %sp is always a bug.
19663+ * The large cushion allows instructions like enter
19664+ * and pusha to work. ("enter $65535, $31" pushes
19665+ * 32 pointers and then decrements %sp by 65535.)
19666+ */
19667+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
19668+ bad_area(regs, error_code, address);
19669+ return;
19670 }
19671+
19672+#ifdef CONFIG_PAX_SEGMEXEC
19673+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
19674+ bad_area(regs, error_code, address);
19675+ return;
19676+ }
19677+#endif
19678+
19679 if (unlikely(expand_stack(vma, address))) {
19680 bad_area(regs, error_code, address);
19681 return;
19682@@ -1180,3 +1439,199 @@ good_area:
19683
19684 up_read(&mm->mmap_sem);
19685 }
19686+
19687+#ifdef CONFIG_PAX_EMUTRAMP
19688+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
19689+{
19690+ int err;
19691+
19692+ do { /* PaX: gcc trampoline emulation #1 */
19693+ unsigned char mov1, mov2;
19694+ unsigned short jmp;
19695+ unsigned int addr1, addr2;
19696+
19697+#ifdef CONFIG_X86_64
19698+ if ((regs->ip + 11) >> 32)
19699+ break;
19700+#endif
19701+
19702+ err = get_user(mov1, (unsigned char __user *)regs->ip);
19703+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
19704+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
19705+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
19706+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
19707+
19708+ if (err)
19709+ break;
19710+
19711+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
19712+ regs->cx = addr1;
19713+ regs->ax = addr2;
19714+ regs->ip = addr2;
19715+ return 2;
19716+ }
19717+ } while (0);
19718+
19719+ do { /* PaX: gcc trampoline emulation #2 */
19720+ unsigned char mov, jmp;
19721+ unsigned int addr1, addr2;
19722+
19723+#ifdef CONFIG_X86_64
19724+ if ((regs->ip + 9) >> 32)
19725+ break;
19726+#endif
19727+
19728+ err = get_user(mov, (unsigned char __user *)regs->ip);
19729+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
19730+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
19731+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
19732+
19733+ if (err)
19734+ break;
19735+
19736+ if (mov == 0xB9 && jmp == 0xE9) {
19737+ regs->cx = addr1;
19738+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
19739+ return 2;
19740+ }
19741+ } while (0);
19742+
19743+ return 1; /* PaX in action */
19744+}
19745+
19746+#ifdef CONFIG_X86_64
19747+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
19748+{
19749+ int err;
19750+
19751+ do { /* PaX: gcc trampoline emulation #1 */
19752+ unsigned short mov1, mov2, jmp1;
19753+ unsigned char jmp2;
19754+ unsigned int addr1;
19755+ unsigned long addr2;
19756+
19757+ err = get_user(mov1, (unsigned short __user *)regs->ip);
19758+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
19759+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
19760+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
19761+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
19762+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
19763+
19764+ if (err)
19765+ break;
19766+
19767+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
19768+ regs->r11 = addr1;
19769+ regs->r10 = addr2;
19770+ regs->ip = addr1;
19771+ return 2;
19772+ }
19773+ } while (0);
19774+
19775+ do { /* PaX: gcc trampoline emulation #2 */
19776+ unsigned short mov1, mov2, jmp1;
19777+ unsigned char jmp2;
19778+ unsigned long addr1, addr2;
19779+
19780+ err = get_user(mov1, (unsigned short __user *)regs->ip);
19781+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
19782+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
19783+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
19784+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
19785+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
19786+
19787+ if (err)
19788+ break;
19789+
19790+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
19791+ regs->r11 = addr1;
19792+ regs->r10 = addr2;
19793+ regs->ip = addr1;
19794+ return 2;
19795+ }
19796+ } while (0);
19797+
19798+ return 1; /* PaX in action */
19799+}
19800+#endif
19801+
19802+/*
19803+ * PaX: decide what to do with offenders (regs->ip = fault address)
19804+ *
19805+ * returns 1 when task should be killed
19806+ * 2 when gcc trampoline was detected
19807+ */
19808+static int pax_handle_fetch_fault(struct pt_regs *regs)
19809+{
19810+ if (v8086_mode(regs))
19811+ return 1;
19812+
19813+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
19814+ return 1;
19815+
19816+#ifdef CONFIG_X86_32
19817+ return pax_handle_fetch_fault_32(regs);
19818+#else
19819+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
19820+ return pax_handle_fetch_fault_32(regs);
19821+ else
19822+ return pax_handle_fetch_fault_64(regs);
19823+#endif
19824+}
19825+#endif
19826+
19827+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19828+void pax_report_insns(void *pc, void *sp)
19829+{
19830+ long i;
19831+
19832+ printk(KERN_ERR "PAX: bytes at PC: ");
19833+ for (i = 0; i < 20; i++) {
19834+ unsigned char c;
19835+ if (get_user(c, (unsigned char __force_user *)pc+i))
19836+ printk(KERN_CONT "?? ");
19837+ else
19838+ printk(KERN_CONT "%02x ", c);
19839+ }
19840+ printk("\n");
19841+
19842+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
19843+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
19844+ unsigned long c;
19845+ if (get_user(c, (unsigned long __force_user *)sp+i))
19846+#ifdef CONFIG_X86_32
19847+ printk(KERN_CONT "???????? ");
19848+#else
19849+ printk(KERN_CONT "???????????????? ");
19850+#endif
19851+ else
19852+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
19853+ }
19854+ printk("\n");
19855+}
19856+#endif
19857+
19858+/**
19859+ * probe_kernel_write(): safely attempt to write to a location
19860+ * @dst: address to write to
19861+ * @src: pointer to the data that shall be written
19862+ * @size: size of the data chunk
19863+ *
19864+ * Safely write to address @dst from the buffer at @src. If a kernel fault
19865+ * happens, handle that and return -EFAULT.
19866+ */
19867+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
19868+{
19869+ long ret;
19870+ mm_segment_t old_fs = get_fs();
19871+
19872+ set_fs(KERNEL_DS);
19873+ pagefault_disable();
19874+ pax_open_kernel();
19875+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
19876+ pax_close_kernel();
19877+ pagefault_enable();
19878+ set_fs(old_fs);
19879+
19880+ return ret ? -EFAULT : 0;
19881+}
19882diff -urNp linux-3.0.4/arch/x86/mm/gup.c linux-3.0.4/arch/x86/mm/gup.c
19883--- linux-3.0.4/arch/x86/mm/gup.c 2011-07-21 22:17:23.000000000 -0400
19884+++ linux-3.0.4/arch/x86/mm/gup.c 2011-08-23 21:47:55.000000000 -0400
19885@@ -263,7 +263,7 @@ int __get_user_pages_fast(unsigned long
19886 addr = start;
19887 len = (unsigned long) nr_pages << PAGE_SHIFT;
19888 end = start + len;
19889- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19890+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19891 (void __user *)start, len)))
19892 return 0;
19893
19894diff -urNp linux-3.0.4/arch/x86/mm/highmem_32.c linux-3.0.4/arch/x86/mm/highmem_32.c
19895--- linux-3.0.4/arch/x86/mm/highmem_32.c 2011-07-21 22:17:23.000000000 -0400
19896+++ linux-3.0.4/arch/x86/mm/highmem_32.c 2011-08-23 21:47:55.000000000 -0400
19897@@ -44,7 +44,10 @@ void *kmap_atomic_prot(struct page *page
19898 idx = type + KM_TYPE_NR*smp_processor_id();
19899 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
19900 BUG_ON(!pte_none(*(kmap_pte-idx)));
19901+
19902+ pax_open_kernel();
19903 set_pte(kmap_pte-idx, mk_pte(page, prot));
19904+ pax_close_kernel();
19905
19906 return (void *)vaddr;
19907 }
19908diff -urNp linux-3.0.4/arch/x86/mm/hugetlbpage.c linux-3.0.4/arch/x86/mm/hugetlbpage.c
19909--- linux-3.0.4/arch/x86/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
19910+++ linux-3.0.4/arch/x86/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
19911@@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmappe
19912 struct hstate *h = hstate_file(file);
19913 struct mm_struct *mm = current->mm;
19914 struct vm_area_struct *vma;
19915- unsigned long start_addr;
19916+ unsigned long start_addr, pax_task_size = TASK_SIZE;
19917+
19918+#ifdef CONFIG_PAX_SEGMEXEC
19919+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19920+ pax_task_size = SEGMEXEC_TASK_SIZE;
19921+#endif
19922+
19923+ pax_task_size -= PAGE_SIZE;
19924
19925 if (len > mm->cached_hole_size) {
19926- start_addr = mm->free_area_cache;
19927+ start_addr = mm->free_area_cache;
19928 } else {
19929- start_addr = TASK_UNMAPPED_BASE;
19930- mm->cached_hole_size = 0;
19931+ start_addr = mm->mmap_base;
19932+ mm->cached_hole_size = 0;
19933 }
19934
19935 full_search:
19936@@ -280,26 +287,27 @@ full_search:
19937
19938 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19939 /* At this point: (!vma || addr < vma->vm_end). */
19940- if (TASK_SIZE - len < addr) {
19941+ if (pax_task_size - len < addr) {
19942 /*
19943 * Start a new search - just in case we missed
19944 * some holes.
19945 */
19946- if (start_addr != TASK_UNMAPPED_BASE) {
19947- start_addr = TASK_UNMAPPED_BASE;
19948+ if (start_addr != mm->mmap_base) {
19949+ start_addr = mm->mmap_base;
19950 mm->cached_hole_size = 0;
19951 goto full_search;
19952 }
19953 return -ENOMEM;
19954 }
19955- if (!vma || addr + len <= vma->vm_start) {
19956- mm->free_area_cache = addr + len;
19957- return addr;
19958- }
19959+ if (check_heap_stack_gap(vma, addr, len))
19960+ break;
19961 if (addr + mm->cached_hole_size < vma->vm_start)
19962 mm->cached_hole_size = vma->vm_start - addr;
19963 addr = ALIGN(vma->vm_end, huge_page_size(h));
19964 }
19965+
19966+ mm->free_area_cache = addr + len;
19967+ return addr;
19968 }
19969
19970 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
19971@@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmappe
19972 {
19973 struct hstate *h = hstate_file(file);
19974 struct mm_struct *mm = current->mm;
19975- struct vm_area_struct *vma, *prev_vma;
19976- unsigned long base = mm->mmap_base, addr = addr0;
19977+ struct vm_area_struct *vma;
19978+ unsigned long base = mm->mmap_base, addr;
19979 unsigned long largest_hole = mm->cached_hole_size;
19980- int first_time = 1;
19981
19982 /* don't allow allocations above current base */
19983 if (mm->free_area_cache > base)
19984@@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmappe
19985 largest_hole = 0;
19986 mm->free_area_cache = base;
19987 }
19988-try_again:
19989+
19990 /* make sure it can fit in the remaining address space */
19991 if (mm->free_area_cache < len)
19992 goto fail;
19993
19994 /* either no address requested or can't fit in requested address hole */
19995- addr = (mm->free_area_cache - len) & huge_page_mask(h);
19996+ addr = (mm->free_area_cache - len);
19997 do {
19998+ addr &= huge_page_mask(h);
19999+ vma = find_vma(mm, addr);
20000 /*
20001 * Lookup failure means no vma is above this address,
20002 * i.e. return with success:
20003- */
20004- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
20005- return addr;
20006-
20007- /*
20008 * new region fits between prev_vma->vm_end and
20009 * vma->vm_start, use it:
20010 */
20011- if (addr + len <= vma->vm_start &&
20012- (!prev_vma || (addr >= prev_vma->vm_end))) {
20013+ if (check_heap_stack_gap(vma, addr, len)) {
20014 /* remember the address as a hint for next time */
20015- mm->cached_hole_size = largest_hole;
20016- return (mm->free_area_cache = addr);
20017- } else {
20018- /* pull free_area_cache down to the first hole */
20019- if (mm->free_area_cache == vma->vm_end) {
20020- mm->free_area_cache = vma->vm_start;
20021- mm->cached_hole_size = largest_hole;
20022- }
20023+ mm->cached_hole_size = largest_hole;
20024+ return (mm->free_area_cache = addr);
20025+ }
20026+ /* pull free_area_cache down to the first hole */
20027+ if (mm->free_area_cache == vma->vm_end) {
20028+ mm->free_area_cache = vma->vm_start;
20029+ mm->cached_hole_size = largest_hole;
20030 }
20031
20032 /* remember the largest hole we saw so far */
20033 if (addr + largest_hole < vma->vm_start)
20034- largest_hole = vma->vm_start - addr;
20035+ largest_hole = vma->vm_start - addr;
20036
20037 /* try just below the current vma->vm_start */
20038- addr = (vma->vm_start - len) & huge_page_mask(h);
20039- } while (len <= vma->vm_start);
20040+ addr = skip_heap_stack_gap(vma, len);
20041+ } while (!IS_ERR_VALUE(addr));
20042
20043 fail:
20044 /*
20045- * if hint left us with no space for the requested
20046- * mapping then try again:
20047- */
20048- if (first_time) {
20049- mm->free_area_cache = base;
20050- largest_hole = 0;
20051- first_time = 0;
20052- goto try_again;
20053- }
20054- /*
20055 * A failed mmap() very likely causes application failure,
20056 * so fall back to the bottom-up function here. This scenario
20057 * can happen with large stack limits and large mmap()
20058 * allocations.
20059 */
20060- mm->free_area_cache = TASK_UNMAPPED_BASE;
20061+
20062+#ifdef CONFIG_PAX_SEGMEXEC
20063+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20064+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
20065+ else
20066+#endif
20067+
20068+ mm->mmap_base = TASK_UNMAPPED_BASE;
20069+
20070+#ifdef CONFIG_PAX_RANDMMAP
20071+ if (mm->pax_flags & MF_PAX_RANDMMAP)
20072+ mm->mmap_base += mm->delta_mmap;
20073+#endif
20074+
20075+ mm->free_area_cache = mm->mmap_base;
20076 mm->cached_hole_size = ~0UL;
20077 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
20078 len, pgoff, flags);
20079@@ -386,6 +392,7 @@ fail:
20080 /*
20081 * Restore the topdown base:
20082 */
20083+ mm->mmap_base = base;
20084 mm->free_area_cache = base;
20085 mm->cached_hole_size = ~0UL;
20086
20087@@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *f
20088 struct hstate *h = hstate_file(file);
20089 struct mm_struct *mm = current->mm;
20090 struct vm_area_struct *vma;
20091+ unsigned long pax_task_size = TASK_SIZE;
20092
20093 if (len & ~huge_page_mask(h))
20094 return -EINVAL;
20095- if (len > TASK_SIZE)
20096+
20097+#ifdef CONFIG_PAX_SEGMEXEC
20098+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20099+ pax_task_size = SEGMEXEC_TASK_SIZE;
20100+#endif
20101+
20102+ pax_task_size -= PAGE_SIZE;
20103+
20104+ if (len > pax_task_size)
20105 return -ENOMEM;
20106
20107 if (flags & MAP_FIXED) {
20108@@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *f
20109 if (addr) {
20110 addr = ALIGN(addr, huge_page_size(h));
20111 vma = find_vma(mm, addr);
20112- if (TASK_SIZE - len >= addr &&
20113- (!vma || addr + len <= vma->vm_start))
20114+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
20115 return addr;
20116 }
20117 if (mm->get_unmapped_area == arch_get_unmapped_area)
20118diff -urNp linux-3.0.4/arch/x86/mm/init_32.c linux-3.0.4/arch/x86/mm/init_32.c
20119--- linux-3.0.4/arch/x86/mm/init_32.c 2011-07-21 22:17:23.000000000 -0400
20120+++ linux-3.0.4/arch/x86/mm/init_32.c 2011-08-23 21:47:55.000000000 -0400
20121@@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
20122 }
20123
20124 /*
20125- * Creates a middle page table and puts a pointer to it in the
20126- * given global directory entry. This only returns the gd entry
20127- * in non-PAE compilation mode, since the middle layer is folded.
20128- */
20129-static pmd_t * __init one_md_table_init(pgd_t *pgd)
20130-{
20131- pud_t *pud;
20132- pmd_t *pmd_table;
20133-
20134-#ifdef CONFIG_X86_PAE
20135- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
20136- if (after_bootmem)
20137- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
20138- else
20139- pmd_table = (pmd_t *)alloc_low_page();
20140- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
20141- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
20142- pud = pud_offset(pgd, 0);
20143- BUG_ON(pmd_table != pmd_offset(pud, 0));
20144-
20145- return pmd_table;
20146- }
20147-#endif
20148- pud = pud_offset(pgd, 0);
20149- pmd_table = pmd_offset(pud, 0);
20150-
20151- return pmd_table;
20152-}
20153-
20154-/*
20155 * Create a page table and place a pointer to it in a middle page
20156 * directory entry:
20157 */
20158@@ -123,13 +93,28 @@ static pte_t * __init one_page_table_ini
20159 page_table = (pte_t *)alloc_low_page();
20160
20161 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
20162+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20163+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
20164+#else
20165 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
20166+#endif
20167 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
20168 }
20169
20170 return pte_offset_kernel(pmd, 0);
20171 }
20172
20173+static pmd_t * __init one_md_table_init(pgd_t *pgd)
20174+{
20175+ pud_t *pud;
20176+ pmd_t *pmd_table;
20177+
20178+ pud = pud_offset(pgd, 0);
20179+ pmd_table = pmd_offset(pud, 0);
20180+
20181+ return pmd_table;
20182+}
20183+
20184 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
20185 {
20186 int pgd_idx = pgd_index(vaddr);
20187@@ -203,6 +188,7 @@ page_table_range_init(unsigned long star
20188 int pgd_idx, pmd_idx;
20189 unsigned long vaddr;
20190 pgd_t *pgd;
20191+ pud_t *pud;
20192 pmd_t *pmd;
20193 pte_t *pte = NULL;
20194
20195@@ -212,8 +198,13 @@ page_table_range_init(unsigned long star
20196 pgd = pgd_base + pgd_idx;
20197
20198 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
20199- pmd = one_md_table_init(pgd);
20200- pmd = pmd + pmd_index(vaddr);
20201+ pud = pud_offset(pgd, vaddr);
20202+ pmd = pmd_offset(pud, vaddr);
20203+
20204+#ifdef CONFIG_X86_PAE
20205+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
20206+#endif
20207+
20208 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
20209 pmd++, pmd_idx++) {
20210 pte = page_table_kmap_check(one_page_table_init(pmd),
20211@@ -225,11 +216,20 @@ page_table_range_init(unsigned long star
20212 }
20213 }
20214
20215-static inline int is_kernel_text(unsigned long addr)
20216+static inline int is_kernel_text(unsigned long start, unsigned long end)
20217 {
20218- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
20219- return 1;
20220- return 0;
20221+ if ((start > ktla_ktva((unsigned long)_etext) ||
20222+ end <= ktla_ktva((unsigned long)_stext)) &&
20223+ (start > ktla_ktva((unsigned long)_einittext) ||
20224+ end <= ktla_ktva((unsigned long)_sinittext)) &&
20225+
20226+#ifdef CONFIG_ACPI_SLEEP
20227+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
20228+#endif
20229+
20230+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
20231+ return 0;
20232+ return 1;
20233 }
20234
20235 /*
20236@@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned lo
20237 unsigned long last_map_addr = end;
20238 unsigned long start_pfn, end_pfn;
20239 pgd_t *pgd_base = swapper_pg_dir;
20240- int pgd_idx, pmd_idx, pte_ofs;
20241+ unsigned int pgd_idx, pmd_idx, pte_ofs;
20242 unsigned long pfn;
20243 pgd_t *pgd;
20244+ pud_t *pud;
20245 pmd_t *pmd;
20246 pte_t *pte;
20247 unsigned pages_2m, pages_4k;
20248@@ -281,8 +282,13 @@ repeat:
20249 pfn = start_pfn;
20250 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
20251 pgd = pgd_base + pgd_idx;
20252- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
20253- pmd = one_md_table_init(pgd);
20254+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
20255+ pud = pud_offset(pgd, 0);
20256+ pmd = pmd_offset(pud, 0);
20257+
20258+#ifdef CONFIG_X86_PAE
20259+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
20260+#endif
20261
20262 if (pfn >= end_pfn)
20263 continue;
20264@@ -294,14 +300,13 @@ repeat:
20265 #endif
20266 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
20267 pmd++, pmd_idx++) {
20268- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
20269+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
20270
20271 /*
20272 * Map with big pages if possible, otherwise
20273 * create normal page tables:
20274 */
20275 if (use_pse) {
20276- unsigned int addr2;
20277 pgprot_t prot = PAGE_KERNEL_LARGE;
20278 /*
20279 * first pass will use the same initial
20280@@ -311,11 +316,7 @@ repeat:
20281 __pgprot(PTE_IDENT_ATTR |
20282 _PAGE_PSE);
20283
20284- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
20285- PAGE_OFFSET + PAGE_SIZE-1;
20286-
20287- if (is_kernel_text(addr) ||
20288- is_kernel_text(addr2))
20289+ if (is_kernel_text(address, address + PMD_SIZE))
20290 prot = PAGE_KERNEL_LARGE_EXEC;
20291
20292 pages_2m++;
20293@@ -332,7 +333,7 @@ repeat:
20294 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
20295 pte += pte_ofs;
20296 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
20297- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
20298+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
20299 pgprot_t prot = PAGE_KERNEL;
20300 /*
20301 * first pass will use the same initial
20302@@ -340,7 +341,7 @@ repeat:
20303 */
20304 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
20305
20306- if (is_kernel_text(addr))
20307+ if (is_kernel_text(address, address + PAGE_SIZE))
20308 prot = PAGE_KERNEL_EXEC;
20309
20310 pages_4k++;
20311@@ -472,7 +473,7 @@ void __init native_pagetable_setup_start
20312
20313 pud = pud_offset(pgd, va);
20314 pmd = pmd_offset(pud, va);
20315- if (!pmd_present(*pmd))
20316+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
20317 break;
20318
20319 pte = pte_offset_kernel(pmd, va);
20320@@ -524,12 +525,10 @@ void __init early_ioremap_page_table_ran
20321
20322 static void __init pagetable_init(void)
20323 {
20324- pgd_t *pgd_base = swapper_pg_dir;
20325-
20326- permanent_kmaps_init(pgd_base);
20327+ permanent_kmaps_init(swapper_pg_dir);
20328 }
20329
20330-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
20331+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
20332 EXPORT_SYMBOL_GPL(__supported_pte_mask);
20333
20334 /* user-defined highmem size */
20335@@ -757,6 +756,12 @@ void __init mem_init(void)
20336
20337 pci_iommu_alloc();
20338
20339+#ifdef CONFIG_PAX_PER_CPU_PGD
20340+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
20341+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20342+ KERNEL_PGD_PTRS);
20343+#endif
20344+
20345 #ifdef CONFIG_FLATMEM
20346 BUG_ON(!mem_map);
20347 #endif
20348@@ -774,7 +779,7 @@ void __init mem_init(void)
20349 set_highmem_pages_init();
20350
20351 codesize = (unsigned long) &_etext - (unsigned long) &_text;
20352- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
20353+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
20354 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
20355
20356 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
20357@@ -815,10 +820,10 @@ void __init mem_init(void)
20358 ((unsigned long)&__init_end -
20359 (unsigned long)&__init_begin) >> 10,
20360
20361- (unsigned long)&_etext, (unsigned long)&_edata,
20362- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
20363+ (unsigned long)&_sdata, (unsigned long)&_edata,
20364+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
20365
20366- (unsigned long)&_text, (unsigned long)&_etext,
20367+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
20368 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
20369
20370 /*
20371@@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
20372 if (!kernel_set_to_readonly)
20373 return;
20374
20375+ start = ktla_ktva(start);
20376 pr_debug("Set kernel text: %lx - %lx for read write\n",
20377 start, start+size);
20378
20379@@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
20380 if (!kernel_set_to_readonly)
20381 return;
20382
20383+ start = ktla_ktva(start);
20384 pr_debug("Set kernel text: %lx - %lx for read only\n",
20385 start, start+size);
20386
20387@@ -938,6 +945,7 @@ void mark_rodata_ro(void)
20388 unsigned long start = PFN_ALIGN(_text);
20389 unsigned long size = PFN_ALIGN(_etext) - start;
20390
20391+ start = ktla_ktva(start);
20392 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
20393 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
20394 size >> 10);
20395diff -urNp linux-3.0.4/arch/x86/mm/init_64.c linux-3.0.4/arch/x86/mm/init_64.c
20396--- linux-3.0.4/arch/x86/mm/init_64.c 2011-07-21 22:17:23.000000000 -0400
20397+++ linux-3.0.4/arch/x86/mm/init_64.c 2011-10-06 04:17:55.000000000 -0400
20398@@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpa
20399 * around without checking the pgd every time.
20400 */
20401
20402-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
20403+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
20404 EXPORT_SYMBOL_GPL(__supported_pte_mask);
20405
20406 int force_personality32;
20407@@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long star
20408
20409 for (address = start; address <= end; address += PGDIR_SIZE) {
20410 const pgd_t *pgd_ref = pgd_offset_k(address);
20411+
20412+#ifdef CONFIG_PAX_PER_CPU_PGD
20413+ unsigned long cpu;
20414+#else
20415 struct page *page;
20416+#endif
20417
20418 if (pgd_none(*pgd_ref))
20419 continue;
20420
20421 spin_lock(&pgd_lock);
20422+
20423+#ifdef CONFIG_PAX_PER_CPU_PGD
20424+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20425+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
20426+#else
20427 list_for_each_entry(page, &pgd_list, lru) {
20428 pgd_t *pgd;
20429 spinlock_t *pgt_lock;
20430@@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long star
20431 /* the pgt_lock only for Xen */
20432 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
20433 spin_lock(pgt_lock);
20434+#endif
20435
20436 if (pgd_none(*pgd))
20437 set_pgd(pgd, *pgd_ref);
20438@@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long star
20439 BUG_ON(pgd_page_vaddr(*pgd)
20440 != pgd_page_vaddr(*pgd_ref));
20441
20442+#ifndef CONFIG_PAX_PER_CPU_PGD
20443 spin_unlock(pgt_lock);
20444+#endif
20445+
20446 }
20447 spin_unlock(&pgd_lock);
20448 }
20449@@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
20450 pmd = fill_pmd(pud, vaddr);
20451 pte = fill_pte(pmd, vaddr);
20452
20453+ pax_open_kernel();
20454 set_pte(pte, new_pte);
20455+ pax_close_kernel();
20456
20457 /*
20458 * It's enough to flush this one mapping.
20459@@ -262,14 +278,12 @@ static void __init __init_extra_mapping(
20460 pgd = pgd_offset_k((unsigned long)__va(phys));
20461 if (pgd_none(*pgd)) {
20462 pud = (pud_t *) spp_getpage();
20463- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
20464- _PAGE_USER));
20465+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
20466 }
20467 pud = pud_offset(pgd, (unsigned long)__va(phys));
20468 if (pud_none(*pud)) {
20469 pmd = (pmd_t *) spp_getpage();
20470- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
20471- _PAGE_USER));
20472+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
20473 }
20474 pmd = pmd_offset(pud, phys);
20475 BUG_ON(!pmd_none(*pmd));
20476@@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsign
20477 if (pfn >= pgt_buf_top)
20478 panic("alloc_low_page: ran out of memory");
20479
20480- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
20481+ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
20482 clear_page(adr);
20483 *phys = pfn * PAGE_SIZE;
20484 return adr;
20485@@ -346,7 +360,7 @@ static __ref void *map_low_page(void *vi
20486
20487 phys = __pa(virt);
20488 left = phys & (PAGE_SIZE - 1);
20489- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
20490+ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
20491 adr = (void *)(((unsigned long)adr) | left);
20492
20493 return adr;
20494@@ -693,6 +707,12 @@ void __init mem_init(void)
20495
20496 pci_iommu_alloc();
20497
20498+#ifdef CONFIG_PAX_PER_CPU_PGD
20499+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
20500+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20501+ KERNEL_PGD_PTRS);
20502+#endif
20503+
20504 /* clear_bss() already clear the empty_zero_page */
20505
20506 reservedpages = 0;
20507@@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
20508 static struct vm_area_struct gate_vma = {
20509 .vm_start = VSYSCALL_START,
20510 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
20511- .vm_page_prot = PAGE_READONLY_EXEC,
20512- .vm_flags = VM_READ | VM_EXEC
20513+ .vm_page_prot = PAGE_READONLY,
20514+ .vm_flags = VM_READ
20515 };
20516
20517 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
20518@@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long add
20519
20520 const char *arch_vma_name(struct vm_area_struct *vma)
20521 {
20522- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
20523+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
20524 return "[vdso]";
20525 if (vma == &gate_vma)
20526 return "[vsyscall]";
20527diff -urNp linux-3.0.4/arch/x86/mm/init.c linux-3.0.4/arch/x86/mm/init.c
20528--- linux-3.0.4/arch/x86/mm/init.c 2011-07-21 22:17:23.000000000 -0400
20529+++ linux-3.0.4/arch/x86/mm/init.c 2011-08-23 21:48:14.000000000 -0400
20530@@ -31,7 +31,7 @@ int direct_gbpages
20531 static void __init find_early_table_space(unsigned long end, int use_pse,
20532 int use_gbpages)
20533 {
20534- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
20535+ unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
20536 phys_addr_t base;
20537
20538 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
20539@@ -313,12 +313,34 @@ unsigned long __init_refok init_memory_m
20540 */
20541 int devmem_is_allowed(unsigned long pagenr)
20542 {
20543- if (pagenr <= 256)
20544+#ifdef CONFIG_GRKERNSEC_KMEM
20545+ /* allow BDA */
20546+ if (!pagenr)
20547+ return 1;
20548+ /* allow EBDA */
20549+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
20550+ return 1;
20551+#else
20552+ if (!pagenr)
20553+ return 1;
20554+#ifdef CONFIG_VM86
20555+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
20556+ return 1;
20557+#endif
20558+#endif
20559+
20560+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
20561 return 1;
20562+#ifdef CONFIG_GRKERNSEC_KMEM
20563+ /* throw out everything else below 1MB */
20564+ if (pagenr <= 256)
20565+ return 0;
20566+#endif
20567 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
20568 return 0;
20569 if (!page_is_ram(pagenr))
20570 return 1;
20571+
20572 return 0;
20573 }
20574
20575@@ -373,6 +395,86 @@ void free_init_pages(char *what, unsigne
20576
20577 void free_initmem(void)
20578 {
20579+
20580+#ifdef CONFIG_PAX_KERNEXEC
20581+#ifdef CONFIG_X86_32
20582+ /* PaX: limit KERNEL_CS to actual size */
20583+ unsigned long addr, limit;
20584+ struct desc_struct d;
20585+ int cpu;
20586+
20587+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
20588+ limit = (limit - 1UL) >> PAGE_SHIFT;
20589+
20590+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
20591+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
20592+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
20593+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
20594+ }
20595+
20596+ /* PaX: make KERNEL_CS read-only */
20597+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
20598+ if (!paravirt_enabled())
20599+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
20600+/*
20601+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
20602+ pgd = pgd_offset_k(addr);
20603+ pud = pud_offset(pgd, addr);
20604+ pmd = pmd_offset(pud, addr);
20605+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
20606+ }
20607+*/
20608+#ifdef CONFIG_X86_PAE
20609+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
20610+/*
20611+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
20612+ pgd = pgd_offset_k(addr);
20613+ pud = pud_offset(pgd, addr);
20614+ pmd = pmd_offset(pud, addr);
20615+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
20616+ }
20617+*/
20618+#endif
20619+
20620+#ifdef CONFIG_MODULES
20621+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
20622+#endif
20623+
20624+#else
20625+ pgd_t *pgd;
20626+ pud_t *pud;
20627+ pmd_t *pmd;
20628+ unsigned long addr, end;
20629+
20630+ /* PaX: make kernel code/rodata read-only, rest non-executable */
20631+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
20632+ pgd = pgd_offset_k(addr);
20633+ pud = pud_offset(pgd, addr);
20634+ pmd = pmd_offset(pud, addr);
20635+ if (!pmd_present(*pmd))
20636+ continue;
20637+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
20638+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
20639+ else
20640+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
20641+ }
20642+
20643+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
20644+ end = addr + KERNEL_IMAGE_SIZE;
20645+ for (; addr < end; addr += PMD_SIZE) {
20646+ pgd = pgd_offset_k(addr);
20647+ pud = pud_offset(pgd, addr);
20648+ pmd = pmd_offset(pud, addr);
20649+ if (!pmd_present(*pmd))
20650+ continue;
20651+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
20652+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
20653+ }
20654+#endif
20655+
20656+ flush_tlb_all();
20657+#endif
20658+
20659 free_init_pages("unused kernel memory",
20660 (unsigned long)(&__init_begin),
20661 (unsigned long)(&__init_end));
20662diff -urNp linux-3.0.4/arch/x86/mm/iomap_32.c linux-3.0.4/arch/x86/mm/iomap_32.c
20663--- linux-3.0.4/arch/x86/mm/iomap_32.c 2011-07-21 22:17:23.000000000 -0400
20664+++ linux-3.0.4/arch/x86/mm/iomap_32.c 2011-08-23 21:47:55.000000000 -0400
20665@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long
20666 type = kmap_atomic_idx_push();
20667 idx = type + KM_TYPE_NR * smp_processor_id();
20668 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
20669+
20670+ pax_open_kernel();
20671 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
20672+ pax_close_kernel();
20673+
20674 arch_flush_lazy_mmu_mode();
20675
20676 return (void *)vaddr;
20677diff -urNp linux-3.0.4/arch/x86/mm/ioremap.c linux-3.0.4/arch/x86/mm/ioremap.c
20678--- linux-3.0.4/arch/x86/mm/ioremap.c 2011-07-21 22:17:23.000000000 -0400
20679+++ linux-3.0.4/arch/x86/mm/ioremap.c 2011-08-23 21:47:55.000000000 -0400
20680@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(re
20681 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
20682 int is_ram = page_is_ram(pfn);
20683
20684- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
20685+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
20686 return NULL;
20687 WARN_ON_ONCE(is_ram);
20688 }
20689@@ -344,7 +344,7 @@ static int __init early_ioremap_debug_se
20690 early_param("early_ioremap_debug", early_ioremap_debug_setup);
20691
20692 static __initdata int after_paging_init;
20693-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
20694+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
20695
20696 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
20697 {
20698@@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
20699 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
20700
20701 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
20702- memset(bm_pte, 0, sizeof(bm_pte));
20703- pmd_populate_kernel(&init_mm, pmd, bm_pte);
20704+ pmd_populate_user(&init_mm, pmd, bm_pte);
20705
20706 /*
20707 * The boot-ioremap range spans multiple pmds, for which
20708diff -urNp linux-3.0.4/arch/x86/mm/kmemcheck/kmemcheck.c linux-3.0.4/arch/x86/mm/kmemcheck/kmemcheck.c
20709--- linux-3.0.4/arch/x86/mm/kmemcheck/kmemcheck.c 2011-07-21 22:17:23.000000000 -0400
20710+++ linux-3.0.4/arch/x86/mm/kmemcheck/kmemcheck.c 2011-08-23 21:47:55.000000000 -0400
20711@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
20712 * memory (e.g. tracked pages)? For now, we need this to avoid
20713 * invoking kmemcheck for PnP BIOS calls.
20714 */
20715- if (regs->flags & X86_VM_MASK)
20716+ if (v8086_mode(regs))
20717 return false;
20718- if (regs->cs != __KERNEL_CS)
20719+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
20720 return false;
20721
20722 pte = kmemcheck_pte_lookup(address);
20723diff -urNp linux-3.0.4/arch/x86/mm/mmap.c linux-3.0.4/arch/x86/mm/mmap.c
20724--- linux-3.0.4/arch/x86/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
20725+++ linux-3.0.4/arch/x86/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
20726@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
20727 * Leave an at least ~128 MB hole with possible stack randomization.
20728 */
20729 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
20730-#define MAX_GAP (TASK_SIZE/6*5)
20731+#define MAX_GAP (pax_task_size/6*5)
20732
20733 /*
20734 * True on X86_32 or when emulating IA32 on X86_64
20735@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
20736 return rnd << PAGE_SHIFT;
20737 }
20738
20739-static unsigned long mmap_base(void)
20740+static unsigned long mmap_base(struct mm_struct *mm)
20741 {
20742 unsigned long gap = rlimit(RLIMIT_STACK);
20743+ unsigned long pax_task_size = TASK_SIZE;
20744+
20745+#ifdef CONFIG_PAX_SEGMEXEC
20746+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20747+ pax_task_size = SEGMEXEC_TASK_SIZE;
20748+#endif
20749
20750 if (gap < MIN_GAP)
20751 gap = MIN_GAP;
20752 else if (gap > MAX_GAP)
20753 gap = MAX_GAP;
20754
20755- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
20756+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
20757 }
20758
20759 /*
20760 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
20761 * does, but not when emulating X86_32
20762 */
20763-static unsigned long mmap_legacy_base(void)
20764+static unsigned long mmap_legacy_base(struct mm_struct *mm)
20765 {
20766- if (mmap_is_ia32())
20767+ if (mmap_is_ia32()) {
20768+
20769+#ifdef CONFIG_PAX_SEGMEXEC
20770+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20771+ return SEGMEXEC_TASK_UNMAPPED_BASE;
20772+ else
20773+#endif
20774+
20775 return TASK_UNMAPPED_BASE;
20776- else
20777+ } else
20778 return TASK_UNMAPPED_BASE + mmap_rnd();
20779 }
20780
20781@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
20782 void arch_pick_mmap_layout(struct mm_struct *mm)
20783 {
20784 if (mmap_is_legacy()) {
20785- mm->mmap_base = mmap_legacy_base();
20786+ mm->mmap_base = mmap_legacy_base(mm);
20787+
20788+#ifdef CONFIG_PAX_RANDMMAP
20789+ if (mm->pax_flags & MF_PAX_RANDMMAP)
20790+ mm->mmap_base += mm->delta_mmap;
20791+#endif
20792+
20793 mm->get_unmapped_area = arch_get_unmapped_area;
20794 mm->unmap_area = arch_unmap_area;
20795 } else {
20796- mm->mmap_base = mmap_base();
20797+ mm->mmap_base = mmap_base(mm);
20798+
20799+#ifdef CONFIG_PAX_RANDMMAP
20800+ if (mm->pax_flags & MF_PAX_RANDMMAP)
20801+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
20802+#endif
20803+
20804 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
20805 mm->unmap_area = arch_unmap_area_topdown;
20806 }
20807diff -urNp linux-3.0.4/arch/x86/mm/mmio-mod.c linux-3.0.4/arch/x86/mm/mmio-mod.c
20808--- linux-3.0.4/arch/x86/mm/mmio-mod.c 2011-07-21 22:17:23.000000000 -0400
20809+++ linux-3.0.4/arch/x86/mm/mmio-mod.c 2011-08-23 21:47:55.000000000 -0400
20810@@ -195,7 +195,7 @@ static void pre(struct kmmio_probe *p, s
20811 break;
20812 default:
20813 {
20814- unsigned char *ip = (unsigned char *)instptr;
20815+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
20816 my_trace->opcode = MMIO_UNKNOWN_OP;
20817 my_trace->width = 0;
20818 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
20819@@ -235,7 +235,7 @@ static void post(struct kmmio_probe *p,
20820 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
20821 void __iomem *addr)
20822 {
20823- static atomic_t next_id;
20824+ static atomic_unchecked_t next_id;
20825 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
20826 /* These are page-unaligned. */
20827 struct mmiotrace_map map = {
20828@@ -259,7 +259,7 @@ static void ioremap_trace_core(resource_
20829 .private = trace
20830 },
20831 .phys = offset,
20832- .id = atomic_inc_return(&next_id)
20833+ .id = atomic_inc_return_unchecked(&next_id)
20834 };
20835 map.map_id = trace->id;
20836
20837diff -urNp linux-3.0.4/arch/x86/mm/pageattr.c linux-3.0.4/arch/x86/mm/pageattr.c
20838--- linux-3.0.4/arch/x86/mm/pageattr.c 2011-07-21 22:17:23.000000000 -0400
20839+++ linux-3.0.4/arch/x86/mm/pageattr.c 2011-08-23 21:47:55.000000000 -0400
20840@@ -261,7 +261,7 @@ static inline pgprot_t static_protection
20841 */
20842 #ifdef CONFIG_PCI_BIOS
20843 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
20844- pgprot_val(forbidden) |= _PAGE_NX;
20845+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20846 #endif
20847
20848 /*
20849@@ -269,9 +269,10 @@ static inline pgprot_t static_protection
20850 * Does not cover __inittext since that is gone later on. On
20851 * 64bit we do not enforce !NX on the low mapping
20852 */
20853- if (within(address, (unsigned long)_text, (unsigned long)_etext))
20854- pgprot_val(forbidden) |= _PAGE_NX;
20855+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
20856+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20857
20858+#ifdef CONFIG_DEBUG_RODATA
20859 /*
20860 * The .rodata section needs to be read-only. Using the pfn
20861 * catches all aliases.
20862@@ -279,6 +280,7 @@ static inline pgprot_t static_protection
20863 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
20864 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
20865 pgprot_val(forbidden) |= _PAGE_RW;
20866+#endif
20867
20868 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
20869 /*
20870@@ -317,6 +319,13 @@ static inline pgprot_t static_protection
20871 }
20872 #endif
20873
20874+#ifdef CONFIG_PAX_KERNEXEC
20875+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
20876+ pgprot_val(forbidden) |= _PAGE_RW;
20877+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20878+ }
20879+#endif
20880+
20881 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
20882
20883 return prot;
20884@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
20885 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
20886 {
20887 /* change init_mm */
20888+ pax_open_kernel();
20889 set_pte_atomic(kpte, pte);
20890+
20891 #ifdef CONFIG_X86_32
20892 if (!SHARED_KERNEL_PMD) {
20893+
20894+#ifdef CONFIG_PAX_PER_CPU_PGD
20895+ unsigned long cpu;
20896+#else
20897 struct page *page;
20898+#endif
20899
20900+#ifdef CONFIG_PAX_PER_CPU_PGD
20901+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20902+ pgd_t *pgd = get_cpu_pgd(cpu);
20903+#else
20904 list_for_each_entry(page, &pgd_list, lru) {
20905- pgd_t *pgd;
20906+ pgd_t *pgd = (pgd_t *)page_address(page);
20907+#endif
20908+
20909 pud_t *pud;
20910 pmd_t *pmd;
20911
20912- pgd = (pgd_t *)page_address(page) + pgd_index(address);
20913+ pgd += pgd_index(address);
20914 pud = pud_offset(pgd, address);
20915 pmd = pmd_offset(pud, address);
20916 set_pte_atomic((pte_t *)pmd, pte);
20917 }
20918 }
20919 #endif
20920+ pax_close_kernel();
20921 }
20922
20923 static int
20924diff -urNp linux-3.0.4/arch/x86/mm/pageattr-test.c linux-3.0.4/arch/x86/mm/pageattr-test.c
20925--- linux-3.0.4/arch/x86/mm/pageattr-test.c 2011-07-21 22:17:23.000000000 -0400
20926+++ linux-3.0.4/arch/x86/mm/pageattr-test.c 2011-08-23 21:47:55.000000000 -0400
20927@@ -36,7 +36,7 @@ enum {
20928
20929 static int pte_testbit(pte_t pte)
20930 {
20931- return pte_flags(pte) & _PAGE_UNUSED1;
20932+ return pte_flags(pte) & _PAGE_CPA_TEST;
20933 }
20934
20935 struct split_state {
20936diff -urNp linux-3.0.4/arch/x86/mm/pat.c linux-3.0.4/arch/x86/mm/pat.c
20937--- linux-3.0.4/arch/x86/mm/pat.c 2011-07-21 22:17:23.000000000 -0400
20938+++ linux-3.0.4/arch/x86/mm/pat.c 2011-08-23 21:47:55.000000000 -0400
20939@@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
20940
20941 if (!entry) {
20942 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
20943- current->comm, current->pid, start, end);
20944+ current->comm, task_pid_nr(current), start, end);
20945 return -EINVAL;
20946 }
20947
20948@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsig
20949 while (cursor < to) {
20950 if (!devmem_is_allowed(pfn)) {
20951 printk(KERN_INFO
20952- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
20953- current->comm, from, to);
20954+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
20955+ current->comm, from, to, cursor);
20956 return 0;
20957 }
20958 cursor += PAGE_SIZE;
20959@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, un
20960 printk(KERN_INFO
20961 "%s:%d ioremap_change_attr failed %s "
20962 "for %Lx-%Lx\n",
20963- current->comm, current->pid,
20964+ current->comm, task_pid_nr(current),
20965 cattr_name(flags),
20966 base, (unsigned long long)(base + size));
20967 return -EINVAL;
20968@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr,
20969 if (want_flags != flags) {
20970 printk(KERN_WARNING
20971 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
20972- current->comm, current->pid,
20973+ current->comm, task_pid_nr(current),
20974 cattr_name(want_flags),
20975 (unsigned long long)paddr,
20976 (unsigned long long)(paddr + size),
20977@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr,
20978 free_memtype(paddr, paddr + size);
20979 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
20980 " for %Lx-%Lx, got %s\n",
20981- current->comm, current->pid,
20982+ current->comm, task_pid_nr(current),
20983 cattr_name(want_flags),
20984 (unsigned long long)paddr,
20985 (unsigned long long)(paddr + size),
20986diff -urNp linux-3.0.4/arch/x86/mm/pf_in.c linux-3.0.4/arch/x86/mm/pf_in.c
20987--- linux-3.0.4/arch/x86/mm/pf_in.c 2011-07-21 22:17:23.000000000 -0400
20988+++ linux-3.0.4/arch/x86/mm/pf_in.c 2011-08-23 21:47:55.000000000 -0400
20989@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
20990 int i;
20991 enum reason_type rv = OTHERS;
20992
20993- p = (unsigned char *)ins_addr;
20994+ p = (unsigned char *)ktla_ktva(ins_addr);
20995 p += skip_prefix(p, &prf);
20996 p += get_opcode(p, &opcode);
20997
20998@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
20999 struct prefix_bits prf;
21000 int i;
21001
21002- p = (unsigned char *)ins_addr;
21003+ p = (unsigned char *)ktla_ktva(ins_addr);
21004 p += skip_prefix(p, &prf);
21005 p += get_opcode(p, &opcode);
21006
21007@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
21008 struct prefix_bits prf;
21009 int i;
21010
21011- p = (unsigned char *)ins_addr;
21012+ p = (unsigned char *)ktla_ktva(ins_addr);
21013 p += skip_prefix(p, &prf);
21014 p += get_opcode(p, &opcode);
21015
21016@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned l
21017 struct prefix_bits prf;
21018 int i;
21019
21020- p = (unsigned char *)ins_addr;
21021+ p = (unsigned char *)ktla_ktva(ins_addr);
21022 p += skip_prefix(p, &prf);
21023 p += get_opcode(p, &opcode);
21024 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
21025@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned l
21026 struct prefix_bits prf;
21027 int i;
21028
21029- p = (unsigned char *)ins_addr;
21030+ p = (unsigned char *)ktla_ktva(ins_addr);
21031 p += skip_prefix(p, &prf);
21032 p += get_opcode(p, &opcode);
21033 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
21034diff -urNp linux-3.0.4/arch/x86/mm/pgtable_32.c linux-3.0.4/arch/x86/mm/pgtable_32.c
21035--- linux-3.0.4/arch/x86/mm/pgtable_32.c 2011-07-21 22:17:23.000000000 -0400
21036+++ linux-3.0.4/arch/x86/mm/pgtable_32.c 2011-08-23 21:47:55.000000000 -0400
21037@@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr,
21038 return;
21039 }
21040 pte = pte_offset_kernel(pmd, vaddr);
21041+
21042+ pax_open_kernel();
21043 if (pte_val(pteval))
21044 set_pte_at(&init_mm, vaddr, pte, pteval);
21045 else
21046 pte_clear(&init_mm, vaddr, pte);
21047+ pax_close_kernel();
21048
21049 /*
21050 * It's enough to flush this one mapping.
21051diff -urNp linux-3.0.4/arch/x86/mm/pgtable.c linux-3.0.4/arch/x86/mm/pgtable.c
21052--- linux-3.0.4/arch/x86/mm/pgtable.c 2011-07-21 22:17:23.000000000 -0400
21053+++ linux-3.0.4/arch/x86/mm/pgtable.c 2011-08-23 21:47:55.000000000 -0400
21054@@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *p
21055 list_del(&page->lru);
21056 }
21057
21058-#define UNSHARED_PTRS_PER_PGD \
21059- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
21060+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21061+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
21062
21063+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
21064+{
21065+ while (count--)
21066+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
21067+}
21068+#endif
21069+
21070+#ifdef CONFIG_PAX_PER_CPU_PGD
21071+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
21072+{
21073+ while (count--)
21074+
21075+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21076+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
21077+#else
21078+ *dst++ = *src++;
21079+#endif
21080
21081+}
21082+#endif
21083+
21084+#ifdef CONFIG_X86_64
21085+#define pxd_t pud_t
21086+#define pyd_t pgd_t
21087+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
21088+#define pxd_free(mm, pud) pud_free((mm), (pud))
21089+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
21090+#define pyd_offset(mm ,address) pgd_offset((mm), (address))
21091+#define PYD_SIZE PGDIR_SIZE
21092+#else
21093+#define pxd_t pmd_t
21094+#define pyd_t pud_t
21095+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
21096+#define pxd_free(mm, pud) pmd_free((mm), (pud))
21097+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
21098+#define pyd_offset(mm ,address) pud_offset((mm), (address))
21099+#define PYD_SIZE PUD_SIZE
21100+#endif
21101+
21102+#ifdef CONFIG_PAX_PER_CPU_PGD
21103+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
21104+static inline void pgd_dtor(pgd_t *pgd) {}
21105+#else
21106 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
21107 {
21108 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
21109@@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
21110 pgd_list_del(pgd);
21111 spin_unlock(&pgd_lock);
21112 }
21113+#endif
21114
21115 /*
21116 * List of all pgd's needed for non-PAE so it can invalidate entries
21117@@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
21118 * -- wli
21119 */
21120
21121-#ifdef CONFIG_X86_PAE
21122+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
21123 /*
21124 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
21125 * updating the top-level pagetable entries to guarantee the
21126@@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
21127 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
21128 * and initialize the kernel pmds here.
21129 */
21130-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
21131+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
21132
21133 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
21134 {
21135@@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm,
21136 */
21137 flush_tlb_mm(mm);
21138 }
21139+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
21140+#define PREALLOCATED_PXDS USER_PGD_PTRS
21141 #else /* !CONFIG_X86_PAE */
21142
21143 /* No need to prepopulate any pagetable entries in non-PAE modes. */
21144-#define PREALLOCATED_PMDS 0
21145+#define PREALLOCATED_PXDS 0
21146
21147 #endif /* CONFIG_X86_PAE */
21148
21149-static void free_pmds(pmd_t *pmds[])
21150+static void free_pxds(pxd_t *pxds[])
21151 {
21152 int i;
21153
21154- for(i = 0; i < PREALLOCATED_PMDS; i++)
21155- if (pmds[i])
21156- free_page((unsigned long)pmds[i]);
21157+ for(i = 0; i < PREALLOCATED_PXDS; i++)
21158+ if (pxds[i])
21159+ free_page((unsigned long)pxds[i]);
21160 }
21161
21162-static int preallocate_pmds(pmd_t *pmds[])
21163+static int preallocate_pxds(pxd_t *pxds[])
21164 {
21165 int i;
21166 bool failed = false;
21167
21168- for(i = 0; i < PREALLOCATED_PMDS; i++) {
21169- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
21170- if (pmd == NULL)
21171+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
21172+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
21173+ if (pxd == NULL)
21174 failed = true;
21175- pmds[i] = pmd;
21176+ pxds[i] = pxd;
21177 }
21178
21179 if (failed) {
21180- free_pmds(pmds);
21181+ free_pxds(pxds);
21182 return -ENOMEM;
21183 }
21184
21185@@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[
21186 * preallocate which never got a corresponding vma will need to be
21187 * freed manually.
21188 */
21189-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
21190+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
21191 {
21192 int i;
21193
21194- for(i = 0; i < PREALLOCATED_PMDS; i++) {
21195+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
21196 pgd_t pgd = pgdp[i];
21197
21198 if (pgd_val(pgd) != 0) {
21199- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
21200+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
21201
21202- pgdp[i] = native_make_pgd(0);
21203+ set_pgd(pgdp + i, native_make_pgd(0));
21204
21205- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
21206- pmd_free(mm, pmd);
21207+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
21208+ pxd_free(mm, pxd);
21209 }
21210 }
21211 }
21212
21213-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
21214+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
21215 {
21216- pud_t *pud;
21217+ pyd_t *pyd;
21218 unsigned long addr;
21219 int i;
21220
21221- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
21222+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
21223 return;
21224
21225- pud = pud_offset(pgd, 0);
21226+#ifdef CONFIG_X86_64
21227+ pyd = pyd_offset(mm, 0L);
21228+#else
21229+ pyd = pyd_offset(pgd, 0L);
21230+#endif
21231
21232- for (addr = i = 0; i < PREALLOCATED_PMDS;
21233- i++, pud++, addr += PUD_SIZE) {
21234- pmd_t *pmd = pmds[i];
21235+ for (addr = i = 0; i < PREALLOCATED_PXDS;
21236+ i++, pyd++, addr += PYD_SIZE) {
21237+ pxd_t *pxd = pxds[i];
21238
21239 if (i >= KERNEL_PGD_BOUNDARY)
21240- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
21241- sizeof(pmd_t) * PTRS_PER_PMD);
21242+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
21243+ sizeof(pxd_t) * PTRS_PER_PMD);
21244
21245- pud_populate(mm, pud, pmd);
21246+ pyd_populate(mm, pyd, pxd);
21247 }
21248 }
21249
21250 pgd_t *pgd_alloc(struct mm_struct *mm)
21251 {
21252 pgd_t *pgd;
21253- pmd_t *pmds[PREALLOCATED_PMDS];
21254+ pxd_t *pxds[PREALLOCATED_PXDS];
21255
21256 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
21257
21258@@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
21259
21260 mm->pgd = pgd;
21261
21262- if (preallocate_pmds(pmds) != 0)
21263+ if (preallocate_pxds(pxds) != 0)
21264 goto out_free_pgd;
21265
21266 if (paravirt_pgd_alloc(mm) != 0)
21267- goto out_free_pmds;
21268+ goto out_free_pxds;
21269
21270 /*
21271 * Make sure that pre-populating the pmds is atomic with
21272@@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
21273 spin_lock(&pgd_lock);
21274
21275 pgd_ctor(mm, pgd);
21276- pgd_prepopulate_pmd(mm, pgd, pmds);
21277+ pgd_prepopulate_pxd(mm, pgd, pxds);
21278
21279 spin_unlock(&pgd_lock);
21280
21281 return pgd;
21282
21283-out_free_pmds:
21284- free_pmds(pmds);
21285+out_free_pxds:
21286+ free_pxds(pxds);
21287 out_free_pgd:
21288 free_page((unsigned long)pgd);
21289 out:
21290@@ -295,7 +344,7 @@ out:
21291
21292 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
21293 {
21294- pgd_mop_up_pmds(mm, pgd);
21295+ pgd_mop_up_pxds(mm, pgd);
21296 pgd_dtor(pgd);
21297 paravirt_pgd_free(mm, pgd);
21298 free_page((unsigned long)pgd);
21299diff -urNp linux-3.0.4/arch/x86/mm/setup_nx.c linux-3.0.4/arch/x86/mm/setup_nx.c
21300--- linux-3.0.4/arch/x86/mm/setup_nx.c 2011-07-21 22:17:23.000000000 -0400
21301+++ linux-3.0.4/arch/x86/mm/setup_nx.c 2011-08-23 21:47:55.000000000 -0400
21302@@ -5,8 +5,10 @@
21303 #include <asm/pgtable.h>
21304 #include <asm/proto.h>
21305
21306+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21307 static int disable_nx __cpuinitdata;
21308
21309+#ifndef CONFIG_PAX_PAGEEXEC
21310 /*
21311 * noexec = on|off
21312 *
21313@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str
21314 return 0;
21315 }
21316 early_param("noexec", noexec_setup);
21317+#endif
21318+
21319+#endif
21320
21321 void __cpuinit x86_configure_nx(void)
21322 {
21323+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21324 if (cpu_has_nx && !disable_nx)
21325 __supported_pte_mask |= _PAGE_NX;
21326 else
21327+#endif
21328 __supported_pte_mask &= ~_PAGE_NX;
21329 }
21330
21331diff -urNp linux-3.0.4/arch/x86/mm/tlb.c linux-3.0.4/arch/x86/mm/tlb.c
21332--- linux-3.0.4/arch/x86/mm/tlb.c 2011-07-21 22:17:23.000000000 -0400
21333+++ linux-3.0.4/arch/x86/mm/tlb.c 2011-08-23 21:47:55.000000000 -0400
21334@@ -65,7 +65,11 @@ void leave_mm(int cpu)
21335 BUG();
21336 cpumask_clear_cpu(cpu,
21337 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
21338+
21339+#ifndef CONFIG_PAX_PER_CPU_PGD
21340 load_cr3(swapper_pg_dir);
21341+#endif
21342+
21343 }
21344 EXPORT_SYMBOL_GPL(leave_mm);
21345
21346diff -urNp linux-3.0.4/arch/x86/net/bpf_jit_comp.c linux-3.0.4/arch/x86/net/bpf_jit_comp.c
21347--- linux-3.0.4/arch/x86/net/bpf_jit_comp.c 2011-07-21 22:17:23.000000000 -0400
21348+++ linux-3.0.4/arch/x86/net/bpf_jit_comp.c 2011-08-23 21:47:55.000000000 -0400
21349@@ -589,7 +589,9 @@ cond_branch: f_offset = addrs[i + filt
21350 module_free(NULL, image);
21351 return;
21352 }
21353+ pax_open_kernel();
21354 memcpy(image + proglen, temp, ilen);
21355+ pax_close_kernel();
21356 }
21357 proglen += ilen;
21358 addrs[i] = proglen;
21359@@ -609,7 +611,7 @@ cond_branch: f_offset = addrs[i + filt
21360 break;
21361 }
21362 if (proglen == oldproglen) {
21363- image = module_alloc(max_t(unsigned int,
21364+ image = module_alloc_exec(max_t(unsigned int,
21365 proglen,
21366 sizeof(struct work_struct)));
21367 if (!image)
21368diff -urNp linux-3.0.4/arch/x86/net/bpf_jit.S linux-3.0.4/arch/x86/net/bpf_jit.S
21369--- linux-3.0.4/arch/x86/net/bpf_jit.S 2011-07-21 22:17:23.000000000 -0400
21370+++ linux-3.0.4/arch/x86/net/bpf_jit.S 2011-10-07 19:07:28.000000000 -0400
21371@@ -9,6 +9,7 @@
21372 */
21373 #include <linux/linkage.h>
21374 #include <asm/dwarf2.h>
21375+#include <asm/alternative-asm.h>
21376
21377 /*
21378 * Calling convention :
21379@@ -35,6 +36,7 @@ sk_load_word:
21380 jle bpf_slow_path_word
21381 mov (SKBDATA,%rsi),%eax
21382 bswap %eax /* ntohl() */
21383+ pax_force_retaddr
21384 ret
21385
21386
21387@@ -53,6 +55,7 @@ sk_load_half:
21388 jle bpf_slow_path_half
21389 movzwl (SKBDATA,%rsi),%eax
21390 rol $8,%ax # ntohs()
21391+ pax_force_retaddr
21392 ret
21393
21394 sk_load_byte_ind:
21395@@ -66,6 +69,7 @@ sk_load_byte:
21396 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
21397 jle bpf_slow_path_byte
21398 movzbl (SKBDATA,%rsi),%eax
21399+ pax_force_retaddr
21400 ret
21401
21402 /**
21403@@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
21404 movzbl (SKBDATA,%rsi),%ebx
21405 and $15,%bl
21406 shl $2,%bl
21407+ pax_force_retaddr
21408 ret
21409 CFI_ENDPROC
21410 ENDPROC(sk_load_byte_msh)
21411@@ -91,6 +96,7 @@ bpf_error:
21412 xor %eax,%eax
21413 mov -8(%rbp),%rbx
21414 leaveq
21415+ pax_force_retaddr
21416 ret
21417
21418 /* rsi contains offset and can be scratched */
21419@@ -113,6 +119,7 @@ bpf_slow_path_word:
21420 js bpf_error
21421 mov -12(%rbp),%eax
21422 bswap %eax
21423+ pax_force_retaddr
21424 ret
21425
21426 bpf_slow_path_half:
21427@@ -121,12 +128,14 @@ bpf_slow_path_half:
21428 mov -12(%rbp),%ax
21429 rol $8,%ax
21430 movzwl %ax,%eax
21431+ pax_force_retaddr
21432 ret
21433
21434 bpf_slow_path_byte:
21435 bpf_slow_path_common(1)
21436 js bpf_error
21437 movzbl -12(%rbp),%eax
21438+ pax_force_retaddr
21439 ret
21440
21441 bpf_slow_path_byte_msh:
21442@@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
21443 and $15,%al
21444 shl $2,%al
21445 xchg %eax,%ebx
21446+ pax_force_retaddr
21447 ret
21448diff -urNp linux-3.0.4/arch/x86/oprofile/backtrace.c linux-3.0.4/arch/x86/oprofile/backtrace.c
21449--- linux-3.0.4/arch/x86/oprofile/backtrace.c 2011-09-02 18:11:21.000000000 -0400
21450+++ linux-3.0.4/arch/x86/oprofile/backtrace.c 2011-10-06 04:17:55.000000000 -0400
21451@@ -83,11 +83,11 @@ dump_user_backtrace_32(struct stack_fram
21452 struct stack_frame_ia32 *fp;
21453 unsigned long bytes;
21454
21455- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
21456+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
21457 if (bytes != sizeof(bufhead))
21458 return NULL;
21459
21460- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
21461+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
21462
21463 oprofile_add_trace(bufhead[0].return_address);
21464
21465@@ -129,7 +129,7 @@ static struct stack_frame *dump_user_bac
21466 struct stack_frame bufhead[2];
21467 unsigned long bytes;
21468
21469- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
21470+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
21471 if (bytes != sizeof(bufhead))
21472 return NULL;
21473
21474@@ -148,7 +148,7 @@ x86_backtrace(struct pt_regs * const reg
21475 {
21476 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
21477
21478- if (!user_mode_vm(regs)) {
21479+ if (!user_mode(regs)) {
21480 unsigned long stack = kernel_stack_pointer(regs);
21481 if (depth)
21482 dump_trace(NULL, regs, (unsigned long *)stack, 0,
21483diff -urNp linux-3.0.4/arch/x86/pci/mrst.c linux-3.0.4/arch/x86/pci/mrst.c
21484--- linux-3.0.4/arch/x86/pci/mrst.c 2011-07-21 22:17:23.000000000 -0400
21485+++ linux-3.0.4/arch/x86/pci/mrst.c 2011-08-23 21:47:55.000000000 -0400
21486@@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
21487 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
21488 pci_mmcfg_late_init();
21489 pcibios_enable_irq = mrst_pci_irq_enable;
21490- pci_root_ops = pci_mrst_ops;
21491+ pax_open_kernel();
21492+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
21493+ pax_close_kernel();
21494 /* Continue with standard init */
21495 return 1;
21496 }
21497diff -urNp linux-3.0.4/arch/x86/pci/pcbios.c linux-3.0.4/arch/x86/pci/pcbios.c
21498--- linux-3.0.4/arch/x86/pci/pcbios.c 2011-07-21 22:17:23.000000000 -0400
21499+++ linux-3.0.4/arch/x86/pci/pcbios.c 2011-08-23 21:47:55.000000000 -0400
21500@@ -79,50 +79,93 @@ union bios32 {
21501 static struct {
21502 unsigned long address;
21503 unsigned short segment;
21504-} bios32_indirect = { 0, __KERNEL_CS };
21505+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
21506
21507 /*
21508 * Returns the entry point for the given service, NULL on error
21509 */
21510
21511-static unsigned long bios32_service(unsigned long service)
21512+static unsigned long __devinit bios32_service(unsigned long service)
21513 {
21514 unsigned char return_code; /* %al */
21515 unsigned long address; /* %ebx */
21516 unsigned long length; /* %ecx */
21517 unsigned long entry; /* %edx */
21518 unsigned long flags;
21519+ struct desc_struct d, *gdt;
21520
21521 local_irq_save(flags);
21522- __asm__("lcall *(%%edi); cld"
21523+
21524+ gdt = get_cpu_gdt_table(smp_processor_id());
21525+
21526+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
21527+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
21528+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
21529+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
21530+
21531+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
21532 : "=a" (return_code),
21533 "=b" (address),
21534 "=c" (length),
21535 "=d" (entry)
21536 : "0" (service),
21537 "1" (0),
21538- "D" (&bios32_indirect));
21539+ "D" (&bios32_indirect),
21540+ "r"(__PCIBIOS_DS)
21541+ : "memory");
21542+
21543+ pax_open_kernel();
21544+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
21545+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
21546+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
21547+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
21548+ pax_close_kernel();
21549+
21550 local_irq_restore(flags);
21551
21552 switch (return_code) {
21553- case 0:
21554- return address + entry;
21555- case 0x80: /* Not present */
21556- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
21557- return 0;
21558- default: /* Shouldn't happen */
21559- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
21560- service, return_code);
21561+ case 0: {
21562+ int cpu;
21563+ unsigned char flags;
21564+
21565+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
21566+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
21567+ printk(KERN_WARNING "bios32_service: not valid\n");
21568 return 0;
21569+ }
21570+ address = address + PAGE_OFFSET;
21571+ length += 16UL; /* some BIOSs underreport this... */
21572+ flags = 4;
21573+ if (length >= 64*1024*1024) {
21574+ length >>= PAGE_SHIFT;
21575+ flags |= 8;
21576+ }
21577+
21578+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
21579+ gdt = get_cpu_gdt_table(cpu);
21580+ pack_descriptor(&d, address, length, 0x9b, flags);
21581+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
21582+ pack_descriptor(&d, address, length, 0x93, flags);
21583+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
21584+ }
21585+ return entry;
21586+ }
21587+ case 0x80: /* Not present */
21588+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
21589+ return 0;
21590+ default: /* Shouldn't happen */
21591+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
21592+ service, return_code);
21593+ return 0;
21594 }
21595 }
21596
21597 static struct {
21598 unsigned long address;
21599 unsigned short segment;
21600-} pci_indirect = { 0, __KERNEL_CS };
21601+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
21602
21603-static int pci_bios_present;
21604+static int pci_bios_present __read_only;
21605
21606 static int __devinit check_pcibios(void)
21607 {
21608@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
21609 unsigned long flags, pcibios_entry;
21610
21611 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
21612- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
21613+ pci_indirect.address = pcibios_entry;
21614
21615 local_irq_save(flags);
21616- __asm__(
21617- "lcall *(%%edi); cld\n\t"
21618+ __asm__("movw %w6, %%ds\n\t"
21619+ "lcall *%%ss:(%%edi); cld\n\t"
21620+ "push %%ss\n\t"
21621+ "pop %%ds\n\t"
21622 "jc 1f\n\t"
21623 "xor %%ah, %%ah\n"
21624 "1:"
21625@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
21626 "=b" (ebx),
21627 "=c" (ecx)
21628 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
21629- "D" (&pci_indirect)
21630+ "D" (&pci_indirect),
21631+ "r" (__PCIBIOS_DS)
21632 : "memory");
21633 local_irq_restore(flags);
21634
21635@@ -188,7 +234,10 @@ static int pci_bios_read(unsigned int se
21636
21637 switch (len) {
21638 case 1:
21639- __asm__("lcall *(%%esi); cld\n\t"
21640+ __asm__("movw %w6, %%ds\n\t"
21641+ "lcall *%%ss:(%%esi); cld\n\t"
21642+ "push %%ss\n\t"
21643+ "pop %%ds\n\t"
21644 "jc 1f\n\t"
21645 "xor %%ah, %%ah\n"
21646 "1:"
21647@@ -197,7 +246,8 @@ static int pci_bios_read(unsigned int se
21648 : "1" (PCIBIOS_READ_CONFIG_BYTE),
21649 "b" (bx),
21650 "D" ((long)reg),
21651- "S" (&pci_indirect));
21652+ "S" (&pci_indirect),
21653+ "r" (__PCIBIOS_DS));
21654 /*
21655 * Zero-extend the result beyond 8 bits, do not trust the
21656 * BIOS having done it:
21657@@ -205,7 +255,10 @@ static int pci_bios_read(unsigned int se
21658 *value &= 0xff;
21659 break;
21660 case 2:
21661- __asm__("lcall *(%%esi); cld\n\t"
21662+ __asm__("movw %w6, %%ds\n\t"
21663+ "lcall *%%ss:(%%esi); cld\n\t"
21664+ "push %%ss\n\t"
21665+ "pop %%ds\n\t"
21666 "jc 1f\n\t"
21667 "xor %%ah, %%ah\n"
21668 "1:"
21669@@ -214,7 +267,8 @@ static int pci_bios_read(unsigned int se
21670 : "1" (PCIBIOS_READ_CONFIG_WORD),
21671 "b" (bx),
21672 "D" ((long)reg),
21673- "S" (&pci_indirect));
21674+ "S" (&pci_indirect),
21675+ "r" (__PCIBIOS_DS));
21676 /*
21677 * Zero-extend the result beyond 16 bits, do not trust the
21678 * BIOS having done it:
21679@@ -222,7 +276,10 @@ static int pci_bios_read(unsigned int se
21680 *value &= 0xffff;
21681 break;
21682 case 4:
21683- __asm__("lcall *(%%esi); cld\n\t"
21684+ __asm__("movw %w6, %%ds\n\t"
21685+ "lcall *%%ss:(%%esi); cld\n\t"
21686+ "push %%ss\n\t"
21687+ "pop %%ds\n\t"
21688 "jc 1f\n\t"
21689 "xor %%ah, %%ah\n"
21690 "1:"
21691@@ -231,7 +288,8 @@ static int pci_bios_read(unsigned int se
21692 : "1" (PCIBIOS_READ_CONFIG_DWORD),
21693 "b" (bx),
21694 "D" ((long)reg),
21695- "S" (&pci_indirect));
21696+ "S" (&pci_indirect),
21697+ "r" (__PCIBIOS_DS));
21698 break;
21699 }
21700
21701@@ -254,7 +312,10 @@ static int pci_bios_write(unsigned int s
21702
21703 switch (len) {
21704 case 1:
21705- __asm__("lcall *(%%esi); cld\n\t"
21706+ __asm__("movw %w6, %%ds\n\t"
21707+ "lcall *%%ss:(%%esi); cld\n\t"
21708+ "push %%ss\n\t"
21709+ "pop %%ds\n\t"
21710 "jc 1f\n\t"
21711 "xor %%ah, %%ah\n"
21712 "1:"
21713@@ -263,10 +324,14 @@ static int pci_bios_write(unsigned int s
21714 "c" (value),
21715 "b" (bx),
21716 "D" ((long)reg),
21717- "S" (&pci_indirect));
21718+ "S" (&pci_indirect),
21719+ "r" (__PCIBIOS_DS));
21720 break;
21721 case 2:
21722- __asm__("lcall *(%%esi); cld\n\t"
21723+ __asm__("movw %w6, %%ds\n\t"
21724+ "lcall *%%ss:(%%esi); cld\n\t"
21725+ "push %%ss\n\t"
21726+ "pop %%ds\n\t"
21727 "jc 1f\n\t"
21728 "xor %%ah, %%ah\n"
21729 "1:"
21730@@ -275,10 +340,14 @@ static int pci_bios_write(unsigned int s
21731 "c" (value),
21732 "b" (bx),
21733 "D" ((long)reg),
21734- "S" (&pci_indirect));
21735+ "S" (&pci_indirect),
21736+ "r" (__PCIBIOS_DS));
21737 break;
21738 case 4:
21739- __asm__("lcall *(%%esi); cld\n\t"
21740+ __asm__("movw %w6, %%ds\n\t"
21741+ "lcall *%%ss:(%%esi); cld\n\t"
21742+ "push %%ss\n\t"
21743+ "pop %%ds\n\t"
21744 "jc 1f\n\t"
21745 "xor %%ah, %%ah\n"
21746 "1:"
21747@@ -287,7 +356,8 @@ static int pci_bios_write(unsigned int s
21748 "c" (value),
21749 "b" (bx),
21750 "D" ((long)reg),
21751- "S" (&pci_indirect));
21752+ "S" (&pci_indirect),
21753+ "r" (__PCIBIOS_DS));
21754 break;
21755 }
21756
21757@@ -392,10 +462,13 @@ struct irq_routing_table * pcibios_get_i
21758
21759 DBG("PCI: Fetching IRQ routing table... ");
21760 __asm__("push %%es\n\t"
21761+ "movw %w8, %%ds\n\t"
21762 "push %%ds\n\t"
21763 "pop %%es\n\t"
21764- "lcall *(%%esi); cld\n\t"
21765+ "lcall *%%ss:(%%esi); cld\n\t"
21766 "pop %%es\n\t"
21767+ "push %%ss\n\t"
21768+ "pop %%ds\n"
21769 "jc 1f\n\t"
21770 "xor %%ah, %%ah\n"
21771 "1:"
21772@@ -406,7 +479,8 @@ struct irq_routing_table * pcibios_get_i
21773 "1" (0),
21774 "D" ((long) &opt),
21775 "S" (&pci_indirect),
21776- "m" (opt)
21777+ "m" (opt),
21778+ "r" (__PCIBIOS_DS)
21779 : "memory");
21780 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
21781 if (ret & 0xff00)
21782@@ -430,7 +504,10 @@ int pcibios_set_irq_routing(struct pci_d
21783 {
21784 int ret;
21785
21786- __asm__("lcall *(%%esi); cld\n\t"
21787+ __asm__("movw %w5, %%ds\n\t"
21788+ "lcall *%%ss:(%%esi); cld\n\t"
21789+ "push %%ss\n\t"
21790+ "pop %%ds\n"
21791 "jc 1f\n\t"
21792 "xor %%ah, %%ah\n"
21793 "1:"
21794@@ -438,7 +515,8 @@ int pcibios_set_irq_routing(struct pci_d
21795 : "0" (PCIBIOS_SET_PCI_HW_INT),
21796 "b" ((dev->bus->number << 8) | dev->devfn),
21797 "c" ((irq << 8) | (pin + 10)),
21798- "S" (&pci_indirect));
21799+ "S" (&pci_indirect),
21800+ "r" (__PCIBIOS_DS));
21801 return !(ret & 0xff00);
21802 }
21803 EXPORT_SYMBOL(pcibios_set_irq_routing);
21804diff -urNp linux-3.0.4/arch/x86/platform/efi/efi_32.c linux-3.0.4/arch/x86/platform/efi/efi_32.c
21805--- linux-3.0.4/arch/x86/platform/efi/efi_32.c 2011-07-21 22:17:23.000000000 -0400
21806+++ linux-3.0.4/arch/x86/platform/efi/efi_32.c 2011-10-06 04:17:55.000000000 -0400
21807@@ -38,70 +38,56 @@
21808 */
21809
21810 static unsigned long efi_rt_eflags;
21811-static pgd_t efi_bak_pg_dir_pointer[2];
21812+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
21813
21814-void efi_call_phys_prelog(void)
21815+void __init efi_call_phys_prelog(void)
21816 {
21817- unsigned long cr4;
21818- unsigned long temp;
21819 struct desc_ptr gdt_descr;
21820
21821- local_irq_save(efi_rt_eflags);
21822+#ifdef CONFIG_PAX_KERNEXEC
21823+ struct desc_struct d;
21824+#endif
21825
21826- /*
21827- * If I don't have PAE, I should just duplicate two entries in page
21828- * directory. If I have PAE, I just need to duplicate one entry in
21829- * page directory.
21830- */
21831- cr4 = read_cr4_safe();
21832+ local_irq_save(efi_rt_eflags);
21833
21834- if (cr4 & X86_CR4_PAE) {
21835- efi_bak_pg_dir_pointer[0].pgd =
21836- swapper_pg_dir[pgd_index(0)].pgd;
21837- swapper_pg_dir[0].pgd =
21838- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
21839- } else {
21840- efi_bak_pg_dir_pointer[0].pgd =
21841- swapper_pg_dir[pgd_index(0)].pgd;
21842- efi_bak_pg_dir_pointer[1].pgd =
21843- swapper_pg_dir[pgd_index(0x400000)].pgd;
21844- swapper_pg_dir[pgd_index(0)].pgd =
21845- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
21846- temp = PAGE_OFFSET + 0x400000;
21847- swapper_pg_dir[pgd_index(0x400000)].pgd =
21848- swapper_pg_dir[pgd_index(temp)].pgd;
21849- }
21850+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
21851+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21852+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
21853
21854 /*
21855 * After the lock is released, the original page table is restored.
21856 */
21857 __flush_tlb_all();
21858
21859+#ifdef CONFIG_PAX_KERNEXEC
21860+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
21861+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
21862+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
21863+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
21864+#endif
21865+
21866 gdt_descr.address = __pa(get_cpu_gdt_table(0));
21867 gdt_descr.size = GDT_SIZE - 1;
21868 load_gdt(&gdt_descr);
21869 }
21870
21871-void efi_call_phys_epilog(void)
21872+void __init efi_call_phys_epilog(void)
21873 {
21874- unsigned long cr4;
21875 struct desc_ptr gdt_descr;
21876
21877+#ifdef CONFIG_PAX_KERNEXEC
21878+ struct desc_struct d;
21879+
21880+ memset(&d, 0, sizeof d);
21881+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
21882+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
21883+#endif
21884+
21885 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
21886 gdt_descr.size = GDT_SIZE - 1;
21887 load_gdt(&gdt_descr);
21888
21889- cr4 = read_cr4_safe();
21890-
21891- if (cr4 & X86_CR4_PAE) {
21892- swapper_pg_dir[pgd_index(0)].pgd =
21893- efi_bak_pg_dir_pointer[0].pgd;
21894- } else {
21895- swapper_pg_dir[pgd_index(0)].pgd =
21896- efi_bak_pg_dir_pointer[0].pgd;
21897- swapper_pg_dir[pgd_index(0x400000)].pgd =
21898- efi_bak_pg_dir_pointer[1].pgd;
21899- }
21900+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
21901
21902 /*
21903 * After the lock is released, the original page table is restored.
21904diff -urNp linux-3.0.4/arch/x86/platform/efi/efi_stub_32.S linux-3.0.4/arch/x86/platform/efi/efi_stub_32.S
21905--- linux-3.0.4/arch/x86/platform/efi/efi_stub_32.S 2011-07-21 22:17:23.000000000 -0400
21906+++ linux-3.0.4/arch/x86/platform/efi/efi_stub_32.S 2011-09-19 09:16:58.000000000 -0400
21907@@ -6,7 +6,9 @@
21908 */
21909
21910 #include <linux/linkage.h>
21911+#include <linux/init.h>
21912 #include <asm/page_types.h>
21913+#include <asm/segment.h>
21914
21915 /*
21916 * efi_call_phys(void *, ...) is a function with variable parameters.
21917@@ -20,7 +22,7 @@
21918 * service functions will comply with gcc calling convention, too.
21919 */
21920
21921-.text
21922+__INIT
21923 ENTRY(efi_call_phys)
21924 /*
21925 * 0. The function can only be called in Linux kernel. So CS has been
21926@@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
21927 * The mapping of lower virtual memory has been created in prelog and
21928 * epilog.
21929 */
21930- movl $1f, %edx
21931- subl $__PAGE_OFFSET, %edx
21932- jmp *%edx
21933+ movl $(__KERNEXEC_EFI_DS), %edx
21934+ mov %edx, %ds
21935+ mov %edx, %es
21936+ mov %edx, %ss
21937+ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
21938 1:
21939
21940 /*
21941@@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
21942 * parameter 2, ..., param n. To make things easy, we save the return
21943 * address of efi_call_phys in a global variable.
21944 */
21945- popl %edx
21946- movl %edx, saved_return_addr
21947- /* get the function pointer into ECX*/
21948- popl %ecx
21949- movl %ecx, efi_rt_function_ptr
21950- movl $2f, %edx
21951- subl $__PAGE_OFFSET, %edx
21952- pushl %edx
21953+ popl (saved_return_addr)
21954+ popl (efi_rt_function_ptr)
21955
21956 /*
21957 * 3. Clear PG bit in %CR0.
21958@@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
21959 /*
21960 * 5. Call the physical function.
21961 */
21962- jmp *%ecx
21963+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
21964
21965-2:
21966 /*
21967 * 6. After EFI runtime service returns, control will return to
21968 * following instruction. We'd better readjust stack pointer first.
21969@@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
21970 movl %cr0, %edx
21971 orl $0x80000000, %edx
21972 movl %edx, %cr0
21973- jmp 1f
21974-1:
21975+
21976 /*
21977 * 8. Now restore the virtual mode from flat mode by
21978 * adding EIP with PAGE_OFFSET.
21979 */
21980- movl $1f, %edx
21981- jmp *%edx
21982+ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
21983 1:
21984+ movl $(__KERNEL_DS), %edx
21985+ mov %edx, %ds
21986+ mov %edx, %es
21987+ mov %edx, %ss
21988
21989 /*
21990 * 9. Balance the stack. And because EAX contain the return value,
21991 * we'd better not clobber it.
21992 */
21993- leal efi_rt_function_ptr, %edx
21994- movl (%edx), %ecx
21995- pushl %ecx
21996+ pushl (efi_rt_function_ptr)
21997
21998 /*
21999- * 10. Push the saved return address onto the stack and return.
22000+ * 10. Return to the saved return address.
22001 */
22002- leal saved_return_addr, %edx
22003- movl (%edx), %ecx
22004- pushl %ecx
22005- ret
22006+ jmpl *(saved_return_addr)
22007 ENDPROC(efi_call_phys)
22008 .previous
22009
22010-.data
22011+__INITDATA
22012 saved_return_addr:
22013 .long 0
22014 efi_rt_function_ptr:
22015diff -urNp linux-3.0.4/arch/x86/platform/efi/efi_stub_64.S linux-3.0.4/arch/x86/platform/efi/efi_stub_64.S
22016--- linux-3.0.4/arch/x86/platform/efi/efi_stub_64.S 2011-07-21 22:17:23.000000000 -0400
22017+++ linux-3.0.4/arch/x86/platform/efi/efi_stub_64.S 2011-10-06 04:17:55.000000000 -0400
22018@@ -7,6 +7,7 @@
22019 */
22020
22021 #include <linux/linkage.h>
22022+#include <asm/alternative-asm.h>
22023
22024 #define SAVE_XMM \
22025 mov %rsp, %rax; \
22026@@ -40,6 +41,7 @@ ENTRY(efi_call0)
22027 call *%rdi
22028 addq $32, %rsp
22029 RESTORE_XMM
22030+ pax_force_retaddr
22031 ret
22032 ENDPROC(efi_call0)
22033
22034@@ -50,6 +52,7 @@ ENTRY(efi_call1)
22035 call *%rdi
22036 addq $32, %rsp
22037 RESTORE_XMM
22038+ pax_force_retaddr
22039 ret
22040 ENDPROC(efi_call1)
22041
22042@@ -60,6 +63,7 @@ ENTRY(efi_call2)
22043 call *%rdi
22044 addq $32, %rsp
22045 RESTORE_XMM
22046+ pax_force_retaddr
22047 ret
22048 ENDPROC(efi_call2)
22049
22050@@ -71,6 +75,7 @@ ENTRY(efi_call3)
22051 call *%rdi
22052 addq $32, %rsp
22053 RESTORE_XMM
22054+ pax_force_retaddr
22055 ret
22056 ENDPROC(efi_call3)
22057
22058@@ -83,6 +88,7 @@ ENTRY(efi_call4)
22059 call *%rdi
22060 addq $32, %rsp
22061 RESTORE_XMM
22062+ pax_force_retaddr
22063 ret
22064 ENDPROC(efi_call4)
22065
22066@@ -96,6 +102,7 @@ ENTRY(efi_call5)
22067 call *%rdi
22068 addq $48, %rsp
22069 RESTORE_XMM
22070+ pax_force_retaddr
22071 ret
22072 ENDPROC(efi_call5)
22073
22074@@ -112,5 +119,6 @@ ENTRY(efi_call6)
22075 call *%rdi
22076 addq $48, %rsp
22077 RESTORE_XMM
22078+ pax_force_retaddr
22079 ret
22080 ENDPROC(efi_call6)
22081diff -urNp linux-3.0.4/arch/x86/platform/mrst/mrst.c linux-3.0.4/arch/x86/platform/mrst/mrst.c
22082--- linux-3.0.4/arch/x86/platform/mrst/mrst.c 2011-07-21 22:17:23.000000000 -0400
22083+++ linux-3.0.4/arch/x86/platform/mrst/mrst.c 2011-08-23 21:47:55.000000000 -0400
22084@@ -239,14 +239,16 @@ static int mrst_i8042_detect(void)
22085 }
22086
22087 /* Reboot and power off are handled by the SCU on a MID device */
22088-static void mrst_power_off(void)
22089+static __noreturn void mrst_power_off(void)
22090 {
22091 intel_scu_ipc_simple_command(0xf1, 1);
22092+ BUG();
22093 }
22094
22095-static void mrst_reboot(void)
22096+static __noreturn void mrst_reboot(void)
22097 {
22098 intel_scu_ipc_simple_command(0xf1, 0);
22099+ BUG();
22100 }
22101
22102 /*
22103diff -urNp linux-3.0.4/arch/x86/platform/uv/tlb_uv.c linux-3.0.4/arch/x86/platform/uv/tlb_uv.c
22104--- linux-3.0.4/arch/x86/platform/uv/tlb_uv.c 2011-07-21 22:17:23.000000000 -0400
22105+++ linux-3.0.4/arch/x86/platform/uv/tlb_uv.c 2011-08-23 21:48:14.000000000 -0400
22106@@ -373,6 +373,8 @@ static void reset_with_ipi(struct bau_ta
22107 cpumask_t mask;
22108 struct reset_args reset_args;
22109
22110+ pax_track_stack();
22111+
22112 reset_args.sender = sender;
22113 cpus_clear(mask);
22114 /* find a single cpu for each uvhub in this distribution mask */
22115diff -urNp linux-3.0.4/arch/x86/power/cpu.c linux-3.0.4/arch/x86/power/cpu.c
22116--- linux-3.0.4/arch/x86/power/cpu.c 2011-07-21 22:17:23.000000000 -0400
22117+++ linux-3.0.4/arch/x86/power/cpu.c 2011-08-23 21:47:55.000000000 -0400
22118@@ -130,7 +130,7 @@ static void do_fpu_end(void)
22119 static void fix_processor_context(void)
22120 {
22121 int cpu = smp_processor_id();
22122- struct tss_struct *t = &per_cpu(init_tss, cpu);
22123+ struct tss_struct *t = init_tss + cpu;
22124
22125 set_tss_desc(cpu, t); /*
22126 * This just modifies memory; should not be
22127@@ -140,7 +140,9 @@ static void fix_processor_context(void)
22128 */
22129
22130 #ifdef CONFIG_X86_64
22131+ pax_open_kernel();
22132 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
22133+ pax_close_kernel();
22134
22135 syscall_init(); /* This sets MSR_*STAR and related */
22136 #endif
22137diff -urNp linux-3.0.4/arch/x86/vdso/Makefile linux-3.0.4/arch/x86/vdso/Makefile
22138--- linux-3.0.4/arch/x86/vdso/Makefile 2011-07-21 22:17:23.000000000 -0400
22139+++ linux-3.0.4/arch/x86/vdso/Makefile 2011-08-23 21:47:55.000000000 -0400
22140@@ -136,7 +136,7 @@ quiet_cmd_vdso = VDSO $@
22141 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
22142 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
22143
22144-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
22145+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
22146 GCOV_PROFILE := n
22147
22148 #
22149diff -urNp linux-3.0.4/arch/x86/vdso/vdso32-setup.c linux-3.0.4/arch/x86/vdso/vdso32-setup.c
22150--- linux-3.0.4/arch/x86/vdso/vdso32-setup.c 2011-07-21 22:17:23.000000000 -0400
22151+++ linux-3.0.4/arch/x86/vdso/vdso32-setup.c 2011-08-23 21:47:55.000000000 -0400
22152@@ -25,6 +25,7 @@
22153 #include <asm/tlbflush.h>
22154 #include <asm/vdso.h>
22155 #include <asm/proto.h>
22156+#include <asm/mman.h>
22157
22158 enum {
22159 VDSO_DISABLED = 0,
22160@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
22161 void enable_sep_cpu(void)
22162 {
22163 int cpu = get_cpu();
22164- struct tss_struct *tss = &per_cpu(init_tss, cpu);
22165+ struct tss_struct *tss = init_tss + cpu;
22166
22167 if (!boot_cpu_has(X86_FEATURE_SEP)) {
22168 put_cpu();
22169@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
22170 gate_vma.vm_start = FIXADDR_USER_START;
22171 gate_vma.vm_end = FIXADDR_USER_END;
22172 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
22173- gate_vma.vm_page_prot = __P101;
22174+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
22175 /*
22176 * Make sure the vDSO gets into every core dump.
22177 * Dumping its contents makes post-mortem fully interpretable later
22178@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
22179 if (compat)
22180 addr = VDSO_HIGH_BASE;
22181 else {
22182- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
22183+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
22184 if (IS_ERR_VALUE(addr)) {
22185 ret = addr;
22186 goto up_fail;
22187 }
22188 }
22189
22190- current->mm->context.vdso = (void *)addr;
22191+ current->mm->context.vdso = addr;
22192
22193 if (compat_uses_vma || !compat) {
22194 /*
22195@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
22196 }
22197
22198 current_thread_info()->sysenter_return =
22199- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
22200+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
22201
22202 up_fail:
22203 if (ret)
22204- current->mm->context.vdso = NULL;
22205+ current->mm->context.vdso = 0;
22206
22207 up_write(&mm->mmap_sem);
22208
22209@@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
22210
22211 const char *arch_vma_name(struct vm_area_struct *vma)
22212 {
22213- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
22214+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
22215 return "[vdso]";
22216+
22217+#ifdef CONFIG_PAX_SEGMEXEC
22218+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
22219+ return "[vdso]";
22220+#endif
22221+
22222 return NULL;
22223 }
22224
22225@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
22226 * Check to see if the corresponding task was created in compat vdso
22227 * mode.
22228 */
22229- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
22230+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
22231 return &gate_vma;
22232 return NULL;
22233 }
22234diff -urNp linux-3.0.4/arch/x86/vdso/vma.c linux-3.0.4/arch/x86/vdso/vma.c
22235--- linux-3.0.4/arch/x86/vdso/vma.c 2011-07-21 22:17:23.000000000 -0400
22236+++ linux-3.0.4/arch/x86/vdso/vma.c 2011-08-23 21:47:55.000000000 -0400
22237@@ -15,18 +15,19 @@
22238 #include <asm/proto.h>
22239 #include <asm/vdso.h>
22240
22241-unsigned int __read_mostly vdso_enabled = 1;
22242-
22243 extern char vdso_start[], vdso_end[];
22244 extern unsigned short vdso_sync_cpuid;
22245+extern char __vsyscall_0;
22246
22247 static struct page **vdso_pages;
22248+static struct page *vsyscall_page;
22249 static unsigned vdso_size;
22250
22251 static int __init init_vdso_vars(void)
22252 {
22253- int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
22254- int i;
22255+ size_t nbytes = vdso_end - vdso_start;
22256+ size_t npages = (nbytes + PAGE_SIZE - 1) / PAGE_SIZE;
22257+ size_t i;
22258
22259 vdso_size = npages << PAGE_SHIFT;
22260 vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
22261@@ -34,19 +35,19 @@ static int __init init_vdso_vars(void)
22262 goto oom;
22263 for (i = 0; i < npages; i++) {
22264 struct page *p;
22265- p = alloc_page(GFP_KERNEL);
22266+ p = alloc_page(GFP_KERNEL | __GFP_ZERO);
22267 if (!p)
22268 goto oom;
22269 vdso_pages[i] = p;
22270- copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
22271+ memcpy(page_address(p), vdso_start + i*PAGE_SIZE, nbytes > PAGE_SIZE ? PAGE_SIZE : nbytes);
22272+ nbytes -= PAGE_SIZE;
22273 }
22274+ vsyscall_page = pfn_to_page((__pa_symbol(&__vsyscall_0)) >> PAGE_SHIFT);
22275
22276 return 0;
22277
22278 oom:
22279- printk("Cannot allocate vdso\n");
22280- vdso_enabled = 0;
22281- return -ENOMEM;
22282+ panic("Cannot allocate vdso\n");
22283 }
22284 subsys_initcall(init_vdso_vars);
22285
22286@@ -80,37 +81,35 @@ int arch_setup_additional_pages(struct l
22287 unsigned long addr;
22288 int ret;
22289
22290- if (!vdso_enabled)
22291- return 0;
22292-
22293 down_write(&mm->mmap_sem);
22294- addr = vdso_addr(mm->start_stack, vdso_size);
22295- addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
22296+ addr = vdso_addr(mm->start_stack, vdso_size + PAGE_SIZE);
22297+ addr = get_unmapped_area(NULL, addr, vdso_size + PAGE_SIZE, 0, 0);
22298 if (IS_ERR_VALUE(addr)) {
22299 ret = addr;
22300 goto up_fail;
22301 }
22302
22303- current->mm->context.vdso = (void *)addr;
22304+ mm->context.vdso = addr + PAGE_SIZE;
22305
22306- ret = install_special_mapping(mm, addr, vdso_size,
22307+ ret = install_special_mapping(mm, addr, PAGE_SIZE,
22308 VM_READ|VM_EXEC|
22309- VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
22310+ VM_MAYREAD|VM_MAYEXEC|
22311 VM_ALWAYSDUMP,
22312- vdso_pages);
22313+ &vsyscall_page);
22314 if (ret) {
22315- current->mm->context.vdso = NULL;
22316+ mm->context.vdso = 0;
22317 goto up_fail;
22318 }
22319
22320+ ret = install_special_mapping(mm, addr + PAGE_SIZE, vdso_size,
22321+ VM_READ|VM_EXEC|
22322+ VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
22323+ VM_ALWAYSDUMP,
22324+ vdso_pages);
22325+ if (ret)
22326+ mm->context.vdso = 0;
22327+
22328 up_fail:
22329 up_write(&mm->mmap_sem);
22330 return ret;
22331 }
22332-
22333-static __init int vdso_setup(char *s)
22334-{
22335- vdso_enabled = simple_strtoul(s, NULL, 0);
22336- return 0;
22337-}
22338-__setup("vdso=", vdso_setup);
22339diff -urNp linux-3.0.4/arch/x86/xen/enlighten.c linux-3.0.4/arch/x86/xen/enlighten.c
22340--- linux-3.0.4/arch/x86/xen/enlighten.c 2011-09-02 18:11:26.000000000 -0400
22341+++ linux-3.0.4/arch/x86/xen/enlighten.c 2011-08-29 23:26:21.000000000 -0400
22342@@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
22343
22344 struct shared_info xen_dummy_shared_info;
22345
22346-void *xen_initial_gdt;
22347-
22348 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
22349 __read_mostly int xen_have_vector_callback;
22350 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
22351@@ -1010,7 +1008,7 @@ static const struct pv_apic_ops xen_apic
22352 #endif
22353 };
22354
22355-static void xen_reboot(int reason)
22356+static __noreturn void xen_reboot(int reason)
22357 {
22358 struct sched_shutdown r = { .reason = reason };
22359
22360@@ -1018,17 +1016,17 @@ static void xen_reboot(int reason)
22361 BUG();
22362 }
22363
22364-static void xen_restart(char *msg)
22365+static __noreturn void xen_restart(char *msg)
22366 {
22367 xen_reboot(SHUTDOWN_reboot);
22368 }
22369
22370-static void xen_emergency_restart(void)
22371+static __noreturn void xen_emergency_restart(void)
22372 {
22373 xen_reboot(SHUTDOWN_reboot);
22374 }
22375
22376-static void xen_machine_halt(void)
22377+static __noreturn void xen_machine_halt(void)
22378 {
22379 xen_reboot(SHUTDOWN_poweroff);
22380 }
22381@@ -1134,7 +1132,17 @@ asmlinkage void __init xen_start_kernel(
22382 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
22383
22384 /* Work out if we support NX */
22385- x86_configure_nx();
22386+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
22387+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
22388+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
22389+ unsigned l, h;
22390+
22391+ __supported_pte_mask |= _PAGE_NX;
22392+ rdmsr(MSR_EFER, l, h);
22393+ l |= EFER_NX;
22394+ wrmsr(MSR_EFER, l, h);
22395+ }
22396+#endif
22397
22398 xen_setup_features();
22399
22400@@ -1165,13 +1173,6 @@ asmlinkage void __init xen_start_kernel(
22401
22402 machine_ops = xen_machine_ops;
22403
22404- /*
22405- * The only reliable way to retain the initial address of the
22406- * percpu gdt_page is to remember it here, so we can go and
22407- * mark it RW later, when the initial percpu area is freed.
22408- */
22409- xen_initial_gdt = &per_cpu(gdt_page, 0);
22410-
22411 xen_smp_init();
22412
22413 #ifdef CONFIG_ACPI_NUMA
22414diff -urNp linux-3.0.4/arch/x86/xen/mmu.c linux-3.0.4/arch/x86/xen/mmu.c
22415--- linux-3.0.4/arch/x86/xen/mmu.c 2011-09-02 18:11:26.000000000 -0400
22416+++ linux-3.0.4/arch/x86/xen/mmu.c 2011-08-29 23:26:21.000000000 -0400
22417@@ -1683,6 +1683,8 @@ pgd_t * __init xen_setup_kernel_pagetabl
22418 convert_pfn_mfn(init_level4_pgt);
22419 convert_pfn_mfn(level3_ident_pgt);
22420 convert_pfn_mfn(level3_kernel_pgt);
22421+ convert_pfn_mfn(level3_vmalloc_pgt);
22422+ convert_pfn_mfn(level3_vmemmap_pgt);
22423
22424 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
22425 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
22426@@ -1701,7 +1703,10 @@ pgd_t * __init xen_setup_kernel_pagetabl
22427 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
22428 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
22429 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
22430+ set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
22431+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
22432 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
22433+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
22434 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
22435 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
22436
22437@@ -1913,6 +1918,7 @@ static void __init xen_post_allocator_in
22438 pv_mmu_ops.set_pud = xen_set_pud;
22439 #if PAGETABLE_LEVELS == 4
22440 pv_mmu_ops.set_pgd = xen_set_pgd;
22441+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
22442 #endif
22443
22444 /* This will work as long as patching hasn't happened yet
22445@@ -1994,6 +2000,7 @@ static const struct pv_mmu_ops xen_mmu_o
22446 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
22447 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
22448 .set_pgd = xen_set_pgd_hyper,
22449+ .set_pgd_batched = xen_set_pgd_hyper,
22450
22451 .alloc_pud = xen_alloc_pmd_init,
22452 .release_pud = xen_release_pmd_init,
22453diff -urNp linux-3.0.4/arch/x86/xen/smp.c linux-3.0.4/arch/x86/xen/smp.c
22454--- linux-3.0.4/arch/x86/xen/smp.c 2011-09-02 18:11:26.000000000 -0400
22455+++ linux-3.0.4/arch/x86/xen/smp.c 2011-08-29 23:26:21.000000000 -0400
22456@@ -193,11 +193,6 @@ static void __init xen_smp_prepare_boot_
22457 {
22458 BUG_ON(smp_processor_id() != 0);
22459 native_smp_prepare_boot_cpu();
22460-
22461- /* We've switched to the "real" per-cpu gdt, so make sure the
22462- old memory can be recycled */
22463- make_lowmem_page_readwrite(xen_initial_gdt);
22464-
22465 xen_filter_cpu_maps();
22466 xen_setup_vcpu_info_placement();
22467 }
22468@@ -265,12 +260,12 @@ cpu_initialize_context(unsigned int cpu,
22469 gdt = get_cpu_gdt_table(cpu);
22470
22471 ctxt->flags = VGCF_IN_KERNEL;
22472- ctxt->user_regs.ds = __USER_DS;
22473- ctxt->user_regs.es = __USER_DS;
22474+ ctxt->user_regs.ds = __KERNEL_DS;
22475+ ctxt->user_regs.es = __KERNEL_DS;
22476 ctxt->user_regs.ss = __KERNEL_DS;
22477 #ifdef CONFIG_X86_32
22478 ctxt->user_regs.fs = __KERNEL_PERCPU;
22479- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
22480+ savesegment(gs, ctxt->user_regs.gs);
22481 #else
22482 ctxt->gs_base_kernel = per_cpu_offset(cpu);
22483 #endif
22484@@ -321,13 +316,12 @@ static int __cpuinit xen_cpu_up(unsigned
22485 int rc;
22486
22487 per_cpu(current_task, cpu) = idle;
22488+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
22489 #ifdef CONFIG_X86_32
22490 irq_ctx_init(cpu);
22491 #else
22492 clear_tsk_thread_flag(idle, TIF_FORK);
22493- per_cpu(kernel_stack, cpu) =
22494- (unsigned long)task_stack_page(idle) -
22495- KERNEL_STACK_OFFSET + THREAD_SIZE;
22496+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
22497 #endif
22498 xen_setup_runstate_info(cpu);
22499 xen_setup_timer(cpu);
22500diff -urNp linux-3.0.4/arch/x86/xen/xen-asm_32.S linux-3.0.4/arch/x86/xen/xen-asm_32.S
22501--- linux-3.0.4/arch/x86/xen/xen-asm_32.S 2011-07-21 22:17:23.000000000 -0400
22502+++ linux-3.0.4/arch/x86/xen/xen-asm_32.S 2011-08-23 21:47:55.000000000 -0400
22503@@ -83,14 +83,14 @@ ENTRY(xen_iret)
22504 ESP_OFFSET=4 # bytes pushed onto stack
22505
22506 /*
22507- * Store vcpu_info pointer for easy access. Do it this way to
22508- * avoid having to reload %fs
22509+ * Store vcpu_info pointer for easy access.
22510 */
22511 #ifdef CONFIG_SMP
22512- GET_THREAD_INFO(%eax)
22513- movl TI_cpu(%eax), %eax
22514- movl __per_cpu_offset(,%eax,4), %eax
22515- mov xen_vcpu(%eax), %eax
22516+ push %fs
22517+ mov $(__KERNEL_PERCPU), %eax
22518+ mov %eax, %fs
22519+ mov PER_CPU_VAR(xen_vcpu), %eax
22520+ pop %fs
22521 #else
22522 movl xen_vcpu, %eax
22523 #endif
22524diff -urNp linux-3.0.4/arch/x86/xen/xen-head.S linux-3.0.4/arch/x86/xen/xen-head.S
22525--- linux-3.0.4/arch/x86/xen/xen-head.S 2011-07-21 22:17:23.000000000 -0400
22526+++ linux-3.0.4/arch/x86/xen/xen-head.S 2011-08-23 21:47:55.000000000 -0400
22527@@ -19,6 +19,17 @@ ENTRY(startup_xen)
22528 #ifdef CONFIG_X86_32
22529 mov %esi,xen_start_info
22530 mov $init_thread_union+THREAD_SIZE,%esp
22531+#ifdef CONFIG_SMP
22532+ movl $cpu_gdt_table,%edi
22533+ movl $__per_cpu_load,%eax
22534+ movw %ax,__KERNEL_PERCPU + 2(%edi)
22535+ rorl $16,%eax
22536+ movb %al,__KERNEL_PERCPU + 4(%edi)
22537+ movb %ah,__KERNEL_PERCPU + 7(%edi)
22538+ movl $__per_cpu_end - 1,%eax
22539+ subl $__per_cpu_start,%eax
22540+ movw %ax,__KERNEL_PERCPU + 0(%edi)
22541+#endif
22542 #else
22543 mov %rsi,xen_start_info
22544 mov $init_thread_union+THREAD_SIZE,%rsp
22545diff -urNp linux-3.0.4/arch/x86/xen/xen-ops.h linux-3.0.4/arch/x86/xen/xen-ops.h
22546--- linux-3.0.4/arch/x86/xen/xen-ops.h 2011-09-02 18:11:21.000000000 -0400
22547+++ linux-3.0.4/arch/x86/xen/xen-ops.h 2011-08-23 21:47:55.000000000 -0400
22548@@ -10,8 +10,6 @@
22549 extern const char xen_hypervisor_callback[];
22550 extern const char xen_failsafe_callback[];
22551
22552-extern void *xen_initial_gdt;
22553-
22554 struct trap_info;
22555 void xen_copy_trap_info(struct trap_info *traps);
22556
22557diff -urNp linux-3.0.4/block/blk-iopoll.c linux-3.0.4/block/blk-iopoll.c
22558--- linux-3.0.4/block/blk-iopoll.c 2011-07-21 22:17:23.000000000 -0400
22559+++ linux-3.0.4/block/blk-iopoll.c 2011-08-23 21:47:55.000000000 -0400
22560@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
22561 }
22562 EXPORT_SYMBOL(blk_iopoll_complete);
22563
22564-static void blk_iopoll_softirq(struct softirq_action *h)
22565+static void blk_iopoll_softirq(void)
22566 {
22567 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
22568 int rearm = 0, budget = blk_iopoll_budget;
22569diff -urNp linux-3.0.4/block/blk-map.c linux-3.0.4/block/blk-map.c
22570--- linux-3.0.4/block/blk-map.c 2011-07-21 22:17:23.000000000 -0400
22571+++ linux-3.0.4/block/blk-map.c 2011-08-23 21:47:55.000000000 -0400
22572@@ -301,7 +301,7 @@ int blk_rq_map_kern(struct request_queue
22573 if (!len || !kbuf)
22574 return -EINVAL;
22575
22576- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
22577+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
22578 if (do_copy)
22579 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
22580 else
22581diff -urNp linux-3.0.4/block/blk-softirq.c linux-3.0.4/block/blk-softirq.c
22582--- linux-3.0.4/block/blk-softirq.c 2011-07-21 22:17:23.000000000 -0400
22583+++ linux-3.0.4/block/blk-softirq.c 2011-08-23 21:47:55.000000000 -0400
22584@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
22585 * Softirq action handler - move entries to local list and loop over them
22586 * while passing them to the queue registered handler.
22587 */
22588-static void blk_done_softirq(struct softirq_action *h)
22589+static void blk_done_softirq(void)
22590 {
22591 struct list_head *cpu_list, local_list;
22592
22593diff -urNp linux-3.0.4/block/bsg.c linux-3.0.4/block/bsg.c
22594--- linux-3.0.4/block/bsg.c 2011-07-21 22:17:23.000000000 -0400
22595+++ linux-3.0.4/block/bsg.c 2011-10-06 04:17:55.000000000 -0400
22596@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
22597 struct sg_io_v4 *hdr, struct bsg_device *bd,
22598 fmode_t has_write_perm)
22599 {
22600+ unsigned char tmpcmd[sizeof(rq->__cmd)];
22601+ unsigned char *cmdptr;
22602+
22603 if (hdr->request_len > BLK_MAX_CDB) {
22604 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
22605 if (!rq->cmd)
22606 return -ENOMEM;
22607- }
22608+ cmdptr = rq->cmd;
22609+ } else
22610+ cmdptr = tmpcmd;
22611
22612- if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
22613+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
22614 hdr->request_len))
22615 return -EFAULT;
22616
22617+ if (cmdptr != rq->cmd)
22618+ memcpy(rq->cmd, cmdptr, hdr->request_len);
22619+
22620 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
22621 if (blk_verify_command(rq->cmd, has_write_perm))
22622 return -EPERM;
22623@@ -249,7 +257,7 @@ bsg_map_hdr(struct bsg_device *bd, struc
22624 struct request *rq, *next_rq = NULL;
22625 int ret, rw;
22626 unsigned int dxfer_len;
22627- void *dxferp = NULL;
22628+ void __user *dxferp = NULL;
22629 struct bsg_class_device *bcd = &q->bsg_dev;
22630
22631 /* if the LLD has been removed then the bsg_unregister_queue will
22632@@ -291,7 +299,7 @@ bsg_map_hdr(struct bsg_device *bd, struc
22633 rq->next_rq = next_rq;
22634 next_rq->cmd_type = rq->cmd_type;
22635
22636- dxferp = (void*)(unsigned long)hdr->din_xferp;
22637+ dxferp = (void __user *)(unsigned long)hdr->din_xferp;
22638 ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
22639 hdr->din_xfer_len, GFP_KERNEL);
22640 if (ret)
22641@@ -300,10 +308,10 @@ bsg_map_hdr(struct bsg_device *bd, struc
22642
22643 if (hdr->dout_xfer_len) {
22644 dxfer_len = hdr->dout_xfer_len;
22645- dxferp = (void*)(unsigned long)hdr->dout_xferp;
22646+ dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
22647 } else if (hdr->din_xfer_len) {
22648 dxfer_len = hdr->din_xfer_len;
22649- dxferp = (void*)(unsigned long)hdr->din_xferp;
22650+ dxferp = (void __user *)(unsigned long)hdr->din_xferp;
22651 } else
22652 dxfer_len = 0;
22653
22654@@ -445,7 +453,7 @@ static int blk_complete_sgv4_hdr_rq(stru
22655 int len = min_t(unsigned int, hdr->max_response_len,
22656 rq->sense_len);
22657
22658- ret = copy_to_user((void*)(unsigned long)hdr->response,
22659+ ret = copy_to_user((void __user *)(unsigned long)hdr->response,
22660 rq->sense, len);
22661 if (!ret)
22662 hdr->response_len = len;
22663diff -urNp linux-3.0.4/block/compat_ioctl.c linux-3.0.4/block/compat_ioctl.c
22664--- linux-3.0.4/block/compat_ioctl.c 2011-07-21 22:17:23.000000000 -0400
22665+++ linux-3.0.4/block/compat_ioctl.c 2011-10-06 04:17:55.000000000 -0400
22666@@ -354,7 +354,7 @@ static int compat_fd_ioctl(struct block_
22667 err |= __get_user(f->spec1, &uf->spec1);
22668 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
22669 err |= __get_user(name, &uf->name);
22670- f->name = compat_ptr(name);
22671+ f->name = (void __force_kernel *)compat_ptr(name);
22672 if (err) {
22673 err = -EFAULT;
22674 goto out;
22675diff -urNp linux-3.0.4/block/scsi_ioctl.c linux-3.0.4/block/scsi_ioctl.c
22676--- linux-3.0.4/block/scsi_ioctl.c 2011-07-21 22:17:23.000000000 -0400
22677+++ linux-3.0.4/block/scsi_ioctl.c 2011-08-23 21:47:55.000000000 -0400
22678@@ -222,8 +222,20 @@ EXPORT_SYMBOL(blk_verify_command);
22679 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
22680 struct sg_io_hdr *hdr, fmode_t mode)
22681 {
22682- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
22683+ unsigned char tmpcmd[sizeof(rq->__cmd)];
22684+ unsigned char *cmdptr;
22685+
22686+ if (rq->cmd != rq->__cmd)
22687+ cmdptr = rq->cmd;
22688+ else
22689+ cmdptr = tmpcmd;
22690+
22691+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
22692 return -EFAULT;
22693+
22694+ if (cmdptr != rq->cmd)
22695+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
22696+
22697 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
22698 return -EPERM;
22699
22700@@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue *
22701 int err;
22702 unsigned int in_len, out_len, bytes, opcode, cmdlen;
22703 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
22704+ unsigned char tmpcmd[sizeof(rq->__cmd)];
22705+ unsigned char *cmdptr;
22706
22707 if (!sic)
22708 return -EINVAL;
22709@@ -465,9 +479,18 @@ int sg_scsi_ioctl(struct request_queue *
22710 */
22711 err = -EFAULT;
22712 rq->cmd_len = cmdlen;
22713- if (copy_from_user(rq->cmd, sic->data, cmdlen))
22714+
22715+ if (rq->cmd != rq->__cmd)
22716+ cmdptr = rq->cmd;
22717+ else
22718+ cmdptr = tmpcmd;
22719+
22720+ if (copy_from_user(cmdptr, sic->data, cmdlen))
22721 goto error;
22722
22723+ if (rq->cmd != cmdptr)
22724+ memcpy(rq->cmd, cmdptr, cmdlen);
22725+
22726 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
22727 goto error;
22728
22729diff -urNp linux-3.0.4/crypto/cryptd.c linux-3.0.4/crypto/cryptd.c
22730--- linux-3.0.4/crypto/cryptd.c 2011-07-21 22:17:23.000000000 -0400
22731+++ linux-3.0.4/crypto/cryptd.c 2011-08-23 21:47:55.000000000 -0400
22732@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
22733
22734 struct cryptd_blkcipher_request_ctx {
22735 crypto_completion_t complete;
22736-};
22737+} __no_const;
22738
22739 struct cryptd_hash_ctx {
22740 struct crypto_shash *child;
22741@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
22742
22743 struct cryptd_aead_request_ctx {
22744 crypto_completion_t complete;
22745-};
22746+} __no_const;
22747
22748 static void cryptd_queue_worker(struct work_struct *work);
22749
22750diff -urNp linux-3.0.4/crypto/gf128mul.c linux-3.0.4/crypto/gf128mul.c
22751--- linux-3.0.4/crypto/gf128mul.c 2011-07-21 22:17:23.000000000 -0400
22752+++ linux-3.0.4/crypto/gf128mul.c 2011-08-23 21:47:55.000000000 -0400
22753@@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
22754 for (i = 0; i < 7; ++i)
22755 gf128mul_x_lle(&p[i + 1], &p[i]);
22756
22757- memset(r, 0, sizeof(r));
22758+ memset(r, 0, sizeof(*r));
22759 for (i = 0;;) {
22760 u8 ch = ((u8 *)b)[15 - i];
22761
22762@@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
22763 for (i = 0; i < 7; ++i)
22764 gf128mul_x_bbe(&p[i + 1], &p[i]);
22765
22766- memset(r, 0, sizeof(r));
22767+ memset(r, 0, sizeof(*r));
22768 for (i = 0;;) {
22769 u8 ch = ((u8 *)b)[i];
22770
22771diff -urNp linux-3.0.4/crypto/serpent.c linux-3.0.4/crypto/serpent.c
22772--- linux-3.0.4/crypto/serpent.c 2011-07-21 22:17:23.000000000 -0400
22773+++ linux-3.0.4/crypto/serpent.c 2011-08-23 21:48:14.000000000 -0400
22774@@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
22775 u32 r0,r1,r2,r3,r4;
22776 int i;
22777
22778+ pax_track_stack();
22779+
22780 /* Copy key, add padding */
22781
22782 for (i = 0; i < keylen; ++i)
22783diff -urNp linux-3.0.4/Documentation/dontdiff linux-3.0.4/Documentation/dontdiff
22784--- linux-3.0.4/Documentation/dontdiff 2011-07-21 22:17:23.000000000 -0400
22785+++ linux-3.0.4/Documentation/dontdiff 2011-10-07 19:07:23.000000000 -0400
22786@@ -5,6 +5,7 @@
22787 *.cis
22788 *.cpio
22789 *.csp
22790+*.dbg
22791 *.dsp
22792 *.dvi
22793 *.elf
22794@@ -48,9 +49,11 @@
22795 *.tab.h
22796 *.tex
22797 *.ver
22798+*.vim
22799 *.xml
22800 *.xz
22801 *_MODULES
22802+*_reg_safe.h
22803 *_vga16.c
22804 *~
22805 \#*#
22806@@ -70,6 +73,7 @@ Kerntypes
22807 Module.markers
22808 Module.symvers
22809 PENDING
22810+PERF*
22811 SCCS
22812 System.map*
22813 TAGS
22814@@ -98,6 +102,8 @@ bzImage*
22815 capability_names.h
22816 capflags.c
22817 classlist.h*
22818+clut_vga16.c
22819+common-cmds.h
22820 comp*.log
22821 compile.h*
22822 conf
22823@@ -126,12 +132,14 @@ fore200e_pca_fw.c*
22824 gconf
22825 gconf.glade.h
22826 gen-devlist
22827+gen-kdb_cmds.c
22828 gen_crc32table
22829 gen_init_cpio
22830 generated
22831 genheaders
22832 genksyms
22833 *_gray256.c
22834+hash
22835 hpet_example
22836 hugepage-mmap
22837 hugepage-shm
22838@@ -146,7 +154,6 @@ int32.c
22839 int4.c
22840 int8.c
22841 kallsyms
22842-kconfig
22843 keywords.c
22844 ksym.c*
22845 ksym.h*
22846@@ -154,7 +161,6 @@ kxgettext
22847 lkc_defs.h
22848 lex.c
22849 lex.*.c
22850-linux
22851 logo_*.c
22852 logo_*_clut224.c
22853 logo_*_mono.c
22854@@ -166,7 +172,6 @@ machtypes.h
22855 map
22856 map_hugetlb
22857 maui_boot.h
22858-media
22859 mconf
22860 miboot*
22861 mk_elfconfig
22862@@ -174,6 +179,7 @@ mkboot
22863 mkbugboot
22864 mkcpustr
22865 mkdep
22866+mkpiggy
22867 mkprep
22868 mkregtable
22869 mktables
22870@@ -209,6 +215,7 @@ r300_reg_safe.h
22871 r420_reg_safe.h
22872 r600_reg_safe.h
22873 recordmcount
22874+regdb.c
22875 relocs
22876 rlim_names.h
22877 rn50_reg_safe.h
22878@@ -219,6 +226,7 @@ setup
22879 setup.bin
22880 setup.elf
22881 sImage
22882+slabinfo
22883 sm_tbl*
22884 split-include
22885 syscalltab.h
22886@@ -246,7 +254,9 @@ vmlinux
22887 vmlinux-*
22888 vmlinux.aout
22889 vmlinux.bin.all
22890+vmlinux.bin.bz2
22891 vmlinux.lds
22892+vmlinux.relocs
22893 vmlinuz
22894 voffset.h
22895 vsyscall.lds
22896@@ -254,6 +264,7 @@ vsyscall_32.lds
22897 wanxlfw.inc
22898 uImage
22899 unifdef
22900+utsrelease.h
22901 wakeup.bin
22902 wakeup.elf
22903 wakeup.lds
22904diff -urNp linux-3.0.4/Documentation/kernel-parameters.txt linux-3.0.4/Documentation/kernel-parameters.txt
22905--- linux-3.0.4/Documentation/kernel-parameters.txt 2011-07-21 22:17:23.000000000 -0400
22906+++ linux-3.0.4/Documentation/kernel-parameters.txt 2011-08-23 21:47:55.000000000 -0400
22907@@ -1883,6 +1883,13 @@ bytes respectively. Such letter suffixes
22908 the specified number of seconds. This is to be used if
22909 your oopses keep scrolling off the screen.
22910
22911+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
22912+ virtualization environments that don't cope well with the
22913+ expand down segment used by UDEREF on X86-32 or the frequent
22914+ page table updates on X86-64.
22915+
22916+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
22917+
22918 pcbit= [HW,ISDN]
22919
22920 pcd. [PARIDE]
22921diff -urNp linux-3.0.4/drivers/acpi/apei/cper.c linux-3.0.4/drivers/acpi/apei/cper.c
22922--- linux-3.0.4/drivers/acpi/apei/cper.c 2011-07-21 22:17:23.000000000 -0400
22923+++ linux-3.0.4/drivers/acpi/apei/cper.c 2011-08-23 21:47:55.000000000 -0400
22924@@ -38,12 +38,12 @@
22925 */
22926 u64 cper_next_record_id(void)
22927 {
22928- static atomic64_t seq;
22929+ static atomic64_unchecked_t seq;
22930
22931- if (!atomic64_read(&seq))
22932- atomic64_set(&seq, ((u64)get_seconds()) << 32);
22933+ if (!atomic64_read_unchecked(&seq))
22934+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
22935
22936- return atomic64_inc_return(&seq);
22937+ return atomic64_inc_return_unchecked(&seq);
22938 }
22939 EXPORT_SYMBOL_GPL(cper_next_record_id);
22940
22941diff -urNp linux-3.0.4/drivers/acpi/ec_sys.c linux-3.0.4/drivers/acpi/ec_sys.c
22942--- linux-3.0.4/drivers/acpi/ec_sys.c 2011-07-21 22:17:23.000000000 -0400
22943+++ linux-3.0.4/drivers/acpi/ec_sys.c 2011-08-24 19:06:55.000000000 -0400
22944@@ -11,6 +11,7 @@
22945 #include <linux/kernel.h>
22946 #include <linux/acpi.h>
22947 #include <linux/debugfs.h>
22948+#include <asm/uaccess.h>
22949 #include "internal.h"
22950
22951 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
22952@@ -39,7 +40,7 @@ static ssize_t acpi_ec_read_io(struct fi
22953 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
22954 */
22955 unsigned int size = EC_SPACE_SIZE;
22956- u8 *data = (u8 *) buf;
22957+ u8 data;
22958 loff_t init_off = *off;
22959 int err = 0;
22960
22961@@ -52,9 +53,11 @@ static ssize_t acpi_ec_read_io(struct fi
22962 size = count;
22963
22964 while (size) {
22965- err = ec_read(*off, &data[*off - init_off]);
22966+ err = ec_read(*off, &data);
22967 if (err)
22968 return err;
22969+ if (put_user(data, &buf[*off - init_off]))
22970+ return -EFAULT;
22971 *off += 1;
22972 size--;
22973 }
22974@@ -70,7 +73,6 @@ static ssize_t acpi_ec_write_io(struct f
22975
22976 unsigned int size = count;
22977 loff_t init_off = *off;
22978- u8 *data = (u8 *) buf;
22979 int err = 0;
22980
22981 if (*off >= EC_SPACE_SIZE)
22982@@ -81,7 +83,9 @@ static ssize_t acpi_ec_write_io(struct f
22983 }
22984
22985 while (size) {
22986- u8 byte_write = data[*off - init_off];
22987+ u8 byte_write;
22988+ if (get_user(byte_write, &buf[*off - init_off]))
22989+ return -EFAULT;
22990 err = ec_write(*off, byte_write);
22991 if (err)
22992 return err;
22993diff -urNp linux-3.0.4/drivers/acpi/proc.c linux-3.0.4/drivers/acpi/proc.c
22994--- linux-3.0.4/drivers/acpi/proc.c 2011-07-21 22:17:23.000000000 -0400
22995+++ linux-3.0.4/drivers/acpi/proc.c 2011-08-23 21:47:55.000000000 -0400
22996@@ -342,19 +342,13 @@ acpi_system_write_wakeup_device(struct f
22997 size_t count, loff_t * ppos)
22998 {
22999 struct list_head *node, *next;
23000- char strbuf[5];
23001- char str[5] = "";
23002- unsigned int len = count;
23003-
23004- if (len > 4)
23005- len = 4;
23006- if (len < 0)
23007- return -EFAULT;
23008+ char strbuf[5] = {0};
23009
23010- if (copy_from_user(strbuf, buffer, len))
23011+ if (count > 4)
23012+ count = 4;
23013+ if (copy_from_user(strbuf, buffer, count))
23014 return -EFAULT;
23015- strbuf[len] = '\0';
23016- sscanf(strbuf, "%s", str);
23017+ strbuf[count] = '\0';
23018
23019 mutex_lock(&acpi_device_lock);
23020 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
23021@@ -363,7 +357,7 @@ acpi_system_write_wakeup_device(struct f
23022 if (!dev->wakeup.flags.valid)
23023 continue;
23024
23025- if (!strncmp(dev->pnp.bus_id, str, 4)) {
23026+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
23027 if (device_can_wakeup(&dev->dev)) {
23028 bool enable = !device_may_wakeup(&dev->dev);
23029 device_set_wakeup_enable(&dev->dev, enable);
23030diff -urNp linux-3.0.4/drivers/acpi/processor_driver.c linux-3.0.4/drivers/acpi/processor_driver.c
23031--- linux-3.0.4/drivers/acpi/processor_driver.c 2011-07-21 22:17:23.000000000 -0400
23032+++ linux-3.0.4/drivers/acpi/processor_driver.c 2011-08-23 21:47:55.000000000 -0400
23033@@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(
23034 return 0;
23035 #endif
23036
23037- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
23038+ BUG_ON(pr->id >= nr_cpu_ids);
23039
23040 /*
23041 * Buggy BIOS check
23042diff -urNp linux-3.0.4/drivers/ata/libata-core.c linux-3.0.4/drivers/ata/libata-core.c
23043--- linux-3.0.4/drivers/ata/libata-core.c 2011-07-21 22:17:23.000000000 -0400
23044+++ linux-3.0.4/drivers/ata/libata-core.c 2011-08-23 21:47:55.000000000 -0400
23045@@ -4753,7 +4753,7 @@ void ata_qc_free(struct ata_queued_cmd *
23046 struct ata_port *ap;
23047 unsigned int tag;
23048
23049- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23050+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23051 ap = qc->ap;
23052
23053 qc->flags = 0;
23054@@ -4769,7 +4769,7 @@ void __ata_qc_complete(struct ata_queued
23055 struct ata_port *ap;
23056 struct ata_link *link;
23057
23058- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23059+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23060 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
23061 ap = qc->ap;
23062 link = qc->dev->link;
23063@@ -5774,6 +5774,7 @@ static void ata_finalize_port_ops(struct
23064 return;
23065
23066 spin_lock(&lock);
23067+ pax_open_kernel();
23068
23069 for (cur = ops->inherits; cur; cur = cur->inherits) {
23070 void **inherit = (void **)cur;
23071@@ -5787,8 +5788,9 @@ static void ata_finalize_port_ops(struct
23072 if (IS_ERR(*pp))
23073 *pp = NULL;
23074
23075- ops->inherits = NULL;
23076+ *(struct ata_port_operations **)&ops->inherits = NULL;
23077
23078+ pax_close_kernel();
23079 spin_unlock(&lock);
23080 }
23081
23082diff -urNp linux-3.0.4/drivers/ata/libata-eh.c linux-3.0.4/drivers/ata/libata-eh.c
23083--- linux-3.0.4/drivers/ata/libata-eh.c 2011-07-21 22:17:23.000000000 -0400
23084+++ linux-3.0.4/drivers/ata/libata-eh.c 2011-08-23 21:48:14.000000000 -0400
23085@@ -2518,6 +2518,8 @@ void ata_eh_report(struct ata_port *ap)
23086 {
23087 struct ata_link *link;
23088
23089+ pax_track_stack();
23090+
23091 ata_for_each_link(link, ap, HOST_FIRST)
23092 ata_eh_link_report(link);
23093 }
23094diff -urNp linux-3.0.4/drivers/ata/pata_arasan_cf.c linux-3.0.4/drivers/ata/pata_arasan_cf.c
23095--- linux-3.0.4/drivers/ata/pata_arasan_cf.c 2011-07-21 22:17:23.000000000 -0400
23096+++ linux-3.0.4/drivers/ata/pata_arasan_cf.c 2011-08-23 21:47:55.000000000 -0400
23097@@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(str
23098 /* Handle platform specific quirks */
23099 if (pdata->quirk) {
23100 if (pdata->quirk & CF_BROKEN_PIO) {
23101- ap->ops->set_piomode = NULL;
23102+ pax_open_kernel();
23103+ *(void **)&ap->ops->set_piomode = NULL;
23104+ pax_close_kernel();
23105 ap->pio_mask = 0;
23106 }
23107 if (pdata->quirk & CF_BROKEN_MWDMA)
23108diff -urNp linux-3.0.4/drivers/atm/adummy.c linux-3.0.4/drivers/atm/adummy.c
23109--- linux-3.0.4/drivers/atm/adummy.c 2011-07-21 22:17:23.000000000 -0400
23110+++ linux-3.0.4/drivers/atm/adummy.c 2011-08-23 21:47:55.000000000 -0400
23111@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct
23112 vcc->pop(vcc, skb);
23113 else
23114 dev_kfree_skb_any(skb);
23115- atomic_inc(&vcc->stats->tx);
23116+ atomic_inc_unchecked(&vcc->stats->tx);
23117
23118 return 0;
23119 }
23120diff -urNp linux-3.0.4/drivers/atm/ambassador.c linux-3.0.4/drivers/atm/ambassador.c
23121--- linux-3.0.4/drivers/atm/ambassador.c 2011-07-21 22:17:23.000000000 -0400
23122+++ linux-3.0.4/drivers/atm/ambassador.c 2011-08-23 21:47:55.000000000 -0400
23123@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev,
23124 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
23125
23126 // VC layer stats
23127- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
23128+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
23129
23130 // free the descriptor
23131 kfree (tx_descr);
23132@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev,
23133 dump_skb ("<<<", vc, skb);
23134
23135 // VC layer stats
23136- atomic_inc(&atm_vcc->stats->rx);
23137+ atomic_inc_unchecked(&atm_vcc->stats->rx);
23138 __net_timestamp(skb);
23139 // end of our responsibility
23140 atm_vcc->push (atm_vcc, skb);
23141@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev,
23142 } else {
23143 PRINTK (KERN_INFO, "dropped over-size frame");
23144 // should we count this?
23145- atomic_inc(&atm_vcc->stats->rx_drop);
23146+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
23147 }
23148
23149 } else {
23150@@ -1342,7 +1342,7 @@ static int amb_send (struct atm_vcc * at
23151 }
23152
23153 if (check_area (skb->data, skb->len)) {
23154- atomic_inc(&atm_vcc->stats->tx_err);
23155+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
23156 return -ENOMEM; // ?
23157 }
23158
23159diff -urNp linux-3.0.4/drivers/atm/atmtcp.c linux-3.0.4/drivers/atm/atmtcp.c
23160--- linux-3.0.4/drivers/atm/atmtcp.c 2011-07-21 22:17:23.000000000 -0400
23161+++ linux-3.0.4/drivers/atm/atmtcp.c 2011-08-23 21:47:55.000000000 -0400
23162@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc
23163 if (vcc->pop) vcc->pop(vcc,skb);
23164 else dev_kfree_skb(skb);
23165 if (dev_data) return 0;
23166- atomic_inc(&vcc->stats->tx_err);
23167+ atomic_inc_unchecked(&vcc->stats->tx_err);
23168 return -ENOLINK;
23169 }
23170 size = skb->len+sizeof(struct atmtcp_hdr);
23171@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc
23172 if (!new_skb) {
23173 if (vcc->pop) vcc->pop(vcc,skb);
23174 else dev_kfree_skb(skb);
23175- atomic_inc(&vcc->stats->tx_err);
23176+ atomic_inc_unchecked(&vcc->stats->tx_err);
23177 return -ENOBUFS;
23178 }
23179 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
23180@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc
23181 if (vcc->pop) vcc->pop(vcc,skb);
23182 else dev_kfree_skb(skb);
23183 out_vcc->push(out_vcc,new_skb);
23184- atomic_inc(&vcc->stats->tx);
23185- atomic_inc(&out_vcc->stats->rx);
23186+ atomic_inc_unchecked(&vcc->stats->tx);
23187+ atomic_inc_unchecked(&out_vcc->stats->rx);
23188 return 0;
23189 }
23190
23191@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc
23192 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
23193 read_unlock(&vcc_sklist_lock);
23194 if (!out_vcc) {
23195- atomic_inc(&vcc->stats->tx_err);
23196+ atomic_inc_unchecked(&vcc->stats->tx_err);
23197 goto done;
23198 }
23199 skb_pull(skb,sizeof(struct atmtcp_hdr));
23200@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc
23201 __net_timestamp(new_skb);
23202 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
23203 out_vcc->push(out_vcc,new_skb);
23204- atomic_inc(&vcc->stats->tx);
23205- atomic_inc(&out_vcc->stats->rx);
23206+ atomic_inc_unchecked(&vcc->stats->tx);
23207+ atomic_inc_unchecked(&out_vcc->stats->rx);
23208 done:
23209 if (vcc->pop) vcc->pop(vcc,skb);
23210 else dev_kfree_skb(skb);
23211diff -urNp linux-3.0.4/drivers/atm/eni.c linux-3.0.4/drivers/atm/eni.c
23212--- linux-3.0.4/drivers/atm/eni.c 2011-07-21 22:17:23.000000000 -0400
23213+++ linux-3.0.4/drivers/atm/eni.c 2011-08-23 21:47:55.000000000 -0400
23214@@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
23215 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
23216 vcc->dev->number);
23217 length = 0;
23218- atomic_inc(&vcc->stats->rx_err);
23219+ atomic_inc_unchecked(&vcc->stats->rx_err);
23220 }
23221 else {
23222 length = ATM_CELL_SIZE-1; /* no HEC */
23223@@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
23224 size);
23225 }
23226 eff = length = 0;
23227- atomic_inc(&vcc->stats->rx_err);
23228+ atomic_inc_unchecked(&vcc->stats->rx_err);
23229 }
23230 else {
23231 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
23232@@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
23233 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
23234 vcc->dev->number,vcc->vci,length,size << 2,descr);
23235 length = eff = 0;
23236- atomic_inc(&vcc->stats->rx_err);
23237+ atomic_inc_unchecked(&vcc->stats->rx_err);
23238 }
23239 }
23240 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
23241@@ -771,7 +771,7 @@ rx_dequeued++;
23242 vcc->push(vcc,skb);
23243 pushed++;
23244 }
23245- atomic_inc(&vcc->stats->rx);
23246+ atomic_inc_unchecked(&vcc->stats->rx);
23247 }
23248 wake_up(&eni_dev->rx_wait);
23249 }
23250@@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *d
23251 PCI_DMA_TODEVICE);
23252 if (vcc->pop) vcc->pop(vcc,skb);
23253 else dev_kfree_skb_irq(skb);
23254- atomic_inc(&vcc->stats->tx);
23255+ atomic_inc_unchecked(&vcc->stats->tx);
23256 wake_up(&eni_dev->tx_wait);
23257 dma_complete++;
23258 }
23259diff -urNp linux-3.0.4/drivers/atm/firestream.c linux-3.0.4/drivers/atm/firestream.c
23260--- linux-3.0.4/drivers/atm/firestream.c 2011-07-21 22:17:23.000000000 -0400
23261+++ linux-3.0.4/drivers/atm/firestream.c 2011-08-23 21:47:55.000000000 -0400
23262@@ -749,7 +749,7 @@ static void process_txdone_queue (struct
23263 }
23264 }
23265
23266- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
23267+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
23268
23269 fs_dprintk (FS_DEBUG_TXMEM, "i");
23270 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
23271@@ -816,7 +816,7 @@ static void process_incoming (struct fs_
23272 #endif
23273 skb_put (skb, qe->p1 & 0xffff);
23274 ATM_SKB(skb)->vcc = atm_vcc;
23275- atomic_inc(&atm_vcc->stats->rx);
23276+ atomic_inc_unchecked(&atm_vcc->stats->rx);
23277 __net_timestamp(skb);
23278 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
23279 atm_vcc->push (atm_vcc, skb);
23280@@ -837,12 +837,12 @@ static void process_incoming (struct fs_
23281 kfree (pe);
23282 }
23283 if (atm_vcc)
23284- atomic_inc(&atm_vcc->stats->rx_drop);
23285+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
23286 break;
23287 case 0x1f: /* Reassembly abort: no buffers. */
23288 /* Silently increment error counter. */
23289 if (atm_vcc)
23290- atomic_inc(&atm_vcc->stats->rx_drop);
23291+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
23292 break;
23293 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
23294 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
23295diff -urNp linux-3.0.4/drivers/atm/fore200e.c linux-3.0.4/drivers/atm/fore200e.c
23296--- linux-3.0.4/drivers/atm/fore200e.c 2011-07-21 22:17:23.000000000 -0400
23297+++ linux-3.0.4/drivers/atm/fore200e.c 2011-08-23 21:47:55.000000000 -0400
23298@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200
23299 #endif
23300 /* check error condition */
23301 if (*entry->status & STATUS_ERROR)
23302- atomic_inc(&vcc->stats->tx_err);
23303+ atomic_inc_unchecked(&vcc->stats->tx_err);
23304 else
23305- atomic_inc(&vcc->stats->tx);
23306+ atomic_inc_unchecked(&vcc->stats->tx);
23307 }
23308 }
23309
23310@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore2
23311 if (skb == NULL) {
23312 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
23313
23314- atomic_inc(&vcc->stats->rx_drop);
23315+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23316 return -ENOMEM;
23317 }
23318
23319@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore2
23320
23321 dev_kfree_skb_any(skb);
23322
23323- atomic_inc(&vcc->stats->rx_drop);
23324+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23325 return -ENOMEM;
23326 }
23327
23328 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
23329
23330 vcc->push(vcc, skb);
23331- atomic_inc(&vcc->stats->rx);
23332+ atomic_inc_unchecked(&vcc->stats->rx);
23333
23334 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
23335
23336@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200
23337 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
23338 fore200e->atm_dev->number,
23339 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
23340- atomic_inc(&vcc->stats->rx_err);
23341+ atomic_inc_unchecked(&vcc->stats->rx_err);
23342 }
23343 }
23344
23345@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struc
23346 goto retry_here;
23347 }
23348
23349- atomic_inc(&vcc->stats->tx_err);
23350+ atomic_inc_unchecked(&vcc->stats->tx_err);
23351
23352 fore200e->tx_sat++;
23353 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
23354diff -urNp linux-3.0.4/drivers/atm/he.c linux-3.0.4/drivers/atm/he.c
23355--- linux-3.0.4/drivers/atm/he.c 2011-07-21 22:17:23.000000000 -0400
23356+++ linux-3.0.4/drivers/atm/he.c 2011-08-23 21:47:55.000000000 -0400
23357@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, i
23358
23359 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
23360 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
23361- atomic_inc(&vcc->stats->rx_drop);
23362+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23363 goto return_host_buffers;
23364 }
23365
23366@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, i
23367 RBRQ_LEN_ERR(he_dev->rbrq_head)
23368 ? "LEN_ERR" : "",
23369 vcc->vpi, vcc->vci);
23370- atomic_inc(&vcc->stats->rx_err);
23371+ atomic_inc_unchecked(&vcc->stats->rx_err);
23372 goto return_host_buffers;
23373 }
23374
23375@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, i
23376 vcc->push(vcc, skb);
23377 spin_lock(&he_dev->global_lock);
23378
23379- atomic_inc(&vcc->stats->rx);
23380+ atomic_inc_unchecked(&vcc->stats->rx);
23381
23382 return_host_buffers:
23383 ++pdus_assembled;
23384@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
23385 tpd->vcc->pop(tpd->vcc, tpd->skb);
23386 else
23387 dev_kfree_skb_any(tpd->skb);
23388- atomic_inc(&tpd->vcc->stats->tx_err);
23389+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
23390 }
23391 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
23392 return;
23393@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
23394 vcc->pop(vcc, skb);
23395 else
23396 dev_kfree_skb_any(skb);
23397- atomic_inc(&vcc->stats->tx_err);
23398+ atomic_inc_unchecked(&vcc->stats->tx_err);
23399 return -EINVAL;
23400 }
23401
23402@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
23403 vcc->pop(vcc, skb);
23404 else
23405 dev_kfree_skb_any(skb);
23406- atomic_inc(&vcc->stats->tx_err);
23407+ atomic_inc_unchecked(&vcc->stats->tx_err);
23408 return -EINVAL;
23409 }
23410 #endif
23411@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
23412 vcc->pop(vcc, skb);
23413 else
23414 dev_kfree_skb_any(skb);
23415- atomic_inc(&vcc->stats->tx_err);
23416+ atomic_inc_unchecked(&vcc->stats->tx_err);
23417 spin_unlock_irqrestore(&he_dev->global_lock, flags);
23418 return -ENOMEM;
23419 }
23420@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
23421 vcc->pop(vcc, skb);
23422 else
23423 dev_kfree_skb_any(skb);
23424- atomic_inc(&vcc->stats->tx_err);
23425+ atomic_inc_unchecked(&vcc->stats->tx_err);
23426 spin_unlock_irqrestore(&he_dev->global_lock, flags);
23427 return -ENOMEM;
23428 }
23429@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
23430 __enqueue_tpd(he_dev, tpd, cid);
23431 spin_unlock_irqrestore(&he_dev->global_lock, flags);
23432
23433- atomic_inc(&vcc->stats->tx);
23434+ atomic_inc_unchecked(&vcc->stats->tx);
23435
23436 return 0;
23437 }
23438diff -urNp linux-3.0.4/drivers/atm/horizon.c linux-3.0.4/drivers/atm/horizon.c
23439--- linux-3.0.4/drivers/atm/horizon.c 2011-07-21 22:17:23.000000000 -0400
23440+++ linux-3.0.4/drivers/atm/horizon.c 2011-08-23 21:47:55.000000000 -0400
23441@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev,
23442 {
23443 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
23444 // VC layer stats
23445- atomic_inc(&vcc->stats->rx);
23446+ atomic_inc_unchecked(&vcc->stats->rx);
23447 __net_timestamp(skb);
23448 // end of our responsibility
23449 vcc->push (vcc, skb);
23450@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const
23451 dev->tx_iovec = NULL;
23452
23453 // VC layer stats
23454- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
23455+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
23456
23457 // free the skb
23458 hrz_kfree_skb (skb);
23459diff -urNp linux-3.0.4/drivers/atm/idt77252.c linux-3.0.4/drivers/atm/idt77252.c
23460--- linux-3.0.4/drivers/atm/idt77252.c 2011-07-21 22:17:23.000000000 -0400
23461+++ linux-3.0.4/drivers/atm/idt77252.c 2011-08-23 21:47:55.000000000 -0400
23462@@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, str
23463 else
23464 dev_kfree_skb(skb);
23465
23466- atomic_inc(&vcc->stats->tx);
23467+ atomic_inc_unchecked(&vcc->stats->tx);
23468 }
23469
23470 atomic_dec(&scq->used);
23471@@ -1074,13 +1074,13 @@ dequeue_rx(struct idt77252_dev *card, st
23472 if ((sb = dev_alloc_skb(64)) == NULL) {
23473 printk("%s: Can't allocate buffers for aal0.\n",
23474 card->name);
23475- atomic_add(i, &vcc->stats->rx_drop);
23476+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
23477 break;
23478 }
23479 if (!atm_charge(vcc, sb->truesize)) {
23480 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
23481 card->name);
23482- atomic_add(i - 1, &vcc->stats->rx_drop);
23483+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
23484 dev_kfree_skb(sb);
23485 break;
23486 }
23487@@ -1097,7 +1097,7 @@ dequeue_rx(struct idt77252_dev *card, st
23488 ATM_SKB(sb)->vcc = vcc;
23489 __net_timestamp(sb);
23490 vcc->push(vcc, sb);
23491- atomic_inc(&vcc->stats->rx);
23492+ atomic_inc_unchecked(&vcc->stats->rx);
23493
23494 cell += ATM_CELL_PAYLOAD;
23495 }
23496@@ -1134,13 +1134,13 @@ dequeue_rx(struct idt77252_dev *card, st
23497 "(CDC: %08x)\n",
23498 card->name, len, rpp->len, readl(SAR_REG_CDC));
23499 recycle_rx_pool_skb(card, rpp);
23500- atomic_inc(&vcc->stats->rx_err);
23501+ atomic_inc_unchecked(&vcc->stats->rx_err);
23502 return;
23503 }
23504 if (stat & SAR_RSQE_CRC) {
23505 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
23506 recycle_rx_pool_skb(card, rpp);
23507- atomic_inc(&vcc->stats->rx_err);
23508+ atomic_inc_unchecked(&vcc->stats->rx_err);
23509 return;
23510 }
23511 if (skb_queue_len(&rpp->queue) > 1) {
23512@@ -1151,7 +1151,7 @@ dequeue_rx(struct idt77252_dev *card, st
23513 RXPRINTK("%s: Can't alloc RX skb.\n",
23514 card->name);
23515 recycle_rx_pool_skb(card, rpp);
23516- atomic_inc(&vcc->stats->rx_err);
23517+ atomic_inc_unchecked(&vcc->stats->rx_err);
23518 return;
23519 }
23520 if (!atm_charge(vcc, skb->truesize)) {
23521@@ -1170,7 +1170,7 @@ dequeue_rx(struct idt77252_dev *card, st
23522 __net_timestamp(skb);
23523
23524 vcc->push(vcc, skb);
23525- atomic_inc(&vcc->stats->rx);
23526+ atomic_inc_unchecked(&vcc->stats->rx);
23527
23528 return;
23529 }
23530@@ -1192,7 +1192,7 @@ dequeue_rx(struct idt77252_dev *card, st
23531 __net_timestamp(skb);
23532
23533 vcc->push(vcc, skb);
23534- atomic_inc(&vcc->stats->rx);
23535+ atomic_inc_unchecked(&vcc->stats->rx);
23536
23537 if (skb->truesize > SAR_FB_SIZE_3)
23538 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
23539@@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
23540 if (vcc->qos.aal != ATM_AAL0) {
23541 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
23542 card->name, vpi, vci);
23543- atomic_inc(&vcc->stats->rx_drop);
23544+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23545 goto drop;
23546 }
23547
23548 if ((sb = dev_alloc_skb(64)) == NULL) {
23549 printk("%s: Can't allocate buffers for AAL0.\n",
23550 card->name);
23551- atomic_inc(&vcc->stats->rx_err);
23552+ atomic_inc_unchecked(&vcc->stats->rx_err);
23553 goto drop;
23554 }
23555
23556@@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
23557 ATM_SKB(sb)->vcc = vcc;
23558 __net_timestamp(sb);
23559 vcc->push(vcc, sb);
23560- atomic_inc(&vcc->stats->rx);
23561+ atomic_inc_unchecked(&vcc->stats->rx);
23562
23563 drop:
23564 skb_pull(queue, 64);
23565@@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
23566
23567 if (vc == NULL) {
23568 printk("%s: NULL connection in send().\n", card->name);
23569- atomic_inc(&vcc->stats->tx_err);
23570+ atomic_inc_unchecked(&vcc->stats->tx_err);
23571 dev_kfree_skb(skb);
23572 return -EINVAL;
23573 }
23574 if (!test_bit(VCF_TX, &vc->flags)) {
23575 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
23576- atomic_inc(&vcc->stats->tx_err);
23577+ atomic_inc_unchecked(&vcc->stats->tx_err);
23578 dev_kfree_skb(skb);
23579 return -EINVAL;
23580 }
23581@@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
23582 break;
23583 default:
23584 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
23585- atomic_inc(&vcc->stats->tx_err);
23586+ atomic_inc_unchecked(&vcc->stats->tx_err);
23587 dev_kfree_skb(skb);
23588 return -EINVAL;
23589 }
23590
23591 if (skb_shinfo(skb)->nr_frags != 0) {
23592 printk("%s: No scatter-gather yet.\n", card->name);
23593- atomic_inc(&vcc->stats->tx_err);
23594+ atomic_inc_unchecked(&vcc->stats->tx_err);
23595 dev_kfree_skb(skb);
23596 return -EINVAL;
23597 }
23598@@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
23599
23600 err = queue_skb(card, vc, skb, oam);
23601 if (err) {
23602- atomic_inc(&vcc->stats->tx_err);
23603+ atomic_inc_unchecked(&vcc->stats->tx_err);
23604 dev_kfree_skb(skb);
23605 return err;
23606 }
23607@@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
23608 skb = dev_alloc_skb(64);
23609 if (!skb) {
23610 printk("%s: Out of memory in send_oam().\n", card->name);
23611- atomic_inc(&vcc->stats->tx_err);
23612+ atomic_inc_unchecked(&vcc->stats->tx_err);
23613 return -ENOMEM;
23614 }
23615 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
23616diff -urNp linux-3.0.4/drivers/atm/iphase.c linux-3.0.4/drivers/atm/iphase.c
23617--- linux-3.0.4/drivers/atm/iphase.c 2011-07-21 22:17:23.000000000 -0400
23618+++ linux-3.0.4/drivers/atm/iphase.c 2011-08-23 21:47:55.000000000 -0400
23619@@ -1120,7 +1120,7 @@ static int rx_pkt(struct atm_dev *dev)
23620 status = (u_short) (buf_desc_ptr->desc_mode);
23621 if (status & (RX_CER | RX_PTE | RX_OFL))
23622 {
23623- atomic_inc(&vcc->stats->rx_err);
23624+ atomic_inc_unchecked(&vcc->stats->rx_err);
23625 IF_ERR(printk("IA: bad packet, dropping it");)
23626 if (status & RX_CER) {
23627 IF_ERR(printk(" cause: packet CRC error\n");)
23628@@ -1143,7 +1143,7 @@ static int rx_pkt(struct atm_dev *dev)
23629 len = dma_addr - buf_addr;
23630 if (len > iadev->rx_buf_sz) {
23631 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
23632- atomic_inc(&vcc->stats->rx_err);
23633+ atomic_inc_unchecked(&vcc->stats->rx_err);
23634 goto out_free_desc;
23635 }
23636
23637@@ -1293,7 +1293,7 @@ static void rx_dle_intr(struct atm_dev *
23638 ia_vcc = INPH_IA_VCC(vcc);
23639 if (ia_vcc == NULL)
23640 {
23641- atomic_inc(&vcc->stats->rx_err);
23642+ atomic_inc_unchecked(&vcc->stats->rx_err);
23643 dev_kfree_skb_any(skb);
23644 atm_return(vcc, atm_guess_pdu2truesize(len));
23645 goto INCR_DLE;
23646@@ -1305,7 +1305,7 @@ static void rx_dle_intr(struct atm_dev *
23647 if ((length > iadev->rx_buf_sz) || (length >
23648 (skb->len - sizeof(struct cpcs_trailer))))
23649 {
23650- atomic_inc(&vcc->stats->rx_err);
23651+ atomic_inc_unchecked(&vcc->stats->rx_err);
23652 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
23653 length, skb->len);)
23654 dev_kfree_skb_any(skb);
23655@@ -1321,7 +1321,7 @@ static void rx_dle_intr(struct atm_dev *
23656
23657 IF_RX(printk("rx_dle_intr: skb push");)
23658 vcc->push(vcc,skb);
23659- atomic_inc(&vcc->stats->rx);
23660+ atomic_inc_unchecked(&vcc->stats->rx);
23661 iadev->rx_pkt_cnt++;
23662 }
23663 INCR_DLE:
23664@@ -2801,15 +2801,15 @@ static int ia_ioctl(struct atm_dev *dev,
23665 {
23666 struct k_sonet_stats *stats;
23667 stats = &PRIV(_ia_dev[board])->sonet_stats;
23668- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
23669- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
23670- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
23671- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
23672- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
23673- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
23674- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
23675- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
23676- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
23677+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
23678+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
23679+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
23680+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
23681+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
23682+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
23683+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
23684+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
23685+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
23686 }
23687 ia_cmds.status = 0;
23688 break;
23689@@ -2914,7 +2914,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
23690 if ((desc == 0) || (desc > iadev->num_tx_desc))
23691 {
23692 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
23693- atomic_inc(&vcc->stats->tx);
23694+ atomic_inc_unchecked(&vcc->stats->tx);
23695 if (vcc->pop)
23696 vcc->pop(vcc, skb);
23697 else
23698@@ -3019,14 +3019,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
23699 ATM_DESC(skb) = vcc->vci;
23700 skb_queue_tail(&iadev->tx_dma_q, skb);
23701
23702- atomic_inc(&vcc->stats->tx);
23703+ atomic_inc_unchecked(&vcc->stats->tx);
23704 iadev->tx_pkt_cnt++;
23705 /* Increment transaction counter */
23706 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
23707
23708 #if 0
23709 /* add flow control logic */
23710- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
23711+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
23712 if (iavcc->vc_desc_cnt > 10) {
23713 vcc->tx_quota = vcc->tx_quota * 3 / 4;
23714 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
23715diff -urNp linux-3.0.4/drivers/atm/lanai.c linux-3.0.4/drivers/atm/lanai.c
23716--- linux-3.0.4/drivers/atm/lanai.c 2011-07-21 22:17:23.000000000 -0400
23717+++ linux-3.0.4/drivers/atm/lanai.c 2011-08-23 21:47:55.000000000 -0400
23718@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct l
23719 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
23720 lanai_endtx(lanai, lvcc);
23721 lanai_free_skb(lvcc->tx.atmvcc, skb);
23722- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
23723+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
23724 }
23725
23726 /* Try to fill the buffer - don't call unless there is backlog */
23727@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc
23728 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
23729 __net_timestamp(skb);
23730 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
23731- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
23732+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
23733 out:
23734 lvcc->rx.buf.ptr = end;
23735 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
23736@@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_d
23737 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
23738 "vcc %d\n", lanai->number, (unsigned int) s, vci);
23739 lanai->stats.service_rxnotaal5++;
23740- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
23741+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
23742 return 0;
23743 }
23744 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
23745@@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_d
23746 int bytes;
23747 read_unlock(&vcc_sklist_lock);
23748 DPRINTK("got trashed rx pdu on vci %d\n", vci);
23749- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
23750+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
23751 lvcc->stats.x.aal5.service_trash++;
23752 bytes = (SERVICE_GET_END(s) * 16) -
23753 (((unsigned long) lvcc->rx.buf.ptr) -
23754@@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_d
23755 }
23756 if (s & SERVICE_STREAM) {
23757 read_unlock(&vcc_sklist_lock);
23758- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
23759+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
23760 lvcc->stats.x.aal5.service_stream++;
23761 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
23762 "PDU on VCI %d!\n", lanai->number, vci);
23763@@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_d
23764 return 0;
23765 }
23766 DPRINTK("got rx crc error on vci %d\n", vci);
23767- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
23768+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
23769 lvcc->stats.x.aal5.service_rxcrc++;
23770 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
23771 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
23772diff -urNp linux-3.0.4/drivers/atm/nicstar.c linux-3.0.4/drivers/atm/nicstar.c
23773--- linux-3.0.4/drivers/atm/nicstar.c 2011-07-21 22:17:23.000000000 -0400
23774+++ linux-3.0.4/drivers/atm/nicstar.c 2011-08-23 21:47:55.000000000 -0400
23775@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc,
23776 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
23777 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
23778 card->index);
23779- atomic_inc(&vcc->stats->tx_err);
23780+ atomic_inc_unchecked(&vcc->stats->tx_err);
23781 dev_kfree_skb_any(skb);
23782 return -EINVAL;
23783 }
23784@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc,
23785 if (!vc->tx) {
23786 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
23787 card->index);
23788- atomic_inc(&vcc->stats->tx_err);
23789+ atomic_inc_unchecked(&vcc->stats->tx_err);
23790 dev_kfree_skb_any(skb);
23791 return -EINVAL;
23792 }
23793@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc,
23794 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
23795 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
23796 card->index);
23797- atomic_inc(&vcc->stats->tx_err);
23798+ atomic_inc_unchecked(&vcc->stats->tx_err);
23799 dev_kfree_skb_any(skb);
23800 return -EINVAL;
23801 }
23802
23803 if (skb_shinfo(skb)->nr_frags != 0) {
23804 printk("nicstar%d: No scatter-gather yet.\n", card->index);
23805- atomic_inc(&vcc->stats->tx_err);
23806+ atomic_inc_unchecked(&vcc->stats->tx_err);
23807 dev_kfree_skb_any(skb);
23808 return -EINVAL;
23809 }
23810@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc,
23811 }
23812
23813 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
23814- atomic_inc(&vcc->stats->tx_err);
23815+ atomic_inc_unchecked(&vcc->stats->tx_err);
23816 dev_kfree_skb_any(skb);
23817 return -EIO;
23818 }
23819- atomic_inc(&vcc->stats->tx);
23820+ atomic_inc_unchecked(&vcc->stats->tx);
23821
23822 return 0;
23823 }
23824@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns
23825 printk
23826 ("nicstar%d: Can't allocate buffers for aal0.\n",
23827 card->index);
23828- atomic_add(i, &vcc->stats->rx_drop);
23829+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
23830 break;
23831 }
23832 if (!atm_charge(vcc, sb->truesize)) {
23833 RXPRINTK
23834 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
23835 card->index);
23836- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
23837+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
23838 dev_kfree_skb_any(sb);
23839 break;
23840 }
23841@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns
23842 ATM_SKB(sb)->vcc = vcc;
23843 __net_timestamp(sb);
23844 vcc->push(vcc, sb);
23845- atomic_inc(&vcc->stats->rx);
23846+ atomic_inc_unchecked(&vcc->stats->rx);
23847 cell += ATM_CELL_PAYLOAD;
23848 }
23849
23850@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns
23851 if (iovb == NULL) {
23852 printk("nicstar%d: Out of iovec buffers.\n",
23853 card->index);
23854- atomic_inc(&vcc->stats->rx_drop);
23855+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23856 recycle_rx_buf(card, skb);
23857 return;
23858 }
23859@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns
23860 small or large buffer itself. */
23861 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
23862 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
23863- atomic_inc(&vcc->stats->rx_err);
23864+ atomic_inc_unchecked(&vcc->stats->rx_err);
23865 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
23866 NS_MAX_IOVECS);
23867 NS_PRV_IOVCNT(iovb) = 0;
23868@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns
23869 ("nicstar%d: Expected a small buffer, and this is not one.\n",
23870 card->index);
23871 which_list(card, skb);
23872- atomic_inc(&vcc->stats->rx_err);
23873+ atomic_inc_unchecked(&vcc->stats->rx_err);
23874 recycle_rx_buf(card, skb);
23875 vc->rx_iov = NULL;
23876 recycle_iov_buf(card, iovb);
23877@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns
23878 ("nicstar%d: Expected a large buffer, and this is not one.\n",
23879 card->index);
23880 which_list(card, skb);
23881- atomic_inc(&vcc->stats->rx_err);
23882+ atomic_inc_unchecked(&vcc->stats->rx_err);
23883 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
23884 NS_PRV_IOVCNT(iovb));
23885 vc->rx_iov = NULL;
23886@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns
23887 printk(" - PDU size mismatch.\n");
23888 else
23889 printk(".\n");
23890- atomic_inc(&vcc->stats->rx_err);
23891+ atomic_inc_unchecked(&vcc->stats->rx_err);
23892 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
23893 NS_PRV_IOVCNT(iovb));
23894 vc->rx_iov = NULL;
23895@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns
23896 /* skb points to a small buffer */
23897 if (!atm_charge(vcc, skb->truesize)) {
23898 push_rxbufs(card, skb);
23899- atomic_inc(&vcc->stats->rx_drop);
23900+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23901 } else {
23902 skb_put(skb, len);
23903 dequeue_sm_buf(card, skb);
23904@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns
23905 ATM_SKB(skb)->vcc = vcc;
23906 __net_timestamp(skb);
23907 vcc->push(vcc, skb);
23908- atomic_inc(&vcc->stats->rx);
23909+ atomic_inc_unchecked(&vcc->stats->rx);
23910 }
23911 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
23912 struct sk_buff *sb;
23913@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns
23914 if (len <= NS_SMBUFSIZE) {
23915 if (!atm_charge(vcc, sb->truesize)) {
23916 push_rxbufs(card, sb);
23917- atomic_inc(&vcc->stats->rx_drop);
23918+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23919 } else {
23920 skb_put(sb, len);
23921 dequeue_sm_buf(card, sb);
23922@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns
23923 ATM_SKB(sb)->vcc = vcc;
23924 __net_timestamp(sb);
23925 vcc->push(vcc, sb);
23926- atomic_inc(&vcc->stats->rx);
23927+ atomic_inc_unchecked(&vcc->stats->rx);
23928 }
23929
23930 push_rxbufs(card, skb);
23931@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns
23932
23933 if (!atm_charge(vcc, skb->truesize)) {
23934 push_rxbufs(card, skb);
23935- atomic_inc(&vcc->stats->rx_drop);
23936+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23937 } else {
23938 dequeue_lg_buf(card, skb);
23939 #ifdef NS_USE_DESTRUCTORS
23940@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns
23941 ATM_SKB(skb)->vcc = vcc;
23942 __net_timestamp(skb);
23943 vcc->push(vcc, skb);
23944- atomic_inc(&vcc->stats->rx);
23945+ atomic_inc_unchecked(&vcc->stats->rx);
23946 }
23947
23948 push_rxbufs(card, sb);
23949@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns
23950 printk
23951 ("nicstar%d: Out of huge buffers.\n",
23952 card->index);
23953- atomic_inc(&vcc->stats->rx_drop);
23954+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23955 recycle_iovec_rx_bufs(card,
23956 (struct iovec *)
23957 iovb->data,
23958@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns
23959 card->hbpool.count++;
23960 } else
23961 dev_kfree_skb_any(hb);
23962- atomic_inc(&vcc->stats->rx_drop);
23963+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23964 } else {
23965 /* Copy the small buffer to the huge buffer */
23966 sb = (struct sk_buff *)iov->iov_base;
23967@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns
23968 #endif /* NS_USE_DESTRUCTORS */
23969 __net_timestamp(hb);
23970 vcc->push(vcc, hb);
23971- atomic_inc(&vcc->stats->rx);
23972+ atomic_inc_unchecked(&vcc->stats->rx);
23973 }
23974 }
23975
23976diff -urNp linux-3.0.4/drivers/atm/solos-pci.c linux-3.0.4/drivers/atm/solos-pci.c
23977--- linux-3.0.4/drivers/atm/solos-pci.c 2011-07-21 22:17:23.000000000 -0400
23978+++ linux-3.0.4/drivers/atm/solos-pci.c 2011-08-23 21:48:14.000000000 -0400
23979@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
23980 }
23981 atm_charge(vcc, skb->truesize);
23982 vcc->push(vcc, skb);
23983- atomic_inc(&vcc->stats->rx);
23984+ atomic_inc_unchecked(&vcc->stats->rx);
23985 break;
23986
23987 case PKT_STATUS:
23988@@ -899,6 +899,8 @@ static int print_buffer(struct sk_buff *
23989 char msg[500];
23990 char item[10];
23991
23992+ pax_track_stack();
23993+
23994 len = buf->len;
23995 for (i = 0; i < len; i++){
23996 if(i % 8 == 0)
23997@@ -1008,7 +1010,7 @@ static uint32_t fpga_tx(struct solos_car
23998 vcc = SKB_CB(oldskb)->vcc;
23999
24000 if (vcc) {
24001- atomic_inc(&vcc->stats->tx);
24002+ atomic_inc_unchecked(&vcc->stats->tx);
24003 solos_pop(vcc, oldskb);
24004 } else
24005 dev_kfree_skb_irq(oldskb);
24006diff -urNp linux-3.0.4/drivers/atm/suni.c linux-3.0.4/drivers/atm/suni.c
24007--- linux-3.0.4/drivers/atm/suni.c 2011-07-21 22:17:23.000000000 -0400
24008+++ linux-3.0.4/drivers/atm/suni.c 2011-08-23 21:47:55.000000000 -0400
24009@@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
24010
24011
24012 #define ADD_LIMITED(s,v) \
24013- atomic_add((v),&stats->s); \
24014- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
24015+ atomic_add_unchecked((v),&stats->s); \
24016+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
24017
24018
24019 static void suni_hz(unsigned long from_timer)
24020diff -urNp linux-3.0.4/drivers/atm/uPD98402.c linux-3.0.4/drivers/atm/uPD98402.c
24021--- linux-3.0.4/drivers/atm/uPD98402.c 2011-07-21 22:17:23.000000000 -0400
24022+++ linux-3.0.4/drivers/atm/uPD98402.c 2011-08-23 21:47:55.000000000 -0400
24023@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *d
24024 struct sonet_stats tmp;
24025 int error = 0;
24026
24027- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
24028+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
24029 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
24030 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
24031 if (zero && !error) {
24032@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev
24033
24034
24035 #define ADD_LIMITED(s,v) \
24036- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
24037- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
24038- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
24039+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
24040+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
24041+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
24042
24043
24044 static void stat_event(struct atm_dev *dev)
24045@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev
24046 if (reason & uPD98402_INT_PFM) stat_event(dev);
24047 if (reason & uPD98402_INT_PCO) {
24048 (void) GET(PCOCR); /* clear interrupt cause */
24049- atomic_add(GET(HECCT),
24050+ atomic_add_unchecked(GET(HECCT),
24051 &PRIV(dev)->sonet_stats.uncorr_hcs);
24052 }
24053 if ((reason & uPD98402_INT_RFO) &&
24054@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev
24055 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
24056 uPD98402_INT_LOS),PIMR); /* enable them */
24057 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
24058- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
24059- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
24060- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
24061+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
24062+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
24063+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
24064 return 0;
24065 }
24066
24067diff -urNp linux-3.0.4/drivers/atm/zatm.c linux-3.0.4/drivers/atm/zatm.c
24068--- linux-3.0.4/drivers/atm/zatm.c 2011-07-21 22:17:23.000000000 -0400
24069+++ linux-3.0.4/drivers/atm/zatm.c 2011-08-23 21:47:55.000000000 -0400
24070@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
24071 }
24072 if (!size) {
24073 dev_kfree_skb_irq(skb);
24074- if (vcc) atomic_inc(&vcc->stats->rx_err);
24075+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
24076 continue;
24077 }
24078 if (!atm_charge(vcc,skb->truesize)) {
24079@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
24080 skb->len = size;
24081 ATM_SKB(skb)->vcc = vcc;
24082 vcc->push(vcc,skb);
24083- atomic_inc(&vcc->stats->rx);
24084+ atomic_inc_unchecked(&vcc->stats->rx);
24085 }
24086 zout(pos & 0xffff,MTA(mbx));
24087 #if 0 /* probably a stupid idea */
24088@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
24089 skb_queue_head(&zatm_vcc->backlog,skb);
24090 break;
24091 }
24092- atomic_inc(&vcc->stats->tx);
24093+ atomic_inc_unchecked(&vcc->stats->tx);
24094 wake_up(&zatm_vcc->tx_wait);
24095 }
24096
24097diff -urNp linux-3.0.4/drivers/base/devtmpfs.c linux-3.0.4/drivers/base/devtmpfs.c
24098--- linux-3.0.4/drivers/base/devtmpfs.c 2011-07-21 22:17:23.000000000 -0400
24099+++ linux-3.0.4/drivers/base/devtmpfs.c 2011-10-06 04:17:55.000000000 -0400
24100@@ -357,7 +357,7 @@ int devtmpfs_mount(const char *mntdir)
24101 if (!dev_mnt)
24102 return 0;
24103
24104- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
24105+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
24106 if (err)
24107 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
24108 else
24109diff -urNp linux-3.0.4/drivers/base/power/wakeup.c linux-3.0.4/drivers/base/power/wakeup.c
24110--- linux-3.0.4/drivers/base/power/wakeup.c 2011-07-21 22:17:23.000000000 -0400
24111+++ linux-3.0.4/drivers/base/power/wakeup.c 2011-08-23 21:47:55.000000000 -0400
24112@@ -29,14 +29,14 @@ bool events_check_enabled;
24113 * They need to be modified together atomically, so it's better to use one
24114 * atomic variable to hold them both.
24115 */
24116-static atomic_t combined_event_count = ATOMIC_INIT(0);
24117+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
24118
24119 #define IN_PROGRESS_BITS (sizeof(int) * 4)
24120 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
24121
24122 static void split_counters(unsigned int *cnt, unsigned int *inpr)
24123 {
24124- unsigned int comb = atomic_read(&combined_event_count);
24125+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
24126
24127 *cnt = (comb >> IN_PROGRESS_BITS);
24128 *inpr = comb & MAX_IN_PROGRESS;
24129@@ -350,7 +350,7 @@ static void wakeup_source_activate(struc
24130 ws->last_time = ktime_get();
24131
24132 /* Increment the counter of events in progress. */
24133- atomic_inc(&combined_event_count);
24134+ atomic_inc_unchecked(&combined_event_count);
24135 }
24136
24137 /**
24138@@ -440,7 +440,7 @@ static void wakeup_source_deactivate(str
24139 * Increment the counter of registered wakeup events and decrement the
24140 * couter of wakeup events in progress simultaneously.
24141 */
24142- atomic_add(MAX_IN_PROGRESS, &combined_event_count);
24143+ atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
24144 }
24145
24146 /**
24147diff -urNp linux-3.0.4/drivers/block/cciss.c linux-3.0.4/drivers/block/cciss.c
24148--- linux-3.0.4/drivers/block/cciss.c 2011-07-21 22:17:23.000000000 -0400
24149+++ linux-3.0.4/drivers/block/cciss.c 2011-08-23 21:48:14.000000000 -0400
24150@@ -1179,6 +1179,8 @@ static int cciss_ioctl32_passthru(struct
24151 int err;
24152 u32 cp;
24153
24154+ memset(&arg64, 0, sizeof(arg64));
24155+
24156 err = 0;
24157 err |=
24158 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
24159@@ -2986,7 +2988,7 @@ static void start_io(ctlr_info_t *h)
24160 while (!list_empty(&h->reqQ)) {
24161 c = list_entry(h->reqQ.next, CommandList_struct, list);
24162 /* can't do anything if fifo is full */
24163- if ((h->access.fifo_full(h))) {
24164+ if ((h->access->fifo_full(h))) {
24165 dev_warn(&h->pdev->dev, "fifo full\n");
24166 break;
24167 }
24168@@ -2996,7 +2998,7 @@ static void start_io(ctlr_info_t *h)
24169 h->Qdepth--;
24170
24171 /* Tell the controller execute command */
24172- h->access.submit_command(h, c);
24173+ h->access->submit_command(h, c);
24174
24175 /* Put job onto the completed Q */
24176 addQ(&h->cmpQ, c);
24177@@ -3422,17 +3424,17 @@ startio:
24178
24179 static inline unsigned long get_next_completion(ctlr_info_t *h)
24180 {
24181- return h->access.command_completed(h);
24182+ return h->access->command_completed(h);
24183 }
24184
24185 static inline int interrupt_pending(ctlr_info_t *h)
24186 {
24187- return h->access.intr_pending(h);
24188+ return h->access->intr_pending(h);
24189 }
24190
24191 static inline long interrupt_not_for_us(ctlr_info_t *h)
24192 {
24193- return ((h->access.intr_pending(h) == 0) ||
24194+ return ((h->access->intr_pending(h) == 0) ||
24195 (h->interrupts_enabled == 0));
24196 }
24197
24198@@ -3465,7 +3467,7 @@ static inline u32 next_command(ctlr_info
24199 u32 a;
24200
24201 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
24202- return h->access.command_completed(h);
24203+ return h->access->command_completed(h);
24204
24205 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
24206 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
24207@@ -4020,7 +4022,7 @@ static void __devinit cciss_put_controll
24208 trans_support & CFGTBL_Trans_use_short_tags);
24209
24210 /* Change the access methods to the performant access methods */
24211- h->access = SA5_performant_access;
24212+ h->access = &SA5_performant_access;
24213 h->transMethod = CFGTBL_Trans_Performant;
24214
24215 return;
24216@@ -4292,7 +4294,7 @@ static int __devinit cciss_pci_init(ctlr
24217 if (prod_index < 0)
24218 return -ENODEV;
24219 h->product_name = products[prod_index].product_name;
24220- h->access = *(products[prod_index].access);
24221+ h->access = products[prod_index].access;
24222
24223 if (cciss_board_disabled(h)) {
24224 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
24225@@ -5002,7 +5004,7 @@ reinit_after_soft_reset:
24226 }
24227
24228 /* make sure the board interrupts are off */
24229- h->access.set_intr_mask(h, CCISS_INTR_OFF);
24230+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
24231 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
24232 if (rc)
24233 goto clean2;
24234@@ -5054,7 +5056,7 @@ reinit_after_soft_reset:
24235 * fake ones to scoop up any residual completions.
24236 */
24237 spin_lock_irqsave(&h->lock, flags);
24238- h->access.set_intr_mask(h, CCISS_INTR_OFF);
24239+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
24240 spin_unlock_irqrestore(&h->lock, flags);
24241 free_irq(h->intr[PERF_MODE_INT], h);
24242 rc = cciss_request_irq(h, cciss_msix_discard_completions,
24243@@ -5074,9 +5076,9 @@ reinit_after_soft_reset:
24244 dev_info(&h->pdev->dev, "Board READY.\n");
24245 dev_info(&h->pdev->dev,
24246 "Waiting for stale completions to drain.\n");
24247- h->access.set_intr_mask(h, CCISS_INTR_ON);
24248+ h->access->set_intr_mask(h, CCISS_INTR_ON);
24249 msleep(10000);
24250- h->access.set_intr_mask(h, CCISS_INTR_OFF);
24251+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
24252
24253 rc = controller_reset_failed(h->cfgtable);
24254 if (rc)
24255@@ -5099,7 +5101,7 @@ reinit_after_soft_reset:
24256 cciss_scsi_setup(h);
24257
24258 /* Turn the interrupts on so we can service requests */
24259- h->access.set_intr_mask(h, CCISS_INTR_ON);
24260+ h->access->set_intr_mask(h, CCISS_INTR_ON);
24261
24262 /* Get the firmware version */
24263 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
24264@@ -5171,7 +5173,7 @@ static void cciss_shutdown(struct pci_de
24265 kfree(flush_buf);
24266 if (return_code != IO_OK)
24267 dev_warn(&h->pdev->dev, "Error flushing cache\n");
24268- h->access.set_intr_mask(h, CCISS_INTR_OFF);
24269+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
24270 free_irq(h->intr[PERF_MODE_INT], h);
24271 }
24272
24273diff -urNp linux-3.0.4/drivers/block/cciss.h linux-3.0.4/drivers/block/cciss.h
24274--- linux-3.0.4/drivers/block/cciss.h 2011-09-02 18:11:21.000000000 -0400
24275+++ linux-3.0.4/drivers/block/cciss.h 2011-08-23 21:47:55.000000000 -0400
24276@@ -100,7 +100,7 @@ struct ctlr_info
24277 /* information about each logical volume */
24278 drive_info_struct *drv[CISS_MAX_LUN];
24279
24280- struct access_method access;
24281+ struct access_method *access;
24282
24283 /* queue and queue Info */
24284 struct list_head reqQ;
24285diff -urNp linux-3.0.4/drivers/block/cpqarray.c linux-3.0.4/drivers/block/cpqarray.c
24286--- linux-3.0.4/drivers/block/cpqarray.c 2011-07-21 22:17:23.000000000 -0400
24287+++ linux-3.0.4/drivers/block/cpqarray.c 2011-08-23 21:48:14.000000000 -0400
24288@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_c
24289 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
24290 goto Enomem4;
24291 }
24292- hba[i]->access.set_intr_mask(hba[i], 0);
24293+ hba[i]->access->set_intr_mask(hba[i], 0);
24294 if (request_irq(hba[i]->intr, do_ida_intr,
24295 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
24296 {
24297@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_c
24298 add_timer(&hba[i]->timer);
24299
24300 /* Enable IRQ now that spinlock and rate limit timer are set up */
24301- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
24302+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
24303
24304 for(j=0; j<NWD; j++) {
24305 struct gendisk *disk = ida_gendisk[i][j];
24306@@ -694,7 +694,7 @@ DBGINFO(
24307 for(i=0; i<NR_PRODUCTS; i++) {
24308 if (board_id == products[i].board_id) {
24309 c->product_name = products[i].product_name;
24310- c->access = *(products[i].access);
24311+ c->access = products[i].access;
24312 break;
24313 }
24314 }
24315@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detec
24316 hba[ctlr]->intr = intr;
24317 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
24318 hba[ctlr]->product_name = products[j].product_name;
24319- hba[ctlr]->access = *(products[j].access);
24320+ hba[ctlr]->access = products[j].access;
24321 hba[ctlr]->ctlr = ctlr;
24322 hba[ctlr]->board_id = board_id;
24323 hba[ctlr]->pci_dev = NULL; /* not PCI */
24324@@ -911,6 +911,8 @@ static void do_ida_request(struct reques
24325 struct scatterlist tmp_sg[SG_MAX];
24326 int i, dir, seg;
24327
24328+ pax_track_stack();
24329+
24330 queue_next:
24331 creq = blk_peek_request(q);
24332 if (!creq)
24333@@ -980,7 +982,7 @@ static void start_io(ctlr_info_t *h)
24334
24335 while((c = h->reqQ) != NULL) {
24336 /* Can't do anything if we're busy */
24337- if (h->access.fifo_full(h) == 0)
24338+ if (h->access->fifo_full(h) == 0)
24339 return;
24340
24341 /* Get the first entry from the request Q */
24342@@ -988,7 +990,7 @@ static void start_io(ctlr_info_t *h)
24343 h->Qdepth--;
24344
24345 /* Tell the controller to do our bidding */
24346- h->access.submit_command(h, c);
24347+ h->access->submit_command(h, c);
24348
24349 /* Get onto the completion Q */
24350 addQ(&h->cmpQ, c);
24351@@ -1050,7 +1052,7 @@ static irqreturn_t do_ida_intr(int irq,
24352 unsigned long flags;
24353 __u32 a,a1;
24354
24355- istat = h->access.intr_pending(h);
24356+ istat = h->access->intr_pending(h);
24357 /* Is this interrupt for us? */
24358 if (istat == 0)
24359 return IRQ_NONE;
24360@@ -1061,7 +1063,7 @@ static irqreturn_t do_ida_intr(int irq,
24361 */
24362 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
24363 if (istat & FIFO_NOT_EMPTY) {
24364- while((a = h->access.command_completed(h))) {
24365+ while((a = h->access->command_completed(h))) {
24366 a1 = a; a &= ~3;
24367 if ((c = h->cmpQ) == NULL)
24368 {
24369@@ -1449,11 +1451,11 @@ static int sendcmd(
24370 /*
24371 * Disable interrupt
24372 */
24373- info_p->access.set_intr_mask(info_p, 0);
24374+ info_p->access->set_intr_mask(info_p, 0);
24375 /* Make sure there is room in the command FIFO */
24376 /* Actually it should be completely empty at this time. */
24377 for (i = 200000; i > 0; i--) {
24378- temp = info_p->access.fifo_full(info_p);
24379+ temp = info_p->access->fifo_full(info_p);
24380 if (temp != 0) {
24381 break;
24382 }
24383@@ -1466,7 +1468,7 @@ DBG(
24384 /*
24385 * Send the cmd
24386 */
24387- info_p->access.submit_command(info_p, c);
24388+ info_p->access->submit_command(info_p, c);
24389 complete = pollcomplete(ctlr);
24390
24391 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
24392@@ -1549,9 +1551,9 @@ static int revalidate_allvol(ctlr_info_t
24393 * we check the new geometry. Then turn interrupts back on when
24394 * we're done.
24395 */
24396- host->access.set_intr_mask(host, 0);
24397+ host->access->set_intr_mask(host, 0);
24398 getgeometry(ctlr);
24399- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
24400+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
24401
24402 for(i=0; i<NWD; i++) {
24403 struct gendisk *disk = ida_gendisk[ctlr][i];
24404@@ -1591,7 +1593,7 @@ static int pollcomplete(int ctlr)
24405 /* Wait (up to 2 seconds) for a command to complete */
24406
24407 for (i = 200000; i > 0; i--) {
24408- done = hba[ctlr]->access.command_completed(hba[ctlr]);
24409+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
24410 if (done == 0) {
24411 udelay(10); /* a short fixed delay */
24412 } else
24413diff -urNp linux-3.0.4/drivers/block/cpqarray.h linux-3.0.4/drivers/block/cpqarray.h
24414--- linux-3.0.4/drivers/block/cpqarray.h 2011-07-21 22:17:23.000000000 -0400
24415+++ linux-3.0.4/drivers/block/cpqarray.h 2011-08-23 21:47:55.000000000 -0400
24416@@ -99,7 +99,7 @@ struct ctlr_info {
24417 drv_info_t drv[NWD];
24418 struct proc_dir_entry *proc;
24419
24420- struct access_method access;
24421+ struct access_method *access;
24422
24423 cmdlist_t *reqQ;
24424 cmdlist_t *cmpQ;
24425diff -urNp linux-3.0.4/drivers/block/DAC960.c linux-3.0.4/drivers/block/DAC960.c
24426--- linux-3.0.4/drivers/block/DAC960.c 2011-07-21 22:17:23.000000000 -0400
24427+++ linux-3.0.4/drivers/block/DAC960.c 2011-08-23 21:48:14.000000000 -0400
24428@@ -1980,6 +1980,8 @@ static bool DAC960_V1_ReadDeviceConfigur
24429 unsigned long flags;
24430 int Channel, TargetID;
24431
24432+ pax_track_stack();
24433+
24434 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
24435 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
24436 sizeof(DAC960_SCSI_Inquiry_T) +
24437diff -urNp linux-3.0.4/drivers/block/drbd/drbd_int.h linux-3.0.4/drivers/block/drbd/drbd_int.h
24438--- linux-3.0.4/drivers/block/drbd/drbd_int.h 2011-07-21 22:17:23.000000000 -0400
24439+++ linux-3.0.4/drivers/block/drbd/drbd_int.h 2011-10-06 04:17:55.000000000 -0400
24440@@ -737,7 +737,7 @@ struct drbd_request;
24441 struct drbd_epoch {
24442 struct list_head list;
24443 unsigned int barrier_nr;
24444- atomic_t epoch_size; /* increased on every request added. */
24445+ atomic_unchecked_t epoch_size; /* increased on every request added. */
24446 atomic_t active; /* increased on every req. added, and dec on every finished. */
24447 unsigned long flags;
24448 };
24449@@ -1109,7 +1109,7 @@ struct drbd_conf {
24450 void *int_dig_in;
24451 void *int_dig_vv;
24452 wait_queue_head_t seq_wait;
24453- atomic_t packet_seq;
24454+ atomic_unchecked_t packet_seq;
24455 unsigned int peer_seq;
24456 spinlock_t peer_seq_lock;
24457 unsigned int minor;
24458@@ -1618,30 +1618,30 @@ static inline int drbd_setsockopt(struct
24459
24460 static inline void drbd_tcp_cork(struct socket *sock)
24461 {
24462- int __user val = 1;
24463+ int val = 1;
24464 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
24465- (char __user *)&val, sizeof(val));
24466+ (char __force_user *)&val, sizeof(val));
24467 }
24468
24469 static inline void drbd_tcp_uncork(struct socket *sock)
24470 {
24471- int __user val = 0;
24472+ int val = 0;
24473 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
24474- (char __user *)&val, sizeof(val));
24475+ (char __force_user *)&val, sizeof(val));
24476 }
24477
24478 static inline void drbd_tcp_nodelay(struct socket *sock)
24479 {
24480- int __user val = 1;
24481+ int val = 1;
24482 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
24483- (char __user *)&val, sizeof(val));
24484+ (char __force_user *)&val, sizeof(val));
24485 }
24486
24487 static inline void drbd_tcp_quickack(struct socket *sock)
24488 {
24489- int __user val = 2;
24490+ int val = 2;
24491 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
24492- (char __user *)&val, sizeof(val));
24493+ (char __force_user *)&val, sizeof(val));
24494 }
24495
24496 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
24497diff -urNp linux-3.0.4/drivers/block/drbd/drbd_main.c linux-3.0.4/drivers/block/drbd/drbd_main.c
24498--- linux-3.0.4/drivers/block/drbd/drbd_main.c 2011-07-21 22:17:23.000000000 -0400
24499+++ linux-3.0.4/drivers/block/drbd/drbd_main.c 2011-08-23 21:47:55.000000000 -0400
24500@@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_co
24501 p.sector = sector;
24502 p.block_id = block_id;
24503 p.blksize = blksize;
24504- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
24505+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
24506
24507 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
24508 return false;
24509@@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *m
24510 p.sector = cpu_to_be64(req->sector);
24511 p.block_id = (unsigned long)req;
24512 p.seq_num = cpu_to_be32(req->seq_num =
24513- atomic_add_return(1, &mdev->packet_seq));
24514+ atomic_add_return_unchecked(1, &mdev->packet_seq));
24515
24516 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
24517
24518@@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_
24519 atomic_set(&mdev->unacked_cnt, 0);
24520 atomic_set(&mdev->local_cnt, 0);
24521 atomic_set(&mdev->net_cnt, 0);
24522- atomic_set(&mdev->packet_seq, 0);
24523+ atomic_set_unchecked(&mdev->packet_seq, 0);
24524 atomic_set(&mdev->pp_in_use, 0);
24525 atomic_set(&mdev->pp_in_use_by_net, 0);
24526 atomic_set(&mdev->rs_sect_in, 0);
24527@@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf
24528 mdev->receiver.t_state);
24529
24530 /* no need to lock it, I'm the only thread alive */
24531- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
24532- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
24533+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
24534+ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
24535 mdev->al_writ_cnt =
24536 mdev->bm_writ_cnt =
24537 mdev->read_cnt =
24538diff -urNp linux-3.0.4/drivers/block/drbd/drbd_nl.c linux-3.0.4/drivers/block/drbd/drbd_nl.c
24539--- linux-3.0.4/drivers/block/drbd/drbd_nl.c 2011-07-21 22:17:23.000000000 -0400
24540+++ linux-3.0.4/drivers/block/drbd/drbd_nl.c 2011-08-23 21:47:55.000000000 -0400
24541@@ -2359,7 +2359,7 @@ static void drbd_connector_callback(stru
24542 module_put(THIS_MODULE);
24543 }
24544
24545-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
24546+static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
24547
24548 static unsigned short *
24549 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
24550@@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *
24551 cn_reply->id.idx = CN_IDX_DRBD;
24552 cn_reply->id.val = CN_VAL_DRBD;
24553
24554- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
24555+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
24556 cn_reply->ack = 0; /* not used here. */
24557 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
24558 (int)((char *)tl - (char *)reply->tag_list);
24559@@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_co
24560 cn_reply->id.idx = CN_IDX_DRBD;
24561 cn_reply->id.val = CN_VAL_DRBD;
24562
24563- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
24564+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
24565 cn_reply->ack = 0; /* not used here. */
24566 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
24567 (int)((char *)tl - (char *)reply->tag_list);
24568@@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mde
24569 cn_reply->id.idx = CN_IDX_DRBD;
24570 cn_reply->id.val = CN_VAL_DRBD;
24571
24572- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
24573+ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
24574 cn_reply->ack = 0; // not used here.
24575 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
24576 (int)((char*)tl - (char*)reply->tag_list);
24577@@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drb
24578 cn_reply->id.idx = CN_IDX_DRBD;
24579 cn_reply->id.val = CN_VAL_DRBD;
24580
24581- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
24582+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
24583 cn_reply->ack = 0; /* not used here. */
24584 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
24585 (int)((char *)tl - (char *)reply->tag_list);
24586diff -urNp linux-3.0.4/drivers/block/drbd/drbd_receiver.c linux-3.0.4/drivers/block/drbd/drbd_receiver.c
24587--- linux-3.0.4/drivers/block/drbd/drbd_receiver.c 2011-07-21 22:17:23.000000000 -0400
24588+++ linux-3.0.4/drivers/block/drbd/drbd_receiver.c 2011-08-23 21:47:55.000000000 -0400
24589@@ -894,7 +894,7 @@ retry:
24590 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
24591 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
24592
24593- atomic_set(&mdev->packet_seq, 0);
24594+ atomic_set_unchecked(&mdev->packet_seq, 0);
24595 mdev->peer_seq = 0;
24596
24597 drbd_thread_start(&mdev->asender);
24598@@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish
24599 do {
24600 next_epoch = NULL;
24601
24602- epoch_size = atomic_read(&epoch->epoch_size);
24603+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
24604
24605 switch (ev & ~EV_CLEANUP) {
24606 case EV_PUT:
24607@@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish
24608 rv = FE_DESTROYED;
24609 } else {
24610 epoch->flags = 0;
24611- atomic_set(&epoch->epoch_size, 0);
24612+ atomic_set_unchecked(&epoch->epoch_size, 0);
24613 /* atomic_set(&epoch->active, 0); is already zero */
24614 if (rv == FE_STILL_LIVE)
24615 rv = FE_RECYCLED;
24616@@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_c
24617 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
24618 drbd_flush(mdev);
24619
24620- if (atomic_read(&mdev->current_epoch->epoch_size)) {
24621+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
24622 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
24623 if (epoch)
24624 break;
24625 }
24626
24627 epoch = mdev->current_epoch;
24628- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
24629+ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
24630
24631 D_ASSERT(atomic_read(&epoch->active) == 0);
24632 D_ASSERT(epoch->flags == 0);
24633@@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_c
24634 }
24635
24636 epoch->flags = 0;
24637- atomic_set(&epoch->epoch_size, 0);
24638+ atomic_set_unchecked(&epoch->epoch_size, 0);
24639 atomic_set(&epoch->active, 0);
24640
24641 spin_lock(&mdev->epoch_lock);
24642- if (atomic_read(&mdev->current_epoch->epoch_size)) {
24643+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
24644 list_add(&epoch->list, &mdev->current_epoch->list);
24645 mdev->current_epoch = epoch;
24646 mdev->epochs++;
24647@@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf
24648 spin_unlock(&mdev->peer_seq_lock);
24649
24650 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
24651- atomic_inc(&mdev->current_epoch->epoch_size);
24652+ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
24653 return drbd_drain_block(mdev, data_size);
24654 }
24655
24656@@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf
24657
24658 spin_lock(&mdev->epoch_lock);
24659 e->epoch = mdev->current_epoch;
24660- atomic_inc(&e->epoch->epoch_size);
24661+ atomic_inc_unchecked(&e->epoch->epoch_size);
24662 atomic_inc(&e->epoch->active);
24663 spin_unlock(&mdev->epoch_lock);
24664
24665@@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_
24666 D_ASSERT(list_empty(&mdev->done_ee));
24667
24668 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
24669- atomic_set(&mdev->current_epoch->epoch_size, 0);
24670+ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
24671 D_ASSERT(list_empty(&mdev->current_epoch->list));
24672 }
24673
24674diff -urNp linux-3.0.4/drivers/block/loop.c linux-3.0.4/drivers/block/loop.c
24675--- linux-3.0.4/drivers/block/loop.c 2011-09-02 18:11:26.000000000 -0400
24676+++ linux-3.0.4/drivers/block/loop.c 2011-10-06 04:17:55.000000000 -0400
24677@@ -283,7 +283,7 @@ static int __do_lo_send_write(struct fil
24678 mm_segment_t old_fs = get_fs();
24679
24680 set_fs(get_ds());
24681- bw = file->f_op->write(file, buf, len, &pos);
24682+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
24683 set_fs(old_fs);
24684 if (likely(bw == len))
24685 return 0;
24686diff -urNp linux-3.0.4/drivers/block/nbd.c linux-3.0.4/drivers/block/nbd.c
24687--- linux-3.0.4/drivers/block/nbd.c 2011-07-21 22:17:23.000000000 -0400
24688+++ linux-3.0.4/drivers/block/nbd.c 2011-08-23 21:48:14.000000000 -0400
24689@@ -157,6 +157,8 @@ static int sock_xmit(struct nbd_device *
24690 struct kvec iov;
24691 sigset_t blocked, oldset;
24692
24693+ pax_track_stack();
24694+
24695 if (unlikely(!sock)) {
24696 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
24697 lo->disk->disk_name, (send ? "send" : "recv"));
24698@@ -572,6 +574,8 @@ static void do_nbd_request(struct reques
24699 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
24700 unsigned int cmd, unsigned long arg)
24701 {
24702+ pax_track_stack();
24703+
24704 switch (cmd) {
24705 case NBD_DISCONNECT: {
24706 struct request sreq;
24707diff -urNp linux-3.0.4/drivers/char/agp/frontend.c linux-3.0.4/drivers/char/agp/frontend.c
24708--- linux-3.0.4/drivers/char/agp/frontend.c 2011-07-21 22:17:23.000000000 -0400
24709+++ linux-3.0.4/drivers/char/agp/frontend.c 2011-08-23 21:47:55.000000000 -0400
24710@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct ag
24711 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
24712 return -EFAULT;
24713
24714- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
24715+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
24716 return -EFAULT;
24717
24718 client = agp_find_client_by_pid(reserve.pid);
24719diff -urNp linux-3.0.4/drivers/char/briq_panel.c linux-3.0.4/drivers/char/briq_panel.c
24720--- linux-3.0.4/drivers/char/briq_panel.c 2011-07-21 22:17:23.000000000 -0400
24721+++ linux-3.0.4/drivers/char/briq_panel.c 2011-08-23 21:48:14.000000000 -0400
24722@@ -9,6 +9,7 @@
24723 #include <linux/types.h>
24724 #include <linux/errno.h>
24725 #include <linux/tty.h>
24726+#include <linux/mutex.h>
24727 #include <linux/timer.h>
24728 #include <linux/kernel.h>
24729 #include <linux/wait.h>
24730@@ -34,6 +35,7 @@ static int vfd_is_open;
24731 static unsigned char vfd[40];
24732 static int vfd_cursor;
24733 static unsigned char ledpb, led;
24734+static DEFINE_MUTEX(vfd_mutex);
24735
24736 static void update_vfd(void)
24737 {
24738@@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct f
24739 if (!vfd_is_open)
24740 return -EBUSY;
24741
24742+ mutex_lock(&vfd_mutex);
24743 for (;;) {
24744 char c;
24745 if (!indx)
24746 break;
24747- if (get_user(c, buf))
24748+ if (get_user(c, buf)) {
24749+ mutex_unlock(&vfd_mutex);
24750 return -EFAULT;
24751+ }
24752 if (esc) {
24753 set_led(c);
24754 esc = 0;
24755@@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct f
24756 buf++;
24757 }
24758 update_vfd();
24759+ mutex_unlock(&vfd_mutex);
24760
24761 return len;
24762 }
24763diff -urNp linux-3.0.4/drivers/char/genrtc.c linux-3.0.4/drivers/char/genrtc.c
24764--- linux-3.0.4/drivers/char/genrtc.c 2011-07-21 22:17:23.000000000 -0400
24765+++ linux-3.0.4/drivers/char/genrtc.c 2011-08-23 21:48:14.000000000 -0400
24766@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *fi
24767 switch (cmd) {
24768
24769 case RTC_PLL_GET:
24770+ memset(&pll, 0, sizeof(pll));
24771 if (get_rtc_pll(&pll))
24772 return -EINVAL;
24773 else
24774diff -urNp linux-3.0.4/drivers/char/hpet.c linux-3.0.4/drivers/char/hpet.c
24775--- linux-3.0.4/drivers/char/hpet.c 2011-07-21 22:17:23.000000000 -0400
24776+++ linux-3.0.4/drivers/char/hpet.c 2011-08-23 21:47:55.000000000 -0400
24777@@ -572,7 +572,7 @@ static inline unsigned long hpet_time_di
24778 }
24779
24780 static int
24781-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
24782+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
24783 struct hpet_info *info)
24784 {
24785 struct hpet_timer __iomem *timer;
24786diff -urNp linux-3.0.4/drivers/char/ipmi/ipmi_msghandler.c linux-3.0.4/drivers/char/ipmi/ipmi_msghandler.c
24787--- linux-3.0.4/drivers/char/ipmi/ipmi_msghandler.c 2011-07-21 22:17:23.000000000 -0400
24788+++ linux-3.0.4/drivers/char/ipmi/ipmi_msghandler.c 2011-08-23 21:48:14.000000000 -0400
24789@@ -415,7 +415,7 @@ struct ipmi_smi {
24790 struct proc_dir_entry *proc_dir;
24791 char proc_dir_name[10];
24792
24793- atomic_t stats[IPMI_NUM_STATS];
24794+ atomic_unchecked_t stats[IPMI_NUM_STATS];
24795
24796 /*
24797 * run_to_completion duplicate of smb_info, smi_info
24798@@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
24799
24800
24801 #define ipmi_inc_stat(intf, stat) \
24802- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
24803+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
24804 #define ipmi_get_stat(intf, stat) \
24805- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
24806+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
24807
24808 static int is_lan_addr(struct ipmi_addr *addr)
24809 {
24810@@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
24811 INIT_LIST_HEAD(&intf->cmd_rcvrs);
24812 init_waitqueue_head(&intf->waitq);
24813 for (i = 0; i < IPMI_NUM_STATS; i++)
24814- atomic_set(&intf->stats[i], 0);
24815+ atomic_set_unchecked(&intf->stats[i], 0);
24816
24817 intf->proc_dir = NULL;
24818
24819@@ -4220,6 +4220,8 @@ static void send_panic_events(char *str)
24820 struct ipmi_smi_msg smi_msg;
24821 struct ipmi_recv_msg recv_msg;
24822
24823+ pax_track_stack();
24824+
24825 si = (struct ipmi_system_interface_addr *) &addr;
24826 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
24827 si->channel = IPMI_BMC_CHANNEL;
24828diff -urNp linux-3.0.4/drivers/char/ipmi/ipmi_si_intf.c linux-3.0.4/drivers/char/ipmi/ipmi_si_intf.c
24829--- linux-3.0.4/drivers/char/ipmi/ipmi_si_intf.c 2011-07-21 22:17:23.000000000 -0400
24830+++ linux-3.0.4/drivers/char/ipmi/ipmi_si_intf.c 2011-08-23 21:47:55.000000000 -0400
24831@@ -277,7 +277,7 @@ struct smi_info {
24832 unsigned char slave_addr;
24833
24834 /* Counters and things for the proc filesystem. */
24835- atomic_t stats[SI_NUM_STATS];
24836+ atomic_unchecked_t stats[SI_NUM_STATS];
24837
24838 struct task_struct *thread;
24839
24840@@ -286,9 +286,9 @@ struct smi_info {
24841 };
24842
24843 #define smi_inc_stat(smi, stat) \
24844- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
24845+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
24846 #define smi_get_stat(smi, stat) \
24847- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
24848+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
24849
24850 #define SI_MAX_PARMS 4
24851
24852@@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info
24853 atomic_set(&new_smi->req_events, 0);
24854 new_smi->run_to_completion = 0;
24855 for (i = 0; i < SI_NUM_STATS; i++)
24856- atomic_set(&new_smi->stats[i], 0);
24857+ atomic_set_unchecked(&new_smi->stats[i], 0);
24858
24859 new_smi->interrupt_disabled = 1;
24860 atomic_set(&new_smi->stop_operation, 0);
24861diff -urNp linux-3.0.4/drivers/char/Kconfig linux-3.0.4/drivers/char/Kconfig
24862--- linux-3.0.4/drivers/char/Kconfig 2011-07-21 22:17:23.000000000 -0400
24863+++ linux-3.0.4/drivers/char/Kconfig 2011-08-23 21:48:14.000000000 -0400
24864@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
24865
24866 config DEVKMEM
24867 bool "/dev/kmem virtual device support"
24868- default y
24869+ default n
24870+ depends on !GRKERNSEC_KMEM
24871 help
24872 Say Y here if you want to support the /dev/kmem device. The
24873 /dev/kmem device is rarely used, but can be used for certain
24874@@ -596,6 +597,7 @@ config DEVPORT
24875 bool
24876 depends on !M68K
24877 depends on ISA || PCI
24878+ depends on !GRKERNSEC_KMEM
24879 default y
24880
24881 source "drivers/s390/char/Kconfig"
24882diff -urNp linux-3.0.4/drivers/char/mem.c linux-3.0.4/drivers/char/mem.c
24883--- linux-3.0.4/drivers/char/mem.c 2011-07-21 22:17:23.000000000 -0400
24884+++ linux-3.0.4/drivers/char/mem.c 2011-08-23 21:48:14.000000000 -0400
24885@@ -18,6 +18,7 @@
24886 #include <linux/raw.h>
24887 #include <linux/tty.h>
24888 #include <linux/capability.h>
24889+#include <linux/security.h>
24890 #include <linux/ptrace.h>
24891 #include <linux/device.h>
24892 #include <linux/highmem.h>
24893@@ -34,6 +35,10 @@
24894 # include <linux/efi.h>
24895 #endif
24896
24897+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
24898+extern struct file_operations grsec_fops;
24899+#endif
24900+
24901 static inline unsigned long size_inside_page(unsigned long start,
24902 unsigned long size)
24903 {
24904@@ -65,9 +70,13 @@ static inline int range_is_allowed(unsig
24905
24906 while (cursor < to) {
24907 if (!devmem_is_allowed(pfn)) {
24908+#ifdef CONFIG_GRKERNSEC_KMEM
24909+ gr_handle_mem_readwrite(from, to);
24910+#else
24911 printk(KERN_INFO
24912 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
24913 current->comm, from, to);
24914+#endif
24915 return 0;
24916 }
24917 cursor += PAGE_SIZE;
24918@@ -75,6 +84,11 @@ static inline int range_is_allowed(unsig
24919 }
24920 return 1;
24921 }
24922+#elif defined(CONFIG_GRKERNSEC_KMEM)
24923+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
24924+{
24925+ return 0;
24926+}
24927 #else
24928 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
24929 {
24930@@ -117,6 +131,7 @@ static ssize_t read_mem(struct file *fil
24931
24932 while (count > 0) {
24933 unsigned long remaining;
24934+ char *temp;
24935
24936 sz = size_inside_page(p, count);
24937
24938@@ -132,7 +147,23 @@ static ssize_t read_mem(struct file *fil
24939 if (!ptr)
24940 return -EFAULT;
24941
24942- remaining = copy_to_user(buf, ptr, sz);
24943+#ifdef CONFIG_PAX_USERCOPY
24944+ temp = kmalloc(sz, GFP_KERNEL);
24945+ if (!temp) {
24946+ unxlate_dev_mem_ptr(p, ptr);
24947+ return -ENOMEM;
24948+ }
24949+ memcpy(temp, ptr, sz);
24950+#else
24951+ temp = ptr;
24952+#endif
24953+
24954+ remaining = copy_to_user(buf, temp, sz);
24955+
24956+#ifdef CONFIG_PAX_USERCOPY
24957+ kfree(temp);
24958+#endif
24959+
24960 unxlate_dev_mem_ptr(p, ptr);
24961 if (remaining)
24962 return -EFAULT;
24963@@ -395,9 +426,8 @@ static ssize_t read_kmem(struct file *fi
24964 size_t count, loff_t *ppos)
24965 {
24966 unsigned long p = *ppos;
24967- ssize_t low_count, read, sz;
24968+ ssize_t low_count, read, sz, err = 0;
24969 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
24970- int err = 0;
24971
24972 read = 0;
24973 if (p < (unsigned long) high_memory) {
24974@@ -419,6 +449,8 @@ static ssize_t read_kmem(struct file *fi
24975 }
24976 #endif
24977 while (low_count > 0) {
24978+ char *temp;
24979+
24980 sz = size_inside_page(p, low_count);
24981
24982 /*
24983@@ -428,7 +460,22 @@ static ssize_t read_kmem(struct file *fi
24984 */
24985 kbuf = xlate_dev_kmem_ptr((char *)p);
24986
24987- if (copy_to_user(buf, kbuf, sz))
24988+#ifdef CONFIG_PAX_USERCOPY
24989+ temp = kmalloc(sz, GFP_KERNEL);
24990+ if (!temp)
24991+ return -ENOMEM;
24992+ memcpy(temp, kbuf, sz);
24993+#else
24994+ temp = kbuf;
24995+#endif
24996+
24997+ err = copy_to_user(buf, temp, sz);
24998+
24999+#ifdef CONFIG_PAX_USERCOPY
25000+ kfree(temp);
25001+#endif
25002+
25003+ if (err)
25004 return -EFAULT;
25005 buf += sz;
25006 p += sz;
25007@@ -866,6 +913,9 @@ static const struct memdev {
25008 #ifdef CONFIG_CRASH_DUMP
25009 [12] = { "oldmem", 0, &oldmem_fops, NULL },
25010 #endif
25011+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
25012+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
25013+#endif
25014 };
25015
25016 static int memory_open(struct inode *inode, struct file *filp)
25017diff -urNp linux-3.0.4/drivers/char/nvram.c linux-3.0.4/drivers/char/nvram.c
25018--- linux-3.0.4/drivers/char/nvram.c 2011-07-21 22:17:23.000000000 -0400
25019+++ linux-3.0.4/drivers/char/nvram.c 2011-08-23 21:47:55.000000000 -0400
25020@@ -246,7 +246,7 @@ static ssize_t nvram_read(struct file *f
25021
25022 spin_unlock_irq(&rtc_lock);
25023
25024- if (copy_to_user(buf, contents, tmp - contents))
25025+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
25026 return -EFAULT;
25027
25028 *ppos = i;
25029diff -urNp linux-3.0.4/drivers/char/random.c linux-3.0.4/drivers/char/random.c
25030--- linux-3.0.4/drivers/char/random.c 2011-09-02 18:11:21.000000000 -0400
25031+++ linux-3.0.4/drivers/char/random.c 2011-08-23 21:48:14.000000000 -0400
25032@@ -261,8 +261,13 @@
25033 /*
25034 * Configuration information
25035 */
25036+#ifdef CONFIG_GRKERNSEC_RANDNET
25037+#define INPUT_POOL_WORDS 512
25038+#define OUTPUT_POOL_WORDS 128
25039+#else
25040 #define INPUT_POOL_WORDS 128
25041 #define OUTPUT_POOL_WORDS 32
25042+#endif
25043 #define SEC_XFER_SIZE 512
25044 #define EXTRACT_SIZE 10
25045
25046@@ -300,10 +305,17 @@ static struct poolinfo {
25047 int poolwords;
25048 int tap1, tap2, tap3, tap4, tap5;
25049 } poolinfo_table[] = {
25050+#ifdef CONFIG_GRKERNSEC_RANDNET
25051+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
25052+ { 512, 411, 308, 208, 104, 1 },
25053+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
25054+ { 128, 103, 76, 51, 25, 1 },
25055+#else
25056 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
25057 { 128, 103, 76, 51, 25, 1 },
25058 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
25059 { 32, 26, 20, 14, 7, 1 },
25060+#endif
25061 #if 0
25062 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
25063 { 2048, 1638, 1231, 819, 411, 1 },
25064@@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(stru
25065
25066 extract_buf(r, tmp);
25067 i = min_t(int, nbytes, EXTRACT_SIZE);
25068- if (copy_to_user(buf, tmp, i)) {
25069+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
25070 ret = -EFAULT;
25071 break;
25072 }
25073@@ -1214,7 +1226,7 @@ EXPORT_SYMBOL(generate_random_uuid);
25074 #include <linux/sysctl.h>
25075
25076 static int min_read_thresh = 8, min_write_thresh;
25077-static int max_read_thresh = INPUT_POOL_WORDS * 32;
25078+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
25079 static int max_write_thresh = INPUT_POOL_WORDS * 32;
25080 static char sysctl_bootid[16];
25081
25082diff -urNp linux-3.0.4/drivers/char/sonypi.c linux-3.0.4/drivers/char/sonypi.c
25083--- linux-3.0.4/drivers/char/sonypi.c 2011-07-21 22:17:23.000000000 -0400
25084+++ linux-3.0.4/drivers/char/sonypi.c 2011-08-23 21:47:55.000000000 -0400
25085@@ -55,6 +55,7 @@
25086 #include <asm/uaccess.h>
25087 #include <asm/io.h>
25088 #include <asm/system.h>
25089+#include <asm/local.h>
25090
25091 #include <linux/sonypi.h>
25092
25093@@ -491,7 +492,7 @@ static struct sonypi_device {
25094 spinlock_t fifo_lock;
25095 wait_queue_head_t fifo_proc_list;
25096 struct fasync_struct *fifo_async;
25097- int open_count;
25098+ local_t open_count;
25099 int model;
25100 struct input_dev *input_jog_dev;
25101 struct input_dev *input_key_dev;
25102@@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, st
25103 static int sonypi_misc_release(struct inode *inode, struct file *file)
25104 {
25105 mutex_lock(&sonypi_device.lock);
25106- sonypi_device.open_count--;
25107+ local_dec(&sonypi_device.open_count);
25108 mutex_unlock(&sonypi_device.lock);
25109 return 0;
25110 }
25111@@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode
25112 {
25113 mutex_lock(&sonypi_device.lock);
25114 /* Flush input queue on first open */
25115- if (!sonypi_device.open_count)
25116+ if (!local_read(&sonypi_device.open_count))
25117 kfifo_reset(&sonypi_device.fifo);
25118- sonypi_device.open_count++;
25119+ local_inc(&sonypi_device.open_count);
25120 mutex_unlock(&sonypi_device.lock);
25121
25122 return 0;
25123diff -urNp linux-3.0.4/drivers/char/tpm/tpm_bios.c linux-3.0.4/drivers/char/tpm/tpm_bios.c
25124--- linux-3.0.4/drivers/char/tpm/tpm_bios.c 2011-07-21 22:17:23.000000000 -0400
25125+++ linux-3.0.4/drivers/char/tpm/tpm_bios.c 2011-10-06 04:17:55.000000000 -0400
25126@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start
25127 event = addr;
25128
25129 if ((event->event_type == 0 && event->event_size == 0) ||
25130- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
25131+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
25132 return NULL;
25133
25134 return addr;
25135@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(
25136 return NULL;
25137
25138 if ((event->event_type == 0 && event->event_size == 0) ||
25139- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
25140+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
25141 return NULL;
25142
25143 (*pos)++;
25144@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_
25145 int i;
25146
25147 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
25148- seq_putc(m, data[i]);
25149+ if (!seq_putc(m, data[i]))
25150+ return -EFAULT;
25151
25152 return 0;
25153 }
25154@@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log
25155 log->bios_event_log_end = log->bios_event_log + len;
25156
25157 virt = acpi_os_map_memory(start, len);
25158+ if (!virt) {
25159+ kfree(log->bios_event_log);
25160+ log->bios_event_log = NULL;
25161+ return -EFAULT;
25162+ }
25163
25164- memcpy(log->bios_event_log, virt, len);
25165+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
25166
25167 acpi_os_unmap_memory(virt, len);
25168 return 0;
25169diff -urNp linux-3.0.4/drivers/char/tpm/tpm.c linux-3.0.4/drivers/char/tpm/tpm.c
25170--- linux-3.0.4/drivers/char/tpm/tpm.c 2011-07-21 22:17:23.000000000 -0400
25171+++ linux-3.0.4/drivers/char/tpm/tpm.c 2011-08-23 21:48:14.000000000 -0400
25172@@ -411,7 +411,7 @@ static ssize_t tpm_transmit(struct tpm_c
25173 chip->vendor.req_complete_val)
25174 goto out_recv;
25175
25176- if ((status == chip->vendor.req_canceled)) {
25177+ if (status == chip->vendor.req_canceled) {
25178 dev_err(chip->dev, "Operation Canceled\n");
25179 rc = -ECANCELED;
25180 goto out;
25181@@ -844,6 +844,8 @@ ssize_t tpm_show_pubek(struct device *de
25182
25183 struct tpm_chip *chip = dev_get_drvdata(dev);
25184
25185+ pax_track_stack();
25186+
25187 tpm_cmd.header.in = tpm_readpubek_header;
25188 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
25189 "attempting to read the PUBEK");
25190diff -urNp linux-3.0.4/drivers/char/virtio_console.c linux-3.0.4/drivers/char/virtio_console.c
25191--- linux-3.0.4/drivers/char/virtio_console.c 2011-07-21 22:17:23.000000000 -0400
25192+++ linux-3.0.4/drivers/char/virtio_console.c 2011-10-06 04:17:55.000000000 -0400
25193@@ -555,7 +555,7 @@ static ssize_t fill_readbuf(struct port
25194 if (to_user) {
25195 ssize_t ret;
25196
25197- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
25198+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
25199 if (ret)
25200 return -EFAULT;
25201 } else {
25202@@ -654,7 +654,7 @@ static ssize_t port_fops_read(struct fil
25203 if (!port_has_data(port) && !port->host_connected)
25204 return 0;
25205
25206- return fill_readbuf(port, ubuf, count, true);
25207+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
25208 }
25209
25210 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
25211diff -urNp linux-3.0.4/drivers/crypto/hifn_795x.c linux-3.0.4/drivers/crypto/hifn_795x.c
25212--- linux-3.0.4/drivers/crypto/hifn_795x.c 2011-07-21 22:17:23.000000000 -0400
25213+++ linux-3.0.4/drivers/crypto/hifn_795x.c 2011-08-23 21:48:14.000000000 -0400
25214@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
25215 0xCA, 0x34, 0x2B, 0x2E};
25216 struct scatterlist sg;
25217
25218+ pax_track_stack();
25219+
25220 memset(src, 0, sizeof(src));
25221 memset(ctx.key, 0, sizeof(ctx.key));
25222
25223diff -urNp linux-3.0.4/drivers/crypto/padlock-aes.c linux-3.0.4/drivers/crypto/padlock-aes.c
25224--- linux-3.0.4/drivers/crypto/padlock-aes.c 2011-07-21 22:17:23.000000000 -0400
25225+++ linux-3.0.4/drivers/crypto/padlock-aes.c 2011-08-23 21:48:14.000000000 -0400
25226@@ -109,6 +109,8 @@ static int aes_set_key(struct crypto_tfm
25227 struct crypto_aes_ctx gen_aes;
25228 int cpu;
25229
25230+ pax_track_stack();
25231+
25232 if (key_len % 8) {
25233 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
25234 return -EINVAL;
25235diff -urNp linux-3.0.4/drivers/edac/edac_pci_sysfs.c linux-3.0.4/drivers/edac/edac_pci_sysfs.c
25236--- linux-3.0.4/drivers/edac/edac_pci_sysfs.c 2011-07-21 22:17:23.000000000 -0400
25237+++ linux-3.0.4/drivers/edac/edac_pci_sysfs.c 2011-08-23 21:47:55.000000000 -0400
25238@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log
25239 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
25240 static int edac_pci_poll_msec = 1000; /* one second workq period */
25241
25242-static atomic_t pci_parity_count = ATOMIC_INIT(0);
25243-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
25244+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
25245+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
25246
25247 static struct kobject *edac_pci_top_main_kobj;
25248 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
25249@@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(str
25250 edac_printk(KERN_CRIT, EDAC_PCI,
25251 "Signaled System Error on %s\n",
25252 pci_name(dev));
25253- atomic_inc(&pci_nonparity_count);
25254+ atomic_inc_unchecked(&pci_nonparity_count);
25255 }
25256
25257 if (status & (PCI_STATUS_PARITY)) {
25258@@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(str
25259 "Master Data Parity Error on %s\n",
25260 pci_name(dev));
25261
25262- atomic_inc(&pci_parity_count);
25263+ atomic_inc_unchecked(&pci_parity_count);
25264 }
25265
25266 if (status & (PCI_STATUS_DETECTED_PARITY)) {
25267@@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(str
25268 "Detected Parity Error on %s\n",
25269 pci_name(dev));
25270
25271- atomic_inc(&pci_parity_count);
25272+ atomic_inc_unchecked(&pci_parity_count);
25273 }
25274 }
25275
25276@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(str
25277 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
25278 "Signaled System Error on %s\n",
25279 pci_name(dev));
25280- atomic_inc(&pci_nonparity_count);
25281+ atomic_inc_unchecked(&pci_nonparity_count);
25282 }
25283
25284 if (status & (PCI_STATUS_PARITY)) {
25285@@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(str
25286 "Master Data Parity Error on "
25287 "%s\n", pci_name(dev));
25288
25289- atomic_inc(&pci_parity_count);
25290+ atomic_inc_unchecked(&pci_parity_count);
25291 }
25292
25293 if (status & (PCI_STATUS_DETECTED_PARITY)) {
25294@@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(str
25295 "Detected Parity Error on %s\n",
25296 pci_name(dev));
25297
25298- atomic_inc(&pci_parity_count);
25299+ atomic_inc_unchecked(&pci_parity_count);
25300 }
25301 }
25302 }
25303@@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
25304 if (!check_pci_errors)
25305 return;
25306
25307- before_count = atomic_read(&pci_parity_count);
25308+ before_count = atomic_read_unchecked(&pci_parity_count);
25309
25310 /* scan all PCI devices looking for a Parity Error on devices and
25311 * bridges.
25312@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
25313 /* Only if operator has selected panic on PCI Error */
25314 if (edac_pci_get_panic_on_pe()) {
25315 /* If the count is different 'after' from 'before' */
25316- if (before_count != atomic_read(&pci_parity_count))
25317+ if (before_count != atomic_read_unchecked(&pci_parity_count))
25318 panic("EDAC: PCI Parity Error");
25319 }
25320 }
25321diff -urNp linux-3.0.4/drivers/edac/mce_amd.h linux-3.0.4/drivers/edac/mce_amd.h
25322--- linux-3.0.4/drivers/edac/mce_amd.h 2011-07-21 22:17:23.000000000 -0400
25323+++ linux-3.0.4/drivers/edac/mce_amd.h 2011-08-23 21:47:55.000000000 -0400
25324@@ -83,7 +83,7 @@ struct amd_decoder_ops {
25325 bool (*dc_mce)(u16, u8);
25326 bool (*ic_mce)(u16, u8);
25327 bool (*nb_mce)(u16, u8);
25328-};
25329+} __no_const;
25330
25331 void amd_report_gart_errors(bool);
25332 void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32));
25333diff -urNp linux-3.0.4/drivers/firewire/core-card.c linux-3.0.4/drivers/firewire/core-card.c
25334--- linux-3.0.4/drivers/firewire/core-card.c 2011-07-21 22:17:23.000000000 -0400
25335+++ linux-3.0.4/drivers/firewire/core-card.c 2011-08-23 21:47:55.000000000 -0400
25336@@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
25337
25338 void fw_core_remove_card(struct fw_card *card)
25339 {
25340- struct fw_card_driver dummy_driver = dummy_driver_template;
25341+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
25342
25343 card->driver->update_phy_reg(card, 4,
25344 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
25345diff -urNp linux-3.0.4/drivers/firewire/core-cdev.c linux-3.0.4/drivers/firewire/core-cdev.c
25346--- linux-3.0.4/drivers/firewire/core-cdev.c 2011-09-02 18:11:21.000000000 -0400
25347+++ linux-3.0.4/drivers/firewire/core-cdev.c 2011-08-23 21:47:55.000000000 -0400
25348@@ -1313,8 +1313,7 @@ static int init_iso_resource(struct clie
25349 int ret;
25350
25351 if ((request->channels == 0 && request->bandwidth == 0) ||
25352- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
25353- request->bandwidth < 0)
25354+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
25355 return -EINVAL;
25356
25357 r = kmalloc(sizeof(*r), GFP_KERNEL);
25358diff -urNp linux-3.0.4/drivers/firewire/core.h linux-3.0.4/drivers/firewire/core.h
25359--- linux-3.0.4/drivers/firewire/core.h 2011-07-21 22:17:23.000000000 -0400
25360+++ linux-3.0.4/drivers/firewire/core.h 2011-08-23 21:47:55.000000000 -0400
25361@@ -101,6 +101,7 @@ struct fw_card_driver {
25362
25363 int (*stop_iso)(struct fw_iso_context *ctx);
25364 };
25365+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
25366
25367 void fw_card_initialize(struct fw_card *card,
25368 const struct fw_card_driver *driver, struct device *device);
25369diff -urNp linux-3.0.4/drivers/firewire/core-transaction.c linux-3.0.4/drivers/firewire/core-transaction.c
25370--- linux-3.0.4/drivers/firewire/core-transaction.c 2011-07-21 22:17:23.000000000 -0400
25371+++ linux-3.0.4/drivers/firewire/core-transaction.c 2011-08-23 21:48:14.000000000 -0400
25372@@ -37,6 +37,7 @@
25373 #include <linux/timer.h>
25374 #include <linux/types.h>
25375 #include <linux/workqueue.h>
25376+#include <linux/sched.h>
25377
25378 #include <asm/byteorder.h>
25379
25380@@ -422,6 +423,8 @@ int fw_run_transaction(struct fw_card *c
25381 struct transaction_callback_data d;
25382 struct fw_transaction t;
25383
25384+ pax_track_stack();
25385+
25386 init_timer_on_stack(&t.split_timeout_timer);
25387 init_completion(&d.done);
25388 d.payload = payload;
25389diff -urNp linux-3.0.4/drivers/firmware/dmi_scan.c linux-3.0.4/drivers/firmware/dmi_scan.c
25390--- linux-3.0.4/drivers/firmware/dmi_scan.c 2011-07-21 22:17:23.000000000 -0400
25391+++ linux-3.0.4/drivers/firmware/dmi_scan.c 2011-10-06 04:17:55.000000000 -0400
25392@@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
25393 }
25394 }
25395 else {
25396- /*
25397- * no iounmap() for that ioremap(); it would be a no-op, but
25398- * it's so early in setup that sucker gets confused into doing
25399- * what it shouldn't if we actually call it.
25400- */
25401 p = dmi_ioremap(0xF0000, 0x10000);
25402 if (p == NULL)
25403 goto error;
25404@@ -725,7 +720,7 @@ int dmi_walk(void (*decode)(const struct
25405 if (buf == NULL)
25406 return -1;
25407
25408- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
25409+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
25410
25411 iounmap(buf);
25412 return 0;
25413diff -urNp linux-3.0.4/drivers/gpio/vr41xx_giu.c linux-3.0.4/drivers/gpio/vr41xx_giu.c
25414--- linux-3.0.4/drivers/gpio/vr41xx_giu.c 2011-07-21 22:17:23.000000000 -0400
25415+++ linux-3.0.4/drivers/gpio/vr41xx_giu.c 2011-08-23 21:47:55.000000000 -0400
25416@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
25417 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
25418 maskl, pendl, maskh, pendh);
25419
25420- atomic_inc(&irq_err_count);
25421+ atomic_inc_unchecked(&irq_err_count);
25422
25423 return -EINVAL;
25424 }
25425diff -urNp linux-3.0.4/drivers/gpu/drm/drm_crtc.c linux-3.0.4/drivers/gpu/drm/drm_crtc.c
25426--- linux-3.0.4/drivers/gpu/drm/drm_crtc.c 2011-07-21 22:17:23.000000000 -0400
25427+++ linux-3.0.4/drivers/gpu/drm/drm_crtc.c 2011-10-06 04:17:55.000000000 -0400
25428@@ -1372,7 +1372,7 @@ int drm_mode_getconnector(struct drm_dev
25429 */
25430 if ((out_resp->count_modes >= mode_count) && mode_count) {
25431 copied = 0;
25432- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
25433+ mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
25434 list_for_each_entry(mode, &connector->modes, head) {
25435 drm_crtc_convert_to_umode(&u_mode, mode);
25436 if (copy_to_user(mode_ptr + copied,
25437@@ -1387,8 +1387,8 @@ int drm_mode_getconnector(struct drm_dev
25438
25439 if ((out_resp->count_props >= props_count) && props_count) {
25440 copied = 0;
25441- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
25442- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
25443+ prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
25444+ prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
25445 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
25446 if (connector->property_ids[i] != 0) {
25447 if (put_user(connector->property_ids[i],
25448@@ -1410,7 +1410,7 @@ int drm_mode_getconnector(struct drm_dev
25449
25450 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
25451 copied = 0;
25452- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
25453+ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
25454 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
25455 if (connector->encoder_ids[i] != 0) {
25456 if (put_user(connector->encoder_ids[i],
25457@@ -1569,7 +1569,7 @@ int drm_mode_setcrtc(struct drm_device *
25458 }
25459
25460 for (i = 0; i < crtc_req->count_connectors; i++) {
25461- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
25462+ set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
25463 if (get_user(out_id, &set_connectors_ptr[i])) {
25464 ret = -EFAULT;
25465 goto out;
25466@@ -1850,7 +1850,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_de
25467 fb = obj_to_fb(obj);
25468
25469 num_clips = r->num_clips;
25470- clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
25471+ clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
25472
25473 if (!num_clips != !clips_ptr) {
25474 ret = -EINVAL;
25475@@ -2270,7 +2270,7 @@ int drm_mode_getproperty_ioctl(struct dr
25476 out_resp->flags = property->flags;
25477
25478 if ((out_resp->count_values >= value_count) && value_count) {
25479- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
25480+ values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
25481 for (i = 0; i < value_count; i++) {
25482 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
25483 ret = -EFAULT;
25484@@ -2283,7 +2283,7 @@ int drm_mode_getproperty_ioctl(struct dr
25485 if (property->flags & DRM_MODE_PROP_ENUM) {
25486 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
25487 copied = 0;
25488- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
25489+ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
25490 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
25491
25492 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
25493@@ -2306,7 +2306,7 @@ int drm_mode_getproperty_ioctl(struct dr
25494 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
25495 copied = 0;
25496 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
25497- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
25498+ blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
25499
25500 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
25501 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
25502@@ -2367,7 +2367,7 @@ int drm_mode_getblob_ioctl(struct drm_de
25503 struct drm_mode_get_blob *out_resp = data;
25504 struct drm_property_blob *blob;
25505 int ret = 0;
25506- void *blob_ptr;
25507+ void __user *blob_ptr;
25508
25509 if (!drm_core_check_feature(dev, DRIVER_MODESET))
25510 return -EINVAL;
25511@@ -2381,7 +2381,7 @@ int drm_mode_getblob_ioctl(struct drm_de
25512 blob = obj_to_blob(obj);
25513
25514 if (out_resp->length == blob->length) {
25515- blob_ptr = (void *)(unsigned long)out_resp->data;
25516+ blob_ptr = (void __user *)(unsigned long)out_resp->data;
25517 if (copy_to_user(blob_ptr, blob->data, blob->length)){
25518 ret = -EFAULT;
25519 goto done;
25520diff -urNp linux-3.0.4/drivers/gpu/drm/drm_crtc_helper.c linux-3.0.4/drivers/gpu/drm/drm_crtc_helper.c
25521--- linux-3.0.4/drivers/gpu/drm/drm_crtc_helper.c 2011-07-21 22:17:23.000000000 -0400
25522+++ linux-3.0.4/drivers/gpu/drm/drm_crtc_helper.c 2011-08-23 21:48:14.000000000 -0400
25523@@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct d
25524 struct drm_crtc *tmp;
25525 int crtc_mask = 1;
25526
25527- WARN(!crtc, "checking null crtc?\n");
25528+ BUG_ON(!crtc);
25529
25530 dev = crtc->dev;
25531
25532@@ -343,6 +343,8 @@ bool drm_crtc_helper_set_mode(struct drm
25533 struct drm_encoder *encoder;
25534 bool ret = true;
25535
25536+ pax_track_stack();
25537+
25538 crtc->enabled = drm_helper_crtc_in_use(crtc);
25539 if (!crtc->enabled)
25540 return true;
25541diff -urNp linux-3.0.4/drivers/gpu/drm/drm_drv.c linux-3.0.4/drivers/gpu/drm/drm_drv.c
25542--- linux-3.0.4/drivers/gpu/drm/drm_drv.c 2011-07-21 22:17:23.000000000 -0400
25543+++ linux-3.0.4/drivers/gpu/drm/drm_drv.c 2011-10-06 04:17:55.000000000 -0400
25544@@ -307,7 +307,7 @@ module_exit(drm_core_exit);
25545 /**
25546 * Copy and IOCTL return string to user space
25547 */
25548-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
25549+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
25550 {
25551 int len;
25552
25553@@ -386,7 +386,7 @@ long drm_ioctl(struct file *filp,
25554
25555 dev = file_priv->minor->dev;
25556 atomic_inc(&dev->ioctl_count);
25557- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
25558+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
25559 ++file_priv->ioctl_count;
25560
25561 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
25562diff -urNp linux-3.0.4/drivers/gpu/drm/drm_fops.c linux-3.0.4/drivers/gpu/drm/drm_fops.c
25563--- linux-3.0.4/drivers/gpu/drm/drm_fops.c 2011-07-21 22:17:23.000000000 -0400
25564+++ linux-3.0.4/drivers/gpu/drm/drm_fops.c 2011-08-23 21:47:55.000000000 -0400
25565@@ -70,7 +70,7 @@ static int drm_setup(struct drm_device *
25566 }
25567
25568 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
25569- atomic_set(&dev->counts[i], 0);
25570+ atomic_set_unchecked(&dev->counts[i], 0);
25571
25572 dev->sigdata.lock = NULL;
25573
25574@@ -134,8 +134,8 @@ int drm_open(struct inode *inode, struct
25575
25576 retcode = drm_open_helper(inode, filp, dev);
25577 if (!retcode) {
25578- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
25579- if (!dev->open_count++)
25580+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
25581+ if (local_inc_return(&dev->open_count) == 1)
25582 retcode = drm_setup(dev);
25583 }
25584 if (!retcode) {
25585@@ -472,7 +472,7 @@ int drm_release(struct inode *inode, str
25586
25587 mutex_lock(&drm_global_mutex);
25588
25589- DRM_DEBUG("open_count = %d\n", dev->open_count);
25590+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
25591
25592 if (dev->driver->preclose)
25593 dev->driver->preclose(dev, file_priv);
25594@@ -484,7 +484,7 @@ int drm_release(struct inode *inode, str
25595 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
25596 task_pid_nr(current),
25597 (long)old_encode_dev(file_priv->minor->device),
25598- dev->open_count);
25599+ local_read(&dev->open_count));
25600
25601 /* if the master has gone away we can't do anything with the lock */
25602 if (file_priv->minor->master)
25603@@ -565,8 +565,8 @@ int drm_release(struct inode *inode, str
25604 * End inline drm_release
25605 */
25606
25607- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
25608- if (!--dev->open_count) {
25609+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
25610+ if (local_dec_and_test(&dev->open_count)) {
25611 if (atomic_read(&dev->ioctl_count)) {
25612 DRM_ERROR("Device busy: %d\n",
25613 atomic_read(&dev->ioctl_count));
25614diff -urNp linux-3.0.4/drivers/gpu/drm/drm_global.c linux-3.0.4/drivers/gpu/drm/drm_global.c
25615--- linux-3.0.4/drivers/gpu/drm/drm_global.c 2011-07-21 22:17:23.000000000 -0400
25616+++ linux-3.0.4/drivers/gpu/drm/drm_global.c 2011-08-23 21:47:55.000000000 -0400
25617@@ -36,7 +36,7 @@
25618 struct drm_global_item {
25619 struct mutex mutex;
25620 void *object;
25621- int refcount;
25622+ atomic_t refcount;
25623 };
25624
25625 static struct drm_global_item glob[DRM_GLOBAL_NUM];
25626@@ -49,7 +49,7 @@ void drm_global_init(void)
25627 struct drm_global_item *item = &glob[i];
25628 mutex_init(&item->mutex);
25629 item->object = NULL;
25630- item->refcount = 0;
25631+ atomic_set(&item->refcount, 0);
25632 }
25633 }
25634
25635@@ -59,7 +59,7 @@ void drm_global_release(void)
25636 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
25637 struct drm_global_item *item = &glob[i];
25638 BUG_ON(item->object != NULL);
25639- BUG_ON(item->refcount != 0);
25640+ BUG_ON(atomic_read(&item->refcount) != 0);
25641 }
25642 }
25643
25644@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_globa
25645 void *object;
25646
25647 mutex_lock(&item->mutex);
25648- if (item->refcount == 0) {
25649+ if (atomic_read(&item->refcount) == 0) {
25650 item->object = kzalloc(ref->size, GFP_KERNEL);
25651 if (unlikely(item->object == NULL)) {
25652 ret = -ENOMEM;
25653@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_globa
25654 goto out_err;
25655
25656 }
25657- ++item->refcount;
25658+ atomic_inc(&item->refcount);
25659 ref->object = item->object;
25660 object = item->object;
25661 mutex_unlock(&item->mutex);
25662@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_gl
25663 struct drm_global_item *item = &glob[ref->global_type];
25664
25665 mutex_lock(&item->mutex);
25666- BUG_ON(item->refcount == 0);
25667+ BUG_ON(atomic_read(&item->refcount) == 0);
25668 BUG_ON(ref->object != item->object);
25669- if (--item->refcount == 0) {
25670+ if (atomic_dec_and_test(&item->refcount)) {
25671 ref->release(ref);
25672 item->object = NULL;
25673 }
25674diff -urNp linux-3.0.4/drivers/gpu/drm/drm_info.c linux-3.0.4/drivers/gpu/drm/drm_info.c
25675--- linux-3.0.4/drivers/gpu/drm/drm_info.c 2011-07-21 22:17:23.000000000 -0400
25676+++ linux-3.0.4/drivers/gpu/drm/drm_info.c 2011-08-23 21:48:14.000000000 -0400
25677@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
25678 struct drm_local_map *map;
25679 struct drm_map_list *r_list;
25680
25681- /* Hardcoded from _DRM_FRAME_BUFFER,
25682- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
25683- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
25684- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
25685+ static const char * const types[] = {
25686+ [_DRM_FRAME_BUFFER] = "FB",
25687+ [_DRM_REGISTERS] = "REG",
25688+ [_DRM_SHM] = "SHM",
25689+ [_DRM_AGP] = "AGP",
25690+ [_DRM_SCATTER_GATHER] = "SG",
25691+ [_DRM_CONSISTENT] = "PCI",
25692+ [_DRM_GEM] = "GEM" };
25693 const char *type;
25694 int i;
25695
25696@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
25697 map = r_list->map;
25698 if (!map)
25699 continue;
25700- if (map->type < 0 || map->type > 5)
25701+ if (map->type >= ARRAY_SIZE(types))
25702 type = "??";
25703 else
25704 type = types[map->type];
25705@@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, voi
25706 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
25707 vma->vm_flags & VM_LOCKED ? 'l' : '-',
25708 vma->vm_flags & VM_IO ? 'i' : '-',
25709+#ifdef CONFIG_GRKERNSEC_HIDESYM
25710+ 0);
25711+#else
25712 vma->vm_pgoff);
25713+#endif
25714
25715 #if defined(__i386__)
25716 pgprot = pgprot_val(vma->vm_page_prot);
25717diff -urNp linux-3.0.4/drivers/gpu/drm/drm_ioc32.c linux-3.0.4/drivers/gpu/drm/drm_ioc32.c
25718--- linux-3.0.4/drivers/gpu/drm/drm_ioc32.c 2011-07-21 22:17:23.000000000 -0400
25719+++ linux-3.0.4/drivers/gpu/drm/drm_ioc32.c 2011-10-06 04:17:55.000000000 -0400
25720@@ -455,7 +455,7 @@ static int compat_drm_infobufs(struct fi
25721 request = compat_alloc_user_space(nbytes);
25722 if (!access_ok(VERIFY_WRITE, request, nbytes))
25723 return -EFAULT;
25724- list = (struct drm_buf_desc *) (request + 1);
25725+ list = (struct drm_buf_desc __user *) (request + 1);
25726
25727 if (__put_user(count, &request->count)
25728 || __put_user(list, &request->list))
25729@@ -516,7 +516,7 @@ static int compat_drm_mapbufs(struct fil
25730 request = compat_alloc_user_space(nbytes);
25731 if (!access_ok(VERIFY_WRITE, request, nbytes))
25732 return -EFAULT;
25733- list = (struct drm_buf_pub *) (request + 1);
25734+ list = (struct drm_buf_pub __user *) (request + 1);
25735
25736 if (__put_user(count, &request->count)
25737 || __put_user(list, &request->list))
25738diff -urNp linux-3.0.4/drivers/gpu/drm/drm_ioctl.c linux-3.0.4/drivers/gpu/drm/drm_ioctl.c
25739--- linux-3.0.4/drivers/gpu/drm/drm_ioctl.c 2011-07-21 22:17:23.000000000 -0400
25740+++ linux-3.0.4/drivers/gpu/drm/drm_ioctl.c 2011-08-23 21:47:55.000000000 -0400
25741@@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev,
25742 stats->data[i].value =
25743 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
25744 else
25745- stats->data[i].value = atomic_read(&dev->counts[i]);
25746+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
25747 stats->data[i].type = dev->types[i];
25748 }
25749
25750diff -urNp linux-3.0.4/drivers/gpu/drm/drm_lock.c linux-3.0.4/drivers/gpu/drm/drm_lock.c
25751--- linux-3.0.4/drivers/gpu/drm/drm_lock.c 2011-07-21 22:17:23.000000000 -0400
25752+++ linux-3.0.4/drivers/gpu/drm/drm_lock.c 2011-08-23 21:47:55.000000000 -0400
25753@@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, voi
25754 if (drm_lock_take(&master->lock, lock->context)) {
25755 master->lock.file_priv = file_priv;
25756 master->lock.lock_time = jiffies;
25757- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
25758+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
25759 break; /* Got lock */
25760 }
25761
25762@@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, v
25763 return -EINVAL;
25764 }
25765
25766- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
25767+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
25768
25769 if (drm_lock_free(&master->lock, lock->context)) {
25770 /* FIXME: Should really bail out here. */
25771diff -urNp linux-3.0.4/drivers/gpu/drm/i810/i810_dma.c linux-3.0.4/drivers/gpu/drm/i810/i810_dma.c
25772--- linux-3.0.4/drivers/gpu/drm/i810/i810_dma.c 2011-07-21 22:17:23.000000000 -0400
25773+++ linux-3.0.4/drivers/gpu/drm/i810/i810_dma.c 2011-08-23 21:47:55.000000000 -0400
25774@@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_de
25775 dma->buflist[vertex->idx],
25776 vertex->discard, vertex->used);
25777
25778- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
25779- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
25780+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
25781+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
25782 sarea_priv->last_enqueue = dev_priv->counter - 1;
25783 sarea_priv->last_dispatch = (int)hw_status[5];
25784
25785@@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device
25786 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
25787 mc->last_render);
25788
25789- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
25790- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
25791+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
25792+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
25793 sarea_priv->last_enqueue = dev_priv->counter - 1;
25794 sarea_priv->last_dispatch = (int)hw_status[5];
25795
25796diff -urNp linux-3.0.4/drivers/gpu/drm/i810/i810_drv.h linux-3.0.4/drivers/gpu/drm/i810/i810_drv.h
25797--- linux-3.0.4/drivers/gpu/drm/i810/i810_drv.h 2011-07-21 22:17:23.000000000 -0400
25798+++ linux-3.0.4/drivers/gpu/drm/i810/i810_drv.h 2011-08-23 21:47:55.000000000 -0400
25799@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
25800 int page_flipping;
25801
25802 wait_queue_head_t irq_queue;
25803- atomic_t irq_received;
25804- atomic_t irq_emitted;
25805+ atomic_unchecked_t irq_received;
25806+ atomic_unchecked_t irq_emitted;
25807
25808 int front_offset;
25809 } drm_i810_private_t;
25810diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_debugfs.c linux-3.0.4/drivers/gpu/drm/i915/i915_debugfs.c
25811--- linux-3.0.4/drivers/gpu/drm/i915/i915_debugfs.c 2011-07-21 22:17:23.000000000 -0400
25812+++ linux-3.0.4/drivers/gpu/drm/i915/i915_debugfs.c 2011-10-06 04:17:55.000000000 -0400
25813@@ -497,7 +497,7 @@ static int i915_interrupt_info(struct se
25814 I915_READ(GTIMR));
25815 }
25816 seq_printf(m, "Interrupts received: %d\n",
25817- atomic_read(&dev_priv->irq_received));
25818+ atomic_read_unchecked(&dev_priv->irq_received));
25819 for (i = 0; i < I915_NUM_RINGS; i++) {
25820 if (IS_GEN6(dev)) {
25821 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
25822@@ -1147,7 +1147,7 @@ static int i915_opregion(struct seq_file
25823 return ret;
25824
25825 if (opregion->header)
25826- seq_write(m, opregion->header, OPREGION_SIZE);
25827+ seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
25828
25829 mutex_unlock(&dev->struct_mutex);
25830
25831diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_dma.c linux-3.0.4/drivers/gpu/drm/i915/i915_dma.c
25832--- linux-3.0.4/drivers/gpu/drm/i915/i915_dma.c 2011-09-02 18:11:21.000000000 -0400
25833+++ linux-3.0.4/drivers/gpu/drm/i915/i915_dma.c 2011-08-23 21:47:55.000000000 -0400
25834@@ -1169,7 +1169,7 @@ static bool i915_switcheroo_can_switch(s
25835 bool can_switch;
25836
25837 spin_lock(&dev->count_lock);
25838- can_switch = (dev->open_count == 0);
25839+ can_switch = (local_read(&dev->open_count) == 0);
25840 spin_unlock(&dev->count_lock);
25841 return can_switch;
25842 }
25843diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_drv.h linux-3.0.4/drivers/gpu/drm/i915/i915_drv.h
25844--- linux-3.0.4/drivers/gpu/drm/i915/i915_drv.h 2011-07-21 22:17:23.000000000 -0400
25845+++ linux-3.0.4/drivers/gpu/drm/i915/i915_drv.h 2011-08-23 21:47:55.000000000 -0400
25846@@ -219,7 +219,7 @@ struct drm_i915_display_funcs {
25847 /* render clock increase/decrease */
25848 /* display clock increase/decrease */
25849 /* pll clock increase/decrease */
25850-};
25851+} __no_const;
25852
25853 struct intel_device_info {
25854 u8 gen;
25855@@ -300,7 +300,7 @@ typedef struct drm_i915_private {
25856 int current_page;
25857 int page_flipping;
25858
25859- atomic_t irq_received;
25860+ atomic_unchecked_t irq_received;
25861
25862 /* protects the irq masks */
25863 spinlock_t irq_lock;
25864@@ -874,7 +874,7 @@ struct drm_i915_gem_object {
25865 * will be page flipped away on the next vblank. When it
25866 * reaches 0, dev_priv->pending_flip_queue will be woken up.
25867 */
25868- atomic_t pending_flip;
25869+ atomic_unchecked_t pending_flip;
25870 };
25871
25872 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
25873@@ -1247,7 +1247,7 @@ extern int intel_setup_gmbus(struct drm_
25874 extern void intel_teardown_gmbus(struct drm_device *dev);
25875 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
25876 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
25877-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
25878+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
25879 {
25880 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
25881 }
25882diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-3.0.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c
25883--- linux-3.0.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-07-21 22:17:23.000000000 -0400
25884+++ linux-3.0.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-08-23 21:47:55.000000000 -0400
25885@@ -188,7 +188,7 @@ i915_gem_object_set_to_gpu_domain(struct
25886 i915_gem_clflush_object(obj);
25887
25888 if (obj->base.pending_write_domain)
25889- cd->flips |= atomic_read(&obj->pending_flip);
25890+ cd->flips |= atomic_read_unchecked(&obj->pending_flip);
25891
25892 /* The actual obj->write_domain will be updated with
25893 * pending_write_domain after we emit the accumulated flush for all
25894diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_irq.c linux-3.0.4/drivers/gpu/drm/i915/i915_irq.c
25895--- linux-3.0.4/drivers/gpu/drm/i915/i915_irq.c 2011-09-02 18:11:21.000000000 -0400
25896+++ linux-3.0.4/drivers/gpu/drm/i915/i915_irq.c 2011-08-23 21:47:55.000000000 -0400
25897@@ -473,7 +473,7 @@ static irqreturn_t ivybridge_irq_handler
25898 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
25899 struct drm_i915_master_private *master_priv;
25900
25901- atomic_inc(&dev_priv->irq_received);
25902+ atomic_inc_unchecked(&dev_priv->irq_received);
25903
25904 /* disable master interrupt before clearing iir */
25905 de_ier = I915_READ(DEIER);
25906@@ -563,7 +563,7 @@ static irqreturn_t ironlake_irq_handler(
25907 struct drm_i915_master_private *master_priv;
25908 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
25909
25910- atomic_inc(&dev_priv->irq_received);
25911+ atomic_inc_unchecked(&dev_priv->irq_received);
25912
25913 if (IS_GEN6(dev))
25914 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
25915@@ -1226,7 +1226,7 @@ static irqreturn_t i915_driver_irq_handl
25916 int ret = IRQ_NONE, pipe;
25917 bool blc_event = false;
25918
25919- atomic_inc(&dev_priv->irq_received);
25920+ atomic_inc_unchecked(&dev_priv->irq_received);
25921
25922 iir = I915_READ(IIR);
25923
25924@@ -1735,7 +1735,7 @@ static void ironlake_irq_preinstall(stru
25925 {
25926 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
25927
25928- atomic_set(&dev_priv->irq_received, 0);
25929+ atomic_set_unchecked(&dev_priv->irq_received, 0);
25930
25931 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
25932 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
25933@@ -1899,7 +1899,7 @@ static void i915_driver_irq_preinstall(s
25934 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
25935 int pipe;
25936
25937- atomic_set(&dev_priv->irq_received, 0);
25938+ atomic_set_unchecked(&dev_priv->irq_received, 0);
25939
25940 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
25941 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
25942diff -urNp linux-3.0.4/drivers/gpu/drm/i915/intel_display.c linux-3.0.4/drivers/gpu/drm/i915/intel_display.c
25943--- linux-3.0.4/drivers/gpu/drm/i915/intel_display.c 2011-09-02 18:11:21.000000000 -0400
25944+++ linux-3.0.4/drivers/gpu/drm/i915/intel_display.c 2011-08-23 21:47:55.000000000 -0400
25945@@ -1961,7 +1961,7 @@ intel_pipe_set_base(struct drm_crtc *crt
25946
25947 wait_event(dev_priv->pending_flip_queue,
25948 atomic_read(&dev_priv->mm.wedged) ||
25949- atomic_read(&obj->pending_flip) == 0);
25950+ atomic_read_unchecked(&obj->pending_flip) == 0);
25951
25952 /* Big Hammer, we also need to ensure that any pending
25953 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
25954@@ -2548,7 +2548,7 @@ static void intel_crtc_wait_for_pending_
25955 obj = to_intel_framebuffer(crtc->fb)->obj;
25956 dev_priv = crtc->dev->dev_private;
25957 wait_event(dev_priv->pending_flip_queue,
25958- atomic_read(&obj->pending_flip) == 0);
25959+ atomic_read_unchecked(&obj->pending_flip) == 0);
25960 }
25961
25962 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
25963@@ -6225,7 +6225,7 @@ static void do_intel_finish_page_flip(st
25964
25965 atomic_clear_mask(1 << intel_crtc->plane,
25966 &obj->pending_flip.counter);
25967- if (atomic_read(&obj->pending_flip) == 0)
25968+ if (atomic_read_unchecked(&obj->pending_flip) == 0)
25969 wake_up(&dev_priv->pending_flip_queue);
25970
25971 schedule_work(&work->work);
25972@@ -6514,7 +6514,7 @@ static int intel_crtc_page_flip(struct d
25973 /* Block clients from rendering to the new back buffer until
25974 * the flip occurs and the object is no longer visible.
25975 */
25976- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
25977+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
25978
25979 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
25980 if (ret)
25981@@ -6527,7 +6527,7 @@ static int intel_crtc_page_flip(struct d
25982 return 0;
25983
25984 cleanup_pending:
25985- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
25986+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
25987 cleanup_objs:
25988 drm_gem_object_unreference(&work->old_fb_obj->base);
25989 drm_gem_object_unreference(&obj->base);
25990diff -urNp linux-3.0.4/drivers/gpu/drm/mga/mga_drv.h linux-3.0.4/drivers/gpu/drm/mga/mga_drv.h
25991--- linux-3.0.4/drivers/gpu/drm/mga/mga_drv.h 2011-07-21 22:17:23.000000000 -0400
25992+++ linux-3.0.4/drivers/gpu/drm/mga/mga_drv.h 2011-08-23 21:47:55.000000000 -0400
25993@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
25994 u32 clear_cmd;
25995 u32 maccess;
25996
25997- atomic_t vbl_received; /**< Number of vblanks received. */
25998+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
25999 wait_queue_head_t fence_queue;
26000- atomic_t last_fence_retired;
26001+ atomic_unchecked_t last_fence_retired;
26002 u32 next_fence_to_post;
26003
26004 unsigned int fb_cpp;
26005diff -urNp linux-3.0.4/drivers/gpu/drm/mga/mga_irq.c linux-3.0.4/drivers/gpu/drm/mga/mga_irq.c
26006--- linux-3.0.4/drivers/gpu/drm/mga/mga_irq.c 2011-07-21 22:17:23.000000000 -0400
26007+++ linux-3.0.4/drivers/gpu/drm/mga/mga_irq.c 2011-08-23 21:47:55.000000000 -0400
26008@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
26009 if (crtc != 0)
26010 return 0;
26011
26012- return atomic_read(&dev_priv->vbl_received);
26013+ return atomic_read_unchecked(&dev_priv->vbl_received);
26014 }
26015
26016
26017@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
26018 /* VBLANK interrupt */
26019 if (status & MGA_VLINEPEN) {
26020 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
26021- atomic_inc(&dev_priv->vbl_received);
26022+ atomic_inc_unchecked(&dev_priv->vbl_received);
26023 drm_handle_vblank(dev, 0);
26024 handled = 1;
26025 }
26026@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
26027 if ((prim_start & ~0x03) != (prim_end & ~0x03))
26028 MGA_WRITE(MGA_PRIMEND, prim_end);
26029
26030- atomic_inc(&dev_priv->last_fence_retired);
26031+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
26032 DRM_WAKEUP(&dev_priv->fence_queue);
26033 handled = 1;
26034 }
26035@@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_dev
26036 * using fences.
26037 */
26038 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
26039- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
26040+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
26041 - *sequence) <= (1 << 23)));
26042
26043 *sequence = cur_fence;
26044diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_bios.c linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_bios.c
26045--- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-07-21 22:17:23.000000000 -0400
26046+++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-08-26 19:49:56.000000000 -0400
26047@@ -200,7 +200,7 @@ struct methods {
26048 const char desc[8];
26049 void (*loadbios)(struct drm_device *, uint8_t *);
26050 const bool rw;
26051-};
26052+} __do_const;
26053
26054 static struct methods shadow_methods[] = {
26055 { "PRAMIN", load_vbios_pramin, true },
26056@@ -5488,7 +5488,7 @@ parse_bit_displayport_tbl_entry(struct d
26057 struct bit_table {
26058 const char id;
26059 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
26060-};
26061+} __no_const;
26062
26063 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
26064
26065diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_drv.h linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_drv.h
26066--- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-07-21 22:17:23.000000000 -0400
26067+++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-08-23 21:47:55.000000000 -0400
26068@@ -227,7 +227,7 @@ struct nouveau_channel {
26069 struct list_head pending;
26070 uint32_t sequence;
26071 uint32_t sequence_ack;
26072- atomic_t last_sequence_irq;
26073+ atomic_unchecked_t last_sequence_irq;
26074 } fence;
26075
26076 /* DMA push buffer */
26077@@ -304,7 +304,7 @@ struct nouveau_exec_engine {
26078 u32 handle, u16 class);
26079 void (*set_tile_region)(struct drm_device *dev, int i);
26080 void (*tlb_flush)(struct drm_device *, int engine);
26081-};
26082+} __no_const;
26083
26084 struct nouveau_instmem_engine {
26085 void *priv;
26086@@ -325,13 +325,13 @@ struct nouveau_instmem_engine {
26087 struct nouveau_mc_engine {
26088 int (*init)(struct drm_device *dev);
26089 void (*takedown)(struct drm_device *dev);
26090-};
26091+} __no_const;
26092
26093 struct nouveau_timer_engine {
26094 int (*init)(struct drm_device *dev);
26095 void (*takedown)(struct drm_device *dev);
26096 uint64_t (*read)(struct drm_device *dev);
26097-};
26098+} __no_const;
26099
26100 struct nouveau_fb_engine {
26101 int num_tiles;
26102@@ -494,7 +494,7 @@ struct nouveau_vram_engine {
26103 void (*put)(struct drm_device *, struct nouveau_mem **);
26104
26105 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
26106-};
26107+} __no_const;
26108
26109 struct nouveau_engine {
26110 struct nouveau_instmem_engine instmem;
26111@@ -640,7 +640,7 @@ struct drm_nouveau_private {
26112 struct drm_global_reference mem_global_ref;
26113 struct ttm_bo_global_ref bo_global_ref;
26114 struct ttm_bo_device bdev;
26115- atomic_t validate_sequence;
26116+ atomic_unchecked_t validate_sequence;
26117 } ttm;
26118
26119 struct {
26120diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_fence.c linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_fence.c
26121--- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-07-21 22:17:23.000000000 -0400
26122+++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-08-23 21:47:55.000000000 -0400
26123@@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_chan
26124 if (USE_REFCNT(dev))
26125 sequence = nvchan_rd32(chan, 0x48);
26126 else
26127- sequence = atomic_read(&chan->fence.last_sequence_irq);
26128+ sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
26129
26130 if (chan->fence.sequence_ack == sequence)
26131 goto out;
26132@@ -544,7 +544,7 @@ nouveau_fence_channel_init(struct nouvea
26133
26134 INIT_LIST_HEAD(&chan->fence.pending);
26135 spin_lock_init(&chan->fence.lock);
26136- atomic_set(&chan->fence.last_sequence_irq, 0);
26137+ atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
26138 return 0;
26139 }
26140
26141diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_gem.c linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_gem.c
26142--- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-07-21 22:17:23.000000000 -0400
26143+++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-08-23 21:47:55.000000000 -0400
26144@@ -249,7 +249,7 @@ validate_init(struct nouveau_channel *ch
26145 int trycnt = 0;
26146 int ret, i;
26147
26148- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
26149+ sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
26150 retry:
26151 if (++trycnt > 100000) {
26152 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
26153diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_state.c linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_state.c
26154--- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_state.c 2011-07-21 22:17:23.000000000 -0400
26155+++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_state.c 2011-08-23 21:47:55.000000000 -0400
26156@@ -488,7 +488,7 @@ static bool nouveau_switcheroo_can_switc
26157 bool can_switch;
26158
26159 spin_lock(&dev->count_lock);
26160- can_switch = (dev->open_count == 0);
26161+ can_switch = (local_read(&dev->open_count) == 0);
26162 spin_unlock(&dev->count_lock);
26163 return can_switch;
26164 }
26165diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nv04_graph.c linux-3.0.4/drivers/gpu/drm/nouveau/nv04_graph.c
26166--- linux-3.0.4/drivers/gpu/drm/nouveau/nv04_graph.c 2011-07-21 22:17:23.000000000 -0400
26167+++ linux-3.0.4/drivers/gpu/drm/nouveau/nv04_graph.c 2011-08-23 21:47:55.000000000 -0400
26168@@ -560,7 +560,7 @@ static int
26169 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
26170 u32 class, u32 mthd, u32 data)
26171 {
26172- atomic_set(&chan->fence.last_sequence_irq, data);
26173+ atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
26174 return 0;
26175 }
26176
26177diff -urNp linux-3.0.4/drivers/gpu/drm/r128/r128_cce.c linux-3.0.4/drivers/gpu/drm/r128/r128_cce.c
26178--- linux-3.0.4/drivers/gpu/drm/r128/r128_cce.c 2011-07-21 22:17:23.000000000 -0400
26179+++ linux-3.0.4/drivers/gpu/drm/r128/r128_cce.c 2011-08-23 21:47:55.000000000 -0400
26180@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
26181
26182 /* GH: Simple idle check.
26183 */
26184- atomic_set(&dev_priv->idle_count, 0);
26185+ atomic_set_unchecked(&dev_priv->idle_count, 0);
26186
26187 /* We don't support anything other than bus-mastering ring mode,
26188 * but the ring can be in either AGP or PCI space for the ring
26189diff -urNp linux-3.0.4/drivers/gpu/drm/r128/r128_drv.h linux-3.0.4/drivers/gpu/drm/r128/r128_drv.h
26190--- linux-3.0.4/drivers/gpu/drm/r128/r128_drv.h 2011-07-21 22:17:23.000000000 -0400
26191+++ linux-3.0.4/drivers/gpu/drm/r128/r128_drv.h 2011-08-23 21:47:55.000000000 -0400
26192@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
26193 int is_pci;
26194 unsigned long cce_buffers_offset;
26195
26196- atomic_t idle_count;
26197+ atomic_unchecked_t idle_count;
26198
26199 int page_flipping;
26200 int current_page;
26201 u32 crtc_offset;
26202 u32 crtc_offset_cntl;
26203
26204- atomic_t vbl_received;
26205+ atomic_unchecked_t vbl_received;
26206
26207 u32 color_fmt;
26208 unsigned int front_offset;
26209diff -urNp linux-3.0.4/drivers/gpu/drm/r128/r128_irq.c linux-3.0.4/drivers/gpu/drm/r128/r128_irq.c
26210--- linux-3.0.4/drivers/gpu/drm/r128/r128_irq.c 2011-07-21 22:17:23.000000000 -0400
26211+++ linux-3.0.4/drivers/gpu/drm/r128/r128_irq.c 2011-08-23 21:47:55.000000000 -0400
26212@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
26213 if (crtc != 0)
26214 return 0;
26215
26216- return atomic_read(&dev_priv->vbl_received);
26217+ return atomic_read_unchecked(&dev_priv->vbl_received);
26218 }
26219
26220 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
26221@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
26222 /* VBLANK interrupt */
26223 if (status & R128_CRTC_VBLANK_INT) {
26224 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
26225- atomic_inc(&dev_priv->vbl_received);
26226+ atomic_inc_unchecked(&dev_priv->vbl_received);
26227 drm_handle_vblank(dev, 0);
26228 return IRQ_HANDLED;
26229 }
26230diff -urNp linux-3.0.4/drivers/gpu/drm/r128/r128_state.c linux-3.0.4/drivers/gpu/drm/r128/r128_state.c
26231--- linux-3.0.4/drivers/gpu/drm/r128/r128_state.c 2011-07-21 22:17:23.000000000 -0400
26232+++ linux-3.0.4/drivers/gpu/drm/r128/r128_state.c 2011-08-23 21:47:55.000000000 -0400
26233@@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_priv
26234
26235 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
26236 {
26237- if (atomic_read(&dev_priv->idle_count) == 0)
26238+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
26239 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
26240 else
26241- atomic_set(&dev_priv->idle_count, 0);
26242+ atomic_set_unchecked(&dev_priv->idle_count, 0);
26243 }
26244
26245 #endif
26246diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/atom.c linux-3.0.4/drivers/gpu/drm/radeon/atom.c
26247--- linux-3.0.4/drivers/gpu/drm/radeon/atom.c 2011-07-21 22:17:23.000000000 -0400
26248+++ linux-3.0.4/drivers/gpu/drm/radeon/atom.c 2011-08-23 21:48:14.000000000 -0400
26249@@ -1245,6 +1245,8 @@ struct atom_context *atom_parse(struct c
26250 char name[512];
26251 int i;
26252
26253+ pax_track_stack();
26254+
26255 ctx->card = card;
26256 ctx->bios = bios;
26257
26258diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/mkregtable.c linux-3.0.4/drivers/gpu/drm/radeon/mkregtable.c
26259--- linux-3.0.4/drivers/gpu/drm/radeon/mkregtable.c 2011-07-21 22:17:23.000000000 -0400
26260+++ linux-3.0.4/drivers/gpu/drm/radeon/mkregtable.c 2011-08-23 21:47:55.000000000 -0400
26261@@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
26262 regex_t mask_rex;
26263 regmatch_t match[4];
26264 char buf[1024];
26265- size_t end;
26266+ long end;
26267 int len;
26268 int done = 0;
26269 int r;
26270 unsigned o;
26271 struct offset *offset;
26272 char last_reg_s[10];
26273- int last_reg;
26274+ unsigned long last_reg;
26275
26276 if (regcomp
26277 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
26278diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_atombios.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_atombios.c
26279--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_atombios.c 2011-07-21 22:17:23.000000000 -0400
26280+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_atombios.c 2011-08-23 21:48:14.000000000 -0400
26281@@ -545,6 +545,8 @@ bool radeon_get_atom_connector_info_from
26282 struct radeon_gpio_rec gpio;
26283 struct radeon_hpd hpd;
26284
26285+ pax_track_stack();
26286+
26287 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
26288 return false;
26289
26290diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_device.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_device.c
26291--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_device.c 2011-09-02 18:11:21.000000000 -0400
26292+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_device.c 2011-08-23 21:47:55.000000000 -0400
26293@@ -678,7 +678,7 @@ static bool radeon_switcheroo_can_switch
26294 bool can_switch;
26295
26296 spin_lock(&dev->count_lock);
26297- can_switch = (dev->open_count == 0);
26298+ can_switch = (local_read(&dev->open_count) == 0);
26299 spin_unlock(&dev->count_lock);
26300 return can_switch;
26301 }
26302diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_display.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_display.c
26303--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_display.c 2011-09-02 18:11:21.000000000 -0400
26304+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_display.c 2011-08-23 21:48:14.000000000 -0400
26305@@ -946,6 +946,8 @@ void radeon_compute_pll_legacy(struct ra
26306 uint32_t post_div;
26307 u32 pll_out_min, pll_out_max;
26308
26309+ pax_track_stack();
26310+
26311 DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
26312 freq = freq * 1000;
26313
26314diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_drv.h linux-3.0.4/drivers/gpu/drm/radeon/radeon_drv.h
26315--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_drv.h 2011-07-21 22:17:23.000000000 -0400
26316+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_drv.h 2011-08-23 21:47:55.000000000 -0400
26317@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
26318
26319 /* SW interrupt */
26320 wait_queue_head_t swi_queue;
26321- atomic_t swi_emitted;
26322+ atomic_unchecked_t swi_emitted;
26323 int vblank_crtc;
26324 uint32_t irq_enable_reg;
26325 uint32_t r500_disp_irq_reg;
26326diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_fence.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_fence.c
26327--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_fence.c 2011-07-21 22:17:23.000000000 -0400
26328+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_fence.c 2011-08-23 21:47:55.000000000 -0400
26329@@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_devi
26330 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
26331 return 0;
26332 }
26333- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
26334+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
26335 if (!rdev->cp.ready)
26336 /* FIXME: cp is not running assume everythings is done right
26337 * away
26338@@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct rade
26339 return r;
26340 }
26341 radeon_fence_write(rdev, 0);
26342- atomic_set(&rdev->fence_drv.seq, 0);
26343+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
26344 INIT_LIST_HEAD(&rdev->fence_drv.created);
26345 INIT_LIST_HEAD(&rdev->fence_drv.emited);
26346 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
26347diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon.h linux-3.0.4/drivers/gpu/drm/radeon/radeon.h
26348--- linux-3.0.4/drivers/gpu/drm/radeon/radeon.h 2011-07-21 22:17:23.000000000 -0400
26349+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon.h 2011-08-23 21:47:55.000000000 -0400
26350@@ -191,7 +191,7 @@ extern int sumo_get_temp(struct radeon_d
26351 */
26352 struct radeon_fence_driver {
26353 uint32_t scratch_reg;
26354- atomic_t seq;
26355+ atomic_unchecked_t seq;
26356 uint32_t last_seq;
26357 unsigned long last_jiffies;
26358 unsigned long last_timeout;
26359@@ -960,7 +960,7 @@ struct radeon_asic {
26360 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
26361 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
26362 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
26363-};
26364+} __no_const;
26365
26366 /*
26367 * Asic structures
26368diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_ioc32.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_ioc32.c
26369--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-07-21 22:17:23.000000000 -0400
26370+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-08-23 21:47:55.000000000 -0400
26371@@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(str
26372 request = compat_alloc_user_space(sizeof(*request));
26373 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
26374 || __put_user(req32.param, &request->param)
26375- || __put_user((void __user *)(unsigned long)req32.value,
26376+ || __put_user((unsigned long)req32.value,
26377 &request->value))
26378 return -EFAULT;
26379
26380diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_irq.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_irq.c
26381--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_irq.c 2011-07-21 22:17:23.000000000 -0400
26382+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_irq.c 2011-08-23 21:47:55.000000000 -0400
26383@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
26384 unsigned int ret;
26385 RING_LOCALS;
26386
26387- atomic_inc(&dev_priv->swi_emitted);
26388- ret = atomic_read(&dev_priv->swi_emitted);
26389+ atomic_inc_unchecked(&dev_priv->swi_emitted);
26390+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
26391
26392 BEGIN_RING(4);
26393 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
26394@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
26395 drm_radeon_private_t *dev_priv =
26396 (drm_radeon_private_t *) dev->dev_private;
26397
26398- atomic_set(&dev_priv->swi_emitted, 0);
26399+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
26400 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
26401
26402 dev->max_vblank_count = 0x001fffff;
26403diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_state.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_state.c
26404--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_state.c 2011-07-21 22:17:23.000000000 -0400
26405+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_state.c 2011-08-23 21:47:55.000000000 -0400
26406@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_de
26407 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
26408 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
26409
26410- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
26411+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
26412 sarea_priv->nbox * sizeof(depth_boxes[0])))
26413 return -EFAULT;
26414
26415@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm
26416 {
26417 drm_radeon_private_t *dev_priv = dev->dev_private;
26418 drm_radeon_getparam_t *param = data;
26419- int value;
26420+ int value = 0;
26421
26422 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
26423
26424diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_ttm.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_ttm.c
26425--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_ttm.c 2011-07-21 22:17:23.000000000 -0400
26426+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_ttm.c 2011-08-23 21:47:55.000000000 -0400
26427@@ -644,8 +644,10 @@ int radeon_mmap(struct file *filp, struc
26428 }
26429 if (unlikely(ttm_vm_ops == NULL)) {
26430 ttm_vm_ops = vma->vm_ops;
26431- radeon_ttm_vm_ops = *ttm_vm_ops;
26432- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
26433+ pax_open_kernel();
26434+ memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
26435+ *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
26436+ pax_close_kernel();
26437 }
26438 vma->vm_ops = &radeon_ttm_vm_ops;
26439 return 0;
26440diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/rs690.c linux-3.0.4/drivers/gpu/drm/radeon/rs690.c
26441--- linux-3.0.4/drivers/gpu/drm/radeon/rs690.c 2011-07-21 22:17:23.000000000 -0400
26442+++ linux-3.0.4/drivers/gpu/drm/radeon/rs690.c 2011-08-23 21:47:55.000000000 -0400
26443@@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct
26444 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
26445 rdev->pm.sideport_bandwidth.full)
26446 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
26447- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
26448+ read_delay_latency.full = dfixed_const(800 * 1000);
26449 read_delay_latency.full = dfixed_div(read_delay_latency,
26450 rdev->pm.igp_sideport_mclk);
26451+ a.full = dfixed_const(370);
26452+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
26453 } else {
26454 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
26455 rdev->pm.k8_bandwidth.full)
26456diff -urNp linux-3.0.4/drivers/gpu/drm/ttm/ttm_page_alloc.c linux-3.0.4/drivers/gpu/drm/ttm/ttm_page_alloc.c
26457--- linux-3.0.4/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-07-21 22:17:23.000000000 -0400
26458+++ linux-3.0.4/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-08-23 21:47:55.000000000 -0400
26459@@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages
26460 static int ttm_pool_mm_shrink(struct shrinker *shrink,
26461 struct shrink_control *sc)
26462 {
26463- static atomic_t start_pool = ATOMIC_INIT(0);
26464+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
26465 unsigned i;
26466- unsigned pool_offset = atomic_add_return(1, &start_pool);
26467+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
26468 struct ttm_page_pool *pool;
26469 int shrink_pages = sc->nr_to_scan;
26470
26471diff -urNp linux-3.0.4/drivers/gpu/drm/via/via_drv.h linux-3.0.4/drivers/gpu/drm/via/via_drv.h
26472--- linux-3.0.4/drivers/gpu/drm/via/via_drv.h 2011-07-21 22:17:23.000000000 -0400
26473+++ linux-3.0.4/drivers/gpu/drm/via/via_drv.h 2011-08-23 21:47:55.000000000 -0400
26474@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
26475 typedef uint32_t maskarray_t[5];
26476
26477 typedef struct drm_via_irq {
26478- atomic_t irq_received;
26479+ atomic_unchecked_t irq_received;
26480 uint32_t pending_mask;
26481 uint32_t enable_mask;
26482 wait_queue_head_t irq_queue;
26483@@ -75,7 +75,7 @@ typedef struct drm_via_private {
26484 struct timeval last_vblank;
26485 int last_vblank_valid;
26486 unsigned usec_per_vblank;
26487- atomic_t vbl_received;
26488+ atomic_unchecked_t vbl_received;
26489 drm_via_state_t hc_state;
26490 char pci_buf[VIA_PCI_BUF_SIZE];
26491 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
26492diff -urNp linux-3.0.4/drivers/gpu/drm/via/via_irq.c linux-3.0.4/drivers/gpu/drm/via/via_irq.c
26493--- linux-3.0.4/drivers/gpu/drm/via/via_irq.c 2011-07-21 22:17:23.000000000 -0400
26494+++ linux-3.0.4/drivers/gpu/drm/via/via_irq.c 2011-08-23 21:47:55.000000000 -0400
26495@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
26496 if (crtc != 0)
26497 return 0;
26498
26499- return atomic_read(&dev_priv->vbl_received);
26500+ return atomic_read_unchecked(&dev_priv->vbl_received);
26501 }
26502
26503 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
26504@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
26505
26506 status = VIA_READ(VIA_REG_INTERRUPT);
26507 if (status & VIA_IRQ_VBLANK_PENDING) {
26508- atomic_inc(&dev_priv->vbl_received);
26509- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
26510+ atomic_inc_unchecked(&dev_priv->vbl_received);
26511+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
26512 do_gettimeofday(&cur_vblank);
26513 if (dev_priv->last_vblank_valid) {
26514 dev_priv->usec_per_vblank =
26515@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
26516 dev_priv->last_vblank = cur_vblank;
26517 dev_priv->last_vblank_valid = 1;
26518 }
26519- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
26520+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
26521 DRM_DEBUG("US per vblank is: %u\n",
26522 dev_priv->usec_per_vblank);
26523 }
26524@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
26525
26526 for (i = 0; i < dev_priv->num_irqs; ++i) {
26527 if (status & cur_irq->pending_mask) {
26528- atomic_inc(&cur_irq->irq_received);
26529+ atomic_inc_unchecked(&cur_irq->irq_received);
26530 DRM_WAKEUP(&cur_irq->irq_queue);
26531 handled = 1;
26532 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
26533@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *d
26534 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
26535 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
26536 masks[irq][4]));
26537- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
26538+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
26539 } else {
26540 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
26541 (((cur_irq_sequence =
26542- atomic_read(&cur_irq->irq_received)) -
26543+ atomic_read_unchecked(&cur_irq->irq_received)) -
26544 *sequence) <= (1 << 23)));
26545 }
26546 *sequence = cur_irq_sequence;
26547@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct dr
26548 }
26549
26550 for (i = 0; i < dev_priv->num_irqs; ++i) {
26551- atomic_set(&cur_irq->irq_received, 0);
26552+ atomic_set_unchecked(&cur_irq->irq_received, 0);
26553 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
26554 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
26555 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
26556@@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev,
26557 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
26558 case VIA_IRQ_RELATIVE:
26559 irqwait->request.sequence +=
26560- atomic_read(&cur_irq->irq_received);
26561+ atomic_read_unchecked(&cur_irq->irq_received);
26562 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
26563 case VIA_IRQ_ABSOLUTE:
26564 break;
26565diff -urNp linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
26566--- linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-07-21 22:17:23.000000000 -0400
26567+++ linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-08-23 21:47:55.000000000 -0400
26568@@ -240,7 +240,7 @@ struct vmw_private {
26569 * Fencing and IRQs.
26570 */
26571
26572- atomic_t fence_seq;
26573+ atomic_unchecked_t fence_seq;
26574 wait_queue_head_t fence_queue;
26575 wait_queue_head_t fifo_queue;
26576 atomic_t fence_queue_waiters;
26577diff -urNp linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
26578--- linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c 2011-07-21 22:17:23.000000000 -0400
26579+++ linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c 2011-10-06 04:17:55.000000000 -0400
26580@@ -610,7 +610,7 @@ int vmw_execbuf_ioctl(struct drm_device
26581 struct drm_vmw_fence_rep fence_rep;
26582 struct drm_vmw_fence_rep __user *user_fence_rep;
26583 int ret;
26584- void *user_cmd;
26585+ void __user *user_cmd;
26586 void *cmd;
26587 uint32_t sequence;
26588 struct vmw_sw_context *sw_context = &dev_priv->ctx;
26589diff -urNp linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
26590--- linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-07-21 22:17:23.000000000 -0400
26591+++ linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-08-23 21:47:55.000000000 -0400
26592@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev
26593 while (!vmw_lag_lt(queue, us)) {
26594 spin_lock(&queue->lock);
26595 if (list_empty(&queue->head))
26596- sequence = atomic_read(&dev_priv->fence_seq);
26597+ sequence = atomic_read_unchecked(&dev_priv->fence_seq);
26598 else {
26599 fence = list_first_entry(&queue->head,
26600 struct vmw_fence, head);
26601diff -urNp linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
26602--- linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-07-21 22:17:23.000000000 -0400
26603+++ linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-10-06 04:17:55.000000000 -0400
26604@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *de
26605 (unsigned int) min,
26606 (unsigned int) fifo->capabilities);
26607
26608- atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
26609+ atomic_set_unchecked(&dev_priv->fence_seq, dev_priv->last_read_sequence);
26610 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
26611 vmw_fence_queue_init(&fifo->fence_queue);
26612 return vmw_fifo_send_fence(dev_priv, &dummy);
26613@@ -356,7 +356,7 @@ void *vmw_fifo_reserve(struct vmw_privat
26614 if (reserveable)
26615 iowrite32(bytes, fifo_mem +
26616 SVGA_FIFO_RESERVED);
26617- return fifo_mem + (next_cmd >> 2);
26618+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
26619 } else {
26620 need_bounce = true;
26621 }
26622@@ -476,7 +476,7 @@ int vmw_fifo_send_fence(struct vmw_priva
26623
26624 fm = vmw_fifo_reserve(dev_priv, bytes);
26625 if (unlikely(fm == NULL)) {
26626- *sequence = atomic_read(&dev_priv->fence_seq);
26627+ *sequence = atomic_read_unchecked(&dev_priv->fence_seq);
26628 ret = -ENOMEM;
26629 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
26630 false, 3*HZ);
26631@@ -484,7 +484,7 @@ int vmw_fifo_send_fence(struct vmw_priva
26632 }
26633
26634 do {
26635- *sequence = atomic_add_return(1, &dev_priv->fence_seq);
26636+ *sequence = atomic_add_return_unchecked(1, &dev_priv->fence_seq);
26637 } while (*sequence == 0);
26638
26639 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
26640diff -urNp linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
26641--- linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-07-21 22:17:23.000000000 -0400
26642+++ linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-08-23 21:47:55.000000000 -0400
26643@@ -100,7 +100,7 @@ bool vmw_fence_signaled(struct vmw_priva
26644 * emitted. Then the fence is stale and signaled.
26645 */
26646
26647- ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
26648+ ret = ((atomic_read_unchecked(&dev_priv->fence_seq) - sequence)
26649 > VMW_FENCE_WRAP);
26650
26651 return ret;
26652@@ -131,7 +131,7 @@ int vmw_fallback_wait(struct vmw_private
26653
26654 if (fifo_idle)
26655 down_read(&fifo_state->rwsem);
26656- signal_seq = atomic_read(&dev_priv->fence_seq);
26657+ signal_seq = atomic_read_unchecked(&dev_priv->fence_seq);
26658 ret = 0;
26659
26660 for (;;) {
26661diff -urNp linux-3.0.4/drivers/hid/hid-core.c linux-3.0.4/drivers/hid/hid-core.c
26662--- linux-3.0.4/drivers/hid/hid-core.c 2011-07-21 22:17:23.000000000 -0400
26663+++ linux-3.0.4/drivers/hid/hid-core.c 2011-08-23 21:47:55.000000000 -0400
26664@@ -1923,7 +1923,7 @@ static bool hid_ignore(struct hid_device
26665
26666 int hid_add_device(struct hid_device *hdev)
26667 {
26668- static atomic_t id = ATOMIC_INIT(0);
26669+ static atomic_unchecked_t id = ATOMIC_INIT(0);
26670 int ret;
26671
26672 if (WARN_ON(hdev->status & HID_STAT_ADDED))
26673@@ -1938,7 +1938,7 @@ int hid_add_device(struct hid_device *hd
26674 /* XXX hack, any other cleaner solution after the driver core
26675 * is converted to allow more than 20 bytes as the device name? */
26676 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
26677- hdev->vendor, hdev->product, atomic_inc_return(&id));
26678+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
26679
26680 hid_debug_register(hdev, dev_name(&hdev->dev));
26681 ret = device_add(&hdev->dev);
26682diff -urNp linux-3.0.4/drivers/hid/usbhid/hiddev.c linux-3.0.4/drivers/hid/usbhid/hiddev.c
26683--- linux-3.0.4/drivers/hid/usbhid/hiddev.c 2011-07-21 22:17:23.000000000 -0400
26684+++ linux-3.0.4/drivers/hid/usbhid/hiddev.c 2011-08-23 21:47:55.000000000 -0400
26685@@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *fi
26686 break;
26687
26688 case HIDIOCAPPLICATION:
26689- if (arg < 0 || arg >= hid->maxapplication)
26690+ if (arg >= hid->maxapplication)
26691 break;
26692
26693 for (i = 0; i < hid->maxcollection; i++)
26694diff -urNp linux-3.0.4/drivers/hwmon/acpi_power_meter.c linux-3.0.4/drivers/hwmon/acpi_power_meter.c
26695--- linux-3.0.4/drivers/hwmon/acpi_power_meter.c 2011-07-21 22:17:23.000000000 -0400
26696+++ linux-3.0.4/drivers/hwmon/acpi_power_meter.c 2011-08-23 21:47:55.000000000 -0400
26697@@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *d
26698 return res;
26699
26700 temp /= 1000;
26701- if (temp < 0)
26702- return -EINVAL;
26703
26704 mutex_lock(&resource->lock);
26705 resource->trip[attr->index - 7] = temp;
26706diff -urNp linux-3.0.4/drivers/hwmon/sht15.c linux-3.0.4/drivers/hwmon/sht15.c
26707--- linux-3.0.4/drivers/hwmon/sht15.c 2011-07-21 22:17:23.000000000 -0400
26708+++ linux-3.0.4/drivers/hwmon/sht15.c 2011-08-23 21:47:55.000000000 -0400
26709@@ -166,7 +166,7 @@ struct sht15_data {
26710 int supply_uV;
26711 bool supply_uV_valid;
26712 struct work_struct update_supply_work;
26713- atomic_t interrupt_handled;
26714+ atomic_unchecked_t interrupt_handled;
26715 };
26716
26717 /**
26718@@ -509,13 +509,13 @@ static int sht15_measurement(struct sht1
26719 return ret;
26720
26721 gpio_direction_input(data->pdata->gpio_data);
26722- atomic_set(&data->interrupt_handled, 0);
26723+ atomic_set_unchecked(&data->interrupt_handled, 0);
26724
26725 enable_irq(gpio_to_irq(data->pdata->gpio_data));
26726 if (gpio_get_value(data->pdata->gpio_data) == 0) {
26727 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
26728 /* Only relevant if the interrupt hasn't occurred. */
26729- if (!atomic_read(&data->interrupt_handled))
26730+ if (!atomic_read_unchecked(&data->interrupt_handled))
26731 schedule_work(&data->read_work);
26732 }
26733 ret = wait_event_timeout(data->wait_queue,
26734@@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired
26735
26736 /* First disable the interrupt */
26737 disable_irq_nosync(irq);
26738- atomic_inc(&data->interrupt_handled);
26739+ atomic_inc_unchecked(&data->interrupt_handled);
26740 /* Then schedule a reading work struct */
26741 if (data->state != SHT15_READING_NOTHING)
26742 schedule_work(&data->read_work);
26743@@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct wo
26744 * If not, then start the interrupt again - care here as could
26745 * have gone low in meantime so verify it hasn't!
26746 */
26747- atomic_set(&data->interrupt_handled, 0);
26748+ atomic_set_unchecked(&data->interrupt_handled, 0);
26749 enable_irq(gpio_to_irq(data->pdata->gpio_data));
26750 /* If still not occurred or another handler has been scheduled */
26751 if (gpio_get_value(data->pdata->gpio_data)
26752- || atomic_read(&data->interrupt_handled))
26753+ || atomic_read_unchecked(&data->interrupt_handled))
26754 return;
26755 }
26756
26757diff -urNp linux-3.0.4/drivers/hwmon/w83791d.c linux-3.0.4/drivers/hwmon/w83791d.c
26758--- linux-3.0.4/drivers/hwmon/w83791d.c 2011-07-21 22:17:23.000000000 -0400
26759+++ linux-3.0.4/drivers/hwmon/w83791d.c 2011-08-23 21:47:55.000000000 -0400
26760@@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_cli
26761 struct i2c_board_info *info);
26762 static int w83791d_remove(struct i2c_client *client);
26763
26764-static int w83791d_read(struct i2c_client *client, u8 register);
26765-static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
26766+static int w83791d_read(struct i2c_client *client, u8 reg);
26767+static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
26768 static struct w83791d_data *w83791d_update_device(struct device *dev);
26769
26770 #ifdef DEBUG
26771diff -urNp linux-3.0.4/drivers/i2c/busses/i2c-amd756-s4882.c linux-3.0.4/drivers/i2c/busses/i2c-amd756-s4882.c
26772--- linux-3.0.4/drivers/i2c/busses/i2c-amd756-s4882.c 2011-07-21 22:17:23.000000000 -0400
26773+++ linux-3.0.4/drivers/i2c/busses/i2c-amd756-s4882.c 2011-08-23 21:47:55.000000000 -0400
26774@@ -43,7 +43,7 @@
26775 extern struct i2c_adapter amd756_smbus;
26776
26777 static struct i2c_adapter *s4882_adapter;
26778-static struct i2c_algorithm *s4882_algo;
26779+static i2c_algorithm_no_const *s4882_algo;
26780
26781 /* Wrapper access functions for multiplexed SMBus */
26782 static DEFINE_MUTEX(amd756_lock);
26783diff -urNp linux-3.0.4/drivers/i2c/busses/i2c-nforce2-s4985.c linux-3.0.4/drivers/i2c/busses/i2c-nforce2-s4985.c
26784--- linux-3.0.4/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-07-21 22:17:23.000000000 -0400
26785+++ linux-3.0.4/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-08-23 21:47:55.000000000 -0400
26786@@ -41,7 +41,7 @@
26787 extern struct i2c_adapter *nforce2_smbus;
26788
26789 static struct i2c_adapter *s4985_adapter;
26790-static struct i2c_algorithm *s4985_algo;
26791+static i2c_algorithm_no_const *s4985_algo;
26792
26793 /* Wrapper access functions for multiplexed SMBus */
26794 static DEFINE_MUTEX(nforce2_lock);
26795diff -urNp linux-3.0.4/drivers/i2c/i2c-mux.c linux-3.0.4/drivers/i2c/i2c-mux.c
26796--- linux-3.0.4/drivers/i2c/i2c-mux.c 2011-07-21 22:17:23.000000000 -0400
26797+++ linux-3.0.4/drivers/i2c/i2c-mux.c 2011-08-23 21:47:55.000000000 -0400
26798@@ -28,7 +28,7 @@
26799 /* multiplexer per channel data */
26800 struct i2c_mux_priv {
26801 struct i2c_adapter adap;
26802- struct i2c_algorithm algo;
26803+ i2c_algorithm_no_const algo;
26804
26805 struct i2c_adapter *parent;
26806 void *mux_dev; /* the mux chip/device */
26807diff -urNp linux-3.0.4/drivers/ide/ide-cd.c linux-3.0.4/drivers/ide/ide-cd.c
26808--- linux-3.0.4/drivers/ide/ide-cd.c 2011-07-21 22:17:23.000000000 -0400
26809+++ linux-3.0.4/drivers/ide/ide-cd.c 2011-08-23 21:47:55.000000000 -0400
26810@@ -769,7 +769,7 @@ static void cdrom_do_block_pc(ide_drive_
26811 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
26812 if ((unsigned long)buf & alignment
26813 || blk_rq_bytes(rq) & q->dma_pad_mask
26814- || object_is_on_stack(buf))
26815+ || object_starts_on_stack(buf))
26816 drive->dma = 0;
26817 }
26818 }
26819diff -urNp linux-3.0.4/drivers/ide/ide-floppy.c linux-3.0.4/drivers/ide/ide-floppy.c
26820--- linux-3.0.4/drivers/ide/ide-floppy.c 2011-07-21 22:17:23.000000000 -0400
26821+++ linux-3.0.4/drivers/ide/ide-floppy.c 2011-08-23 21:48:14.000000000 -0400
26822@@ -379,6 +379,8 @@ static int ide_floppy_get_capacity(ide_d
26823 u8 pc_buf[256], header_len, desc_cnt;
26824 int i, rc = 1, blocks, length;
26825
26826+ pax_track_stack();
26827+
26828 ide_debug_log(IDE_DBG_FUNC, "enter");
26829
26830 drive->bios_cyl = 0;
26831diff -urNp linux-3.0.4/drivers/ide/setup-pci.c linux-3.0.4/drivers/ide/setup-pci.c
26832--- linux-3.0.4/drivers/ide/setup-pci.c 2011-07-21 22:17:23.000000000 -0400
26833+++ linux-3.0.4/drivers/ide/setup-pci.c 2011-08-23 21:48:14.000000000 -0400
26834@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
26835 int ret, i, n_ports = dev2 ? 4 : 2;
26836 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
26837
26838+ pax_track_stack();
26839+
26840 for (i = 0; i < n_ports / 2; i++) {
26841 ret = ide_setup_pci_controller(pdev[i], d, !i);
26842 if (ret < 0)
26843diff -urNp linux-3.0.4/drivers/infiniband/core/cm.c linux-3.0.4/drivers/infiniband/core/cm.c
26844--- linux-3.0.4/drivers/infiniband/core/cm.c 2011-07-21 22:17:23.000000000 -0400
26845+++ linux-3.0.4/drivers/infiniband/core/cm.c 2011-08-23 21:47:55.000000000 -0400
26846@@ -113,7 +113,7 @@ static char const counter_group_names[CM
26847
26848 struct cm_counter_group {
26849 struct kobject obj;
26850- atomic_long_t counter[CM_ATTR_COUNT];
26851+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
26852 };
26853
26854 struct cm_counter_attribute {
26855@@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm
26856 struct ib_mad_send_buf *msg = NULL;
26857 int ret;
26858
26859- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
26860+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
26861 counter[CM_REQ_COUNTER]);
26862
26863 /* Quick state check to discard duplicate REQs. */
26864@@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm
26865 if (!cm_id_priv)
26866 return;
26867
26868- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
26869+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
26870 counter[CM_REP_COUNTER]);
26871 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
26872 if (ret)
26873@@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work
26874 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
26875 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
26876 spin_unlock_irq(&cm_id_priv->lock);
26877- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
26878+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
26879 counter[CM_RTU_COUNTER]);
26880 goto out;
26881 }
26882@@ -2115,7 +2115,7 @@ static int cm_dreq_handler(struct cm_wor
26883 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
26884 dreq_msg->local_comm_id);
26885 if (!cm_id_priv) {
26886- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
26887+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
26888 counter[CM_DREQ_COUNTER]);
26889 cm_issue_drep(work->port, work->mad_recv_wc);
26890 return -EINVAL;
26891@@ -2140,7 +2140,7 @@ static int cm_dreq_handler(struct cm_wor
26892 case IB_CM_MRA_REP_RCVD:
26893 break;
26894 case IB_CM_TIMEWAIT:
26895- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
26896+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
26897 counter[CM_DREQ_COUNTER]);
26898 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
26899 goto unlock;
26900@@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_wor
26901 cm_free_msg(msg);
26902 goto deref;
26903 case IB_CM_DREQ_RCVD:
26904- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
26905+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
26906 counter[CM_DREQ_COUNTER]);
26907 goto unlock;
26908 default:
26909@@ -2521,7 +2521,7 @@ static int cm_mra_handler(struct cm_work
26910 ib_modify_mad(cm_id_priv->av.port->mad_agent,
26911 cm_id_priv->msg, timeout)) {
26912 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
26913- atomic_long_inc(&work->port->
26914+ atomic_long_inc_unchecked(&work->port->
26915 counter_group[CM_RECV_DUPLICATES].
26916 counter[CM_MRA_COUNTER]);
26917 goto out;
26918@@ -2530,7 +2530,7 @@ static int cm_mra_handler(struct cm_work
26919 break;
26920 case IB_CM_MRA_REQ_RCVD:
26921 case IB_CM_MRA_REP_RCVD:
26922- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
26923+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
26924 counter[CM_MRA_COUNTER]);
26925 /* fall through */
26926 default:
26927@@ -2692,7 +2692,7 @@ static int cm_lap_handler(struct cm_work
26928 case IB_CM_LAP_IDLE:
26929 break;
26930 case IB_CM_MRA_LAP_SENT:
26931- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
26932+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
26933 counter[CM_LAP_COUNTER]);
26934 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
26935 goto unlock;
26936@@ -2708,7 +2708,7 @@ static int cm_lap_handler(struct cm_work
26937 cm_free_msg(msg);
26938 goto deref;
26939 case IB_CM_LAP_RCVD:
26940- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
26941+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
26942 counter[CM_LAP_COUNTER]);
26943 goto unlock;
26944 default:
26945@@ -2992,7 +2992,7 @@ static int cm_sidr_req_handler(struct cm
26946 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
26947 if (cur_cm_id_priv) {
26948 spin_unlock_irq(&cm.lock);
26949- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
26950+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
26951 counter[CM_SIDR_REQ_COUNTER]);
26952 goto out; /* Duplicate message. */
26953 }
26954@@ -3204,10 +3204,10 @@ static void cm_send_handler(struct ib_ma
26955 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
26956 msg->retries = 1;
26957
26958- atomic_long_add(1 + msg->retries,
26959+ atomic_long_add_unchecked(1 + msg->retries,
26960 &port->counter_group[CM_XMIT].counter[attr_index]);
26961 if (msg->retries)
26962- atomic_long_add(msg->retries,
26963+ atomic_long_add_unchecked(msg->retries,
26964 &port->counter_group[CM_XMIT_RETRIES].
26965 counter[attr_index]);
26966
26967@@ -3417,7 +3417,7 @@ static void cm_recv_handler(struct ib_ma
26968 }
26969
26970 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
26971- atomic_long_inc(&port->counter_group[CM_RECV].
26972+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
26973 counter[attr_id - CM_ATTR_ID_OFFSET]);
26974
26975 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
26976@@ -3615,7 +3615,7 @@ static ssize_t cm_show_counter(struct ko
26977 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
26978
26979 return sprintf(buf, "%ld\n",
26980- atomic_long_read(&group->counter[cm_attr->index]));
26981+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
26982 }
26983
26984 static const struct sysfs_ops cm_counter_ops = {
26985diff -urNp linux-3.0.4/drivers/infiniband/core/fmr_pool.c linux-3.0.4/drivers/infiniband/core/fmr_pool.c
26986--- linux-3.0.4/drivers/infiniband/core/fmr_pool.c 2011-07-21 22:17:23.000000000 -0400
26987+++ linux-3.0.4/drivers/infiniband/core/fmr_pool.c 2011-08-23 21:47:55.000000000 -0400
26988@@ -97,8 +97,8 @@ struct ib_fmr_pool {
26989
26990 struct task_struct *thread;
26991
26992- atomic_t req_ser;
26993- atomic_t flush_ser;
26994+ atomic_unchecked_t req_ser;
26995+ atomic_unchecked_t flush_ser;
26996
26997 wait_queue_head_t force_wait;
26998 };
26999@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
27000 struct ib_fmr_pool *pool = pool_ptr;
27001
27002 do {
27003- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
27004+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
27005 ib_fmr_batch_release(pool);
27006
27007- atomic_inc(&pool->flush_ser);
27008+ atomic_inc_unchecked(&pool->flush_ser);
27009 wake_up_interruptible(&pool->force_wait);
27010
27011 if (pool->flush_function)
27012@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
27013 }
27014
27015 set_current_state(TASK_INTERRUPTIBLE);
27016- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
27017+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
27018 !kthread_should_stop())
27019 schedule();
27020 __set_current_state(TASK_RUNNING);
27021@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
27022 pool->dirty_watermark = params->dirty_watermark;
27023 pool->dirty_len = 0;
27024 spin_lock_init(&pool->pool_lock);
27025- atomic_set(&pool->req_ser, 0);
27026- atomic_set(&pool->flush_ser, 0);
27027+ atomic_set_unchecked(&pool->req_ser, 0);
27028+ atomic_set_unchecked(&pool->flush_ser, 0);
27029 init_waitqueue_head(&pool->force_wait);
27030
27031 pool->thread = kthread_run(ib_fmr_cleanup_thread,
27032@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
27033 }
27034 spin_unlock_irq(&pool->pool_lock);
27035
27036- serial = atomic_inc_return(&pool->req_ser);
27037+ serial = atomic_inc_return_unchecked(&pool->req_ser);
27038 wake_up_process(pool->thread);
27039
27040 if (wait_event_interruptible(pool->force_wait,
27041- atomic_read(&pool->flush_ser) - serial >= 0))
27042+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
27043 return -EINTR;
27044
27045 return 0;
27046@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
27047 } else {
27048 list_add_tail(&fmr->list, &pool->dirty_list);
27049 if (++pool->dirty_len >= pool->dirty_watermark) {
27050- atomic_inc(&pool->req_ser);
27051+ atomic_inc_unchecked(&pool->req_ser);
27052 wake_up_process(pool->thread);
27053 }
27054 }
27055diff -urNp linux-3.0.4/drivers/infiniband/hw/cxgb4/mem.c linux-3.0.4/drivers/infiniband/hw/cxgb4/mem.c
27056--- linux-3.0.4/drivers/infiniband/hw/cxgb4/mem.c 2011-07-21 22:17:23.000000000 -0400
27057+++ linux-3.0.4/drivers/infiniband/hw/cxgb4/mem.c 2011-08-23 21:47:55.000000000 -0400
27058@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_r
27059 int err;
27060 struct fw_ri_tpte tpt;
27061 u32 stag_idx;
27062- static atomic_t key;
27063+ static atomic_unchecked_t key;
27064
27065 if (c4iw_fatal_error(rdev))
27066 return -EIO;
27067@@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_r
27068 &rdev->resource.tpt_fifo_lock);
27069 if (!stag_idx)
27070 return -ENOMEM;
27071- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
27072+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
27073 }
27074 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
27075 __func__, stag_state, type, pdid, stag_idx);
27076diff -urNp linux-3.0.4/drivers/infiniband/hw/ipath/ipath_fs.c linux-3.0.4/drivers/infiniband/hw/ipath/ipath_fs.c
27077--- linux-3.0.4/drivers/infiniband/hw/ipath/ipath_fs.c 2011-07-21 22:17:23.000000000 -0400
27078+++ linux-3.0.4/drivers/infiniband/hw/ipath/ipath_fs.c 2011-08-23 21:48:14.000000000 -0400
27079@@ -113,6 +113,8 @@ static ssize_t atomic_counters_read(stru
27080 struct infinipath_counters counters;
27081 struct ipath_devdata *dd;
27082
27083+ pax_track_stack();
27084+
27085 dd = file->f_path.dentry->d_inode->i_private;
27086 dd->ipath_f_read_counters(dd, &counters);
27087
27088diff -urNp linux-3.0.4/drivers/infiniband/hw/ipath/ipath_rc.c linux-3.0.4/drivers/infiniband/hw/ipath/ipath_rc.c
27089--- linux-3.0.4/drivers/infiniband/hw/ipath/ipath_rc.c 2011-07-21 22:17:23.000000000 -0400
27090+++ linux-3.0.4/drivers/infiniband/hw/ipath/ipath_rc.c 2011-08-23 21:47:55.000000000 -0400
27091@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *de
27092 struct ib_atomic_eth *ateth;
27093 struct ipath_ack_entry *e;
27094 u64 vaddr;
27095- atomic64_t *maddr;
27096+ atomic64_unchecked_t *maddr;
27097 u64 sdata;
27098 u32 rkey;
27099 u8 next;
27100@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *de
27101 IB_ACCESS_REMOTE_ATOMIC)))
27102 goto nack_acc_unlck;
27103 /* Perform atomic OP and save result. */
27104- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
27105+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
27106 sdata = be64_to_cpu(ateth->swap_data);
27107 e = &qp->s_ack_queue[qp->r_head_ack_queue];
27108 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
27109- (u64) atomic64_add_return(sdata, maddr) - sdata :
27110+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
27111 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
27112 be64_to_cpu(ateth->compare_data),
27113 sdata);
27114diff -urNp linux-3.0.4/drivers/infiniband/hw/ipath/ipath_ruc.c linux-3.0.4/drivers/infiniband/hw/ipath/ipath_ruc.c
27115--- linux-3.0.4/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-07-21 22:17:23.000000000 -0400
27116+++ linux-3.0.4/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-08-23 21:47:55.000000000 -0400
27117@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ip
27118 unsigned long flags;
27119 struct ib_wc wc;
27120 u64 sdata;
27121- atomic64_t *maddr;
27122+ atomic64_unchecked_t *maddr;
27123 enum ib_wc_status send_status;
27124
27125 /*
27126@@ -382,11 +382,11 @@ again:
27127 IB_ACCESS_REMOTE_ATOMIC)))
27128 goto acc_err;
27129 /* Perform atomic OP and save result. */
27130- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
27131+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
27132 sdata = wqe->wr.wr.atomic.compare_add;
27133 *(u64 *) sqp->s_sge.sge.vaddr =
27134 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
27135- (u64) atomic64_add_return(sdata, maddr) - sdata :
27136+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
27137 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
27138 sdata, wqe->wr.wr.atomic.swap);
27139 goto send_comp;
27140diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes.c linux-3.0.4/drivers/infiniband/hw/nes/nes.c
27141--- linux-3.0.4/drivers/infiniband/hw/nes/nes.c 2011-07-21 22:17:23.000000000 -0400
27142+++ linux-3.0.4/drivers/infiniband/hw/nes/nes.c 2011-08-23 21:47:55.000000000 -0400
27143@@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
27144 LIST_HEAD(nes_adapter_list);
27145 static LIST_HEAD(nes_dev_list);
27146
27147-atomic_t qps_destroyed;
27148+atomic_unchecked_t qps_destroyed;
27149
27150 static unsigned int ee_flsh_adapter;
27151 static unsigned int sysfs_nonidx_addr;
27152@@ -275,7 +275,7 @@ static void nes_cqp_rem_ref_callback(str
27153 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
27154 struct nes_adapter *nesadapter = nesdev->nesadapter;
27155
27156- atomic_inc(&qps_destroyed);
27157+ atomic_inc_unchecked(&qps_destroyed);
27158
27159 /* Free the control structures */
27160
27161diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes_cm.c linux-3.0.4/drivers/infiniband/hw/nes/nes_cm.c
27162--- linux-3.0.4/drivers/infiniband/hw/nes/nes_cm.c 2011-07-21 22:17:23.000000000 -0400
27163+++ linux-3.0.4/drivers/infiniband/hw/nes/nes_cm.c 2011-08-23 21:47:55.000000000 -0400
27164@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
27165 u32 cm_packets_retrans;
27166 u32 cm_packets_created;
27167 u32 cm_packets_received;
27168-atomic_t cm_listens_created;
27169-atomic_t cm_listens_destroyed;
27170+atomic_unchecked_t cm_listens_created;
27171+atomic_unchecked_t cm_listens_destroyed;
27172 u32 cm_backlog_drops;
27173-atomic_t cm_loopbacks;
27174-atomic_t cm_nodes_created;
27175-atomic_t cm_nodes_destroyed;
27176-atomic_t cm_accel_dropped_pkts;
27177-atomic_t cm_resets_recvd;
27178+atomic_unchecked_t cm_loopbacks;
27179+atomic_unchecked_t cm_nodes_created;
27180+atomic_unchecked_t cm_nodes_destroyed;
27181+atomic_unchecked_t cm_accel_dropped_pkts;
27182+atomic_unchecked_t cm_resets_recvd;
27183
27184 static inline int mini_cm_accelerated(struct nes_cm_core *,
27185 struct nes_cm_node *);
27186@@ -151,13 +151,13 @@ static struct nes_cm_ops nes_cm_api = {
27187
27188 static struct nes_cm_core *g_cm_core;
27189
27190-atomic_t cm_connects;
27191-atomic_t cm_accepts;
27192-atomic_t cm_disconnects;
27193-atomic_t cm_closes;
27194-atomic_t cm_connecteds;
27195-atomic_t cm_connect_reqs;
27196-atomic_t cm_rejects;
27197+atomic_unchecked_t cm_connects;
27198+atomic_unchecked_t cm_accepts;
27199+atomic_unchecked_t cm_disconnects;
27200+atomic_unchecked_t cm_closes;
27201+atomic_unchecked_t cm_connecteds;
27202+atomic_unchecked_t cm_connect_reqs;
27203+atomic_unchecked_t cm_rejects;
27204
27205
27206 /**
27207@@ -1045,7 +1045,7 @@ static int mini_cm_dec_refcnt_listen(str
27208 kfree(listener);
27209 listener = NULL;
27210 ret = 0;
27211- atomic_inc(&cm_listens_destroyed);
27212+ atomic_inc_unchecked(&cm_listens_destroyed);
27213 } else {
27214 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
27215 }
27216@@ -1240,7 +1240,7 @@ static struct nes_cm_node *make_cm_node(
27217 cm_node->rem_mac);
27218
27219 add_hte_node(cm_core, cm_node);
27220- atomic_inc(&cm_nodes_created);
27221+ atomic_inc_unchecked(&cm_nodes_created);
27222
27223 return cm_node;
27224 }
27225@@ -1298,7 +1298,7 @@ static int rem_ref_cm_node(struct nes_cm
27226 }
27227
27228 atomic_dec(&cm_core->node_cnt);
27229- atomic_inc(&cm_nodes_destroyed);
27230+ atomic_inc_unchecked(&cm_nodes_destroyed);
27231 nesqp = cm_node->nesqp;
27232 if (nesqp) {
27233 nesqp->cm_node = NULL;
27234@@ -1365,7 +1365,7 @@ static int process_options(struct nes_cm
27235
27236 static void drop_packet(struct sk_buff *skb)
27237 {
27238- atomic_inc(&cm_accel_dropped_pkts);
27239+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
27240 dev_kfree_skb_any(skb);
27241 }
27242
27243@@ -1428,7 +1428,7 @@ static void handle_rst_pkt(struct nes_cm
27244 {
27245
27246 int reset = 0; /* whether to send reset in case of err.. */
27247- atomic_inc(&cm_resets_recvd);
27248+ atomic_inc_unchecked(&cm_resets_recvd);
27249 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
27250 " refcnt=%d\n", cm_node, cm_node->state,
27251 atomic_read(&cm_node->ref_count));
27252@@ -2057,7 +2057,7 @@ static struct nes_cm_node *mini_cm_conne
27253 rem_ref_cm_node(cm_node->cm_core, cm_node);
27254 return NULL;
27255 }
27256- atomic_inc(&cm_loopbacks);
27257+ atomic_inc_unchecked(&cm_loopbacks);
27258 loopbackremotenode->loopbackpartner = cm_node;
27259 loopbackremotenode->tcp_cntxt.rcv_wscale =
27260 NES_CM_DEFAULT_RCV_WND_SCALE;
27261@@ -2332,7 +2332,7 @@ static int mini_cm_recv_pkt(struct nes_c
27262 add_ref_cm_node(cm_node);
27263 } else if (cm_node->state == NES_CM_STATE_TSA) {
27264 rem_ref_cm_node(cm_core, cm_node);
27265- atomic_inc(&cm_accel_dropped_pkts);
27266+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
27267 dev_kfree_skb_any(skb);
27268 break;
27269 }
27270@@ -2638,7 +2638,7 @@ static int nes_cm_disconn_true(struct ne
27271
27272 if ((cm_id) && (cm_id->event_handler)) {
27273 if (issue_disconn) {
27274- atomic_inc(&cm_disconnects);
27275+ atomic_inc_unchecked(&cm_disconnects);
27276 cm_event.event = IW_CM_EVENT_DISCONNECT;
27277 cm_event.status = disconn_status;
27278 cm_event.local_addr = cm_id->local_addr;
27279@@ -2660,7 +2660,7 @@ static int nes_cm_disconn_true(struct ne
27280 }
27281
27282 if (issue_close) {
27283- atomic_inc(&cm_closes);
27284+ atomic_inc_unchecked(&cm_closes);
27285 nes_disconnect(nesqp, 1);
27286
27287 cm_id->provider_data = nesqp;
27288@@ -2791,7 +2791,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
27289
27290 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
27291 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
27292- atomic_inc(&cm_accepts);
27293+ atomic_inc_unchecked(&cm_accepts);
27294
27295 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
27296 netdev_refcnt_read(nesvnic->netdev));
27297@@ -3001,7 +3001,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
27298
27299 struct nes_cm_core *cm_core;
27300
27301- atomic_inc(&cm_rejects);
27302+ atomic_inc_unchecked(&cm_rejects);
27303 cm_node = (struct nes_cm_node *) cm_id->provider_data;
27304 loopback = cm_node->loopbackpartner;
27305 cm_core = cm_node->cm_core;
27306@@ -3067,7 +3067,7 @@ int nes_connect(struct iw_cm_id *cm_id,
27307 ntohl(cm_id->local_addr.sin_addr.s_addr),
27308 ntohs(cm_id->local_addr.sin_port));
27309
27310- atomic_inc(&cm_connects);
27311+ atomic_inc_unchecked(&cm_connects);
27312 nesqp->active_conn = 1;
27313
27314 /* cache the cm_id in the qp */
27315@@ -3173,7 +3173,7 @@ int nes_create_listen(struct iw_cm_id *c
27316 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
27317 return err;
27318 }
27319- atomic_inc(&cm_listens_created);
27320+ atomic_inc_unchecked(&cm_listens_created);
27321 }
27322
27323 cm_id->add_ref(cm_id);
27324@@ -3278,7 +3278,7 @@ static void cm_event_connected(struct ne
27325 if (nesqp->destroyed) {
27326 return;
27327 }
27328- atomic_inc(&cm_connecteds);
27329+ atomic_inc_unchecked(&cm_connecteds);
27330 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
27331 " local port 0x%04X. jiffies = %lu.\n",
27332 nesqp->hwqp.qp_id,
27333@@ -3493,7 +3493,7 @@ static void cm_event_reset(struct nes_cm
27334
27335 cm_id->add_ref(cm_id);
27336 ret = cm_id->event_handler(cm_id, &cm_event);
27337- atomic_inc(&cm_closes);
27338+ atomic_inc_unchecked(&cm_closes);
27339 cm_event.event = IW_CM_EVENT_CLOSE;
27340 cm_event.status = 0;
27341 cm_event.provider_data = cm_id->provider_data;
27342@@ -3529,7 +3529,7 @@ static void cm_event_mpa_req(struct nes_
27343 return;
27344 cm_id = cm_node->cm_id;
27345
27346- atomic_inc(&cm_connect_reqs);
27347+ atomic_inc_unchecked(&cm_connect_reqs);
27348 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
27349 cm_node, cm_id, jiffies);
27350
27351@@ -3567,7 +3567,7 @@ static void cm_event_mpa_reject(struct n
27352 return;
27353 cm_id = cm_node->cm_id;
27354
27355- atomic_inc(&cm_connect_reqs);
27356+ atomic_inc_unchecked(&cm_connect_reqs);
27357 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
27358 cm_node, cm_id, jiffies);
27359
27360diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes.h linux-3.0.4/drivers/infiniband/hw/nes/nes.h
27361--- linux-3.0.4/drivers/infiniband/hw/nes/nes.h 2011-07-21 22:17:23.000000000 -0400
27362+++ linux-3.0.4/drivers/infiniband/hw/nes/nes.h 2011-08-23 21:47:55.000000000 -0400
27363@@ -175,17 +175,17 @@ extern unsigned int nes_debug_level;
27364 extern unsigned int wqm_quanta;
27365 extern struct list_head nes_adapter_list;
27366
27367-extern atomic_t cm_connects;
27368-extern atomic_t cm_accepts;
27369-extern atomic_t cm_disconnects;
27370-extern atomic_t cm_closes;
27371-extern atomic_t cm_connecteds;
27372-extern atomic_t cm_connect_reqs;
27373-extern atomic_t cm_rejects;
27374-extern atomic_t mod_qp_timouts;
27375-extern atomic_t qps_created;
27376-extern atomic_t qps_destroyed;
27377-extern atomic_t sw_qps_destroyed;
27378+extern atomic_unchecked_t cm_connects;
27379+extern atomic_unchecked_t cm_accepts;
27380+extern atomic_unchecked_t cm_disconnects;
27381+extern atomic_unchecked_t cm_closes;
27382+extern atomic_unchecked_t cm_connecteds;
27383+extern atomic_unchecked_t cm_connect_reqs;
27384+extern atomic_unchecked_t cm_rejects;
27385+extern atomic_unchecked_t mod_qp_timouts;
27386+extern atomic_unchecked_t qps_created;
27387+extern atomic_unchecked_t qps_destroyed;
27388+extern atomic_unchecked_t sw_qps_destroyed;
27389 extern u32 mh_detected;
27390 extern u32 mh_pauses_sent;
27391 extern u32 cm_packets_sent;
27392@@ -194,14 +194,14 @@ extern u32 cm_packets_created;
27393 extern u32 cm_packets_received;
27394 extern u32 cm_packets_dropped;
27395 extern u32 cm_packets_retrans;
27396-extern atomic_t cm_listens_created;
27397-extern atomic_t cm_listens_destroyed;
27398+extern atomic_unchecked_t cm_listens_created;
27399+extern atomic_unchecked_t cm_listens_destroyed;
27400 extern u32 cm_backlog_drops;
27401-extern atomic_t cm_loopbacks;
27402-extern atomic_t cm_nodes_created;
27403-extern atomic_t cm_nodes_destroyed;
27404-extern atomic_t cm_accel_dropped_pkts;
27405-extern atomic_t cm_resets_recvd;
27406+extern atomic_unchecked_t cm_loopbacks;
27407+extern atomic_unchecked_t cm_nodes_created;
27408+extern atomic_unchecked_t cm_nodes_destroyed;
27409+extern atomic_unchecked_t cm_accel_dropped_pkts;
27410+extern atomic_unchecked_t cm_resets_recvd;
27411
27412 extern u32 int_mod_timer_init;
27413 extern u32 int_mod_cq_depth_256;
27414diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes_nic.c linux-3.0.4/drivers/infiniband/hw/nes/nes_nic.c
27415--- linux-3.0.4/drivers/infiniband/hw/nes/nes_nic.c 2011-07-21 22:17:23.000000000 -0400
27416+++ linux-3.0.4/drivers/infiniband/hw/nes/nes_nic.c 2011-08-23 21:47:55.000000000 -0400
27417@@ -1274,31 +1274,31 @@ static void nes_netdev_get_ethtool_stats
27418 target_stat_values[++index] = mh_detected;
27419 target_stat_values[++index] = mh_pauses_sent;
27420 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
27421- target_stat_values[++index] = atomic_read(&cm_connects);
27422- target_stat_values[++index] = atomic_read(&cm_accepts);
27423- target_stat_values[++index] = atomic_read(&cm_disconnects);
27424- target_stat_values[++index] = atomic_read(&cm_connecteds);
27425- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
27426- target_stat_values[++index] = atomic_read(&cm_rejects);
27427- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
27428- target_stat_values[++index] = atomic_read(&qps_created);
27429- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
27430- target_stat_values[++index] = atomic_read(&qps_destroyed);
27431- target_stat_values[++index] = atomic_read(&cm_closes);
27432+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
27433+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
27434+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
27435+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
27436+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
27437+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
27438+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
27439+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
27440+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
27441+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
27442+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
27443 target_stat_values[++index] = cm_packets_sent;
27444 target_stat_values[++index] = cm_packets_bounced;
27445 target_stat_values[++index] = cm_packets_created;
27446 target_stat_values[++index] = cm_packets_received;
27447 target_stat_values[++index] = cm_packets_dropped;
27448 target_stat_values[++index] = cm_packets_retrans;
27449- target_stat_values[++index] = atomic_read(&cm_listens_created);
27450- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
27451+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
27452+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
27453 target_stat_values[++index] = cm_backlog_drops;
27454- target_stat_values[++index] = atomic_read(&cm_loopbacks);
27455- target_stat_values[++index] = atomic_read(&cm_nodes_created);
27456- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
27457- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
27458- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
27459+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
27460+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
27461+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
27462+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
27463+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
27464 target_stat_values[++index] = nesadapter->free_4kpbl;
27465 target_stat_values[++index] = nesadapter->free_256pbl;
27466 target_stat_values[++index] = int_mod_timer_init;
27467diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes_verbs.c linux-3.0.4/drivers/infiniband/hw/nes/nes_verbs.c
27468--- linux-3.0.4/drivers/infiniband/hw/nes/nes_verbs.c 2011-07-21 22:17:23.000000000 -0400
27469+++ linux-3.0.4/drivers/infiniband/hw/nes/nes_verbs.c 2011-08-23 21:47:55.000000000 -0400
27470@@ -46,9 +46,9 @@
27471
27472 #include <rdma/ib_umem.h>
27473
27474-atomic_t mod_qp_timouts;
27475-atomic_t qps_created;
27476-atomic_t sw_qps_destroyed;
27477+atomic_unchecked_t mod_qp_timouts;
27478+atomic_unchecked_t qps_created;
27479+atomic_unchecked_t sw_qps_destroyed;
27480
27481 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
27482
27483@@ -1141,7 +1141,7 @@ static struct ib_qp *nes_create_qp(struc
27484 if (init_attr->create_flags)
27485 return ERR_PTR(-EINVAL);
27486
27487- atomic_inc(&qps_created);
27488+ atomic_inc_unchecked(&qps_created);
27489 switch (init_attr->qp_type) {
27490 case IB_QPT_RC:
27491 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
27492@@ -1470,7 +1470,7 @@ static int nes_destroy_qp(struct ib_qp *
27493 struct iw_cm_event cm_event;
27494 int ret;
27495
27496- atomic_inc(&sw_qps_destroyed);
27497+ atomic_inc_unchecked(&sw_qps_destroyed);
27498 nesqp->destroyed = 1;
27499
27500 /* Blow away the connection if it exists. */
27501diff -urNp linux-3.0.4/drivers/infiniband/hw/qib/qib.h linux-3.0.4/drivers/infiniband/hw/qib/qib.h
27502--- linux-3.0.4/drivers/infiniband/hw/qib/qib.h 2011-07-21 22:17:23.000000000 -0400
27503+++ linux-3.0.4/drivers/infiniband/hw/qib/qib.h 2011-08-23 21:47:55.000000000 -0400
27504@@ -51,6 +51,7 @@
27505 #include <linux/completion.h>
27506 #include <linux/kref.h>
27507 #include <linux/sched.h>
27508+#include <linux/slab.h>
27509
27510 #include "qib_common.h"
27511 #include "qib_verbs.h"
27512diff -urNp linux-3.0.4/drivers/input/gameport/gameport.c linux-3.0.4/drivers/input/gameport/gameport.c
27513--- linux-3.0.4/drivers/input/gameport/gameport.c 2011-07-21 22:17:23.000000000 -0400
27514+++ linux-3.0.4/drivers/input/gameport/gameport.c 2011-08-23 21:47:55.000000000 -0400
27515@@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
27516 */
27517 static void gameport_init_port(struct gameport *gameport)
27518 {
27519- static atomic_t gameport_no = ATOMIC_INIT(0);
27520+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
27521
27522 __module_get(THIS_MODULE);
27523
27524 mutex_init(&gameport->drv_mutex);
27525 device_initialize(&gameport->dev);
27526 dev_set_name(&gameport->dev, "gameport%lu",
27527- (unsigned long)atomic_inc_return(&gameport_no) - 1);
27528+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
27529 gameport->dev.bus = &gameport_bus;
27530 gameport->dev.release = gameport_release_port;
27531 if (gameport->parent)
27532diff -urNp linux-3.0.4/drivers/input/input.c linux-3.0.4/drivers/input/input.c
27533--- linux-3.0.4/drivers/input/input.c 2011-07-21 22:17:23.000000000 -0400
27534+++ linux-3.0.4/drivers/input/input.c 2011-08-23 21:47:55.000000000 -0400
27535@@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struc
27536 */
27537 int input_register_device(struct input_dev *dev)
27538 {
27539- static atomic_t input_no = ATOMIC_INIT(0);
27540+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
27541 struct input_handler *handler;
27542 const char *path;
27543 int error;
27544@@ -1851,7 +1851,7 @@ int input_register_device(struct input_d
27545 dev->setkeycode = input_default_setkeycode;
27546
27547 dev_set_name(&dev->dev, "input%ld",
27548- (unsigned long) atomic_inc_return(&input_no) - 1);
27549+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
27550
27551 error = device_add(&dev->dev);
27552 if (error)
27553diff -urNp linux-3.0.4/drivers/input/joystick/sidewinder.c linux-3.0.4/drivers/input/joystick/sidewinder.c
27554--- linux-3.0.4/drivers/input/joystick/sidewinder.c 2011-07-21 22:17:23.000000000 -0400
27555+++ linux-3.0.4/drivers/input/joystick/sidewinder.c 2011-08-23 21:48:14.000000000 -0400
27556@@ -30,6 +30,7 @@
27557 #include <linux/kernel.h>
27558 #include <linux/module.h>
27559 #include <linux/slab.h>
27560+#include <linux/sched.h>
27561 #include <linux/init.h>
27562 #include <linux/input.h>
27563 #include <linux/gameport.h>
27564@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
27565 unsigned char buf[SW_LENGTH];
27566 int i;
27567
27568+ pax_track_stack();
27569+
27570 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
27571
27572 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
27573diff -urNp linux-3.0.4/drivers/input/joystick/xpad.c linux-3.0.4/drivers/input/joystick/xpad.c
27574--- linux-3.0.4/drivers/input/joystick/xpad.c 2011-07-21 22:17:23.000000000 -0400
27575+++ linux-3.0.4/drivers/input/joystick/xpad.c 2011-08-23 21:47:55.000000000 -0400
27576@@ -689,7 +689,7 @@ static void xpad_led_set(struct led_clas
27577
27578 static int xpad_led_probe(struct usb_xpad *xpad)
27579 {
27580- static atomic_t led_seq = ATOMIC_INIT(0);
27581+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
27582 long led_no;
27583 struct xpad_led *led;
27584 struct led_classdev *led_cdev;
27585@@ -702,7 +702,7 @@ static int xpad_led_probe(struct usb_xpa
27586 if (!led)
27587 return -ENOMEM;
27588
27589- led_no = (long)atomic_inc_return(&led_seq) - 1;
27590+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
27591
27592 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
27593 led->xpad = xpad;
27594diff -urNp linux-3.0.4/drivers/input/mousedev.c linux-3.0.4/drivers/input/mousedev.c
27595--- linux-3.0.4/drivers/input/mousedev.c 2011-07-21 22:17:23.000000000 -0400
27596+++ linux-3.0.4/drivers/input/mousedev.c 2011-08-23 21:47:55.000000000 -0400
27597@@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file
27598
27599 spin_unlock_irq(&client->packet_lock);
27600
27601- if (copy_to_user(buffer, data, count))
27602+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
27603 return -EFAULT;
27604
27605 return count;
27606diff -urNp linux-3.0.4/drivers/input/serio/serio.c linux-3.0.4/drivers/input/serio/serio.c
27607--- linux-3.0.4/drivers/input/serio/serio.c 2011-07-21 22:17:23.000000000 -0400
27608+++ linux-3.0.4/drivers/input/serio/serio.c 2011-08-23 21:47:55.000000000 -0400
27609@@ -497,7 +497,7 @@ static void serio_release_port(struct de
27610 */
27611 static void serio_init_port(struct serio *serio)
27612 {
27613- static atomic_t serio_no = ATOMIC_INIT(0);
27614+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
27615
27616 __module_get(THIS_MODULE);
27617
27618@@ -508,7 +508,7 @@ static void serio_init_port(struct serio
27619 mutex_init(&serio->drv_mutex);
27620 device_initialize(&serio->dev);
27621 dev_set_name(&serio->dev, "serio%ld",
27622- (long)atomic_inc_return(&serio_no) - 1);
27623+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
27624 serio->dev.bus = &serio_bus;
27625 serio->dev.release = serio_release_port;
27626 serio->dev.groups = serio_device_attr_groups;
27627diff -urNp linux-3.0.4/drivers/isdn/capi/capi.c linux-3.0.4/drivers/isdn/capi/capi.c
27628--- linux-3.0.4/drivers/isdn/capi/capi.c 2011-07-21 22:17:23.000000000 -0400
27629+++ linux-3.0.4/drivers/isdn/capi/capi.c 2011-08-23 21:47:55.000000000 -0400
27630@@ -83,8 +83,8 @@ struct capiminor {
27631
27632 struct capi20_appl *ap;
27633 u32 ncci;
27634- atomic_t datahandle;
27635- atomic_t msgid;
27636+ atomic_unchecked_t datahandle;
27637+ atomic_unchecked_t msgid;
27638
27639 struct tty_port port;
27640 int ttyinstop;
27641@@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *m
27642 capimsg_setu16(s, 2, mp->ap->applid);
27643 capimsg_setu8 (s, 4, CAPI_DATA_B3);
27644 capimsg_setu8 (s, 5, CAPI_RESP);
27645- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
27646+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
27647 capimsg_setu32(s, 8, mp->ncci);
27648 capimsg_setu16(s, 12, datahandle);
27649 }
27650@@ -518,14 +518,14 @@ static void handle_minor_send(struct cap
27651 mp->outbytes -= len;
27652 spin_unlock_bh(&mp->outlock);
27653
27654- datahandle = atomic_inc_return(&mp->datahandle);
27655+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
27656 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
27657 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
27658 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
27659 capimsg_setu16(skb->data, 2, mp->ap->applid);
27660 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
27661 capimsg_setu8 (skb->data, 5, CAPI_REQ);
27662- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
27663+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
27664 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
27665 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
27666 capimsg_setu16(skb->data, 16, len); /* Data length */
27667diff -urNp linux-3.0.4/drivers/isdn/gigaset/common.c linux-3.0.4/drivers/isdn/gigaset/common.c
27668--- linux-3.0.4/drivers/isdn/gigaset/common.c 2011-07-21 22:17:23.000000000 -0400
27669+++ linux-3.0.4/drivers/isdn/gigaset/common.c 2011-08-23 21:47:55.000000000 -0400
27670@@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct
27671 cs->commands_pending = 0;
27672 cs->cur_at_seq = 0;
27673 cs->gotfwver = -1;
27674- cs->open_count = 0;
27675+ local_set(&cs->open_count, 0);
27676 cs->dev = NULL;
27677 cs->tty = NULL;
27678 cs->tty_dev = NULL;
27679diff -urNp linux-3.0.4/drivers/isdn/gigaset/gigaset.h linux-3.0.4/drivers/isdn/gigaset/gigaset.h
27680--- linux-3.0.4/drivers/isdn/gigaset/gigaset.h 2011-07-21 22:17:23.000000000 -0400
27681+++ linux-3.0.4/drivers/isdn/gigaset/gigaset.h 2011-08-23 21:47:55.000000000 -0400
27682@@ -35,6 +35,7 @@
27683 #include <linux/tty_driver.h>
27684 #include <linux/list.h>
27685 #include <asm/atomic.h>
27686+#include <asm/local.h>
27687
27688 #define GIG_VERSION {0, 5, 0, 0}
27689 #define GIG_COMPAT {0, 4, 0, 0}
27690@@ -433,7 +434,7 @@ struct cardstate {
27691 spinlock_t cmdlock;
27692 unsigned curlen, cmdbytes;
27693
27694- unsigned open_count;
27695+ local_t open_count;
27696 struct tty_struct *tty;
27697 struct tasklet_struct if_wake_tasklet;
27698 unsigned control_state;
27699diff -urNp linux-3.0.4/drivers/isdn/gigaset/interface.c linux-3.0.4/drivers/isdn/gigaset/interface.c
27700--- linux-3.0.4/drivers/isdn/gigaset/interface.c 2011-07-21 22:17:23.000000000 -0400
27701+++ linux-3.0.4/drivers/isdn/gigaset/interface.c 2011-08-23 21:47:55.000000000 -0400
27702@@ -162,9 +162,7 @@ static int if_open(struct tty_struct *tt
27703 }
27704 tty->driver_data = cs;
27705
27706- ++cs->open_count;
27707-
27708- if (cs->open_count == 1) {
27709+ if (local_inc_return(&cs->open_count) == 1) {
27710 spin_lock_irqsave(&cs->lock, flags);
27711 cs->tty = tty;
27712 spin_unlock_irqrestore(&cs->lock, flags);
27713@@ -192,10 +190,10 @@ static void if_close(struct tty_struct *
27714
27715 if (!cs->connected)
27716 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
27717- else if (!cs->open_count)
27718+ else if (!local_read(&cs->open_count))
27719 dev_warn(cs->dev, "%s: device not opened\n", __func__);
27720 else {
27721- if (!--cs->open_count) {
27722+ if (!local_dec_return(&cs->open_count)) {
27723 spin_lock_irqsave(&cs->lock, flags);
27724 cs->tty = NULL;
27725 spin_unlock_irqrestore(&cs->lock, flags);
27726@@ -230,7 +228,7 @@ static int if_ioctl(struct tty_struct *t
27727 if (!cs->connected) {
27728 gig_dbg(DEBUG_IF, "not connected");
27729 retval = -ENODEV;
27730- } else if (!cs->open_count)
27731+ } else if (!local_read(&cs->open_count))
27732 dev_warn(cs->dev, "%s: device not opened\n", __func__);
27733 else {
27734 retval = 0;
27735@@ -360,7 +358,7 @@ static int if_write(struct tty_struct *t
27736 retval = -ENODEV;
27737 goto done;
27738 }
27739- if (!cs->open_count) {
27740+ if (!local_read(&cs->open_count)) {
27741 dev_warn(cs->dev, "%s: device not opened\n", __func__);
27742 retval = -ENODEV;
27743 goto done;
27744@@ -413,7 +411,7 @@ static int if_write_room(struct tty_stru
27745 if (!cs->connected) {
27746 gig_dbg(DEBUG_IF, "not connected");
27747 retval = -ENODEV;
27748- } else if (!cs->open_count)
27749+ } else if (!local_read(&cs->open_count))
27750 dev_warn(cs->dev, "%s: device not opened\n", __func__);
27751 else if (cs->mstate != MS_LOCKED) {
27752 dev_warn(cs->dev, "can't write to unlocked device\n");
27753@@ -443,7 +441,7 @@ static int if_chars_in_buffer(struct tty
27754
27755 if (!cs->connected)
27756 gig_dbg(DEBUG_IF, "not connected");
27757- else if (!cs->open_count)
27758+ else if (!local_read(&cs->open_count))
27759 dev_warn(cs->dev, "%s: device not opened\n", __func__);
27760 else if (cs->mstate != MS_LOCKED)
27761 dev_warn(cs->dev, "can't write to unlocked device\n");
27762@@ -471,7 +469,7 @@ static void if_throttle(struct tty_struc
27763
27764 if (!cs->connected)
27765 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
27766- else if (!cs->open_count)
27767+ else if (!local_read(&cs->open_count))
27768 dev_warn(cs->dev, "%s: device not opened\n", __func__);
27769 else
27770 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
27771@@ -495,7 +493,7 @@ static void if_unthrottle(struct tty_str
27772
27773 if (!cs->connected)
27774 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
27775- else if (!cs->open_count)
27776+ else if (!local_read(&cs->open_count))
27777 dev_warn(cs->dev, "%s: device not opened\n", __func__);
27778 else
27779 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
27780@@ -526,7 +524,7 @@ static void if_set_termios(struct tty_st
27781 goto out;
27782 }
27783
27784- if (!cs->open_count) {
27785+ if (!local_read(&cs->open_count)) {
27786 dev_warn(cs->dev, "%s: device not opened\n", __func__);
27787 goto out;
27788 }
27789diff -urNp linux-3.0.4/drivers/isdn/hardware/avm/b1.c linux-3.0.4/drivers/isdn/hardware/avm/b1.c
27790--- linux-3.0.4/drivers/isdn/hardware/avm/b1.c 2011-07-21 22:17:23.000000000 -0400
27791+++ linux-3.0.4/drivers/isdn/hardware/avm/b1.c 2011-08-23 21:47:55.000000000 -0400
27792@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capilo
27793 }
27794 if (left) {
27795 if (t4file->user) {
27796- if (copy_from_user(buf, dp, left))
27797+ if (left > sizeof buf || copy_from_user(buf, dp, left))
27798 return -EFAULT;
27799 } else {
27800 memcpy(buf, dp, left);
27801@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capilo
27802 }
27803 if (left) {
27804 if (config->user) {
27805- if (copy_from_user(buf, dp, left))
27806+ if (left > sizeof buf || copy_from_user(buf, dp, left))
27807 return -EFAULT;
27808 } else {
27809 memcpy(buf, dp, left);
27810diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/capidtmf.c linux-3.0.4/drivers/isdn/hardware/eicon/capidtmf.c
27811--- linux-3.0.4/drivers/isdn/hardware/eicon/capidtmf.c 2011-07-21 22:17:23.000000000 -0400
27812+++ linux-3.0.4/drivers/isdn/hardware/eicon/capidtmf.c 2011-08-23 21:48:14.000000000 -0400
27813@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
27814 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
27815 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
27816
27817+ pax_track_stack();
27818
27819 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
27820 {
27821diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/capifunc.c linux-3.0.4/drivers/isdn/hardware/eicon/capifunc.c
27822--- linux-3.0.4/drivers/isdn/hardware/eicon/capifunc.c 2011-07-21 22:17:23.000000000 -0400
27823+++ linux-3.0.4/drivers/isdn/hardware/eicon/capifunc.c 2011-08-23 21:48:14.000000000 -0400
27824@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
27825 IDI_SYNC_REQ req;
27826 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
27827
27828+ pax_track_stack();
27829+
27830 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
27831
27832 for (x = 0; x < MAX_DESCRIPTORS; x++) {
27833diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/diddfunc.c linux-3.0.4/drivers/isdn/hardware/eicon/diddfunc.c
27834--- linux-3.0.4/drivers/isdn/hardware/eicon/diddfunc.c 2011-07-21 22:17:23.000000000 -0400
27835+++ linux-3.0.4/drivers/isdn/hardware/eicon/diddfunc.c 2011-08-23 21:48:14.000000000 -0400
27836@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
27837 IDI_SYNC_REQ req;
27838 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
27839
27840+ pax_track_stack();
27841+
27842 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
27843
27844 for (x = 0; x < MAX_DESCRIPTORS; x++) {
27845diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/divasfunc.c linux-3.0.4/drivers/isdn/hardware/eicon/divasfunc.c
27846--- linux-3.0.4/drivers/isdn/hardware/eicon/divasfunc.c 2011-07-21 22:17:23.000000000 -0400
27847+++ linux-3.0.4/drivers/isdn/hardware/eicon/divasfunc.c 2011-08-23 21:48:14.000000000 -0400
27848@@ -160,6 +160,8 @@ static int DIVA_INIT_FUNCTION connect_di
27849 IDI_SYNC_REQ req;
27850 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
27851
27852+ pax_track_stack();
27853+
27854 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
27855
27856 for (x = 0; x < MAX_DESCRIPTORS; x++) {
27857diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/divasync.h linux-3.0.4/drivers/isdn/hardware/eicon/divasync.h
27858--- linux-3.0.4/drivers/isdn/hardware/eicon/divasync.h 2011-07-21 22:17:23.000000000 -0400
27859+++ linux-3.0.4/drivers/isdn/hardware/eicon/divasync.h 2011-08-23 21:47:55.000000000 -0400
27860@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
27861 } diva_didd_add_adapter_t;
27862 typedef struct _diva_didd_remove_adapter {
27863 IDI_CALL p_request;
27864-} diva_didd_remove_adapter_t;
27865+} __no_const diva_didd_remove_adapter_t;
27866 typedef struct _diva_didd_read_adapter_array {
27867 void * buffer;
27868 dword length;
27869diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/idifunc.c linux-3.0.4/drivers/isdn/hardware/eicon/idifunc.c
27870--- linux-3.0.4/drivers/isdn/hardware/eicon/idifunc.c 2011-07-21 22:17:23.000000000 -0400
27871+++ linux-3.0.4/drivers/isdn/hardware/eicon/idifunc.c 2011-08-23 21:48:14.000000000 -0400
27872@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
27873 IDI_SYNC_REQ req;
27874 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
27875
27876+ pax_track_stack();
27877+
27878 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
27879
27880 for (x = 0; x < MAX_DESCRIPTORS; x++) {
27881diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/message.c linux-3.0.4/drivers/isdn/hardware/eicon/message.c
27882--- linux-3.0.4/drivers/isdn/hardware/eicon/message.c 2011-07-21 22:17:23.000000000 -0400
27883+++ linux-3.0.4/drivers/isdn/hardware/eicon/message.c 2011-08-23 21:48:14.000000000 -0400
27884@@ -4886,6 +4886,8 @@ static void sig_ind(PLCI *plci)
27885 dword d;
27886 word w;
27887
27888+ pax_track_stack();
27889+
27890 a = plci->adapter;
27891 Id = ((word)plci->Id<<8)|a->Id;
27892 PUT_WORD(&SS_Ind[4],0x0000);
27893@@ -7480,6 +7482,8 @@ static word add_b1(PLCI *plci, API_PARSE
27894 word j, n, w;
27895 dword d;
27896
27897+ pax_track_stack();
27898+
27899
27900 for(i=0;i<8;i++) bp_parms[i].length = 0;
27901 for(i=0;i<2;i++) global_config[i].length = 0;
27902@@ -7954,6 +7958,8 @@ static word add_b23(PLCI *plci, API_PARS
27903 const byte llc3[] = {4,3,2,2,6,6,0};
27904 const byte header[] = {0,2,3,3,0,0,0};
27905
27906+ pax_track_stack();
27907+
27908 for(i=0;i<8;i++) bp_parms[i].length = 0;
27909 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
27910 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
27911@@ -14741,6 +14747,8 @@ static void group_optimization(DIVA_CAPI
27912 word appl_number_group_type[MAX_APPL];
27913 PLCI *auxplci;
27914
27915+ pax_track_stack();
27916+
27917 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
27918
27919 if(!a->group_optimization_enabled)
27920diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/mntfunc.c linux-3.0.4/drivers/isdn/hardware/eicon/mntfunc.c
27921--- linux-3.0.4/drivers/isdn/hardware/eicon/mntfunc.c 2011-07-21 22:17:23.000000000 -0400
27922+++ linux-3.0.4/drivers/isdn/hardware/eicon/mntfunc.c 2011-08-23 21:48:14.000000000 -0400
27923@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
27924 IDI_SYNC_REQ req;
27925 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
27926
27927+ pax_track_stack();
27928+
27929 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
27930
27931 for (x = 0; x < MAX_DESCRIPTORS; x++) {
27932diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/xdi_adapter.h linux-3.0.4/drivers/isdn/hardware/eicon/xdi_adapter.h
27933--- linux-3.0.4/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-07-21 22:17:23.000000000 -0400
27934+++ linux-3.0.4/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-08-23 21:47:55.000000000 -0400
27935@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
27936 typedef struct _diva_os_idi_adapter_interface {
27937 diva_init_card_proc_t cleanup_adapter_proc;
27938 diva_cmd_card_proc_t cmd_proc;
27939-} diva_os_idi_adapter_interface_t;
27940+} __no_const diva_os_idi_adapter_interface_t;
27941
27942 typedef struct _diva_os_xdi_adapter {
27943 struct list_head link;
27944diff -urNp linux-3.0.4/drivers/isdn/i4l/isdn_common.c linux-3.0.4/drivers/isdn/i4l/isdn_common.c
27945--- linux-3.0.4/drivers/isdn/i4l/isdn_common.c 2011-07-21 22:17:23.000000000 -0400
27946+++ linux-3.0.4/drivers/isdn/i4l/isdn_common.c 2011-08-23 21:48:14.000000000 -0400
27947@@ -1286,6 +1286,8 @@ isdn_ioctl(struct file *file, uint cmd,
27948 } iocpar;
27949 void __user *argp = (void __user *)arg;
27950
27951+ pax_track_stack();
27952+
27953 #define name iocpar.name
27954 #define bname iocpar.bname
27955 #define iocts iocpar.iocts
27956diff -urNp linux-3.0.4/drivers/isdn/icn/icn.c linux-3.0.4/drivers/isdn/icn/icn.c
27957--- linux-3.0.4/drivers/isdn/icn/icn.c 2011-07-21 22:17:23.000000000 -0400
27958+++ linux-3.0.4/drivers/isdn/icn/icn.c 2011-08-23 21:47:55.000000000 -0400
27959@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len
27960 if (count > len)
27961 count = len;
27962 if (user) {
27963- if (copy_from_user(msg, buf, count))
27964+ if (count > sizeof msg || copy_from_user(msg, buf, count))
27965 return -EFAULT;
27966 } else
27967 memcpy(msg, buf, count);
27968diff -urNp linux-3.0.4/drivers/lguest/core.c linux-3.0.4/drivers/lguest/core.c
27969--- linux-3.0.4/drivers/lguest/core.c 2011-07-21 22:17:23.000000000 -0400
27970+++ linux-3.0.4/drivers/lguest/core.c 2011-08-23 21:47:55.000000000 -0400
27971@@ -92,9 +92,17 @@ static __init int map_switcher(void)
27972 * it's worked so far. The end address needs +1 because __get_vm_area
27973 * allocates an extra guard page, so we need space for that.
27974 */
27975+
27976+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27977+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
27978+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
27979+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
27980+#else
27981 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
27982 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
27983 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
27984+#endif
27985+
27986 if (!switcher_vma) {
27987 err = -ENOMEM;
27988 printk("lguest: could not map switcher pages high\n");
27989@@ -119,7 +127,7 @@ static __init int map_switcher(void)
27990 * Now the Switcher is mapped at the right address, we can't fail!
27991 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
27992 */
27993- memcpy(switcher_vma->addr, start_switcher_text,
27994+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
27995 end_switcher_text - start_switcher_text);
27996
27997 printk(KERN_INFO "lguest: mapped switcher at %p\n",
27998diff -urNp linux-3.0.4/drivers/lguest/x86/core.c linux-3.0.4/drivers/lguest/x86/core.c
27999--- linux-3.0.4/drivers/lguest/x86/core.c 2011-07-21 22:17:23.000000000 -0400
28000+++ linux-3.0.4/drivers/lguest/x86/core.c 2011-08-23 21:47:55.000000000 -0400
28001@@ -59,7 +59,7 @@ static struct {
28002 /* Offset from where switcher.S was compiled to where we've copied it */
28003 static unsigned long switcher_offset(void)
28004 {
28005- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
28006+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
28007 }
28008
28009 /* This cpu's struct lguest_pages. */
28010@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
28011 * These copies are pretty cheap, so we do them unconditionally: */
28012 /* Save the current Host top-level page directory.
28013 */
28014+
28015+#ifdef CONFIG_PAX_PER_CPU_PGD
28016+ pages->state.host_cr3 = read_cr3();
28017+#else
28018 pages->state.host_cr3 = __pa(current->mm->pgd);
28019+#endif
28020+
28021 /*
28022 * Set up the Guest's page tables to see this CPU's pages (and no
28023 * other CPU's pages).
28024@@ -547,7 +553,7 @@ void __init lguest_arch_host_init(void)
28025 * compiled-in switcher code and the high-mapped copy we just made.
28026 */
28027 for (i = 0; i < IDT_ENTRIES; i++)
28028- default_idt_entries[i] += switcher_offset();
28029+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
28030
28031 /*
28032 * Set up the Switcher's per-cpu areas.
28033@@ -630,7 +636,7 @@ void __init lguest_arch_host_init(void)
28034 * it will be undisturbed when we switch. To change %cs and jump we
28035 * need this structure to feed to Intel's "lcall" instruction.
28036 */
28037- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
28038+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
28039 lguest_entry.segment = LGUEST_CS;
28040
28041 /*
28042diff -urNp linux-3.0.4/drivers/lguest/x86/switcher_32.S linux-3.0.4/drivers/lguest/x86/switcher_32.S
28043--- linux-3.0.4/drivers/lguest/x86/switcher_32.S 2011-07-21 22:17:23.000000000 -0400
28044+++ linux-3.0.4/drivers/lguest/x86/switcher_32.S 2011-08-23 21:47:55.000000000 -0400
28045@@ -87,6 +87,7 @@
28046 #include <asm/page.h>
28047 #include <asm/segment.h>
28048 #include <asm/lguest.h>
28049+#include <asm/processor-flags.h>
28050
28051 // We mark the start of the code to copy
28052 // It's placed in .text tho it's never run here
28053@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
28054 // Changes type when we load it: damn Intel!
28055 // For after we switch over our page tables
28056 // That entry will be read-only: we'd crash.
28057+
28058+#ifdef CONFIG_PAX_KERNEXEC
28059+ mov %cr0, %edx
28060+ xor $X86_CR0_WP, %edx
28061+ mov %edx, %cr0
28062+#endif
28063+
28064 movl $(GDT_ENTRY_TSS*8), %edx
28065 ltr %dx
28066
28067@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
28068 // Let's clear it again for our return.
28069 // The GDT descriptor of the Host
28070 // Points to the table after two "size" bytes
28071- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
28072+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
28073 // Clear "used" from type field (byte 5, bit 2)
28074- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
28075+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
28076+
28077+#ifdef CONFIG_PAX_KERNEXEC
28078+ mov %cr0, %eax
28079+ xor $X86_CR0_WP, %eax
28080+ mov %eax, %cr0
28081+#endif
28082
28083 // Once our page table's switched, the Guest is live!
28084 // The Host fades as we run this final step.
28085@@ -295,13 +309,12 @@ deliver_to_host:
28086 // I consulted gcc, and it gave
28087 // These instructions, which I gladly credit:
28088 leal (%edx,%ebx,8), %eax
28089- movzwl (%eax),%edx
28090- movl 4(%eax), %eax
28091- xorw %ax, %ax
28092- orl %eax, %edx
28093+ movl 4(%eax), %edx
28094+ movw (%eax), %dx
28095 // Now the address of the handler's in %edx
28096 // We call it now: its "iret" drops us home.
28097- jmp *%edx
28098+ ljmp $__KERNEL_CS, $1f
28099+1: jmp *%edx
28100
28101 // Every interrupt can come to us here
28102 // But we must truly tell each apart.
28103diff -urNp linux-3.0.4/drivers/md/dm.c linux-3.0.4/drivers/md/dm.c
28104--- linux-3.0.4/drivers/md/dm.c 2011-09-02 18:11:21.000000000 -0400
28105+++ linux-3.0.4/drivers/md/dm.c 2011-08-23 21:47:55.000000000 -0400
28106@@ -164,9 +164,9 @@ struct mapped_device {
28107 /*
28108 * Event handling.
28109 */
28110- atomic_t event_nr;
28111+ atomic_unchecked_t event_nr;
28112 wait_queue_head_t eventq;
28113- atomic_t uevent_seq;
28114+ atomic_unchecked_t uevent_seq;
28115 struct list_head uevent_list;
28116 spinlock_t uevent_lock; /* Protect access to uevent_list */
28117
28118@@ -1842,8 +1842,8 @@ static struct mapped_device *alloc_dev(i
28119 rwlock_init(&md->map_lock);
28120 atomic_set(&md->holders, 1);
28121 atomic_set(&md->open_count, 0);
28122- atomic_set(&md->event_nr, 0);
28123- atomic_set(&md->uevent_seq, 0);
28124+ atomic_set_unchecked(&md->event_nr, 0);
28125+ atomic_set_unchecked(&md->uevent_seq, 0);
28126 INIT_LIST_HEAD(&md->uevent_list);
28127 spin_lock_init(&md->uevent_lock);
28128
28129@@ -1977,7 +1977,7 @@ static void event_callback(void *context
28130
28131 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
28132
28133- atomic_inc(&md->event_nr);
28134+ atomic_inc_unchecked(&md->event_nr);
28135 wake_up(&md->eventq);
28136 }
28137
28138@@ -2553,18 +2553,18 @@ int dm_kobject_uevent(struct mapped_devi
28139
28140 uint32_t dm_next_uevent_seq(struct mapped_device *md)
28141 {
28142- return atomic_add_return(1, &md->uevent_seq);
28143+ return atomic_add_return_unchecked(1, &md->uevent_seq);
28144 }
28145
28146 uint32_t dm_get_event_nr(struct mapped_device *md)
28147 {
28148- return atomic_read(&md->event_nr);
28149+ return atomic_read_unchecked(&md->event_nr);
28150 }
28151
28152 int dm_wait_event(struct mapped_device *md, int event_nr)
28153 {
28154 return wait_event_interruptible(md->eventq,
28155- (event_nr != atomic_read(&md->event_nr)));
28156+ (event_nr != atomic_read_unchecked(&md->event_nr)));
28157 }
28158
28159 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
28160diff -urNp linux-3.0.4/drivers/md/dm-ioctl.c linux-3.0.4/drivers/md/dm-ioctl.c
28161--- linux-3.0.4/drivers/md/dm-ioctl.c 2011-07-21 22:17:23.000000000 -0400
28162+++ linux-3.0.4/drivers/md/dm-ioctl.c 2011-08-23 21:47:55.000000000 -0400
28163@@ -1551,7 +1551,7 @@ static int validate_params(uint cmd, str
28164 cmd == DM_LIST_VERSIONS_CMD)
28165 return 0;
28166
28167- if ((cmd == DM_DEV_CREATE_CMD)) {
28168+ if (cmd == DM_DEV_CREATE_CMD) {
28169 if (!*param->name) {
28170 DMWARN("name not supplied when creating device");
28171 return -EINVAL;
28172diff -urNp linux-3.0.4/drivers/md/dm-raid1.c linux-3.0.4/drivers/md/dm-raid1.c
28173--- linux-3.0.4/drivers/md/dm-raid1.c 2011-07-21 22:17:23.000000000 -0400
28174+++ linux-3.0.4/drivers/md/dm-raid1.c 2011-08-23 21:47:55.000000000 -0400
28175@@ -40,7 +40,7 @@ enum dm_raid1_error {
28176
28177 struct mirror {
28178 struct mirror_set *ms;
28179- atomic_t error_count;
28180+ atomic_unchecked_t error_count;
28181 unsigned long error_type;
28182 struct dm_dev *dev;
28183 sector_t offset;
28184@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(s
28185 struct mirror *m;
28186
28187 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
28188- if (!atomic_read(&m->error_count))
28189+ if (!atomic_read_unchecked(&m->error_count))
28190 return m;
28191
28192 return NULL;
28193@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m
28194 * simple way to tell if a device has encountered
28195 * errors.
28196 */
28197- atomic_inc(&m->error_count);
28198+ atomic_inc_unchecked(&m->error_count);
28199
28200 if (test_and_set_bit(error_type, &m->error_type))
28201 return;
28202@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(stru
28203 struct mirror *m = get_default_mirror(ms);
28204
28205 do {
28206- if (likely(!atomic_read(&m->error_count)))
28207+ if (likely(!atomic_read_unchecked(&m->error_count)))
28208 return m;
28209
28210 if (m-- == ms->mirror)
28211@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
28212 {
28213 struct mirror *default_mirror = get_default_mirror(m->ms);
28214
28215- return !atomic_read(&default_mirror->error_count);
28216+ return !atomic_read_unchecked(&default_mirror->error_count);
28217 }
28218
28219 static int mirror_available(struct mirror_set *ms, struct bio *bio)
28220@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *
28221 */
28222 if (likely(region_in_sync(ms, region, 1)))
28223 m = choose_mirror(ms, bio->bi_sector);
28224- else if (m && atomic_read(&m->error_count))
28225+ else if (m && atomic_read_unchecked(&m->error_count))
28226 m = NULL;
28227
28228 if (likely(m))
28229@@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set
28230 }
28231
28232 ms->mirror[mirror].ms = ms;
28233- atomic_set(&(ms->mirror[mirror].error_count), 0);
28234+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
28235 ms->mirror[mirror].error_type = 0;
28236 ms->mirror[mirror].offset = offset;
28237
28238@@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_targ
28239 */
28240 static char device_status_char(struct mirror *m)
28241 {
28242- if (!atomic_read(&(m->error_count)))
28243+ if (!atomic_read_unchecked(&(m->error_count)))
28244 return 'A';
28245
28246 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
28247diff -urNp linux-3.0.4/drivers/md/dm-stripe.c linux-3.0.4/drivers/md/dm-stripe.c
28248--- linux-3.0.4/drivers/md/dm-stripe.c 2011-07-21 22:17:23.000000000 -0400
28249+++ linux-3.0.4/drivers/md/dm-stripe.c 2011-08-23 21:47:55.000000000 -0400
28250@@ -20,7 +20,7 @@ struct stripe {
28251 struct dm_dev *dev;
28252 sector_t physical_start;
28253
28254- atomic_t error_count;
28255+ atomic_unchecked_t error_count;
28256 };
28257
28258 struct stripe_c {
28259@@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *
28260 kfree(sc);
28261 return r;
28262 }
28263- atomic_set(&(sc->stripe[i].error_count), 0);
28264+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
28265 }
28266
28267 ti->private = sc;
28268@@ -314,7 +314,7 @@ static int stripe_status(struct dm_targe
28269 DMEMIT("%d ", sc->stripes);
28270 for (i = 0; i < sc->stripes; i++) {
28271 DMEMIT("%s ", sc->stripe[i].dev->name);
28272- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
28273+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
28274 'D' : 'A';
28275 }
28276 buffer[i] = '\0';
28277@@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_targe
28278 */
28279 for (i = 0; i < sc->stripes; i++)
28280 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
28281- atomic_inc(&(sc->stripe[i].error_count));
28282- if (atomic_read(&(sc->stripe[i].error_count)) <
28283+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
28284+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
28285 DM_IO_ERROR_THRESHOLD)
28286 schedule_work(&sc->trigger_event);
28287 }
28288diff -urNp linux-3.0.4/drivers/md/dm-table.c linux-3.0.4/drivers/md/dm-table.c
28289--- linux-3.0.4/drivers/md/dm-table.c 2011-07-21 22:17:23.000000000 -0400
28290+++ linux-3.0.4/drivers/md/dm-table.c 2011-08-23 21:47:55.000000000 -0400
28291@@ -390,7 +390,7 @@ static int device_area_is_invalid(struct
28292 if (!dev_size)
28293 return 0;
28294
28295- if ((start >= dev_size) || (start + len > dev_size)) {
28296+ if ((start >= dev_size) || (len > dev_size - start)) {
28297 DMWARN("%s: %s too small for target: "
28298 "start=%llu, len=%llu, dev_size=%llu",
28299 dm_device_name(ti->table->md), bdevname(bdev, b),
28300diff -urNp linux-3.0.4/drivers/md/md.c linux-3.0.4/drivers/md/md.c
28301--- linux-3.0.4/drivers/md/md.c 2011-07-21 22:17:23.000000000 -0400
28302+++ linux-3.0.4/drivers/md/md.c 2011-08-23 21:47:55.000000000 -0400
28303@@ -226,10 +226,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
28304 * start build, activate spare
28305 */
28306 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
28307-static atomic_t md_event_count;
28308+static atomic_unchecked_t md_event_count;
28309 void md_new_event(mddev_t *mddev)
28310 {
28311- atomic_inc(&md_event_count);
28312+ atomic_inc_unchecked(&md_event_count);
28313 wake_up(&md_event_waiters);
28314 }
28315 EXPORT_SYMBOL_GPL(md_new_event);
28316@@ -239,7 +239,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
28317 */
28318 static void md_new_event_inintr(mddev_t *mddev)
28319 {
28320- atomic_inc(&md_event_count);
28321+ atomic_inc_unchecked(&md_event_count);
28322 wake_up(&md_event_waiters);
28323 }
28324
28325@@ -1457,7 +1457,7 @@ static int super_1_load(mdk_rdev_t *rdev
28326
28327 rdev->preferred_minor = 0xffff;
28328 rdev->data_offset = le64_to_cpu(sb->data_offset);
28329- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
28330+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
28331
28332 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
28333 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
28334@@ -1635,7 +1635,7 @@ static void super_1_sync(mddev_t *mddev,
28335 else
28336 sb->resync_offset = cpu_to_le64(0);
28337
28338- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
28339+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
28340
28341 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
28342 sb->size = cpu_to_le64(mddev->dev_sectors);
28343@@ -2428,7 +2428,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
28344 static ssize_t
28345 errors_show(mdk_rdev_t *rdev, char *page)
28346 {
28347- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
28348+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
28349 }
28350
28351 static ssize_t
28352@@ -2437,7 +2437,7 @@ errors_store(mdk_rdev_t *rdev, const cha
28353 char *e;
28354 unsigned long n = simple_strtoul(buf, &e, 10);
28355 if (*buf && (*e == 0 || *e == '\n')) {
28356- atomic_set(&rdev->corrected_errors, n);
28357+ atomic_set_unchecked(&rdev->corrected_errors, n);
28358 return len;
28359 }
28360 return -EINVAL;
28361@@ -2793,8 +2793,8 @@ void md_rdev_init(mdk_rdev_t *rdev)
28362 rdev->last_read_error.tv_sec = 0;
28363 rdev->last_read_error.tv_nsec = 0;
28364 atomic_set(&rdev->nr_pending, 0);
28365- atomic_set(&rdev->read_errors, 0);
28366- atomic_set(&rdev->corrected_errors, 0);
28367+ atomic_set_unchecked(&rdev->read_errors, 0);
28368+ atomic_set_unchecked(&rdev->corrected_errors, 0);
28369
28370 INIT_LIST_HEAD(&rdev->same_set);
28371 init_waitqueue_head(&rdev->blocked_wait);
28372@@ -6415,7 +6415,7 @@ static int md_seq_show(struct seq_file *
28373
28374 spin_unlock(&pers_lock);
28375 seq_printf(seq, "\n");
28376- mi->event = atomic_read(&md_event_count);
28377+ mi->event = atomic_read_unchecked(&md_event_count);
28378 return 0;
28379 }
28380 if (v == (void*)2) {
28381@@ -6504,7 +6504,7 @@ static int md_seq_show(struct seq_file *
28382 chunk_kb ? "KB" : "B");
28383 if (bitmap->file) {
28384 seq_printf(seq, ", file: ");
28385- seq_path(seq, &bitmap->file->f_path, " \t\n");
28386+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
28387 }
28388
28389 seq_printf(seq, "\n");
28390@@ -6538,7 +6538,7 @@ static int md_seq_open(struct inode *ino
28391 else {
28392 struct seq_file *p = file->private_data;
28393 p->private = mi;
28394- mi->event = atomic_read(&md_event_count);
28395+ mi->event = atomic_read_unchecked(&md_event_count);
28396 }
28397 return error;
28398 }
28399@@ -6554,7 +6554,7 @@ static unsigned int mdstat_poll(struct f
28400 /* always allow read */
28401 mask = POLLIN | POLLRDNORM;
28402
28403- if (mi->event != atomic_read(&md_event_count))
28404+ if (mi->event != atomic_read_unchecked(&md_event_count))
28405 mask |= POLLERR | POLLPRI;
28406 return mask;
28407 }
28408@@ -6598,7 +6598,7 @@ static int is_mddev_idle(mddev_t *mddev,
28409 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
28410 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
28411 (int)part_stat_read(&disk->part0, sectors[1]) -
28412- atomic_read(&disk->sync_io);
28413+ atomic_read_unchecked(&disk->sync_io);
28414 /* sync IO will cause sync_io to increase before the disk_stats
28415 * as sync_io is counted when a request starts, and
28416 * disk_stats is counted when it completes.
28417diff -urNp linux-3.0.4/drivers/md/md.h linux-3.0.4/drivers/md/md.h
28418--- linux-3.0.4/drivers/md/md.h 2011-07-21 22:17:23.000000000 -0400
28419+++ linux-3.0.4/drivers/md/md.h 2011-08-23 21:47:55.000000000 -0400
28420@@ -97,13 +97,13 @@ struct mdk_rdev_s
28421 * only maintained for arrays that
28422 * support hot removal
28423 */
28424- atomic_t read_errors; /* number of consecutive read errors that
28425+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
28426 * we have tried to ignore.
28427 */
28428 struct timespec last_read_error; /* monotonic time since our
28429 * last read error
28430 */
28431- atomic_t corrected_errors; /* number of corrected read errors,
28432+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
28433 * for reporting to userspace and storing
28434 * in superblock.
28435 */
28436@@ -344,7 +344,7 @@ static inline void rdev_dec_pending(mdk_
28437
28438 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
28439 {
28440- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
28441+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
28442 }
28443
28444 struct mdk_personality
28445diff -urNp linux-3.0.4/drivers/md/raid10.c linux-3.0.4/drivers/md/raid10.c
28446--- linux-3.0.4/drivers/md/raid10.c 2011-07-21 22:17:23.000000000 -0400
28447+++ linux-3.0.4/drivers/md/raid10.c 2011-08-23 21:47:55.000000000 -0400
28448@@ -1186,7 +1186,7 @@ static void end_sync_read(struct bio *bi
28449 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
28450 set_bit(R10BIO_Uptodate, &r10_bio->state);
28451 else {
28452- atomic_add(r10_bio->sectors,
28453+ atomic_add_unchecked(r10_bio->sectors,
28454 &conf->mirrors[d].rdev->corrected_errors);
28455 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
28456 md_error(r10_bio->mddev,
28457@@ -1394,7 +1394,7 @@ static void check_decay_read_errors(mdde
28458 {
28459 struct timespec cur_time_mon;
28460 unsigned long hours_since_last;
28461- unsigned int read_errors = atomic_read(&rdev->read_errors);
28462+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
28463
28464 ktime_get_ts(&cur_time_mon);
28465
28466@@ -1416,9 +1416,9 @@ static void check_decay_read_errors(mdde
28467 * overflowing the shift of read_errors by hours_since_last.
28468 */
28469 if (hours_since_last >= 8 * sizeof(read_errors))
28470- atomic_set(&rdev->read_errors, 0);
28471+ atomic_set_unchecked(&rdev->read_errors, 0);
28472 else
28473- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
28474+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
28475 }
28476
28477 /*
28478@@ -1448,8 +1448,8 @@ static void fix_read_error(conf_t *conf,
28479 return;
28480
28481 check_decay_read_errors(mddev, rdev);
28482- atomic_inc(&rdev->read_errors);
28483- if (atomic_read(&rdev->read_errors) > max_read_errors) {
28484+ atomic_inc_unchecked(&rdev->read_errors);
28485+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
28486 char b[BDEVNAME_SIZE];
28487 bdevname(rdev->bdev, b);
28488
28489@@ -1457,7 +1457,7 @@ static void fix_read_error(conf_t *conf,
28490 "md/raid10:%s: %s: Raid device exceeded "
28491 "read_error threshold [cur %d:max %d]\n",
28492 mdname(mddev), b,
28493- atomic_read(&rdev->read_errors), max_read_errors);
28494+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
28495 printk(KERN_NOTICE
28496 "md/raid10:%s: %s: Failing raid device\n",
28497 mdname(mddev), b);
28498@@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
28499 test_bit(In_sync, &rdev->flags)) {
28500 atomic_inc(&rdev->nr_pending);
28501 rcu_read_unlock();
28502- atomic_add(s, &rdev->corrected_errors);
28503+ atomic_add_unchecked(s, &rdev->corrected_errors);
28504 if (sync_page_io(rdev,
28505 r10_bio->devs[sl].addr +
28506 sect,
28507diff -urNp linux-3.0.4/drivers/md/raid1.c linux-3.0.4/drivers/md/raid1.c
28508--- linux-3.0.4/drivers/md/raid1.c 2011-07-21 22:17:23.000000000 -0400
28509+++ linux-3.0.4/drivers/md/raid1.c 2011-08-23 21:47:55.000000000 -0400
28510@@ -1263,7 +1263,7 @@ static int fix_sync_read_error(r1bio_t *
28511 rdev_dec_pending(rdev, mddev);
28512 md_error(mddev, rdev);
28513 } else
28514- atomic_add(s, &rdev->corrected_errors);
28515+ atomic_add_unchecked(s, &rdev->corrected_errors);
28516 }
28517 d = start;
28518 while (d != r1_bio->read_disk) {
28519@@ -1492,7 +1492,7 @@ static void fix_read_error(conf_t *conf,
28520 /* Well, this device is dead */
28521 md_error(mddev, rdev);
28522 else {
28523- atomic_add(s, &rdev->corrected_errors);
28524+ atomic_add_unchecked(s, &rdev->corrected_errors);
28525 printk(KERN_INFO
28526 "md/raid1:%s: read error corrected "
28527 "(%d sectors at %llu on %s)\n",
28528diff -urNp linux-3.0.4/drivers/md/raid5.c linux-3.0.4/drivers/md/raid5.c
28529--- linux-3.0.4/drivers/md/raid5.c 2011-07-21 22:17:23.000000000 -0400
28530+++ linux-3.0.4/drivers/md/raid5.c 2011-08-23 21:48:14.000000000 -0400
28531@@ -550,7 +550,7 @@ static void ops_run_io(struct stripe_hea
28532 bi->bi_next = NULL;
28533 if ((rw & WRITE) &&
28534 test_bit(R5_ReWrite, &sh->dev[i].flags))
28535- atomic_add(STRIPE_SECTORS,
28536+ atomic_add_unchecked(STRIPE_SECTORS,
28537 &rdev->corrected_errors);
28538 generic_make_request(bi);
28539 } else {
28540@@ -1596,15 +1596,15 @@ static void raid5_end_read_request(struc
28541 clear_bit(R5_ReadError, &sh->dev[i].flags);
28542 clear_bit(R5_ReWrite, &sh->dev[i].flags);
28543 }
28544- if (atomic_read(&conf->disks[i].rdev->read_errors))
28545- atomic_set(&conf->disks[i].rdev->read_errors, 0);
28546+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
28547+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
28548 } else {
28549 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
28550 int retry = 0;
28551 rdev = conf->disks[i].rdev;
28552
28553 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
28554- atomic_inc(&rdev->read_errors);
28555+ atomic_inc_unchecked(&rdev->read_errors);
28556 if (conf->mddev->degraded >= conf->max_degraded)
28557 printk_rl(KERN_WARNING
28558 "md/raid:%s: read error not correctable "
28559@@ -1622,7 +1622,7 @@ static void raid5_end_read_request(struc
28560 (unsigned long long)(sh->sector
28561 + rdev->data_offset),
28562 bdn);
28563- else if (atomic_read(&rdev->read_errors)
28564+ else if (atomic_read_unchecked(&rdev->read_errors)
28565 > conf->max_nr_stripes)
28566 printk(KERN_WARNING
28567 "md/raid:%s: Too many read errors, failing device %s.\n",
28568@@ -1945,6 +1945,7 @@ static sector_t compute_blocknr(struct s
28569 sector_t r_sector;
28570 struct stripe_head sh2;
28571
28572+ pax_track_stack();
28573
28574 chunk_offset = sector_div(new_sector, sectors_per_chunk);
28575 stripe = new_sector;
28576diff -urNp linux-3.0.4/drivers/media/common/saa7146_hlp.c linux-3.0.4/drivers/media/common/saa7146_hlp.c
28577--- linux-3.0.4/drivers/media/common/saa7146_hlp.c 2011-07-21 22:17:23.000000000 -0400
28578+++ linux-3.0.4/drivers/media/common/saa7146_hlp.c 2011-08-23 21:48:14.000000000 -0400
28579@@ -353,6 +353,8 @@ static void calculate_clipping_registers
28580
28581 int x[32], y[32], w[32], h[32];
28582
28583+ pax_track_stack();
28584+
28585 /* clear out memory */
28586 memset(&line_list[0], 0x00, sizeof(u32)*32);
28587 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
28588diff -urNp linux-3.0.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-3.0.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
28589--- linux-3.0.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-07-21 22:17:23.000000000 -0400
28590+++ linux-3.0.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-08-23 21:48:14.000000000 -0400
28591@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
28592 u8 buf[HOST_LINK_BUF_SIZE];
28593 int i;
28594
28595+ pax_track_stack();
28596+
28597 dprintk("%s\n", __func__);
28598
28599 /* check if we have space for a link buf in the rx_buffer */
28600@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
28601 unsigned long timeout;
28602 int written;
28603
28604+ pax_track_stack();
28605+
28606 dprintk("%s\n", __func__);
28607
28608 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
28609diff -urNp linux-3.0.4/drivers/media/dvb/dvb-core/dvb_demux.h linux-3.0.4/drivers/media/dvb/dvb-core/dvb_demux.h
28610--- linux-3.0.4/drivers/media/dvb/dvb-core/dvb_demux.h 2011-07-21 22:17:23.000000000 -0400
28611+++ linux-3.0.4/drivers/media/dvb/dvb-core/dvb_demux.h 2011-08-24 18:24:40.000000000 -0400
28612@@ -68,12 +68,12 @@ struct dvb_demux_feed {
28613 union {
28614 struct dmx_ts_feed ts;
28615 struct dmx_section_feed sec;
28616- } feed;
28617+ } __no_const feed;
28618
28619 union {
28620 dmx_ts_cb ts;
28621 dmx_section_cb sec;
28622- } cb;
28623+ } __no_const cb;
28624
28625 struct dvb_demux *demux;
28626 void *priv;
28627diff -urNp linux-3.0.4/drivers/media/dvb/dvb-core/dvbdev.c linux-3.0.4/drivers/media/dvb/dvb-core/dvbdev.c
28628--- linux-3.0.4/drivers/media/dvb/dvb-core/dvbdev.c 2011-07-21 22:17:23.000000000 -0400
28629+++ linux-3.0.4/drivers/media/dvb/dvb-core/dvbdev.c 2011-08-24 18:24:19.000000000 -0400
28630@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapt
28631 const struct dvb_device *template, void *priv, int type)
28632 {
28633 struct dvb_device *dvbdev;
28634- struct file_operations *dvbdevfops;
28635+ file_operations_no_const *dvbdevfops;
28636 struct device *clsdev;
28637 int minor;
28638 int id;
28639diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/cxusb.c linux-3.0.4/drivers/media/dvb/dvb-usb/cxusb.c
28640--- linux-3.0.4/drivers/media/dvb/dvb-usb/cxusb.c 2011-07-21 22:17:23.000000000 -0400
28641+++ linux-3.0.4/drivers/media/dvb/dvb-usb/cxusb.c 2011-08-24 18:26:33.000000000 -0400
28642@@ -1059,7 +1059,7 @@ static struct dib0070_config dib7070p_di
28643 struct dib0700_adapter_state {
28644 int (*set_param_save) (struct dvb_frontend *,
28645 struct dvb_frontend_parameters *);
28646-};
28647+} __no_const;
28648
28649 static int dib7070_set_param_override(struct dvb_frontend *fe,
28650 struct dvb_frontend_parameters *fep)
28651diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/dib0700_core.c linux-3.0.4/drivers/media/dvb/dvb-usb/dib0700_core.c
28652--- linux-3.0.4/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-07-21 22:17:23.000000000 -0400
28653+++ linux-3.0.4/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-08-23 21:48:14.000000000 -0400
28654@@ -434,6 +434,8 @@ int dib0700_download_firmware(struct usb
28655 if (!buf)
28656 return -ENOMEM;
28657
28658+ pax_track_stack();
28659+
28660 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
28661 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",
28662 hx.addr, hx.len, hx.chk);
28663diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/dibusb.h linux-3.0.4/drivers/media/dvb/dvb-usb/dibusb.h
28664--- linux-3.0.4/drivers/media/dvb/dvb-usb/dibusb.h 2011-07-21 22:17:23.000000000 -0400
28665+++ linux-3.0.4/drivers/media/dvb/dvb-usb/dibusb.h 2011-08-24 18:27:27.000000000 -0400
28666@@ -97,7 +97,7 @@
28667 #define DIBUSB_IOCTL_CMD_DISABLE_STREAM 0x02
28668
28669 struct dibusb_state {
28670- struct dib_fe_xfer_ops ops;
28671+ dib_fe_xfer_ops_no_const ops;
28672 int mt2060_present;
28673 u8 tuner_addr;
28674 };
28675diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/dw2102.c linux-3.0.4/drivers/media/dvb/dvb-usb/dw2102.c
28676--- linux-3.0.4/drivers/media/dvb/dvb-usb/dw2102.c 2011-07-21 22:17:23.000000000 -0400
28677+++ linux-3.0.4/drivers/media/dvb/dvb-usb/dw2102.c 2011-08-24 18:27:45.000000000 -0400
28678@@ -95,7 +95,7 @@ struct su3000_state {
28679
28680 struct s6x0_state {
28681 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
28682-};
28683+} __no_const;
28684
28685 /* debug */
28686 static int dvb_usb_dw2102_debug;
28687diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/lmedm04.c linux-3.0.4/drivers/media/dvb/dvb-usb/lmedm04.c
28688--- linux-3.0.4/drivers/media/dvb/dvb-usb/lmedm04.c 2011-07-21 22:17:23.000000000 -0400
28689+++ linux-3.0.4/drivers/media/dvb/dvb-usb/lmedm04.c 2011-08-23 21:48:14.000000000 -0400
28690@@ -742,6 +742,7 @@ static int lme2510_download_firmware(str
28691 usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
28692 0x06, 0x80, 0x0200, 0x00, data, 0x0109, 1000);
28693
28694+ pax_track_stack();
28695
28696 data[0] = 0x8a;
28697 len_in = 1;
28698@@ -764,6 +765,8 @@ static void lme_coldreset(struct usb_dev
28699 int ret = 0, len_in;
28700 u8 data[512] = {0};
28701
28702+ pax_track_stack();
28703+
28704 data[0] = 0x0a;
28705 len_in = 1;
28706 info("FRM Firmware Cold Reset");
28707diff -urNp linux-3.0.4/drivers/media/dvb/frontends/dib3000.h linux-3.0.4/drivers/media/dvb/frontends/dib3000.h
28708--- linux-3.0.4/drivers/media/dvb/frontends/dib3000.h 2011-07-21 22:17:23.000000000 -0400
28709+++ linux-3.0.4/drivers/media/dvb/frontends/dib3000.h 2011-10-07 19:07:39.000000000 -0400
28710@@ -40,10 +40,11 @@ struct dib_fe_xfer_ops
28711 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
28712 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
28713 };
28714+typedef struct dib_fe_xfer_ops __no_const dib_fe_xfer_ops_no_const;
28715
28716 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
28717 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
28718- struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops);
28719+ struct i2c_adapter* i2c, dib_fe_xfer_ops_no_const *xfer_ops);
28720 #else
28721 static inline struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
28722 struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops)
28723diff -urNp linux-3.0.4/drivers/media/dvb/frontends/dib3000mb.c linux-3.0.4/drivers/media/dvb/frontends/dib3000mb.c
28724--- linux-3.0.4/drivers/media/dvb/frontends/dib3000mb.c 2011-07-21 22:17:23.000000000 -0400
28725+++ linux-3.0.4/drivers/media/dvb/frontends/dib3000mb.c 2011-08-24 18:28:42.000000000 -0400
28726@@ -756,7 +756,7 @@ static int dib3000mb_tuner_pass_ctrl(str
28727 static struct dvb_frontend_ops dib3000mb_ops;
28728
28729 struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
28730- struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops)
28731+ struct i2c_adapter* i2c, dib_fe_xfer_ops_no_const *xfer_ops)
28732 {
28733 struct dib3000_state* state = NULL;
28734
28735diff -urNp linux-3.0.4/drivers/media/dvb/frontends/mb86a16.c linux-3.0.4/drivers/media/dvb/frontends/mb86a16.c
28736--- linux-3.0.4/drivers/media/dvb/frontends/mb86a16.c 2011-07-21 22:17:23.000000000 -0400
28737+++ linux-3.0.4/drivers/media/dvb/frontends/mb86a16.c 2011-08-23 21:48:14.000000000 -0400
28738@@ -1060,6 +1060,8 @@ static int mb86a16_set_fe(struct mb86a16
28739 int ret = -1;
28740 int sync;
28741
28742+ pax_track_stack();
28743+
28744 dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate);
28745
28746 fcp = 3000;
28747diff -urNp linux-3.0.4/drivers/media/dvb/frontends/or51211.c linux-3.0.4/drivers/media/dvb/frontends/or51211.c
28748--- linux-3.0.4/drivers/media/dvb/frontends/or51211.c 2011-07-21 22:17:23.000000000 -0400
28749+++ linux-3.0.4/drivers/media/dvb/frontends/or51211.c 2011-08-23 21:48:14.000000000 -0400
28750@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
28751 u8 tudata[585];
28752 int i;
28753
28754+ pax_track_stack();
28755+
28756 dprintk("Firmware is %zd bytes\n",fw->size);
28757
28758 /* Get eprom data */
28759diff -urNp linux-3.0.4/drivers/media/video/cx18/cx18-driver.c linux-3.0.4/drivers/media/video/cx18/cx18-driver.c
28760--- linux-3.0.4/drivers/media/video/cx18/cx18-driver.c 2011-07-21 22:17:23.000000000 -0400
28761+++ linux-3.0.4/drivers/media/video/cx18/cx18-driver.c 2011-08-23 21:48:14.000000000 -0400
28762@@ -327,6 +327,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
28763 struct i2c_client c;
28764 u8 eedata[256];
28765
28766+ pax_track_stack();
28767+
28768 memset(&c, 0, sizeof(c));
28769 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
28770 c.adapter = &cx->i2c_adap[0];
28771diff -urNp linux-3.0.4/drivers/media/video/cx23885/cx23885-input.c linux-3.0.4/drivers/media/video/cx23885/cx23885-input.c
28772--- linux-3.0.4/drivers/media/video/cx23885/cx23885-input.c 2011-07-21 22:17:23.000000000 -0400
28773+++ linux-3.0.4/drivers/media/video/cx23885/cx23885-input.c 2011-08-23 21:48:14.000000000 -0400
28774@@ -53,6 +53,8 @@ static void cx23885_input_process_measur
28775 bool handle = false;
28776 struct ir_raw_event ir_core_event[64];
28777
28778+ pax_track_stack();
28779+
28780 do {
28781 num = 0;
28782 v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) ir_core_event,
28783diff -urNp linux-3.0.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-3.0.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
28784--- linux-3.0.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-07-21 22:17:23.000000000 -0400
28785+++ linux-3.0.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-08-23 21:48:14.000000000 -0400
28786@@ -120,6 +120,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
28787 u8 *eeprom;
28788 struct tveeprom tvdata;
28789
28790+ pax_track_stack();
28791+
28792 memset(&tvdata,0,sizeof(tvdata));
28793
28794 eeprom = pvr2_eeprom_fetch(hdw);
28795diff -urNp linux-3.0.4/drivers/media/video/saa7134/saa6752hs.c linux-3.0.4/drivers/media/video/saa7134/saa6752hs.c
28796--- linux-3.0.4/drivers/media/video/saa7134/saa6752hs.c 2011-07-21 22:17:23.000000000 -0400
28797+++ linux-3.0.4/drivers/media/video/saa7134/saa6752hs.c 2011-08-23 21:48:14.000000000 -0400
28798@@ -682,6 +682,8 @@ static int saa6752hs_init(struct v4l2_su
28799 unsigned char localPAT[256];
28800 unsigned char localPMT[256];
28801
28802+ pax_track_stack();
28803+
28804 /* Set video format - must be done first as it resets other settings */
28805 set_reg8(client, 0x41, h->video_format);
28806
28807diff -urNp linux-3.0.4/drivers/media/video/saa7164/saa7164-cmd.c linux-3.0.4/drivers/media/video/saa7164/saa7164-cmd.c
28808--- linux-3.0.4/drivers/media/video/saa7164/saa7164-cmd.c 2011-07-21 22:17:23.000000000 -0400
28809+++ linux-3.0.4/drivers/media/video/saa7164/saa7164-cmd.c 2011-08-23 21:48:14.000000000 -0400
28810@@ -88,6 +88,8 @@ int saa7164_irq_dequeue(struct saa7164_d
28811 u8 tmp[512];
28812 dprintk(DBGLVL_CMD, "%s()\n", __func__);
28813
28814+ pax_track_stack();
28815+
28816 /* While any outstand message on the bus exists... */
28817 do {
28818
28819@@ -141,6 +143,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
28820 u8 tmp[512];
28821 dprintk(DBGLVL_CMD, "%s()\n", __func__);
28822
28823+ pax_track_stack();
28824+
28825 while (loop) {
28826
28827 struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
28828diff -urNp linux-3.0.4/drivers/media/video/timblogiw.c linux-3.0.4/drivers/media/video/timblogiw.c
28829--- linux-3.0.4/drivers/media/video/timblogiw.c 2011-07-21 22:17:23.000000000 -0400
28830+++ linux-3.0.4/drivers/media/video/timblogiw.c 2011-08-24 18:29:20.000000000 -0400
28831@@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *f
28832
28833 /* Platform device functions */
28834
28835-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
28836+static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
28837 .vidioc_querycap = timblogiw_querycap,
28838 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
28839 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
28840diff -urNp linux-3.0.4/drivers/media/video/usbvision/usbvision-core.c linux-3.0.4/drivers/media/video/usbvision/usbvision-core.c
28841--- linux-3.0.4/drivers/media/video/usbvision/usbvision-core.c 2011-07-21 22:17:23.000000000 -0400
28842+++ linux-3.0.4/drivers/media/video/usbvision/usbvision-core.c 2011-08-23 21:48:14.000000000 -0400
28843@@ -707,6 +707,8 @@ static enum parse_state usbvision_parse_
28844 unsigned char rv, gv, bv;
28845 static unsigned char *Y, *U, *V;
28846
28847+ pax_track_stack();
28848+
28849 frame = usbvision->cur_frame;
28850 image_size = frame->frmwidth * frame->frmheight;
28851 if ((frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
28852diff -urNp linux-3.0.4/drivers/media/video/videobuf-dma-sg.c linux-3.0.4/drivers/media/video/videobuf-dma-sg.c
28853--- linux-3.0.4/drivers/media/video/videobuf-dma-sg.c 2011-07-21 22:17:23.000000000 -0400
28854+++ linux-3.0.4/drivers/media/video/videobuf-dma-sg.c 2011-08-23 21:48:14.000000000 -0400
28855@@ -606,6 +606,8 @@ void *videobuf_sg_alloc(size_t size)
28856 {
28857 struct videobuf_queue q;
28858
28859+ pax_track_stack();
28860+
28861 /* Required to make generic handler to call __videobuf_alloc */
28862 q.int_ops = &sg_ops;
28863
28864diff -urNp linux-3.0.4/drivers/message/fusion/mptbase.c linux-3.0.4/drivers/message/fusion/mptbase.c
28865--- linux-3.0.4/drivers/message/fusion/mptbase.c 2011-07-21 22:17:23.000000000 -0400
28866+++ linux-3.0.4/drivers/message/fusion/mptbase.c 2011-08-23 21:48:14.000000000 -0400
28867@@ -6681,8 +6681,13 @@ static int mpt_iocinfo_proc_show(struct
28868 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
28869 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
28870
28871+#ifdef CONFIG_GRKERNSEC_HIDESYM
28872+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
28873+#else
28874 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
28875 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
28876+#endif
28877+
28878 /*
28879 * Rounding UP to nearest 4-kB boundary here...
28880 */
28881diff -urNp linux-3.0.4/drivers/message/fusion/mptsas.c linux-3.0.4/drivers/message/fusion/mptsas.c
28882--- linux-3.0.4/drivers/message/fusion/mptsas.c 2011-07-21 22:17:23.000000000 -0400
28883+++ linux-3.0.4/drivers/message/fusion/mptsas.c 2011-08-23 21:47:55.000000000 -0400
28884@@ -439,6 +439,23 @@ mptsas_is_end_device(struct mptsas_devin
28885 return 0;
28886 }
28887
28888+static inline void
28889+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
28890+{
28891+ if (phy_info->port_details) {
28892+ phy_info->port_details->rphy = rphy;
28893+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
28894+ ioc->name, rphy));
28895+ }
28896+
28897+ if (rphy) {
28898+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
28899+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
28900+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
28901+ ioc->name, rphy, rphy->dev.release));
28902+ }
28903+}
28904+
28905 /* no mutex */
28906 static void
28907 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
28908@@ -477,23 +494,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
28909 return NULL;
28910 }
28911
28912-static inline void
28913-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
28914-{
28915- if (phy_info->port_details) {
28916- phy_info->port_details->rphy = rphy;
28917- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
28918- ioc->name, rphy));
28919- }
28920-
28921- if (rphy) {
28922- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
28923- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
28924- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
28925- ioc->name, rphy, rphy->dev.release));
28926- }
28927-}
28928-
28929 static inline struct sas_port *
28930 mptsas_get_port(struct mptsas_phyinfo *phy_info)
28931 {
28932diff -urNp linux-3.0.4/drivers/message/fusion/mptscsih.c linux-3.0.4/drivers/message/fusion/mptscsih.c
28933--- linux-3.0.4/drivers/message/fusion/mptscsih.c 2011-07-21 22:17:23.000000000 -0400
28934+++ linux-3.0.4/drivers/message/fusion/mptscsih.c 2011-08-23 21:47:55.000000000 -0400
28935@@ -1268,15 +1268,16 @@ mptscsih_info(struct Scsi_Host *SChost)
28936
28937 h = shost_priv(SChost);
28938
28939- if (h) {
28940- if (h->info_kbuf == NULL)
28941- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
28942- return h->info_kbuf;
28943- h->info_kbuf[0] = '\0';
28944+ if (!h)
28945+ return NULL;
28946
28947- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
28948- h->info_kbuf[size-1] = '\0';
28949- }
28950+ if (h->info_kbuf == NULL)
28951+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
28952+ return h->info_kbuf;
28953+ h->info_kbuf[0] = '\0';
28954+
28955+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
28956+ h->info_kbuf[size-1] = '\0';
28957
28958 return h->info_kbuf;
28959 }
28960diff -urNp linux-3.0.4/drivers/message/i2o/i2o_config.c linux-3.0.4/drivers/message/i2o/i2o_config.c
28961--- linux-3.0.4/drivers/message/i2o/i2o_config.c 2011-07-21 22:17:23.000000000 -0400
28962+++ linux-3.0.4/drivers/message/i2o/i2o_config.c 2011-08-23 21:48:14.000000000 -0400
28963@@ -781,6 +781,8 @@ static int i2o_cfg_passthru(unsigned lon
28964 struct i2o_message *msg;
28965 unsigned int iop;
28966
28967+ pax_track_stack();
28968+
28969 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
28970 return -EFAULT;
28971
28972diff -urNp linux-3.0.4/drivers/message/i2o/i2o_proc.c linux-3.0.4/drivers/message/i2o/i2o_proc.c
28973--- linux-3.0.4/drivers/message/i2o/i2o_proc.c 2011-07-21 22:17:23.000000000 -0400
28974+++ linux-3.0.4/drivers/message/i2o/i2o_proc.c 2011-08-23 21:47:55.000000000 -0400
28975@@ -255,13 +255,6 @@ static char *scsi_devices[] = {
28976 "Array Controller Device"
28977 };
28978
28979-static char *chtostr(u8 * chars, int n)
28980-{
28981- char tmp[256];
28982- tmp[0] = 0;
28983- return strncat(tmp, (char *)chars, n);
28984-}
28985-
28986 static int i2o_report_query_status(struct seq_file *seq, int block_status,
28987 char *group)
28988 {
28989@@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct
28990
28991 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
28992 seq_printf(seq, "%-#8x", ddm_table.module_id);
28993- seq_printf(seq, "%-29s",
28994- chtostr(ddm_table.module_name_version, 28));
28995+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
28996 seq_printf(seq, "%9d ", ddm_table.data_size);
28997 seq_printf(seq, "%8d", ddm_table.code_size);
28998
28999@@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(s
29000
29001 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
29002 seq_printf(seq, "%-#8x", dst->module_id);
29003- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
29004- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
29005+ seq_printf(seq, "%-.28s", dst->module_name_version);
29006+ seq_printf(seq, "%-.8s", dst->date);
29007 seq_printf(seq, "%8d ", dst->module_size);
29008 seq_printf(seq, "%8d ", dst->mpb_size);
29009 seq_printf(seq, "0x%04x", dst->module_flags);
29010@@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(str
29011 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
29012 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
29013 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
29014- seq_printf(seq, "Vendor info : %s\n",
29015- chtostr((u8 *) (work32 + 2), 16));
29016- seq_printf(seq, "Product info : %s\n",
29017- chtostr((u8 *) (work32 + 6), 16));
29018- seq_printf(seq, "Description : %s\n",
29019- chtostr((u8 *) (work32 + 10), 16));
29020- seq_printf(seq, "Product rev. : %s\n",
29021- chtostr((u8 *) (work32 + 14), 8));
29022+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
29023+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
29024+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
29025+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
29026
29027 seq_printf(seq, "Serial number : ");
29028 print_serial_number(seq, (u8 *) (work32 + 16),
29029@@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(str
29030 }
29031
29032 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
29033- seq_printf(seq, "Module name : %s\n",
29034- chtostr(result.module_name, 24));
29035- seq_printf(seq, "Module revision : %s\n",
29036- chtostr(result.module_rev, 8));
29037+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
29038+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
29039
29040 seq_printf(seq, "Serial number : ");
29041 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
29042@@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq
29043 return 0;
29044 }
29045
29046- seq_printf(seq, "Device name : %s\n",
29047- chtostr(result.device_name, 64));
29048- seq_printf(seq, "Service name : %s\n",
29049- chtostr(result.service_name, 64));
29050- seq_printf(seq, "Physical name : %s\n",
29051- chtostr(result.physical_location, 64));
29052- seq_printf(seq, "Instance number : %s\n",
29053- chtostr(result.instance_number, 4));
29054+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
29055+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
29056+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
29057+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
29058
29059 return 0;
29060 }
29061diff -urNp linux-3.0.4/drivers/message/i2o/iop.c linux-3.0.4/drivers/message/i2o/iop.c
29062--- linux-3.0.4/drivers/message/i2o/iop.c 2011-07-21 22:17:23.000000000 -0400
29063+++ linux-3.0.4/drivers/message/i2o/iop.c 2011-08-23 21:47:55.000000000 -0400
29064@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
29065
29066 spin_lock_irqsave(&c->context_list_lock, flags);
29067
29068- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
29069- atomic_inc(&c->context_list_counter);
29070+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
29071+ atomic_inc_unchecked(&c->context_list_counter);
29072
29073- entry->context = atomic_read(&c->context_list_counter);
29074+ entry->context = atomic_read_unchecked(&c->context_list_counter);
29075
29076 list_add(&entry->list, &c->context_list);
29077
29078@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(voi
29079
29080 #if BITS_PER_LONG == 64
29081 spin_lock_init(&c->context_list_lock);
29082- atomic_set(&c->context_list_counter, 0);
29083+ atomic_set_unchecked(&c->context_list_counter, 0);
29084 INIT_LIST_HEAD(&c->context_list);
29085 #endif
29086
29087diff -urNp linux-3.0.4/drivers/mfd/abx500-core.c linux-3.0.4/drivers/mfd/abx500-core.c
29088--- linux-3.0.4/drivers/mfd/abx500-core.c 2011-07-21 22:17:23.000000000 -0400
29089+++ linux-3.0.4/drivers/mfd/abx500-core.c 2011-08-23 21:47:55.000000000 -0400
29090@@ -14,7 +14,7 @@ static LIST_HEAD(abx500_list);
29091
29092 struct abx500_device_entry {
29093 struct list_head list;
29094- struct abx500_ops ops;
29095+ abx500_ops_no_const ops;
29096 struct device *dev;
29097 };
29098
29099diff -urNp linux-3.0.4/drivers/mfd/janz-cmodio.c linux-3.0.4/drivers/mfd/janz-cmodio.c
29100--- linux-3.0.4/drivers/mfd/janz-cmodio.c 2011-07-21 22:17:23.000000000 -0400
29101+++ linux-3.0.4/drivers/mfd/janz-cmodio.c 2011-08-23 21:47:55.000000000 -0400
29102@@ -13,6 +13,7 @@
29103
29104 #include <linux/kernel.h>
29105 #include <linux/module.h>
29106+#include <linux/slab.h>
29107 #include <linux/init.h>
29108 #include <linux/pci.h>
29109 #include <linux/interrupt.h>
29110diff -urNp linux-3.0.4/drivers/mfd/wm8350-i2c.c linux-3.0.4/drivers/mfd/wm8350-i2c.c
29111--- linux-3.0.4/drivers/mfd/wm8350-i2c.c 2011-07-21 22:17:23.000000000 -0400
29112+++ linux-3.0.4/drivers/mfd/wm8350-i2c.c 2011-08-23 21:48:14.000000000 -0400
29113@@ -44,6 +44,8 @@ static int wm8350_i2c_write_device(struc
29114 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
29115 int ret;
29116
29117+ pax_track_stack();
29118+
29119 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
29120 return -EINVAL;
29121
29122diff -urNp linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.c linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.c
29123--- linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.c 2011-07-21 22:17:23.000000000 -0400
29124+++ linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.c 2011-08-23 21:47:55.000000000 -0400
29125@@ -435,7 +435,7 @@ static irqreturn_t lis302dl_interrupt(in
29126 * the lid is closed. This leads to interrupts as soon as a little move
29127 * is done.
29128 */
29129- atomic_inc(&lis3_dev.count);
29130+ atomic_inc_unchecked(&lis3_dev.count);
29131
29132 wake_up_interruptible(&lis3_dev.misc_wait);
29133 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
29134@@ -518,7 +518,7 @@ static int lis3lv02d_misc_open(struct in
29135 if (lis3_dev.pm_dev)
29136 pm_runtime_get_sync(lis3_dev.pm_dev);
29137
29138- atomic_set(&lis3_dev.count, 0);
29139+ atomic_set_unchecked(&lis3_dev.count, 0);
29140 return 0;
29141 }
29142
29143@@ -545,7 +545,7 @@ static ssize_t lis3lv02d_misc_read(struc
29144 add_wait_queue(&lis3_dev.misc_wait, &wait);
29145 while (true) {
29146 set_current_state(TASK_INTERRUPTIBLE);
29147- data = atomic_xchg(&lis3_dev.count, 0);
29148+ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
29149 if (data)
29150 break;
29151
29152@@ -583,7 +583,7 @@ out:
29153 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
29154 {
29155 poll_wait(file, &lis3_dev.misc_wait, wait);
29156- if (atomic_read(&lis3_dev.count))
29157+ if (atomic_read_unchecked(&lis3_dev.count))
29158 return POLLIN | POLLRDNORM;
29159 return 0;
29160 }
29161diff -urNp linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.h linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.h
29162--- linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.h 2011-07-21 22:17:23.000000000 -0400
29163+++ linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.h 2011-08-23 21:47:55.000000000 -0400
29164@@ -265,7 +265,7 @@ struct lis3lv02d {
29165 struct input_polled_dev *idev; /* input device */
29166 struct platform_device *pdev; /* platform device */
29167 struct regulator_bulk_data regulators[2];
29168- atomic_t count; /* interrupt count after last read */
29169+ atomic_unchecked_t count; /* interrupt count after last read */
29170 union axis_conversion ac; /* hw -> logical axis */
29171 int mapped_btns[3];
29172
29173diff -urNp linux-3.0.4/drivers/misc/sgi-gru/gruhandles.c linux-3.0.4/drivers/misc/sgi-gru/gruhandles.c
29174--- linux-3.0.4/drivers/misc/sgi-gru/gruhandles.c 2011-07-21 22:17:23.000000000 -0400
29175+++ linux-3.0.4/drivers/misc/sgi-gru/gruhandles.c 2011-08-23 21:47:55.000000000 -0400
29176@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op
29177 unsigned long nsec;
29178
29179 nsec = CLKS2NSEC(clks);
29180- atomic_long_inc(&mcs_op_statistics[op].count);
29181- atomic_long_add(nsec, &mcs_op_statistics[op].total);
29182+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
29183+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
29184 if (mcs_op_statistics[op].max < nsec)
29185 mcs_op_statistics[op].max = nsec;
29186 }
29187diff -urNp linux-3.0.4/drivers/misc/sgi-gru/gruprocfs.c linux-3.0.4/drivers/misc/sgi-gru/gruprocfs.c
29188--- linux-3.0.4/drivers/misc/sgi-gru/gruprocfs.c 2011-07-21 22:17:23.000000000 -0400
29189+++ linux-3.0.4/drivers/misc/sgi-gru/gruprocfs.c 2011-08-23 21:47:55.000000000 -0400
29190@@ -32,9 +32,9 @@
29191
29192 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
29193
29194-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
29195+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
29196 {
29197- unsigned long val = atomic_long_read(v);
29198+ unsigned long val = atomic_long_read_unchecked(v);
29199
29200 seq_printf(s, "%16lu %s\n", val, id);
29201 }
29202@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct se
29203
29204 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
29205 for (op = 0; op < mcsop_last; op++) {
29206- count = atomic_long_read(&mcs_op_statistics[op].count);
29207- total = atomic_long_read(&mcs_op_statistics[op].total);
29208+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
29209+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
29210 max = mcs_op_statistics[op].max;
29211 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
29212 count ? total / count : 0, max);
29213diff -urNp linux-3.0.4/drivers/misc/sgi-gru/grutables.h linux-3.0.4/drivers/misc/sgi-gru/grutables.h
29214--- linux-3.0.4/drivers/misc/sgi-gru/grutables.h 2011-07-21 22:17:23.000000000 -0400
29215+++ linux-3.0.4/drivers/misc/sgi-gru/grutables.h 2011-08-23 21:47:55.000000000 -0400
29216@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
29217 * GRU statistics.
29218 */
29219 struct gru_stats_s {
29220- atomic_long_t vdata_alloc;
29221- atomic_long_t vdata_free;
29222- atomic_long_t gts_alloc;
29223- atomic_long_t gts_free;
29224- atomic_long_t gms_alloc;
29225- atomic_long_t gms_free;
29226- atomic_long_t gts_double_allocate;
29227- atomic_long_t assign_context;
29228- atomic_long_t assign_context_failed;
29229- atomic_long_t free_context;
29230- atomic_long_t load_user_context;
29231- atomic_long_t load_kernel_context;
29232- atomic_long_t lock_kernel_context;
29233- atomic_long_t unlock_kernel_context;
29234- atomic_long_t steal_user_context;
29235- atomic_long_t steal_kernel_context;
29236- atomic_long_t steal_context_failed;
29237- atomic_long_t nopfn;
29238- atomic_long_t asid_new;
29239- atomic_long_t asid_next;
29240- atomic_long_t asid_wrap;
29241- atomic_long_t asid_reuse;
29242- atomic_long_t intr;
29243- atomic_long_t intr_cbr;
29244- atomic_long_t intr_tfh;
29245- atomic_long_t intr_spurious;
29246- atomic_long_t intr_mm_lock_failed;
29247- atomic_long_t call_os;
29248- atomic_long_t call_os_wait_queue;
29249- atomic_long_t user_flush_tlb;
29250- atomic_long_t user_unload_context;
29251- atomic_long_t user_exception;
29252- atomic_long_t set_context_option;
29253- atomic_long_t check_context_retarget_intr;
29254- atomic_long_t check_context_unload;
29255- atomic_long_t tlb_dropin;
29256- atomic_long_t tlb_preload_page;
29257- atomic_long_t tlb_dropin_fail_no_asid;
29258- atomic_long_t tlb_dropin_fail_upm;
29259- atomic_long_t tlb_dropin_fail_invalid;
29260- atomic_long_t tlb_dropin_fail_range_active;
29261- atomic_long_t tlb_dropin_fail_idle;
29262- atomic_long_t tlb_dropin_fail_fmm;
29263- atomic_long_t tlb_dropin_fail_no_exception;
29264- atomic_long_t tfh_stale_on_fault;
29265- atomic_long_t mmu_invalidate_range;
29266- atomic_long_t mmu_invalidate_page;
29267- atomic_long_t flush_tlb;
29268- atomic_long_t flush_tlb_gru;
29269- atomic_long_t flush_tlb_gru_tgh;
29270- atomic_long_t flush_tlb_gru_zero_asid;
29271-
29272- atomic_long_t copy_gpa;
29273- atomic_long_t read_gpa;
29274-
29275- atomic_long_t mesq_receive;
29276- atomic_long_t mesq_receive_none;
29277- atomic_long_t mesq_send;
29278- atomic_long_t mesq_send_failed;
29279- atomic_long_t mesq_noop;
29280- atomic_long_t mesq_send_unexpected_error;
29281- atomic_long_t mesq_send_lb_overflow;
29282- atomic_long_t mesq_send_qlimit_reached;
29283- atomic_long_t mesq_send_amo_nacked;
29284- atomic_long_t mesq_send_put_nacked;
29285- atomic_long_t mesq_page_overflow;
29286- atomic_long_t mesq_qf_locked;
29287- atomic_long_t mesq_qf_noop_not_full;
29288- atomic_long_t mesq_qf_switch_head_failed;
29289- atomic_long_t mesq_qf_unexpected_error;
29290- atomic_long_t mesq_noop_unexpected_error;
29291- atomic_long_t mesq_noop_lb_overflow;
29292- atomic_long_t mesq_noop_qlimit_reached;
29293- atomic_long_t mesq_noop_amo_nacked;
29294- atomic_long_t mesq_noop_put_nacked;
29295- atomic_long_t mesq_noop_page_overflow;
29296+ atomic_long_unchecked_t vdata_alloc;
29297+ atomic_long_unchecked_t vdata_free;
29298+ atomic_long_unchecked_t gts_alloc;
29299+ atomic_long_unchecked_t gts_free;
29300+ atomic_long_unchecked_t gms_alloc;
29301+ atomic_long_unchecked_t gms_free;
29302+ atomic_long_unchecked_t gts_double_allocate;
29303+ atomic_long_unchecked_t assign_context;
29304+ atomic_long_unchecked_t assign_context_failed;
29305+ atomic_long_unchecked_t free_context;
29306+ atomic_long_unchecked_t load_user_context;
29307+ atomic_long_unchecked_t load_kernel_context;
29308+ atomic_long_unchecked_t lock_kernel_context;
29309+ atomic_long_unchecked_t unlock_kernel_context;
29310+ atomic_long_unchecked_t steal_user_context;
29311+ atomic_long_unchecked_t steal_kernel_context;
29312+ atomic_long_unchecked_t steal_context_failed;
29313+ atomic_long_unchecked_t nopfn;
29314+ atomic_long_unchecked_t asid_new;
29315+ atomic_long_unchecked_t asid_next;
29316+ atomic_long_unchecked_t asid_wrap;
29317+ atomic_long_unchecked_t asid_reuse;
29318+ atomic_long_unchecked_t intr;
29319+ atomic_long_unchecked_t intr_cbr;
29320+ atomic_long_unchecked_t intr_tfh;
29321+ atomic_long_unchecked_t intr_spurious;
29322+ atomic_long_unchecked_t intr_mm_lock_failed;
29323+ atomic_long_unchecked_t call_os;
29324+ atomic_long_unchecked_t call_os_wait_queue;
29325+ atomic_long_unchecked_t user_flush_tlb;
29326+ atomic_long_unchecked_t user_unload_context;
29327+ atomic_long_unchecked_t user_exception;
29328+ atomic_long_unchecked_t set_context_option;
29329+ atomic_long_unchecked_t check_context_retarget_intr;
29330+ atomic_long_unchecked_t check_context_unload;
29331+ atomic_long_unchecked_t tlb_dropin;
29332+ atomic_long_unchecked_t tlb_preload_page;
29333+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
29334+ atomic_long_unchecked_t tlb_dropin_fail_upm;
29335+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
29336+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
29337+ atomic_long_unchecked_t tlb_dropin_fail_idle;
29338+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
29339+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
29340+ atomic_long_unchecked_t tfh_stale_on_fault;
29341+ atomic_long_unchecked_t mmu_invalidate_range;
29342+ atomic_long_unchecked_t mmu_invalidate_page;
29343+ atomic_long_unchecked_t flush_tlb;
29344+ atomic_long_unchecked_t flush_tlb_gru;
29345+ atomic_long_unchecked_t flush_tlb_gru_tgh;
29346+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
29347+
29348+ atomic_long_unchecked_t copy_gpa;
29349+ atomic_long_unchecked_t read_gpa;
29350+
29351+ atomic_long_unchecked_t mesq_receive;
29352+ atomic_long_unchecked_t mesq_receive_none;
29353+ atomic_long_unchecked_t mesq_send;
29354+ atomic_long_unchecked_t mesq_send_failed;
29355+ atomic_long_unchecked_t mesq_noop;
29356+ atomic_long_unchecked_t mesq_send_unexpected_error;
29357+ atomic_long_unchecked_t mesq_send_lb_overflow;
29358+ atomic_long_unchecked_t mesq_send_qlimit_reached;
29359+ atomic_long_unchecked_t mesq_send_amo_nacked;
29360+ atomic_long_unchecked_t mesq_send_put_nacked;
29361+ atomic_long_unchecked_t mesq_page_overflow;
29362+ atomic_long_unchecked_t mesq_qf_locked;
29363+ atomic_long_unchecked_t mesq_qf_noop_not_full;
29364+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
29365+ atomic_long_unchecked_t mesq_qf_unexpected_error;
29366+ atomic_long_unchecked_t mesq_noop_unexpected_error;
29367+ atomic_long_unchecked_t mesq_noop_lb_overflow;
29368+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
29369+ atomic_long_unchecked_t mesq_noop_amo_nacked;
29370+ atomic_long_unchecked_t mesq_noop_put_nacked;
29371+ atomic_long_unchecked_t mesq_noop_page_overflow;
29372
29373 };
29374
29375@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start
29376 tghop_invalidate, mcsop_last};
29377
29378 struct mcs_op_statistic {
29379- atomic_long_t count;
29380- atomic_long_t total;
29381+ atomic_long_unchecked_t count;
29382+ atomic_long_unchecked_t total;
29383 unsigned long max;
29384 };
29385
29386@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_st
29387
29388 #define STAT(id) do { \
29389 if (gru_options & OPT_STATS) \
29390- atomic_long_inc(&gru_stats.id); \
29391+ atomic_long_inc_unchecked(&gru_stats.id); \
29392 } while (0)
29393
29394 #ifdef CONFIG_SGI_GRU_DEBUG
29395diff -urNp linux-3.0.4/drivers/misc/sgi-xp/xp.h linux-3.0.4/drivers/misc/sgi-xp/xp.h
29396--- linux-3.0.4/drivers/misc/sgi-xp/xp.h 2011-07-21 22:17:23.000000000 -0400
29397+++ linux-3.0.4/drivers/misc/sgi-xp/xp.h 2011-08-23 21:47:55.000000000 -0400
29398@@ -289,7 +289,7 @@ struct xpc_interface {
29399 xpc_notify_func, void *);
29400 void (*received) (short, int, void *);
29401 enum xp_retval (*partid_to_nasids) (short, void *);
29402-};
29403+} __no_const;
29404
29405 extern struct xpc_interface xpc_interface;
29406
29407diff -urNp linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0001.c linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0001.c
29408--- linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0001.c 2011-07-21 22:17:23.000000000 -0400
29409+++ linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0001.c 2011-08-23 21:48:14.000000000 -0400
29410@@ -757,6 +757,8 @@ static int chip_ready (struct map_info *
29411 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
29412 unsigned long timeo = jiffies + HZ;
29413
29414+ pax_track_stack();
29415+
29416 /* Prevent setting state FL_SYNCING for chip in suspended state. */
29417 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
29418 goto sleep;
29419@@ -1653,6 +1655,8 @@ static int __xipram do_write_buffer(stru
29420 unsigned long initial_adr;
29421 int initial_len = len;
29422
29423+ pax_track_stack();
29424+
29425 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
29426 adr += chip->start;
29427 initial_adr = adr;
29428@@ -1871,6 +1875,8 @@ static int __xipram do_erase_oneblock(st
29429 int retries = 3;
29430 int ret;
29431
29432+ pax_track_stack();
29433+
29434 adr += chip->start;
29435
29436 retry:
29437diff -urNp linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0020.c linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0020.c
29438--- linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0020.c 2011-07-21 22:17:23.000000000 -0400
29439+++ linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0020.c 2011-08-23 21:48:14.000000000 -0400
29440@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
29441 unsigned long cmd_addr;
29442 struct cfi_private *cfi = map->fldrv_priv;
29443
29444+ pax_track_stack();
29445+
29446 adr += chip->start;
29447
29448 /* Ensure cmd read/writes are aligned. */
29449@@ -429,6 +431,8 @@ static inline int do_write_buffer(struct
29450 DECLARE_WAITQUEUE(wait, current);
29451 int wbufsize, z;
29452
29453+ pax_track_stack();
29454+
29455 /* M58LW064A requires bus alignment for buffer wriets -- saw */
29456 if (adr & (map_bankwidth(map)-1))
29457 return -EINVAL;
29458@@ -743,6 +747,8 @@ static inline int do_erase_oneblock(stru
29459 DECLARE_WAITQUEUE(wait, current);
29460 int ret = 0;
29461
29462+ pax_track_stack();
29463+
29464 adr += chip->start;
29465
29466 /* Let's determine this according to the interleave only once */
29467@@ -1048,6 +1054,8 @@ static inline int do_lock_oneblock(struc
29468 unsigned long timeo = jiffies + HZ;
29469 DECLARE_WAITQUEUE(wait, current);
29470
29471+ pax_track_stack();
29472+
29473 adr += chip->start;
29474
29475 /* Let's determine this according to the interleave only once */
29476@@ -1197,6 +1205,8 @@ static inline int do_unlock_oneblock(str
29477 unsigned long timeo = jiffies + HZ;
29478 DECLARE_WAITQUEUE(wait, current);
29479
29480+ pax_track_stack();
29481+
29482 adr += chip->start;
29483
29484 /* Let's determine this according to the interleave only once */
29485diff -urNp linux-3.0.4/drivers/mtd/devices/doc2000.c linux-3.0.4/drivers/mtd/devices/doc2000.c
29486--- linux-3.0.4/drivers/mtd/devices/doc2000.c 2011-07-21 22:17:23.000000000 -0400
29487+++ linux-3.0.4/drivers/mtd/devices/doc2000.c 2011-08-23 21:47:55.000000000 -0400
29488@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
29489
29490 /* The ECC will not be calculated correctly if less than 512 is written */
29491 /* DBB-
29492- if (len != 0x200 && eccbuf)
29493+ if (len != 0x200)
29494 printk(KERN_WARNING
29495 "ECC needs a full sector write (adr: %lx size %lx)\n",
29496 (long) to, (long) len);
29497diff -urNp linux-3.0.4/drivers/mtd/devices/doc2001.c linux-3.0.4/drivers/mtd/devices/doc2001.c
29498--- linux-3.0.4/drivers/mtd/devices/doc2001.c 2011-07-21 22:17:23.000000000 -0400
29499+++ linux-3.0.4/drivers/mtd/devices/doc2001.c 2011-08-23 21:47:55.000000000 -0400
29500@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
29501 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
29502
29503 /* Don't allow read past end of device */
29504- if (from >= this->totlen)
29505+ if (from >= this->totlen || !len)
29506 return -EINVAL;
29507
29508 /* Don't allow a single read to cross a 512-byte block boundary */
29509diff -urNp linux-3.0.4/drivers/mtd/ftl.c linux-3.0.4/drivers/mtd/ftl.c
29510--- linux-3.0.4/drivers/mtd/ftl.c 2011-07-21 22:17:23.000000000 -0400
29511+++ linux-3.0.4/drivers/mtd/ftl.c 2011-08-23 21:48:14.000000000 -0400
29512@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
29513 loff_t offset;
29514 uint16_t srcunitswap = cpu_to_le16(srcunit);
29515
29516+ pax_track_stack();
29517+
29518 eun = &part->EUNInfo[srcunit];
29519 xfer = &part->XferInfo[xferunit];
29520 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
29521diff -urNp linux-3.0.4/drivers/mtd/inftlcore.c linux-3.0.4/drivers/mtd/inftlcore.c
29522--- linux-3.0.4/drivers/mtd/inftlcore.c 2011-07-21 22:17:23.000000000 -0400
29523+++ linux-3.0.4/drivers/mtd/inftlcore.c 2011-08-23 21:48:14.000000000 -0400
29524@@ -259,6 +259,8 @@ static u16 INFTL_foldchain(struct INFTLr
29525 struct inftl_oob oob;
29526 size_t retlen;
29527
29528+ pax_track_stack();
29529+
29530 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
29531 "pending=%d)\n", inftl, thisVUC, pendingblock);
29532
29533diff -urNp linux-3.0.4/drivers/mtd/inftlmount.c linux-3.0.4/drivers/mtd/inftlmount.c
29534--- linux-3.0.4/drivers/mtd/inftlmount.c 2011-07-21 22:17:23.000000000 -0400
29535+++ linux-3.0.4/drivers/mtd/inftlmount.c 2011-08-23 21:48:14.000000000 -0400
29536@@ -53,6 +53,8 @@ static int find_boot_record(struct INFTL
29537 struct INFTLPartition *ip;
29538 size_t retlen;
29539
29540+ pax_track_stack();
29541+
29542 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
29543
29544 /*
29545diff -urNp linux-3.0.4/drivers/mtd/lpddr/qinfo_probe.c linux-3.0.4/drivers/mtd/lpddr/qinfo_probe.c
29546--- linux-3.0.4/drivers/mtd/lpddr/qinfo_probe.c 2011-07-21 22:17:23.000000000 -0400
29547+++ linux-3.0.4/drivers/mtd/lpddr/qinfo_probe.c 2011-08-23 21:48:14.000000000 -0400
29548@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
29549 {
29550 map_word pfow_val[4];
29551
29552+ pax_track_stack();
29553+
29554 /* Check identification string */
29555 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
29556 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
29557diff -urNp linux-3.0.4/drivers/mtd/mtdchar.c linux-3.0.4/drivers/mtd/mtdchar.c
29558--- linux-3.0.4/drivers/mtd/mtdchar.c 2011-07-21 22:17:23.000000000 -0400
29559+++ linux-3.0.4/drivers/mtd/mtdchar.c 2011-08-23 21:48:14.000000000 -0400
29560@@ -553,6 +553,8 @@ static int mtd_ioctl(struct file *file,
29561 u_long size;
29562 struct mtd_info_user info;
29563
29564+ pax_track_stack();
29565+
29566 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
29567
29568 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
29569diff -urNp linux-3.0.4/drivers/mtd/nand/denali.c linux-3.0.4/drivers/mtd/nand/denali.c
29570--- linux-3.0.4/drivers/mtd/nand/denali.c 2011-07-21 22:17:23.000000000 -0400
29571+++ linux-3.0.4/drivers/mtd/nand/denali.c 2011-08-23 21:47:55.000000000 -0400
29572@@ -26,6 +26,7 @@
29573 #include <linux/pci.h>
29574 #include <linux/mtd/mtd.h>
29575 #include <linux/module.h>
29576+#include <linux/slab.h>
29577
29578 #include "denali.h"
29579
29580diff -urNp linux-3.0.4/drivers/mtd/nftlcore.c linux-3.0.4/drivers/mtd/nftlcore.c
29581--- linux-3.0.4/drivers/mtd/nftlcore.c 2011-07-21 22:17:23.000000000 -0400
29582+++ linux-3.0.4/drivers/mtd/nftlcore.c 2011-08-23 21:48:14.000000000 -0400
29583@@ -264,6 +264,8 @@ static u16 NFTL_foldchain (struct NFTLre
29584 int inplace = 1;
29585 size_t retlen;
29586
29587+ pax_track_stack();
29588+
29589 memset(BlockMap, 0xff, sizeof(BlockMap));
29590 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
29591
29592diff -urNp linux-3.0.4/drivers/mtd/nftlmount.c linux-3.0.4/drivers/mtd/nftlmount.c
29593--- linux-3.0.4/drivers/mtd/nftlmount.c 2011-07-21 22:17:23.000000000 -0400
29594+++ linux-3.0.4/drivers/mtd/nftlmount.c 2011-08-23 21:48:14.000000000 -0400
29595@@ -24,6 +24,7 @@
29596 #include <asm/errno.h>
29597 #include <linux/delay.h>
29598 #include <linux/slab.h>
29599+#include <linux/sched.h>
29600 #include <linux/mtd/mtd.h>
29601 #include <linux/mtd/nand.h>
29602 #include <linux/mtd/nftl.h>
29603@@ -45,6 +46,8 @@ static int find_boot_record(struct NFTLr
29604 struct mtd_info *mtd = nftl->mbd.mtd;
29605 unsigned int i;
29606
29607+ pax_track_stack();
29608+
29609 /* Assume logical EraseSize == physical erasesize for starting the scan.
29610 We'll sort it out later if we find a MediaHeader which says otherwise */
29611 /* Actually, we won't. The new DiskOnChip driver has already scanned
29612diff -urNp linux-3.0.4/drivers/mtd/ubi/build.c linux-3.0.4/drivers/mtd/ubi/build.c
29613--- linux-3.0.4/drivers/mtd/ubi/build.c 2011-07-21 22:17:23.000000000 -0400
29614+++ linux-3.0.4/drivers/mtd/ubi/build.c 2011-08-23 21:47:55.000000000 -0400
29615@@ -1287,7 +1287,7 @@ module_exit(ubi_exit);
29616 static int __init bytes_str_to_int(const char *str)
29617 {
29618 char *endp;
29619- unsigned long result;
29620+ unsigned long result, scale = 1;
29621
29622 result = simple_strtoul(str, &endp, 0);
29623 if (str == endp || result >= INT_MAX) {
29624@@ -1298,11 +1298,11 @@ static int __init bytes_str_to_int(const
29625
29626 switch (*endp) {
29627 case 'G':
29628- result *= 1024;
29629+ scale *= 1024;
29630 case 'M':
29631- result *= 1024;
29632+ scale *= 1024;
29633 case 'K':
29634- result *= 1024;
29635+ scale *= 1024;
29636 if (endp[1] == 'i' && endp[2] == 'B')
29637 endp += 2;
29638 case '\0':
29639@@ -1313,7 +1313,13 @@ static int __init bytes_str_to_int(const
29640 return -EINVAL;
29641 }
29642
29643- return result;
29644+ if ((intoverflow_t)result*scale >= INT_MAX) {
29645+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
29646+ str);
29647+ return -EINVAL;
29648+ }
29649+
29650+ return result*scale;
29651 }
29652
29653 /**
29654diff -urNp linux-3.0.4/drivers/net/bna/bfa_ioc_ct.c linux-3.0.4/drivers/net/bna/bfa_ioc_ct.c
29655--- linux-3.0.4/drivers/net/bna/bfa_ioc_ct.c 2011-07-21 22:17:23.000000000 -0400
29656+++ linux-3.0.4/drivers/net/bna/bfa_ioc_ct.c 2011-08-23 21:47:55.000000000 -0400
29657@@ -48,7 +48,21 @@ static void bfa_ioc_ct_sync_ack(struct b
29658 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
29659 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
29660
29661-static struct bfa_ioc_hwif nw_hwif_ct;
29662+static struct bfa_ioc_hwif nw_hwif_ct = {
29663+ .ioc_pll_init = bfa_ioc_ct_pll_init,
29664+ .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
29665+ .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
29666+ .ioc_reg_init = bfa_ioc_ct_reg_init,
29667+ .ioc_map_port = bfa_ioc_ct_map_port,
29668+ .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
29669+ .ioc_notify_fail = bfa_ioc_ct_notify_fail,
29670+ .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
29671+ .ioc_sync_start = bfa_ioc_ct_sync_start,
29672+ .ioc_sync_join = bfa_ioc_ct_sync_join,
29673+ .ioc_sync_leave = bfa_ioc_ct_sync_leave,
29674+ .ioc_sync_ack = bfa_ioc_ct_sync_ack,
29675+ .ioc_sync_complete = bfa_ioc_ct_sync_complete
29676+};
29677
29678 /**
29679 * Called from bfa_ioc_attach() to map asic specific calls.
29680@@ -56,20 +70,6 @@ static struct bfa_ioc_hwif nw_hwif_ct;
29681 void
29682 bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
29683 {
29684- nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
29685- nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
29686- nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
29687- nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
29688- nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
29689- nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
29690- nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
29691- nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
29692- nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
29693- nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
29694- nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
29695- nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
29696- nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
29697-
29698 ioc->ioc_hwif = &nw_hwif_ct;
29699 }
29700
29701diff -urNp linux-3.0.4/drivers/net/bna/bnad.c linux-3.0.4/drivers/net/bna/bnad.c
29702--- linux-3.0.4/drivers/net/bna/bnad.c 2011-07-21 22:17:23.000000000 -0400
29703+++ linux-3.0.4/drivers/net/bna/bnad.c 2011-08-23 21:47:55.000000000 -0400
29704@@ -1681,7 +1681,14 @@ bnad_setup_tx(struct bnad *bnad, uint tx
29705 struct bna_intr_info *intr_info =
29706 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
29707 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
29708- struct bna_tx_event_cbfn tx_cbfn;
29709+ static struct bna_tx_event_cbfn tx_cbfn = {
29710+ /* Initialize the tx event handlers */
29711+ .tcb_setup_cbfn = bnad_cb_tcb_setup,
29712+ .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
29713+ .tx_stall_cbfn = bnad_cb_tx_stall,
29714+ .tx_resume_cbfn = bnad_cb_tx_resume,
29715+ .tx_cleanup_cbfn = bnad_cb_tx_cleanup
29716+ };
29717 struct bna_tx *tx;
29718 unsigned long flags;
29719
29720@@ -1690,13 +1697,6 @@ bnad_setup_tx(struct bnad *bnad, uint tx
29721 tx_config->txq_depth = bnad->txq_depth;
29722 tx_config->tx_type = BNA_TX_T_REGULAR;
29723
29724- /* Initialize the tx event handlers */
29725- tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
29726- tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
29727- tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
29728- tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
29729- tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
29730-
29731 /* Get BNA's resource requirement for one tx object */
29732 spin_lock_irqsave(&bnad->bna_lock, flags);
29733 bna_tx_res_req(bnad->num_txq_per_tx,
29734@@ -1827,21 +1827,21 @@ bnad_setup_rx(struct bnad *bnad, uint rx
29735 struct bna_intr_info *intr_info =
29736 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
29737 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
29738- struct bna_rx_event_cbfn rx_cbfn;
29739+ static struct bna_rx_event_cbfn rx_cbfn = {
29740+ /* Initialize the Rx event handlers */
29741+ .rcb_setup_cbfn = bnad_cb_rcb_setup,
29742+ .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
29743+ .ccb_setup_cbfn = bnad_cb_ccb_setup,
29744+ .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
29745+ .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
29746+ .rx_post_cbfn = bnad_cb_rx_post
29747+ };
29748 struct bna_rx *rx;
29749 unsigned long flags;
29750
29751 /* Initialize the Rx object configuration */
29752 bnad_init_rx_config(bnad, rx_config);
29753
29754- /* Initialize the Rx event handlers */
29755- rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
29756- rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
29757- rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
29758- rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
29759- rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
29760- rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
29761-
29762 /* Get BNA's resource requirement for one Rx object */
29763 spin_lock_irqsave(&bnad->bna_lock, flags);
29764 bna_rx_res_req(rx_config, res_info);
29765diff -urNp linux-3.0.4/drivers/net/bnx2.c linux-3.0.4/drivers/net/bnx2.c
29766--- linux-3.0.4/drivers/net/bnx2.c 2011-07-21 22:17:23.000000000 -0400
29767+++ linux-3.0.4/drivers/net/bnx2.c 2011-08-23 21:48:14.000000000 -0400
29768@@ -5828,6 +5828,8 @@ bnx2_test_nvram(struct bnx2 *bp)
29769 int rc = 0;
29770 u32 magic, csum;
29771
29772+ pax_track_stack();
29773+
29774 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
29775 goto test_nvram_done;
29776
29777diff -urNp linux-3.0.4/drivers/net/bnx2x/bnx2x_ethtool.c linux-3.0.4/drivers/net/bnx2x/bnx2x_ethtool.c
29778--- linux-3.0.4/drivers/net/bnx2x/bnx2x_ethtool.c 2011-07-21 22:17:23.000000000 -0400
29779+++ linux-3.0.4/drivers/net/bnx2x/bnx2x_ethtool.c 2011-08-23 21:48:14.000000000 -0400
29780@@ -1705,6 +1705,8 @@ static int bnx2x_test_nvram(struct bnx2x
29781 int i, rc;
29782 u32 magic, crc;
29783
29784+ pax_track_stack();
29785+
29786 if (BP_NOMCP(bp))
29787 return 0;
29788
29789diff -urNp linux-3.0.4/drivers/net/cxgb3/l2t.h linux-3.0.4/drivers/net/cxgb3/l2t.h
29790--- linux-3.0.4/drivers/net/cxgb3/l2t.h 2011-07-21 22:17:23.000000000 -0400
29791+++ linux-3.0.4/drivers/net/cxgb3/l2t.h 2011-08-23 21:47:55.000000000 -0400
29792@@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)
29793 */
29794 struct l2t_skb_cb {
29795 arp_failure_handler_func arp_failure_handler;
29796-};
29797+} __no_const;
29798
29799 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
29800
29801diff -urNp linux-3.0.4/drivers/net/cxgb4/cxgb4_main.c linux-3.0.4/drivers/net/cxgb4/cxgb4_main.c
29802--- linux-3.0.4/drivers/net/cxgb4/cxgb4_main.c 2011-07-21 22:17:23.000000000 -0400
29803+++ linux-3.0.4/drivers/net/cxgb4/cxgb4_main.c 2011-08-23 21:48:14.000000000 -0400
29804@@ -3396,6 +3396,8 @@ static int __devinit enable_msix(struct
29805 unsigned int nchan = adap->params.nports;
29806 struct msix_entry entries[MAX_INGQ + 1];
29807
29808+ pax_track_stack();
29809+
29810 for (i = 0; i < ARRAY_SIZE(entries); ++i)
29811 entries[i].entry = i;
29812
29813diff -urNp linux-3.0.4/drivers/net/cxgb4/t4_hw.c linux-3.0.4/drivers/net/cxgb4/t4_hw.c
29814--- linux-3.0.4/drivers/net/cxgb4/t4_hw.c 2011-07-21 22:17:23.000000000 -0400
29815+++ linux-3.0.4/drivers/net/cxgb4/t4_hw.c 2011-08-23 21:48:14.000000000 -0400
29816@@ -362,6 +362,8 @@ static int get_vpd_params(struct adapter
29817 u8 vpd[VPD_LEN], csum;
29818 unsigned int vpdr_len, kw_offset, id_len;
29819
29820+ pax_track_stack();
29821+
29822 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
29823 if (ret < 0)
29824 return ret;
29825diff -urNp linux-3.0.4/drivers/net/e1000e/82571.c linux-3.0.4/drivers/net/e1000e/82571.c
29826--- linux-3.0.4/drivers/net/e1000e/82571.c 2011-07-21 22:17:23.000000000 -0400
29827+++ linux-3.0.4/drivers/net/e1000e/82571.c 2011-08-23 21:47:55.000000000 -0400
29828@@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(s
29829 {
29830 struct e1000_hw *hw = &adapter->hw;
29831 struct e1000_mac_info *mac = &hw->mac;
29832- struct e1000_mac_operations *func = &mac->ops;
29833+ e1000_mac_operations_no_const *func = &mac->ops;
29834 u32 swsm = 0;
29835 u32 swsm2 = 0;
29836 bool force_clear_smbi = false;
29837diff -urNp linux-3.0.4/drivers/net/e1000e/es2lan.c linux-3.0.4/drivers/net/e1000e/es2lan.c
29838--- linux-3.0.4/drivers/net/e1000e/es2lan.c 2011-07-21 22:17:23.000000000 -0400
29839+++ linux-3.0.4/drivers/net/e1000e/es2lan.c 2011-08-23 21:47:55.000000000 -0400
29840@@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es
29841 {
29842 struct e1000_hw *hw = &adapter->hw;
29843 struct e1000_mac_info *mac = &hw->mac;
29844- struct e1000_mac_operations *func = &mac->ops;
29845+ e1000_mac_operations_no_const *func = &mac->ops;
29846
29847 /* Set media type */
29848 switch (adapter->pdev->device) {
29849diff -urNp linux-3.0.4/drivers/net/e1000e/hw.h linux-3.0.4/drivers/net/e1000e/hw.h
29850--- linux-3.0.4/drivers/net/e1000e/hw.h 2011-07-21 22:17:23.000000000 -0400
29851+++ linux-3.0.4/drivers/net/e1000e/hw.h 2011-08-23 21:47:55.000000000 -0400
29852@@ -776,6 +776,7 @@ struct e1000_mac_operations {
29853 void (*write_vfta)(struct e1000_hw *, u32, u32);
29854 s32 (*read_mac_addr)(struct e1000_hw *);
29855 };
29856+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
29857
29858 /* Function pointers for the PHY. */
29859 struct e1000_phy_operations {
29860@@ -799,6 +800,7 @@ struct e1000_phy_operations {
29861 void (*power_up)(struct e1000_hw *);
29862 void (*power_down)(struct e1000_hw *);
29863 };
29864+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
29865
29866 /* Function pointers for the NVM. */
29867 struct e1000_nvm_operations {
29868@@ -810,9 +812,10 @@ struct e1000_nvm_operations {
29869 s32 (*validate)(struct e1000_hw *);
29870 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
29871 };
29872+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
29873
29874 struct e1000_mac_info {
29875- struct e1000_mac_operations ops;
29876+ e1000_mac_operations_no_const ops;
29877 u8 addr[ETH_ALEN];
29878 u8 perm_addr[ETH_ALEN];
29879
29880@@ -853,7 +856,7 @@ struct e1000_mac_info {
29881 };
29882
29883 struct e1000_phy_info {
29884- struct e1000_phy_operations ops;
29885+ e1000_phy_operations_no_const ops;
29886
29887 enum e1000_phy_type type;
29888
29889@@ -887,7 +890,7 @@ struct e1000_phy_info {
29890 };
29891
29892 struct e1000_nvm_info {
29893- struct e1000_nvm_operations ops;
29894+ e1000_nvm_operations_no_const ops;
29895
29896 enum e1000_nvm_type type;
29897 enum e1000_nvm_override override;
29898diff -urNp linux-3.0.4/drivers/net/hamradio/6pack.c linux-3.0.4/drivers/net/hamradio/6pack.c
29899--- linux-3.0.4/drivers/net/hamradio/6pack.c 2011-07-21 22:17:23.000000000 -0400
29900+++ linux-3.0.4/drivers/net/hamradio/6pack.c 2011-08-23 21:48:14.000000000 -0400
29901@@ -463,6 +463,8 @@ static void sixpack_receive_buf(struct t
29902 unsigned char buf[512];
29903 int count1;
29904
29905+ pax_track_stack();
29906+
29907 if (!count)
29908 return;
29909
29910diff -urNp linux-3.0.4/drivers/net/igb/e1000_hw.h linux-3.0.4/drivers/net/igb/e1000_hw.h
29911--- linux-3.0.4/drivers/net/igb/e1000_hw.h 2011-07-21 22:17:23.000000000 -0400
29912+++ linux-3.0.4/drivers/net/igb/e1000_hw.h 2011-08-23 21:47:55.000000000 -0400
29913@@ -314,6 +314,7 @@ struct e1000_mac_operations {
29914 s32 (*read_mac_addr)(struct e1000_hw *);
29915 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
29916 };
29917+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
29918
29919 struct e1000_phy_operations {
29920 s32 (*acquire)(struct e1000_hw *);
29921@@ -330,6 +331,7 @@ struct e1000_phy_operations {
29922 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
29923 s32 (*write_reg)(struct e1000_hw *, u32, u16);
29924 };
29925+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
29926
29927 struct e1000_nvm_operations {
29928 s32 (*acquire)(struct e1000_hw *);
29929@@ -339,6 +341,7 @@ struct e1000_nvm_operations {
29930 s32 (*update)(struct e1000_hw *);
29931 s32 (*validate)(struct e1000_hw *);
29932 };
29933+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
29934
29935 struct e1000_info {
29936 s32 (*get_invariants)(struct e1000_hw *);
29937@@ -350,7 +353,7 @@ struct e1000_info {
29938 extern const struct e1000_info e1000_82575_info;
29939
29940 struct e1000_mac_info {
29941- struct e1000_mac_operations ops;
29942+ e1000_mac_operations_no_const ops;
29943
29944 u8 addr[6];
29945 u8 perm_addr[6];
29946@@ -388,7 +391,7 @@ struct e1000_mac_info {
29947 };
29948
29949 struct e1000_phy_info {
29950- struct e1000_phy_operations ops;
29951+ e1000_phy_operations_no_const ops;
29952
29953 enum e1000_phy_type type;
29954
29955@@ -423,7 +426,7 @@ struct e1000_phy_info {
29956 };
29957
29958 struct e1000_nvm_info {
29959- struct e1000_nvm_operations ops;
29960+ e1000_nvm_operations_no_const ops;
29961 enum e1000_nvm_type type;
29962 enum e1000_nvm_override override;
29963
29964@@ -468,6 +471,7 @@ struct e1000_mbx_operations {
29965 s32 (*check_for_ack)(struct e1000_hw *, u16);
29966 s32 (*check_for_rst)(struct e1000_hw *, u16);
29967 };
29968+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
29969
29970 struct e1000_mbx_stats {
29971 u32 msgs_tx;
29972@@ -479,7 +483,7 @@ struct e1000_mbx_stats {
29973 };
29974
29975 struct e1000_mbx_info {
29976- struct e1000_mbx_operations ops;
29977+ e1000_mbx_operations_no_const ops;
29978 struct e1000_mbx_stats stats;
29979 u32 timeout;
29980 u32 usec_delay;
29981diff -urNp linux-3.0.4/drivers/net/igbvf/vf.h linux-3.0.4/drivers/net/igbvf/vf.h
29982--- linux-3.0.4/drivers/net/igbvf/vf.h 2011-07-21 22:17:23.000000000 -0400
29983+++ linux-3.0.4/drivers/net/igbvf/vf.h 2011-08-23 21:47:55.000000000 -0400
29984@@ -189,9 +189,10 @@ struct e1000_mac_operations {
29985 s32 (*read_mac_addr)(struct e1000_hw *);
29986 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
29987 };
29988+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
29989
29990 struct e1000_mac_info {
29991- struct e1000_mac_operations ops;
29992+ e1000_mac_operations_no_const ops;
29993 u8 addr[6];
29994 u8 perm_addr[6];
29995
29996@@ -213,6 +214,7 @@ struct e1000_mbx_operations {
29997 s32 (*check_for_ack)(struct e1000_hw *);
29998 s32 (*check_for_rst)(struct e1000_hw *);
29999 };
30000+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
30001
30002 struct e1000_mbx_stats {
30003 u32 msgs_tx;
30004@@ -224,7 +226,7 @@ struct e1000_mbx_stats {
30005 };
30006
30007 struct e1000_mbx_info {
30008- struct e1000_mbx_operations ops;
30009+ e1000_mbx_operations_no_const ops;
30010 struct e1000_mbx_stats stats;
30011 u32 timeout;
30012 u32 usec_delay;
30013diff -urNp linux-3.0.4/drivers/net/ixgb/ixgb_main.c linux-3.0.4/drivers/net/ixgb/ixgb_main.c
30014--- linux-3.0.4/drivers/net/ixgb/ixgb_main.c 2011-07-21 22:17:23.000000000 -0400
30015+++ linux-3.0.4/drivers/net/ixgb/ixgb_main.c 2011-08-23 21:48:14.000000000 -0400
30016@@ -1070,6 +1070,8 @@ ixgb_set_multi(struct net_device *netdev
30017 u32 rctl;
30018 int i;
30019
30020+ pax_track_stack();
30021+
30022 /* Check for Promiscuous and All Multicast modes */
30023
30024 rctl = IXGB_READ_REG(hw, RCTL);
30025diff -urNp linux-3.0.4/drivers/net/ixgb/ixgb_param.c linux-3.0.4/drivers/net/ixgb/ixgb_param.c
30026--- linux-3.0.4/drivers/net/ixgb/ixgb_param.c 2011-07-21 22:17:23.000000000 -0400
30027+++ linux-3.0.4/drivers/net/ixgb/ixgb_param.c 2011-08-23 21:48:14.000000000 -0400
30028@@ -261,6 +261,9 @@ void __devinit
30029 ixgb_check_options(struct ixgb_adapter *adapter)
30030 {
30031 int bd = adapter->bd_number;
30032+
30033+ pax_track_stack();
30034+
30035 if (bd >= IXGB_MAX_NIC) {
30036 pr_notice("Warning: no configuration for board #%i\n", bd);
30037 pr_notice("Using defaults for all values\n");
30038diff -urNp linux-3.0.4/drivers/net/ixgbe/ixgbe_type.h linux-3.0.4/drivers/net/ixgbe/ixgbe_type.h
30039--- linux-3.0.4/drivers/net/ixgbe/ixgbe_type.h 2011-07-21 22:17:23.000000000 -0400
30040+++ linux-3.0.4/drivers/net/ixgbe/ixgbe_type.h 2011-08-23 21:47:55.000000000 -0400
30041@@ -2584,6 +2584,7 @@ struct ixgbe_eeprom_operations {
30042 s32 (*update_checksum)(struct ixgbe_hw *);
30043 u16 (*calc_checksum)(struct ixgbe_hw *);
30044 };
30045+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
30046
30047 struct ixgbe_mac_operations {
30048 s32 (*init_hw)(struct ixgbe_hw *);
30049@@ -2639,6 +2640,7 @@ struct ixgbe_mac_operations {
30050 /* Flow Control */
30051 s32 (*fc_enable)(struct ixgbe_hw *, s32);
30052 };
30053+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
30054
30055 struct ixgbe_phy_operations {
30056 s32 (*identify)(struct ixgbe_hw *);
30057@@ -2658,9 +2660,10 @@ struct ixgbe_phy_operations {
30058 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
30059 s32 (*check_overtemp)(struct ixgbe_hw *);
30060 };
30061+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
30062
30063 struct ixgbe_eeprom_info {
30064- struct ixgbe_eeprom_operations ops;
30065+ ixgbe_eeprom_operations_no_const ops;
30066 enum ixgbe_eeprom_type type;
30067 u32 semaphore_delay;
30068 u16 word_size;
30069@@ -2670,7 +2673,7 @@ struct ixgbe_eeprom_info {
30070
30071 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
30072 struct ixgbe_mac_info {
30073- struct ixgbe_mac_operations ops;
30074+ ixgbe_mac_operations_no_const ops;
30075 enum ixgbe_mac_type type;
30076 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
30077 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
30078@@ -2698,7 +2701,7 @@ struct ixgbe_mac_info {
30079 };
30080
30081 struct ixgbe_phy_info {
30082- struct ixgbe_phy_operations ops;
30083+ ixgbe_phy_operations_no_const ops;
30084 struct mdio_if_info mdio;
30085 enum ixgbe_phy_type type;
30086 u32 id;
30087@@ -2726,6 +2729,7 @@ struct ixgbe_mbx_operations {
30088 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
30089 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
30090 };
30091+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
30092
30093 struct ixgbe_mbx_stats {
30094 u32 msgs_tx;
30095@@ -2737,7 +2741,7 @@ struct ixgbe_mbx_stats {
30096 };
30097
30098 struct ixgbe_mbx_info {
30099- struct ixgbe_mbx_operations ops;
30100+ ixgbe_mbx_operations_no_const ops;
30101 struct ixgbe_mbx_stats stats;
30102 u32 timeout;
30103 u32 usec_delay;
30104diff -urNp linux-3.0.4/drivers/net/ixgbevf/vf.h linux-3.0.4/drivers/net/ixgbevf/vf.h
30105--- linux-3.0.4/drivers/net/ixgbevf/vf.h 2011-07-21 22:17:23.000000000 -0400
30106+++ linux-3.0.4/drivers/net/ixgbevf/vf.h 2011-08-23 21:47:55.000000000 -0400
30107@@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
30108 s32 (*clear_vfta)(struct ixgbe_hw *);
30109 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
30110 };
30111+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
30112
30113 enum ixgbe_mac_type {
30114 ixgbe_mac_unknown = 0,
30115@@ -79,7 +80,7 @@ enum ixgbe_mac_type {
30116 };
30117
30118 struct ixgbe_mac_info {
30119- struct ixgbe_mac_operations ops;
30120+ ixgbe_mac_operations_no_const ops;
30121 u8 addr[6];
30122 u8 perm_addr[6];
30123
30124@@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
30125 s32 (*check_for_ack)(struct ixgbe_hw *);
30126 s32 (*check_for_rst)(struct ixgbe_hw *);
30127 };
30128+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
30129
30130 struct ixgbe_mbx_stats {
30131 u32 msgs_tx;
30132@@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
30133 };
30134
30135 struct ixgbe_mbx_info {
30136- struct ixgbe_mbx_operations ops;
30137+ ixgbe_mbx_operations_no_const ops;
30138 struct ixgbe_mbx_stats stats;
30139 u32 timeout;
30140 u32 udelay;
30141diff -urNp linux-3.0.4/drivers/net/ksz884x.c linux-3.0.4/drivers/net/ksz884x.c
30142--- linux-3.0.4/drivers/net/ksz884x.c 2011-07-21 22:17:23.000000000 -0400
30143+++ linux-3.0.4/drivers/net/ksz884x.c 2011-08-23 21:48:14.000000000 -0400
30144@@ -6534,6 +6534,8 @@ static void netdev_get_ethtool_stats(str
30145 int rc;
30146 u64 counter[TOTAL_PORT_COUNTER_NUM];
30147
30148+ pax_track_stack();
30149+
30150 mutex_lock(&hw_priv->lock);
30151 n = SWITCH_PORT_NUM;
30152 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
30153diff -urNp linux-3.0.4/drivers/net/mlx4/main.c linux-3.0.4/drivers/net/mlx4/main.c
30154--- linux-3.0.4/drivers/net/mlx4/main.c 2011-07-21 22:17:23.000000000 -0400
30155+++ linux-3.0.4/drivers/net/mlx4/main.c 2011-08-23 21:48:14.000000000 -0400
30156@@ -40,6 +40,7 @@
30157 #include <linux/dma-mapping.h>
30158 #include <linux/slab.h>
30159 #include <linux/io-mapping.h>
30160+#include <linux/sched.h>
30161
30162 #include <linux/mlx4/device.h>
30163 #include <linux/mlx4/doorbell.h>
30164@@ -764,6 +765,8 @@ static int mlx4_init_hca(struct mlx4_dev
30165 u64 icm_size;
30166 int err;
30167
30168+ pax_track_stack();
30169+
30170 err = mlx4_QUERY_FW(dev);
30171 if (err) {
30172 if (err == -EACCES)
30173diff -urNp linux-3.0.4/drivers/net/niu.c linux-3.0.4/drivers/net/niu.c
30174--- linux-3.0.4/drivers/net/niu.c 2011-09-02 18:11:21.000000000 -0400
30175+++ linux-3.0.4/drivers/net/niu.c 2011-08-23 21:48:14.000000000 -0400
30176@@ -9056,6 +9056,8 @@ static void __devinit niu_try_msix(struc
30177 int i, num_irqs, err;
30178 u8 first_ldg;
30179
30180+ pax_track_stack();
30181+
30182 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
30183 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
30184 ldg_num_map[i] = first_ldg + i;
30185diff -urNp linux-3.0.4/drivers/net/pcnet32.c linux-3.0.4/drivers/net/pcnet32.c
30186--- linux-3.0.4/drivers/net/pcnet32.c 2011-07-21 22:17:23.000000000 -0400
30187+++ linux-3.0.4/drivers/net/pcnet32.c 2011-08-23 21:47:55.000000000 -0400
30188@@ -82,7 +82,7 @@ static int cards_found;
30189 /*
30190 * VLB I/O addresses
30191 */
30192-static unsigned int pcnet32_portlist[] __initdata =
30193+static unsigned int pcnet32_portlist[] __devinitdata =
30194 { 0x300, 0x320, 0x340, 0x360, 0 };
30195
30196 static int pcnet32_debug;
30197@@ -270,7 +270,7 @@ struct pcnet32_private {
30198 struct sk_buff **rx_skbuff;
30199 dma_addr_t *tx_dma_addr;
30200 dma_addr_t *rx_dma_addr;
30201- struct pcnet32_access a;
30202+ struct pcnet32_access *a;
30203 spinlock_t lock; /* Guard lock */
30204 unsigned int cur_rx, cur_tx; /* The next free ring entry */
30205 unsigned int rx_ring_size; /* current rx ring size */
30206@@ -460,9 +460,9 @@ static void pcnet32_netif_start(struct n
30207 u16 val;
30208
30209 netif_wake_queue(dev);
30210- val = lp->a.read_csr(ioaddr, CSR3);
30211+ val = lp->a->read_csr(ioaddr, CSR3);
30212 val &= 0x00ff;
30213- lp->a.write_csr(ioaddr, CSR3, val);
30214+ lp->a->write_csr(ioaddr, CSR3, val);
30215 napi_enable(&lp->napi);
30216 }
30217
30218@@ -730,7 +730,7 @@ static u32 pcnet32_get_link(struct net_d
30219 r = mii_link_ok(&lp->mii_if);
30220 } else if (lp->chip_version >= PCNET32_79C970A) {
30221 ulong ioaddr = dev->base_addr; /* card base I/O address */
30222- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
30223+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
30224 } else { /* can not detect link on really old chips */
30225 r = 1;
30226 }
30227@@ -792,7 +792,7 @@ static int pcnet32_set_ringparam(struct
30228 pcnet32_netif_stop(dev);
30229
30230 spin_lock_irqsave(&lp->lock, flags);
30231- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
30232+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
30233
30234 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
30235
30236@@ -868,7 +868,7 @@ static void pcnet32_ethtool_test(struct
30237 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
30238 {
30239 struct pcnet32_private *lp = netdev_priv(dev);
30240- struct pcnet32_access *a = &lp->a; /* access to registers */
30241+ struct pcnet32_access *a = lp->a; /* access to registers */
30242 ulong ioaddr = dev->base_addr; /* card base I/O address */
30243 struct sk_buff *skb; /* sk buff */
30244 int x, i; /* counters */
30245@@ -888,21 +888,21 @@ static int pcnet32_loopback_test(struct
30246 pcnet32_netif_stop(dev);
30247
30248 spin_lock_irqsave(&lp->lock, flags);
30249- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
30250+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
30251
30252 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
30253
30254 /* Reset the PCNET32 */
30255- lp->a.reset(ioaddr);
30256- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
30257+ lp->a->reset(ioaddr);
30258+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
30259
30260 /* switch pcnet32 to 32bit mode */
30261- lp->a.write_bcr(ioaddr, 20, 2);
30262+ lp->a->write_bcr(ioaddr, 20, 2);
30263
30264 /* purge & init rings but don't actually restart */
30265 pcnet32_restart(dev, 0x0000);
30266
30267- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
30268+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
30269
30270 /* Initialize Transmit buffers. */
30271 size = data_len + 15;
30272@@ -947,10 +947,10 @@ static int pcnet32_loopback_test(struct
30273
30274 /* set int loopback in CSR15 */
30275 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
30276- lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
30277+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
30278
30279 teststatus = cpu_to_le16(0x8000);
30280- lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
30281+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
30282
30283 /* Check status of descriptors */
30284 for (x = 0; x < numbuffs; x++) {
30285@@ -969,7 +969,7 @@ static int pcnet32_loopback_test(struct
30286 }
30287 }
30288
30289- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
30290+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
30291 wmb();
30292 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
30293 netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
30294@@ -1015,7 +1015,7 @@ clean_up:
30295 pcnet32_restart(dev, CSR0_NORMAL);
30296 } else {
30297 pcnet32_purge_rx_ring(dev);
30298- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
30299+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
30300 }
30301 spin_unlock_irqrestore(&lp->lock, flags);
30302
30303@@ -1026,7 +1026,7 @@ static int pcnet32_set_phys_id(struct ne
30304 enum ethtool_phys_id_state state)
30305 {
30306 struct pcnet32_private *lp = netdev_priv(dev);
30307- struct pcnet32_access *a = &lp->a;
30308+ struct pcnet32_access *a = lp->a;
30309 ulong ioaddr = dev->base_addr;
30310 unsigned long flags;
30311 int i;
30312@@ -1067,7 +1067,7 @@ static int pcnet32_suspend(struct net_de
30313 {
30314 int csr5;
30315 struct pcnet32_private *lp = netdev_priv(dev);
30316- struct pcnet32_access *a = &lp->a;
30317+ struct pcnet32_access *a = lp->a;
30318 ulong ioaddr = dev->base_addr;
30319 int ticks;
30320
30321@@ -1324,8 +1324,8 @@ static int pcnet32_poll(struct napi_stru
30322 spin_lock_irqsave(&lp->lock, flags);
30323 if (pcnet32_tx(dev)) {
30324 /* reset the chip to clear the error condition, then restart */
30325- lp->a.reset(ioaddr);
30326- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
30327+ lp->a->reset(ioaddr);
30328+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
30329 pcnet32_restart(dev, CSR0_START);
30330 netif_wake_queue(dev);
30331 }
30332@@ -1337,12 +1337,12 @@ static int pcnet32_poll(struct napi_stru
30333 __napi_complete(napi);
30334
30335 /* clear interrupt masks */
30336- val = lp->a.read_csr(ioaddr, CSR3);
30337+ val = lp->a->read_csr(ioaddr, CSR3);
30338 val &= 0x00ff;
30339- lp->a.write_csr(ioaddr, CSR3, val);
30340+ lp->a->write_csr(ioaddr, CSR3, val);
30341
30342 /* Set interrupt enable. */
30343- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
30344+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
30345
30346 spin_unlock_irqrestore(&lp->lock, flags);
30347 }
30348@@ -1365,7 +1365,7 @@ static void pcnet32_get_regs(struct net_
30349 int i, csr0;
30350 u16 *buff = ptr;
30351 struct pcnet32_private *lp = netdev_priv(dev);
30352- struct pcnet32_access *a = &lp->a;
30353+ struct pcnet32_access *a = lp->a;
30354 ulong ioaddr = dev->base_addr;
30355 unsigned long flags;
30356
30357@@ -1401,9 +1401,9 @@ static void pcnet32_get_regs(struct net_
30358 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
30359 if (lp->phymask & (1 << j)) {
30360 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
30361- lp->a.write_bcr(ioaddr, 33,
30362+ lp->a->write_bcr(ioaddr, 33,
30363 (j << 5) | i);
30364- *buff++ = lp->a.read_bcr(ioaddr, 34);
30365+ *buff++ = lp->a->read_bcr(ioaddr, 34);
30366 }
30367 }
30368 }
30369@@ -1785,7 +1785,7 @@ pcnet32_probe1(unsigned long ioaddr, int
30370 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
30371 lp->options |= PCNET32_PORT_FD;
30372
30373- lp->a = *a;
30374+ lp->a = a;
30375
30376 /* prior to register_netdev, dev->name is not yet correct */
30377 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
30378@@ -1844,7 +1844,7 @@ pcnet32_probe1(unsigned long ioaddr, int
30379 if (lp->mii) {
30380 /* lp->phycount and lp->phymask are set to 0 by memset above */
30381
30382- lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
30383+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
30384 /* scan for PHYs */
30385 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
30386 unsigned short id1, id2;
30387@@ -1864,7 +1864,7 @@ pcnet32_probe1(unsigned long ioaddr, int
30388 pr_info("Found PHY %04x:%04x at address %d\n",
30389 id1, id2, i);
30390 }
30391- lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
30392+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
30393 if (lp->phycount > 1)
30394 lp->options |= PCNET32_PORT_MII;
30395 }
30396@@ -2020,10 +2020,10 @@ static int pcnet32_open(struct net_devic
30397 }
30398
30399 /* Reset the PCNET32 */
30400- lp->a.reset(ioaddr);
30401+ lp->a->reset(ioaddr);
30402
30403 /* switch pcnet32 to 32bit mode */
30404- lp->a.write_bcr(ioaddr, 20, 2);
30405+ lp->a->write_bcr(ioaddr, 20, 2);
30406
30407 netif_printk(lp, ifup, KERN_DEBUG, dev,
30408 "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
30409@@ -2032,14 +2032,14 @@ static int pcnet32_open(struct net_devic
30410 (u32) (lp->init_dma_addr));
30411
30412 /* set/reset autoselect bit */
30413- val = lp->a.read_bcr(ioaddr, 2) & ~2;
30414+ val = lp->a->read_bcr(ioaddr, 2) & ~2;
30415 if (lp->options & PCNET32_PORT_ASEL)
30416 val |= 2;
30417- lp->a.write_bcr(ioaddr, 2, val);
30418+ lp->a->write_bcr(ioaddr, 2, val);
30419
30420 /* handle full duplex setting */
30421 if (lp->mii_if.full_duplex) {
30422- val = lp->a.read_bcr(ioaddr, 9) & ~3;
30423+ val = lp->a->read_bcr(ioaddr, 9) & ~3;
30424 if (lp->options & PCNET32_PORT_FD) {
30425 val |= 1;
30426 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
30427@@ -2049,14 +2049,14 @@ static int pcnet32_open(struct net_devic
30428 if (lp->chip_version == 0x2627)
30429 val |= 3;
30430 }
30431- lp->a.write_bcr(ioaddr, 9, val);
30432+ lp->a->write_bcr(ioaddr, 9, val);
30433 }
30434
30435 /* set/reset GPSI bit in test register */
30436- val = lp->a.read_csr(ioaddr, 124) & ~0x10;
30437+ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
30438 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
30439 val |= 0x10;
30440- lp->a.write_csr(ioaddr, 124, val);
30441+ lp->a->write_csr(ioaddr, 124, val);
30442
30443 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
30444 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
30445@@ -2075,24 +2075,24 @@ static int pcnet32_open(struct net_devic
30446 * duplex, and/or enable auto negotiation, and clear DANAS
30447 */
30448 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
30449- lp->a.write_bcr(ioaddr, 32,
30450- lp->a.read_bcr(ioaddr, 32) | 0x0080);
30451+ lp->a->write_bcr(ioaddr, 32,
30452+ lp->a->read_bcr(ioaddr, 32) | 0x0080);
30453 /* disable Auto Negotiation, set 10Mpbs, HD */
30454- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
30455+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
30456 if (lp->options & PCNET32_PORT_FD)
30457 val |= 0x10;
30458 if (lp->options & PCNET32_PORT_100)
30459 val |= 0x08;
30460- lp->a.write_bcr(ioaddr, 32, val);
30461+ lp->a->write_bcr(ioaddr, 32, val);
30462 } else {
30463 if (lp->options & PCNET32_PORT_ASEL) {
30464- lp->a.write_bcr(ioaddr, 32,
30465- lp->a.read_bcr(ioaddr,
30466+ lp->a->write_bcr(ioaddr, 32,
30467+ lp->a->read_bcr(ioaddr,
30468 32) | 0x0080);
30469 /* enable auto negotiate, setup, disable fd */
30470- val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
30471+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
30472 val |= 0x20;
30473- lp->a.write_bcr(ioaddr, 32, val);
30474+ lp->a->write_bcr(ioaddr, 32, val);
30475 }
30476 }
30477 } else {
30478@@ -2105,10 +2105,10 @@ static int pcnet32_open(struct net_devic
30479 * There is really no good other way to handle multiple PHYs
30480 * other than turning off all automatics
30481 */
30482- val = lp->a.read_bcr(ioaddr, 2);
30483- lp->a.write_bcr(ioaddr, 2, val & ~2);
30484- val = lp->a.read_bcr(ioaddr, 32);
30485- lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
30486+ val = lp->a->read_bcr(ioaddr, 2);
30487+ lp->a->write_bcr(ioaddr, 2, val & ~2);
30488+ val = lp->a->read_bcr(ioaddr, 32);
30489+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
30490
30491 if (!(lp->options & PCNET32_PORT_ASEL)) {
30492 /* setup ecmd */
30493@@ -2118,7 +2118,7 @@ static int pcnet32_open(struct net_devic
30494 ethtool_cmd_speed_set(&ecmd,
30495 (lp->options & PCNET32_PORT_100) ?
30496 SPEED_100 : SPEED_10);
30497- bcr9 = lp->a.read_bcr(ioaddr, 9);
30498+ bcr9 = lp->a->read_bcr(ioaddr, 9);
30499
30500 if (lp->options & PCNET32_PORT_FD) {
30501 ecmd.duplex = DUPLEX_FULL;
30502@@ -2127,7 +2127,7 @@ static int pcnet32_open(struct net_devic
30503 ecmd.duplex = DUPLEX_HALF;
30504 bcr9 |= ~(1 << 0);
30505 }
30506- lp->a.write_bcr(ioaddr, 9, bcr9);
30507+ lp->a->write_bcr(ioaddr, 9, bcr9);
30508 }
30509
30510 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
30511@@ -2158,9 +2158,9 @@ static int pcnet32_open(struct net_devic
30512
30513 #ifdef DO_DXSUFLO
30514 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
30515- val = lp->a.read_csr(ioaddr, CSR3);
30516+ val = lp->a->read_csr(ioaddr, CSR3);
30517 val |= 0x40;
30518- lp->a.write_csr(ioaddr, CSR3, val);
30519+ lp->a->write_csr(ioaddr, CSR3, val);
30520 }
30521 #endif
30522
30523@@ -2176,11 +2176,11 @@ static int pcnet32_open(struct net_devic
30524 napi_enable(&lp->napi);
30525
30526 /* Re-initialize the PCNET32, and start it when done. */
30527- lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
30528- lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
30529+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
30530+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
30531
30532- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
30533- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
30534+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
30535+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
30536
30537 netif_start_queue(dev);
30538
30539@@ -2192,19 +2192,19 @@ static int pcnet32_open(struct net_devic
30540
30541 i = 0;
30542 while (i++ < 100)
30543- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
30544+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
30545 break;
30546 /*
30547 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
30548 * reports that doing so triggers a bug in the '974.
30549 */
30550- lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
30551+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
30552
30553 netif_printk(lp, ifup, KERN_DEBUG, dev,
30554 "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
30555 i,
30556 (u32) (lp->init_dma_addr),
30557- lp->a.read_csr(ioaddr, CSR0));
30558+ lp->a->read_csr(ioaddr, CSR0));
30559
30560 spin_unlock_irqrestore(&lp->lock, flags);
30561
30562@@ -2218,7 +2218,7 @@ err_free_ring:
30563 * Switch back to 16bit mode to avoid problems with dumb
30564 * DOS packet driver after a warm reboot
30565 */
30566- lp->a.write_bcr(ioaddr, 20, 4);
30567+ lp->a->write_bcr(ioaddr, 20, 4);
30568
30569 err_free_irq:
30570 spin_unlock_irqrestore(&lp->lock, flags);
30571@@ -2323,7 +2323,7 @@ static void pcnet32_restart(struct net_d
30572
30573 /* wait for stop */
30574 for (i = 0; i < 100; i++)
30575- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
30576+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
30577 break;
30578
30579 if (i >= 100)
30580@@ -2335,13 +2335,13 @@ static void pcnet32_restart(struct net_d
30581 return;
30582
30583 /* ReInit Ring */
30584- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
30585+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
30586 i = 0;
30587 while (i++ < 1000)
30588- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
30589+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
30590 break;
30591
30592- lp->a.write_csr(ioaddr, CSR0, csr0_bits);
30593+ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
30594 }
30595
30596 static void pcnet32_tx_timeout(struct net_device *dev)
30597@@ -2353,8 +2353,8 @@ static void pcnet32_tx_timeout(struct ne
30598 /* Transmitter timeout, serious problems. */
30599 if (pcnet32_debug & NETIF_MSG_DRV)
30600 pr_err("%s: transmit timed out, status %4.4x, resetting\n",
30601- dev->name, lp->a.read_csr(ioaddr, CSR0));
30602- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
30603+ dev->name, lp->a->read_csr(ioaddr, CSR0));
30604+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
30605 dev->stats.tx_errors++;
30606 if (netif_msg_tx_err(lp)) {
30607 int i;
30608@@ -2397,7 +2397,7 @@ static netdev_tx_t pcnet32_start_xmit(st
30609
30610 netif_printk(lp, tx_queued, KERN_DEBUG, dev,
30611 "%s() called, csr0 %4.4x\n",
30612- __func__, lp->a.read_csr(ioaddr, CSR0));
30613+ __func__, lp->a->read_csr(ioaddr, CSR0));
30614
30615 /* Default status -- will not enable Successful-TxDone
30616 * interrupt when that option is available to us.
30617@@ -2427,7 +2427,7 @@ static netdev_tx_t pcnet32_start_xmit(st
30618 dev->stats.tx_bytes += skb->len;
30619
30620 /* Trigger an immediate send poll. */
30621- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
30622+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
30623
30624 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
30625 lp->tx_full = 1;
30626@@ -2452,16 +2452,16 @@ pcnet32_interrupt(int irq, void *dev_id)
30627
30628 spin_lock(&lp->lock);
30629
30630- csr0 = lp->a.read_csr(ioaddr, CSR0);
30631+ csr0 = lp->a->read_csr(ioaddr, CSR0);
30632 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
30633 if (csr0 == 0xffff)
30634 break; /* PCMCIA remove happened */
30635 /* Acknowledge all of the current interrupt sources ASAP. */
30636- lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
30637+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
30638
30639 netif_printk(lp, intr, KERN_DEBUG, dev,
30640 "interrupt csr0=%#2.2x new csr=%#2.2x\n",
30641- csr0, lp->a.read_csr(ioaddr, CSR0));
30642+ csr0, lp->a->read_csr(ioaddr, CSR0));
30643
30644 /* Log misc errors. */
30645 if (csr0 & 0x4000)
30646@@ -2488,19 +2488,19 @@ pcnet32_interrupt(int irq, void *dev_id)
30647 if (napi_schedule_prep(&lp->napi)) {
30648 u16 val;
30649 /* set interrupt masks */
30650- val = lp->a.read_csr(ioaddr, CSR3);
30651+ val = lp->a->read_csr(ioaddr, CSR3);
30652 val |= 0x5f00;
30653- lp->a.write_csr(ioaddr, CSR3, val);
30654+ lp->a->write_csr(ioaddr, CSR3, val);
30655
30656 __napi_schedule(&lp->napi);
30657 break;
30658 }
30659- csr0 = lp->a.read_csr(ioaddr, CSR0);
30660+ csr0 = lp->a->read_csr(ioaddr, CSR0);
30661 }
30662
30663 netif_printk(lp, intr, KERN_DEBUG, dev,
30664 "exiting interrupt, csr0=%#4.4x\n",
30665- lp->a.read_csr(ioaddr, CSR0));
30666+ lp->a->read_csr(ioaddr, CSR0));
30667
30668 spin_unlock(&lp->lock);
30669
30670@@ -2520,20 +2520,20 @@ static int pcnet32_close(struct net_devi
30671
30672 spin_lock_irqsave(&lp->lock, flags);
30673
30674- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
30675+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
30676
30677 netif_printk(lp, ifdown, KERN_DEBUG, dev,
30678 "Shutting down ethercard, status was %2.2x\n",
30679- lp->a.read_csr(ioaddr, CSR0));
30680+ lp->a->read_csr(ioaddr, CSR0));
30681
30682 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
30683- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
30684+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
30685
30686 /*
30687 * Switch back to 16bit mode to avoid problems with dumb
30688 * DOS packet driver after a warm reboot
30689 */
30690- lp->a.write_bcr(ioaddr, 20, 4);
30691+ lp->a->write_bcr(ioaddr, 20, 4);
30692
30693 spin_unlock_irqrestore(&lp->lock, flags);
30694
30695@@ -2556,7 +2556,7 @@ static struct net_device_stats *pcnet32_
30696 unsigned long flags;
30697
30698 spin_lock_irqsave(&lp->lock, flags);
30699- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
30700+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
30701 spin_unlock_irqrestore(&lp->lock, flags);
30702
30703 return &dev->stats;
30704@@ -2578,10 +2578,10 @@ static void pcnet32_load_multicast(struc
30705 if (dev->flags & IFF_ALLMULTI) {
30706 ib->filter[0] = cpu_to_le32(~0U);
30707 ib->filter[1] = cpu_to_le32(~0U);
30708- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
30709- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
30710- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
30711- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
30712+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
30713+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
30714+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
30715+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
30716 return;
30717 }
30718 /* clear the multicast filter */
30719@@ -2601,7 +2601,7 @@ static void pcnet32_load_multicast(struc
30720 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
30721 }
30722 for (i = 0; i < 4; i++)
30723- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
30724+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
30725 le16_to_cpu(mcast_table[i]));
30726 }
30727
30728@@ -2616,28 +2616,28 @@ static void pcnet32_set_multicast_list(s
30729
30730 spin_lock_irqsave(&lp->lock, flags);
30731 suspended = pcnet32_suspend(dev, &flags, 0);
30732- csr15 = lp->a.read_csr(ioaddr, CSR15);
30733+ csr15 = lp->a->read_csr(ioaddr, CSR15);
30734 if (dev->flags & IFF_PROMISC) {
30735 /* Log any net taps. */
30736 netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
30737 lp->init_block->mode =
30738 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
30739 7);
30740- lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
30741+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
30742 } else {
30743 lp->init_block->mode =
30744 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
30745- lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
30746+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
30747 pcnet32_load_multicast(dev);
30748 }
30749
30750 if (suspended) {
30751 int csr5;
30752 /* clear SUSPEND (SPND) - CSR5 bit 0 */
30753- csr5 = lp->a.read_csr(ioaddr, CSR5);
30754- lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
30755+ csr5 = lp->a->read_csr(ioaddr, CSR5);
30756+ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
30757 } else {
30758- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
30759+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
30760 pcnet32_restart(dev, CSR0_NORMAL);
30761 netif_wake_queue(dev);
30762 }
30763@@ -2655,8 +2655,8 @@ static int mdio_read(struct net_device *
30764 if (!lp->mii)
30765 return 0;
30766
30767- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
30768- val_out = lp->a.read_bcr(ioaddr, 34);
30769+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
30770+ val_out = lp->a->read_bcr(ioaddr, 34);
30771
30772 return val_out;
30773 }
30774@@ -2670,8 +2670,8 @@ static void mdio_write(struct net_device
30775 if (!lp->mii)
30776 return;
30777
30778- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
30779- lp->a.write_bcr(ioaddr, 34, val);
30780+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
30781+ lp->a->write_bcr(ioaddr, 34, val);
30782 }
30783
30784 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
30785@@ -2748,7 +2748,7 @@ static void pcnet32_check_media(struct n
30786 curr_link = mii_link_ok(&lp->mii_if);
30787 } else {
30788 ulong ioaddr = dev->base_addr; /* card base I/O address */
30789- curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
30790+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
30791 }
30792 if (!curr_link) {
30793 if (prev_link || verbose) {
30794@@ -2771,13 +2771,13 @@ static void pcnet32_check_media(struct n
30795 (ecmd.duplex == DUPLEX_FULL)
30796 ? "full" : "half");
30797 }
30798- bcr9 = lp->a.read_bcr(dev->base_addr, 9);
30799+ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
30800 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
30801 if (lp->mii_if.full_duplex)
30802 bcr9 |= (1 << 0);
30803 else
30804 bcr9 &= ~(1 << 0);
30805- lp->a.write_bcr(dev->base_addr, 9, bcr9);
30806+ lp->a->write_bcr(dev->base_addr, 9, bcr9);
30807 }
30808 } else {
30809 netif_info(lp, link, dev, "link up\n");
30810diff -urNp linux-3.0.4/drivers/net/ppp_generic.c linux-3.0.4/drivers/net/ppp_generic.c
30811--- linux-3.0.4/drivers/net/ppp_generic.c 2011-07-21 22:17:23.000000000 -0400
30812+++ linux-3.0.4/drivers/net/ppp_generic.c 2011-08-23 21:47:55.000000000 -0400
30813@@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, st
30814 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
30815 struct ppp_stats stats;
30816 struct ppp_comp_stats cstats;
30817- char *vers;
30818
30819 switch (cmd) {
30820 case SIOCGPPPSTATS:
30821@@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, st
30822 break;
30823
30824 case SIOCGPPPVER:
30825- vers = PPP_VERSION;
30826- if (copy_to_user(addr, vers, strlen(vers) + 1))
30827+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
30828 break;
30829 err = 0;
30830 break;
30831diff -urNp linux-3.0.4/drivers/net/r8169.c linux-3.0.4/drivers/net/r8169.c
30832--- linux-3.0.4/drivers/net/r8169.c 2011-09-02 18:11:21.000000000 -0400
30833+++ linux-3.0.4/drivers/net/r8169.c 2011-08-23 21:47:55.000000000 -0400
30834@@ -645,12 +645,12 @@ struct rtl8169_private {
30835 struct mdio_ops {
30836 void (*write)(void __iomem *, int, int);
30837 int (*read)(void __iomem *, int);
30838- } mdio_ops;
30839+ } __no_const mdio_ops;
30840
30841 struct pll_power_ops {
30842 void (*down)(struct rtl8169_private *);
30843 void (*up)(struct rtl8169_private *);
30844- } pll_power_ops;
30845+ } __no_const pll_power_ops;
30846
30847 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
30848 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
30849diff -urNp linux-3.0.4/drivers/net/tg3.h linux-3.0.4/drivers/net/tg3.h
30850--- linux-3.0.4/drivers/net/tg3.h 2011-07-21 22:17:23.000000000 -0400
30851+++ linux-3.0.4/drivers/net/tg3.h 2011-08-23 21:47:55.000000000 -0400
30852@@ -134,6 +134,7 @@
30853 #define CHIPREV_ID_5750_A0 0x4000
30854 #define CHIPREV_ID_5750_A1 0x4001
30855 #define CHIPREV_ID_5750_A3 0x4003
30856+#define CHIPREV_ID_5750_C1 0x4201
30857 #define CHIPREV_ID_5750_C2 0x4202
30858 #define CHIPREV_ID_5752_A0_HW 0x5000
30859 #define CHIPREV_ID_5752_A0 0x6000
30860diff -urNp linux-3.0.4/drivers/net/tokenring/abyss.c linux-3.0.4/drivers/net/tokenring/abyss.c
30861--- linux-3.0.4/drivers/net/tokenring/abyss.c 2011-07-21 22:17:23.000000000 -0400
30862+++ linux-3.0.4/drivers/net/tokenring/abyss.c 2011-08-23 21:47:55.000000000 -0400
30863@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
30864
30865 static int __init abyss_init (void)
30866 {
30867- abyss_netdev_ops = tms380tr_netdev_ops;
30868+ pax_open_kernel();
30869+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
30870
30871- abyss_netdev_ops.ndo_open = abyss_open;
30872- abyss_netdev_ops.ndo_stop = abyss_close;
30873+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
30874+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
30875+ pax_close_kernel();
30876
30877 return pci_register_driver(&abyss_driver);
30878 }
30879diff -urNp linux-3.0.4/drivers/net/tokenring/madgemc.c linux-3.0.4/drivers/net/tokenring/madgemc.c
30880--- linux-3.0.4/drivers/net/tokenring/madgemc.c 2011-07-21 22:17:23.000000000 -0400
30881+++ linux-3.0.4/drivers/net/tokenring/madgemc.c 2011-08-23 21:47:55.000000000 -0400
30882@@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver
30883
30884 static int __init madgemc_init (void)
30885 {
30886- madgemc_netdev_ops = tms380tr_netdev_ops;
30887- madgemc_netdev_ops.ndo_open = madgemc_open;
30888- madgemc_netdev_ops.ndo_stop = madgemc_close;
30889+ pax_open_kernel();
30890+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
30891+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
30892+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
30893+ pax_close_kernel();
30894
30895 return mca_register_driver (&madgemc_driver);
30896 }
30897diff -urNp linux-3.0.4/drivers/net/tokenring/proteon.c linux-3.0.4/drivers/net/tokenring/proteon.c
30898--- linux-3.0.4/drivers/net/tokenring/proteon.c 2011-07-21 22:17:23.000000000 -0400
30899+++ linux-3.0.4/drivers/net/tokenring/proteon.c 2011-08-23 21:47:55.000000000 -0400
30900@@ -353,9 +353,11 @@ static int __init proteon_init(void)
30901 struct platform_device *pdev;
30902 int i, num = 0, err = 0;
30903
30904- proteon_netdev_ops = tms380tr_netdev_ops;
30905- proteon_netdev_ops.ndo_open = proteon_open;
30906- proteon_netdev_ops.ndo_stop = tms380tr_close;
30907+ pax_open_kernel();
30908+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
30909+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
30910+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
30911+ pax_close_kernel();
30912
30913 err = platform_driver_register(&proteon_driver);
30914 if (err)
30915diff -urNp linux-3.0.4/drivers/net/tokenring/skisa.c linux-3.0.4/drivers/net/tokenring/skisa.c
30916--- linux-3.0.4/drivers/net/tokenring/skisa.c 2011-07-21 22:17:23.000000000 -0400
30917+++ linux-3.0.4/drivers/net/tokenring/skisa.c 2011-08-23 21:47:55.000000000 -0400
30918@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
30919 struct platform_device *pdev;
30920 int i, num = 0, err = 0;
30921
30922- sk_isa_netdev_ops = tms380tr_netdev_ops;
30923- sk_isa_netdev_ops.ndo_open = sk_isa_open;
30924- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
30925+ pax_open_kernel();
30926+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
30927+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
30928+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
30929+ pax_close_kernel();
30930
30931 err = platform_driver_register(&sk_isa_driver);
30932 if (err)
30933diff -urNp linux-3.0.4/drivers/net/tulip/de2104x.c linux-3.0.4/drivers/net/tulip/de2104x.c
30934--- linux-3.0.4/drivers/net/tulip/de2104x.c 2011-07-21 22:17:23.000000000 -0400
30935+++ linux-3.0.4/drivers/net/tulip/de2104x.c 2011-08-23 21:48:14.000000000 -0400
30936@@ -1794,6 +1794,8 @@ static void __devinit de21041_get_srom_i
30937 struct de_srom_info_leaf *il;
30938 void *bufp;
30939
30940+ pax_track_stack();
30941+
30942 /* download entire eeprom */
30943 for (i = 0; i < DE_EEPROM_WORDS; i++)
30944 ((__le16 *)ee_data)[i] =
30945diff -urNp linux-3.0.4/drivers/net/tulip/de4x5.c linux-3.0.4/drivers/net/tulip/de4x5.c
30946--- linux-3.0.4/drivers/net/tulip/de4x5.c 2011-07-21 22:17:23.000000000 -0400
30947+++ linux-3.0.4/drivers/net/tulip/de4x5.c 2011-08-23 21:47:55.000000000 -0400
30948@@ -5401,7 +5401,7 @@ de4x5_ioctl(struct net_device *dev, stru
30949 for (i=0; i<ETH_ALEN; i++) {
30950 tmp.addr[i] = dev->dev_addr[i];
30951 }
30952- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
30953+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
30954 break;
30955
30956 case DE4X5_SET_HWADDR: /* Set the hardware address */
30957@@ -5441,7 +5441,7 @@ de4x5_ioctl(struct net_device *dev, stru
30958 spin_lock_irqsave(&lp->lock, flags);
30959 memcpy(&statbuf, &lp->pktStats, ioc->len);
30960 spin_unlock_irqrestore(&lp->lock, flags);
30961- if (copy_to_user(ioc->data, &statbuf, ioc->len))
30962+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
30963 return -EFAULT;
30964 break;
30965 }
30966diff -urNp linux-3.0.4/drivers/net/usb/hso.c linux-3.0.4/drivers/net/usb/hso.c
30967--- linux-3.0.4/drivers/net/usb/hso.c 2011-07-21 22:17:23.000000000 -0400
30968+++ linux-3.0.4/drivers/net/usb/hso.c 2011-08-23 21:47:55.000000000 -0400
30969@@ -71,7 +71,7 @@
30970 #include <asm/byteorder.h>
30971 #include <linux/serial_core.h>
30972 #include <linux/serial.h>
30973-
30974+#include <asm/local.h>
30975
30976 #define MOD_AUTHOR "Option Wireless"
30977 #define MOD_DESCRIPTION "USB High Speed Option driver"
30978@@ -257,7 +257,7 @@ struct hso_serial {
30979
30980 /* from usb_serial_port */
30981 struct tty_struct *tty;
30982- int open_count;
30983+ local_t open_count;
30984 spinlock_t serial_lock;
30985
30986 int (*write_data) (struct hso_serial *serial);
30987@@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_
30988 struct urb *urb;
30989
30990 urb = serial->rx_urb[0];
30991- if (serial->open_count > 0) {
30992+ if (local_read(&serial->open_count) > 0) {
30993 count = put_rxbuf_data(urb, serial);
30994 if (count == -1)
30995 return;
30996@@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_cal
30997 DUMP1(urb->transfer_buffer, urb->actual_length);
30998
30999 /* Anyone listening? */
31000- if (serial->open_count == 0)
31001+ if (local_read(&serial->open_count) == 0)
31002 return;
31003
31004 if (status == 0) {
31005@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
31006 spin_unlock_irq(&serial->serial_lock);
31007
31008 /* check for port already opened, if not set the termios */
31009- serial->open_count++;
31010- if (serial->open_count == 1) {
31011+ if (local_inc_return(&serial->open_count) == 1) {
31012 serial->rx_state = RX_IDLE;
31013 /* Force default termio settings */
31014 _hso_serial_set_termios(tty, NULL);
31015@@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_st
31016 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
31017 if (result) {
31018 hso_stop_serial_device(serial->parent);
31019- serial->open_count--;
31020+ local_dec(&serial->open_count);
31021 kref_put(&serial->parent->ref, hso_serial_ref_free);
31022 }
31023 } else {
31024@@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_
31025
31026 /* reset the rts and dtr */
31027 /* do the actual close */
31028- serial->open_count--;
31029+ local_dec(&serial->open_count);
31030
31031- if (serial->open_count <= 0) {
31032- serial->open_count = 0;
31033+ if (local_read(&serial->open_count) <= 0) {
31034+ local_set(&serial->open_count, 0);
31035 spin_lock_irq(&serial->serial_lock);
31036 if (serial->tty == tty) {
31037 serial->tty->driver_data = NULL;
31038@@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struc
31039
31040 /* the actual setup */
31041 spin_lock_irqsave(&serial->serial_lock, flags);
31042- if (serial->open_count)
31043+ if (local_read(&serial->open_count))
31044 _hso_serial_set_termios(tty, old);
31045 else
31046 tty->termios = old;
31047@@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *ur
31048 D1("Pending read interrupt on port %d\n", i);
31049 spin_lock(&serial->serial_lock);
31050 if (serial->rx_state == RX_IDLE &&
31051- serial->open_count > 0) {
31052+ local_read(&serial->open_count) > 0) {
31053 /* Setup and send a ctrl req read on
31054 * port i */
31055 if (!serial->rx_urb_filled[0]) {
31056@@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interfa
31057 /* Start all serial ports */
31058 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
31059 if (serial_table[i] && (serial_table[i]->interface == iface)) {
31060- if (dev2ser(serial_table[i])->open_count) {
31061+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
31062 result =
31063 hso_start_serial_device(serial_table[i], GFP_NOIO);
31064 hso_kick_transmit(dev2ser(serial_table[i]));
31065diff -urNp linux-3.0.4/drivers/net/vmxnet3/vmxnet3_ethtool.c linux-3.0.4/drivers/net/vmxnet3/vmxnet3_ethtool.c
31066--- linux-3.0.4/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-07-21 22:17:23.000000000 -0400
31067+++ linux-3.0.4/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-08-23 21:47:55.000000000 -0400
31068@@ -594,8 +594,7 @@ vmxnet3_set_rss_indir(struct net_device
31069 * Return with error code if any of the queue indices
31070 * is out of range
31071 */
31072- if (p->ring_index[i] < 0 ||
31073- p->ring_index[i] >= adapter->num_rx_queues)
31074+ if (p->ring_index[i] >= adapter->num_rx_queues)
31075 return -EINVAL;
31076 }
31077
31078diff -urNp linux-3.0.4/drivers/net/vxge/vxge-config.h linux-3.0.4/drivers/net/vxge/vxge-config.h
31079--- linux-3.0.4/drivers/net/vxge/vxge-config.h 2011-07-21 22:17:23.000000000 -0400
31080+++ linux-3.0.4/drivers/net/vxge/vxge-config.h 2011-08-23 21:47:55.000000000 -0400
31081@@ -512,7 +512,7 @@ struct vxge_hw_uld_cbs {
31082 void (*link_down)(struct __vxge_hw_device *devh);
31083 void (*crit_err)(struct __vxge_hw_device *devh,
31084 enum vxge_hw_event type, u64 ext_data);
31085-};
31086+} __no_const;
31087
31088 /*
31089 * struct __vxge_hw_blockpool_entry - Block private data structure
31090diff -urNp linux-3.0.4/drivers/net/vxge/vxge-main.c linux-3.0.4/drivers/net/vxge/vxge-main.c
31091--- linux-3.0.4/drivers/net/vxge/vxge-main.c 2011-07-21 22:17:23.000000000 -0400
31092+++ linux-3.0.4/drivers/net/vxge/vxge-main.c 2011-08-23 21:48:14.000000000 -0400
31093@@ -98,6 +98,8 @@ static inline void VXGE_COMPLETE_VPATH_T
31094 struct sk_buff *completed[NR_SKB_COMPLETED];
31095 int more;
31096
31097+ pax_track_stack();
31098+
31099 do {
31100 more = 0;
31101 skb_ptr = completed;
31102@@ -1920,6 +1922,8 @@ static enum vxge_hw_status vxge_rth_conf
31103 u8 mtable[256] = {0}; /* CPU to vpath mapping */
31104 int index;
31105
31106+ pax_track_stack();
31107+
31108 /*
31109 * Filling
31110 * - itable with bucket numbers
31111diff -urNp linux-3.0.4/drivers/net/vxge/vxge-traffic.h linux-3.0.4/drivers/net/vxge/vxge-traffic.h
31112--- linux-3.0.4/drivers/net/vxge/vxge-traffic.h 2011-07-21 22:17:23.000000000 -0400
31113+++ linux-3.0.4/drivers/net/vxge/vxge-traffic.h 2011-08-23 21:47:55.000000000 -0400
31114@@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
31115 struct vxge_hw_mempool_dma *dma_object,
31116 u32 index,
31117 u32 is_last);
31118-};
31119+} __no_const;
31120
31121 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
31122 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
31123diff -urNp linux-3.0.4/drivers/net/wan/cycx_x25.c linux-3.0.4/drivers/net/wan/cycx_x25.c
31124--- linux-3.0.4/drivers/net/wan/cycx_x25.c 2011-07-21 22:17:23.000000000 -0400
31125+++ linux-3.0.4/drivers/net/wan/cycx_x25.c 2011-08-23 21:48:14.000000000 -0400
31126@@ -1018,6 +1018,8 @@ static void hex_dump(char *msg, unsigned
31127 unsigned char hex[1024],
31128 * phex = hex;
31129
31130+ pax_track_stack();
31131+
31132 if (len >= (sizeof(hex) / 2))
31133 len = (sizeof(hex) / 2) - 1;
31134
31135diff -urNp linux-3.0.4/drivers/net/wan/hdlc_x25.c linux-3.0.4/drivers/net/wan/hdlc_x25.c
31136--- linux-3.0.4/drivers/net/wan/hdlc_x25.c 2011-07-21 22:17:23.000000000 -0400
31137+++ linux-3.0.4/drivers/net/wan/hdlc_x25.c 2011-08-23 21:47:55.000000000 -0400
31138@@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
31139
31140 static int x25_open(struct net_device *dev)
31141 {
31142- struct lapb_register_struct cb;
31143+ static struct lapb_register_struct cb = {
31144+ .connect_confirmation = x25_connected,
31145+ .connect_indication = x25_connected,
31146+ .disconnect_confirmation = x25_disconnected,
31147+ .disconnect_indication = x25_disconnected,
31148+ .data_indication = x25_data_indication,
31149+ .data_transmit = x25_data_transmit
31150+ };
31151 int result;
31152
31153- cb.connect_confirmation = x25_connected;
31154- cb.connect_indication = x25_connected;
31155- cb.disconnect_confirmation = x25_disconnected;
31156- cb.disconnect_indication = x25_disconnected;
31157- cb.data_indication = x25_data_indication;
31158- cb.data_transmit = x25_data_transmit;
31159-
31160 result = lapb_register(dev, &cb);
31161 if (result != LAPB_OK)
31162 return result;
31163diff -urNp linux-3.0.4/drivers/net/wimax/i2400m/usb-fw.c linux-3.0.4/drivers/net/wimax/i2400m/usb-fw.c
31164--- linux-3.0.4/drivers/net/wimax/i2400m/usb-fw.c 2011-07-21 22:17:23.000000000 -0400
31165+++ linux-3.0.4/drivers/net/wimax/i2400m/usb-fw.c 2011-08-23 21:48:14.000000000 -0400
31166@@ -287,6 +287,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
31167 int do_autopm = 1;
31168 DECLARE_COMPLETION_ONSTACK(notif_completion);
31169
31170+ pax_track_stack();
31171+
31172 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
31173 i2400m, ack, ack_size);
31174 BUG_ON(_ack == i2400m->bm_ack_buf);
31175diff -urNp linux-3.0.4/drivers/net/wireless/airo.c linux-3.0.4/drivers/net/wireless/airo.c
31176--- linux-3.0.4/drivers/net/wireless/airo.c 2011-09-02 18:11:21.000000000 -0400
31177+++ linux-3.0.4/drivers/net/wireless/airo.c 2011-08-23 21:48:14.000000000 -0400
31178@@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
31179 BSSListElement * loop_net;
31180 BSSListElement * tmp_net;
31181
31182+ pax_track_stack();
31183+
31184 /* Blow away current list of scan results */
31185 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
31186 list_move_tail (&loop_net->list, &ai->network_free_list);
31187@@ -3794,6 +3796,8 @@ static u16 setup_card(struct airo_info *
31188 WepKeyRid wkr;
31189 int rc;
31190
31191+ pax_track_stack();
31192+
31193 memset( &mySsid, 0, sizeof( mySsid ) );
31194 kfree (ai->flash);
31195 ai->flash = NULL;
31196@@ -4753,6 +4757,8 @@ static int proc_stats_rid_open( struct i
31197 __le32 *vals = stats.vals;
31198 int len;
31199
31200+ pax_track_stack();
31201+
31202 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
31203 return -ENOMEM;
31204 data = file->private_data;
31205@@ -5476,6 +5482,8 @@ static int proc_BSSList_open( struct ino
31206 /* If doLoseSync is not 1, we won't do a Lose Sync */
31207 int doLoseSync = -1;
31208
31209+ pax_track_stack();
31210+
31211 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
31212 return -ENOMEM;
31213 data = file->private_data;
31214@@ -7181,6 +7189,8 @@ static int airo_get_aplist(struct net_de
31215 int i;
31216 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
31217
31218+ pax_track_stack();
31219+
31220 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
31221 if (!qual)
31222 return -ENOMEM;
31223@@ -7741,6 +7751,8 @@ static void airo_read_wireless_stats(str
31224 CapabilityRid cap_rid;
31225 __le32 *vals = stats_rid.vals;
31226
31227+ pax_track_stack();
31228+
31229 /* Get stats out of the card */
31230 clear_bit(JOB_WSTATS, &local->jobs);
31231 if (local->power.event) {
31232diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath5k/debug.c linux-3.0.4/drivers/net/wireless/ath/ath5k/debug.c
31233--- linux-3.0.4/drivers/net/wireless/ath/ath5k/debug.c 2011-07-21 22:17:23.000000000 -0400
31234+++ linux-3.0.4/drivers/net/wireless/ath/ath5k/debug.c 2011-08-23 21:48:14.000000000 -0400
31235@@ -204,6 +204,8 @@ static ssize_t read_file_beacon(struct f
31236 unsigned int v;
31237 u64 tsf;
31238
31239+ pax_track_stack();
31240+
31241 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
31242 len += snprintf(buf+len, sizeof(buf)-len,
31243 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
31244@@ -323,6 +325,8 @@ static ssize_t read_file_debug(struct fi
31245 unsigned int len = 0;
31246 unsigned int i;
31247
31248+ pax_track_stack();
31249+
31250 len += snprintf(buf+len, sizeof(buf)-len,
31251 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
31252
31253@@ -384,6 +388,8 @@ static ssize_t read_file_antenna(struct
31254 unsigned int i;
31255 unsigned int v;
31256
31257+ pax_track_stack();
31258+
31259 len += snprintf(buf+len, sizeof(buf)-len, "antenna mode\t%d\n",
31260 sc->ah->ah_ant_mode);
31261 len += snprintf(buf+len, sizeof(buf)-len, "default antenna\t%d\n",
31262@@ -494,6 +500,8 @@ static ssize_t read_file_misc(struct fil
31263 unsigned int len = 0;
31264 u32 filt = ath5k_hw_get_rx_filter(sc->ah);
31265
31266+ pax_track_stack();
31267+
31268 len += snprintf(buf+len, sizeof(buf)-len, "bssid-mask: %pM\n",
31269 sc->bssidmask);
31270 len += snprintf(buf+len, sizeof(buf)-len, "filter-flags: 0x%x ",
31271@@ -550,6 +558,8 @@ static ssize_t read_file_frameerrors(str
31272 unsigned int len = 0;
31273 int i;
31274
31275+ pax_track_stack();
31276+
31277 len += snprintf(buf+len, sizeof(buf)-len,
31278 "RX\n---------------------\n");
31279 len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%u\t(%u%%)\n",
31280@@ -667,6 +677,8 @@ static ssize_t read_file_ani(struct file
31281 char buf[700];
31282 unsigned int len = 0;
31283
31284+ pax_track_stack();
31285+
31286 len += snprintf(buf+len, sizeof(buf)-len,
31287 "HW has PHY error counters:\t%s\n",
31288 sc->ah->ah_capabilities.cap_has_phyerr_counters ?
31289@@ -827,6 +839,8 @@ static ssize_t read_file_queue(struct fi
31290 struct ath5k_buf *bf, *bf0;
31291 int i, n;
31292
31293+ pax_track_stack();
31294+
31295 len += snprintf(buf+len, sizeof(buf)-len,
31296 "available txbuffers: %d\n", sc->txbuf_len);
31297
31298diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c
31299--- linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-07-21 22:17:23.000000000 -0400
31300+++ linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-08-23 21:48:14.000000000 -0400
31301@@ -757,6 +757,8 @@ static void ar9003_hw_tx_iq_cal_post_pro
31302 int i, im, j;
31303 int nmeasurement;
31304
31305+ pax_track_stack();
31306+
31307 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
31308 if (ah->txchainmask & (1 << i))
31309 num_chains++;
31310diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
31311--- linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-07-21 22:17:23.000000000 -0400
31312+++ linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-08-23 21:48:14.000000000 -0400
31313@@ -356,6 +356,8 @@ static bool create_pa_curve(u32 *data_L,
31314 int theta_low_bin = 0;
31315 int i;
31316
31317+ pax_track_stack();
31318+
31319 /* disregard any bin that contains <= 16 samples */
31320 thresh_accum_cnt = 16;
31321 scale_factor = 5;
31322diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/debug.c linux-3.0.4/drivers/net/wireless/ath/ath9k/debug.c
31323--- linux-3.0.4/drivers/net/wireless/ath/ath9k/debug.c 2011-07-21 22:17:23.000000000 -0400
31324+++ linux-3.0.4/drivers/net/wireless/ath/ath9k/debug.c 2011-08-23 21:48:14.000000000 -0400
31325@@ -337,6 +337,8 @@ static ssize_t read_file_interrupt(struc
31326 char buf[512];
31327 unsigned int len = 0;
31328
31329+ pax_track_stack();
31330+
31331 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
31332 len += snprintf(buf + len, sizeof(buf) - len,
31333 "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
31334@@ -427,6 +429,8 @@ static ssize_t read_file_wiphy(struct fi
31335 u8 addr[ETH_ALEN];
31336 u32 tmp;
31337
31338+ pax_track_stack();
31339+
31340 len += snprintf(buf + len, sizeof(buf) - len,
31341 "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
31342 wiphy_name(sc->hw->wiphy),
31343diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/htc_drv_debug.c linux-3.0.4/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
31344--- linux-3.0.4/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-07-21 22:17:23.000000000 -0400
31345+++ linux-3.0.4/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-08-23 21:48:14.000000000 -0400
31346@@ -31,6 +31,8 @@ static ssize_t read_file_tgt_int_stats(s
31347 unsigned int len = 0;
31348 int ret = 0;
31349
31350+ pax_track_stack();
31351+
31352 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
31353
31354 ath9k_htc_ps_wakeup(priv);
31355@@ -89,6 +91,8 @@ static ssize_t read_file_tgt_tx_stats(st
31356 unsigned int len = 0;
31357 int ret = 0;
31358
31359+ pax_track_stack();
31360+
31361 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
31362
31363 ath9k_htc_ps_wakeup(priv);
31364@@ -159,6 +163,8 @@ static ssize_t read_file_tgt_rx_stats(st
31365 unsigned int len = 0;
31366 int ret = 0;
31367
31368+ pax_track_stack();
31369+
31370 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
31371
31372 ath9k_htc_ps_wakeup(priv);
31373@@ -203,6 +209,8 @@ static ssize_t read_file_xmit(struct fil
31374 char buf[512];
31375 unsigned int len = 0;
31376
31377+ pax_track_stack();
31378+
31379 len += snprintf(buf + len, sizeof(buf) - len,
31380 "%20s : %10u\n", "Buffers queued",
31381 priv->debug.tx_stats.buf_queued);
31382@@ -376,6 +384,8 @@ static ssize_t read_file_slot(struct fil
31383 char buf[512];
31384 unsigned int len = 0;
31385
31386+ pax_track_stack();
31387+
31388 spin_lock_bh(&priv->tx.tx_lock);
31389
31390 len += snprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
31391@@ -411,6 +421,8 @@ static ssize_t read_file_queue(struct fi
31392 char buf[512];
31393 unsigned int len = 0;
31394
31395+ pax_track_stack();
31396+
31397 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
31398 "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
31399
31400diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/hw.h linux-3.0.4/drivers/net/wireless/ath/ath9k/hw.h
31401--- linux-3.0.4/drivers/net/wireless/ath/ath9k/hw.h 2011-09-02 18:11:21.000000000 -0400
31402+++ linux-3.0.4/drivers/net/wireless/ath/ath9k/hw.h 2011-08-23 21:47:55.000000000 -0400
31403@@ -585,7 +585,7 @@ struct ath_hw_private_ops {
31404
31405 /* ANI */
31406 void (*ani_cache_ini_regs)(struct ath_hw *ah);
31407-};
31408+} __no_const;
31409
31410 /**
31411 * struct ath_hw_ops - callbacks used by hardware code and driver code
31412@@ -637,7 +637,7 @@ struct ath_hw_ops {
31413 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
31414 struct ath_hw_antcomb_conf *antconf);
31415
31416-};
31417+} __no_const;
31418
31419 struct ath_nf_limits {
31420 s16 max;
31421@@ -650,7 +650,7 @@ struct ath_nf_limits {
31422 #define AH_UNPLUGGED 0x2 /* The card has been physically removed. */
31423
31424 struct ath_hw {
31425- struct ath_ops reg_ops;
31426+ ath_ops_no_const reg_ops;
31427
31428 struct ieee80211_hw *hw;
31429 struct ath_common common;
31430diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath.h linux-3.0.4/drivers/net/wireless/ath/ath.h
31431--- linux-3.0.4/drivers/net/wireless/ath/ath.h 2011-07-21 22:17:23.000000000 -0400
31432+++ linux-3.0.4/drivers/net/wireless/ath/ath.h 2011-08-23 21:47:55.000000000 -0400
31433@@ -121,6 +121,7 @@ struct ath_ops {
31434 void (*write_flush) (void *);
31435 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
31436 };
31437+typedef struct ath_ops __no_const ath_ops_no_const;
31438
31439 struct ath_common;
31440 struct ath_bus_ops;
31441diff -urNp linux-3.0.4/drivers/net/wireless/ipw2x00/ipw2100.c linux-3.0.4/drivers/net/wireless/ipw2x00/ipw2100.c
31442--- linux-3.0.4/drivers/net/wireless/ipw2x00/ipw2100.c 2011-07-21 22:17:23.000000000 -0400
31443+++ linux-3.0.4/drivers/net/wireless/ipw2x00/ipw2100.c 2011-08-23 21:48:14.000000000 -0400
31444@@ -2100,6 +2100,8 @@ static int ipw2100_set_essid(struct ipw2
31445 int err;
31446 DECLARE_SSID_BUF(ssid);
31447
31448+ pax_track_stack();
31449+
31450 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
31451
31452 if (ssid_len)
31453@@ -5449,6 +5451,8 @@ static int ipw2100_set_key(struct ipw210
31454 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
31455 int err;
31456
31457+ pax_track_stack();
31458+
31459 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
31460 idx, keylen, len);
31461
31462diff -urNp linux-3.0.4/drivers/net/wireless/ipw2x00/libipw_rx.c linux-3.0.4/drivers/net/wireless/ipw2x00/libipw_rx.c
31463--- linux-3.0.4/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-07-21 22:17:23.000000000 -0400
31464+++ linux-3.0.4/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-08-23 21:48:14.000000000 -0400
31465@@ -1565,6 +1565,8 @@ static void libipw_process_probe_respons
31466 unsigned long flags;
31467 DECLARE_SSID_BUF(ssid);
31468
31469+ pax_track_stack();
31470+
31471 LIBIPW_DEBUG_SCAN("'%s' (%pM"
31472 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
31473 print_ssid(ssid, info_element->data, info_element->len),
31474diff -urNp linux-3.0.4/drivers/net/wireless/iwlegacy/iwl3945-base.c linux-3.0.4/drivers/net/wireless/iwlegacy/iwl3945-base.c
31475--- linux-3.0.4/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-07-21 22:17:23.000000000 -0400
31476+++ linux-3.0.4/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-08-23 21:47:55.000000000 -0400
31477@@ -3962,7 +3962,9 @@ static int iwl3945_pci_probe(struct pci_
31478 */
31479 if (iwl3945_mod_params.disable_hw_scan) {
31480 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
31481- iwl3945_hw_ops.hw_scan = NULL;
31482+ pax_open_kernel();
31483+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
31484+ pax_close_kernel();
31485 }
31486
31487 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
31488diff -urNp linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
31489--- linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-07-21 22:17:23.000000000 -0400
31490+++ linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-08-23 21:48:14.000000000 -0400
31491@@ -910,6 +910,8 @@ static void rs_tx_status(void *priv_r, s
31492 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
31493 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
31494
31495+ pax_track_stack();
31496+
31497 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
31498
31499 /* Treat uninitialized rate scaling data same as non-existing. */
31500@@ -2918,6 +2920,8 @@ static void rs_fill_link_cmd(struct iwl_
31501 container_of(lq_sta, struct iwl_station_priv, lq_sta);
31502 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
31503
31504+ pax_track_stack();
31505+
31506 /* Override starting rate (index 0) if needed for debug purposes */
31507 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
31508
31509diff -urNp linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c
31510--- linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-07-21 22:17:23.000000000 -0400
31511+++ linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-08-23 21:48:14.000000000 -0400
31512@@ -548,6 +548,8 @@ static ssize_t iwl_dbgfs_status_read(str
31513 int pos = 0;
31514 const size_t bufsz = sizeof(buf);
31515
31516+ pax_track_stack();
31517+
31518 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
31519 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
31520 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
31521@@ -680,6 +682,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
31522 char buf[256 * NUM_IWL_RXON_CTX];
31523 const size_t bufsz = sizeof(buf);
31524
31525+ pax_track_stack();
31526+
31527 for_each_context(priv, ctx) {
31528 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
31529 ctx->ctxid);
31530diff -urNp linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debug.h linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debug.h
31531--- linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-07-21 22:17:23.000000000 -0400
31532+++ linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-08-23 21:47:55.000000000 -0400
31533@@ -68,8 +68,8 @@ do {
31534 } while (0)
31535
31536 #else
31537-#define IWL_DEBUG(__priv, level, fmt, args...)
31538-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
31539+#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
31540+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
31541 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
31542 const void *p, u32 len)
31543 {}
31544diff -urNp linux-3.0.4/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-3.0.4/drivers/net/wireless/iwmc3200wifi/debugfs.c
31545--- linux-3.0.4/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-07-21 22:17:23.000000000 -0400
31546+++ linux-3.0.4/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-08-23 21:48:14.000000000 -0400
31547@@ -327,6 +327,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
31548 int buf_len = 512;
31549 size_t len = 0;
31550
31551+ pax_track_stack();
31552+
31553 if (*ppos != 0)
31554 return 0;
31555 if (count < sizeof(buf))
31556diff -urNp linux-3.0.4/drivers/net/wireless/mac80211_hwsim.c linux-3.0.4/drivers/net/wireless/mac80211_hwsim.c
31557--- linux-3.0.4/drivers/net/wireless/mac80211_hwsim.c 2011-07-21 22:17:23.000000000 -0400
31558+++ linux-3.0.4/drivers/net/wireless/mac80211_hwsim.c 2011-08-23 21:47:55.000000000 -0400
31559@@ -1260,9 +1260,11 @@ static int __init init_mac80211_hwsim(vo
31560 return -EINVAL;
31561
31562 if (fake_hw_scan) {
31563- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
31564- mac80211_hwsim_ops.sw_scan_start = NULL;
31565- mac80211_hwsim_ops.sw_scan_complete = NULL;
31566+ pax_open_kernel();
31567+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
31568+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
31569+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
31570+ pax_close_kernel();
31571 }
31572
31573 spin_lock_init(&hwsim_radio_lock);
31574diff -urNp linux-3.0.4/drivers/net/wireless/rndis_wlan.c linux-3.0.4/drivers/net/wireless/rndis_wlan.c
31575--- linux-3.0.4/drivers/net/wireless/rndis_wlan.c 2011-07-21 22:17:23.000000000 -0400
31576+++ linux-3.0.4/drivers/net/wireless/rndis_wlan.c 2011-08-23 21:47:55.000000000 -0400
31577@@ -1277,7 +1277,7 @@ static int set_rts_threshold(struct usbn
31578
31579 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
31580
31581- if (rts_threshold < 0 || rts_threshold > 2347)
31582+ if (rts_threshold > 2347)
31583 rts_threshold = 2347;
31584
31585 tmp = cpu_to_le32(rts_threshold);
31586diff -urNp linux-3.0.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c linux-3.0.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
31587--- linux-3.0.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-07-21 22:17:23.000000000 -0400
31588+++ linux-3.0.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-08-23 21:48:14.000000000 -0400
31589@@ -837,6 +837,8 @@ bool _rtl92c_phy_sw_chnl_step_by_step(st
31590 u8 rfpath;
31591 u8 num_total_rfpath = rtlphy->num_total_rfpath;
31592
31593+ pax_track_stack();
31594+
31595 precommoncmdcnt = 0;
31596 _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
31597 MAX_PRECMD_CNT,
31598diff -urNp linux-3.0.4/drivers/net/wireless/wl1251/wl1251.h linux-3.0.4/drivers/net/wireless/wl1251/wl1251.h
31599--- linux-3.0.4/drivers/net/wireless/wl1251/wl1251.h 2011-07-21 22:17:23.000000000 -0400
31600+++ linux-3.0.4/drivers/net/wireless/wl1251/wl1251.h 2011-08-23 21:47:55.000000000 -0400
31601@@ -266,7 +266,7 @@ struct wl1251_if_operations {
31602 void (*reset)(struct wl1251 *wl);
31603 void (*enable_irq)(struct wl1251 *wl);
31604 void (*disable_irq)(struct wl1251 *wl);
31605-};
31606+} __no_const;
31607
31608 struct wl1251 {
31609 struct ieee80211_hw *hw;
31610diff -urNp linux-3.0.4/drivers/net/wireless/wl12xx/spi.c linux-3.0.4/drivers/net/wireless/wl12xx/spi.c
31611--- linux-3.0.4/drivers/net/wireless/wl12xx/spi.c 2011-07-21 22:17:23.000000000 -0400
31612+++ linux-3.0.4/drivers/net/wireless/wl12xx/spi.c 2011-08-23 21:48:14.000000000 -0400
31613@@ -280,6 +280,8 @@ static void wl1271_spi_raw_write(struct
31614 u32 chunk_len;
31615 int i;
31616
31617+ pax_track_stack();
31618+
31619 WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);
31620
31621 spi_message_init(&m);
31622diff -urNp linux-3.0.4/drivers/oprofile/buffer_sync.c linux-3.0.4/drivers/oprofile/buffer_sync.c
31623--- linux-3.0.4/drivers/oprofile/buffer_sync.c 2011-07-21 22:17:23.000000000 -0400
31624+++ linux-3.0.4/drivers/oprofile/buffer_sync.c 2011-08-23 21:47:55.000000000 -0400
31625@@ -343,7 +343,7 @@ static void add_data(struct op_entry *en
31626 if (cookie == NO_COOKIE)
31627 offset = pc;
31628 if (cookie == INVALID_COOKIE) {
31629- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
31630+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
31631 offset = pc;
31632 }
31633 if (cookie != last_cookie) {
31634@@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct
31635 /* add userspace sample */
31636
31637 if (!mm) {
31638- atomic_inc(&oprofile_stats.sample_lost_no_mm);
31639+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
31640 return 0;
31641 }
31642
31643 cookie = lookup_dcookie(mm, s->eip, &offset);
31644
31645 if (cookie == INVALID_COOKIE) {
31646- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
31647+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
31648 return 0;
31649 }
31650
31651@@ -563,7 +563,7 @@ void sync_buffer(int cpu)
31652 /* ignore backtraces if failed to add a sample */
31653 if (state == sb_bt_start) {
31654 state = sb_bt_ignore;
31655- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
31656+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
31657 }
31658 }
31659 release_mm(mm);
31660diff -urNp linux-3.0.4/drivers/oprofile/event_buffer.c linux-3.0.4/drivers/oprofile/event_buffer.c
31661--- linux-3.0.4/drivers/oprofile/event_buffer.c 2011-07-21 22:17:23.000000000 -0400
31662+++ linux-3.0.4/drivers/oprofile/event_buffer.c 2011-08-23 21:47:55.000000000 -0400
31663@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
31664 }
31665
31666 if (buffer_pos == buffer_size) {
31667- atomic_inc(&oprofile_stats.event_lost_overflow);
31668+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
31669 return;
31670 }
31671
31672diff -urNp linux-3.0.4/drivers/oprofile/oprof.c linux-3.0.4/drivers/oprofile/oprof.c
31673--- linux-3.0.4/drivers/oprofile/oprof.c 2011-07-21 22:17:23.000000000 -0400
31674+++ linux-3.0.4/drivers/oprofile/oprof.c 2011-08-23 21:47:55.000000000 -0400
31675@@ -110,7 +110,7 @@ static void switch_worker(struct work_st
31676 if (oprofile_ops.switch_events())
31677 return;
31678
31679- atomic_inc(&oprofile_stats.multiplex_counter);
31680+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
31681 start_switch_worker();
31682 }
31683
31684diff -urNp linux-3.0.4/drivers/oprofile/oprofilefs.c linux-3.0.4/drivers/oprofile/oprofilefs.c
31685--- linux-3.0.4/drivers/oprofile/oprofilefs.c 2011-07-21 22:17:23.000000000 -0400
31686+++ linux-3.0.4/drivers/oprofile/oprofilefs.c 2011-08-23 21:47:55.000000000 -0400
31687@@ -186,7 +186,7 @@ static const struct file_operations atom
31688
31689
31690 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
31691- char const *name, atomic_t *val)
31692+ char const *name, atomic_unchecked_t *val)
31693 {
31694 return __oprofilefs_create_file(sb, root, name,
31695 &atomic_ro_fops, 0444, val);
31696diff -urNp linux-3.0.4/drivers/oprofile/oprofile_stats.c linux-3.0.4/drivers/oprofile/oprofile_stats.c
31697--- linux-3.0.4/drivers/oprofile/oprofile_stats.c 2011-07-21 22:17:23.000000000 -0400
31698+++ linux-3.0.4/drivers/oprofile/oprofile_stats.c 2011-08-23 21:47:55.000000000 -0400
31699@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
31700 cpu_buf->sample_invalid_eip = 0;
31701 }
31702
31703- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
31704- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
31705- atomic_set(&oprofile_stats.event_lost_overflow, 0);
31706- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
31707- atomic_set(&oprofile_stats.multiplex_counter, 0);
31708+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
31709+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
31710+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
31711+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
31712+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
31713 }
31714
31715
31716diff -urNp linux-3.0.4/drivers/oprofile/oprofile_stats.h linux-3.0.4/drivers/oprofile/oprofile_stats.h
31717--- linux-3.0.4/drivers/oprofile/oprofile_stats.h 2011-07-21 22:17:23.000000000 -0400
31718+++ linux-3.0.4/drivers/oprofile/oprofile_stats.h 2011-08-23 21:47:55.000000000 -0400
31719@@ -13,11 +13,11 @@
31720 #include <asm/atomic.h>
31721
31722 struct oprofile_stat_struct {
31723- atomic_t sample_lost_no_mm;
31724- atomic_t sample_lost_no_mapping;
31725- atomic_t bt_lost_no_mapping;
31726- atomic_t event_lost_overflow;
31727- atomic_t multiplex_counter;
31728+ atomic_unchecked_t sample_lost_no_mm;
31729+ atomic_unchecked_t sample_lost_no_mapping;
31730+ atomic_unchecked_t bt_lost_no_mapping;
31731+ atomic_unchecked_t event_lost_overflow;
31732+ atomic_unchecked_t multiplex_counter;
31733 };
31734
31735 extern struct oprofile_stat_struct oprofile_stats;
31736diff -urNp linux-3.0.4/drivers/parport/procfs.c linux-3.0.4/drivers/parport/procfs.c
31737--- linux-3.0.4/drivers/parport/procfs.c 2011-07-21 22:17:23.000000000 -0400
31738+++ linux-3.0.4/drivers/parport/procfs.c 2011-08-23 21:47:55.000000000 -0400
31739@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
31740
31741 *ppos += len;
31742
31743- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
31744+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
31745 }
31746
31747 #ifdef CONFIG_PARPORT_1284
31748@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
31749
31750 *ppos += len;
31751
31752- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
31753+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
31754 }
31755 #endif /* IEEE1284.3 support. */
31756
31757diff -urNp linux-3.0.4/drivers/pci/hotplug/cpci_hotplug.h linux-3.0.4/drivers/pci/hotplug/cpci_hotplug.h
31758--- linux-3.0.4/drivers/pci/hotplug/cpci_hotplug.h 2011-07-21 22:17:23.000000000 -0400
31759+++ linux-3.0.4/drivers/pci/hotplug/cpci_hotplug.h 2011-08-23 21:47:55.000000000 -0400
31760@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
31761 int (*hardware_test) (struct slot* slot, u32 value);
31762 u8 (*get_power) (struct slot* slot);
31763 int (*set_power) (struct slot* slot, int value);
31764-};
31765+} __no_const;
31766
31767 struct cpci_hp_controller {
31768 unsigned int irq;
31769diff -urNp linux-3.0.4/drivers/pci/hotplug/cpqphp_nvram.c linux-3.0.4/drivers/pci/hotplug/cpqphp_nvram.c
31770--- linux-3.0.4/drivers/pci/hotplug/cpqphp_nvram.c 2011-07-21 22:17:23.000000000 -0400
31771+++ linux-3.0.4/drivers/pci/hotplug/cpqphp_nvram.c 2011-08-23 21:47:55.000000000 -0400
31772@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
31773
31774 void compaq_nvram_init (void __iomem *rom_start)
31775 {
31776+
31777+#ifndef CONFIG_PAX_KERNEXEC
31778 if (rom_start) {
31779 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
31780 }
31781+#endif
31782+
31783 dbg("int15 entry = %p\n", compaq_int15_entry_point);
31784
31785 /* initialize our int15 lock */
31786diff -urNp linux-3.0.4/drivers/pci/pcie/aspm.c linux-3.0.4/drivers/pci/pcie/aspm.c
31787--- linux-3.0.4/drivers/pci/pcie/aspm.c 2011-07-21 22:17:23.000000000 -0400
31788+++ linux-3.0.4/drivers/pci/pcie/aspm.c 2011-08-23 21:47:55.000000000 -0400
31789@@ -27,9 +27,9 @@
31790 #define MODULE_PARAM_PREFIX "pcie_aspm."
31791
31792 /* Note: those are not register definitions */
31793-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
31794-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
31795-#define ASPM_STATE_L1 (4) /* L1 state */
31796+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
31797+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
31798+#define ASPM_STATE_L1 (4U) /* L1 state */
31799 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
31800 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
31801
31802diff -urNp linux-3.0.4/drivers/pci/probe.c linux-3.0.4/drivers/pci/probe.c
31803--- linux-3.0.4/drivers/pci/probe.c 2011-07-21 22:17:23.000000000 -0400
31804+++ linux-3.0.4/drivers/pci/probe.c 2011-08-23 21:47:55.000000000 -0400
31805@@ -129,7 +129,7 @@ int __pci_read_base(struct pci_dev *dev,
31806 u32 l, sz, mask;
31807 u16 orig_cmd;
31808
31809- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
31810+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
31811
31812 if (!dev->mmio_always_on) {
31813 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
31814diff -urNp linux-3.0.4/drivers/pci/proc.c linux-3.0.4/drivers/pci/proc.c
31815--- linux-3.0.4/drivers/pci/proc.c 2011-07-21 22:17:23.000000000 -0400
31816+++ linux-3.0.4/drivers/pci/proc.c 2011-08-23 21:48:14.000000000 -0400
31817@@ -476,7 +476,16 @@ static const struct file_operations proc
31818 static int __init pci_proc_init(void)
31819 {
31820 struct pci_dev *dev = NULL;
31821+
31822+#ifdef CONFIG_GRKERNSEC_PROC_ADD
31823+#ifdef CONFIG_GRKERNSEC_PROC_USER
31824+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
31825+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
31826+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
31827+#endif
31828+#else
31829 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
31830+#endif
31831 proc_create("devices", 0, proc_bus_pci_dir,
31832 &proc_bus_pci_dev_operations);
31833 proc_initialized = 1;
31834diff -urNp linux-3.0.4/drivers/pci/xen-pcifront.c linux-3.0.4/drivers/pci/xen-pcifront.c
31835--- linux-3.0.4/drivers/pci/xen-pcifront.c 2011-07-21 22:17:23.000000000 -0400
31836+++ linux-3.0.4/drivers/pci/xen-pcifront.c 2011-08-23 21:48:14.000000000 -0400
31837@@ -187,6 +187,8 @@ static int pcifront_bus_read(struct pci_
31838 struct pcifront_sd *sd = bus->sysdata;
31839 struct pcifront_device *pdev = pcifront_get_pdev(sd);
31840
31841+ pax_track_stack();
31842+
31843 if (verbose_request)
31844 dev_info(&pdev->xdev->dev,
31845 "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
31846@@ -226,6 +228,8 @@ static int pcifront_bus_write(struct pci
31847 struct pcifront_sd *sd = bus->sysdata;
31848 struct pcifront_device *pdev = pcifront_get_pdev(sd);
31849
31850+ pax_track_stack();
31851+
31852 if (verbose_request)
31853 dev_info(&pdev->xdev->dev,
31854 "write dev=%04x:%02x:%02x.%01x - "
31855@@ -258,6 +262,8 @@ static int pci_frontend_enable_msix(stru
31856 struct pcifront_device *pdev = pcifront_get_pdev(sd);
31857 struct msi_desc *entry;
31858
31859+ pax_track_stack();
31860+
31861 if (nvec > SH_INFO_MAX_VEC) {
31862 dev_err(&dev->dev, "too much vector for pci frontend: %x."
31863 " Increase SH_INFO_MAX_VEC.\n", nvec);
31864@@ -309,6 +315,8 @@ static void pci_frontend_disable_msix(st
31865 struct pcifront_sd *sd = dev->bus->sysdata;
31866 struct pcifront_device *pdev = pcifront_get_pdev(sd);
31867
31868+ pax_track_stack();
31869+
31870 err = do_pci_op(pdev, &op);
31871
31872 /* What should do for error ? */
31873@@ -328,6 +336,8 @@ static int pci_frontend_enable_msi(struc
31874 struct pcifront_sd *sd = dev->bus->sysdata;
31875 struct pcifront_device *pdev = pcifront_get_pdev(sd);
31876
31877+ pax_track_stack();
31878+
31879 err = do_pci_op(pdev, &op);
31880 if (likely(!err)) {
31881 vector[0] = op.value;
31882diff -urNp linux-3.0.4/drivers/platform/x86/thinkpad_acpi.c linux-3.0.4/drivers/platform/x86/thinkpad_acpi.c
31883--- linux-3.0.4/drivers/platform/x86/thinkpad_acpi.c 2011-07-21 22:17:23.000000000 -0400
31884+++ linux-3.0.4/drivers/platform/x86/thinkpad_acpi.c 2011-08-23 21:47:55.000000000 -0400
31885@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
31886 return 0;
31887 }
31888
31889-void static hotkey_mask_warn_incomplete_mask(void)
31890+static void hotkey_mask_warn_incomplete_mask(void)
31891 {
31892 /* log only what the user can fix... */
31893 const u32 wantedmask = hotkey_driver_mask &
31894diff -urNp linux-3.0.4/drivers/pnp/pnpbios/bioscalls.c linux-3.0.4/drivers/pnp/pnpbios/bioscalls.c
31895--- linux-3.0.4/drivers/pnp/pnpbios/bioscalls.c 2011-07-21 22:17:23.000000000 -0400
31896+++ linux-3.0.4/drivers/pnp/pnpbios/bioscalls.c 2011-08-23 21:47:55.000000000 -0400
31897@@ -59,7 +59,7 @@ do { \
31898 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
31899 } while(0)
31900
31901-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
31902+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
31903 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
31904
31905 /*
31906@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func
31907
31908 cpu = get_cpu();
31909 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
31910+
31911+ pax_open_kernel();
31912 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
31913+ pax_close_kernel();
31914
31915 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
31916 spin_lock_irqsave(&pnp_bios_lock, flags);
31917@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func
31918 :"memory");
31919 spin_unlock_irqrestore(&pnp_bios_lock, flags);
31920
31921+ pax_open_kernel();
31922 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
31923+ pax_close_kernel();
31924+
31925 put_cpu();
31926
31927 /* If we get here and this is set then the PnP BIOS faulted on us. */
31928@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 n
31929 return status;
31930 }
31931
31932-void pnpbios_calls_init(union pnp_bios_install_struct *header)
31933+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
31934 {
31935 int i;
31936
31937@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_i
31938 pnp_bios_callpoint.offset = header->fields.pm16offset;
31939 pnp_bios_callpoint.segment = PNP_CS16;
31940
31941+ pax_open_kernel();
31942+
31943 for_each_possible_cpu(i) {
31944 struct desc_struct *gdt = get_cpu_gdt_table(i);
31945 if (!gdt)
31946@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_i
31947 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
31948 (unsigned long)__va(header->fields.pm16dseg));
31949 }
31950+
31951+ pax_close_kernel();
31952 }
31953diff -urNp linux-3.0.4/drivers/pnp/resource.c linux-3.0.4/drivers/pnp/resource.c
31954--- linux-3.0.4/drivers/pnp/resource.c 2011-07-21 22:17:23.000000000 -0400
31955+++ linux-3.0.4/drivers/pnp/resource.c 2011-08-23 21:47:55.000000000 -0400
31956@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
31957 return 1;
31958
31959 /* check if the resource is valid */
31960- if (*irq < 0 || *irq > 15)
31961+ if (*irq > 15)
31962 return 0;
31963
31964 /* check if the resource is reserved */
31965@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
31966 return 1;
31967
31968 /* check if the resource is valid */
31969- if (*dma < 0 || *dma == 4 || *dma > 7)
31970+ if (*dma == 4 || *dma > 7)
31971 return 0;
31972
31973 /* check if the resource is reserved */
31974diff -urNp linux-3.0.4/drivers/power/bq27x00_battery.c linux-3.0.4/drivers/power/bq27x00_battery.c
31975--- linux-3.0.4/drivers/power/bq27x00_battery.c 2011-07-21 22:17:23.000000000 -0400
31976+++ linux-3.0.4/drivers/power/bq27x00_battery.c 2011-08-23 21:47:55.000000000 -0400
31977@@ -67,7 +67,7 @@
31978 struct bq27x00_device_info;
31979 struct bq27x00_access_methods {
31980 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
31981-};
31982+} __no_const;
31983
31984 enum bq27x00_chip { BQ27000, BQ27500 };
31985
31986diff -urNp linux-3.0.4/drivers/regulator/max8660.c linux-3.0.4/drivers/regulator/max8660.c
31987--- linux-3.0.4/drivers/regulator/max8660.c 2011-07-21 22:17:23.000000000 -0400
31988+++ linux-3.0.4/drivers/regulator/max8660.c 2011-08-23 21:47:55.000000000 -0400
31989@@ -383,8 +383,10 @@ static int __devinit max8660_probe(struc
31990 max8660->shadow_regs[MAX8660_OVER1] = 5;
31991 } else {
31992 /* Otherwise devices can be toggled via software */
31993- max8660_dcdc_ops.enable = max8660_dcdc_enable;
31994- max8660_dcdc_ops.disable = max8660_dcdc_disable;
31995+ pax_open_kernel();
31996+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
31997+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
31998+ pax_close_kernel();
31999 }
32000
32001 /*
32002diff -urNp linux-3.0.4/drivers/regulator/mc13892-regulator.c linux-3.0.4/drivers/regulator/mc13892-regulator.c
32003--- linux-3.0.4/drivers/regulator/mc13892-regulator.c 2011-07-21 22:17:23.000000000 -0400
32004+++ linux-3.0.4/drivers/regulator/mc13892-regulator.c 2011-08-23 21:47:55.000000000 -0400
32005@@ -564,10 +564,12 @@ static int __devinit mc13892_regulator_p
32006 }
32007 mc13xxx_unlock(mc13892);
32008
32009- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
32010+ pax_open_kernel();
32011+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
32012 = mc13892_vcam_set_mode;
32013- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
32014+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
32015 = mc13892_vcam_get_mode;
32016+ pax_close_kernel();
32017 for (i = 0; i < pdata->num_regulators; i++) {
32018 init_data = &pdata->regulators[i];
32019 priv->regulators[i] = regulator_register(
32020diff -urNp linux-3.0.4/drivers/rtc/rtc-dev.c linux-3.0.4/drivers/rtc/rtc-dev.c
32021--- linux-3.0.4/drivers/rtc/rtc-dev.c 2011-07-21 22:17:23.000000000 -0400
32022+++ linux-3.0.4/drivers/rtc/rtc-dev.c 2011-08-23 21:48:14.000000000 -0400
32023@@ -14,6 +14,7 @@
32024 #include <linux/module.h>
32025 #include <linux/rtc.h>
32026 #include <linux/sched.h>
32027+#include <linux/grsecurity.h>
32028 #include "rtc-core.h"
32029
32030 static dev_t rtc_devt;
32031@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *f
32032 if (copy_from_user(&tm, uarg, sizeof(tm)))
32033 return -EFAULT;
32034
32035+ gr_log_timechange();
32036+
32037 return rtc_set_time(rtc, &tm);
32038
32039 case RTC_PIE_ON:
32040diff -urNp linux-3.0.4/drivers/scsi/aacraid/aacraid.h linux-3.0.4/drivers/scsi/aacraid/aacraid.h
32041--- linux-3.0.4/drivers/scsi/aacraid/aacraid.h 2011-07-21 22:17:23.000000000 -0400
32042+++ linux-3.0.4/drivers/scsi/aacraid/aacraid.h 2011-08-23 21:47:55.000000000 -0400
32043@@ -492,7 +492,7 @@ struct adapter_ops
32044 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
32045 /* Administrative operations */
32046 int (*adapter_comm)(struct aac_dev * dev, int comm);
32047-};
32048+} __no_const;
32049
32050 /*
32051 * Define which interrupt handler needs to be installed
32052diff -urNp linux-3.0.4/drivers/scsi/aacraid/commctrl.c linux-3.0.4/drivers/scsi/aacraid/commctrl.c
32053--- linux-3.0.4/drivers/scsi/aacraid/commctrl.c 2011-07-21 22:17:23.000000000 -0400
32054+++ linux-3.0.4/drivers/scsi/aacraid/commctrl.c 2011-08-23 21:48:14.000000000 -0400
32055@@ -482,6 +482,7 @@ static int aac_send_raw_srb(struct aac_d
32056 u32 actual_fibsize64, actual_fibsize = 0;
32057 int i;
32058
32059+ pax_track_stack();
32060
32061 if (dev->in_reset) {
32062 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
32063diff -urNp linux-3.0.4/drivers/scsi/bfa/bfad.c linux-3.0.4/drivers/scsi/bfa/bfad.c
32064--- linux-3.0.4/drivers/scsi/bfa/bfad.c 2011-07-21 22:17:23.000000000 -0400
32065+++ linux-3.0.4/drivers/scsi/bfa/bfad.c 2011-08-23 21:48:14.000000000 -0400
32066@@ -1032,6 +1032,8 @@ bfad_start_ops(struct bfad_s *bfad) {
32067 struct bfad_vport_s *vport, *vport_new;
32068 struct bfa_fcs_driver_info_s driver_info;
32069
32070+ pax_track_stack();
32071+
32072 /* Fill the driver_info info to fcs*/
32073 memset(&driver_info, 0, sizeof(driver_info));
32074 strncpy(driver_info.version, BFAD_DRIVER_VERSION,
32075diff -urNp linux-3.0.4/drivers/scsi/bfa/bfa_fcs_lport.c linux-3.0.4/drivers/scsi/bfa/bfa_fcs_lport.c
32076--- linux-3.0.4/drivers/scsi/bfa/bfa_fcs_lport.c 2011-07-21 22:17:23.000000000 -0400
32077+++ linux-3.0.4/drivers/scsi/bfa/bfa_fcs_lport.c 2011-08-23 21:48:14.000000000 -0400
32078@@ -1559,6 +1559,8 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struc
32079 u16 len, count;
32080 u16 templen;
32081
32082+ pax_track_stack();
32083+
32084 /*
32085 * get hba attributes
32086 */
32087@@ -1836,6 +1838,8 @@ bfa_fcs_lport_fdmi_build_portattr_block(
32088 u8 count = 0;
32089 u16 templen;
32090
32091+ pax_track_stack();
32092+
32093 /*
32094 * get port attributes
32095 */
32096diff -urNp linux-3.0.4/drivers/scsi/bfa/bfa_fcs_rport.c linux-3.0.4/drivers/scsi/bfa/bfa_fcs_rport.c
32097--- linux-3.0.4/drivers/scsi/bfa/bfa_fcs_rport.c 2011-07-21 22:17:23.000000000 -0400
32098+++ linux-3.0.4/drivers/scsi/bfa/bfa_fcs_rport.c 2011-08-23 21:48:14.000000000 -0400
32099@@ -1844,6 +1844,8 @@ bfa_fcs_rport_process_rpsc(struct bfa_fc
32100 struct fc_rpsc_speed_info_s speeds;
32101 struct bfa_port_attr_s pport_attr;
32102
32103+ pax_track_stack();
32104+
32105 bfa_trc(port->fcs, rx_fchs->s_id);
32106 bfa_trc(port->fcs, rx_fchs->d_id);
32107
32108diff -urNp linux-3.0.4/drivers/scsi/bfa/bfa.h linux-3.0.4/drivers/scsi/bfa/bfa.h
32109--- linux-3.0.4/drivers/scsi/bfa/bfa.h 2011-07-21 22:17:23.000000000 -0400
32110+++ linux-3.0.4/drivers/scsi/bfa/bfa.h 2011-08-23 21:47:55.000000000 -0400
32111@@ -238,7 +238,7 @@ struct bfa_hwif_s {
32112 u32 *nvecs, u32 *maxvec);
32113 void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
32114 u32 *end);
32115-};
32116+} __no_const;
32117 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
32118
32119 struct bfa_iocfc_s {
32120diff -urNp linux-3.0.4/drivers/scsi/bfa/bfa_ioc.h linux-3.0.4/drivers/scsi/bfa/bfa_ioc.h
32121--- linux-3.0.4/drivers/scsi/bfa/bfa_ioc.h 2011-07-21 22:17:23.000000000 -0400
32122+++ linux-3.0.4/drivers/scsi/bfa/bfa_ioc.h 2011-08-23 21:47:55.000000000 -0400
32123@@ -196,7 +196,7 @@ struct bfa_ioc_cbfn_s {
32124 bfa_ioc_disable_cbfn_t disable_cbfn;
32125 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
32126 bfa_ioc_reset_cbfn_t reset_cbfn;
32127-};
32128+} __no_const;
32129
32130 /*
32131 * Heartbeat failure notification queue element.
32132@@ -268,7 +268,7 @@ struct bfa_ioc_hwif_s {
32133 void (*ioc_sync_leave) (struct bfa_ioc_s *ioc);
32134 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
32135 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
32136-};
32137+} __no_const;
32138
32139 #define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
32140 #define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id)
32141diff -urNp linux-3.0.4/drivers/scsi/BusLogic.c linux-3.0.4/drivers/scsi/BusLogic.c
32142--- linux-3.0.4/drivers/scsi/BusLogic.c 2011-07-21 22:17:23.000000000 -0400
32143+++ linux-3.0.4/drivers/scsi/BusLogic.c 2011-08-23 21:48:14.000000000 -0400
32144@@ -962,6 +962,8 @@ static int __init BusLogic_InitializeFla
32145 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
32146 *PrototypeHostAdapter)
32147 {
32148+ pax_track_stack();
32149+
32150 /*
32151 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
32152 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
32153diff -urNp linux-3.0.4/drivers/scsi/dpt_i2o.c linux-3.0.4/drivers/scsi/dpt_i2o.c
32154--- linux-3.0.4/drivers/scsi/dpt_i2o.c 2011-07-21 22:17:23.000000000 -0400
32155+++ linux-3.0.4/drivers/scsi/dpt_i2o.c 2011-08-23 21:48:14.000000000 -0400
32156@@ -1811,6 +1811,8 @@ static int adpt_i2o_passthru(adpt_hba* p
32157 dma_addr_t addr;
32158 ulong flags = 0;
32159
32160+ pax_track_stack();
32161+
32162 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
32163 // get user msg size in u32s
32164 if(get_user(size, &user_msg[0])){
32165@@ -2317,6 +2319,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
32166 s32 rcode;
32167 dma_addr_t addr;
32168
32169+ pax_track_stack();
32170+
32171 memset(msg, 0 , sizeof(msg));
32172 len = scsi_bufflen(cmd);
32173 direction = 0x00000000;
32174diff -urNp linux-3.0.4/drivers/scsi/eata.c linux-3.0.4/drivers/scsi/eata.c
32175--- linux-3.0.4/drivers/scsi/eata.c 2011-07-21 22:17:23.000000000 -0400
32176+++ linux-3.0.4/drivers/scsi/eata.c 2011-08-23 21:48:14.000000000 -0400
32177@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
32178 struct hostdata *ha;
32179 char name[16];
32180
32181+ pax_track_stack();
32182+
32183 sprintf(name, "%s%d", driver_name, j);
32184
32185 if (!request_region(port_base, REGION_SIZE, driver_name)) {
32186diff -urNp linux-3.0.4/drivers/scsi/fcoe/fcoe_ctlr.c linux-3.0.4/drivers/scsi/fcoe/fcoe_ctlr.c
32187--- linux-3.0.4/drivers/scsi/fcoe/fcoe_ctlr.c 2011-07-21 22:17:23.000000000 -0400
32188+++ linux-3.0.4/drivers/scsi/fcoe/fcoe_ctlr.c 2011-08-23 21:48:14.000000000 -0400
32189@@ -2503,6 +2503,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe
32190 } buf;
32191 int rc;
32192
32193+ pax_track_stack();
32194+
32195 fiph = (struct fip_header *)skb->data;
32196 sub = fiph->fip_subcode;
32197
32198diff -urNp linux-3.0.4/drivers/scsi/gdth.c linux-3.0.4/drivers/scsi/gdth.c
32199--- linux-3.0.4/drivers/scsi/gdth.c 2011-07-21 22:17:23.000000000 -0400
32200+++ linux-3.0.4/drivers/scsi/gdth.c 2011-08-23 21:48:14.000000000 -0400
32201@@ -4107,6 +4107,8 @@ static int ioc_lockdrv(void __user *arg)
32202 unsigned long flags;
32203 gdth_ha_str *ha;
32204
32205+ pax_track_stack();
32206+
32207 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
32208 return -EFAULT;
32209 ha = gdth_find_ha(ldrv.ionode);
32210@@ -4139,6 +4141,8 @@ static int ioc_resetdrv(void __user *arg
32211 gdth_ha_str *ha;
32212 int rval;
32213
32214+ pax_track_stack();
32215+
32216 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
32217 res.number >= MAX_HDRIVES)
32218 return -EFAULT;
32219@@ -4174,6 +4178,8 @@ static int ioc_general(void __user *arg,
32220 gdth_ha_str *ha;
32221 int rval;
32222
32223+ pax_track_stack();
32224+
32225 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
32226 return -EFAULT;
32227 ha = gdth_find_ha(gen.ionode);
32228@@ -4642,6 +4648,9 @@ static void gdth_flush(gdth_ha_str *ha)
32229 int i;
32230 gdth_cmd_str gdtcmd;
32231 char cmnd[MAX_COMMAND_SIZE];
32232+
32233+ pax_track_stack();
32234+
32235 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
32236
32237 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
32238diff -urNp linux-3.0.4/drivers/scsi/gdth_proc.c linux-3.0.4/drivers/scsi/gdth_proc.c
32239--- linux-3.0.4/drivers/scsi/gdth_proc.c 2011-07-21 22:17:23.000000000 -0400
32240+++ linux-3.0.4/drivers/scsi/gdth_proc.c 2011-08-23 21:48:14.000000000 -0400
32241@@ -47,6 +47,9 @@ static int gdth_set_asc_info(struct Scsi
32242 u64 paddr;
32243
32244 char cmnd[MAX_COMMAND_SIZE];
32245+
32246+ pax_track_stack();
32247+
32248 memset(cmnd, 0xff, 12);
32249 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
32250
32251@@ -175,6 +178,8 @@ static int gdth_get_info(char *buffer,ch
32252 gdth_hget_str *phg;
32253 char cmnd[MAX_COMMAND_SIZE];
32254
32255+ pax_track_stack();
32256+
32257 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
32258 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
32259 if (!gdtcmd || !estr)
32260diff -urNp linux-3.0.4/drivers/scsi/hosts.c linux-3.0.4/drivers/scsi/hosts.c
32261--- linux-3.0.4/drivers/scsi/hosts.c 2011-07-21 22:17:23.000000000 -0400
32262+++ linux-3.0.4/drivers/scsi/hosts.c 2011-08-23 21:47:55.000000000 -0400
32263@@ -42,7 +42,7 @@
32264 #include "scsi_logging.h"
32265
32266
32267-static atomic_t scsi_host_next_hn; /* host_no for next new host */
32268+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
32269
32270
32271 static void scsi_host_cls_release(struct device *dev)
32272@@ -354,7 +354,7 @@ struct Scsi_Host *scsi_host_alloc(struct
32273 * subtract one because we increment first then return, but we need to
32274 * know what the next host number was before increment
32275 */
32276- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
32277+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
32278 shost->dma_channel = 0xff;
32279
32280 /* These three are default values which can be overridden */
32281diff -urNp linux-3.0.4/drivers/scsi/hpsa.c linux-3.0.4/drivers/scsi/hpsa.c
32282--- linux-3.0.4/drivers/scsi/hpsa.c 2011-07-21 22:17:23.000000000 -0400
32283+++ linux-3.0.4/drivers/scsi/hpsa.c 2011-08-23 21:47:55.000000000 -0400
32284@@ -498,7 +498,7 @@ static inline u32 next_command(struct ct
32285 u32 a;
32286
32287 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
32288- return h->access.command_completed(h);
32289+ return h->access->command_completed(h);
32290
32291 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
32292 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
32293@@ -2938,7 +2938,7 @@ static void start_io(struct ctlr_info *h
32294 while (!list_empty(&h->reqQ)) {
32295 c = list_entry(h->reqQ.next, struct CommandList, list);
32296 /* can't do anything if fifo is full */
32297- if ((h->access.fifo_full(h))) {
32298+ if ((h->access->fifo_full(h))) {
32299 dev_warn(&h->pdev->dev, "fifo full\n");
32300 break;
32301 }
32302@@ -2948,7 +2948,7 @@ static void start_io(struct ctlr_info *h
32303 h->Qdepth--;
32304
32305 /* Tell the controller execute command */
32306- h->access.submit_command(h, c);
32307+ h->access->submit_command(h, c);
32308
32309 /* Put job onto the completed Q */
32310 addQ(&h->cmpQ, c);
32311@@ -2957,17 +2957,17 @@ static void start_io(struct ctlr_info *h
32312
32313 static inline unsigned long get_next_completion(struct ctlr_info *h)
32314 {
32315- return h->access.command_completed(h);
32316+ return h->access->command_completed(h);
32317 }
32318
32319 static inline bool interrupt_pending(struct ctlr_info *h)
32320 {
32321- return h->access.intr_pending(h);
32322+ return h->access->intr_pending(h);
32323 }
32324
32325 static inline long interrupt_not_for_us(struct ctlr_info *h)
32326 {
32327- return (h->access.intr_pending(h) == 0) ||
32328+ return (h->access->intr_pending(h) == 0) ||
32329 (h->interrupts_enabled == 0);
32330 }
32331
32332@@ -3857,7 +3857,7 @@ static int __devinit hpsa_pci_init(struc
32333 if (prod_index < 0)
32334 return -ENODEV;
32335 h->product_name = products[prod_index].product_name;
32336- h->access = *(products[prod_index].access);
32337+ h->access = products[prod_index].access;
32338
32339 if (hpsa_board_disabled(h->pdev)) {
32340 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
32341@@ -4134,7 +4134,7 @@ reinit_after_soft_reset:
32342 }
32343
32344 /* make sure the board interrupts are off */
32345- h->access.set_intr_mask(h, HPSA_INTR_OFF);
32346+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
32347
32348 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
32349 goto clean2;
32350@@ -4168,7 +4168,7 @@ reinit_after_soft_reset:
32351 * fake ones to scoop up any residual completions.
32352 */
32353 spin_lock_irqsave(&h->lock, flags);
32354- h->access.set_intr_mask(h, HPSA_INTR_OFF);
32355+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
32356 spin_unlock_irqrestore(&h->lock, flags);
32357 free_irq(h->intr[h->intr_mode], h);
32358 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
32359@@ -4187,9 +4187,9 @@ reinit_after_soft_reset:
32360 dev_info(&h->pdev->dev, "Board READY.\n");
32361 dev_info(&h->pdev->dev,
32362 "Waiting for stale completions to drain.\n");
32363- h->access.set_intr_mask(h, HPSA_INTR_ON);
32364+ h->access->set_intr_mask(h, HPSA_INTR_ON);
32365 msleep(10000);
32366- h->access.set_intr_mask(h, HPSA_INTR_OFF);
32367+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
32368
32369 rc = controller_reset_failed(h->cfgtable);
32370 if (rc)
32371@@ -4210,7 +4210,7 @@ reinit_after_soft_reset:
32372 }
32373
32374 /* Turn the interrupts on so we can service requests */
32375- h->access.set_intr_mask(h, HPSA_INTR_ON);
32376+ h->access->set_intr_mask(h, HPSA_INTR_ON);
32377
32378 hpsa_hba_inquiry(h);
32379 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
32380@@ -4263,7 +4263,7 @@ static void hpsa_shutdown(struct pci_dev
32381 * To write all data in the battery backed cache to disks
32382 */
32383 hpsa_flush_cache(h);
32384- h->access.set_intr_mask(h, HPSA_INTR_OFF);
32385+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
32386 free_irq(h->intr[h->intr_mode], h);
32387 #ifdef CONFIG_PCI_MSI
32388 if (h->msix_vector)
32389@@ -4426,7 +4426,7 @@ static __devinit void hpsa_enter_perform
32390 return;
32391 }
32392 /* Change the access methods to the performant access methods */
32393- h->access = SA5_performant_access;
32394+ h->access = &SA5_performant_access;
32395 h->transMethod = CFGTBL_Trans_Performant;
32396 }
32397
32398diff -urNp linux-3.0.4/drivers/scsi/hpsa.h linux-3.0.4/drivers/scsi/hpsa.h
32399--- linux-3.0.4/drivers/scsi/hpsa.h 2011-09-02 18:11:21.000000000 -0400
32400+++ linux-3.0.4/drivers/scsi/hpsa.h 2011-08-23 21:47:55.000000000 -0400
32401@@ -73,7 +73,7 @@ struct ctlr_info {
32402 unsigned int msix_vector;
32403 unsigned int msi_vector;
32404 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
32405- struct access_method access;
32406+ struct access_method *access;
32407
32408 /* queue and queue Info */
32409 struct list_head reqQ;
32410diff -urNp linux-3.0.4/drivers/scsi/ips.h linux-3.0.4/drivers/scsi/ips.h
32411--- linux-3.0.4/drivers/scsi/ips.h 2011-07-21 22:17:23.000000000 -0400
32412+++ linux-3.0.4/drivers/scsi/ips.h 2011-08-23 21:47:55.000000000 -0400
32413@@ -1027,7 +1027,7 @@ typedef struct {
32414 int (*intr)(struct ips_ha *);
32415 void (*enableint)(struct ips_ha *);
32416 uint32_t (*statupd)(struct ips_ha *);
32417-} ips_hw_func_t;
32418+} __no_const ips_hw_func_t;
32419
32420 typedef struct ips_ha {
32421 uint8_t ha_id[IPS_MAX_CHANNELS+1];
32422diff -urNp linux-3.0.4/drivers/scsi/libfc/fc_exch.c linux-3.0.4/drivers/scsi/libfc/fc_exch.c
32423--- linux-3.0.4/drivers/scsi/libfc/fc_exch.c 2011-07-21 22:17:23.000000000 -0400
32424+++ linux-3.0.4/drivers/scsi/libfc/fc_exch.c 2011-08-23 21:47:55.000000000 -0400
32425@@ -105,12 +105,12 @@ struct fc_exch_mgr {
32426 * all together if not used XXX
32427 */
32428 struct {
32429- atomic_t no_free_exch;
32430- atomic_t no_free_exch_xid;
32431- atomic_t xid_not_found;
32432- atomic_t xid_busy;
32433- atomic_t seq_not_found;
32434- atomic_t non_bls_resp;
32435+ atomic_unchecked_t no_free_exch;
32436+ atomic_unchecked_t no_free_exch_xid;
32437+ atomic_unchecked_t xid_not_found;
32438+ atomic_unchecked_t xid_busy;
32439+ atomic_unchecked_t seq_not_found;
32440+ atomic_unchecked_t non_bls_resp;
32441 } stats;
32442 };
32443
32444@@ -700,7 +700,7 @@ static struct fc_exch *fc_exch_em_alloc(
32445 /* allocate memory for exchange */
32446 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
32447 if (!ep) {
32448- atomic_inc(&mp->stats.no_free_exch);
32449+ atomic_inc_unchecked(&mp->stats.no_free_exch);
32450 goto out;
32451 }
32452 memset(ep, 0, sizeof(*ep));
32453@@ -761,7 +761,7 @@ out:
32454 return ep;
32455 err:
32456 spin_unlock_bh(&pool->lock);
32457- atomic_inc(&mp->stats.no_free_exch_xid);
32458+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
32459 mempool_free(ep, mp->ep_pool);
32460 return NULL;
32461 }
32462@@ -906,7 +906,7 @@ static enum fc_pf_rjt_reason fc_seq_look
32463 xid = ntohs(fh->fh_ox_id); /* we originated exch */
32464 ep = fc_exch_find(mp, xid);
32465 if (!ep) {
32466- atomic_inc(&mp->stats.xid_not_found);
32467+ atomic_inc_unchecked(&mp->stats.xid_not_found);
32468 reject = FC_RJT_OX_ID;
32469 goto out;
32470 }
32471@@ -936,7 +936,7 @@ static enum fc_pf_rjt_reason fc_seq_look
32472 ep = fc_exch_find(mp, xid);
32473 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
32474 if (ep) {
32475- atomic_inc(&mp->stats.xid_busy);
32476+ atomic_inc_unchecked(&mp->stats.xid_busy);
32477 reject = FC_RJT_RX_ID;
32478 goto rel;
32479 }
32480@@ -947,7 +947,7 @@ static enum fc_pf_rjt_reason fc_seq_look
32481 }
32482 xid = ep->xid; /* get our XID */
32483 } else if (!ep) {
32484- atomic_inc(&mp->stats.xid_not_found);
32485+ atomic_inc_unchecked(&mp->stats.xid_not_found);
32486 reject = FC_RJT_RX_ID; /* XID not found */
32487 goto out;
32488 }
32489@@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_look
32490 } else {
32491 sp = &ep->seq;
32492 if (sp->id != fh->fh_seq_id) {
32493- atomic_inc(&mp->stats.seq_not_found);
32494+ atomic_inc_unchecked(&mp->stats.seq_not_found);
32495 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
32496 goto rel;
32497 }
32498@@ -1392,22 +1392,22 @@ static void fc_exch_recv_seq_resp(struct
32499
32500 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
32501 if (!ep) {
32502- atomic_inc(&mp->stats.xid_not_found);
32503+ atomic_inc_unchecked(&mp->stats.xid_not_found);
32504 goto out;
32505 }
32506 if (ep->esb_stat & ESB_ST_COMPLETE) {
32507- atomic_inc(&mp->stats.xid_not_found);
32508+ atomic_inc_unchecked(&mp->stats.xid_not_found);
32509 goto rel;
32510 }
32511 if (ep->rxid == FC_XID_UNKNOWN)
32512 ep->rxid = ntohs(fh->fh_rx_id);
32513 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
32514- atomic_inc(&mp->stats.xid_not_found);
32515+ atomic_inc_unchecked(&mp->stats.xid_not_found);
32516 goto rel;
32517 }
32518 if (ep->did != ntoh24(fh->fh_s_id) &&
32519 ep->did != FC_FID_FLOGI) {
32520- atomic_inc(&mp->stats.xid_not_found);
32521+ atomic_inc_unchecked(&mp->stats.xid_not_found);
32522 goto rel;
32523 }
32524 sof = fr_sof(fp);
32525@@ -1416,7 +1416,7 @@ static void fc_exch_recv_seq_resp(struct
32526 sp->ssb_stat |= SSB_ST_RESP;
32527 sp->id = fh->fh_seq_id;
32528 } else if (sp->id != fh->fh_seq_id) {
32529- atomic_inc(&mp->stats.seq_not_found);
32530+ atomic_inc_unchecked(&mp->stats.seq_not_found);
32531 goto rel;
32532 }
32533
32534@@ -1480,9 +1480,9 @@ static void fc_exch_recv_resp(struct fc_
32535 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
32536
32537 if (!sp)
32538- atomic_inc(&mp->stats.xid_not_found);
32539+ atomic_inc_unchecked(&mp->stats.xid_not_found);
32540 else
32541- atomic_inc(&mp->stats.non_bls_resp);
32542+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
32543
32544 fc_frame_free(fp);
32545 }
32546diff -urNp linux-3.0.4/drivers/scsi/libsas/sas_ata.c linux-3.0.4/drivers/scsi/libsas/sas_ata.c
32547--- linux-3.0.4/drivers/scsi/libsas/sas_ata.c 2011-07-21 22:17:23.000000000 -0400
32548+++ linux-3.0.4/drivers/scsi/libsas/sas_ata.c 2011-08-23 21:47:55.000000000 -0400
32549@@ -368,7 +368,7 @@ static struct ata_port_operations sas_sa
32550 .postreset = ata_std_postreset,
32551 .error_handler = ata_std_error_handler,
32552 .post_internal_cmd = sas_ata_post_internal,
32553- .qc_defer = ata_std_qc_defer,
32554+ .qc_defer = ata_std_qc_defer,
32555 .qc_prep = ata_noop_qc_prep,
32556 .qc_issue = sas_ata_qc_issue,
32557 .qc_fill_rtf = sas_ata_qc_fill_rtf,
32558diff -urNp linux-3.0.4/drivers/scsi/lpfc/lpfc_debugfs.c linux-3.0.4/drivers/scsi/lpfc/lpfc_debugfs.c
32559--- linux-3.0.4/drivers/scsi/lpfc/lpfc_debugfs.c 2011-07-21 22:17:23.000000000 -0400
32560+++ linux-3.0.4/drivers/scsi/lpfc/lpfc_debugfs.c 2011-08-23 21:48:14.000000000 -0400
32561@@ -104,7 +104,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_
32562
32563 #include <linux/debugfs.h>
32564
32565-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
32566+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
32567 static unsigned long lpfc_debugfs_start_time = 0L;
32568
32569 /* iDiag */
32570@@ -141,7 +141,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
32571 lpfc_debugfs_enable = 0;
32572
32573 len = 0;
32574- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
32575+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
32576 (lpfc_debugfs_max_disc_trc - 1);
32577 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
32578 dtp = vport->disc_trc + i;
32579@@ -202,7 +202,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
32580 lpfc_debugfs_enable = 0;
32581
32582 len = 0;
32583- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
32584+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
32585 (lpfc_debugfs_max_slow_ring_trc - 1);
32586 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
32587 dtp = phba->slow_ring_trc + i;
32588@@ -380,6 +380,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
32589 uint32_t *ptr;
32590 char buffer[1024];
32591
32592+ pax_track_stack();
32593+
32594 off = 0;
32595 spin_lock_irq(&phba->hbalock);
32596
32597@@ -617,14 +619,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
32598 !vport || !vport->disc_trc)
32599 return;
32600
32601- index = atomic_inc_return(&vport->disc_trc_cnt) &
32602+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
32603 (lpfc_debugfs_max_disc_trc - 1);
32604 dtp = vport->disc_trc + index;
32605 dtp->fmt = fmt;
32606 dtp->data1 = data1;
32607 dtp->data2 = data2;
32608 dtp->data3 = data3;
32609- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
32610+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
32611 dtp->jif = jiffies;
32612 #endif
32613 return;
32614@@ -655,14 +657,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
32615 !phba || !phba->slow_ring_trc)
32616 return;
32617
32618- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
32619+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
32620 (lpfc_debugfs_max_slow_ring_trc - 1);
32621 dtp = phba->slow_ring_trc + index;
32622 dtp->fmt = fmt;
32623 dtp->data1 = data1;
32624 dtp->data2 = data2;
32625 dtp->data3 = data3;
32626- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
32627+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
32628 dtp->jif = jiffies;
32629 #endif
32630 return;
32631@@ -2606,7 +2608,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
32632 "slow_ring buffer\n");
32633 goto debug_failed;
32634 }
32635- atomic_set(&phba->slow_ring_trc_cnt, 0);
32636+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
32637 memset(phba->slow_ring_trc, 0,
32638 (sizeof(struct lpfc_debugfs_trc) *
32639 lpfc_debugfs_max_slow_ring_trc));
32640@@ -2652,7 +2654,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
32641 "buffer\n");
32642 goto debug_failed;
32643 }
32644- atomic_set(&vport->disc_trc_cnt, 0);
32645+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
32646
32647 snprintf(name, sizeof(name), "discovery_trace");
32648 vport->debug_disc_trc =
32649diff -urNp linux-3.0.4/drivers/scsi/lpfc/lpfc.h linux-3.0.4/drivers/scsi/lpfc/lpfc.h
32650--- linux-3.0.4/drivers/scsi/lpfc/lpfc.h 2011-07-21 22:17:23.000000000 -0400
32651+++ linux-3.0.4/drivers/scsi/lpfc/lpfc.h 2011-08-23 21:47:55.000000000 -0400
32652@@ -420,7 +420,7 @@ struct lpfc_vport {
32653 struct dentry *debug_nodelist;
32654 struct dentry *vport_debugfs_root;
32655 struct lpfc_debugfs_trc *disc_trc;
32656- atomic_t disc_trc_cnt;
32657+ atomic_unchecked_t disc_trc_cnt;
32658 #endif
32659 uint8_t stat_data_enabled;
32660 uint8_t stat_data_blocked;
32661@@ -826,8 +826,8 @@ struct lpfc_hba {
32662 struct timer_list fabric_block_timer;
32663 unsigned long bit_flags;
32664 #define FABRIC_COMANDS_BLOCKED 0
32665- atomic_t num_rsrc_err;
32666- atomic_t num_cmd_success;
32667+ atomic_unchecked_t num_rsrc_err;
32668+ atomic_unchecked_t num_cmd_success;
32669 unsigned long last_rsrc_error_time;
32670 unsigned long last_ramp_down_time;
32671 unsigned long last_ramp_up_time;
32672@@ -841,7 +841,7 @@ struct lpfc_hba {
32673 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
32674 struct dentry *debug_slow_ring_trc;
32675 struct lpfc_debugfs_trc *slow_ring_trc;
32676- atomic_t slow_ring_trc_cnt;
32677+ atomic_unchecked_t slow_ring_trc_cnt;
32678 /* iDiag debugfs sub-directory */
32679 struct dentry *idiag_root;
32680 struct dentry *idiag_pci_cfg;
32681diff -urNp linux-3.0.4/drivers/scsi/lpfc/lpfc_init.c linux-3.0.4/drivers/scsi/lpfc/lpfc_init.c
32682--- linux-3.0.4/drivers/scsi/lpfc/lpfc_init.c 2011-07-21 22:17:23.000000000 -0400
32683+++ linux-3.0.4/drivers/scsi/lpfc/lpfc_init.c 2011-08-23 21:47:56.000000000 -0400
32684@@ -9923,8 +9923,10 @@ lpfc_init(void)
32685 printk(LPFC_COPYRIGHT "\n");
32686
32687 if (lpfc_enable_npiv) {
32688- lpfc_transport_functions.vport_create = lpfc_vport_create;
32689- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
32690+ pax_open_kernel();
32691+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
32692+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
32693+ pax_close_kernel();
32694 }
32695 lpfc_transport_template =
32696 fc_attach_transport(&lpfc_transport_functions);
32697diff -urNp linux-3.0.4/drivers/scsi/lpfc/lpfc_scsi.c linux-3.0.4/drivers/scsi/lpfc/lpfc_scsi.c
32698--- linux-3.0.4/drivers/scsi/lpfc/lpfc_scsi.c 2011-07-21 22:17:23.000000000 -0400
32699+++ linux-3.0.4/drivers/scsi/lpfc/lpfc_scsi.c 2011-08-23 21:47:56.000000000 -0400
32700@@ -297,7 +297,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
32701 uint32_t evt_posted;
32702
32703 spin_lock_irqsave(&phba->hbalock, flags);
32704- atomic_inc(&phba->num_rsrc_err);
32705+ atomic_inc_unchecked(&phba->num_rsrc_err);
32706 phba->last_rsrc_error_time = jiffies;
32707
32708 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
32709@@ -338,7 +338,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
32710 unsigned long flags;
32711 struct lpfc_hba *phba = vport->phba;
32712 uint32_t evt_posted;
32713- atomic_inc(&phba->num_cmd_success);
32714+ atomic_inc_unchecked(&phba->num_cmd_success);
32715
32716 if (vport->cfg_lun_queue_depth <= queue_depth)
32717 return;
32718@@ -382,8 +382,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
32719 unsigned long num_rsrc_err, num_cmd_success;
32720 int i;
32721
32722- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
32723- num_cmd_success = atomic_read(&phba->num_cmd_success);
32724+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
32725+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
32726
32727 vports = lpfc_create_vport_work_array(phba);
32728 if (vports != NULL)
32729@@ -403,8 +403,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
32730 }
32731 }
32732 lpfc_destroy_vport_work_array(phba, vports);
32733- atomic_set(&phba->num_rsrc_err, 0);
32734- atomic_set(&phba->num_cmd_success, 0);
32735+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
32736+ atomic_set_unchecked(&phba->num_cmd_success, 0);
32737 }
32738
32739 /**
32740@@ -438,8 +438,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
32741 }
32742 }
32743 lpfc_destroy_vport_work_array(phba, vports);
32744- atomic_set(&phba->num_rsrc_err, 0);
32745- atomic_set(&phba->num_cmd_success, 0);
32746+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
32747+ atomic_set_unchecked(&phba->num_cmd_success, 0);
32748 }
32749
32750 /**
32751diff -urNp linux-3.0.4/drivers/scsi/megaraid/megaraid_mbox.c linux-3.0.4/drivers/scsi/megaraid/megaraid_mbox.c
32752--- linux-3.0.4/drivers/scsi/megaraid/megaraid_mbox.c 2011-07-21 22:17:23.000000000 -0400
32753+++ linux-3.0.4/drivers/scsi/megaraid/megaraid_mbox.c 2011-08-23 21:48:14.000000000 -0400
32754@@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
32755 int rval;
32756 int i;
32757
32758+ pax_track_stack();
32759+
32760 // Allocate memory for the base list of scb for management module.
32761 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
32762
32763diff -urNp linux-3.0.4/drivers/scsi/osd/osd_initiator.c linux-3.0.4/drivers/scsi/osd/osd_initiator.c
32764--- linux-3.0.4/drivers/scsi/osd/osd_initiator.c 2011-07-21 22:17:23.000000000 -0400
32765+++ linux-3.0.4/drivers/scsi/osd/osd_initiator.c 2011-08-23 21:48:14.000000000 -0400
32766@@ -97,6 +97,8 @@ static int _osd_get_print_system_info(st
32767 int nelem = ARRAY_SIZE(get_attrs), a = 0;
32768 int ret;
32769
32770+ pax_track_stack();
32771+
32772 or = osd_start_request(od, GFP_KERNEL);
32773 if (!or)
32774 return -ENOMEM;
32775diff -urNp linux-3.0.4/drivers/scsi/pmcraid.c linux-3.0.4/drivers/scsi/pmcraid.c
32776--- linux-3.0.4/drivers/scsi/pmcraid.c 2011-09-02 18:11:21.000000000 -0400
32777+++ linux-3.0.4/drivers/scsi/pmcraid.c 2011-08-23 21:47:56.000000000 -0400
32778@@ -201,8 +201,8 @@ static int pmcraid_slave_alloc(struct sc
32779 res->scsi_dev = scsi_dev;
32780 scsi_dev->hostdata = res;
32781 res->change_detected = 0;
32782- atomic_set(&res->read_failures, 0);
32783- atomic_set(&res->write_failures, 0);
32784+ atomic_set_unchecked(&res->read_failures, 0);
32785+ atomic_set_unchecked(&res->write_failures, 0);
32786 rc = 0;
32787 }
32788 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
32789@@ -2677,9 +2677,9 @@ static int pmcraid_error_handler(struct
32790
32791 /* If this was a SCSI read/write command keep count of errors */
32792 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
32793- atomic_inc(&res->read_failures);
32794+ atomic_inc_unchecked(&res->read_failures);
32795 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
32796- atomic_inc(&res->write_failures);
32797+ atomic_inc_unchecked(&res->write_failures);
32798
32799 if (!RES_IS_GSCSI(res->cfg_entry) &&
32800 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
32801@@ -3535,7 +3535,7 @@ static int pmcraid_queuecommand_lck(
32802 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
32803 * hrrq_id assigned here in queuecommand
32804 */
32805- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
32806+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
32807 pinstance->num_hrrq;
32808 cmd->cmd_done = pmcraid_io_done;
32809
32810@@ -3860,7 +3860,7 @@ static long pmcraid_ioctl_passthrough(
32811 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
32812 * hrrq_id assigned here in queuecommand
32813 */
32814- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
32815+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
32816 pinstance->num_hrrq;
32817
32818 if (request_size) {
32819@@ -4498,7 +4498,7 @@ static void pmcraid_worker_function(stru
32820
32821 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
32822 /* add resources only after host is added into system */
32823- if (!atomic_read(&pinstance->expose_resources))
32824+ if (!atomic_read_unchecked(&pinstance->expose_resources))
32825 return;
32826
32827 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
32828@@ -5332,8 +5332,8 @@ static int __devinit pmcraid_init_instan
32829 init_waitqueue_head(&pinstance->reset_wait_q);
32830
32831 atomic_set(&pinstance->outstanding_cmds, 0);
32832- atomic_set(&pinstance->last_message_id, 0);
32833- atomic_set(&pinstance->expose_resources, 0);
32834+ atomic_set_unchecked(&pinstance->last_message_id, 0);
32835+ atomic_set_unchecked(&pinstance->expose_resources, 0);
32836
32837 INIT_LIST_HEAD(&pinstance->free_res_q);
32838 INIT_LIST_HEAD(&pinstance->used_res_q);
32839@@ -6048,7 +6048,7 @@ static int __devinit pmcraid_probe(
32840 /* Schedule worker thread to handle CCN and take care of adding and
32841 * removing devices to OS
32842 */
32843- atomic_set(&pinstance->expose_resources, 1);
32844+ atomic_set_unchecked(&pinstance->expose_resources, 1);
32845 schedule_work(&pinstance->worker_q);
32846 return rc;
32847
32848diff -urNp linux-3.0.4/drivers/scsi/pmcraid.h linux-3.0.4/drivers/scsi/pmcraid.h
32849--- linux-3.0.4/drivers/scsi/pmcraid.h 2011-07-21 22:17:23.000000000 -0400
32850+++ linux-3.0.4/drivers/scsi/pmcraid.h 2011-08-23 21:47:56.000000000 -0400
32851@@ -749,7 +749,7 @@ struct pmcraid_instance {
32852 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
32853
32854 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
32855- atomic_t last_message_id;
32856+ atomic_unchecked_t last_message_id;
32857
32858 /* configuration table */
32859 struct pmcraid_config_table *cfg_table;
32860@@ -778,7 +778,7 @@ struct pmcraid_instance {
32861 atomic_t outstanding_cmds;
32862
32863 /* should add/delete resources to mid-layer now ?*/
32864- atomic_t expose_resources;
32865+ atomic_unchecked_t expose_resources;
32866
32867
32868
32869@@ -814,8 +814,8 @@ struct pmcraid_resource_entry {
32870 struct pmcraid_config_table_entry_ext cfg_entry_ext;
32871 };
32872 struct scsi_device *scsi_dev; /* Link scsi_device structure */
32873- atomic_t read_failures; /* count of failed READ commands */
32874- atomic_t write_failures; /* count of failed WRITE commands */
32875+ atomic_unchecked_t read_failures; /* count of failed READ commands */
32876+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
32877
32878 /* To indicate add/delete/modify during CCN */
32879 u8 change_detected;
32880diff -urNp linux-3.0.4/drivers/scsi/qla2xxx/qla_def.h linux-3.0.4/drivers/scsi/qla2xxx/qla_def.h
32881--- linux-3.0.4/drivers/scsi/qla2xxx/qla_def.h 2011-07-21 22:17:23.000000000 -0400
32882+++ linux-3.0.4/drivers/scsi/qla2xxx/qla_def.h 2011-08-23 21:47:56.000000000 -0400
32883@@ -2244,7 +2244,7 @@ struct isp_operations {
32884 int (*get_flash_version) (struct scsi_qla_host *, void *);
32885 int (*start_scsi) (srb_t *);
32886 int (*abort_isp) (struct scsi_qla_host *);
32887-};
32888+} __no_const;
32889
32890 /* MSI-X Support *************************************************************/
32891
32892diff -urNp linux-3.0.4/drivers/scsi/qla4xxx/ql4_def.h linux-3.0.4/drivers/scsi/qla4xxx/ql4_def.h
32893--- linux-3.0.4/drivers/scsi/qla4xxx/ql4_def.h 2011-07-21 22:17:23.000000000 -0400
32894+++ linux-3.0.4/drivers/scsi/qla4xxx/ql4_def.h 2011-08-23 21:47:56.000000000 -0400
32895@@ -256,7 +256,7 @@ struct ddb_entry {
32896 atomic_t retry_relogin_timer; /* Min Time between relogins
32897 * (4000 only) */
32898 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
32899- atomic_t relogin_retry_count; /* Num of times relogin has been
32900+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
32901 * retried */
32902
32903 uint16_t port;
32904diff -urNp linux-3.0.4/drivers/scsi/qla4xxx/ql4_init.c linux-3.0.4/drivers/scsi/qla4xxx/ql4_init.c
32905--- linux-3.0.4/drivers/scsi/qla4xxx/ql4_init.c 2011-07-21 22:17:23.000000000 -0400
32906+++ linux-3.0.4/drivers/scsi/qla4xxx/ql4_init.c 2011-08-23 21:47:56.000000000 -0400
32907@@ -680,7 +680,7 @@ static struct ddb_entry * qla4xxx_alloc_
32908 ddb_entry->fw_ddb_index = fw_ddb_index;
32909 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
32910 atomic_set(&ddb_entry->relogin_timer, 0);
32911- atomic_set(&ddb_entry->relogin_retry_count, 0);
32912+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
32913 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
32914 list_add_tail(&ddb_entry->list, &ha->ddb_list);
32915 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
32916@@ -1433,7 +1433,7 @@ int qla4xxx_process_ddb_changed(struct s
32917 if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) &&
32918 (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) {
32919 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
32920- atomic_set(&ddb_entry->relogin_retry_count, 0);
32921+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
32922 atomic_set(&ddb_entry->relogin_timer, 0);
32923 clear_bit(DF_RELOGIN, &ddb_entry->flags);
32924 iscsi_unblock_session(ddb_entry->sess);
32925diff -urNp linux-3.0.4/drivers/scsi/qla4xxx/ql4_os.c linux-3.0.4/drivers/scsi/qla4xxx/ql4_os.c
32926--- linux-3.0.4/drivers/scsi/qla4xxx/ql4_os.c 2011-07-21 22:17:23.000000000 -0400
32927+++ linux-3.0.4/drivers/scsi/qla4xxx/ql4_os.c 2011-08-23 21:47:56.000000000 -0400
32928@@ -811,13 +811,13 @@ static void qla4xxx_timer(struct scsi_ql
32929 ddb_entry->fw_ddb_device_state ==
32930 DDB_DS_SESSION_FAILED) {
32931 /* Reset retry relogin timer */
32932- atomic_inc(&ddb_entry->relogin_retry_count);
32933+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
32934 DEBUG2(printk("scsi%ld: ddb [%d] relogin"
32935 " timed out-retrying"
32936 " relogin (%d)\n",
32937 ha->host_no,
32938 ddb_entry->fw_ddb_index,
32939- atomic_read(&ddb_entry->
32940+ atomic_read_unchecked(&ddb_entry->
32941 relogin_retry_count))
32942 );
32943 start_dpc++;
32944diff -urNp linux-3.0.4/drivers/scsi/scsi.c linux-3.0.4/drivers/scsi/scsi.c
32945--- linux-3.0.4/drivers/scsi/scsi.c 2011-07-21 22:17:23.000000000 -0400
32946+++ linux-3.0.4/drivers/scsi/scsi.c 2011-08-23 21:47:56.000000000 -0400
32947@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
32948 unsigned long timeout;
32949 int rtn = 0;
32950
32951- atomic_inc(&cmd->device->iorequest_cnt);
32952+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
32953
32954 /* check if the device is still usable */
32955 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
32956diff -urNp linux-3.0.4/drivers/scsi/scsi_debug.c linux-3.0.4/drivers/scsi/scsi_debug.c
32957--- linux-3.0.4/drivers/scsi/scsi_debug.c 2011-07-21 22:17:23.000000000 -0400
32958+++ linux-3.0.4/drivers/scsi/scsi_debug.c 2011-08-23 21:48:14.000000000 -0400
32959@@ -1493,6 +1493,8 @@ static int resp_mode_select(struct scsi_
32960 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
32961 unsigned char *cmd = (unsigned char *)scp->cmnd;
32962
32963+ pax_track_stack();
32964+
32965 if ((errsts = check_readiness(scp, 1, devip)))
32966 return errsts;
32967 memset(arr, 0, sizeof(arr));
32968@@ -1590,6 +1592,8 @@ static int resp_log_sense(struct scsi_cm
32969 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
32970 unsigned char *cmd = (unsigned char *)scp->cmnd;
32971
32972+ pax_track_stack();
32973+
32974 if ((errsts = check_readiness(scp, 1, devip)))
32975 return errsts;
32976 memset(arr, 0, sizeof(arr));
32977diff -urNp linux-3.0.4/drivers/scsi/scsi_lib.c linux-3.0.4/drivers/scsi/scsi_lib.c
32978--- linux-3.0.4/drivers/scsi/scsi_lib.c 2011-09-02 18:11:21.000000000 -0400
32979+++ linux-3.0.4/drivers/scsi/scsi_lib.c 2011-08-23 21:47:56.000000000 -0400
32980@@ -1412,7 +1412,7 @@ static void scsi_kill_request(struct req
32981 shost = sdev->host;
32982 scsi_init_cmd_errh(cmd);
32983 cmd->result = DID_NO_CONNECT << 16;
32984- atomic_inc(&cmd->device->iorequest_cnt);
32985+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
32986
32987 /*
32988 * SCSI request completion path will do scsi_device_unbusy(),
32989@@ -1438,9 +1438,9 @@ static void scsi_softirq_done(struct req
32990
32991 INIT_LIST_HEAD(&cmd->eh_entry);
32992
32993- atomic_inc(&cmd->device->iodone_cnt);
32994+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
32995 if (cmd->result)
32996- atomic_inc(&cmd->device->ioerr_cnt);
32997+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
32998
32999 disposition = scsi_decide_disposition(cmd);
33000 if (disposition != SUCCESS &&
33001diff -urNp linux-3.0.4/drivers/scsi/scsi_sysfs.c linux-3.0.4/drivers/scsi/scsi_sysfs.c
33002--- linux-3.0.4/drivers/scsi/scsi_sysfs.c 2011-07-21 22:17:23.000000000 -0400
33003+++ linux-3.0.4/drivers/scsi/scsi_sysfs.c 2011-08-23 21:47:56.000000000 -0400
33004@@ -622,7 +622,7 @@ show_iostat_##field(struct device *dev,
33005 char *buf) \
33006 { \
33007 struct scsi_device *sdev = to_scsi_device(dev); \
33008- unsigned long long count = atomic_read(&sdev->field); \
33009+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
33010 return snprintf(buf, 20, "0x%llx\n", count); \
33011 } \
33012 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
33013diff -urNp linux-3.0.4/drivers/scsi/scsi_tgt_lib.c linux-3.0.4/drivers/scsi/scsi_tgt_lib.c
33014--- linux-3.0.4/drivers/scsi/scsi_tgt_lib.c 2011-07-21 22:17:23.000000000 -0400
33015+++ linux-3.0.4/drivers/scsi/scsi_tgt_lib.c 2011-10-06 04:17:55.000000000 -0400
33016@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct sc
33017 int err;
33018
33019 dprintk("%lx %u\n", uaddr, len);
33020- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
33021+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
33022 if (err) {
33023 /*
33024 * TODO: need to fixup sg_tablesize, max_segment_size,
33025diff -urNp linux-3.0.4/drivers/scsi/scsi_transport_fc.c linux-3.0.4/drivers/scsi/scsi_transport_fc.c
33026--- linux-3.0.4/drivers/scsi/scsi_transport_fc.c 2011-07-21 22:17:23.000000000 -0400
33027+++ linux-3.0.4/drivers/scsi/scsi_transport_fc.c 2011-08-23 21:47:56.000000000 -0400
33028@@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_
33029 * Netlink Infrastructure
33030 */
33031
33032-static atomic_t fc_event_seq;
33033+static atomic_unchecked_t fc_event_seq;
33034
33035 /**
33036 * fc_get_event_number - Obtain the next sequential FC event number
33037@@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
33038 u32
33039 fc_get_event_number(void)
33040 {
33041- return atomic_add_return(1, &fc_event_seq);
33042+ return atomic_add_return_unchecked(1, &fc_event_seq);
33043 }
33044 EXPORT_SYMBOL(fc_get_event_number);
33045
33046@@ -645,7 +645,7 @@ static __init int fc_transport_init(void
33047 {
33048 int error;
33049
33050- atomic_set(&fc_event_seq, 0);
33051+ atomic_set_unchecked(&fc_event_seq, 0);
33052
33053 error = transport_class_register(&fc_host_class);
33054 if (error)
33055@@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char
33056 char *cp;
33057
33058 *val = simple_strtoul(buf, &cp, 0);
33059- if ((*cp && (*cp != '\n')) || (*val < 0))
33060+ if (*cp && (*cp != '\n'))
33061 return -EINVAL;
33062 /*
33063 * Check for overflow; dev_loss_tmo is u32
33064diff -urNp linux-3.0.4/drivers/scsi/scsi_transport_iscsi.c linux-3.0.4/drivers/scsi/scsi_transport_iscsi.c
33065--- linux-3.0.4/drivers/scsi/scsi_transport_iscsi.c 2011-07-21 22:17:23.000000000 -0400
33066+++ linux-3.0.4/drivers/scsi/scsi_transport_iscsi.c 2011-08-23 21:47:56.000000000 -0400
33067@@ -83,7 +83,7 @@ struct iscsi_internal {
33068 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
33069 };
33070
33071-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
33072+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
33073 static struct workqueue_struct *iscsi_eh_timer_workq;
33074
33075 /*
33076@@ -761,7 +761,7 @@ int iscsi_add_session(struct iscsi_cls_s
33077 int err;
33078
33079 ihost = shost->shost_data;
33080- session->sid = atomic_add_return(1, &iscsi_session_nr);
33081+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
33082
33083 if (id == ISCSI_MAX_TARGET) {
33084 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
33085@@ -2200,7 +2200,7 @@ static __init int iscsi_transport_init(v
33086 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
33087 ISCSI_TRANSPORT_VERSION);
33088
33089- atomic_set(&iscsi_session_nr, 0);
33090+ atomic_set_unchecked(&iscsi_session_nr, 0);
33091
33092 err = class_register(&iscsi_transport_class);
33093 if (err)
33094diff -urNp linux-3.0.4/drivers/scsi/scsi_transport_srp.c linux-3.0.4/drivers/scsi/scsi_transport_srp.c
33095--- linux-3.0.4/drivers/scsi/scsi_transport_srp.c 2011-07-21 22:17:23.000000000 -0400
33096+++ linux-3.0.4/drivers/scsi/scsi_transport_srp.c 2011-08-23 21:47:56.000000000 -0400
33097@@ -33,7 +33,7 @@
33098 #include "scsi_transport_srp_internal.h"
33099
33100 struct srp_host_attrs {
33101- atomic_t next_port_id;
33102+ atomic_unchecked_t next_port_id;
33103 };
33104 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
33105
33106@@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
33107 struct Scsi_Host *shost = dev_to_shost(dev);
33108 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
33109
33110- atomic_set(&srp_host->next_port_id, 0);
33111+ atomic_set_unchecked(&srp_host->next_port_id, 0);
33112 return 0;
33113 }
33114
33115@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
33116 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
33117 rport->roles = ids->roles;
33118
33119- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
33120+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
33121 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
33122
33123 transport_setup_device(&rport->dev);
33124diff -urNp linux-3.0.4/drivers/scsi/sg.c linux-3.0.4/drivers/scsi/sg.c
33125--- linux-3.0.4/drivers/scsi/sg.c 2011-07-21 22:17:23.000000000 -0400
33126+++ linux-3.0.4/drivers/scsi/sg.c 2011-10-06 04:17:55.000000000 -0400
33127@@ -1075,7 +1075,7 @@ sg_ioctl(struct file *filp, unsigned int
33128 sdp->disk->disk_name,
33129 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
33130 NULL,
33131- (char *)arg);
33132+ (char __user *)arg);
33133 case BLKTRACESTART:
33134 return blk_trace_startstop(sdp->device->request_queue, 1);
33135 case BLKTRACESTOP:
33136@@ -2310,7 +2310,7 @@ struct sg_proc_leaf {
33137 const struct file_operations * fops;
33138 };
33139
33140-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
33141+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
33142 {"allow_dio", &adio_fops},
33143 {"debug", &debug_fops},
33144 {"def_reserved_size", &dressz_fops},
33145@@ -2325,7 +2325,7 @@ sg_proc_init(void)
33146 {
33147 int k, mask;
33148 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
33149- struct sg_proc_leaf * leaf;
33150+ const struct sg_proc_leaf * leaf;
33151
33152 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
33153 if (!sg_proc_sgp)
33154diff -urNp linux-3.0.4/drivers/scsi/sym53c8xx_2/sym_glue.c linux-3.0.4/drivers/scsi/sym53c8xx_2/sym_glue.c
33155--- linux-3.0.4/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-07-21 22:17:23.000000000 -0400
33156+++ linux-3.0.4/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-08-23 21:48:14.000000000 -0400
33157@@ -1756,6 +1756,8 @@ static int __devinit sym2_probe(struct p
33158 int do_iounmap = 0;
33159 int do_disable_device = 1;
33160
33161+ pax_track_stack();
33162+
33163 memset(&sym_dev, 0, sizeof(sym_dev));
33164 memset(&nvram, 0, sizeof(nvram));
33165 sym_dev.pdev = pdev;
33166diff -urNp linux-3.0.4/drivers/scsi/vmw_pvscsi.c linux-3.0.4/drivers/scsi/vmw_pvscsi.c
33167--- linux-3.0.4/drivers/scsi/vmw_pvscsi.c 2011-07-21 22:17:23.000000000 -0400
33168+++ linux-3.0.4/drivers/scsi/vmw_pvscsi.c 2011-08-23 21:48:14.000000000 -0400
33169@@ -447,6 +447,8 @@ static void pvscsi_setup_all_rings(const
33170 dma_addr_t base;
33171 unsigned i;
33172
33173+ pax_track_stack();
33174+
33175 cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
33176 cmd.reqRingNumPages = adapter->req_pages;
33177 cmd.cmpRingNumPages = adapter->cmp_pages;
33178diff -urNp linux-3.0.4/drivers/spi/spi.c linux-3.0.4/drivers/spi/spi.c
33179--- linux-3.0.4/drivers/spi/spi.c 2011-07-21 22:17:23.000000000 -0400
33180+++ linux-3.0.4/drivers/spi/spi.c 2011-08-23 21:47:56.000000000 -0400
33181@@ -1023,7 +1023,7 @@ int spi_bus_unlock(struct spi_master *ma
33182 EXPORT_SYMBOL_GPL(spi_bus_unlock);
33183
33184 /* portable code must never pass more than 32 bytes */
33185-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
33186+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
33187
33188 static u8 *buf;
33189
33190diff -urNp linux-3.0.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c linux-3.0.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c
33191--- linux-3.0.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-09-02 18:11:21.000000000 -0400
33192+++ linux-3.0.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-08-23 21:48:14.000000000 -0400
33193@@ -362,7 +362,7 @@ static struct ar_cookie s_ar_cookie_mem[
33194 (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0))
33195
33196
33197-static struct net_device_ops ar6000_netdev_ops = {
33198+static net_device_ops_no_const ar6000_netdev_ops = {
33199 .ndo_init = NULL,
33200 .ndo_open = ar6000_open,
33201 .ndo_stop = ar6000_close,
33202diff -urNp linux-3.0.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h linux-3.0.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
33203--- linux-3.0.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-07-21 22:17:23.000000000 -0400
33204+++ linux-3.0.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-08-23 21:47:56.000000000 -0400
33205@@ -30,7 +30,7 @@ typedef bool (*ar6k_pal_recv_pkt_t)(void
33206 typedef struct ar6k_pal_config_s
33207 {
33208 ar6k_pal_recv_pkt_t fpar6k_pal_recv_pkt;
33209-}ar6k_pal_config_t;
33210+} __no_const ar6k_pal_config_t;
33211
33212 void register_pal_cb(ar6k_pal_config_t *palConfig_p);
33213 #endif /* _AR6K_PAL_H_ */
33214diff -urNp linux-3.0.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c linux-3.0.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
33215--- linux-3.0.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-07-21 22:17:23.000000000 -0400
33216+++ linux-3.0.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-08-23 21:47:56.000000000 -0400
33217@@ -853,14 +853,14 @@ static void dhd_op_if(dhd_if_t *ifp)
33218 free_netdev(ifp->net);
33219 }
33220 /* Allocate etherdev, including space for private structure */
33221- ifp->net = alloc_etherdev(sizeof(dhd));
33222+ ifp->net = alloc_etherdev(sizeof(*dhd));
33223 if (!ifp->net) {
33224 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
33225 ret = -ENOMEM;
33226 }
33227 if (ret == 0) {
33228 strcpy(ifp->net->name, ifp->name);
33229- memcpy(netdev_priv(ifp->net), &dhd, sizeof(dhd));
33230+ memcpy(netdev_priv(ifp->net), dhd, sizeof(*dhd));
33231 err = dhd_net_attach(&dhd->pub, ifp->idx);
33232 if (err != 0) {
33233 DHD_ERROR(("%s: dhd_net_attach failed, "
33234@@ -1872,7 +1872,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
33235 strcpy(nv_path, nvram_path);
33236
33237 /* Allocate etherdev, including space for private structure */
33238- net = alloc_etherdev(sizeof(dhd));
33239+ net = alloc_etherdev(sizeof(*dhd));
33240 if (!net) {
33241 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
33242 goto fail;
33243@@ -1888,7 +1888,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
33244 /*
33245 * Save the dhd_info into the priv
33246 */
33247- memcpy(netdev_priv(net), &dhd, sizeof(dhd));
33248+ memcpy(netdev_priv(net), dhd, sizeof(*dhd));
33249
33250 /* Set network interface name if it was provided as module parameter */
33251 if (iface_name[0]) {
33252@@ -2004,7 +2004,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
33253 /*
33254 * Save the dhd_info into the priv
33255 */
33256- memcpy(netdev_priv(net), &dhd, sizeof(dhd));
33257+ memcpy(netdev_priv(net), dhd, sizeof(*dhd));
33258
33259 #if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
33260 g_bus = bus;
33261diff -urNp linux-3.0.4/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h linux-3.0.4/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h
33262--- linux-3.0.4/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h 2011-07-21 22:17:23.000000000 -0400
33263+++ linux-3.0.4/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h 2011-08-23 21:47:56.000000000 -0400
33264@@ -593,7 +593,7 @@ struct phy_func_ptr {
33265 initfn_t carrsuppr;
33266 rxsigpwrfn_t rxsigpwr;
33267 detachfn_t detach;
33268-};
33269+} __no_const;
33270 typedef struct phy_func_ptr phy_func_ptr_t;
33271
33272 struct phy_info {
33273diff -urNp linux-3.0.4/drivers/staging/brcm80211/include/bcmsdh.h linux-3.0.4/drivers/staging/brcm80211/include/bcmsdh.h
33274--- linux-3.0.4/drivers/staging/brcm80211/include/bcmsdh.h 2011-07-21 22:17:23.000000000 -0400
33275+++ linux-3.0.4/drivers/staging/brcm80211/include/bcmsdh.h 2011-08-23 21:47:56.000000000 -0400
33276@@ -185,7 +185,7 @@ typedef struct {
33277 u16 func, uint bustype, void *regsva, void *param);
33278 /* detach from device */
33279 void (*detach) (void *ch);
33280-} bcmsdh_driver_t;
33281+} __no_const bcmsdh_driver_t;
33282
33283 /* platform specific/high level functions */
33284 extern int bcmsdh_register(bcmsdh_driver_t *driver);
33285diff -urNp linux-3.0.4/drivers/staging/et131x/et1310_tx.c linux-3.0.4/drivers/staging/et131x/et1310_tx.c
33286--- linux-3.0.4/drivers/staging/et131x/et1310_tx.c 2011-07-21 22:17:23.000000000 -0400
33287+++ linux-3.0.4/drivers/staging/et131x/et1310_tx.c 2011-08-23 21:47:56.000000000 -0400
33288@@ -635,11 +635,11 @@ inline void et131x_free_send_packet(stru
33289 struct net_device_stats *stats = &etdev->net_stats;
33290
33291 if (tcb->flags & fMP_DEST_BROAD)
33292- atomic_inc(&etdev->Stats.brdcstxmt);
33293+ atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
33294 else if (tcb->flags & fMP_DEST_MULTI)
33295- atomic_inc(&etdev->Stats.multixmt);
33296+ atomic_inc_unchecked(&etdev->Stats.multixmt);
33297 else
33298- atomic_inc(&etdev->Stats.unixmt);
33299+ atomic_inc_unchecked(&etdev->Stats.unixmt);
33300
33301 if (tcb->skb) {
33302 stats->tx_bytes += tcb->skb->len;
33303diff -urNp linux-3.0.4/drivers/staging/et131x/et131x_adapter.h linux-3.0.4/drivers/staging/et131x/et131x_adapter.h
33304--- linux-3.0.4/drivers/staging/et131x/et131x_adapter.h 2011-07-21 22:17:23.000000000 -0400
33305+++ linux-3.0.4/drivers/staging/et131x/et131x_adapter.h 2011-08-23 21:47:56.000000000 -0400
33306@@ -110,11 +110,11 @@ typedef struct _ce_stats_t {
33307 * operations
33308 */
33309 u32 unircv; /* # multicast packets received */
33310- atomic_t unixmt; /* # multicast packets for Tx */
33311+ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
33312 u32 multircv; /* # multicast packets received */
33313- atomic_t multixmt; /* # multicast packets for Tx */
33314+ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
33315 u32 brdcstrcv; /* # broadcast packets received */
33316- atomic_t brdcstxmt; /* # broadcast packets for Tx */
33317+ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
33318 u32 norcvbuf; /* # Rx packets discarded */
33319 u32 noxmtbuf; /* # Tx packets discarded */
33320
33321diff -urNp linux-3.0.4/drivers/staging/hv/channel.c linux-3.0.4/drivers/staging/hv/channel.c
33322--- linux-3.0.4/drivers/staging/hv/channel.c 2011-09-02 18:11:21.000000000 -0400
33323+++ linux-3.0.4/drivers/staging/hv/channel.c 2011-08-23 21:47:56.000000000 -0400
33324@@ -433,8 +433,8 @@ int vmbus_establish_gpadl(struct vmbus_c
33325 int ret = 0;
33326 int t;
33327
33328- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
33329- atomic_inc(&vmbus_connection.next_gpadl_handle);
33330+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
33331+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
33332
33333 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
33334 if (ret)
33335diff -urNp linux-3.0.4/drivers/staging/hv/hv.c linux-3.0.4/drivers/staging/hv/hv.c
33336--- linux-3.0.4/drivers/staging/hv/hv.c 2011-07-21 22:17:23.000000000 -0400
33337+++ linux-3.0.4/drivers/staging/hv/hv.c 2011-08-23 21:47:56.000000000 -0400
33338@@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, voi
33339 u64 output_address = (output) ? virt_to_phys(output) : 0;
33340 u32 output_address_hi = output_address >> 32;
33341 u32 output_address_lo = output_address & 0xFFFFFFFF;
33342- volatile void *hypercall_page = hv_context.hypercall_page;
33343+ volatile void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
33344
33345 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
33346 "=a"(hv_status_lo) : "d" (control_hi),
33347diff -urNp linux-3.0.4/drivers/staging/hv/hv_mouse.c linux-3.0.4/drivers/staging/hv/hv_mouse.c
33348--- linux-3.0.4/drivers/staging/hv/hv_mouse.c 2011-07-21 22:17:23.000000000 -0400
33349+++ linux-3.0.4/drivers/staging/hv/hv_mouse.c 2011-08-23 21:47:56.000000000 -0400
33350@@ -879,8 +879,10 @@ static void reportdesc_callback(struct h
33351 if (hid_dev) {
33352 DPRINT_INFO(INPUTVSC_DRV, "hid_device created");
33353
33354- hid_dev->ll_driver->open = mousevsc_hid_open;
33355- hid_dev->ll_driver->close = mousevsc_hid_close;
33356+ pax_open_kernel();
33357+ *(void **)&hid_dev->ll_driver->open = mousevsc_hid_open;
33358+ *(void **)&hid_dev->ll_driver->close = mousevsc_hid_close;
33359+ pax_close_kernel();
33360
33361 hid_dev->bus = BUS_VIRTUAL;
33362 hid_dev->vendor = input_device_ctx->device_info.vendor;
33363diff -urNp linux-3.0.4/drivers/staging/hv/hyperv_vmbus.h linux-3.0.4/drivers/staging/hv/hyperv_vmbus.h
33364--- linux-3.0.4/drivers/staging/hv/hyperv_vmbus.h 2011-07-21 22:17:23.000000000 -0400
33365+++ linux-3.0.4/drivers/staging/hv/hyperv_vmbus.h 2011-08-23 21:47:56.000000000 -0400
33366@@ -559,7 +559,7 @@ enum vmbus_connect_state {
33367 struct vmbus_connection {
33368 enum vmbus_connect_state conn_state;
33369
33370- atomic_t next_gpadl_handle;
33371+ atomic_unchecked_t next_gpadl_handle;
33372
33373 /*
33374 * Represents channel interrupts. Each bit position represents a
33375diff -urNp linux-3.0.4/drivers/staging/hv/rndis_filter.c linux-3.0.4/drivers/staging/hv/rndis_filter.c
33376--- linux-3.0.4/drivers/staging/hv/rndis_filter.c 2011-09-02 18:11:21.000000000 -0400
33377+++ linux-3.0.4/drivers/staging/hv/rndis_filter.c 2011-08-23 21:47:56.000000000 -0400
33378@@ -43,7 +43,7 @@ struct rndis_device {
33379
33380 enum rndis_device_state state;
33381 u32 link_stat;
33382- atomic_t new_req_id;
33383+ atomic_unchecked_t new_req_id;
33384
33385 spinlock_t request_lock;
33386 struct list_head req_list;
33387@@ -117,7 +117,7 @@ static struct rndis_request *get_rndis_r
33388 * template
33389 */
33390 set = &rndis_msg->msg.set_req;
33391- set->req_id = atomic_inc_return(&dev->new_req_id);
33392+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
33393
33394 /* Add to the request list */
33395 spin_lock_irqsave(&dev->request_lock, flags);
33396@@ -637,7 +637,7 @@ static void rndis_filter_halt_device(str
33397
33398 /* Setup the rndis set */
33399 halt = &request->request_msg.msg.halt_req;
33400- halt->req_id = atomic_inc_return(&dev->new_req_id);
33401+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
33402
33403 /* Ignore return since this msg is optional. */
33404 rndis_filter_send_request(dev, request);
33405diff -urNp linux-3.0.4/drivers/staging/hv/vmbus_drv.c linux-3.0.4/drivers/staging/hv/vmbus_drv.c
33406--- linux-3.0.4/drivers/staging/hv/vmbus_drv.c 2011-07-21 22:17:23.000000000 -0400
33407+++ linux-3.0.4/drivers/staging/hv/vmbus_drv.c 2011-08-23 21:47:56.000000000 -0400
33408@@ -668,11 +668,11 @@ int vmbus_child_device_register(struct h
33409 {
33410 int ret = 0;
33411
33412- static atomic_t device_num = ATOMIC_INIT(0);
33413+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
33414
33415 /* Set the device name. Otherwise, device_register() will fail. */
33416 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
33417- atomic_inc_return(&device_num));
33418+ atomic_inc_return_unchecked(&device_num));
33419
33420 /* The new device belongs to this bus */
33421 child_device_obj->device.bus = &hv_bus; /* device->dev.bus; */
33422diff -urNp linux-3.0.4/drivers/staging/iio/ring_generic.h linux-3.0.4/drivers/staging/iio/ring_generic.h
33423--- linux-3.0.4/drivers/staging/iio/ring_generic.h 2011-07-21 22:17:23.000000000 -0400
33424+++ linux-3.0.4/drivers/staging/iio/ring_generic.h 2011-08-23 21:47:56.000000000 -0400
33425@@ -62,7 +62,7 @@ struct iio_ring_access_funcs {
33426
33427 int (*is_enabled)(struct iio_ring_buffer *ring);
33428 int (*enable)(struct iio_ring_buffer *ring);
33429-};
33430+} __no_const;
33431
33432 struct iio_ring_setup_ops {
33433 int (*preenable)(struct iio_dev *);
33434diff -urNp linux-3.0.4/drivers/staging/octeon/ethernet.c linux-3.0.4/drivers/staging/octeon/ethernet.c
33435--- linux-3.0.4/drivers/staging/octeon/ethernet.c 2011-07-21 22:17:23.000000000 -0400
33436+++ linux-3.0.4/drivers/staging/octeon/ethernet.c 2011-08-23 21:47:56.000000000 -0400
33437@@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_
33438 * since the RX tasklet also increments it.
33439 */
33440 #ifdef CONFIG_64BIT
33441- atomic64_add(rx_status.dropped_packets,
33442- (atomic64_t *)&priv->stats.rx_dropped);
33443+ atomic64_add_unchecked(rx_status.dropped_packets,
33444+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
33445 #else
33446- atomic_add(rx_status.dropped_packets,
33447- (atomic_t *)&priv->stats.rx_dropped);
33448+ atomic_add_unchecked(rx_status.dropped_packets,
33449+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
33450 #endif
33451 }
33452
33453diff -urNp linux-3.0.4/drivers/staging/octeon/ethernet-rx.c linux-3.0.4/drivers/staging/octeon/ethernet-rx.c
33454--- linux-3.0.4/drivers/staging/octeon/ethernet-rx.c 2011-07-21 22:17:23.000000000 -0400
33455+++ linux-3.0.4/drivers/staging/octeon/ethernet-rx.c 2011-08-23 21:47:56.000000000 -0400
33456@@ -417,11 +417,11 @@ static int cvm_oct_napi_poll(struct napi
33457 /* Increment RX stats for virtual ports */
33458 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
33459 #ifdef CONFIG_64BIT
33460- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
33461- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
33462+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
33463+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
33464 #else
33465- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
33466- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
33467+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
33468+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
33469 #endif
33470 }
33471 netif_receive_skb(skb);
33472@@ -433,9 +433,9 @@ static int cvm_oct_napi_poll(struct napi
33473 dev->name);
33474 */
33475 #ifdef CONFIG_64BIT
33476- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
33477+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
33478 #else
33479- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
33480+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
33481 #endif
33482 dev_kfree_skb_irq(skb);
33483 }
33484diff -urNp linux-3.0.4/drivers/staging/pohmelfs/inode.c linux-3.0.4/drivers/staging/pohmelfs/inode.c
33485--- linux-3.0.4/drivers/staging/pohmelfs/inode.c 2011-07-21 22:17:23.000000000 -0400
33486+++ linux-3.0.4/drivers/staging/pohmelfs/inode.c 2011-08-23 21:47:56.000000000 -0400
33487@@ -1856,7 +1856,7 @@ static int pohmelfs_fill_super(struct su
33488 mutex_init(&psb->mcache_lock);
33489 psb->mcache_root = RB_ROOT;
33490 psb->mcache_timeout = msecs_to_jiffies(5000);
33491- atomic_long_set(&psb->mcache_gen, 0);
33492+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
33493
33494 psb->trans_max_pages = 100;
33495
33496@@ -1871,7 +1871,7 @@ static int pohmelfs_fill_super(struct su
33497 INIT_LIST_HEAD(&psb->crypto_ready_list);
33498 INIT_LIST_HEAD(&psb->crypto_active_list);
33499
33500- atomic_set(&psb->trans_gen, 1);
33501+ atomic_set_unchecked(&psb->trans_gen, 1);
33502 atomic_long_set(&psb->total_inodes, 0);
33503
33504 mutex_init(&psb->state_lock);
33505diff -urNp linux-3.0.4/drivers/staging/pohmelfs/mcache.c linux-3.0.4/drivers/staging/pohmelfs/mcache.c
33506--- linux-3.0.4/drivers/staging/pohmelfs/mcache.c 2011-07-21 22:17:23.000000000 -0400
33507+++ linux-3.0.4/drivers/staging/pohmelfs/mcache.c 2011-08-23 21:47:56.000000000 -0400
33508@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
33509 m->data = data;
33510 m->start = start;
33511 m->size = size;
33512- m->gen = atomic_long_inc_return(&psb->mcache_gen);
33513+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
33514
33515 mutex_lock(&psb->mcache_lock);
33516 err = pohmelfs_mcache_insert(psb, m);
33517diff -urNp linux-3.0.4/drivers/staging/pohmelfs/netfs.h linux-3.0.4/drivers/staging/pohmelfs/netfs.h
33518--- linux-3.0.4/drivers/staging/pohmelfs/netfs.h 2011-07-21 22:17:23.000000000 -0400
33519+++ linux-3.0.4/drivers/staging/pohmelfs/netfs.h 2011-08-23 21:47:56.000000000 -0400
33520@@ -571,14 +571,14 @@ struct pohmelfs_config;
33521 struct pohmelfs_sb {
33522 struct rb_root mcache_root;
33523 struct mutex mcache_lock;
33524- atomic_long_t mcache_gen;
33525+ atomic_long_unchecked_t mcache_gen;
33526 unsigned long mcache_timeout;
33527
33528 unsigned int idx;
33529
33530 unsigned int trans_retries;
33531
33532- atomic_t trans_gen;
33533+ atomic_unchecked_t trans_gen;
33534
33535 unsigned int crypto_attached_size;
33536 unsigned int crypto_align_size;
33537diff -urNp linux-3.0.4/drivers/staging/pohmelfs/trans.c linux-3.0.4/drivers/staging/pohmelfs/trans.c
33538--- linux-3.0.4/drivers/staging/pohmelfs/trans.c 2011-07-21 22:17:23.000000000 -0400
33539+++ linux-3.0.4/drivers/staging/pohmelfs/trans.c 2011-08-23 21:47:56.000000000 -0400
33540@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
33541 int err;
33542 struct netfs_cmd *cmd = t->iovec.iov_base;
33543
33544- t->gen = atomic_inc_return(&psb->trans_gen);
33545+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
33546
33547 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
33548 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
33549diff -urNp linux-3.0.4/drivers/staging/rtl8712/rtl871x_io.h linux-3.0.4/drivers/staging/rtl8712/rtl871x_io.h
33550--- linux-3.0.4/drivers/staging/rtl8712/rtl871x_io.h 2011-07-21 22:17:23.000000000 -0400
33551+++ linux-3.0.4/drivers/staging/rtl8712/rtl871x_io.h 2011-08-23 21:47:56.000000000 -0400
33552@@ -83,7 +83,7 @@ struct _io_ops {
33553 u8 *pmem);
33554 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
33555 u8 *pmem);
33556-};
33557+} __no_const;
33558
33559 struct io_req {
33560 struct list_head list;
33561diff -urNp linux-3.0.4/drivers/staging/sbe-2t3e3/netdev.c linux-3.0.4/drivers/staging/sbe-2t3e3/netdev.c
33562--- linux-3.0.4/drivers/staging/sbe-2t3e3/netdev.c 2011-07-21 22:17:23.000000000 -0400
33563+++ linux-3.0.4/drivers/staging/sbe-2t3e3/netdev.c 2011-08-24 18:21:41.000000000 -0400
33564@@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, s
33565 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
33566
33567 if (rlen)
33568- if (copy_to_user(data, &resp, rlen))
33569+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
33570 return -EFAULT;
33571
33572 return 0;
33573diff -urNp linux-3.0.4/drivers/staging/tty/stallion.c linux-3.0.4/drivers/staging/tty/stallion.c
33574--- linux-3.0.4/drivers/staging/tty/stallion.c 2011-07-21 22:17:23.000000000 -0400
33575+++ linux-3.0.4/drivers/staging/tty/stallion.c 2011-08-23 21:48:14.000000000 -0400
33576@@ -2406,6 +2406,8 @@ static int stl_getportstruct(struct stlp
33577 struct stlport stl_dummyport;
33578 struct stlport *portp;
33579
33580+ pax_track_stack();
33581+
33582 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
33583 return -EFAULT;
33584 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
33585diff -urNp linux-3.0.4/drivers/staging/usbip/usbip_common.h linux-3.0.4/drivers/staging/usbip/usbip_common.h
33586--- linux-3.0.4/drivers/staging/usbip/usbip_common.h 2011-07-21 22:17:23.000000000 -0400
33587+++ linux-3.0.4/drivers/staging/usbip/usbip_common.h 2011-08-23 21:47:56.000000000 -0400
33588@@ -315,7 +315,7 @@ struct usbip_device {
33589 void (*shutdown)(struct usbip_device *);
33590 void (*reset)(struct usbip_device *);
33591 void (*unusable)(struct usbip_device *);
33592- } eh_ops;
33593+ } __no_const eh_ops;
33594 };
33595
33596 void usbip_pack_pdu(struct usbip_header *pdu, struct urb *urb, int cmd,
33597diff -urNp linux-3.0.4/drivers/staging/usbip/vhci.h linux-3.0.4/drivers/staging/usbip/vhci.h
33598--- linux-3.0.4/drivers/staging/usbip/vhci.h 2011-07-21 22:17:23.000000000 -0400
33599+++ linux-3.0.4/drivers/staging/usbip/vhci.h 2011-08-23 21:47:56.000000000 -0400
33600@@ -94,7 +94,7 @@ struct vhci_hcd {
33601 unsigned resuming:1;
33602 unsigned long re_timeout;
33603
33604- atomic_t seqnum;
33605+ atomic_unchecked_t seqnum;
33606
33607 /*
33608 * NOTE:
33609diff -urNp linux-3.0.4/drivers/staging/usbip/vhci_hcd.c linux-3.0.4/drivers/staging/usbip/vhci_hcd.c
33610--- linux-3.0.4/drivers/staging/usbip/vhci_hcd.c 2011-09-02 18:11:21.000000000 -0400
33611+++ linux-3.0.4/drivers/staging/usbip/vhci_hcd.c 2011-08-23 21:47:56.000000000 -0400
33612@@ -511,7 +511,7 @@ static void vhci_tx_urb(struct urb *urb)
33613 return;
33614 }
33615
33616- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
33617+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
33618 if (priv->seqnum == 0xffff)
33619 dev_info(&urb->dev->dev, "seqnum max\n");
33620
33621@@ -765,7 +765,7 @@ static int vhci_urb_dequeue(struct usb_h
33622 return -ENOMEM;
33623 }
33624
33625- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
33626+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
33627 if (unlink->seqnum == 0xffff)
33628 pr_info("seqnum max\n");
33629
33630@@ -955,7 +955,7 @@ static int vhci_start(struct usb_hcd *hc
33631 vdev->rhport = rhport;
33632 }
33633
33634- atomic_set(&vhci->seqnum, 0);
33635+ atomic_set_unchecked(&vhci->seqnum, 0);
33636 spin_lock_init(&vhci->lock);
33637
33638 hcd->power_budget = 0; /* no limit */
33639diff -urNp linux-3.0.4/drivers/staging/usbip/vhci_rx.c linux-3.0.4/drivers/staging/usbip/vhci_rx.c
33640--- linux-3.0.4/drivers/staging/usbip/vhci_rx.c 2011-07-21 22:17:23.000000000 -0400
33641+++ linux-3.0.4/drivers/staging/usbip/vhci_rx.c 2011-08-23 21:47:56.000000000 -0400
33642@@ -76,7 +76,7 @@ static void vhci_recv_ret_submit(struct
33643 if (!urb) {
33644 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
33645 pr_info("max seqnum %d\n",
33646- atomic_read(&the_controller->seqnum));
33647+ atomic_read_unchecked(&the_controller->seqnum));
33648 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
33649 return;
33650 }
33651diff -urNp linux-3.0.4/drivers/staging/vt6655/hostap.c linux-3.0.4/drivers/staging/vt6655/hostap.c
33652--- linux-3.0.4/drivers/staging/vt6655/hostap.c 2011-07-21 22:17:23.000000000 -0400
33653+++ linux-3.0.4/drivers/staging/vt6655/hostap.c 2011-08-23 21:47:56.000000000 -0400
33654@@ -79,14 +79,13 @@ static int msglevel
33655 *
33656 */
33657
33658+static net_device_ops_no_const apdev_netdev_ops;
33659+
33660 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
33661 {
33662 PSDevice apdev_priv;
33663 struct net_device *dev = pDevice->dev;
33664 int ret;
33665- const struct net_device_ops apdev_netdev_ops = {
33666- .ndo_start_xmit = pDevice->tx_80211,
33667- };
33668
33669 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
33670
33671@@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevic
33672 *apdev_priv = *pDevice;
33673 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
33674
33675+ /* only half broken now */
33676+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
33677 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
33678
33679 pDevice->apdev->type = ARPHRD_IEEE80211;
33680diff -urNp linux-3.0.4/drivers/staging/vt6656/hostap.c linux-3.0.4/drivers/staging/vt6656/hostap.c
33681--- linux-3.0.4/drivers/staging/vt6656/hostap.c 2011-07-21 22:17:23.000000000 -0400
33682+++ linux-3.0.4/drivers/staging/vt6656/hostap.c 2011-08-23 21:47:56.000000000 -0400
33683@@ -80,14 +80,13 @@ static int msglevel
33684 *
33685 */
33686
33687+static net_device_ops_no_const apdev_netdev_ops;
33688+
33689 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
33690 {
33691 PSDevice apdev_priv;
33692 struct net_device *dev = pDevice->dev;
33693 int ret;
33694- const struct net_device_ops apdev_netdev_ops = {
33695- .ndo_start_xmit = pDevice->tx_80211,
33696- };
33697
33698 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
33699
33700@@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevic
33701 *apdev_priv = *pDevice;
33702 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
33703
33704+ /* only half broken now */
33705+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
33706 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
33707
33708 pDevice->apdev->type = ARPHRD_IEEE80211;
33709diff -urNp linux-3.0.4/drivers/staging/wlan-ng/hfa384x_usb.c linux-3.0.4/drivers/staging/wlan-ng/hfa384x_usb.c
33710--- linux-3.0.4/drivers/staging/wlan-ng/hfa384x_usb.c 2011-07-21 22:17:23.000000000 -0400
33711+++ linux-3.0.4/drivers/staging/wlan-ng/hfa384x_usb.c 2011-08-23 21:47:56.000000000 -0400
33712@@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hf
33713
33714 struct usbctlx_completor {
33715 int (*complete) (struct usbctlx_completor *);
33716-};
33717+} __no_const;
33718
33719 static int
33720 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
33721diff -urNp linux-3.0.4/drivers/staging/zcache/tmem.c linux-3.0.4/drivers/staging/zcache/tmem.c
33722--- linux-3.0.4/drivers/staging/zcache/tmem.c 2011-07-21 22:17:23.000000000 -0400
33723+++ linux-3.0.4/drivers/staging/zcache/tmem.c 2011-08-23 21:47:56.000000000 -0400
33724@@ -39,7 +39,7 @@
33725 * A tmem host implementation must use this function to register callbacks
33726 * for memory allocation.
33727 */
33728-static struct tmem_hostops tmem_hostops;
33729+static tmem_hostops_no_const tmem_hostops;
33730
33731 static void tmem_objnode_tree_init(void);
33732
33733@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_h
33734 * A tmem host implementation must use this function to register
33735 * callbacks for a page-accessible memory (PAM) implementation
33736 */
33737-static struct tmem_pamops tmem_pamops;
33738+static tmem_pamops_no_const tmem_pamops;
33739
33740 void tmem_register_pamops(struct tmem_pamops *m)
33741 {
33742diff -urNp linux-3.0.4/drivers/staging/zcache/tmem.h linux-3.0.4/drivers/staging/zcache/tmem.h
33743--- linux-3.0.4/drivers/staging/zcache/tmem.h 2011-07-21 22:17:23.000000000 -0400
33744+++ linux-3.0.4/drivers/staging/zcache/tmem.h 2011-08-23 21:47:56.000000000 -0400
33745@@ -171,6 +171,7 @@ struct tmem_pamops {
33746 int (*get_data)(struct page *, void *, struct tmem_pool *);
33747 void (*free)(void *, struct tmem_pool *);
33748 };
33749+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
33750 extern void tmem_register_pamops(struct tmem_pamops *m);
33751
33752 /* memory allocation methods provided by the host implementation */
33753@@ -180,6 +181,7 @@ struct tmem_hostops {
33754 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
33755 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
33756 };
33757+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
33758 extern void tmem_register_hostops(struct tmem_hostops *m);
33759
33760 /* core tmem accessor functions */
33761diff -urNp linux-3.0.4/drivers/target/target_core_alua.c linux-3.0.4/drivers/target/target_core_alua.c
33762--- linux-3.0.4/drivers/target/target_core_alua.c 2011-07-21 22:17:23.000000000 -0400
33763+++ linux-3.0.4/drivers/target/target_core_alua.c 2011-08-23 21:48:14.000000000 -0400
33764@@ -675,6 +675,8 @@ static int core_alua_update_tpg_primary_
33765 char path[ALUA_METADATA_PATH_LEN];
33766 int len;
33767
33768+ pax_track_stack();
33769+
33770 memset(path, 0, ALUA_METADATA_PATH_LEN);
33771
33772 len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
33773@@ -938,6 +940,8 @@ static int core_alua_update_tpg_secondar
33774 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
33775 int len;
33776
33777+ pax_track_stack();
33778+
33779 memset(path, 0, ALUA_METADATA_PATH_LEN);
33780 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
33781
33782diff -urNp linux-3.0.4/drivers/target/target_core_cdb.c linux-3.0.4/drivers/target/target_core_cdb.c
33783--- linux-3.0.4/drivers/target/target_core_cdb.c 2011-07-21 22:17:23.000000000 -0400
33784+++ linux-3.0.4/drivers/target/target_core_cdb.c 2011-08-23 21:48:14.000000000 -0400
33785@@ -838,6 +838,8 @@ target_emulate_modesense(struct se_cmd *
33786 int length = 0;
33787 unsigned char buf[SE_MODE_PAGE_BUF];
33788
33789+ pax_track_stack();
33790+
33791 memset(buf, 0, SE_MODE_PAGE_BUF);
33792
33793 switch (cdb[2] & 0x3f) {
33794diff -urNp linux-3.0.4/drivers/target/target_core_configfs.c linux-3.0.4/drivers/target/target_core_configfs.c
33795--- linux-3.0.4/drivers/target/target_core_configfs.c 2011-07-21 22:17:23.000000000 -0400
33796+++ linux-3.0.4/drivers/target/target_core_configfs.c 2011-08-23 21:48:14.000000000 -0400
33797@@ -1276,6 +1276,8 @@ static ssize_t target_core_dev_pr_show_a
33798 ssize_t len = 0;
33799 int reg_count = 0, prf_isid;
33800
33801+ pax_track_stack();
33802+
33803 if (!(su_dev->se_dev_ptr))
33804 return -ENODEV;
33805
33806diff -urNp linux-3.0.4/drivers/target/target_core_pr.c linux-3.0.4/drivers/target/target_core_pr.c
33807--- linux-3.0.4/drivers/target/target_core_pr.c 2011-07-21 22:17:23.000000000 -0400
33808+++ linux-3.0.4/drivers/target/target_core_pr.c 2011-08-23 21:48:14.000000000 -0400
33809@@ -918,6 +918,8 @@ static int __core_scsi3_check_aptpl_regi
33810 unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
33811 u16 tpgt;
33812
33813+ pax_track_stack();
33814+
33815 memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
33816 memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
33817 /*
33818@@ -1861,6 +1863,8 @@ static int __core_scsi3_update_aptpl_buf
33819 ssize_t len = 0;
33820 int reg_count = 0;
33821
33822+ pax_track_stack();
33823+
33824 memset(buf, 0, pr_aptpl_buf_len);
33825 /*
33826 * Called to clear metadata once APTPL has been deactivated.
33827@@ -1983,6 +1987,8 @@ static int __core_scsi3_write_aptpl_to_f
33828 char path[512];
33829 int ret;
33830
33831+ pax_track_stack();
33832+
33833 memset(iov, 0, sizeof(struct iovec));
33834 memset(path, 0, 512);
33835
33836diff -urNp linux-3.0.4/drivers/target/target_core_tmr.c linux-3.0.4/drivers/target/target_core_tmr.c
33837--- linux-3.0.4/drivers/target/target_core_tmr.c 2011-07-21 22:17:23.000000000 -0400
33838+++ linux-3.0.4/drivers/target/target_core_tmr.c 2011-08-23 21:47:56.000000000 -0400
33839@@ -269,7 +269,7 @@ int core_tmr_lun_reset(
33840 CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
33841 T_TASK(cmd)->t_task_cdbs,
33842 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
33843- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
33844+ atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
33845 atomic_read(&T_TASK(cmd)->t_transport_active),
33846 atomic_read(&T_TASK(cmd)->t_transport_stop),
33847 atomic_read(&T_TASK(cmd)->t_transport_sent));
33848@@ -311,7 +311,7 @@ int core_tmr_lun_reset(
33849 DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
33850 " task: %p, t_fe_count: %d dev: %p\n", task,
33851 fe_count, dev);
33852- atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
33853+ atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
33854 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
33855 flags);
33856 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
33857@@ -321,7 +321,7 @@ int core_tmr_lun_reset(
33858 }
33859 DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
33860 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
33861- atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
33862+ atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
33863 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
33864 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
33865
33866diff -urNp linux-3.0.4/drivers/target/target_core_transport.c linux-3.0.4/drivers/target/target_core_transport.c
33867--- linux-3.0.4/drivers/target/target_core_transport.c 2011-07-21 22:17:23.000000000 -0400
33868+++ linux-3.0.4/drivers/target/target_core_transport.c 2011-08-23 21:47:56.000000000 -0400
33869@@ -1681,7 +1681,7 @@ struct se_device *transport_add_device_t
33870
33871 dev->queue_depth = dev_limits->queue_depth;
33872 atomic_set(&dev->depth_left, dev->queue_depth);
33873- atomic_set(&dev->dev_ordered_id, 0);
33874+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
33875
33876 se_dev_set_default_attribs(dev, dev_limits);
33877
33878@@ -1882,7 +1882,7 @@ static int transport_check_alloc_task_at
33879 * Used to determine when ORDERED commands should go from
33880 * Dormant to Active status.
33881 */
33882- cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id);
33883+ cmd->se_ordered_id = atomic_inc_return_unchecked(&SE_DEV(cmd)->dev_ordered_id);
33884 smp_mb__after_atomic_inc();
33885 DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
33886 cmd->se_ordered_id, cmd->sam_task_attr,
33887@@ -2169,7 +2169,7 @@ static void transport_generic_request_fa
33888 " t_transport_active: %d t_transport_stop: %d"
33889 " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs,
33890 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
33891- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
33892+ atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
33893 atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left),
33894 atomic_read(&T_TASK(cmd)->t_transport_active),
33895 atomic_read(&T_TASK(cmd)->t_transport_stop),
33896@@ -2673,9 +2673,9 @@ check_depth:
33897 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
33898 atomic_set(&task->task_active, 1);
33899 atomic_set(&task->task_sent, 1);
33900- atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent);
33901+ atomic_inc_unchecked(&T_TASK(cmd)->t_task_cdbs_sent);
33902
33903- if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) ==
33904+ if (atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent) ==
33905 T_TASK(cmd)->t_task_cdbs)
33906 atomic_set(&cmd->transport_sent, 1);
33907
33908@@ -5568,7 +5568,7 @@ static void transport_generic_wait_for_t
33909 atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
33910 }
33911 if (!atomic_read(&T_TASK(cmd)->t_transport_active) ||
33912- atomic_read(&T_TASK(cmd)->t_transport_aborted))
33913+ atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted))
33914 goto remove;
33915
33916 atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
33917@@ -5797,7 +5797,7 @@ int transport_check_aborted_status(struc
33918 {
33919 int ret = 0;
33920
33921- if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
33922+ if (atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted) != 0) {
33923 if (!(send_status) ||
33924 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
33925 return 1;
33926@@ -5825,7 +5825,7 @@ void transport_send_task_abort(struct se
33927 */
33928 if (cmd->data_direction == DMA_TO_DEVICE) {
33929 if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
33930- atomic_inc(&T_TASK(cmd)->t_transport_aborted);
33931+ atomic_inc_unchecked(&T_TASK(cmd)->t_transport_aborted);
33932 smp_mb__after_atomic_inc();
33933 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
33934 transport_new_cmd_failure(cmd);
33935@@ -5949,7 +5949,7 @@ static void transport_processing_shutdow
33936 CMD_TFO(cmd)->get_task_tag(cmd),
33937 T_TASK(cmd)->t_task_cdbs,
33938 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
33939- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
33940+ atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
33941 atomic_read(&T_TASK(cmd)->t_transport_active),
33942 atomic_read(&T_TASK(cmd)->t_transport_stop),
33943 atomic_read(&T_TASK(cmd)->t_transport_sent));
33944diff -urNp linux-3.0.4/drivers/telephony/ixj.c linux-3.0.4/drivers/telephony/ixj.c
33945--- linux-3.0.4/drivers/telephony/ixj.c 2011-07-21 22:17:23.000000000 -0400
33946+++ linux-3.0.4/drivers/telephony/ixj.c 2011-08-23 21:48:14.000000000 -0400
33947@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
33948 bool mContinue;
33949 char *pIn, *pOut;
33950
33951+ pax_track_stack();
33952+
33953 if (!SCI_Prepare(j))
33954 return 0;
33955
33956diff -urNp linux-3.0.4/drivers/tty/hvc/hvcs.c linux-3.0.4/drivers/tty/hvc/hvcs.c
33957--- linux-3.0.4/drivers/tty/hvc/hvcs.c 2011-07-21 22:17:23.000000000 -0400
33958+++ linux-3.0.4/drivers/tty/hvc/hvcs.c 2011-08-23 21:47:56.000000000 -0400
33959@@ -83,6 +83,7 @@
33960 #include <asm/hvcserver.h>
33961 #include <asm/uaccess.h>
33962 #include <asm/vio.h>
33963+#include <asm/local.h>
33964
33965 /*
33966 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
33967@@ -270,7 +271,7 @@ struct hvcs_struct {
33968 unsigned int index;
33969
33970 struct tty_struct *tty;
33971- int open_count;
33972+ local_t open_count;
33973
33974 /*
33975 * Used to tell the driver kernel_thread what operations need to take
33976@@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(st
33977
33978 spin_lock_irqsave(&hvcsd->lock, flags);
33979
33980- if (hvcsd->open_count > 0) {
33981+ if (local_read(&hvcsd->open_count) > 0) {
33982 spin_unlock_irqrestore(&hvcsd->lock, flags);
33983 printk(KERN_INFO "HVCS: vterm state unchanged. "
33984 "The hvcs device node is still in use.\n");
33985@@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *
33986 if ((retval = hvcs_partner_connect(hvcsd)))
33987 goto error_release;
33988
33989- hvcsd->open_count = 1;
33990+ local_set(&hvcsd->open_count, 1);
33991 hvcsd->tty = tty;
33992 tty->driver_data = hvcsd;
33993
33994@@ -1179,7 +1180,7 @@ fast_open:
33995
33996 spin_lock_irqsave(&hvcsd->lock, flags);
33997 kref_get(&hvcsd->kref);
33998- hvcsd->open_count++;
33999+ local_inc(&hvcsd->open_count);
34000 hvcsd->todo_mask |= HVCS_SCHED_READ;
34001 spin_unlock_irqrestore(&hvcsd->lock, flags);
34002
34003@@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct
34004 hvcsd = tty->driver_data;
34005
34006 spin_lock_irqsave(&hvcsd->lock, flags);
34007- if (--hvcsd->open_count == 0) {
34008+ if (local_dec_and_test(&hvcsd->open_count)) {
34009
34010 vio_disable_interrupts(hvcsd->vdev);
34011
34012@@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct
34013 free_irq(irq, hvcsd);
34014 kref_put(&hvcsd->kref, destroy_hvcs_struct);
34015 return;
34016- } else if (hvcsd->open_count < 0) {
34017+ } else if (local_read(&hvcsd->open_count) < 0) {
34018 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
34019 " is missmanaged.\n",
34020- hvcsd->vdev->unit_address, hvcsd->open_count);
34021+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
34022 }
34023
34024 spin_unlock_irqrestore(&hvcsd->lock, flags);
34025@@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struc
34026
34027 spin_lock_irqsave(&hvcsd->lock, flags);
34028 /* Preserve this so that we know how many kref refs to put */
34029- temp_open_count = hvcsd->open_count;
34030+ temp_open_count = local_read(&hvcsd->open_count);
34031
34032 /*
34033 * Don't kref put inside the spinlock because the destruction
34034@@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struc
34035 hvcsd->tty->driver_data = NULL;
34036 hvcsd->tty = NULL;
34037
34038- hvcsd->open_count = 0;
34039+ local_set(&hvcsd->open_count, 0);
34040
34041 /* This will drop any buffered data on the floor which is OK in a hangup
34042 * scenario. */
34043@@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct
34044 * the middle of a write operation? This is a crummy place to do this
34045 * but we want to keep it all in the spinlock.
34046 */
34047- if (hvcsd->open_count <= 0) {
34048+ if (local_read(&hvcsd->open_count) <= 0) {
34049 spin_unlock_irqrestore(&hvcsd->lock, flags);
34050 return -ENODEV;
34051 }
34052@@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_st
34053 {
34054 struct hvcs_struct *hvcsd = tty->driver_data;
34055
34056- if (!hvcsd || hvcsd->open_count <= 0)
34057+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
34058 return 0;
34059
34060 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
34061diff -urNp linux-3.0.4/drivers/tty/ipwireless/tty.c linux-3.0.4/drivers/tty/ipwireless/tty.c
34062--- linux-3.0.4/drivers/tty/ipwireless/tty.c 2011-07-21 22:17:23.000000000 -0400
34063+++ linux-3.0.4/drivers/tty/ipwireless/tty.c 2011-08-23 21:47:56.000000000 -0400
34064@@ -29,6 +29,7 @@
34065 #include <linux/tty_driver.h>
34066 #include <linux/tty_flip.h>
34067 #include <linux/uaccess.h>
34068+#include <asm/local.h>
34069
34070 #include "tty.h"
34071 #include "network.h"
34072@@ -51,7 +52,7 @@ struct ipw_tty {
34073 int tty_type;
34074 struct ipw_network *network;
34075 struct tty_struct *linux_tty;
34076- int open_count;
34077+ local_t open_count;
34078 unsigned int control_lines;
34079 struct mutex ipw_tty_mutex;
34080 int tx_bytes_queued;
34081@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
34082 mutex_unlock(&tty->ipw_tty_mutex);
34083 return -ENODEV;
34084 }
34085- if (tty->open_count == 0)
34086+ if (local_read(&tty->open_count) == 0)
34087 tty->tx_bytes_queued = 0;
34088
34089- tty->open_count++;
34090+ local_inc(&tty->open_count);
34091
34092 tty->linux_tty = linux_tty;
34093 linux_tty->driver_data = tty;
34094@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
34095
34096 static void do_ipw_close(struct ipw_tty *tty)
34097 {
34098- tty->open_count--;
34099-
34100- if (tty->open_count == 0) {
34101+ if (local_dec_return(&tty->open_count) == 0) {
34102 struct tty_struct *linux_tty = tty->linux_tty;
34103
34104 if (linux_tty != NULL) {
34105@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
34106 return;
34107
34108 mutex_lock(&tty->ipw_tty_mutex);
34109- if (tty->open_count == 0) {
34110+ if (local_read(&tty->open_count) == 0) {
34111 mutex_unlock(&tty->ipw_tty_mutex);
34112 return;
34113 }
34114@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
34115 return;
34116 }
34117
34118- if (!tty->open_count) {
34119+ if (!local_read(&tty->open_count)) {
34120 mutex_unlock(&tty->ipw_tty_mutex);
34121 return;
34122 }
34123@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
34124 return -ENODEV;
34125
34126 mutex_lock(&tty->ipw_tty_mutex);
34127- if (!tty->open_count) {
34128+ if (!local_read(&tty->open_count)) {
34129 mutex_unlock(&tty->ipw_tty_mutex);
34130 return -EINVAL;
34131 }
34132@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
34133 if (!tty)
34134 return -ENODEV;
34135
34136- if (!tty->open_count)
34137+ if (!local_read(&tty->open_count))
34138 return -EINVAL;
34139
34140 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
34141@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
34142 if (!tty)
34143 return 0;
34144
34145- if (!tty->open_count)
34146+ if (!local_read(&tty->open_count))
34147 return 0;
34148
34149 return tty->tx_bytes_queued;
34150@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
34151 if (!tty)
34152 return -ENODEV;
34153
34154- if (!tty->open_count)
34155+ if (!local_read(&tty->open_count))
34156 return -EINVAL;
34157
34158 return get_control_lines(tty);
34159@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
34160 if (!tty)
34161 return -ENODEV;
34162
34163- if (!tty->open_count)
34164+ if (!local_read(&tty->open_count))
34165 return -EINVAL;
34166
34167 return set_control_lines(tty, set, clear);
34168@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
34169 if (!tty)
34170 return -ENODEV;
34171
34172- if (!tty->open_count)
34173+ if (!local_read(&tty->open_count))
34174 return -EINVAL;
34175
34176 /* FIXME: Exactly how is the tty object locked here .. */
34177@@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty
34178 against a parallel ioctl etc */
34179 mutex_lock(&ttyj->ipw_tty_mutex);
34180 }
34181- while (ttyj->open_count)
34182+ while (local_read(&ttyj->open_count))
34183 do_ipw_close(ttyj);
34184 ipwireless_disassociate_network_ttys(network,
34185 ttyj->channel_idx);
34186diff -urNp linux-3.0.4/drivers/tty/n_gsm.c linux-3.0.4/drivers/tty/n_gsm.c
34187--- linux-3.0.4/drivers/tty/n_gsm.c 2011-09-02 18:11:21.000000000 -0400
34188+++ linux-3.0.4/drivers/tty/n_gsm.c 2011-08-23 21:47:56.000000000 -0400
34189@@ -1589,7 +1589,7 @@ static struct gsm_dlci *gsm_dlci_alloc(s
34190 return NULL;
34191 spin_lock_init(&dlci->lock);
34192 dlci->fifo = &dlci->_fifo;
34193- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
34194+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
34195 kfree(dlci);
34196 return NULL;
34197 }
34198diff -urNp linux-3.0.4/drivers/tty/n_tty.c linux-3.0.4/drivers/tty/n_tty.c
34199--- linux-3.0.4/drivers/tty/n_tty.c 2011-07-21 22:17:23.000000000 -0400
34200+++ linux-3.0.4/drivers/tty/n_tty.c 2011-08-23 21:47:56.000000000 -0400
34201@@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_
34202 {
34203 *ops = tty_ldisc_N_TTY;
34204 ops->owner = NULL;
34205- ops->refcount = ops->flags = 0;
34206+ atomic_set(&ops->refcount, 0);
34207+ ops->flags = 0;
34208 }
34209 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
34210diff -urNp linux-3.0.4/drivers/tty/pty.c linux-3.0.4/drivers/tty/pty.c
34211--- linux-3.0.4/drivers/tty/pty.c 2011-07-21 22:17:23.000000000 -0400
34212+++ linux-3.0.4/drivers/tty/pty.c 2011-08-23 21:47:56.000000000 -0400
34213@@ -754,8 +754,10 @@ static void __init unix98_pty_init(void)
34214 register_sysctl_table(pty_root_table);
34215
34216 /* Now create the /dev/ptmx special device */
34217+ pax_open_kernel();
34218 tty_default_fops(&ptmx_fops);
34219- ptmx_fops.open = ptmx_open;
34220+ *(void **)&ptmx_fops.open = ptmx_open;
34221+ pax_close_kernel();
34222
34223 cdev_init(&ptmx_cdev, &ptmx_fops);
34224 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
34225diff -urNp linux-3.0.4/drivers/tty/rocket.c linux-3.0.4/drivers/tty/rocket.c
34226--- linux-3.0.4/drivers/tty/rocket.c 2011-07-21 22:17:23.000000000 -0400
34227+++ linux-3.0.4/drivers/tty/rocket.c 2011-08-23 21:48:14.000000000 -0400
34228@@ -1277,6 +1277,8 @@ static int get_ports(struct r_port *info
34229 struct rocket_ports tmp;
34230 int board;
34231
34232+ pax_track_stack();
34233+
34234 if (!retports)
34235 return -EFAULT;
34236 memset(&tmp, 0, sizeof (tmp));
34237diff -urNp linux-3.0.4/drivers/tty/serial/kgdboc.c linux-3.0.4/drivers/tty/serial/kgdboc.c
34238--- linux-3.0.4/drivers/tty/serial/kgdboc.c 2011-07-21 22:17:23.000000000 -0400
34239+++ linux-3.0.4/drivers/tty/serial/kgdboc.c 2011-08-23 21:47:56.000000000 -0400
34240@@ -23,8 +23,9 @@
34241 #define MAX_CONFIG_LEN 40
34242
34243 static struct kgdb_io kgdboc_io_ops;
34244+static struct kgdb_io kgdboc_io_ops_console;
34245
34246-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
34247+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
34248 static int configured = -1;
34249
34250 static char config[MAX_CONFIG_LEN];
34251@@ -147,6 +148,8 @@ static void cleanup_kgdboc(void)
34252 kgdboc_unregister_kbd();
34253 if (configured == 1)
34254 kgdb_unregister_io_module(&kgdboc_io_ops);
34255+ else if (configured == 2)
34256+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
34257 }
34258
34259 static int configure_kgdboc(void)
34260@@ -156,13 +159,13 @@ static int configure_kgdboc(void)
34261 int err;
34262 char *cptr = config;
34263 struct console *cons;
34264+ int is_console = 0;
34265
34266 err = kgdboc_option_setup(config);
34267 if (err || !strlen(config) || isspace(config[0]))
34268 goto noconfig;
34269
34270 err = -ENODEV;
34271- kgdboc_io_ops.is_console = 0;
34272 kgdb_tty_driver = NULL;
34273
34274 kgdboc_use_kms = 0;
34275@@ -183,7 +186,7 @@ static int configure_kgdboc(void)
34276 int idx;
34277 if (cons->device && cons->device(cons, &idx) == p &&
34278 idx == tty_line) {
34279- kgdboc_io_ops.is_console = 1;
34280+ is_console = 1;
34281 break;
34282 }
34283 cons = cons->next;
34284@@ -193,12 +196,16 @@ static int configure_kgdboc(void)
34285 kgdb_tty_line = tty_line;
34286
34287 do_register:
34288- err = kgdb_register_io_module(&kgdboc_io_ops);
34289+ if (is_console) {
34290+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
34291+ configured = 2;
34292+ } else {
34293+ err = kgdb_register_io_module(&kgdboc_io_ops);
34294+ configured = 1;
34295+ }
34296 if (err)
34297 goto noconfig;
34298
34299- configured = 1;
34300-
34301 return 0;
34302
34303 noconfig:
34304@@ -212,7 +219,7 @@ noconfig:
34305 static int __init init_kgdboc(void)
34306 {
34307 /* Already configured? */
34308- if (configured == 1)
34309+ if (configured >= 1)
34310 return 0;
34311
34312 return configure_kgdboc();
34313@@ -261,7 +268,7 @@ static int param_set_kgdboc_var(const ch
34314 if (config[len - 1] == '\n')
34315 config[len - 1] = '\0';
34316
34317- if (configured == 1)
34318+ if (configured >= 1)
34319 cleanup_kgdboc();
34320
34321 /* Go and configure with the new params. */
34322@@ -301,6 +308,15 @@ static struct kgdb_io kgdboc_io_ops = {
34323 .post_exception = kgdboc_post_exp_handler,
34324 };
34325
34326+static struct kgdb_io kgdboc_io_ops_console = {
34327+ .name = "kgdboc",
34328+ .read_char = kgdboc_get_char,
34329+ .write_char = kgdboc_put_char,
34330+ .pre_exception = kgdboc_pre_exp_handler,
34331+ .post_exception = kgdboc_post_exp_handler,
34332+ .is_console = 1
34333+};
34334+
34335 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
34336 /* This is only available if kgdboc is a built in for early debugging */
34337 static int __init kgdboc_early_init(char *opt)
34338diff -urNp linux-3.0.4/drivers/tty/serial/mrst_max3110.c linux-3.0.4/drivers/tty/serial/mrst_max3110.c
34339--- linux-3.0.4/drivers/tty/serial/mrst_max3110.c 2011-07-21 22:17:23.000000000 -0400
34340+++ linux-3.0.4/drivers/tty/serial/mrst_max3110.c 2011-08-23 21:48:14.000000000 -0400
34341@@ -393,6 +393,8 @@ static void max3110_con_receive(struct u
34342 int loop = 1, num, total = 0;
34343 u8 recv_buf[512], *pbuf;
34344
34345+ pax_track_stack();
34346+
34347 pbuf = recv_buf;
34348 do {
34349 num = max3110_read_multi(max, pbuf);
34350diff -urNp linux-3.0.4/drivers/tty/tty_io.c linux-3.0.4/drivers/tty/tty_io.c
34351--- linux-3.0.4/drivers/tty/tty_io.c 2011-07-21 22:17:23.000000000 -0400
34352+++ linux-3.0.4/drivers/tty/tty_io.c 2011-08-23 21:47:56.000000000 -0400
34353@@ -3215,7 +3215,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
34354
34355 void tty_default_fops(struct file_operations *fops)
34356 {
34357- *fops = tty_fops;
34358+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
34359 }
34360
34361 /*
34362diff -urNp linux-3.0.4/drivers/tty/tty_ldisc.c linux-3.0.4/drivers/tty/tty_ldisc.c
34363--- linux-3.0.4/drivers/tty/tty_ldisc.c 2011-07-21 22:17:23.000000000 -0400
34364+++ linux-3.0.4/drivers/tty/tty_ldisc.c 2011-08-23 21:47:56.000000000 -0400
34365@@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
34366 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
34367 struct tty_ldisc_ops *ldo = ld->ops;
34368
34369- ldo->refcount--;
34370+ atomic_dec(&ldo->refcount);
34371 module_put(ldo->owner);
34372 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
34373
34374@@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
34375 spin_lock_irqsave(&tty_ldisc_lock, flags);
34376 tty_ldiscs[disc] = new_ldisc;
34377 new_ldisc->num = disc;
34378- new_ldisc->refcount = 0;
34379+ atomic_set(&new_ldisc->refcount, 0);
34380 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
34381
34382 return ret;
34383@@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
34384 return -EINVAL;
34385
34386 spin_lock_irqsave(&tty_ldisc_lock, flags);
34387- if (tty_ldiscs[disc]->refcount)
34388+ if (atomic_read(&tty_ldiscs[disc]->refcount))
34389 ret = -EBUSY;
34390 else
34391 tty_ldiscs[disc] = NULL;
34392@@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
34393 if (ldops) {
34394 ret = ERR_PTR(-EAGAIN);
34395 if (try_module_get(ldops->owner)) {
34396- ldops->refcount++;
34397+ atomic_inc(&ldops->refcount);
34398 ret = ldops;
34399 }
34400 }
34401@@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
34402 unsigned long flags;
34403
34404 spin_lock_irqsave(&tty_ldisc_lock, flags);
34405- ldops->refcount--;
34406+ atomic_dec(&ldops->refcount);
34407 module_put(ldops->owner);
34408 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
34409 }
34410diff -urNp linux-3.0.4/drivers/tty/vt/keyboard.c linux-3.0.4/drivers/tty/vt/keyboard.c
34411--- linux-3.0.4/drivers/tty/vt/keyboard.c 2011-07-21 22:17:23.000000000 -0400
34412+++ linux-3.0.4/drivers/tty/vt/keyboard.c 2011-08-23 21:48:14.000000000 -0400
34413@@ -656,6 +656,16 @@ static void k_spec(struct vc_data *vc, u
34414 kbd->kbdmode == VC_OFF) &&
34415 value != KVAL(K_SAK))
34416 return; /* SAK is allowed even in raw mode */
34417+
34418+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
34419+ {
34420+ void *func = fn_handler[value];
34421+ if (func == fn_show_state || func == fn_show_ptregs ||
34422+ func == fn_show_mem)
34423+ return;
34424+ }
34425+#endif
34426+
34427 fn_handler[value](vc);
34428 }
34429
34430diff -urNp linux-3.0.4/drivers/tty/vt/vt.c linux-3.0.4/drivers/tty/vt/vt.c
34431--- linux-3.0.4/drivers/tty/vt/vt.c 2011-07-21 22:17:23.000000000 -0400
34432+++ linux-3.0.4/drivers/tty/vt/vt.c 2011-08-23 21:47:56.000000000 -0400
34433@@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
34434
34435 static void notify_write(struct vc_data *vc, unsigned int unicode)
34436 {
34437- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
34438+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
34439 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
34440 }
34441
34442diff -urNp linux-3.0.4/drivers/tty/vt/vt_ioctl.c linux-3.0.4/drivers/tty/vt/vt_ioctl.c
34443--- linux-3.0.4/drivers/tty/vt/vt_ioctl.c 2011-07-21 22:17:23.000000000 -0400
34444+++ linux-3.0.4/drivers/tty/vt/vt_ioctl.c 2011-08-23 21:48:14.000000000 -0400
34445@@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
34446 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
34447 return -EFAULT;
34448
34449- if (!capable(CAP_SYS_TTY_CONFIG))
34450- perm = 0;
34451-
34452 switch (cmd) {
34453 case KDGKBENT:
34454 key_map = key_maps[s];
34455@@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __
34456 val = (i ? K_HOLE : K_NOSUCHMAP);
34457 return put_user(val, &user_kbe->kb_value);
34458 case KDSKBENT:
34459+ if (!capable(CAP_SYS_TTY_CONFIG))
34460+ perm = 0;
34461+
34462 if (!perm)
34463 return -EPERM;
34464 if (!i && v == K_NOSUCHMAP) {
34465@@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
34466 int i, j, k;
34467 int ret;
34468
34469- if (!capable(CAP_SYS_TTY_CONFIG))
34470- perm = 0;
34471-
34472 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
34473 if (!kbs) {
34474 ret = -ENOMEM;
34475@@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
34476 kfree(kbs);
34477 return ((p && *p) ? -EOVERFLOW : 0);
34478 case KDSKBSENT:
34479+ if (!capable(CAP_SYS_TTY_CONFIG))
34480+ perm = 0;
34481+
34482 if (!perm) {
34483 ret = -EPERM;
34484 goto reterr;
34485diff -urNp linux-3.0.4/drivers/uio/uio.c linux-3.0.4/drivers/uio/uio.c
34486--- linux-3.0.4/drivers/uio/uio.c 2011-07-21 22:17:23.000000000 -0400
34487+++ linux-3.0.4/drivers/uio/uio.c 2011-08-23 21:47:56.000000000 -0400
34488@@ -25,6 +25,7 @@
34489 #include <linux/kobject.h>
34490 #include <linux/cdev.h>
34491 #include <linux/uio_driver.h>
34492+#include <asm/local.h>
34493
34494 #define UIO_MAX_DEVICES (1U << MINORBITS)
34495
34496@@ -32,10 +33,10 @@ struct uio_device {
34497 struct module *owner;
34498 struct device *dev;
34499 int minor;
34500- atomic_t event;
34501+ atomic_unchecked_t event;
34502 struct fasync_struct *async_queue;
34503 wait_queue_head_t wait;
34504- int vma_count;
34505+ local_t vma_count;
34506 struct uio_info *info;
34507 struct kobject *map_dir;
34508 struct kobject *portio_dir;
34509@@ -242,7 +243,7 @@ static ssize_t show_event(struct device
34510 struct device_attribute *attr, char *buf)
34511 {
34512 struct uio_device *idev = dev_get_drvdata(dev);
34513- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
34514+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
34515 }
34516
34517 static struct device_attribute uio_class_attributes[] = {
34518@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *i
34519 {
34520 struct uio_device *idev = info->uio_dev;
34521
34522- atomic_inc(&idev->event);
34523+ atomic_inc_unchecked(&idev->event);
34524 wake_up_interruptible(&idev->wait);
34525 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
34526 }
34527@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode,
34528 }
34529
34530 listener->dev = idev;
34531- listener->event_count = atomic_read(&idev->event);
34532+ listener->event_count = atomic_read_unchecked(&idev->event);
34533 filep->private_data = listener;
34534
34535 if (idev->info->open) {
34536@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file
34537 return -EIO;
34538
34539 poll_wait(filep, &idev->wait, wait);
34540- if (listener->event_count != atomic_read(&idev->event))
34541+ if (listener->event_count != atomic_read_unchecked(&idev->event))
34542 return POLLIN | POLLRDNORM;
34543 return 0;
34544 }
34545@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *fil
34546 do {
34547 set_current_state(TASK_INTERRUPTIBLE);
34548
34549- event_count = atomic_read(&idev->event);
34550+ event_count = atomic_read_unchecked(&idev->event);
34551 if (event_count != listener->event_count) {
34552 if (copy_to_user(buf, &event_count, count))
34553 retval = -EFAULT;
34554@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_
34555 static void uio_vma_open(struct vm_area_struct *vma)
34556 {
34557 struct uio_device *idev = vma->vm_private_data;
34558- idev->vma_count++;
34559+ local_inc(&idev->vma_count);
34560 }
34561
34562 static void uio_vma_close(struct vm_area_struct *vma)
34563 {
34564 struct uio_device *idev = vma->vm_private_data;
34565- idev->vma_count--;
34566+ local_dec(&idev->vma_count);
34567 }
34568
34569 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
34570@@ -823,7 +824,7 @@ int __uio_register_device(struct module
34571 idev->owner = owner;
34572 idev->info = info;
34573 init_waitqueue_head(&idev->wait);
34574- atomic_set(&idev->event, 0);
34575+ atomic_set_unchecked(&idev->event, 0);
34576
34577 ret = uio_get_minor(idev);
34578 if (ret)
34579diff -urNp linux-3.0.4/drivers/usb/atm/cxacru.c linux-3.0.4/drivers/usb/atm/cxacru.c
34580--- linux-3.0.4/drivers/usb/atm/cxacru.c 2011-07-21 22:17:23.000000000 -0400
34581+++ linux-3.0.4/drivers/usb/atm/cxacru.c 2011-08-23 21:47:56.000000000 -0400
34582@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_c
34583 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
34584 if (ret < 2)
34585 return -EINVAL;
34586- if (index < 0 || index > 0x7f)
34587+ if (index > 0x7f)
34588 return -EINVAL;
34589 pos += tmp;
34590
34591diff -urNp linux-3.0.4/drivers/usb/atm/usbatm.c linux-3.0.4/drivers/usb/atm/usbatm.c
34592--- linux-3.0.4/drivers/usb/atm/usbatm.c 2011-07-21 22:17:23.000000000 -0400
34593+++ linux-3.0.4/drivers/usb/atm/usbatm.c 2011-08-23 21:47:56.000000000 -0400
34594@@ -332,7 +332,7 @@ static void usbatm_extract_one_cell(stru
34595 if (printk_ratelimit())
34596 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
34597 __func__, vpi, vci);
34598- atomic_inc(&vcc->stats->rx_err);
34599+ atomic_inc_unchecked(&vcc->stats->rx_err);
34600 return;
34601 }
34602
34603@@ -360,7 +360,7 @@ static void usbatm_extract_one_cell(stru
34604 if (length > ATM_MAX_AAL5_PDU) {
34605 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
34606 __func__, length, vcc);
34607- atomic_inc(&vcc->stats->rx_err);
34608+ atomic_inc_unchecked(&vcc->stats->rx_err);
34609 goto out;
34610 }
34611
34612@@ -369,14 +369,14 @@ static void usbatm_extract_one_cell(stru
34613 if (sarb->len < pdu_length) {
34614 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
34615 __func__, pdu_length, sarb->len, vcc);
34616- atomic_inc(&vcc->stats->rx_err);
34617+ atomic_inc_unchecked(&vcc->stats->rx_err);
34618 goto out;
34619 }
34620
34621 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
34622 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
34623 __func__, vcc);
34624- atomic_inc(&vcc->stats->rx_err);
34625+ atomic_inc_unchecked(&vcc->stats->rx_err);
34626 goto out;
34627 }
34628
34629@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(stru
34630 if (printk_ratelimit())
34631 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
34632 __func__, length);
34633- atomic_inc(&vcc->stats->rx_drop);
34634+ atomic_inc_unchecked(&vcc->stats->rx_drop);
34635 goto out;
34636 }
34637
34638@@ -411,7 +411,7 @@ static void usbatm_extract_one_cell(stru
34639
34640 vcc->push(vcc, skb);
34641
34642- atomic_inc(&vcc->stats->rx);
34643+ atomic_inc_unchecked(&vcc->stats->rx);
34644 out:
34645 skb_trim(sarb, 0);
34646 }
34647@@ -614,7 +614,7 @@ static void usbatm_tx_process(unsigned l
34648 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
34649
34650 usbatm_pop(vcc, skb);
34651- atomic_inc(&vcc->stats->tx);
34652+ atomic_inc_unchecked(&vcc->stats->tx);
34653
34654 skb = skb_dequeue(&instance->sndqueue);
34655 }
34656@@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct a
34657 if (!left--)
34658 return sprintf(page,
34659 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
34660- atomic_read(&atm_dev->stats.aal5.tx),
34661- atomic_read(&atm_dev->stats.aal5.tx_err),
34662- atomic_read(&atm_dev->stats.aal5.rx),
34663- atomic_read(&atm_dev->stats.aal5.rx_err),
34664- atomic_read(&atm_dev->stats.aal5.rx_drop));
34665+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
34666+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
34667+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
34668+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
34669+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
34670
34671 if (!left--) {
34672 if (instance->disconnected)
34673diff -urNp linux-3.0.4/drivers/usb/core/devices.c linux-3.0.4/drivers/usb/core/devices.c
34674--- linux-3.0.4/drivers/usb/core/devices.c 2011-07-21 22:17:23.000000000 -0400
34675+++ linux-3.0.4/drivers/usb/core/devices.c 2011-08-23 21:47:56.000000000 -0400
34676@@ -126,7 +126,7 @@ static const char format_endpt[] =
34677 * time it gets called.
34678 */
34679 static struct device_connect_event {
34680- atomic_t count;
34681+ atomic_unchecked_t count;
34682 wait_queue_head_t wait;
34683 } device_event = {
34684 .count = ATOMIC_INIT(1),
34685@@ -164,7 +164,7 @@ static const struct class_info clas_info
34686
34687 void usbfs_conn_disc_event(void)
34688 {
34689- atomic_add(2, &device_event.count);
34690+ atomic_add_unchecked(2, &device_event.count);
34691 wake_up(&device_event.wait);
34692 }
34693
34694@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(stru
34695
34696 poll_wait(file, &device_event.wait, wait);
34697
34698- event_count = atomic_read(&device_event.count);
34699+ event_count = atomic_read_unchecked(&device_event.count);
34700 if (file->f_version != event_count) {
34701 file->f_version = event_count;
34702 return POLLIN | POLLRDNORM;
34703diff -urNp linux-3.0.4/drivers/usb/core/message.c linux-3.0.4/drivers/usb/core/message.c
34704--- linux-3.0.4/drivers/usb/core/message.c 2011-07-21 22:17:23.000000000 -0400
34705+++ linux-3.0.4/drivers/usb/core/message.c 2011-08-23 21:47:56.000000000 -0400
34706@@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device
34707 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
34708 if (buf) {
34709 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
34710- if (len > 0) {
34711- smallbuf = kmalloc(++len, GFP_NOIO);
34712+ if (len++ > 0) {
34713+ smallbuf = kmalloc(len, GFP_NOIO);
34714 if (!smallbuf)
34715 return buf;
34716 memcpy(smallbuf, buf, len);
34717diff -urNp linux-3.0.4/drivers/usb/early/ehci-dbgp.c linux-3.0.4/drivers/usb/early/ehci-dbgp.c
34718--- linux-3.0.4/drivers/usb/early/ehci-dbgp.c 2011-07-21 22:17:23.000000000 -0400
34719+++ linux-3.0.4/drivers/usb/early/ehci-dbgp.c 2011-08-23 21:47:56.000000000 -0400
34720@@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x,
34721
34722 #ifdef CONFIG_KGDB
34723 static struct kgdb_io kgdbdbgp_io_ops;
34724-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
34725+static struct kgdb_io kgdbdbgp_io_ops_console;
34726+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
34727 #else
34728 #define dbgp_kgdb_mode (0)
34729 #endif
34730@@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops =
34731 .write_char = kgdbdbgp_write_char,
34732 };
34733
34734+static struct kgdb_io kgdbdbgp_io_ops_console = {
34735+ .name = "kgdbdbgp",
34736+ .read_char = kgdbdbgp_read_char,
34737+ .write_char = kgdbdbgp_write_char,
34738+ .is_console = 1
34739+};
34740+
34741 static int kgdbdbgp_wait_time;
34742
34743 static int __init kgdbdbgp_parse_config(char *str)
34744@@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(
34745 ptr++;
34746 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
34747 }
34748- kgdb_register_io_module(&kgdbdbgp_io_ops);
34749- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
34750+ if (early_dbgp_console.index != -1)
34751+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
34752+ else
34753+ kgdb_register_io_module(&kgdbdbgp_io_ops);
34754
34755 return 0;
34756 }
34757diff -urNp linux-3.0.4/drivers/usb/host/xhci-mem.c linux-3.0.4/drivers/usb/host/xhci-mem.c
34758--- linux-3.0.4/drivers/usb/host/xhci-mem.c 2011-07-21 22:17:23.000000000 -0400
34759+++ linux-3.0.4/drivers/usb/host/xhci-mem.c 2011-08-23 21:48:14.000000000 -0400
34760@@ -1685,6 +1685,8 @@ static int xhci_check_trb_in_td_math(str
34761 unsigned int num_tests;
34762 int i, ret;
34763
34764+ pax_track_stack();
34765+
34766 num_tests = ARRAY_SIZE(simple_test_vector);
34767 for (i = 0; i < num_tests; i++) {
34768 ret = xhci_test_trb_in_td(xhci,
34769diff -urNp linux-3.0.4/drivers/usb/wusbcore/wa-hc.h linux-3.0.4/drivers/usb/wusbcore/wa-hc.h
34770--- linux-3.0.4/drivers/usb/wusbcore/wa-hc.h 2011-07-21 22:17:23.000000000 -0400
34771+++ linux-3.0.4/drivers/usb/wusbcore/wa-hc.h 2011-08-23 21:47:56.000000000 -0400
34772@@ -192,7 +192,7 @@ struct wahc {
34773 struct list_head xfer_delayed_list;
34774 spinlock_t xfer_list_lock;
34775 struct work_struct xfer_work;
34776- atomic_t xfer_id_count;
34777+ atomic_unchecked_t xfer_id_count;
34778 };
34779
34780
34781@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
34782 INIT_LIST_HEAD(&wa->xfer_delayed_list);
34783 spin_lock_init(&wa->xfer_list_lock);
34784 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
34785- atomic_set(&wa->xfer_id_count, 1);
34786+ atomic_set_unchecked(&wa->xfer_id_count, 1);
34787 }
34788
34789 /**
34790diff -urNp linux-3.0.4/drivers/usb/wusbcore/wa-xfer.c linux-3.0.4/drivers/usb/wusbcore/wa-xfer.c
34791--- linux-3.0.4/drivers/usb/wusbcore/wa-xfer.c 2011-07-21 22:17:23.000000000 -0400
34792+++ linux-3.0.4/drivers/usb/wusbcore/wa-xfer.c 2011-08-23 21:47:56.000000000 -0400
34793@@ -294,7 +294,7 @@ out:
34794 */
34795 static void wa_xfer_id_init(struct wa_xfer *xfer)
34796 {
34797- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
34798+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
34799 }
34800
34801 /*
34802diff -urNp linux-3.0.4/drivers/vhost/vhost.c linux-3.0.4/drivers/vhost/vhost.c
34803--- linux-3.0.4/drivers/vhost/vhost.c 2011-07-21 22:17:23.000000000 -0400
34804+++ linux-3.0.4/drivers/vhost/vhost.c 2011-08-23 21:47:56.000000000 -0400
34805@@ -589,7 +589,7 @@ static int init_used(struct vhost_virtqu
34806 return get_user(vq->last_used_idx, &used->idx);
34807 }
34808
34809-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
34810+static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
34811 {
34812 struct file *eventfp, *filep = NULL,
34813 *pollstart = NULL, *pollstop = NULL;
34814diff -urNp linux-3.0.4/drivers/video/fbcmap.c linux-3.0.4/drivers/video/fbcmap.c
34815--- linux-3.0.4/drivers/video/fbcmap.c 2011-07-21 22:17:23.000000000 -0400
34816+++ linux-3.0.4/drivers/video/fbcmap.c 2011-08-23 21:47:56.000000000 -0400
34817@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user
34818 rc = -ENODEV;
34819 goto out;
34820 }
34821- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
34822- !info->fbops->fb_setcmap)) {
34823+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
34824 rc = -EINVAL;
34825 goto out1;
34826 }
34827diff -urNp linux-3.0.4/drivers/video/fbmem.c linux-3.0.4/drivers/video/fbmem.c
34828--- linux-3.0.4/drivers/video/fbmem.c 2011-07-21 22:17:23.000000000 -0400
34829+++ linux-3.0.4/drivers/video/fbmem.c 2011-08-23 21:48:14.000000000 -0400
34830@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_in
34831 image->dx += image->width + 8;
34832 }
34833 } else if (rotate == FB_ROTATE_UD) {
34834- for (x = 0; x < num && image->dx >= 0; x++) {
34835+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
34836 info->fbops->fb_imageblit(info, image);
34837 image->dx -= image->width + 8;
34838 }
34839@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_in
34840 image->dy += image->height + 8;
34841 }
34842 } else if (rotate == FB_ROTATE_CCW) {
34843- for (x = 0; x < num && image->dy >= 0; x++) {
34844+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
34845 info->fbops->fb_imageblit(info, image);
34846 image->dy -= image->height + 8;
34847 }
34848@@ -939,6 +939,8 @@ fb_set_var(struct fb_info *info, struct
34849 int flags = info->flags;
34850 int ret = 0;
34851
34852+ pax_track_stack();
34853+
34854 if (var->activate & FB_ACTIVATE_INV_MODE) {
34855 struct fb_videomode mode1, mode2;
34856
34857@@ -1064,6 +1066,8 @@ static long do_fb_ioctl(struct fb_info *
34858 void __user *argp = (void __user *)arg;
34859 long ret = 0;
34860
34861+ pax_track_stack();
34862+
34863 switch (cmd) {
34864 case FBIOGET_VSCREENINFO:
34865 if (!lock_fb_info(info))
34866@@ -1143,7 +1147,7 @@ static long do_fb_ioctl(struct fb_info *
34867 return -EFAULT;
34868 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
34869 return -EINVAL;
34870- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
34871+ if (con2fb.framebuffer >= FB_MAX)
34872 return -EINVAL;
34873 if (!registered_fb[con2fb.framebuffer])
34874 request_module("fb%d", con2fb.framebuffer);
34875diff -urNp linux-3.0.4/drivers/video/i810/i810_accel.c linux-3.0.4/drivers/video/i810/i810_accel.c
34876--- linux-3.0.4/drivers/video/i810/i810_accel.c 2011-07-21 22:17:23.000000000 -0400
34877+++ linux-3.0.4/drivers/video/i810/i810_accel.c 2011-08-23 21:47:56.000000000 -0400
34878@@ -73,6 +73,7 @@ static inline int wait_for_space(struct
34879 }
34880 }
34881 printk("ringbuffer lockup!!!\n");
34882+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
34883 i810_report_error(mmio);
34884 par->dev_flags |= LOCKUP;
34885 info->pixmap.scan_align = 1;
34886diff -urNp linux-3.0.4/drivers/video/logo/logo_linux_clut224.ppm linux-3.0.4/drivers/video/logo/logo_linux_clut224.ppm
34887--- linux-3.0.4/drivers/video/logo/logo_linux_clut224.ppm 2011-07-21 22:17:23.000000000 -0400
34888+++ linux-3.0.4/drivers/video/logo/logo_linux_clut224.ppm 2011-08-29 23:49:40.000000000 -0400
34889@@ -1,1604 +1,1123 @@
34890 P3
34891-# Standard 224-color Linux logo
34892 80 80
34893 255
34894- 0 0 0 0 0 0 0 0 0 0 0 0
34895- 0 0 0 0 0 0 0 0 0 0 0 0
34896- 0 0 0 0 0 0 0 0 0 0 0 0
34897- 0 0 0 0 0 0 0 0 0 0 0 0
34898- 0 0 0 0 0 0 0 0 0 0 0 0
34899- 0 0 0 0 0 0 0 0 0 0 0 0
34900- 0 0 0 0 0 0 0 0 0 0 0 0
34901- 0 0 0 0 0 0 0 0 0 0 0 0
34902- 0 0 0 0 0 0 0 0 0 0 0 0
34903- 6 6 6 6 6 6 10 10 10 10 10 10
34904- 10 10 10 6 6 6 6 6 6 6 6 6
34905- 0 0 0 0 0 0 0 0 0 0 0 0
34906- 0 0 0 0 0 0 0 0 0 0 0 0
34907- 0 0 0 0 0 0 0 0 0 0 0 0
34908- 0 0 0 0 0 0 0 0 0 0 0 0
34909- 0 0 0 0 0 0 0 0 0 0 0 0
34910- 0 0 0 0 0 0 0 0 0 0 0 0
34911- 0 0 0 0 0 0 0 0 0 0 0 0
34912- 0 0 0 0 0 0 0 0 0 0 0 0
34913- 0 0 0 0 0 0 0 0 0 0 0 0
34914- 0 0 0 0 0 0 0 0 0 0 0 0
34915- 0 0 0 0 0 0 0 0 0 0 0 0
34916- 0 0 0 0 0 0 0 0 0 0 0 0
34917- 0 0 0 0 0 0 0 0 0 0 0 0
34918- 0 0 0 0 0 0 0 0 0 0 0 0
34919- 0 0 0 0 0 0 0 0 0 0 0 0
34920- 0 0 0 0 0 0 0 0 0 0 0 0
34921- 0 0 0 0 0 0 0 0 0 0 0 0
34922- 0 0 0 6 6 6 10 10 10 14 14 14
34923- 22 22 22 26 26 26 30 30 30 34 34 34
34924- 30 30 30 30 30 30 26 26 26 18 18 18
34925- 14 14 14 10 10 10 6 6 6 0 0 0
34926- 0 0 0 0 0 0 0 0 0 0 0 0
34927- 0 0 0 0 0 0 0 0 0 0 0 0
34928- 0 0 0 0 0 0 0 0 0 0 0 0
34929- 0 0 0 0 0 0 0 0 0 0 0 0
34930- 0 0 0 0 0 0 0 0 0 0 0 0
34931- 0 0 0 0 0 0 0 0 0 0 0 0
34932- 0 0 0 0 0 0 0 0 0 0 0 0
34933- 0 0 0 0 0 0 0 0 0 0 0 0
34934- 0 0 0 0 0 0 0 0 0 0 0 0
34935- 0 0 0 0 0 1 0 0 1 0 0 0
34936- 0 0 0 0 0 0 0 0 0 0 0 0
34937- 0 0 0 0 0 0 0 0 0 0 0 0
34938- 0 0 0 0 0 0 0 0 0 0 0 0
34939- 0 0 0 0 0 0 0 0 0 0 0 0
34940- 0 0 0 0 0 0 0 0 0 0 0 0
34941- 0 0 0 0 0 0 0 0 0 0 0 0
34942- 6 6 6 14 14 14 26 26 26 42 42 42
34943- 54 54 54 66 66 66 78 78 78 78 78 78
34944- 78 78 78 74 74 74 66 66 66 54 54 54
34945- 42 42 42 26 26 26 18 18 18 10 10 10
34946- 6 6 6 0 0 0 0 0 0 0 0 0
34947- 0 0 0 0 0 0 0 0 0 0 0 0
34948- 0 0 0 0 0 0 0 0 0 0 0 0
34949- 0 0 0 0 0 0 0 0 0 0 0 0
34950- 0 0 0 0 0 0 0 0 0 0 0 0
34951- 0 0 0 0 0 0 0 0 0 0 0 0
34952- 0 0 0 0 0 0 0 0 0 0 0 0
34953- 0 0 0 0 0 0 0 0 0 0 0 0
34954- 0 0 0 0 0 0 0 0 0 0 0 0
34955- 0 0 1 0 0 0 0 0 0 0 0 0
34956- 0 0 0 0 0 0 0 0 0 0 0 0
34957- 0 0 0 0 0 0 0 0 0 0 0 0
34958- 0 0 0 0 0 0 0 0 0 0 0 0
34959- 0 0 0 0 0 0 0 0 0 0 0 0
34960- 0 0 0 0 0 0 0 0 0 0 0 0
34961- 0 0 0 0 0 0 0 0 0 10 10 10
34962- 22 22 22 42 42 42 66 66 66 86 86 86
34963- 66 66 66 38 38 38 38 38 38 22 22 22
34964- 26 26 26 34 34 34 54 54 54 66 66 66
34965- 86 86 86 70 70 70 46 46 46 26 26 26
34966- 14 14 14 6 6 6 0 0 0 0 0 0
34967- 0 0 0 0 0 0 0 0 0 0 0 0
34968- 0 0 0 0 0 0 0 0 0 0 0 0
34969- 0 0 0 0 0 0 0 0 0 0 0 0
34970- 0 0 0 0 0 0 0 0 0 0 0 0
34971- 0 0 0 0 0 0 0 0 0 0 0 0
34972- 0 0 0 0 0 0 0 0 0 0 0 0
34973- 0 0 0 0 0 0 0 0 0 0 0 0
34974- 0 0 0 0 0 0 0 0 0 0 0 0
34975- 0 0 1 0 0 1 0 0 1 0 0 0
34976- 0 0 0 0 0 0 0 0 0 0 0 0
34977- 0 0 0 0 0 0 0 0 0 0 0 0
34978- 0 0 0 0 0 0 0 0 0 0 0 0
34979- 0 0 0 0 0 0 0 0 0 0 0 0
34980- 0 0 0 0 0 0 0 0 0 0 0 0
34981- 0 0 0 0 0 0 10 10 10 26 26 26
34982- 50 50 50 82 82 82 58 58 58 6 6 6
34983- 2 2 6 2 2 6 2 2 6 2 2 6
34984- 2 2 6 2 2 6 2 2 6 2 2 6
34985- 6 6 6 54 54 54 86 86 86 66 66 66
34986- 38 38 38 18 18 18 6 6 6 0 0 0
34987- 0 0 0 0 0 0 0 0 0 0 0 0
34988- 0 0 0 0 0 0 0 0 0 0 0 0
34989- 0 0 0 0 0 0 0 0 0 0 0 0
34990- 0 0 0 0 0 0 0 0 0 0 0 0
34991- 0 0 0 0 0 0 0 0 0 0 0 0
34992- 0 0 0 0 0 0 0 0 0 0 0 0
34993- 0 0 0 0 0 0 0 0 0 0 0 0
34994- 0 0 0 0 0 0 0 0 0 0 0 0
34995- 0 0 0 0 0 0 0 0 0 0 0 0
34996- 0 0 0 0 0 0 0 0 0 0 0 0
34997- 0 0 0 0 0 0 0 0 0 0 0 0
34998- 0 0 0 0 0 0 0 0 0 0 0 0
34999- 0 0 0 0 0 0 0 0 0 0 0 0
35000- 0 0 0 0 0 0 0 0 0 0 0 0
35001- 0 0 0 6 6 6 22 22 22 50 50 50
35002- 78 78 78 34 34 34 2 2 6 2 2 6
35003- 2 2 6 2 2 6 2 2 6 2 2 6
35004- 2 2 6 2 2 6 2 2 6 2 2 6
35005- 2 2 6 2 2 6 6 6 6 70 70 70
35006- 78 78 78 46 46 46 22 22 22 6 6 6
35007- 0 0 0 0 0 0 0 0 0 0 0 0
35008- 0 0 0 0 0 0 0 0 0 0 0 0
35009- 0 0 0 0 0 0 0 0 0 0 0 0
35010- 0 0 0 0 0 0 0 0 0 0 0 0
35011- 0 0 0 0 0 0 0 0 0 0 0 0
35012- 0 0 0 0 0 0 0 0 0 0 0 0
35013- 0 0 0 0 0 0 0 0 0 0 0 0
35014- 0 0 0 0 0 0 0 0 0 0 0 0
35015- 0 0 1 0 0 1 0 0 1 0 0 0
35016- 0 0 0 0 0 0 0 0 0 0 0 0
35017- 0 0 0 0 0 0 0 0 0 0 0 0
35018- 0 0 0 0 0 0 0 0 0 0 0 0
35019- 0 0 0 0 0 0 0 0 0 0 0 0
35020- 0 0 0 0 0 0 0 0 0 0 0 0
35021- 6 6 6 18 18 18 42 42 42 82 82 82
35022- 26 26 26 2 2 6 2 2 6 2 2 6
35023- 2 2 6 2 2 6 2 2 6 2 2 6
35024- 2 2 6 2 2 6 2 2 6 14 14 14
35025- 46 46 46 34 34 34 6 6 6 2 2 6
35026- 42 42 42 78 78 78 42 42 42 18 18 18
35027- 6 6 6 0 0 0 0 0 0 0 0 0
35028- 0 0 0 0 0 0 0 0 0 0 0 0
35029- 0 0 0 0 0 0 0 0 0 0 0 0
35030- 0 0 0 0 0 0 0 0 0 0 0 0
35031- 0 0 0 0 0 0 0 0 0 0 0 0
35032- 0 0 0 0 0 0 0 0 0 0 0 0
35033- 0 0 0 0 0 0 0 0 0 0 0 0
35034- 0 0 0 0 0 0 0 0 0 0 0 0
35035- 0 0 1 0 0 0 0 0 1 0 0 0
35036- 0 0 0 0 0 0 0 0 0 0 0 0
35037- 0 0 0 0 0 0 0 0 0 0 0 0
35038- 0 0 0 0 0 0 0 0 0 0 0 0
35039- 0 0 0 0 0 0 0 0 0 0 0 0
35040- 0 0 0 0 0 0 0 0 0 0 0 0
35041- 10 10 10 30 30 30 66 66 66 58 58 58
35042- 2 2 6 2 2 6 2 2 6 2 2 6
35043- 2 2 6 2 2 6 2 2 6 2 2 6
35044- 2 2 6 2 2 6 2 2 6 26 26 26
35045- 86 86 86 101 101 101 46 46 46 10 10 10
35046- 2 2 6 58 58 58 70 70 70 34 34 34
35047- 10 10 10 0 0 0 0 0 0 0 0 0
35048- 0 0 0 0 0 0 0 0 0 0 0 0
35049- 0 0 0 0 0 0 0 0 0 0 0 0
35050- 0 0 0 0 0 0 0 0 0 0 0 0
35051- 0 0 0 0 0 0 0 0 0 0 0 0
35052- 0 0 0 0 0 0 0 0 0 0 0 0
35053- 0 0 0 0 0 0 0 0 0 0 0 0
35054- 0 0 0 0 0 0 0 0 0 0 0 0
35055- 0 0 1 0 0 1 0 0 1 0 0 0
35056- 0 0 0 0 0 0 0 0 0 0 0 0
35057- 0 0 0 0 0 0 0 0 0 0 0 0
35058- 0 0 0 0 0 0 0 0 0 0 0 0
35059- 0 0 0 0 0 0 0 0 0 0 0 0
35060- 0 0 0 0 0 0 0 0 0 0 0 0
35061- 14 14 14 42 42 42 86 86 86 10 10 10
35062- 2 2 6 2 2 6 2 2 6 2 2 6
35063- 2 2 6 2 2 6 2 2 6 2 2 6
35064- 2 2 6 2 2 6 2 2 6 30 30 30
35065- 94 94 94 94 94 94 58 58 58 26 26 26
35066- 2 2 6 6 6 6 78 78 78 54 54 54
35067- 22 22 22 6 6 6 0 0 0 0 0 0
35068- 0 0 0 0 0 0 0 0 0 0 0 0
35069- 0 0 0 0 0 0 0 0 0 0 0 0
35070- 0 0 0 0 0 0 0 0 0 0 0 0
35071- 0 0 0 0 0 0 0 0 0 0 0 0
35072- 0 0 0 0 0 0 0 0 0 0 0 0
35073- 0 0 0 0 0 0 0 0 0 0 0 0
35074- 0 0 0 0 0 0 0 0 0 0 0 0
35075- 0 0 0 0 0 0 0 0 0 0 0 0
35076- 0 0 0 0 0 0 0 0 0 0 0 0
35077- 0 0 0 0 0 0 0 0 0 0 0 0
35078- 0 0 0 0 0 0 0 0 0 0 0 0
35079- 0 0 0 0 0 0 0 0 0 0 0 0
35080- 0 0 0 0 0 0 0 0 0 6 6 6
35081- 22 22 22 62 62 62 62 62 62 2 2 6
35082- 2 2 6 2 2 6 2 2 6 2 2 6
35083- 2 2 6 2 2 6 2 2 6 2 2 6
35084- 2 2 6 2 2 6 2 2 6 26 26 26
35085- 54 54 54 38 38 38 18 18 18 10 10 10
35086- 2 2 6 2 2 6 34 34 34 82 82 82
35087- 38 38 38 14 14 14 0 0 0 0 0 0
35088- 0 0 0 0 0 0 0 0 0 0 0 0
35089- 0 0 0 0 0 0 0 0 0 0 0 0
35090- 0 0 0 0 0 0 0 0 0 0 0 0
35091- 0 0 0 0 0 0 0 0 0 0 0 0
35092- 0 0 0 0 0 0 0 0 0 0 0 0
35093- 0 0 0 0 0 0 0 0 0 0 0 0
35094- 0 0 0 0 0 0 0 0 0 0 0 0
35095- 0 0 0 0 0 1 0 0 1 0 0 0
35096- 0 0 0 0 0 0 0 0 0 0 0 0
35097- 0 0 0 0 0 0 0 0 0 0 0 0
35098- 0 0 0 0 0 0 0 0 0 0 0 0
35099- 0 0 0 0 0 0 0 0 0 0 0 0
35100- 0 0 0 0 0 0 0 0 0 6 6 6
35101- 30 30 30 78 78 78 30 30 30 2 2 6
35102- 2 2 6 2 2 6 2 2 6 2 2 6
35103- 2 2 6 2 2 6 2 2 6 2 2 6
35104- 2 2 6 2 2 6 2 2 6 10 10 10
35105- 10 10 10 2 2 6 2 2 6 2 2 6
35106- 2 2 6 2 2 6 2 2 6 78 78 78
35107- 50 50 50 18 18 18 6 6 6 0 0 0
35108- 0 0 0 0 0 0 0 0 0 0 0 0
35109- 0 0 0 0 0 0 0 0 0 0 0 0
35110- 0 0 0 0 0 0 0 0 0 0 0 0
35111- 0 0 0 0 0 0 0 0 0 0 0 0
35112- 0 0 0 0 0 0 0 0 0 0 0 0
35113- 0 0 0 0 0 0 0 0 0 0 0 0
35114- 0 0 0 0 0 0 0 0 0 0 0 0
35115- 0 0 1 0 0 0 0 0 0 0 0 0
35116- 0 0 0 0 0 0 0 0 0 0 0 0
35117- 0 0 0 0 0 0 0 0 0 0 0 0
35118- 0 0 0 0 0 0 0 0 0 0 0 0
35119- 0 0 0 0 0 0 0 0 0 0 0 0
35120- 0 0 0 0 0 0 0 0 0 10 10 10
35121- 38 38 38 86 86 86 14 14 14 2 2 6
35122- 2 2 6 2 2 6 2 2 6 2 2 6
35123- 2 2 6 2 2 6 2 2 6 2 2 6
35124- 2 2 6 2 2 6 2 2 6 2 2 6
35125- 2 2 6 2 2 6 2 2 6 2 2 6
35126- 2 2 6 2 2 6 2 2 6 54 54 54
35127- 66 66 66 26 26 26 6 6 6 0 0 0
35128- 0 0 0 0 0 0 0 0 0 0 0 0
35129- 0 0 0 0 0 0 0 0 0 0 0 0
35130- 0 0 0 0 0 0 0 0 0 0 0 0
35131- 0 0 0 0 0 0 0 0 0 0 0 0
35132- 0 0 0 0 0 0 0 0 0 0 0 0
35133- 0 0 0 0 0 0 0 0 0 0 0 0
35134- 0 0 0 0 0 0 0 0 0 0 0 0
35135- 0 0 0 0 0 1 0 0 1 0 0 0
35136- 0 0 0 0 0 0 0 0 0 0 0 0
35137- 0 0 0 0 0 0 0 0 0 0 0 0
35138- 0 0 0 0 0 0 0 0 0 0 0 0
35139- 0 0 0 0 0 0 0 0 0 0 0 0
35140- 0 0 0 0 0 0 0 0 0 14 14 14
35141- 42 42 42 82 82 82 2 2 6 2 2 6
35142- 2 2 6 6 6 6 10 10 10 2 2 6
35143- 2 2 6 2 2 6 2 2 6 2 2 6
35144- 2 2 6 2 2 6 2 2 6 6 6 6
35145- 14 14 14 10 10 10 2 2 6 2 2 6
35146- 2 2 6 2 2 6 2 2 6 18 18 18
35147- 82 82 82 34 34 34 10 10 10 0 0 0
35148- 0 0 0 0 0 0 0 0 0 0 0 0
35149- 0 0 0 0 0 0 0 0 0 0 0 0
35150- 0 0 0 0 0 0 0 0 0 0 0 0
35151- 0 0 0 0 0 0 0 0 0 0 0 0
35152- 0 0 0 0 0 0 0 0 0 0 0 0
35153- 0 0 0 0 0 0 0 0 0 0 0 0
35154- 0 0 0 0 0 0 0 0 0 0 0 0
35155- 0 0 1 0 0 0 0 0 0 0 0 0
35156- 0 0 0 0 0 0 0 0 0 0 0 0
35157- 0 0 0 0 0 0 0 0 0 0 0 0
35158- 0 0 0 0 0 0 0 0 0 0 0 0
35159- 0 0 0 0 0 0 0 0 0 0 0 0
35160- 0 0 0 0 0 0 0 0 0 14 14 14
35161- 46 46 46 86 86 86 2 2 6 2 2 6
35162- 6 6 6 6 6 6 22 22 22 34 34 34
35163- 6 6 6 2 2 6 2 2 6 2 2 6
35164- 2 2 6 2 2 6 18 18 18 34 34 34
35165- 10 10 10 50 50 50 22 22 22 2 2 6
35166- 2 2 6 2 2 6 2 2 6 10 10 10
35167- 86 86 86 42 42 42 14 14 14 0 0 0
35168- 0 0 0 0 0 0 0 0 0 0 0 0
35169- 0 0 0 0 0 0 0 0 0 0 0 0
35170- 0 0 0 0 0 0 0 0 0 0 0 0
35171- 0 0 0 0 0 0 0 0 0 0 0 0
35172- 0 0 0 0 0 0 0 0 0 0 0 0
35173- 0 0 0 0 0 0 0 0 0 0 0 0
35174- 0 0 0 0 0 0 0 0 0 0 0 0
35175- 0 0 1 0 0 1 0 0 1 0 0 0
35176- 0 0 0 0 0 0 0 0 0 0 0 0
35177- 0 0 0 0 0 0 0 0 0 0 0 0
35178- 0 0 0 0 0 0 0 0 0 0 0 0
35179- 0 0 0 0 0 0 0 0 0 0 0 0
35180- 0 0 0 0 0 0 0 0 0 14 14 14
35181- 46 46 46 86 86 86 2 2 6 2 2 6
35182- 38 38 38 116 116 116 94 94 94 22 22 22
35183- 22 22 22 2 2 6 2 2 6 2 2 6
35184- 14 14 14 86 86 86 138 138 138 162 162 162
35185-154 154 154 38 38 38 26 26 26 6 6 6
35186- 2 2 6 2 2 6 2 2 6 2 2 6
35187- 86 86 86 46 46 46 14 14 14 0 0 0
35188- 0 0 0 0 0 0 0 0 0 0 0 0
35189- 0 0 0 0 0 0 0 0 0 0 0 0
35190- 0 0 0 0 0 0 0 0 0 0 0 0
35191- 0 0 0 0 0 0 0 0 0 0 0 0
35192- 0 0 0 0 0 0 0 0 0 0 0 0
35193- 0 0 0 0 0 0 0 0 0 0 0 0
35194- 0 0 0 0 0 0 0 0 0 0 0 0
35195- 0 0 0 0 0 0 0 0 0 0 0 0
35196- 0 0 0 0 0 0 0 0 0 0 0 0
35197- 0 0 0 0 0 0 0 0 0 0 0 0
35198- 0 0 0 0 0 0 0 0 0 0 0 0
35199- 0 0 0 0 0 0 0 0 0 0 0 0
35200- 0 0 0 0 0 0 0 0 0 14 14 14
35201- 46 46 46 86 86 86 2 2 6 14 14 14
35202-134 134 134 198 198 198 195 195 195 116 116 116
35203- 10 10 10 2 2 6 2 2 6 6 6 6
35204-101 98 89 187 187 187 210 210 210 218 218 218
35205-214 214 214 134 134 134 14 14 14 6 6 6
35206- 2 2 6 2 2 6 2 2 6 2 2 6
35207- 86 86 86 50 50 50 18 18 18 6 6 6
35208- 0 0 0 0 0 0 0 0 0 0 0 0
35209- 0 0 0 0 0 0 0 0 0 0 0 0
35210- 0 0 0 0 0 0 0 0 0 0 0 0
35211- 0 0 0 0 0 0 0 0 0 0 0 0
35212- 0 0 0 0 0 0 0 0 0 0 0 0
35213- 0 0 0 0 0 0 0 0 0 0 0 0
35214- 0 0 0 0 0 0 0 0 1 0 0 0
35215- 0 0 1 0 0 1 0 0 1 0 0 0
35216- 0 0 0 0 0 0 0 0 0 0 0 0
35217- 0 0 0 0 0 0 0 0 0 0 0 0
35218- 0 0 0 0 0 0 0 0 0 0 0 0
35219- 0 0 0 0 0 0 0 0 0 0 0 0
35220- 0 0 0 0 0 0 0 0 0 14 14 14
35221- 46 46 46 86 86 86 2 2 6 54 54 54
35222-218 218 218 195 195 195 226 226 226 246 246 246
35223- 58 58 58 2 2 6 2 2 6 30 30 30
35224-210 210 210 253 253 253 174 174 174 123 123 123
35225-221 221 221 234 234 234 74 74 74 2 2 6
35226- 2 2 6 2 2 6 2 2 6 2 2 6
35227- 70 70 70 58 58 58 22 22 22 6 6 6
35228- 0 0 0 0 0 0 0 0 0 0 0 0
35229- 0 0 0 0 0 0 0 0 0 0 0 0
35230- 0 0 0 0 0 0 0 0 0 0 0 0
35231- 0 0 0 0 0 0 0 0 0 0 0 0
35232- 0 0 0 0 0 0 0 0 0 0 0 0
35233- 0 0 0 0 0 0 0 0 0 0 0 0
35234- 0 0 0 0 0 0 0 0 0 0 0 0
35235- 0 0 0 0 0 0 0 0 0 0 0 0
35236- 0 0 0 0 0 0 0 0 0 0 0 0
35237- 0 0 0 0 0 0 0 0 0 0 0 0
35238- 0 0 0 0 0 0 0 0 0 0 0 0
35239- 0 0 0 0 0 0 0 0 0 0 0 0
35240- 0 0 0 0 0 0 0 0 0 14 14 14
35241- 46 46 46 82 82 82 2 2 6 106 106 106
35242-170 170 170 26 26 26 86 86 86 226 226 226
35243-123 123 123 10 10 10 14 14 14 46 46 46
35244-231 231 231 190 190 190 6 6 6 70 70 70
35245- 90 90 90 238 238 238 158 158 158 2 2 6
35246- 2 2 6 2 2 6 2 2 6 2 2 6
35247- 70 70 70 58 58 58 22 22 22 6 6 6
35248- 0 0 0 0 0 0 0 0 0 0 0 0
35249- 0 0 0 0 0 0 0 0 0 0 0 0
35250- 0 0 0 0 0 0 0 0 0 0 0 0
35251- 0 0 0 0 0 0 0 0 0 0 0 0
35252- 0 0 0 0 0 0 0 0 0 0 0 0
35253- 0 0 0 0 0 0 0 0 0 0 0 0
35254- 0 0 0 0 0 0 0 0 1 0 0 0
35255- 0 0 1 0 0 1 0 0 1 0 0 0
35256- 0 0 0 0 0 0 0 0 0 0 0 0
35257- 0 0 0 0 0 0 0 0 0 0 0 0
35258- 0 0 0 0 0 0 0 0 0 0 0 0
35259- 0 0 0 0 0 0 0 0 0 0 0 0
35260- 0 0 0 0 0 0 0 0 0 14 14 14
35261- 42 42 42 86 86 86 6 6 6 116 116 116
35262-106 106 106 6 6 6 70 70 70 149 149 149
35263-128 128 128 18 18 18 38 38 38 54 54 54
35264-221 221 221 106 106 106 2 2 6 14 14 14
35265- 46 46 46 190 190 190 198 198 198 2 2 6
35266- 2 2 6 2 2 6 2 2 6 2 2 6
35267- 74 74 74 62 62 62 22 22 22 6 6 6
35268- 0 0 0 0 0 0 0 0 0 0 0 0
35269- 0 0 0 0 0 0 0 0 0 0 0 0
35270- 0 0 0 0 0 0 0 0 0 0 0 0
35271- 0 0 0 0 0 0 0 0 0 0 0 0
35272- 0 0 0 0 0 0 0 0 0 0 0 0
35273- 0 0 0 0 0 0 0 0 0 0 0 0
35274- 0 0 0 0 0 0 0 0 1 0 0 0
35275- 0 0 1 0 0 0 0 0 1 0 0 0
35276- 0 0 0 0 0 0 0 0 0 0 0 0
35277- 0 0 0 0 0 0 0 0 0 0 0 0
35278- 0 0 0 0 0 0 0 0 0 0 0 0
35279- 0 0 0 0 0 0 0 0 0 0 0 0
35280- 0 0 0 0 0 0 0 0 0 14 14 14
35281- 42 42 42 94 94 94 14 14 14 101 101 101
35282-128 128 128 2 2 6 18 18 18 116 116 116
35283-118 98 46 121 92 8 121 92 8 98 78 10
35284-162 162 162 106 106 106 2 2 6 2 2 6
35285- 2 2 6 195 195 195 195 195 195 6 6 6
35286- 2 2 6 2 2 6 2 2 6 2 2 6
35287- 74 74 74 62 62 62 22 22 22 6 6 6
35288- 0 0 0 0 0 0 0 0 0 0 0 0
35289- 0 0 0 0 0 0 0 0 0 0 0 0
35290- 0 0 0 0 0 0 0 0 0 0 0 0
35291- 0 0 0 0 0 0 0 0 0 0 0 0
35292- 0 0 0 0 0 0 0 0 0 0 0 0
35293- 0 0 0 0 0 0 0 0 0 0 0 0
35294- 0 0 0 0 0 0 0 0 1 0 0 1
35295- 0 0 1 0 0 0 0 0 1 0 0 0
35296- 0 0 0 0 0 0 0 0 0 0 0 0
35297- 0 0 0 0 0 0 0 0 0 0 0 0
35298- 0 0 0 0 0 0 0 0 0 0 0 0
35299- 0 0 0 0 0 0 0 0 0 0 0 0
35300- 0 0 0 0 0 0 0 0 0 10 10 10
35301- 38 38 38 90 90 90 14 14 14 58 58 58
35302-210 210 210 26 26 26 54 38 6 154 114 10
35303-226 170 11 236 186 11 225 175 15 184 144 12
35304-215 174 15 175 146 61 37 26 9 2 2 6
35305- 70 70 70 246 246 246 138 138 138 2 2 6
35306- 2 2 6 2 2 6 2 2 6 2 2 6
35307- 70 70 70 66 66 66 26 26 26 6 6 6
35308- 0 0 0 0 0 0 0 0 0 0 0 0
35309- 0 0 0 0 0 0 0 0 0 0 0 0
35310- 0 0 0 0 0 0 0 0 0 0 0 0
35311- 0 0 0 0 0 0 0 0 0 0 0 0
35312- 0 0 0 0 0 0 0 0 0 0 0 0
35313- 0 0 0 0 0 0 0 0 0 0 0 0
35314- 0 0 0 0 0 0 0 0 0 0 0 0
35315- 0 0 0 0 0 0 0 0 0 0 0 0
35316- 0 0 0 0 0 0 0 0 0 0 0 0
35317- 0 0 0 0 0 0 0 0 0 0 0 0
35318- 0 0 0 0 0 0 0 0 0 0 0 0
35319- 0 0 0 0 0 0 0 0 0 0 0 0
35320- 0 0 0 0 0 0 0 0 0 10 10 10
35321- 38 38 38 86 86 86 14 14 14 10 10 10
35322-195 195 195 188 164 115 192 133 9 225 175 15
35323-239 182 13 234 190 10 232 195 16 232 200 30
35324-245 207 45 241 208 19 232 195 16 184 144 12
35325-218 194 134 211 206 186 42 42 42 2 2 6
35326- 2 2 6 2 2 6 2 2 6 2 2 6
35327- 50 50 50 74 74 74 30 30 30 6 6 6
35328- 0 0 0 0 0 0 0 0 0 0 0 0
35329- 0 0 0 0 0 0 0 0 0 0 0 0
35330- 0 0 0 0 0 0 0 0 0 0 0 0
35331- 0 0 0 0 0 0 0 0 0 0 0 0
35332- 0 0 0 0 0 0 0 0 0 0 0 0
35333- 0 0 0 0 0 0 0 0 0 0 0 0
35334- 0 0 0 0 0 0 0 0 0 0 0 0
35335- 0 0 0 0 0 0 0 0 0 0 0 0
35336- 0 0 0 0 0 0 0 0 0 0 0 0
35337- 0 0 0 0 0 0 0 0 0 0 0 0
35338- 0 0 0 0 0 0 0 0 0 0 0 0
35339- 0 0 0 0 0 0 0 0 0 0 0 0
35340- 0 0 0 0 0 0 0 0 0 10 10 10
35341- 34 34 34 86 86 86 14 14 14 2 2 6
35342-121 87 25 192 133 9 219 162 10 239 182 13
35343-236 186 11 232 195 16 241 208 19 244 214 54
35344-246 218 60 246 218 38 246 215 20 241 208 19
35345-241 208 19 226 184 13 121 87 25 2 2 6
35346- 2 2 6 2 2 6 2 2 6 2 2 6
35347- 50 50 50 82 82 82 34 34 34 10 10 10
35348- 0 0 0 0 0 0 0 0 0 0 0 0
35349- 0 0 0 0 0 0 0 0 0 0 0 0
35350- 0 0 0 0 0 0 0 0 0 0 0 0
35351- 0 0 0 0 0 0 0 0 0 0 0 0
35352- 0 0 0 0 0 0 0 0 0 0 0 0
35353- 0 0 0 0 0 0 0 0 0 0 0 0
35354- 0 0 0 0 0 0 0 0 0 0 0 0
35355- 0 0 0 0 0 0 0 0 0 0 0 0
35356- 0 0 0 0 0 0 0 0 0 0 0 0
35357- 0 0 0 0 0 0 0 0 0 0 0 0
35358- 0 0 0 0 0 0 0 0 0 0 0 0
35359- 0 0 0 0 0 0 0 0 0 0 0 0
35360- 0 0 0 0 0 0 0 0 0 10 10 10
35361- 34 34 34 82 82 82 30 30 30 61 42 6
35362-180 123 7 206 145 10 230 174 11 239 182 13
35363-234 190 10 238 202 15 241 208 19 246 218 74
35364-246 218 38 246 215 20 246 215 20 246 215 20
35365-226 184 13 215 174 15 184 144 12 6 6 6
35366- 2 2 6 2 2 6 2 2 6 2 2 6
35367- 26 26 26 94 94 94 42 42 42 14 14 14
35368- 0 0 0 0 0 0 0 0 0 0 0 0
35369- 0 0 0 0 0 0 0 0 0 0 0 0
35370- 0 0 0 0 0 0 0 0 0 0 0 0
35371- 0 0 0 0 0 0 0 0 0 0 0 0
35372- 0 0 0 0 0 0 0 0 0 0 0 0
35373- 0 0 0 0 0 0 0 0 0 0 0 0
35374- 0 0 0 0 0 0 0 0 0 0 0 0
35375- 0 0 0 0 0 0 0 0 0 0 0 0
35376- 0 0 0 0 0 0 0 0 0 0 0 0
35377- 0 0 0 0 0 0 0 0 0 0 0 0
35378- 0 0 0 0 0 0 0 0 0 0 0 0
35379- 0 0 0 0 0 0 0 0 0 0 0 0
35380- 0 0 0 0 0 0 0 0 0 10 10 10
35381- 30 30 30 78 78 78 50 50 50 104 69 6
35382-192 133 9 216 158 10 236 178 12 236 186 11
35383-232 195 16 241 208 19 244 214 54 245 215 43
35384-246 215 20 246 215 20 241 208 19 198 155 10
35385-200 144 11 216 158 10 156 118 10 2 2 6
35386- 2 2 6 2 2 6 2 2 6 2 2 6
35387- 6 6 6 90 90 90 54 54 54 18 18 18
35388- 6 6 6 0 0 0 0 0 0 0 0 0
35389- 0 0 0 0 0 0 0 0 0 0 0 0
35390- 0 0 0 0 0 0 0 0 0 0 0 0
35391- 0 0 0 0 0 0 0 0 0 0 0 0
35392- 0 0 0 0 0 0 0 0 0 0 0 0
35393- 0 0 0 0 0 0 0 0 0 0 0 0
35394- 0 0 0 0 0 0 0 0 0 0 0 0
35395- 0 0 0 0 0 0 0 0 0 0 0 0
35396- 0 0 0 0 0 0 0 0 0 0 0 0
35397- 0 0 0 0 0 0 0 0 0 0 0 0
35398- 0 0 0 0 0 0 0 0 0 0 0 0
35399- 0 0 0 0 0 0 0 0 0 0 0 0
35400- 0 0 0 0 0 0 0 0 0 10 10 10
35401- 30 30 30 78 78 78 46 46 46 22 22 22
35402-137 92 6 210 162 10 239 182 13 238 190 10
35403-238 202 15 241 208 19 246 215 20 246 215 20
35404-241 208 19 203 166 17 185 133 11 210 150 10
35405-216 158 10 210 150 10 102 78 10 2 2 6
35406- 6 6 6 54 54 54 14 14 14 2 2 6
35407- 2 2 6 62 62 62 74 74 74 30 30 30
35408- 10 10 10 0 0 0 0 0 0 0 0 0
35409- 0 0 0 0 0 0 0 0 0 0 0 0
35410- 0 0 0 0 0 0 0 0 0 0 0 0
35411- 0 0 0 0 0 0 0 0 0 0 0 0
35412- 0 0 0 0 0 0 0 0 0 0 0 0
35413- 0 0 0 0 0 0 0 0 0 0 0 0
35414- 0 0 0 0 0 0 0 0 0 0 0 0
35415- 0 0 0 0 0 0 0 0 0 0 0 0
35416- 0 0 0 0 0 0 0 0 0 0 0 0
35417- 0 0 0 0 0 0 0 0 0 0 0 0
35418- 0 0 0 0 0 0 0 0 0 0 0 0
35419- 0 0 0 0 0 0 0 0 0 0 0 0
35420- 0 0 0 0 0 0 0 0 0 10 10 10
35421- 34 34 34 78 78 78 50 50 50 6 6 6
35422- 94 70 30 139 102 15 190 146 13 226 184 13
35423-232 200 30 232 195 16 215 174 15 190 146 13
35424-168 122 10 192 133 9 210 150 10 213 154 11
35425-202 150 34 182 157 106 101 98 89 2 2 6
35426- 2 2 6 78 78 78 116 116 116 58 58 58
35427- 2 2 6 22 22 22 90 90 90 46 46 46
35428- 18 18 18 6 6 6 0 0 0 0 0 0
35429- 0 0 0 0 0 0 0 0 0 0 0 0
35430- 0 0 0 0 0 0 0 0 0 0 0 0
35431- 0 0 0 0 0 0 0 0 0 0 0 0
35432- 0 0 0 0 0 0 0 0 0 0 0 0
35433- 0 0 0 0 0 0 0 0 0 0 0 0
35434- 0 0 0 0 0 0 0 0 0 0 0 0
35435- 0 0 0 0 0 0 0 0 0 0 0 0
35436- 0 0 0 0 0 0 0 0 0 0 0 0
35437- 0 0 0 0 0 0 0 0 0 0 0 0
35438- 0 0 0 0 0 0 0 0 0 0 0 0
35439- 0 0 0 0 0 0 0 0 0 0 0 0
35440- 0 0 0 0 0 0 0 0 0 10 10 10
35441- 38 38 38 86 86 86 50 50 50 6 6 6
35442-128 128 128 174 154 114 156 107 11 168 122 10
35443-198 155 10 184 144 12 197 138 11 200 144 11
35444-206 145 10 206 145 10 197 138 11 188 164 115
35445-195 195 195 198 198 198 174 174 174 14 14 14
35446- 2 2 6 22 22 22 116 116 116 116 116 116
35447- 22 22 22 2 2 6 74 74 74 70 70 70
35448- 30 30 30 10 10 10 0 0 0 0 0 0
35449- 0 0 0 0 0 0 0 0 0 0 0 0
35450- 0 0 0 0 0 0 0 0 0 0 0 0
35451- 0 0 0 0 0 0 0 0 0 0 0 0
35452- 0 0 0 0 0 0 0 0 0 0 0 0
35453- 0 0 0 0 0 0 0 0 0 0 0 0
35454- 0 0 0 0 0 0 0 0 0 0 0 0
35455- 0 0 0 0 0 0 0 0 0 0 0 0
35456- 0 0 0 0 0 0 0 0 0 0 0 0
35457- 0 0 0 0 0 0 0 0 0 0 0 0
35458- 0 0 0 0 0 0 0 0 0 0 0 0
35459- 0 0 0 0 0 0 0 0 0 0 0 0
35460- 0 0 0 0 0 0 6 6 6 18 18 18
35461- 50 50 50 101 101 101 26 26 26 10 10 10
35462-138 138 138 190 190 190 174 154 114 156 107 11
35463-197 138 11 200 144 11 197 138 11 192 133 9
35464-180 123 7 190 142 34 190 178 144 187 187 187
35465-202 202 202 221 221 221 214 214 214 66 66 66
35466- 2 2 6 2 2 6 50 50 50 62 62 62
35467- 6 6 6 2 2 6 10 10 10 90 90 90
35468- 50 50 50 18 18 18 6 6 6 0 0 0
35469- 0 0 0 0 0 0 0 0 0 0 0 0
35470- 0 0 0 0 0 0 0 0 0 0 0 0
35471- 0 0 0 0 0 0 0 0 0 0 0 0
35472- 0 0 0 0 0 0 0 0 0 0 0 0
35473- 0 0 0 0 0 0 0 0 0 0 0 0
35474- 0 0 0 0 0 0 0 0 0 0 0 0
35475- 0 0 0 0 0 0 0 0 0 0 0 0
35476- 0 0 0 0 0 0 0 0 0 0 0 0
35477- 0 0 0 0 0 0 0 0 0 0 0 0
35478- 0 0 0 0 0 0 0 0 0 0 0 0
35479- 0 0 0 0 0 0 0 0 0 0 0 0
35480- 0 0 0 0 0 0 10 10 10 34 34 34
35481- 74 74 74 74 74 74 2 2 6 6 6 6
35482-144 144 144 198 198 198 190 190 190 178 166 146
35483-154 121 60 156 107 11 156 107 11 168 124 44
35484-174 154 114 187 187 187 190 190 190 210 210 210
35485-246 246 246 253 253 253 253 253 253 182 182 182
35486- 6 6 6 2 2 6 2 2 6 2 2 6
35487- 2 2 6 2 2 6 2 2 6 62 62 62
35488- 74 74 74 34 34 34 14 14 14 0 0 0
35489- 0 0 0 0 0 0 0 0 0 0 0 0
35490- 0 0 0 0 0 0 0 0 0 0 0 0
35491- 0 0 0 0 0 0 0 0 0 0 0 0
35492- 0 0 0 0 0 0 0 0 0 0 0 0
35493- 0 0 0 0 0 0 0 0 0 0 0 0
35494- 0 0 0 0 0 0 0 0 0 0 0 0
35495- 0 0 0 0 0 0 0 0 0 0 0 0
35496- 0 0 0 0 0 0 0 0 0 0 0 0
35497- 0 0 0 0 0 0 0 0 0 0 0 0
35498- 0 0 0 0 0 0 0 0 0 0 0 0
35499- 0 0 0 0 0 0 0 0 0 0 0 0
35500- 0 0 0 10 10 10 22 22 22 54 54 54
35501- 94 94 94 18 18 18 2 2 6 46 46 46
35502-234 234 234 221 221 221 190 190 190 190 190 190
35503-190 190 190 187 187 187 187 187 187 190 190 190
35504-190 190 190 195 195 195 214 214 214 242 242 242
35505-253 253 253 253 253 253 253 253 253 253 253 253
35506- 82 82 82 2 2 6 2 2 6 2 2 6
35507- 2 2 6 2 2 6 2 2 6 14 14 14
35508- 86 86 86 54 54 54 22 22 22 6 6 6
35509- 0 0 0 0 0 0 0 0 0 0 0 0
35510- 0 0 0 0 0 0 0 0 0 0 0 0
35511- 0 0 0 0 0 0 0 0 0 0 0 0
35512- 0 0 0 0 0 0 0 0 0 0 0 0
35513- 0 0 0 0 0 0 0 0 0 0 0 0
35514- 0 0 0 0 0 0 0 0 0 0 0 0
35515- 0 0 0 0 0 0 0 0 0 0 0 0
35516- 0 0 0 0 0 0 0 0 0 0 0 0
35517- 0 0 0 0 0 0 0 0 0 0 0 0
35518- 0 0 0 0 0 0 0 0 0 0 0 0
35519- 0 0 0 0 0 0 0 0 0 0 0 0
35520- 6 6 6 18 18 18 46 46 46 90 90 90
35521- 46 46 46 18 18 18 6 6 6 182 182 182
35522-253 253 253 246 246 246 206 206 206 190 190 190
35523-190 190 190 190 190 190 190 190 190 190 190 190
35524-206 206 206 231 231 231 250 250 250 253 253 253
35525-253 253 253 253 253 253 253 253 253 253 253 253
35526-202 202 202 14 14 14 2 2 6 2 2 6
35527- 2 2 6 2 2 6 2 2 6 2 2 6
35528- 42 42 42 86 86 86 42 42 42 18 18 18
35529- 6 6 6 0 0 0 0 0 0 0 0 0
35530- 0 0 0 0 0 0 0 0 0 0 0 0
35531- 0 0 0 0 0 0 0 0 0 0 0 0
35532- 0 0 0 0 0 0 0 0 0 0 0 0
35533- 0 0 0 0 0 0 0 0 0 0 0 0
35534- 0 0 0 0 0 0 0 0 0 0 0 0
35535- 0 0 0 0 0 0 0 0 0 0 0 0
35536- 0 0 0 0 0 0 0 0 0 0 0 0
35537- 0 0 0 0 0 0 0 0 0 0 0 0
35538- 0 0 0 0 0 0 0 0 0 0 0 0
35539- 0 0 0 0 0 0 0 0 0 6 6 6
35540- 14 14 14 38 38 38 74 74 74 66 66 66
35541- 2 2 6 6 6 6 90 90 90 250 250 250
35542-253 253 253 253 253 253 238 238 238 198 198 198
35543-190 190 190 190 190 190 195 195 195 221 221 221
35544-246 246 246 253 253 253 253 253 253 253 253 253
35545-253 253 253 253 253 253 253 253 253 253 253 253
35546-253 253 253 82 82 82 2 2 6 2 2 6
35547- 2 2 6 2 2 6 2 2 6 2 2 6
35548- 2 2 6 78 78 78 70 70 70 34 34 34
35549- 14 14 14 6 6 6 0 0 0 0 0 0
35550- 0 0 0 0 0 0 0 0 0 0 0 0
35551- 0 0 0 0 0 0 0 0 0 0 0 0
35552- 0 0 0 0 0 0 0 0 0 0 0 0
35553- 0 0 0 0 0 0 0 0 0 0 0 0
35554- 0 0 0 0 0 0 0 0 0 0 0 0
35555- 0 0 0 0 0 0 0 0 0 0 0 0
35556- 0 0 0 0 0 0 0 0 0 0 0 0
35557- 0 0 0 0 0 0 0 0 0 0 0 0
35558- 0 0 0 0 0 0 0 0 0 0 0 0
35559- 0 0 0 0 0 0 0 0 0 14 14 14
35560- 34 34 34 66 66 66 78 78 78 6 6 6
35561- 2 2 6 18 18 18 218 218 218 253 253 253
35562-253 253 253 253 253 253 253 253 253 246 246 246
35563-226 226 226 231 231 231 246 246 246 253 253 253
35564-253 253 253 253 253 253 253 253 253 253 253 253
35565-253 253 253 253 253 253 253 253 253 253 253 253
35566-253 253 253 178 178 178 2 2 6 2 2 6
35567- 2 2 6 2 2 6 2 2 6 2 2 6
35568- 2 2 6 18 18 18 90 90 90 62 62 62
35569- 30 30 30 10 10 10 0 0 0 0 0 0
35570- 0 0 0 0 0 0 0 0 0 0 0 0
35571- 0 0 0 0 0 0 0 0 0 0 0 0
35572- 0 0 0 0 0 0 0 0 0 0 0 0
35573- 0 0 0 0 0 0 0 0 0 0 0 0
35574- 0 0 0 0 0 0 0 0 0 0 0 0
35575- 0 0 0 0 0 0 0 0 0 0 0 0
35576- 0 0 0 0 0 0 0 0 0 0 0 0
35577- 0 0 0 0 0 0 0 0 0 0 0 0
35578- 0 0 0 0 0 0 0 0 0 0 0 0
35579- 0 0 0 0 0 0 10 10 10 26 26 26
35580- 58 58 58 90 90 90 18 18 18 2 2 6
35581- 2 2 6 110 110 110 253 253 253 253 253 253
35582-253 253 253 253 253 253 253 253 253 253 253 253
35583-250 250 250 253 253 253 253 253 253 253 253 253
35584-253 253 253 253 253 253 253 253 253 253 253 253
35585-253 253 253 253 253 253 253 253 253 253 253 253
35586-253 253 253 231 231 231 18 18 18 2 2 6
35587- 2 2 6 2 2 6 2 2 6 2 2 6
35588- 2 2 6 2 2 6 18 18 18 94 94 94
35589- 54 54 54 26 26 26 10 10 10 0 0 0
35590- 0 0 0 0 0 0 0 0 0 0 0 0
35591- 0 0 0 0 0 0 0 0 0 0 0 0
35592- 0 0 0 0 0 0 0 0 0 0 0 0
35593- 0 0 0 0 0 0 0 0 0 0 0 0
35594- 0 0 0 0 0 0 0 0 0 0 0 0
35595- 0 0 0 0 0 0 0 0 0 0 0 0
35596- 0 0 0 0 0 0 0 0 0 0 0 0
35597- 0 0 0 0 0 0 0 0 0 0 0 0
35598- 0 0 0 0 0 0 0 0 0 0 0 0
35599- 0 0 0 6 6 6 22 22 22 50 50 50
35600- 90 90 90 26 26 26 2 2 6 2 2 6
35601- 14 14 14 195 195 195 250 250 250 253 253 253
35602-253 253 253 253 253 253 253 253 253 253 253 253
35603-253 253 253 253 253 253 253 253 253 253 253 253
35604-253 253 253 253 253 253 253 253 253 253 253 253
35605-253 253 253 253 253 253 253 253 253 253 253 253
35606-250 250 250 242 242 242 54 54 54 2 2 6
35607- 2 2 6 2 2 6 2 2 6 2 2 6
35608- 2 2 6 2 2 6 2 2 6 38 38 38
35609- 86 86 86 50 50 50 22 22 22 6 6 6
35610- 0 0 0 0 0 0 0 0 0 0 0 0
35611- 0 0 0 0 0 0 0 0 0 0 0 0
35612- 0 0 0 0 0 0 0 0 0 0 0 0
35613- 0 0 0 0 0 0 0 0 0 0 0 0
35614- 0 0 0 0 0 0 0 0 0 0 0 0
35615- 0 0 0 0 0 0 0 0 0 0 0 0
35616- 0 0 0 0 0 0 0 0 0 0 0 0
35617- 0 0 0 0 0 0 0 0 0 0 0 0
35618- 0 0 0 0 0 0 0 0 0 0 0 0
35619- 6 6 6 14 14 14 38 38 38 82 82 82
35620- 34 34 34 2 2 6 2 2 6 2 2 6
35621- 42 42 42 195 195 195 246 246 246 253 253 253
35622-253 253 253 253 253 253 253 253 253 250 250 250
35623-242 242 242 242 242 242 250 250 250 253 253 253
35624-253 253 253 253 253 253 253 253 253 253 253 253
35625-253 253 253 250 250 250 246 246 246 238 238 238
35626-226 226 226 231 231 231 101 101 101 6 6 6
35627- 2 2 6 2 2 6 2 2 6 2 2 6
35628- 2 2 6 2 2 6 2 2 6 2 2 6
35629- 38 38 38 82 82 82 42 42 42 14 14 14
35630- 6 6 6 0 0 0 0 0 0 0 0 0
35631- 0 0 0 0 0 0 0 0 0 0 0 0
35632- 0 0 0 0 0 0 0 0 0 0 0 0
35633- 0 0 0 0 0 0 0 0 0 0 0 0
35634- 0 0 0 0 0 0 0 0 0 0 0 0
35635- 0 0 0 0 0 0 0 0 0 0 0 0
35636- 0 0 0 0 0 0 0 0 0 0 0 0
35637- 0 0 0 0 0 0 0 0 0 0 0 0
35638- 0 0 0 0 0 0 0 0 0 0 0 0
35639- 10 10 10 26 26 26 62 62 62 66 66 66
35640- 2 2 6 2 2 6 2 2 6 6 6 6
35641- 70 70 70 170 170 170 206 206 206 234 234 234
35642-246 246 246 250 250 250 250 250 250 238 238 238
35643-226 226 226 231 231 231 238 238 238 250 250 250
35644-250 250 250 250 250 250 246 246 246 231 231 231
35645-214 214 214 206 206 206 202 202 202 202 202 202
35646-198 198 198 202 202 202 182 182 182 18 18 18
35647- 2 2 6 2 2 6 2 2 6 2 2 6
35648- 2 2 6 2 2 6 2 2 6 2 2 6
35649- 2 2 6 62 62 62 66 66 66 30 30 30
35650- 10 10 10 0 0 0 0 0 0 0 0 0
35651- 0 0 0 0 0 0 0 0 0 0 0 0
35652- 0 0 0 0 0 0 0 0 0 0 0 0
35653- 0 0 0 0 0 0 0 0 0 0 0 0
35654- 0 0 0 0 0 0 0 0 0 0 0 0
35655- 0 0 0 0 0 0 0 0 0 0 0 0
35656- 0 0 0 0 0 0 0 0 0 0 0 0
35657- 0 0 0 0 0 0 0 0 0 0 0 0
35658- 0 0 0 0 0 0 0 0 0 0 0 0
35659- 14 14 14 42 42 42 82 82 82 18 18 18
35660- 2 2 6 2 2 6 2 2 6 10 10 10
35661- 94 94 94 182 182 182 218 218 218 242 242 242
35662-250 250 250 253 253 253 253 253 253 250 250 250
35663-234 234 234 253 253 253 253 253 253 253 253 253
35664-253 253 253 253 253 253 253 253 253 246 246 246
35665-238 238 238 226 226 226 210 210 210 202 202 202
35666-195 195 195 195 195 195 210 210 210 158 158 158
35667- 6 6 6 14 14 14 50 50 50 14 14 14
35668- 2 2 6 2 2 6 2 2 6 2 2 6
35669- 2 2 6 6 6 6 86 86 86 46 46 46
35670- 18 18 18 6 6 6 0 0 0 0 0 0
35671- 0 0 0 0 0 0 0 0 0 0 0 0
35672- 0 0 0 0 0 0 0 0 0 0 0 0
35673- 0 0 0 0 0 0 0 0 0 0 0 0
35674- 0 0 0 0 0 0 0 0 0 0 0 0
35675- 0 0 0 0 0 0 0 0 0 0 0 0
35676- 0 0 0 0 0 0 0 0 0 0 0 0
35677- 0 0 0 0 0 0 0 0 0 0 0 0
35678- 0 0 0 0 0 0 0 0 0 6 6 6
35679- 22 22 22 54 54 54 70 70 70 2 2 6
35680- 2 2 6 10 10 10 2 2 6 22 22 22
35681-166 166 166 231 231 231 250 250 250 253 253 253
35682-253 253 253 253 253 253 253 253 253 250 250 250
35683-242 242 242 253 253 253 253 253 253 253 253 253
35684-253 253 253 253 253 253 253 253 253 253 253 253
35685-253 253 253 253 253 253 253 253 253 246 246 246
35686-231 231 231 206 206 206 198 198 198 226 226 226
35687- 94 94 94 2 2 6 6 6 6 38 38 38
35688- 30 30 30 2 2 6 2 2 6 2 2 6
35689- 2 2 6 2 2 6 62 62 62 66 66 66
35690- 26 26 26 10 10 10 0 0 0 0 0 0
35691- 0 0 0 0 0 0 0 0 0 0 0 0
35692- 0 0 0 0 0 0 0 0 0 0 0 0
35693- 0 0 0 0 0 0 0 0 0 0 0 0
35694- 0 0 0 0 0 0 0 0 0 0 0 0
35695- 0 0 0 0 0 0 0 0 0 0 0 0
35696- 0 0 0 0 0 0 0 0 0 0 0 0
35697- 0 0 0 0 0 0 0 0 0 0 0 0
35698- 0 0 0 0 0 0 0 0 0 10 10 10
35699- 30 30 30 74 74 74 50 50 50 2 2 6
35700- 26 26 26 26 26 26 2 2 6 106 106 106
35701-238 238 238 253 253 253 253 253 253 253 253 253
35702-253 253 253 253 253 253 253 253 253 253 253 253
35703-253 253 253 253 253 253 253 253 253 253 253 253
35704-253 253 253 253 253 253 253 253 253 253 253 253
35705-253 253 253 253 253 253 253 253 253 253 253 253
35706-253 253 253 246 246 246 218 218 218 202 202 202
35707-210 210 210 14 14 14 2 2 6 2 2 6
35708- 30 30 30 22 22 22 2 2 6 2 2 6
35709- 2 2 6 2 2 6 18 18 18 86 86 86
35710- 42 42 42 14 14 14 0 0 0 0 0 0
35711- 0 0 0 0 0 0 0 0 0 0 0 0
35712- 0 0 0 0 0 0 0 0 0 0 0 0
35713- 0 0 0 0 0 0 0 0 0 0 0 0
35714- 0 0 0 0 0 0 0 0 0 0 0 0
35715- 0 0 0 0 0 0 0 0 0 0 0 0
35716- 0 0 0 0 0 0 0 0 0 0 0 0
35717- 0 0 0 0 0 0 0 0 0 0 0 0
35718- 0 0 0 0 0 0 0 0 0 14 14 14
35719- 42 42 42 90 90 90 22 22 22 2 2 6
35720- 42 42 42 2 2 6 18 18 18 218 218 218
35721-253 253 253 253 253 253 253 253 253 253 253 253
35722-253 253 253 253 253 253 253 253 253 253 253 253
35723-253 253 253 253 253 253 253 253 253 253 253 253
35724-253 253 253 253 253 253 253 253 253 253 253 253
35725-253 253 253 253 253 253 253 253 253 253 253 253
35726-253 253 253 253 253 253 250 250 250 221 221 221
35727-218 218 218 101 101 101 2 2 6 14 14 14
35728- 18 18 18 38 38 38 10 10 10 2 2 6
35729- 2 2 6 2 2 6 2 2 6 78 78 78
35730- 58 58 58 22 22 22 6 6 6 0 0 0
35731- 0 0 0 0 0 0 0 0 0 0 0 0
35732- 0 0 0 0 0 0 0 0 0 0 0 0
35733- 0 0 0 0 0 0 0 0 0 0 0 0
35734- 0 0 0 0 0 0 0 0 0 0 0 0
35735- 0 0 0 0 0 0 0 0 0 0 0 0
35736- 0 0 0 0 0 0 0 0 0 0 0 0
35737- 0 0 0 0 0 0 0 0 0 0 0 0
35738- 0 0 0 0 0 0 6 6 6 18 18 18
35739- 54 54 54 82 82 82 2 2 6 26 26 26
35740- 22 22 22 2 2 6 123 123 123 253 253 253
35741-253 253 253 253 253 253 253 253 253 253 253 253
35742-253 253 253 253 253 253 253 253 253 253 253 253
35743-253 253 253 253 253 253 253 253 253 253 253 253
35744-253 253 253 253 253 253 253 253 253 253 253 253
35745-253 253 253 253 253 253 253 253 253 253 253 253
35746-253 253 253 253 253 253 253 253 253 250 250 250
35747-238 238 238 198 198 198 6 6 6 38 38 38
35748- 58 58 58 26 26 26 38 38 38 2 2 6
35749- 2 2 6 2 2 6 2 2 6 46 46 46
35750- 78 78 78 30 30 30 10 10 10 0 0 0
35751- 0 0 0 0 0 0 0 0 0 0 0 0
35752- 0 0 0 0 0 0 0 0 0 0 0 0
35753- 0 0 0 0 0 0 0 0 0 0 0 0
35754- 0 0 0 0 0 0 0 0 0 0 0 0
35755- 0 0 0 0 0 0 0 0 0 0 0 0
35756- 0 0 0 0 0 0 0 0 0 0 0 0
35757- 0 0 0 0 0 0 0 0 0 0 0 0
35758- 0 0 0 0 0 0 10 10 10 30 30 30
35759- 74 74 74 58 58 58 2 2 6 42 42 42
35760- 2 2 6 22 22 22 231 231 231 253 253 253
35761-253 253 253 253 253 253 253 253 253 253 253 253
35762-253 253 253 253 253 253 253 253 253 250 250 250
35763-253 253 253 253 253 253 253 253 253 253 253 253
35764-253 253 253 253 253 253 253 253 253 253 253 253
35765-253 253 253 253 253 253 253 253 253 253 253 253
35766-253 253 253 253 253 253 253 253 253 253 253 253
35767-253 253 253 246 246 246 46 46 46 38 38 38
35768- 42 42 42 14 14 14 38 38 38 14 14 14
35769- 2 2 6 2 2 6 2 2 6 6 6 6
35770- 86 86 86 46 46 46 14 14 14 0 0 0
35771- 0 0 0 0 0 0 0 0 0 0 0 0
35772- 0 0 0 0 0 0 0 0 0 0 0 0
35773- 0 0 0 0 0 0 0 0 0 0 0 0
35774- 0 0 0 0 0 0 0 0 0 0 0 0
35775- 0 0 0 0 0 0 0 0 0 0 0 0
35776- 0 0 0 0 0 0 0 0 0 0 0 0
35777- 0 0 0 0 0 0 0 0 0 0 0 0
35778- 0 0 0 6 6 6 14 14 14 42 42 42
35779- 90 90 90 18 18 18 18 18 18 26 26 26
35780- 2 2 6 116 116 116 253 253 253 253 253 253
35781-253 253 253 253 253 253 253 253 253 253 253 253
35782-253 253 253 253 253 253 250 250 250 238 238 238
35783-253 253 253 253 253 253 253 253 253 253 253 253
35784-253 253 253 253 253 253 253 253 253 253 253 253
35785-253 253 253 253 253 253 253 253 253 253 253 253
35786-253 253 253 253 253 253 253 253 253 253 253 253
35787-253 253 253 253 253 253 94 94 94 6 6 6
35788- 2 2 6 2 2 6 10 10 10 34 34 34
35789- 2 2 6 2 2 6 2 2 6 2 2 6
35790- 74 74 74 58 58 58 22 22 22 6 6 6
35791- 0 0 0 0 0 0 0 0 0 0 0 0
35792- 0 0 0 0 0 0 0 0 0 0 0 0
35793- 0 0 0 0 0 0 0 0 0 0 0 0
35794- 0 0 0 0 0 0 0 0 0 0 0 0
35795- 0 0 0 0 0 0 0 0 0 0 0 0
35796- 0 0 0 0 0 0 0 0 0 0 0 0
35797- 0 0 0 0 0 0 0 0 0 0 0 0
35798- 0 0 0 10 10 10 26 26 26 66 66 66
35799- 82 82 82 2 2 6 38 38 38 6 6 6
35800- 14 14 14 210 210 210 253 253 253 253 253 253
35801-253 253 253 253 253 253 253 253 253 253 253 253
35802-253 253 253 253 253 253 246 246 246 242 242 242
35803-253 253 253 253 253 253 253 253 253 253 253 253
35804-253 253 253 253 253 253 253 253 253 253 253 253
35805-253 253 253 253 253 253 253 253 253 253 253 253
35806-253 253 253 253 253 253 253 253 253 253 253 253
35807-253 253 253 253 253 253 144 144 144 2 2 6
35808- 2 2 6 2 2 6 2 2 6 46 46 46
35809- 2 2 6 2 2 6 2 2 6 2 2 6
35810- 42 42 42 74 74 74 30 30 30 10 10 10
35811- 0 0 0 0 0 0 0 0 0 0 0 0
35812- 0 0 0 0 0 0 0 0 0 0 0 0
35813- 0 0 0 0 0 0 0 0 0 0 0 0
35814- 0 0 0 0 0 0 0 0 0 0 0 0
35815- 0 0 0 0 0 0 0 0 0 0 0 0
35816- 0 0 0 0 0 0 0 0 0 0 0 0
35817- 0 0 0 0 0 0 0 0 0 0 0 0
35818- 6 6 6 14 14 14 42 42 42 90 90 90
35819- 26 26 26 6 6 6 42 42 42 2 2 6
35820- 74 74 74 250 250 250 253 253 253 253 253 253
35821-253 253 253 253 253 253 253 253 253 253 253 253
35822-253 253 253 253 253 253 242 242 242 242 242 242
35823-253 253 253 253 253 253 253 253 253 253 253 253
35824-253 253 253 253 253 253 253 253 253 253 253 253
35825-253 253 253 253 253 253 253 253 253 253 253 253
35826-253 253 253 253 253 253 253 253 253 253 253 253
35827-253 253 253 253 253 253 182 182 182 2 2 6
35828- 2 2 6 2 2 6 2 2 6 46 46 46
35829- 2 2 6 2 2 6 2 2 6 2 2 6
35830- 10 10 10 86 86 86 38 38 38 10 10 10
35831- 0 0 0 0 0 0 0 0 0 0 0 0
35832- 0 0 0 0 0 0 0 0 0 0 0 0
35833- 0 0 0 0 0 0 0 0 0 0 0 0
35834- 0 0 0 0 0 0 0 0 0 0 0 0
35835- 0 0 0 0 0 0 0 0 0 0 0 0
35836- 0 0 0 0 0 0 0 0 0 0 0 0
35837- 0 0 0 0 0 0 0 0 0 0 0 0
35838- 10 10 10 26 26 26 66 66 66 82 82 82
35839- 2 2 6 22 22 22 18 18 18 2 2 6
35840-149 149 149 253 253 253 253 253 253 253 253 253
35841-253 253 253 253 253 253 253 253 253 253 253 253
35842-253 253 253 253 253 253 234 234 234 242 242 242
35843-253 253 253 253 253 253 253 253 253 253 253 253
35844-253 253 253 253 253 253 253 253 253 253 253 253
35845-253 253 253 253 253 253 253 253 253 253 253 253
35846-253 253 253 253 253 253 253 253 253 253 253 253
35847-253 253 253 253 253 253 206 206 206 2 2 6
35848- 2 2 6 2 2 6 2 2 6 38 38 38
35849- 2 2 6 2 2 6 2 2 6 2 2 6
35850- 6 6 6 86 86 86 46 46 46 14 14 14
35851- 0 0 0 0 0 0 0 0 0 0 0 0
35852- 0 0 0 0 0 0 0 0 0 0 0 0
35853- 0 0 0 0 0 0 0 0 0 0 0 0
35854- 0 0 0 0 0 0 0 0 0 0 0 0
35855- 0 0 0 0 0 0 0 0 0 0 0 0
35856- 0 0 0 0 0 0 0 0 0 0 0 0
35857- 0 0 0 0 0 0 0 0 0 6 6 6
35858- 18 18 18 46 46 46 86 86 86 18 18 18
35859- 2 2 6 34 34 34 10 10 10 6 6 6
35860-210 210 210 253 253 253 253 253 253 253 253 253
35861-253 253 253 253 253 253 253 253 253 253 253 253
35862-253 253 253 253 253 253 234 234 234 242 242 242
35863-253 253 253 253 253 253 253 253 253 253 253 253
35864-253 253 253 253 253 253 253 253 253 253 253 253
35865-253 253 253 253 253 253 253 253 253 253 253 253
35866-253 253 253 253 253 253 253 253 253 253 253 253
35867-253 253 253 253 253 253 221 221 221 6 6 6
35868- 2 2 6 2 2 6 6 6 6 30 30 30
35869- 2 2 6 2 2 6 2 2 6 2 2 6
35870- 2 2 6 82 82 82 54 54 54 18 18 18
35871- 6 6 6 0 0 0 0 0 0 0 0 0
35872- 0 0 0 0 0 0 0 0 0 0 0 0
35873- 0 0 0 0 0 0 0 0 0 0 0 0
35874- 0 0 0 0 0 0 0 0 0 0 0 0
35875- 0 0 0 0 0 0 0 0 0 0 0 0
35876- 0 0 0 0 0 0 0 0 0 0 0 0
35877- 0 0 0 0 0 0 0 0 0 10 10 10
35878- 26 26 26 66 66 66 62 62 62 2 2 6
35879- 2 2 6 38 38 38 10 10 10 26 26 26
35880-238 238 238 253 253 253 253 253 253 253 253 253
35881-253 253 253 253 253 253 253 253 253 253 253 253
35882-253 253 253 253 253 253 231 231 231 238 238 238
35883-253 253 253 253 253 253 253 253 253 253 253 253
35884-253 253 253 253 253 253 253 253 253 253 253 253
35885-253 253 253 253 253 253 253 253 253 253 253 253
35886-253 253 253 253 253 253 253 253 253 253 253 253
35887-253 253 253 253 253 253 231 231 231 6 6 6
35888- 2 2 6 2 2 6 10 10 10 30 30 30
35889- 2 2 6 2 2 6 2 2 6 2 2 6
35890- 2 2 6 66 66 66 58 58 58 22 22 22
35891- 6 6 6 0 0 0 0 0 0 0 0 0
35892- 0 0 0 0 0 0 0 0 0 0 0 0
35893- 0 0 0 0 0 0 0 0 0 0 0 0
35894- 0 0 0 0 0 0 0 0 0 0 0 0
35895- 0 0 0 0 0 0 0 0 0 0 0 0
35896- 0 0 0 0 0 0 0 0 0 0 0 0
35897- 0 0 0 0 0 0 0 0 0 10 10 10
35898- 38 38 38 78 78 78 6 6 6 2 2 6
35899- 2 2 6 46 46 46 14 14 14 42 42 42
35900-246 246 246 253 253 253 253 253 253 253 253 253
35901-253 253 253 253 253 253 253 253 253 253 253 253
35902-253 253 253 253 253 253 231 231 231 242 242 242
35903-253 253 253 253 253 253 253 253 253 253 253 253
35904-253 253 253 253 253 253 253 253 253 253 253 253
35905-253 253 253 253 253 253 253 253 253 253 253 253
35906-253 253 253 253 253 253 253 253 253 253 253 253
35907-253 253 253 253 253 253 234 234 234 10 10 10
35908- 2 2 6 2 2 6 22 22 22 14 14 14
35909- 2 2 6 2 2 6 2 2 6 2 2 6
35910- 2 2 6 66 66 66 62 62 62 22 22 22
35911- 6 6 6 0 0 0 0 0 0 0 0 0
35912- 0 0 0 0 0 0 0 0 0 0 0 0
35913- 0 0 0 0 0 0 0 0 0 0 0 0
35914- 0 0 0 0 0 0 0 0 0 0 0 0
35915- 0 0 0 0 0 0 0 0 0 0 0 0
35916- 0 0 0 0 0 0 0 0 0 0 0 0
35917- 0 0 0 0 0 0 6 6 6 18 18 18
35918- 50 50 50 74 74 74 2 2 6 2 2 6
35919- 14 14 14 70 70 70 34 34 34 62 62 62
35920-250 250 250 253 253 253 253 253 253 253 253 253
35921-253 253 253 253 253 253 253 253 253 253 253 253
35922-253 253 253 253 253 253 231 231 231 246 246 246
35923-253 253 253 253 253 253 253 253 253 253 253 253
35924-253 253 253 253 253 253 253 253 253 253 253 253
35925-253 253 253 253 253 253 253 253 253 253 253 253
35926-253 253 253 253 253 253 253 253 253 253 253 253
35927-253 253 253 253 253 253 234 234 234 14 14 14
35928- 2 2 6 2 2 6 30 30 30 2 2 6
35929- 2 2 6 2 2 6 2 2 6 2 2 6
35930- 2 2 6 66 66 66 62 62 62 22 22 22
35931- 6 6 6 0 0 0 0 0 0 0 0 0
35932- 0 0 0 0 0 0 0 0 0 0 0 0
35933- 0 0 0 0 0 0 0 0 0 0 0 0
35934- 0 0 0 0 0 0 0 0 0 0 0 0
35935- 0 0 0 0 0 0 0 0 0 0 0 0
35936- 0 0 0 0 0 0 0 0 0 0 0 0
35937- 0 0 0 0 0 0 6 6 6 18 18 18
35938- 54 54 54 62 62 62 2 2 6 2 2 6
35939- 2 2 6 30 30 30 46 46 46 70 70 70
35940-250 250 250 253 253 253 253 253 253 253 253 253
35941-253 253 253 253 253 253 253 253 253 253 253 253
35942-253 253 253 253 253 253 231 231 231 246 246 246
35943-253 253 253 253 253 253 253 253 253 253 253 253
35944-253 253 253 253 253 253 253 253 253 253 253 253
35945-253 253 253 253 253 253 253 253 253 253 253 253
35946-253 253 253 253 253 253 253 253 253 253 253 253
35947-253 253 253 253 253 253 226 226 226 10 10 10
35948- 2 2 6 6 6 6 30 30 30 2 2 6
35949- 2 2 6 2 2 6 2 2 6 2 2 6
35950- 2 2 6 66 66 66 58 58 58 22 22 22
35951- 6 6 6 0 0 0 0 0 0 0 0 0
35952- 0 0 0 0 0 0 0 0 0 0 0 0
35953- 0 0 0 0 0 0 0 0 0 0 0 0
35954- 0 0 0 0 0 0 0 0 0 0 0 0
35955- 0 0 0 0 0 0 0 0 0 0 0 0
35956- 0 0 0 0 0 0 0 0 0 0 0 0
35957- 0 0 0 0 0 0 6 6 6 22 22 22
35958- 58 58 58 62 62 62 2 2 6 2 2 6
35959- 2 2 6 2 2 6 30 30 30 78 78 78
35960-250 250 250 253 253 253 253 253 253 253 253 253
35961-253 253 253 253 253 253 253 253 253 253 253 253
35962-253 253 253 253 253 253 231 231 231 246 246 246
35963-253 253 253 253 253 253 253 253 253 253 253 253
35964-253 253 253 253 253 253 253 253 253 253 253 253
35965-253 253 253 253 253 253 253 253 253 253 253 253
35966-253 253 253 253 253 253 253 253 253 253 253 253
35967-253 253 253 253 253 253 206 206 206 2 2 6
35968- 22 22 22 34 34 34 18 14 6 22 22 22
35969- 26 26 26 18 18 18 6 6 6 2 2 6
35970- 2 2 6 82 82 82 54 54 54 18 18 18
35971- 6 6 6 0 0 0 0 0 0 0 0 0
35972- 0 0 0 0 0 0 0 0 0 0 0 0
35973- 0 0 0 0 0 0 0 0 0 0 0 0
35974- 0 0 0 0 0 0 0 0 0 0 0 0
35975- 0 0 0 0 0 0 0 0 0 0 0 0
35976- 0 0 0 0 0 0 0 0 0 0 0 0
35977- 0 0 0 0 0 0 6 6 6 26 26 26
35978- 62 62 62 106 106 106 74 54 14 185 133 11
35979-210 162 10 121 92 8 6 6 6 62 62 62
35980-238 238 238 253 253 253 253 253 253 253 253 253
35981-253 253 253 253 253 253 253 253 253 253 253 253
35982-253 253 253 253 253 253 231 231 231 246 246 246
35983-253 253 253 253 253 253 253 253 253 253 253 253
35984-253 253 253 253 253 253 253 253 253 253 253 253
35985-253 253 253 253 253 253 253 253 253 253 253 253
35986-253 253 253 253 253 253 253 253 253 253 253 253
35987-253 253 253 253 253 253 158 158 158 18 18 18
35988- 14 14 14 2 2 6 2 2 6 2 2 6
35989- 6 6 6 18 18 18 66 66 66 38 38 38
35990- 6 6 6 94 94 94 50 50 50 18 18 18
35991- 6 6 6 0 0 0 0 0 0 0 0 0
35992- 0 0 0 0 0 0 0 0 0 0 0 0
35993- 0 0 0 0 0 0 0 0 0 0 0 0
35994- 0 0 0 0 0 0 0 0 0 0 0 0
35995- 0 0 0 0 0 0 0 0 0 0 0 0
35996- 0 0 0 0 0 0 0 0 0 6 6 6
35997- 10 10 10 10 10 10 18 18 18 38 38 38
35998- 78 78 78 142 134 106 216 158 10 242 186 14
35999-246 190 14 246 190 14 156 118 10 10 10 10
36000- 90 90 90 238 238 238 253 253 253 253 253 253
36001-253 253 253 253 253 253 253 253 253 253 253 253
36002-253 253 253 253 253 253 231 231 231 250 250 250
36003-253 253 253 253 253 253 253 253 253 253 253 253
36004-253 253 253 253 253 253 253 253 253 253 253 253
36005-253 253 253 253 253 253 253 253 253 253 253 253
36006-253 253 253 253 253 253 253 253 253 246 230 190
36007-238 204 91 238 204 91 181 142 44 37 26 9
36008- 2 2 6 2 2 6 2 2 6 2 2 6
36009- 2 2 6 2 2 6 38 38 38 46 46 46
36010- 26 26 26 106 106 106 54 54 54 18 18 18
36011- 6 6 6 0 0 0 0 0 0 0 0 0
36012- 0 0 0 0 0 0 0 0 0 0 0 0
36013- 0 0 0 0 0 0 0 0 0 0 0 0
36014- 0 0 0 0 0 0 0 0 0 0 0 0
36015- 0 0 0 0 0 0 0 0 0 0 0 0
36016- 0 0 0 6 6 6 14 14 14 22 22 22
36017- 30 30 30 38 38 38 50 50 50 70 70 70
36018-106 106 106 190 142 34 226 170 11 242 186 14
36019-246 190 14 246 190 14 246 190 14 154 114 10
36020- 6 6 6 74 74 74 226 226 226 253 253 253
36021-253 253 253 253 253 253 253 253 253 253 253 253
36022-253 253 253 253 253 253 231 231 231 250 250 250
36023-253 253 253 253 253 253 253 253 253 253 253 253
36024-253 253 253 253 253 253 253 253 253 253 253 253
36025-253 253 253 253 253 253 253 253 253 253 253 253
36026-253 253 253 253 253 253 253 253 253 228 184 62
36027-241 196 14 241 208 19 232 195 16 38 30 10
36028- 2 2 6 2 2 6 2 2 6 2 2 6
36029- 2 2 6 6 6 6 30 30 30 26 26 26
36030-203 166 17 154 142 90 66 66 66 26 26 26
36031- 6 6 6 0 0 0 0 0 0 0 0 0
36032- 0 0 0 0 0 0 0 0 0 0 0 0
36033- 0 0 0 0 0 0 0 0 0 0 0 0
36034- 0 0 0 0 0 0 0 0 0 0 0 0
36035- 0 0 0 0 0 0 0 0 0 0 0 0
36036- 6 6 6 18 18 18 38 38 38 58 58 58
36037- 78 78 78 86 86 86 101 101 101 123 123 123
36038-175 146 61 210 150 10 234 174 13 246 186 14
36039-246 190 14 246 190 14 246 190 14 238 190 10
36040-102 78 10 2 2 6 46 46 46 198 198 198
36041-253 253 253 253 253 253 253 253 253 253 253 253
36042-253 253 253 253 253 253 234 234 234 242 242 242
36043-253 253 253 253 253 253 253 253 253 253 253 253
36044-253 253 253 253 253 253 253 253 253 253 253 253
36045-253 253 253 253 253 253 253 253 253 253 253 253
36046-253 253 253 253 253 253 253 253 253 224 178 62
36047-242 186 14 241 196 14 210 166 10 22 18 6
36048- 2 2 6 2 2 6 2 2 6 2 2 6
36049- 2 2 6 2 2 6 6 6 6 121 92 8
36050-238 202 15 232 195 16 82 82 82 34 34 34
36051- 10 10 10 0 0 0 0 0 0 0 0 0
36052- 0 0 0 0 0 0 0 0 0 0 0 0
36053- 0 0 0 0 0 0 0 0 0 0 0 0
36054- 0 0 0 0 0 0 0 0 0 0 0 0
36055- 0 0 0 0 0 0 0 0 0 0 0 0
36056- 14 14 14 38 38 38 70 70 70 154 122 46
36057-190 142 34 200 144 11 197 138 11 197 138 11
36058-213 154 11 226 170 11 242 186 14 246 190 14
36059-246 190 14 246 190 14 246 190 14 246 190 14
36060-225 175 15 46 32 6 2 2 6 22 22 22
36061-158 158 158 250 250 250 253 253 253 253 253 253
36062-253 253 253 253 253 253 253 253 253 253 253 253
36063-253 253 253 253 253 253 253 253 253 253 253 253
36064-253 253 253 253 253 253 253 253 253 253 253 253
36065-253 253 253 253 253 253 253 253 253 253 253 253
36066-253 253 253 250 250 250 242 242 242 224 178 62
36067-239 182 13 236 186 11 213 154 11 46 32 6
36068- 2 2 6 2 2 6 2 2 6 2 2 6
36069- 2 2 6 2 2 6 61 42 6 225 175 15
36070-238 190 10 236 186 11 112 100 78 42 42 42
36071- 14 14 14 0 0 0 0 0 0 0 0 0
36072- 0 0 0 0 0 0 0 0 0 0 0 0
36073- 0 0 0 0 0 0 0 0 0 0 0 0
36074- 0 0 0 0 0 0 0 0 0 0 0 0
36075- 0 0 0 0 0 0 0 0 0 6 6 6
36076- 22 22 22 54 54 54 154 122 46 213 154 11
36077-226 170 11 230 174 11 226 170 11 226 170 11
36078-236 178 12 242 186 14 246 190 14 246 190 14
36079-246 190 14 246 190 14 246 190 14 246 190 14
36080-241 196 14 184 144 12 10 10 10 2 2 6
36081- 6 6 6 116 116 116 242 242 242 253 253 253
36082-253 253 253 253 253 253 253 253 253 253 253 253
36083-253 253 253 253 253 253 253 253 253 253 253 253
36084-253 253 253 253 253 253 253 253 253 253 253 253
36085-253 253 253 253 253 253 253 253 253 253 253 253
36086-253 253 253 231 231 231 198 198 198 214 170 54
36087-236 178 12 236 178 12 210 150 10 137 92 6
36088- 18 14 6 2 2 6 2 2 6 2 2 6
36089- 6 6 6 70 47 6 200 144 11 236 178 12
36090-239 182 13 239 182 13 124 112 88 58 58 58
36091- 22 22 22 6 6 6 0 0 0 0 0 0
36092- 0 0 0 0 0 0 0 0 0 0 0 0
36093- 0 0 0 0 0 0 0 0 0 0 0 0
36094- 0 0 0 0 0 0 0 0 0 0 0 0
36095- 0 0 0 0 0 0 0 0 0 10 10 10
36096- 30 30 30 70 70 70 180 133 36 226 170 11
36097-239 182 13 242 186 14 242 186 14 246 186 14
36098-246 190 14 246 190 14 246 190 14 246 190 14
36099-246 190 14 246 190 14 246 190 14 246 190 14
36100-246 190 14 232 195 16 98 70 6 2 2 6
36101- 2 2 6 2 2 6 66 66 66 221 221 221
36102-253 253 253 253 253 253 253 253 253 253 253 253
36103-253 253 253 253 253 253 253 253 253 253 253 253
36104-253 253 253 253 253 253 253 253 253 253 253 253
36105-253 253 253 253 253 253 253 253 253 253 253 253
36106-253 253 253 206 206 206 198 198 198 214 166 58
36107-230 174 11 230 174 11 216 158 10 192 133 9
36108-163 110 8 116 81 8 102 78 10 116 81 8
36109-167 114 7 197 138 11 226 170 11 239 182 13
36110-242 186 14 242 186 14 162 146 94 78 78 78
36111- 34 34 34 14 14 14 6 6 6 0 0 0
36112- 0 0 0 0 0 0 0 0 0 0 0 0
36113- 0 0 0 0 0 0 0 0 0 0 0 0
36114- 0 0 0 0 0 0 0 0 0 0 0 0
36115- 0 0 0 0 0 0 0 0 0 6 6 6
36116- 30 30 30 78 78 78 190 142 34 226 170 11
36117-239 182 13 246 190 14 246 190 14 246 190 14
36118-246 190 14 246 190 14 246 190 14 246 190 14
36119-246 190 14 246 190 14 246 190 14 246 190 14
36120-246 190 14 241 196 14 203 166 17 22 18 6
36121- 2 2 6 2 2 6 2 2 6 38 38 38
36122-218 218 218 253 253 253 253 253 253 253 253 253
36123-253 253 253 253 253 253 253 253 253 253 253 253
36124-253 253 253 253 253 253 253 253 253 253 253 253
36125-253 253 253 253 253 253 253 253 253 253 253 253
36126-250 250 250 206 206 206 198 198 198 202 162 69
36127-226 170 11 236 178 12 224 166 10 210 150 10
36128-200 144 11 197 138 11 192 133 9 197 138 11
36129-210 150 10 226 170 11 242 186 14 246 190 14
36130-246 190 14 246 186 14 225 175 15 124 112 88
36131- 62 62 62 30 30 30 14 14 14 6 6 6
36132- 0 0 0 0 0 0 0 0 0 0 0 0
36133- 0 0 0 0 0 0 0 0 0 0 0 0
36134- 0 0 0 0 0 0 0 0 0 0 0 0
36135- 0 0 0 0 0 0 0 0 0 10 10 10
36136- 30 30 30 78 78 78 174 135 50 224 166 10
36137-239 182 13 246 190 14 246 190 14 246 190 14
36138-246 190 14 246 190 14 246 190 14 246 190 14
36139-246 190 14 246 190 14 246 190 14 246 190 14
36140-246 190 14 246 190 14 241 196 14 139 102 15
36141- 2 2 6 2 2 6 2 2 6 2 2 6
36142- 78 78 78 250 250 250 253 253 253 253 253 253
36143-253 253 253 253 253 253 253 253 253 253 253 253
36144-253 253 253 253 253 253 253 253 253 253 253 253
36145-253 253 253 253 253 253 253 253 253 253 253 253
36146-250 250 250 214 214 214 198 198 198 190 150 46
36147-219 162 10 236 178 12 234 174 13 224 166 10
36148-216 158 10 213 154 11 213 154 11 216 158 10
36149-226 170 11 239 182 13 246 190 14 246 190 14
36150-246 190 14 246 190 14 242 186 14 206 162 42
36151-101 101 101 58 58 58 30 30 30 14 14 14
36152- 6 6 6 0 0 0 0 0 0 0 0 0
36153- 0 0 0 0 0 0 0 0 0 0 0 0
36154- 0 0 0 0 0 0 0 0 0 0 0 0
36155- 0 0 0 0 0 0 0 0 0 10 10 10
36156- 30 30 30 74 74 74 174 135 50 216 158 10
36157-236 178 12 246 190 14 246 190 14 246 190 14
36158-246 190 14 246 190 14 246 190 14 246 190 14
36159-246 190 14 246 190 14 246 190 14 246 190 14
36160-246 190 14 246 190 14 241 196 14 226 184 13
36161- 61 42 6 2 2 6 2 2 6 2 2 6
36162- 22 22 22 238 238 238 253 253 253 253 253 253
36163-253 253 253 253 253 253 253 253 253 253 253 253
36164-253 253 253 253 253 253 253 253 253 253 253 253
36165-253 253 253 253 253 253 253 253 253 253 253 253
36166-253 253 253 226 226 226 187 187 187 180 133 36
36167-216 158 10 236 178 12 239 182 13 236 178 12
36168-230 174 11 226 170 11 226 170 11 230 174 11
36169-236 178 12 242 186 14 246 190 14 246 190 14
36170-246 190 14 246 190 14 246 186 14 239 182 13
36171-206 162 42 106 106 106 66 66 66 34 34 34
36172- 14 14 14 6 6 6 0 0 0 0 0 0
36173- 0 0 0 0 0 0 0 0 0 0 0 0
36174- 0 0 0 0 0 0 0 0 0 0 0 0
36175- 0 0 0 0 0 0 0 0 0 6 6 6
36176- 26 26 26 70 70 70 163 133 67 213 154 11
36177-236 178 12 246 190 14 246 190 14 246 190 14
36178-246 190 14 246 190 14 246 190 14 246 190 14
36179-246 190 14 246 190 14 246 190 14 246 190 14
36180-246 190 14 246 190 14 246 190 14 241 196 14
36181-190 146 13 18 14 6 2 2 6 2 2 6
36182- 46 46 46 246 246 246 253 253 253 253 253 253
36183-253 253 253 253 253 253 253 253 253 253 253 253
36184-253 253 253 253 253 253 253 253 253 253 253 253
36185-253 253 253 253 253 253 253 253 253 253 253 253
36186-253 253 253 221 221 221 86 86 86 156 107 11
36187-216 158 10 236 178 12 242 186 14 246 186 14
36188-242 186 14 239 182 13 239 182 13 242 186 14
36189-242 186 14 246 186 14 246 190 14 246 190 14
36190-246 190 14 246 190 14 246 190 14 246 190 14
36191-242 186 14 225 175 15 142 122 72 66 66 66
36192- 30 30 30 10 10 10 0 0 0 0 0 0
36193- 0 0 0 0 0 0 0 0 0 0 0 0
36194- 0 0 0 0 0 0 0 0 0 0 0 0
36195- 0 0 0 0 0 0 0 0 0 6 6 6
36196- 26 26 26 70 70 70 163 133 67 210 150 10
36197-236 178 12 246 190 14 246 190 14 246 190 14
36198-246 190 14 246 190 14 246 190 14 246 190 14
36199-246 190 14 246 190 14 246 190 14 246 190 14
36200-246 190 14 246 190 14 246 190 14 246 190 14
36201-232 195 16 121 92 8 34 34 34 106 106 106
36202-221 221 221 253 253 253 253 253 253 253 253 253
36203-253 253 253 253 253 253 253 253 253 253 253 253
36204-253 253 253 253 253 253 253 253 253 253 253 253
36205-253 253 253 253 253 253 253 253 253 253 253 253
36206-242 242 242 82 82 82 18 14 6 163 110 8
36207-216 158 10 236 178 12 242 186 14 246 190 14
36208-246 190 14 246 190 14 246 190 14 246 190 14
36209-246 190 14 246 190 14 246 190 14 246 190 14
36210-246 190 14 246 190 14 246 190 14 246 190 14
36211-246 190 14 246 190 14 242 186 14 163 133 67
36212- 46 46 46 18 18 18 6 6 6 0 0 0
36213- 0 0 0 0 0 0 0 0 0 0 0 0
36214- 0 0 0 0 0 0 0 0 0 0 0 0
36215- 0 0 0 0 0 0 0 0 0 10 10 10
36216- 30 30 30 78 78 78 163 133 67 210 150 10
36217-236 178 12 246 186 14 246 190 14 246 190 14
36218-246 190 14 246 190 14 246 190 14 246 190 14
36219-246 190 14 246 190 14 246 190 14 246 190 14
36220-246 190 14 246 190 14 246 190 14 246 190 14
36221-241 196 14 215 174 15 190 178 144 253 253 253
36222-253 253 253 253 253 253 253 253 253 253 253 253
36223-253 253 253 253 253 253 253 253 253 253 253 253
36224-253 253 253 253 253 253 253 253 253 253 253 253
36225-253 253 253 253 253 253 253 253 253 218 218 218
36226- 58 58 58 2 2 6 22 18 6 167 114 7
36227-216 158 10 236 178 12 246 186 14 246 190 14
36228-246 190 14 246 190 14 246 190 14 246 190 14
36229-246 190 14 246 190 14 246 190 14 246 190 14
36230-246 190 14 246 190 14 246 190 14 246 190 14
36231-246 190 14 246 186 14 242 186 14 190 150 46
36232- 54 54 54 22 22 22 6 6 6 0 0 0
36233- 0 0 0 0 0 0 0 0 0 0 0 0
36234- 0 0 0 0 0 0 0 0 0 0 0 0
36235- 0 0 0 0 0 0 0 0 0 14 14 14
36236- 38 38 38 86 86 86 180 133 36 213 154 11
36237-236 178 12 246 186 14 246 190 14 246 190 14
36238-246 190 14 246 190 14 246 190 14 246 190 14
36239-246 190 14 246 190 14 246 190 14 246 190 14
36240-246 190 14 246 190 14 246 190 14 246 190 14
36241-246 190 14 232 195 16 190 146 13 214 214 214
36242-253 253 253 253 253 253 253 253 253 253 253 253
36243-253 253 253 253 253 253 253 253 253 253 253 253
36244-253 253 253 253 253 253 253 253 253 253 253 253
36245-253 253 253 250 250 250 170 170 170 26 26 26
36246- 2 2 6 2 2 6 37 26 9 163 110 8
36247-219 162 10 239 182 13 246 186 14 246 190 14
36248-246 190 14 246 190 14 246 190 14 246 190 14
36249-246 190 14 246 190 14 246 190 14 246 190 14
36250-246 190 14 246 190 14 246 190 14 246 190 14
36251-246 186 14 236 178 12 224 166 10 142 122 72
36252- 46 46 46 18 18 18 6 6 6 0 0 0
36253- 0 0 0 0 0 0 0 0 0 0 0 0
36254- 0 0 0 0 0 0 0 0 0 0 0 0
36255- 0 0 0 0 0 0 6 6 6 18 18 18
36256- 50 50 50 109 106 95 192 133 9 224 166 10
36257-242 186 14 246 190 14 246 190 14 246 190 14
36258-246 190 14 246 190 14 246 190 14 246 190 14
36259-246 190 14 246 190 14 246 190 14 246 190 14
36260-246 190 14 246 190 14 246 190 14 246 190 14
36261-242 186 14 226 184 13 210 162 10 142 110 46
36262-226 226 226 253 253 253 253 253 253 253 253 253
36263-253 253 253 253 253 253 253 253 253 253 253 253
36264-253 253 253 253 253 253 253 253 253 253 253 253
36265-198 198 198 66 66 66 2 2 6 2 2 6
36266- 2 2 6 2 2 6 50 34 6 156 107 11
36267-219 162 10 239 182 13 246 186 14 246 190 14
36268-246 190 14 246 190 14 246 190 14 246 190 14
36269-246 190 14 246 190 14 246 190 14 246 190 14
36270-246 190 14 246 190 14 246 190 14 242 186 14
36271-234 174 13 213 154 11 154 122 46 66 66 66
36272- 30 30 30 10 10 10 0 0 0 0 0 0
36273- 0 0 0 0 0 0 0 0 0 0 0 0
36274- 0 0 0 0 0 0 0 0 0 0 0 0
36275- 0 0 0 0 0 0 6 6 6 22 22 22
36276- 58 58 58 154 121 60 206 145 10 234 174 13
36277-242 186 14 246 186 14 246 190 14 246 190 14
36278-246 190 14 246 190 14 246 190 14 246 190 14
36279-246 190 14 246 190 14 246 190 14 246 190 14
36280-246 190 14 246 190 14 246 190 14 246 190 14
36281-246 186 14 236 178 12 210 162 10 163 110 8
36282- 61 42 6 138 138 138 218 218 218 250 250 250
36283-253 253 253 253 253 253 253 253 253 250 250 250
36284-242 242 242 210 210 210 144 144 144 66 66 66
36285- 6 6 6 2 2 6 2 2 6 2 2 6
36286- 2 2 6 2 2 6 61 42 6 163 110 8
36287-216 158 10 236 178 12 246 190 14 246 190 14
36288-246 190 14 246 190 14 246 190 14 246 190 14
36289-246 190 14 246 190 14 246 190 14 246 190 14
36290-246 190 14 239 182 13 230 174 11 216 158 10
36291-190 142 34 124 112 88 70 70 70 38 38 38
36292- 18 18 18 6 6 6 0 0 0 0 0 0
36293- 0 0 0 0 0 0 0 0 0 0 0 0
36294- 0 0 0 0 0 0 0 0 0 0 0 0
36295- 0 0 0 0 0 0 6 6 6 22 22 22
36296- 62 62 62 168 124 44 206 145 10 224 166 10
36297-236 178 12 239 182 13 242 186 14 242 186 14
36298-246 186 14 246 190 14 246 190 14 246 190 14
36299-246 190 14 246 190 14 246 190 14 246 190 14
36300-246 190 14 246 190 14 246 190 14 246 190 14
36301-246 190 14 236 178 12 216 158 10 175 118 6
36302- 80 54 7 2 2 6 6 6 6 30 30 30
36303- 54 54 54 62 62 62 50 50 50 38 38 38
36304- 14 14 14 2 2 6 2 2 6 2 2 6
36305- 2 2 6 2 2 6 2 2 6 2 2 6
36306- 2 2 6 6 6 6 80 54 7 167 114 7
36307-213 154 11 236 178 12 246 190 14 246 190 14
36308-246 190 14 246 190 14 246 190 14 246 190 14
36309-246 190 14 242 186 14 239 182 13 239 182 13
36310-230 174 11 210 150 10 174 135 50 124 112 88
36311- 82 82 82 54 54 54 34 34 34 18 18 18
36312- 6 6 6 0 0 0 0 0 0 0 0 0
36313- 0 0 0 0 0 0 0 0 0 0 0 0
36314- 0 0 0 0 0 0 0 0 0 0 0 0
36315- 0 0 0 0 0 0 6 6 6 18 18 18
36316- 50 50 50 158 118 36 192 133 9 200 144 11
36317-216 158 10 219 162 10 224 166 10 226 170 11
36318-230 174 11 236 178 12 239 182 13 239 182 13
36319-242 186 14 246 186 14 246 190 14 246 190 14
36320-246 190 14 246 190 14 246 190 14 246 190 14
36321-246 186 14 230 174 11 210 150 10 163 110 8
36322-104 69 6 10 10 10 2 2 6 2 2 6
36323- 2 2 6 2 2 6 2 2 6 2 2 6
36324- 2 2 6 2 2 6 2 2 6 2 2 6
36325- 2 2 6 2 2 6 2 2 6 2 2 6
36326- 2 2 6 6 6 6 91 60 6 167 114 7
36327-206 145 10 230 174 11 242 186 14 246 190 14
36328-246 190 14 246 190 14 246 186 14 242 186 14
36329-239 182 13 230 174 11 224 166 10 213 154 11
36330-180 133 36 124 112 88 86 86 86 58 58 58
36331- 38 38 38 22 22 22 10 10 10 6 6 6
36332- 0 0 0 0 0 0 0 0 0 0 0 0
36333- 0 0 0 0 0 0 0 0 0 0 0 0
36334- 0 0 0 0 0 0 0 0 0 0 0 0
36335- 0 0 0 0 0 0 0 0 0 14 14 14
36336- 34 34 34 70 70 70 138 110 50 158 118 36
36337-167 114 7 180 123 7 192 133 9 197 138 11
36338-200 144 11 206 145 10 213 154 11 219 162 10
36339-224 166 10 230 174 11 239 182 13 242 186 14
36340-246 186 14 246 186 14 246 186 14 246 186 14
36341-239 182 13 216 158 10 185 133 11 152 99 6
36342-104 69 6 18 14 6 2 2 6 2 2 6
36343- 2 2 6 2 2 6 2 2 6 2 2 6
36344- 2 2 6 2 2 6 2 2 6 2 2 6
36345- 2 2 6 2 2 6 2 2 6 2 2 6
36346- 2 2 6 6 6 6 80 54 7 152 99 6
36347-192 133 9 219 162 10 236 178 12 239 182 13
36348-246 186 14 242 186 14 239 182 13 236 178 12
36349-224 166 10 206 145 10 192 133 9 154 121 60
36350- 94 94 94 62 62 62 42 42 42 22 22 22
36351- 14 14 14 6 6 6 0 0 0 0 0 0
36352- 0 0 0 0 0 0 0 0 0 0 0 0
36353- 0 0 0 0 0 0 0 0 0 0 0 0
36354- 0 0 0 0 0 0 0 0 0 0 0 0
36355- 0 0 0 0 0 0 0 0 0 6 6 6
36356- 18 18 18 34 34 34 58 58 58 78 78 78
36357-101 98 89 124 112 88 142 110 46 156 107 11
36358-163 110 8 167 114 7 175 118 6 180 123 7
36359-185 133 11 197 138 11 210 150 10 219 162 10
36360-226 170 11 236 178 12 236 178 12 234 174 13
36361-219 162 10 197 138 11 163 110 8 130 83 6
36362- 91 60 6 10 10 10 2 2 6 2 2 6
36363- 18 18 18 38 38 38 38 38 38 38 38 38
36364- 38 38 38 38 38 38 38 38 38 38 38 38
36365- 38 38 38 38 38 38 26 26 26 2 2 6
36366- 2 2 6 6 6 6 70 47 6 137 92 6
36367-175 118 6 200 144 11 219 162 10 230 174 11
36368-234 174 13 230 174 11 219 162 10 210 150 10
36369-192 133 9 163 110 8 124 112 88 82 82 82
36370- 50 50 50 30 30 30 14 14 14 6 6 6
36371- 0 0 0 0 0 0 0 0 0 0 0 0
36372- 0 0 0 0 0 0 0 0 0 0 0 0
36373- 0 0 0 0 0 0 0 0 0 0 0 0
36374- 0 0 0 0 0 0 0 0 0 0 0 0
36375- 0 0 0 0 0 0 0 0 0 0 0 0
36376- 6 6 6 14 14 14 22 22 22 34 34 34
36377- 42 42 42 58 58 58 74 74 74 86 86 86
36378-101 98 89 122 102 70 130 98 46 121 87 25
36379-137 92 6 152 99 6 163 110 8 180 123 7
36380-185 133 11 197 138 11 206 145 10 200 144 11
36381-180 123 7 156 107 11 130 83 6 104 69 6
36382- 50 34 6 54 54 54 110 110 110 101 98 89
36383- 86 86 86 82 82 82 78 78 78 78 78 78
36384- 78 78 78 78 78 78 78 78 78 78 78 78
36385- 78 78 78 82 82 82 86 86 86 94 94 94
36386-106 106 106 101 101 101 86 66 34 124 80 6
36387-156 107 11 180 123 7 192 133 9 200 144 11
36388-206 145 10 200 144 11 192 133 9 175 118 6
36389-139 102 15 109 106 95 70 70 70 42 42 42
36390- 22 22 22 10 10 10 0 0 0 0 0 0
36391- 0 0 0 0 0 0 0 0 0 0 0 0
36392- 0 0 0 0 0 0 0 0 0 0 0 0
36393- 0 0 0 0 0 0 0 0 0 0 0 0
36394- 0 0 0 0 0 0 0 0 0 0 0 0
36395- 0 0 0 0 0 0 0 0 0 0 0 0
36396- 0 0 0 0 0 0 6 6 6 10 10 10
36397- 14 14 14 22 22 22 30 30 30 38 38 38
36398- 50 50 50 62 62 62 74 74 74 90 90 90
36399-101 98 89 112 100 78 121 87 25 124 80 6
36400-137 92 6 152 99 6 152 99 6 152 99 6
36401-138 86 6 124 80 6 98 70 6 86 66 30
36402-101 98 89 82 82 82 58 58 58 46 46 46
36403- 38 38 38 34 34 34 34 34 34 34 34 34
36404- 34 34 34 34 34 34 34 34 34 34 34 34
36405- 34 34 34 34 34 34 38 38 38 42 42 42
36406- 54 54 54 82 82 82 94 86 76 91 60 6
36407-134 86 6 156 107 11 167 114 7 175 118 6
36408-175 118 6 167 114 7 152 99 6 121 87 25
36409-101 98 89 62 62 62 34 34 34 18 18 18
36410- 6 6 6 0 0 0 0 0 0 0 0 0
36411- 0 0 0 0 0 0 0 0 0 0 0 0
36412- 0 0 0 0 0 0 0 0 0 0 0 0
36413- 0 0 0 0 0 0 0 0 0 0 0 0
36414- 0 0 0 0 0 0 0 0 0 0 0 0
36415- 0 0 0 0 0 0 0 0 0 0 0 0
36416- 0 0 0 0 0 0 0 0 0 0 0 0
36417- 0 0 0 6 6 6 6 6 6 10 10 10
36418- 18 18 18 22 22 22 30 30 30 42 42 42
36419- 50 50 50 66 66 66 86 86 86 101 98 89
36420-106 86 58 98 70 6 104 69 6 104 69 6
36421-104 69 6 91 60 6 82 62 34 90 90 90
36422- 62 62 62 38 38 38 22 22 22 14 14 14
36423- 10 10 10 10 10 10 10 10 10 10 10 10
36424- 10 10 10 10 10 10 6 6 6 10 10 10
36425- 10 10 10 10 10 10 10 10 10 14 14 14
36426- 22 22 22 42 42 42 70 70 70 89 81 66
36427- 80 54 7 104 69 6 124 80 6 137 92 6
36428-134 86 6 116 81 8 100 82 52 86 86 86
36429- 58 58 58 30 30 30 14 14 14 6 6 6
36430- 0 0 0 0 0 0 0 0 0 0 0 0
36431- 0 0 0 0 0 0 0 0 0 0 0 0
36432- 0 0 0 0 0 0 0 0 0 0 0 0
36433- 0 0 0 0 0 0 0 0 0 0 0 0
36434- 0 0 0 0 0 0 0 0 0 0 0 0
36435- 0 0 0 0 0 0 0 0 0 0 0 0
36436- 0 0 0 0 0 0 0 0 0 0 0 0
36437- 0 0 0 0 0 0 0 0 0 0 0 0
36438- 0 0 0 6 6 6 10 10 10 14 14 14
36439- 18 18 18 26 26 26 38 38 38 54 54 54
36440- 70 70 70 86 86 86 94 86 76 89 81 66
36441- 89 81 66 86 86 86 74 74 74 50 50 50
36442- 30 30 30 14 14 14 6 6 6 0 0 0
36443- 0 0 0 0 0 0 0 0 0 0 0 0
36444- 0 0 0 0 0 0 0 0 0 0 0 0
36445- 0 0 0 0 0 0 0 0 0 0 0 0
36446- 6 6 6 18 18 18 34 34 34 58 58 58
36447- 82 82 82 89 81 66 89 81 66 89 81 66
36448- 94 86 66 94 86 76 74 74 74 50 50 50
36449- 26 26 26 14 14 14 6 6 6 0 0 0
36450- 0 0 0 0 0 0 0 0 0 0 0 0
36451- 0 0 0 0 0 0 0 0 0 0 0 0
36452- 0 0 0 0 0 0 0 0 0 0 0 0
36453- 0 0 0 0 0 0 0 0 0 0 0 0
36454- 0 0 0 0 0 0 0 0 0 0 0 0
36455- 0 0 0 0 0 0 0 0 0 0 0 0
36456- 0 0 0 0 0 0 0 0 0 0 0 0
36457- 0 0 0 0 0 0 0 0 0 0 0 0
36458- 0 0 0 0 0 0 0 0 0 0 0 0
36459- 6 6 6 6 6 6 14 14 14 18 18 18
36460- 30 30 30 38 38 38 46 46 46 54 54 54
36461- 50 50 50 42 42 42 30 30 30 18 18 18
36462- 10 10 10 0 0 0 0 0 0 0 0 0
36463- 0 0 0 0 0 0 0 0 0 0 0 0
36464- 0 0 0 0 0 0 0 0 0 0 0 0
36465- 0 0 0 0 0 0 0 0 0 0 0 0
36466- 0 0 0 6 6 6 14 14 14 26 26 26
36467- 38 38 38 50 50 50 58 58 58 58 58 58
36468- 54 54 54 42 42 42 30 30 30 18 18 18
36469- 10 10 10 0 0 0 0 0 0 0 0 0
36470- 0 0 0 0 0 0 0 0 0 0 0 0
36471- 0 0 0 0 0 0 0 0 0 0 0 0
36472- 0 0 0 0 0 0 0 0 0 0 0 0
36473- 0 0 0 0 0 0 0 0 0 0 0 0
36474- 0 0 0 0 0 0 0 0 0 0 0 0
36475- 0 0 0 0 0 0 0 0 0 0 0 0
36476- 0 0 0 0 0 0 0 0 0 0 0 0
36477- 0 0 0 0 0 0 0 0 0 0 0 0
36478- 0 0 0 0 0 0 0 0 0 0 0 0
36479- 0 0 0 0 0 0 0 0 0 6 6 6
36480- 6 6 6 10 10 10 14 14 14 18 18 18
36481- 18 18 18 14 14 14 10 10 10 6 6 6
36482- 0 0 0 0 0 0 0 0 0 0 0 0
36483- 0 0 0 0 0 0 0 0 0 0 0 0
36484- 0 0 0 0 0 0 0 0 0 0 0 0
36485- 0 0 0 0 0 0 0 0 0 0 0 0
36486- 0 0 0 0 0 0 0 0 0 6 6 6
36487- 14 14 14 18 18 18 22 22 22 22 22 22
36488- 18 18 18 14 14 14 10 10 10 6 6 6
36489- 0 0 0 0 0 0 0 0 0 0 0 0
36490- 0 0 0 0 0 0 0 0 0 0 0 0
36491- 0 0 0 0 0 0 0 0 0 0 0 0
36492- 0 0 0 0 0 0 0 0 0 0 0 0
36493- 0 0 0 0 0 0 0 0 0 0 0 0
36494+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36495+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36496+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36497+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36498+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36499+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36500+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36501+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36502+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36503+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36504+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36505+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36506+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36507+4 4 4 4 4 4
36508+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36509+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36510+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36511+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36512+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36513+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36514+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36515+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36516+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36517+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36518+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36519+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36520+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36521+4 4 4 4 4 4
36522+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36523+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36524+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36525+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36526+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36527+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36528+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36529+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36530+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36531+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36532+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36533+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36534+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36535+4 4 4 4 4 4
36536+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36537+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36538+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36539+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36540+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36541+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36542+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36543+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36544+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36545+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36546+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36547+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36548+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36549+4 4 4 4 4 4
36550+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36551+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36552+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36553+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36554+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36555+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36556+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36557+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36558+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36559+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36560+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36561+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36562+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36563+4 4 4 4 4 4
36564+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36565+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36566+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36567+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36568+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36569+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36570+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36571+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36572+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36573+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36574+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36575+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36576+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36577+4 4 4 4 4 4
36578+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36579+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36580+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36581+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36582+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
36583+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
36584+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36585+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36586+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36587+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
36588+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
36589+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
36590+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36591+4 4 4 4 4 4
36592+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36593+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36594+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36595+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36596+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
36597+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
36598+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36599+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36600+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36601+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
36602+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
36603+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
36604+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36605+4 4 4 4 4 4
36606+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36607+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36608+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36609+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36610+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
36611+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
36612+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
36613+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36614+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36615+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
36616+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
36617+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
36618+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
36619+4 4 4 4 4 4
36620+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36621+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36622+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36623+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
36624+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
36625+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
36626+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
36627+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36628+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
36629+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
36630+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
36631+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
36632+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
36633+4 4 4 4 4 4
36634+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36635+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36636+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36637+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
36638+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
36639+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
36640+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
36641+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
36642+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
36643+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
36644+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
36645+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
36646+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
36647+4 4 4 4 4 4
36648+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36649+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36650+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
36651+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
36652+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
36653+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
36654+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
36655+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
36656+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
36657+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
36658+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
36659+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
36660+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
36661+4 4 4 4 4 4
36662+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36663+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36664+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
36665+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
36666+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
36667+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
36668+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
36669+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
36670+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
36671+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
36672+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
36673+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
36674+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
36675+4 4 4 4 4 4
36676+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36677+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36678+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
36679+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
36680+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
36681+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
36682+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
36683+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
36684+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
36685+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
36686+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
36687+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
36688+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
36689+4 4 4 4 4 4
36690+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36691+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36692+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
36693+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
36694+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
36695+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
36696+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
36697+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
36698+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
36699+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
36700+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
36701+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
36702+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
36703+4 4 4 4 4 4
36704+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36705+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36706+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
36707+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
36708+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
36709+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
36710+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
36711+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
36712+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
36713+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
36714+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
36715+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
36716+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
36717+4 4 4 4 4 4
36718+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36719+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
36720+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
36721+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
36722+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
36723+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
36724+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
36725+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
36726+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
36727+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
36728+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
36729+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
36730+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
36731+4 4 4 4 4 4
36732+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36733+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
36734+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
36735+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
36736+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
36737+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
36738+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
36739+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
36740+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
36741+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
36742+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
36743+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
36744+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
36745+0 0 0 4 4 4
36746+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
36747+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
36748+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
36749+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
36750+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
36751+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
36752+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
36753+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
36754+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
36755+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
36756+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
36757+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
36758+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
36759+2 0 0 0 0 0
36760+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
36761+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
36762+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
36763+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
36764+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
36765+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
36766+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
36767+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
36768+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
36769+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
36770+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
36771+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
36772+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
36773+37 38 37 0 0 0
36774+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
36775+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
36776+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
36777+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
36778+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
36779+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
36780+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
36781+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
36782+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
36783+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
36784+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
36785+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
36786+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
36787+85 115 134 4 0 0
36788+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
36789+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
36790+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
36791+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
36792+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
36793+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
36794+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
36795+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
36796+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
36797+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
36798+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
36799+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
36800+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
36801+60 73 81 4 0 0
36802+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
36803+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
36804+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
36805+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
36806+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
36807+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
36808+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
36809+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
36810+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
36811+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
36812+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
36813+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
36814+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
36815+16 19 21 4 0 0
36816+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
36817+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
36818+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
36819+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
36820+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
36821+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
36822+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
36823+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
36824+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
36825+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
36826+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
36827+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
36828+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
36829+4 0 0 4 3 3
36830+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
36831+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
36832+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
36833+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
36834+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
36835+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
36836+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
36837+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
36838+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
36839+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
36840+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
36841+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
36842+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
36843+3 2 2 4 4 4
36844+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
36845+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
36846+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
36847+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
36848+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
36849+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
36850+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
36851+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
36852+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
36853+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
36854+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
36855+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
36856+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
36857+4 4 4 4 4 4
36858+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
36859+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
36860+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
36861+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
36862+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
36863+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
36864+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
36865+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
36866+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
36867+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
36868+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
36869+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
36870+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
36871+4 4 4 4 4 4
36872+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
36873+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
36874+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
36875+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
36876+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
36877+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
36878+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
36879+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
36880+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
36881+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
36882+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
36883+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
36884+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
36885+5 5 5 5 5 5
36886+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
36887+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
36888+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
36889+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
36890+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
36891+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
36892+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
36893+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
36894+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
36895+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
36896+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
36897+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
36898+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
36899+5 5 5 4 4 4
36900+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
36901+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
36902+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
36903+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
36904+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
36905+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
36906+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
36907+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
36908+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
36909+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
36910+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
36911+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
36912+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36913+4 4 4 4 4 4
36914+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
36915+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
36916+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
36917+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
36918+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
36919+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
36920+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
36921+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
36922+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
36923+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
36924+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
36925+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
36926+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36927+4 4 4 4 4 4
36928+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
36929+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
36930+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
36931+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
36932+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
36933+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
36934+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
36935+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
36936+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
36937+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
36938+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
36939+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36940+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36941+4 4 4 4 4 4
36942+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
36943+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
36944+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
36945+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
36946+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
36947+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
36948+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
36949+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
36950+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
36951+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
36952+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
36953+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36954+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36955+4 4 4 4 4 4
36956+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
36957+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
36958+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
36959+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
36960+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
36961+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
36962+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
36963+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
36964+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
36965+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
36966+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36967+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36968+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36969+4 4 4 4 4 4
36970+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
36971+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
36972+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
36973+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
36974+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
36975+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
36976+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
36977+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
36978+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
36979+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
36980+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
36981+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36982+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36983+4 4 4 4 4 4
36984+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
36985+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
36986+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
36987+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
36988+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
36989+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
36990+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
36991+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
36992+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
36993+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
36994+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
36995+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36996+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36997+4 4 4 4 4 4
36998+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
36999+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
37000+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
37001+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
37002+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
37003+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
37004+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
37005+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
37006+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
37007+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
37008+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
37009+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37010+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37011+4 4 4 4 4 4
37012+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
37013+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
37014+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
37015+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
37016+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
37017+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
37018+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
37019+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
37020+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
37021+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
37022+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
37023+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37024+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37025+4 4 4 4 4 4
37026+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
37027+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
37028+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
37029+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
37030+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
37031+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
37032+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
37033+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
37034+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
37035+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
37036+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
37037+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37038+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37039+4 4 4 4 4 4
37040+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
37041+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
37042+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
37043+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
37044+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
37045+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
37046+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
37047+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
37048+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
37049+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
37050+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
37051+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37052+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37053+4 4 4 4 4 4
37054+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
37055+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
37056+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
37057+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
37058+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
37059+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
37060+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
37061+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
37062+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
37063+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
37064+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
37065+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37066+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37067+4 4 4 4 4 4
37068+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
37069+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
37070+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
37071+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
37072+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
37073+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
37074+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
37075+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
37076+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
37077+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
37078+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
37079+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37080+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37081+4 4 4 4 4 4
37082+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
37083+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
37084+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
37085+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
37086+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
37087+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
37088+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
37089+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
37090+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
37091+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
37092+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
37093+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37094+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37095+4 4 4 4 4 4
37096+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
37097+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
37098+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
37099+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
37100+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
37101+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
37102+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
37103+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
37104+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
37105+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
37106+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
37107+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37108+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37109+4 4 4 4 4 4
37110+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
37111+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
37112+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
37113+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
37114+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
37115+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
37116+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
37117+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
37118+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
37119+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
37120+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
37121+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37122+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37123+4 4 4 4 4 4
37124+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
37125+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
37126+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
37127+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
37128+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
37129+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
37130+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
37131+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
37132+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
37133+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
37134+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
37135+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37136+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37137+4 4 4 4 4 4
37138+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
37139+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
37140+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
37141+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
37142+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
37143+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
37144+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
37145+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
37146+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
37147+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
37148+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
37149+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37150+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37151+4 4 4 4 4 4
37152+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
37153+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
37154+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
37155+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
37156+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
37157+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
37158+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
37159+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
37160+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
37161+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
37162+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
37163+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37164+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37165+4 4 4 4 4 4
37166+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
37167+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
37168+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
37169+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
37170+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
37171+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
37172+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
37173+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
37174+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
37175+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
37176+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
37177+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37178+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37179+4 4 4 4 4 4
37180+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
37181+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
37182+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
37183+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
37184+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
37185+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
37186+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
37187+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
37188+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
37189+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
37190+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
37191+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37192+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37193+4 4 4 4 4 4
37194+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
37195+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
37196+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
37197+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
37198+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
37199+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
37200+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
37201+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
37202+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
37203+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
37204+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
37205+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37206+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37207+4 4 4 4 4 4
37208+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
37209+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
37210+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
37211+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
37212+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
37213+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
37214+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
37215+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
37216+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
37217+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
37218+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
37219+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37220+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37221+4 4 4 4 4 4
37222+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
37223+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
37224+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
37225+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
37226+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
37227+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
37228+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
37229+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
37230+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
37231+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
37232+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
37233+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37234+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37235+4 4 4 4 4 4
37236+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
37237+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
37238+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
37239+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
37240+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
37241+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
37242+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
37243+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
37244+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
37245+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
37246+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
37247+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37248+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37249+4 4 4 4 4 4
37250+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
37251+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
37252+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
37253+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
37254+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
37255+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
37256+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
37257+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
37258+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
37259+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
37260+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
37261+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37262+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37263+4 4 4 4 4 4
37264+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
37265+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
37266+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
37267+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
37268+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
37269+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
37270+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
37271+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
37272+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
37273+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
37274+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37275+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37276+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37277+4 4 4 4 4 4
37278+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
37279+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
37280+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
37281+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
37282+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
37283+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
37284+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37285+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
37286+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
37287+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
37288+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
37289+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37290+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37291+4 4 4 4 4 4
37292+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
37293+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
37294+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
37295+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
37296+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
37297+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
37298+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
37299+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
37300+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
37301+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
37302+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37303+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37304+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37305+4 4 4 4 4 4
37306+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
37307+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
37308+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
37309+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
37310+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
37311+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
37312+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
37313+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
37314+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
37315+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
37316+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37317+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37318+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37319+4 4 4 4 4 4
37320+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
37321+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
37322+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
37323+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
37324+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
37325+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
37326+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
37327+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
37328+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
37329+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
37330+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37331+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37332+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37333+4 4 4 4 4 4
37334+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
37335+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
37336+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
37337+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
37338+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
37339+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
37340+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
37341+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
37342+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
37343+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
37344+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37345+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37346+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37347+4 4 4 4 4 4
37348+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
37349+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
37350+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
37351+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
37352+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
37353+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
37354+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
37355+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
37356+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
37357+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
37358+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37359+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37360+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37361+4 4 4 4 4 4
37362+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
37363+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
37364+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
37365+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
37366+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
37367+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
37368+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
37369+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
37370+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
37371+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37372+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37373+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37374+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37375+4 4 4 4 4 4
37376+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
37377+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
37378+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
37379+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
37380+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
37381+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
37382+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
37383+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
37384+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
37385+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37386+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37387+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37388+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37389+4 4 4 4 4 4
37390+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
37391+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
37392+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
37393+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
37394+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
37395+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
37396+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
37397+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
37398+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37399+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37400+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37401+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37402+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37403+4 4 4 4 4 4
37404+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
37405+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
37406+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
37407+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
37408+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
37409+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
37410+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
37411+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
37412+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37413+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37414+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37415+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37416+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37417+4 4 4 4 4 4
37418+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
37419+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
37420+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
37421+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
37422+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
37423+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
37424+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
37425+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
37426+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37427+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37428+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37429+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37430+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37431+4 4 4 4 4 4
37432+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
37433+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
37434+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
37435+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
37436+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
37437+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
37438+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
37439+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
37440+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37441+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37442+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37443+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37444+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37445+4 4 4 4 4 4
37446+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37447+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
37448+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
37449+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
37450+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
37451+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
37452+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
37453+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
37454+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37455+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37456+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37457+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37458+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37459+4 4 4 4 4 4
37460+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37461+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
37462+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
37463+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
37464+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
37465+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
37466+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
37467+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
37468+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37469+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37470+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37471+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37472+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37473+4 4 4 4 4 4
37474+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37475+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37476+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
37477+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
37478+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
37479+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
37480+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
37481+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
37482+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37483+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37484+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37485+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37486+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37487+4 4 4 4 4 4
37488+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37489+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37490+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
37491+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
37492+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
37493+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
37494+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
37495+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37496+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37497+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37498+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37499+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37500+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37501+4 4 4 4 4 4
37502+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37503+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37504+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37505+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
37506+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
37507+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
37508+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
37509+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37510+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37511+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37512+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37513+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37514+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37515+4 4 4 4 4 4
37516+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37517+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37518+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37519+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
37520+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
37521+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
37522+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
37523+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37524+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37525+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37526+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37527+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37528+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37529+4 4 4 4 4 4
37530+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37531+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37532+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37533+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
37534+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
37535+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
37536+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
37537+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37538+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37539+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37540+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37541+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37542+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37543+4 4 4 4 4 4
37544+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37545+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37546+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37547+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
37548+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
37549+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
37550+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
37551+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37552+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37553+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37554+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37555+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37556+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37557+4 4 4 4 4 4
37558+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37559+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37560+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37561+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37562+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
37563+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
37564+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
37565+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37566+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37567+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37568+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37569+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37570+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37571+4 4 4 4 4 4
37572+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37573+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37574+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37575+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37576+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
37577+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
37578+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37579+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37580+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37581+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37582+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37583+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37584+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37585+4 4 4 4 4 4
37586+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37587+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37588+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37589+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37590+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
37591+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
37592+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37593+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37594+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37595+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37596+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37597+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37598+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37599+4 4 4 4 4 4
37600+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37601+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37602+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37603+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37604+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
37605+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
37606+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37607+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37608+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37609+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37610+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37611+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37612+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37613+4 4 4 4 4 4
37614diff -urNp linux-3.0.4/drivers/video/udlfb.c linux-3.0.4/drivers/video/udlfb.c
37615--- linux-3.0.4/drivers/video/udlfb.c 2011-07-21 22:17:23.000000000 -0400
37616+++ linux-3.0.4/drivers/video/udlfb.c 2011-08-23 21:47:56.000000000 -0400
37617@@ -586,11 +586,11 @@ int dlfb_handle_damage(struct dlfb_data
37618 dlfb_urb_completion(urb);
37619
37620 error:
37621- atomic_add(bytes_sent, &dev->bytes_sent);
37622- atomic_add(bytes_identical, &dev->bytes_identical);
37623- atomic_add(width*height*2, &dev->bytes_rendered);
37624+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
37625+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
37626+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
37627 end_cycles = get_cycles();
37628- atomic_add(((unsigned int) ((end_cycles - start_cycles)
37629+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
37630 >> 10)), /* Kcycles */
37631 &dev->cpu_kcycles_used);
37632
37633@@ -711,11 +711,11 @@ static void dlfb_dpy_deferred_io(struct
37634 dlfb_urb_completion(urb);
37635
37636 error:
37637- atomic_add(bytes_sent, &dev->bytes_sent);
37638- atomic_add(bytes_identical, &dev->bytes_identical);
37639- atomic_add(bytes_rendered, &dev->bytes_rendered);
37640+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
37641+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
37642+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
37643 end_cycles = get_cycles();
37644- atomic_add(((unsigned int) ((end_cycles - start_cycles)
37645+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
37646 >> 10)), /* Kcycles */
37647 &dev->cpu_kcycles_used);
37648 }
37649@@ -1307,7 +1307,7 @@ static ssize_t metrics_bytes_rendered_sh
37650 struct fb_info *fb_info = dev_get_drvdata(fbdev);
37651 struct dlfb_data *dev = fb_info->par;
37652 return snprintf(buf, PAGE_SIZE, "%u\n",
37653- atomic_read(&dev->bytes_rendered));
37654+ atomic_read_unchecked(&dev->bytes_rendered));
37655 }
37656
37657 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
37658@@ -1315,7 +1315,7 @@ static ssize_t metrics_bytes_identical_s
37659 struct fb_info *fb_info = dev_get_drvdata(fbdev);
37660 struct dlfb_data *dev = fb_info->par;
37661 return snprintf(buf, PAGE_SIZE, "%u\n",
37662- atomic_read(&dev->bytes_identical));
37663+ atomic_read_unchecked(&dev->bytes_identical));
37664 }
37665
37666 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
37667@@ -1323,7 +1323,7 @@ static ssize_t metrics_bytes_sent_show(s
37668 struct fb_info *fb_info = dev_get_drvdata(fbdev);
37669 struct dlfb_data *dev = fb_info->par;
37670 return snprintf(buf, PAGE_SIZE, "%u\n",
37671- atomic_read(&dev->bytes_sent));
37672+ atomic_read_unchecked(&dev->bytes_sent));
37673 }
37674
37675 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
37676@@ -1331,7 +1331,7 @@ static ssize_t metrics_cpu_kcycles_used_
37677 struct fb_info *fb_info = dev_get_drvdata(fbdev);
37678 struct dlfb_data *dev = fb_info->par;
37679 return snprintf(buf, PAGE_SIZE, "%u\n",
37680- atomic_read(&dev->cpu_kcycles_used));
37681+ atomic_read_unchecked(&dev->cpu_kcycles_used));
37682 }
37683
37684 static ssize_t edid_show(
37685@@ -1388,10 +1388,10 @@ static ssize_t metrics_reset_store(struc
37686 struct fb_info *fb_info = dev_get_drvdata(fbdev);
37687 struct dlfb_data *dev = fb_info->par;
37688
37689- atomic_set(&dev->bytes_rendered, 0);
37690- atomic_set(&dev->bytes_identical, 0);
37691- atomic_set(&dev->bytes_sent, 0);
37692- atomic_set(&dev->cpu_kcycles_used, 0);
37693+ atomic_set_unchecked(&dev->bytes_rendered, 0);
37694+ atomic_set_unchecked(&dev->bytes_identical, 0);
37695+ atomic_set_unchecked(&dev->bytes_sent, 0);
37696+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
37697
37698 return count;
37699 }
37700diff -urNp linux-3.0.4/drivers/video/uvesafb.c linux-3.0.4/drivers/video/uvesafb.c
37701--- linux-3.0.4/drivers/video/uvesafb.c 2011-07-21 22:17:23.000000000 -0400
37702+++ linux-3.0.4/drivers/video/uvesafb.c 2011-08-23 21:47:56.000000000 -0400
37703@@ -19,6 +19,7 @@
37704 #include <linux/io.h>
37705 #include <linux/mutex.h>
37706 #include <linux/slab.h>
37707+#include <linux/moduleloader.h>
37708 #include <video/edid.h>
37709 #include <video/uvesafb.h>
37710 #ifdef CONFIG_X86
37711@@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
37712 NULL,
37713 };
37714
37715- return call_usermodehelper(v86d_path, argv, envp, 1);
37716+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
37717 }
37718
37719 /*
37720@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(
37721 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
37722 par->pmi_setpal = par->ypan = 0;
37723 } else {
37724+
37725+#ifdef CONFIG_PAX_KERNEXEC
37726+#ifdef CONFIG_MODULES
37727+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
37728+#endif
37729+ if (!par->pmi_code) {
37730+ par->pmi_setpal = par->ypan = 0;
37731+ return 0;
37732+ }
37733+#endif
37734+
37735 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
37736 + task->t.regs.edi);
37737+
37738+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
37739+ pax_open_kernel();
37740+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
37741+ pax_close_kernel();
37742+
37743+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
37744+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
37745+#else
37746 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
37747 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
37748+#endif
37749+
37750 printk(KERN_INFO "uvesafb: protected mode interface info at "
37751 "%04x:%04x\n",
37752 (u16)task->t.regs.es, (u16)task->t.regs.edi);
37753@@ -1821,6 +1844,11 @@ out:
37754 if (par->vbe_modes)
37755 kfree(par->vbe_modes);
37756
37757+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
37758+ if (par->pmi_code)
37759+ module_free_exec(NULL, par->pmi_code);
37760+#endif
37761+
37762 framebuffer_release(info);
37763 return err;
37764 }
37765@@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platfor
37766 kfree(par->vbe_state_orig);
37767 if (par->vbe_state_saved)
37768 kfree(par->vbe_state_saved);
37769+
37770+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
37771+ if (par->pmi_code)
37772+ module_free_exec(NULL, par->pmi_code);
37773+#endif
37774+
37775 }
37776
37777 framebuffer_release(info);
37778diff -urNp linux-3.0.4/drivers/video/vesafb.c linux-3.0.4/drivers/video/vesafb.c
37779--- linux-3.0.4/drivers/video/vesafb.c 2011-07-21 22:17:23.000000000 -0400
37780+++ linux-3.0.4/drivers/video/vesafb.c 2011-08-23 21:47:56.000000000 -0400
37781@@ -9,6 +9,7 @@
37782 */
37783
37784 #include <linux/module.h>
37785+#include <linux/moduleloader.h>
37786 #include <linux/kernel.h>
37787 #include <linux/errno.h>
37788 #include <linux/string.h>
37789@@ -52,8 +53,8 @@ static int vram_remap __initdata; /*
37790 static int vram_total __initdata; /* Set total amount of memory */
37791 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
37792 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
37793-static void (*pmi_start)(void) __read_mostly;
37794-static void (*pmi_pal) (void) __read_mostly;
37795+static void (*pmi_start)(void) __read_only;
37796+static void (*pmi_pal) (void) __read_only;
37797 static int depth __read_mostly;
37798 static int vga_compat __read_mostly;
37799 /* --------------------------------------------------------------------- */
37800@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
37801 unsigned int size_vmode;
37802 unsigned int size_remap;
37803 unsigned int size_total;
37804+ void *pmi_code = NULL;
37805
37806 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
37807 return -ENODEV;
37808@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
37809 size_remap = size_total;
37810 vesafb_fix.smem_len = size_remap;
37811
37812-#ifndef __i386__
37813- screen_info.vesapm_seg = 0;
37814-#endif
37815-
37816 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
37817 printk(KERN_WARNING
37818 "vesafb: cannot reserve video memory at 0x%lx\n",
37819@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct pl
37820 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
37821 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
37822
37823+#ifdef __i386__
37824+
37825+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
37826+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
37827+ if (!pmi_code)
37828+#elif !defined(CONFIG_PAX_KERNEXEC)
37829+ if (0)
37830+#endif
37831+
37832+#endif
37833+ screen_info.vesapm_seg = 0;
37834+
37835 if (screen_info.vesapm_seg) {
37836- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
37837- screen_info.vesapm_seg,screen_info.vesapm_off);
37838+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
37839+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
37840 }
37841
37842 if (screen_info.vesapm_seg < 0xc000)
37843@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct pl
37844
37845 if (ypan || pmi_setpal) {
37846 unsigned short *pmi_base;
37847+
37848 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
37849- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
37850- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
37851+
37852+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
37853+ pax_open_kernel();
37854+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
37855+#else
37856+ pmi_code = pmi_base;
37857+#endif
37858+
37859+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
37860+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
37861+
37862+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
37863+ pmi_start = ktva_ktla(pmi_start);
37864+ pmi_pal = ktva_ktla(pmi_pal);
37865+ pax_close_kernel();
37866+#endif
37867+
37868 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
37869 if (pmi_base[3]) {
37870 printk(KERN_INFO "vesafb: pmi: ports = ");
37871@@ -488,6 +514,11 @@ static int __init vesafb_probe(struct pl
37872 info->node, info->fix.id);
37873 return 0;
37874 err:
37875+
37876+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
37877+ module_free_exec(NULL, pmi_code);
37878+#endif
37879+
37880 if (info->screen_base)
37881 iounmap(info->screen_base);
37882 framebuffer_release(info);
37883diff -urNp linux-3.0.4/drivers/video/via/via_clock.h linux-3.0.4/drivers/video/via/via_clock.h
37884--- linux-3.0.4/drivers/video/via/via_clock.h 2011-07-21 22:17:23.000000000 -0400
37885+++ linux-3.0.4/drivers/video/via/via_clock.h 2011-08-23 21:47:56.000000000 -0400
37886@@ -56,7 +56,7 @@ struct via_clock {
37887
37888 void (*set_engine_pll_state)(u8 state);
37889 void (*set_engine_pll)(struct via_pll_config config);
37890-};
37891+} __no_const;
37892
37893
37894 static inline u32 get_pll_internal_frequency(u32 ref_freq,
37895diff -urNp linux-3.0.4/drivers/virtio/virtio_balloon.c linux-3.0.4/drivers/virtio/virtio_balloon.c
37896--- linux-3.0.4/drivers/virtio/virtio_balloon.c 2011-07-21 22:17:23.000000000 -0400
37897+++ linux-3.0.4/drivers/virtio/virtio_balloon.c 2011-08-23 21:48:14.000000000 -0400
37898@@ -174,6 +174,8 @@ static void update_balloon_stats(struct
37899 struct sysinfo i;
37900 int idx = 0;
37901
37902+ pax_track_stack();
37903+
37904 all_vm_events(events);
37905 si_meminfo(&i);
37906
37907diff -urNp linux-3.0.4/fs/9p/vfs_inode.c linux-3.0.4/fs/9p/vfs_inode.c
37908--- linux-3.0.4/fs/9p/vfs_inode.c 2011-07-21 22:17:23.000000000 -0400
37909+++ linux-3.0.4/fs/9p/vfs_inode.c 2011-08-23 21:47:56.000000000 -0400
37910@@ -1210,7 +1210,7 @@ static void *v9fs_vfs_follow_link(struct
37911 void
37912 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
37913 {
37914- char *s = nd_get_link(nd);
37915+ const char *s = nd_get_link(nd);
37916
37917 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
37918 IS_ERR(s) ? "<error>" : s);
37919diff -urNp linux-3.0.4/fs/aio.c linux-3.0.4/fs/aio.c
37920--- linux-3.0.4/fs/aio.c 2011-07-21 22:17:23.000000000 -0400
37921+++ linux-3.0.4/fs/aio.c 2011-08-23 21:48:14.000000000 -0400
37922@@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx
37923 size += sizeof(struct io_event) * nr_events;
37924 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
37925
37926- if (nr_pages < 0)
37927+ if (nr_pages <= 0)
37928 return -EINVAL;
37929
37930 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
37931@@ -1088,6 +1088,8 @@ static int read_events(struct kioctx *ct
37932 struct aio_timeout to;
37933 int retry = 0;
37934
37935+ pax_track_stack();
37936+
37937 /* needed to zero any padding within an entry (there shouldn't be
37938 * any, but C is fun!
37939 */
37940@@ -1381,22 +1383,27 @@ static ssize_t aio_fsync(struct kiocb *i
37941 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
37942 {
37943 ssize_t ret;
37944+ struct iovec iovstack;
37945
37946 #ifdef CONFIG_COMPAT
37947 if (compat)
37948 ret = compat_rw_copy_check_uvector(type,
37949 (struct compat_iovec __user *)kiocb->ki_buf,
37950- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
37951+ kiocb->ki_nbytes, 1, &iovstack,
37952 &kiocb->ki_iovec);
37953 else
37954 #endif
37955 ret = rw_copy_check_uvector(type,
37956 (struct iovec __user *)kiocb->ki_buf,
37957- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
37958+ kiocb->ki_nbytes, 1, &iovstack,
37959 &kiocb->ki_iovec);
37960 if (ret < 0)
37961 goto out;
37962
37963+ if (kiocb->ki_iovec == &iovstack) {
37964+ kiocb->ki_inline_vec = iovstack;
37965+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
37966+ }
37967 kiocb->ki_nr_segs = kiocb->ki_nbytes;
37968 kiocb->ki_cur_seg = 0;
37969 /* ki_nbytes/left now reflect bytes instead of segs */
37970diff -urNp linux-3.0.4/fs/attr.c linux-3.0.4/fs/attr.c
37971--- linux-3.0.4/fs/attr.c 2011-07-21 22:17:23.000000000 -0400
37972+++ linux-3.0.4/fs/attr.c 2011-08-23 21:48:14.000000000 -0400
37973@@ -98,6 +98,7 @@ int inode_newsize_ok(const struct inode
37974 unsigned long limit;
37975
37976 limit = rlimit(RLIMIT_FSIZE);
37977+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
37978 if (limit != RLIM_INFINITY && offset > limit)
37979 goto out_sig;
37980 if (offset > inode->i_sb->s_maxbytes)
37981diff -urNp linux-3.0.4/fs/autofs4/waitq.c linux-3.0.4/fs/autofs4/waitq.c
37982--- linux-3.0.4/fs/autofs4/waitq.c 2011-07-21 22:17:23.000000000 -0400
37983+++ linux-3.0.4/fs/autofs4/waitq.c 2011-10-06 04:17:55.000000000 -0400
37984@@ -60,7 +60,7 @@ static int autofs4_write(struct file *fi
37985 {
37986 unsigned long sigpipe, flags;
37987 mm_segment_t fs;
37988- const char *data = (const char *)addr;
37989+ const char __user *data = (const char __force_user *)addr;
37990 ssize_t wr = 0;
37991
37992 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
37993diff -urNp linux-3.0.4/fs/befs/linuxvfs.c linux-3.0.4/fs/befs/linuxvfs.c
37994--- linux-3.0.4/fs/befs/linuxvfs.c 2011-09-02 18:11:26.000000000 -0400
37995+++ linux-3.0.4/fs/befs/linuxvfs.c 2011-08-29 23:26:27.000000000 -0400
37996@@ -503,7 +503,7 @@ static void befs_put_link(struct dentry
37997 {
37998 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
37999 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
38000- char *link = nd_get_link(nd);
38001+ const char *link = nd_get_link(nd);
38002 if (!IS_ERR(link))
38003 kfree(link);
38004 }
38005diff -urNp linux-3.0.4/fs/binfmt_aout.c linux-3.0.4/fs/binfmt_aout.c
38006--- linux-3.0.4/fs/binfmt_aout.c 2011-07-21 22:17:23.000000000 -0400
38007+++ linux-3.0.4/fs/binfmt_aout.c 2011-08-23 21:48:14.000000000 -0400
38008@@ -16,6 +16,7 @@
38009 #include <linux/string.h>
38010 #include <linux/fs.h>
38011 #include <linux/file.h>
38012+#include <linux/security.h>
38013 #include <linux/stat.h>
38014 #include <linux/fcntl.h>
38015 #include <linux/ptrace.h>
38016@@ -86,6 +87,8 @@ static int aout_core_dump(struct coredum
38017 #endif
38018 # define START_STACK(u) ((void __user *)u.start_stack)
38019
38020+ memset(&dump, 0, sizeof(dump));
38021+
38022 fs = get_fs();
38023 set_fs(KERNEL_DS);
38024 has_dumped = 1;
38025@@ -97,10 +100,12 @@ static int aout_core_dump(struct coredum
38026
38027 /* If the size of the dump file exceeds the rlimit, then see what would happen
38028 if we wrote the stack, but not the data area. */
38029+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
38030 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
38031 dump.u_dsize = 0;
38032
38033 /* Make sure we have enough room to write the stack and data areas. */
38034+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
38035 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
38036 dump.u_ssize = 0;
38037
38038@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux
38039 rlim = rlimit(RLIMIT_DATA);
38040 if (rlim >= RLIM_INFINITY)
38041 rlim = ~0;
38042+
38043+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
38044 if (ex.a_data + ex.a_bss > rlim)
38045 return -ENOMEM;
38046
38047@@ -262,6 +269,27 @@ static int load_aout_binary(struct linux
38048 install_exec_creds(bprm);
38049 current->flags &= ~PF_FORKNOEXEC;
38050
38051+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
38052+ current->mm->pax_flags = 0UL;
38053+#endif
38054+
38055+#ifdef CONFIG_PAX_PAGEEXEC
38056+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
38057+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
38058+
38059+#ifdef CONFIG_PAX_EMUTRAMP
38060+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
38061+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
38062+#endif
38063+
38064+#ifdef CONFIG_PAX_MPROTECT
38065+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
38066+ current->mm->pax_flags |= MF_PAX_MPROTECT;
38067+#endif
38068+
38069+ }
38070+#endif
38071+
38072 if (N_MAGIC(ex) == OMAGIC) {
38073 unsigned long text_addr, map_size;
38074 loff_t pos;
38075@@ -334,7 +362,7 @@ static int load_aout_binary(struct linux
38076
38077 down_write(&current->mm->mmap_sem);
38078 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
38079- PROT_READ | PROT_WRITE | PROT_EXEC,
38080+ PROT_READ | PROT_WRITE,
38081 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
38082 fd_offset + ex.a_text);
38083 up_write(&current->mm->mmap_sem);
38084diff -urNp linux-3.0.4/fs/binfmt_elf.c linux-3.0.4/fs/binfmt_elf.c
38085--- linux-3.0.4/fs/binfmt_elf.c 2011-07-21 22:17:23.000000000 -0400
38086+++ linux-3.0.4/fs/binfmt_elf.c 2011-08-23 21:48:14.000000000 -0400
38087@@ -51,6 +51,10 @@ static int elf_core_dump(struct coredump
38088 #define elf_core_dump NULL
38089 #endif
38090
38091+#ifdef CONFIG_PAX_MPROTECT
38092+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
38093+#endif
38094+
38095 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
38096 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
38097 #else
38098@@ -70,6 +74,11 @@ static struct linux_binfmt elf_format =
38099 .load_binary = load_elf_binary,
38100 .load_shlib = load_elf_library,
38101 .core_dump = elf_core_dump,
38102+
38103+#ifdef CONFIG_PAX_MPROTECT
38104+ .handle_mprotect= elf_handle_mprotect,
38105+#endif
38106+
38107 .min_coredump = ELF_EXEC_PAGESIZE,
38108 };
38109
38110@@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
38111
38112 static int set_brk(unsigned long start, unsigned long end)
38113 {
38114+ unsigned long e = end;
38115+
38116 start = ELF_PAGEALIGN(start);
38117 end = ELF_PAGEALIGN(end);
38118 if (end > start) {
38119@@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
38120 if (BAD_ADDR(addr))
38121 return addr;
38122 }
38123- current->mm->start_brk = current->mm->brk = end;
38124+ current->mm->start_brk = current->mm->brk = e;
38125 return 0;
38126 }
38127
38128@@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
38129 elf_addr_t __user *u_rand_bytes;
38130 const char *k_platform = ELF_PLATFORM;
38131 const char *k_base_platform = ELF_BASE_PLATFORM;
38132- unsigned char k_rand_bytes[16];
38133+ u32 k_rand_bytes[4];
38134 int items;
38135 elf_addr_t *elf_info;
38136 int ei_index = 0;
38137 const struct cred *cred = current_cred();
38138 struct vm_area_struct *vma;
38139+ unsigned long saved_auxv[AT_VECTOR_SIZE];
38140+
38141+ pax_track_stack();
38142
38143 /*
38144 * In some cases (e.g. Hyper-Threading), we want to avoid L1
38145@@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
38146 * Generate 16 random bytes for userspace PRNG seeding.
38147 */
38148 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
38149- u_rand_bytes = (elf_addr_t __user *)
38150- STACK_ALLOC(p, sizeof(k_rand_bytes));
38151+ srandom32(k_rand_bytes[0] ^ random32());
38152+ srandom32(k_rand_bytes[1] ^ random32());
38153+ srandom32(k_rand_bytes[2] ^ random32());
38154+ srandom32(k_rand_bytes[3] ^ random32());
38155+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
38156+ u_rand_bytes = (elf_addr_t __user *) p;
38157 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
38158 return -EFAULT;
38159
38160@@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
38161 return -EFAULT;
38162 current->mm->env_end = p;
38163
38164+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
38165+
38166 /* Put the elf_info on the stack in the right place. */
38167 sp = (elf_addr_t __user *)envp + 1;
38168- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
38169+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
38170 return -EFAULT;
38171 return 0;
38172 }
38173@@ -381,10 +401,10 @@ static unsigned long load_elf_interp(str
38174 {
38175 struct elf_phdr *elf_phdata;
38176 struct elf_phdr *eppnt;
38177- unsigned long load_addr = 0;
38178+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
38179 int load_addr_set = 0;
38180 unsigned long last_bss = 0, elf_bss = 0;
38181- unsigned long error = ~0UL;
38182+ unsigned long error = -EINVAL;
38183 unsigned long total_size;
38184 int retval, i, size;
38185
38186@@ -430,6 +450,11 @@ static unsigned long load_elf_interp(str
38187 goto out_close;
38188 }
38189
38190+#ifdef CONFIG_PAX_SEGMEXEC
38191+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
38192+ pax_task_size = SEGMEXEC_TASK_SIZE;
38193+#endif
38194+
38195 eppnt = elf_phdata;
38196 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
38197 if (eppnt->p_type == PT_LOAD) {
38198@@ -473,8 +498,8 @@ static unsigned long load_elf_interp(str
38199 k = load_addr + eppnt->p_vaddr;
38200 if (BAD_ADDR(k) ||
38201 eppnt->p_filesz > eppnt->p_memsz ||
38202- eppnt->p_memsz > TASK_SIZE ||
38203- TASK_SIZE - eppnt->p_memsz < k) {
38204+ eppnt->p_memsz > pax_task_size ||
38205+ pax_task_size - eppnt->p_memsz < k) {
38206 error = -ENOMEM;
38207 goto out_close;
38208 }
38209@@ -528,6 +553,193 @@ out:
38210 return error;
38211 }
38212
38213+#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
38214+static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
38215+{
38216+ unsigned long pax_flags = 0UL;
38217+
38218+#ifdef CONFIG_PAX_PAGEEXEC
38219+ if (elf_phdata->p_flags & PF_PAGEEXEC)
38220+ pax_flags |= MF_PAX_PAGEEXEC;
38221+#endif
38222+
38223+#ifdef CONFIG_PAX_SEGMEXEC
38224+ if (elf_phdata->p_flags & PF_SEGMEXEC)
38225+ pax_flags |= MF_PAX_SEGMEXEC;
38226+#endif
38227+
38228+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
38229+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
38230+ if ((__supported_pte_mask & _PAGE_NX))
38231+ pax_flags &= ~MF_PAX_SEGMEXEC;
38232+ else
38233+ pax_flags &= ~MF_PAX_PAGEEXEC;
38234+ }
38235+#endif
38236+
38237+#ifdef CONFIG_PAX_EMUTRAMP
38238+ if (elf_phdata->p_flags & PF_EMUTRAMP)
38239+ pax_flags |= MF_PAX_EMUTRAMP;
38240+#endif
38241+
38242+#ifdef CONFIG_PAX_MPROTECT
38243+ if (elf_phdata->p_flags & PF_MPROTECT)
38244+ pax_flags |= MF_PAX_MPROTECT;
38245+#endif
38246+
38247+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
38248+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
38249+ pax_flags |= MF_PAX_RANDMMAP;
38250+#endif
38251+
38252+ return pax_flags;
38253+}
38254+#endif
38255+
38256+#ifdef CONFIG_PAX_PT_PAX_FLAGS
38257+static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
38258+{
38259+ unsigned long pax_flags = 0UL;
38260+
38261+#ifdef CONFIG_PAX_PAGEEXEC
38262+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
38263+ pax_flags |= MF_PAX_PAGEEXEC;
38264+#endif
38265+
38266+#ifdef CONFIG_PAX_SEGMEXEC
38267+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
38268+ pax_flags |= MF_PAX_SEGMEXEC;
38269+#endif
38270+
38271+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
38272+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
38273+ if ((__supported_pte_mask & _PAGE_NX))
38274+ pax_flags &= ~MF_PAX_SEGMEXEC;
38275+ else
38276+ pax_flags &= ~MF_PAX_PAGEEXEC;
38277+ }
38278+#endif
38279+
38280+#ifdef CONFIG_PAX_EMUTRAMP
38281+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
38282+ pax_flags |= MF_PAX_EMUTRAMP;
38283+#endif
38284+
38285+#ifdef CONFIG_PAX_MPROTECT
38286+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
38287+ pax_flags |= MF_PAX_MPROTECT;
38288+#endif
38289+
38290+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
38291+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
38292+ pax_flags |= MF_PAX_RANDMMAP;
38293+#endif
38294+
38295+ return pax_flags;
38296+}
38297+#endif
38298+
38299+#ifdef CONFIG_PAX_EI_PAX
38300+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
38301+{
38302+ unsigned long pax_flags = 0UL;
38303+
38304+#ifdef CONFIG_PAX_PAGEEXEC
38305+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
38306+ pax_flags |= MF_PAX_PAGEEXEC;
38307+#endif
38308+
38309+#ifdef CONFIG_PAX_SEGMEXEC
38310+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
38311+ pax_flags |= MF_PAX_SEGMEXEC;
38312+#endif
38313+
38314+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
38315+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
38316+ if ((__supported_pte_mask & _PAGE_NX))
38317+ pax_flags &= ~MF_PAX_SEGMEXEC;
38318+ else
38319+ pax_flags &= ~MF_PAX_PAGEEXEC;
38320+ }
38321+#endif
38322+
38323+#ifdef CONFIG_PAX_EMUTRAMP
38324+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
38325+ pax_flags |= MF_PAX_EMUTRAMP;
38326+#endif
38327+
38328+#ifdef CONFIG_PAX_MPROTECT
38329+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
38330+ pax_flags |= MF_PAX_MPROTECT;
38331+#endif
38332+
38333+#ifdef CONFIG_PAX_ASLR
38334+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
38335+ pax_flags |= MF_PAX_RANDMMAP;
38336+#endif
38337+
38338+ return pax_flags;
38339+}
38340+#endif
38341+
38342+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
38343+static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
38344+{
38345+ unsigned long pax_flags = 0UL;
38346+
38347+#ifdef CONFIG_PAX_PT_PAX_FLAGS
38348+ unsigned long i;
38349+ int found_flags = 0;
38350+#endif
38351+
38352+#ifdef CONFIG_PAX_EI_PAX
38353+ pax_flags = pax_parse_ei_pax(elf_ex);
38354+#endif
38355+
38356+#ifdef CONFIG_PAX_PT_PAX_FLAGS
38357+ for (i = 0UL; i < elf_ex->e_phnum; i++)
38358+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
38359+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
38360+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
38361+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
38362+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
38363+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
38364+ return -EINVAL;
38365+
38366+#ifdef CONFIG_PAX_SOFTMODE
38367+ if (pax_softmode)
38368+ pax_flags = pax_parse_softmode(&elf_phdata[i]);
38369+ else
38370+#endif
38371+
38372+ pax_flags = pax_parse_hardmode(&elf_phdata[i]);
38373+ found_flags = 1;
38374+ break;
38375+ }
38376+#endif
38377+
38378+#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
38379+ if (found_flags == 0) {
38380+ struct elf_phdr phdr;
38381+ memset(&phdr, 0, sizeof(phdr));
38382+ phdr.p_flags = PF_NOEMUTRAMP;
38383+#ifdef CONFIG_PAX_SOFTMODE
38384+ if (pax_softmode)
38385+ pax_flags = pax_parse_softmode(&phdr);
38386+ else
38387+#endif
38388+ pax_flags = pax_parse_hardmode(&phdr);
38389+ }
38390+#endif
38391+
38392+ if (0 > pax_check_flags(&pax_flags))
38393+ return -EINVAL;
38394+
38395+ current->mm->pax_flags = pax_flags;
38396+ return 0;
38397+}
38398+#endif
38399+
38400 /*
38401 * These are the functions used to load ELF style executables and shared
38402 * libraries. There is no binary dependent code anywhere else.
38403@@ -544,6 +756,11 @@ static unsigned long randomize_stack_top
38404 {
38405 unsigned int random_variable = 0;
38406
38407+#ifdef CONFIG_PAX_RANDUSTACK
38408+ if (randomize_va_space)
38409+ return stack_top - current->mm->delta_stack;
38410+#endif
38411+
38412 if ((current->flags & PF_RANDOMIZE) &&
38413 !(current->personality & ADDR_NO_RANDOMIZE)) {
38414 random_variable = get_random_int() & STACK_RND_MASK;
38415@@ -562,7 +779,7 @@ static int load_elf_binary(struct linux_
38416 unsigned long load_addr = 0, load_bias = 0;
38417 int load_addr_set = 0;
38418 char * elf_interpreter = NULL;
38419- unsigned long error;
38420+ unsigned long error = 0;
38421 struct elf_phdr *elf_ppnt, *elf_phdata;
38422 unsigned long elf_bss, elf_brk;
38423 int retval, i;
38424@@ -572,11 +789,11 @@ static int load_elf_binary(struct linux_
38425 unsigned long start_code, end_code, start_data, end_data;
38426 unsigned long reloc_func_desc __maybe_unused = 0;
38427 int executable_stack = EXSTACK_DEFAULT;
38428- unsigned long def_flags = 0;
38429 struct {
38430 struct elfhdr elf_ex;
38431 struct elfhdr interp_elf_ex;
38432 } *loc;
38433+ unsigned long pax_task_size = TASK_SIZE;
38434
38435 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
38436 if (!loc) {
38437@@ -714,11 +931,81 @@ static int load_elf_binary(struct linux_
38438
38439 /* OK, This is the point of no return */
38440 current->flags &= ~PF_FORKNOEXEC;
38441- current->mm->def_flags = def_flags;
38442+
38443+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
38444+ current->mm->pax_flags = 0UL;
38445+#endif
38446+
38447+#ifdef CONFIG_PAX_DLRESOLVE
38448+ current->mm->call_dl_resolve = 0UL;
38449+#endif
38450+
38451+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
38452+ current->mm->call_syscall = 0UL;
38453+#endif
38454+
38455+#ifdef CONFIG_PAX_ASLR
38456+ current->mm->delta_mmap = 0UL;
38457+ current->mm->delta_stack = 0UL;
38458+#endif
38459+
38460+ current->mm->def_flags = 0;
38461+
38462+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
38463+ if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
38464+ send_sig(SIGKILL, current, 0);
38465+ goto out_free_dentry;
38466+ }
38467+#endif
38468+
38469+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
38470+ pax_set_initial_flags(bprm);
38471+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
38472+ if (pax_set_initial_flags_func)
38473+ (pax_set_initial_flags_func)(bprm);
38474+#endif
38475+
38476+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
38477+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
38478+ current->mm->context.user_cs_limit = PAGE_SIZE;
38479+ current->mm->def_flags |= VM_PAGEEXEC;
38480+ }
38481+#endif
38482+
38483+#ifdef CONFIG_PAX_SEGMEXEC
38484+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
38485+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
38486+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
38487+ pax_task_size = SEGMEXEC_TASK_SIZE;
38488+ current->mm->def_flags |= VM_NOHUGEPAGE;
38489+ }
38490+#endif
38491+
38492+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
38493+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
38494+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
38495+ put_cpu();
38496+ }
38497+#endif
38498
38499 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
38500 may depend on the personality. */
38501 SET_PERSONALITY(loc->elf_ex);
38502+
38503+#ifdef CONFIG_PAX_ASLR
38504+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
38505+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
38506+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
38507+ }
38508+#endif
38509+
38510+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
38511+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
38512+ executable_stack = EXSTACK_DISABLE_X;
38513+ current->personality &= ~READ_IMPLIES_EXEC;
38514+ } else
38515+#endif
38516+
38517 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
38518 current->personality |= READ_IMPLIES_EXEC;
38519
38520@@ -800,6 +1087,20 @@ static int load_elf_binary(struct linux_
38521 #else
38522 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
38523 #endif
38524+
38525+#ifdef CONFIG_PAX_RANDMMAP
38526+ /* PaX: randomize base address at the default exe base if requested */
38527+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
38528+#ifdef CONFIG_SPARC64
38529+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
38530+#else
38531+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
38532+#endif
38533+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
38534+ elf_flags |= MAP_FIXED;
38535+ }
38536+#endif
38537+
38538 }
38539
38540 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
38541@@ -832,9 +1133,9 @@ static int load_elf_binary(struct linux_
38542 * allowed task size. Note that p_filesz must always be
38543 * <= p_memsz so it is only necessary to check p_memsz.
38544 */
38545- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
38546- elf_ppnt->p_memsz > TASK_SIZE ||
38547- TASK_SIZE - elf_ppnt->p_memsz < k) {
38548+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
38549+ elf_ppnt->p_memsz > pax_task_size ||
38550+ pax_task_size - elf_ppnt->p_memsz < k) {
38551 /* set_brk can never work. Avoid overflows. */
38552 send_sig(SIGKILL, current, 0);
38553 retval = -EINVAL;
38554@@ -862,6 +1163,11 @@ static int load_elf_binary(struct linux_
38555 start_data += load_bias;
38556 end_data += load_bias;
38557
38558+#ifdef CONFIG_PAX_RANDMMAP
38559+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
38560+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
38561+#endif
38562+
38563 /* Calling set_brk effectively mmaps the pages that we need
38564 * for the bss and break sections. We must do this before
38565 * mapping in the interpreter, to make sure it doesn't wind
38566@@ -873,9 +1179,11 @@ static int load_elf_binary(struct linux_
38567 goto out_free_dentry;
38568 }
38569 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
38570- send_sig(SIGSEGV, current, 0);
38571- retval = -EFAULT; /* Nobody gets to see this, but.. */
38572- goto out_free_dentry;
38573+ /*
38574+ * This bss-zeroing can fail if the ELF
38575+ * file specifies odd protections. So
38576+ * we don't check the return value
38577+ */
38578 }
38579
38580 if (elf_interpreter) {
38581@@ -1090,7 +1398,7 @@ out:
38582 * Decide what to dump of a segment, part, all or none.
38583 */
38584 static unsigned long vma_dump_size(struct vm_area_struct *vma,
38585- unsigned long mm_flags)
38586+ unsigned long mm_flags, long signr)
38587 {
38588 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
38589
38590@@ -1124,7 +1432,7 @@ static unsigned long vma_dump_size(struc
38591 if (vma->vm_file == NULL)
38592 return 0;
38593
38594- if (FILTER(MAPPED_PRIVATE))
38595+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
38596 goto whole;
38597
38598 /*
38599@@ -1346,9 +1654,9 @@ static void fill_auxv_note(struct memelf
38600 {
38601 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
38602 int i = 0;
38603- do
38604+ do {
38605 i += 2;
38606- while (auxv[i - 2] != AT_NULL);
38607+ } while (auxv[i - 2] != AT_NULL);
38608 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
38609 }
38610
38611@@ -1854,14 +2162,14 @@ static void fill_extnum_info(struct elfh
38612 }
38613
38614 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
38615- unsigned long mm_flags)
38616+ struct coredump_params *cprm)
38617 {
38618 struct vm_area_struct *vma;
38619 size_t size = 0;
38620
38621 for (vma = first_vma(current, gate_vma); vma != NULL;
38622 vma = next_vma(vma, gate_vma))
38623- size += vma_dump_size(vma, mm_flags);
38624+ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
38625 return size;
38626 }
38627
38628@@ -1955,7 +2263,7 @@ static int elf_core_dump(struct coredump
38629
38630 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
38631
38632- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
38633+ offset += elf_core_vma_data_size(gate_vma, cprm);
38634 offset += elf_core_extra_data_size();
38635 e_shoff = offset;
38636
38637@@ -1969,10 +2277,12 @@ static int elf_core_dump(struct coredump
38638 offset = dataoff;
38639
38640 size += sizeof(*elf);
38641+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
38642 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
38643 goto end_coredump;
38644
38645 size += sizeof(*phdr4note);
38646+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
38647 if (size > cprm->limit
38648 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
38649 goto end_coredump;
38650@@ -1986,7 +2296,7 @@ static int elf_core_dump(struct coredump
38651 phdr.p_offset = offset;
38652 phdr.p_vaddr = vma->vm_start;
38653 phdr.p_paddr = 0;
38654- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
38655+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
38656 phdr.p_memsz = vma->vm_end - vma->vm_start;
38657 offset += phdr.p_filesz;
38658 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
38659@@ -1997,6 +2307,7 @@ static int elf_core_dump(struct coredump
38660 phdr.p_align = ELF_EXEC_PAGESIZE;
38661
38662 size += sizeof(phdr);
38663+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
38664 if (size > cprm->limit
38665 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
38666 goto end_coredump;
38667@@ -2021,7 +2332,7 @@ static int elf_core_dump(struct coredump
38668 unsigned long addr;
38669 unsigned long end;
38670
38671- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
38672+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
38673
38674 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
38675 struct page *page;
38676@@ -2030,6 +2341,7 @@ static int elf_core_dump(struct coredump
38677 page = get_dump_page(addr);
38678 if (page) {
38679 void *kaddr = kmap(page);
38680+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
38681 stop = ((size += PAGE_SIZE) > cprm->limit) ||
38682 !dump_write(cprm->file, kaddr,
38683 PAGE_SIZE);
38684@@ -2047,6 +2359,7 @@ static int elf_core_dump(struct coredump
38685
38686 if (e_phnum == PN_XNUM) {
38687 size += sizeof(*shdr4extnum);
38688+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
38689 if (size > cprm->limit
38690 || !dump_write(cprm->file, shdr4extnum,
38691 sizeof(*shdr4extnum)))
38692@@ -2067,6 +2380,97 @@ out:
38693
38694 #endif /* CONFIG_ELF_CORE */
38695
38696+#ifdef CONFIG_PAX_MPROTECT
38697+/* PaX: non-PIC ELF libraries need relocations on their executable segments
38698+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
38699+ * we'll remove VM_MAYWRITE for good on RELRO segments.
38700+ *
38701+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
38702+ * basis because we want to allow the common case and not the special ones.
38703+ */
38704+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
38705+{
38706+ struct elfhdr elf_h;
38707+ struct elf_phdr elf_p;
38708+ unsigned long i;
38709+ unsigned long oldflags;
38710+ bool is_textrel_rw, is_textrel_rx, is_relro;
38711+
38712+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
38713+ return;
38714+
38715+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
38716+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
38717+
38718+#ifdef CONFIG_PAX_ELFRELOCS
38719+ /* possible TEXTREL */
38720+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
38721+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
38722+#else
38723+ is_textrel_rw = false;
38724+ is_textrel_rx = false;
38725+#endif
38726+
38727+ /* possible RELRO */
38728+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
38729+
38730+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
38731+ return;
38732+
38733+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
38734+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
38735+
38736+#ifdef CONFIG_PAX_ETEXECRELOCS
38737+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
38738+#else
38739+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
38740+#endif
38741+
38742+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
38743+ !elf_check_arch(&elf_h) ||
38744+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
38745+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
38746+ return;
38747+
38748+ for (i = 0UL; i < elf_h.e_phnum; i++) {
38749+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
38750+ return;
38751+ switch (elf_p.p_type) {
38752+ case PT_DYNAMIC:
38753+ if (!is_textrel_rw && !is_textrel_rx)
38754+ continue;
38755+ i = 0UL;
38756+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
38757+ elf_dyn dyn;
38758+
38759+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
38760+ return;
38761+ if (dyn.d_tag == DT_NULL)
38762+ return;
38763+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
38764+ gr_log_textrel(vma);
38765+ if (is_textrel_rw)
38766+ vma->vm_flags |= VM_MAYWRITE;
38767+ else
38768+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
38769+ vma->vm_flags &= ~VM_MAYWRITE;
38770+ return;
38771+ }
38772+ i++;
38773+ }
38774+ return;
38775+
38776+ case PT_GNU_RELRO:
38777+ if (!is_relro)
38778+ continue;
38779+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
38780+ vma->vm_flags &= ~VM_MAYWRITE;
38781+ return;
38782+ }
38783+ }
38784+}
38785+#endif
38786+
38787 static int __init init_elf_binfmt(void)
38788 {
38789 return register_binfmt(&elf_format);
38790diff -urNp linux-3.0.4/fs/binfmt_flat.c linux-3.0.4/fs/binfmt_flat.c
38791--- linux-3.0.4/fs/binfmt_flat.c 2011-07-21 22:17:23.000000000 -0400
38792+++ linux-3.0.4/fs/binfmt_flat.c 2011-08-23 21:47:56.000000000 -0400
38793@@ -567,7 +567,9 @@ static int load_flat_file(struct linux_b
38794 realdatastart = (unsigned long) -ENOMEM;
38795 printk("Unable to allocate RAM for process data, errno %d\n",
38796 (int)-realdatastart);
38797+ down_write(&current->mm->mmap_sem);
38798 do_munmap(current->mm, textpos, text_len);
38799+ up_write(&current->mm->mmap_sem);
38800 ret = realdatastart;
38801 goto err;
38802 }
38803@@ -591,8 +593,10 @@ static int load_flat_file(struct linux_b
38804 }
38805 if (IS_ERR_VALUE(result)) {
38806 printk("Unable to read data+bss, errno %d\n", (int)-result);
38807+ down_write(&current->mm->mmap_sem);
38808 do_munmap(current->mm, textpos, text_len);
38809 do_munmap(current->mm, realdatastart, len);
38810+ up_write(&current->mm->mmap_sem);
38811 ret = result;
38812 goto err;
38813 }
38814@@ -661,8 +665,10 @@ static int load_flat_file(struct linux_b
38815 }
38816 if (IS_ERR_VALUE(result)) {
38817 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
38818+ down_write(&current->mm->mmap_sem);
38819 do_munmap(current->mm, textpos, text_len + data_len + extra +
38820 MAX_SHARED_LIBS * sizeof(unsigned long));
38821+ up_write(&current->mm->mmap_sem);
38822 ret = result;
38823 goto err;
38824 }
38825diff -urNp linux-3.0.4/fs/bio.c linux-3.0.4/fs/bio.c
38826--- linux-3.0.4/fs/bio.c 2011-07-21 22:17:23.000000000 -0400
38827+++ linux-3.0.4/fs/bio.c 2011-10-06 04:17:55.000000000 -0400
38828@@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct b
38829 const int read = bio_data_dir(bio) == READ;
38830 struct bio_map_data *bmd = bio->bi_private;
38831 int i;
38832- char *p = bmd->sgvecs[0].iov_base;
38833+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
38834
38835 __bio_for_each_segment(bvec, bio, i, 0) {
38836 char *addr = page_address(bvec->bv_page);
38837diff -urNp linux-3.0.4/fs/block_dev.c linux-3.0.4/fs/block_dev.c
38838--- linux-3.0.4/fs/block_dev.c 2011-07-21 22:17:23.000000000 -0400
38839+++ linux-3.0.4/fs/block_dev.c 2011-08-23 21:47:56.000000000 -0400
38840@@ -671,7 +671,7 @@ static bool bd_may_claim(struct block_de
38841 else if (bdev->bd_contains == bdev)
38842 return true; /* is a whole device which isn't held */
38843
38844- else if (whole->bd_holder == bd_may_claim)
38845+ else if (whole->bd_holder == (void *)bd_may_claim)
38846 return true; /* is a partition of a device that is being partitioned */
38847 else if (whole->bd_holder != NULL)
38848 return false; /* is a partition of a held device */
38849diff -urNp linux-3.0.4/fs/btrfs/ctree.c linux-3.0.4/fs/btrfs/ctree.c
38850--- linux-3.0.4/fs/btrfs/ctree.c 2011-07-21 22:17:23.000000000 -0400
38851+++ linux-3.0.4/fs/btrfs/ctree.c 2011-08-23 21:47:56.000000000 -0400
38852@@ -454,9 +454,12 @@ static noinline int __btrfs_cow_block(st
38853 free_extent_buffer(buf);
38854 add_root_to_dirty_list(root);
38855 } else {
38856- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
38857- parent_start = parent->start;
38858- else
38859+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
38860+ if (parent)
38861+ parent_start = parent->start;
38862+ else
38863+ parent_start = 0;
38864+ } else
38865 parent_start = 0;
38866
38867 WARN_ON(trans->transid != btrfs_header_generation(parent));
38868diff -urNp linux-3.0.4/fs/btrfs/inode.c linux-3.0.4/fs/btrfs/inode.c
38869--- linux-3.0.4/fs/btrfs/inode.c 2011-07-21 22:17:23.000000000 -0400
38870+++ linux-3.0.4/fs/btrfs/inode.c 2011-08-23 21:48:14.000000000 -0400
38871@@ -6895,7 +6895,7 @@ fail:
38872 return -ENOMEM;
38873 }
38874
38875-static int btrfs_getattr(struct vfsmount *mnt,
38876+int btrfs_getattr(struct vfsmount *mnt,
38877 struct dentry *dentry, struct kstat *stat)
38878 {
38879 struct inode *inode = dentry->d_inode;
38880@@ -6907,6 +6907,14 @@ static int btrfs_getattr(struct vfsmount
38881 return 0;
38882 }
38883
38884+EXPORT_SYMBOL(btrfs_getattr);
38885+
38886+dev_t get_btrfs_dev_from_inode(struct inode *inode)
38887+{
38888+ return BTRFS_I(inode)->root->anon_super.s_dev;
38889+}
38890+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
38891+
38892 /*
38893 * If a file is moved, it will inherit the cow and compression flags of the new
38894 * directory.
38895diff -urNp linux-3.0.4/fs/btrfs/ioctl.c linux-3.0.4/fs/btrfs/ioctl.c
38896--- linux-3.0.4/fs/btrfs/ioctl.c 2011-07-21 22:17:23.000000000 -0400
38897+++ linux-3.0.4/fs/btrfs/ioctl.c 2011-10-06 04:17:55.000000000 -0400
38898@@ -2676,9 +2676,12 @@ long btrfs_ioctl_space_info(struct btrfs
38899 for (i = 0; i < num_types; i++) {
38900 struct btrfs_space_info *tmp;
38901
38902+ /* Don't copy in more than we allocated */
38903 if (!slot_count)
38904 break;
38905
38906+ slot_count--;
38907+
38908 info = NULL;
38909 rcu_read_lock();
38910 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
38911@@ -2700,15 +2703,12 @@ long btrfs_ioctl_space_info(struct btrfs
38912 memcpy(dest, &space, sizeof(space));
38913 dest++;
38914 space_args.total_spaces++;
38915- slot_count--;
38916 }
38917- if (!slot_count)
38918- break;
38919 }
38920 up_read(&info->groups_sem);
38921 }
38922
38923- user_dest = (struct btrfs_ioctl_space_info *)
38924+ user_dest = (struct btrfs_ioctl_space_info __user *)
38925 (arg + sizeof(struct btrfs_ioctl_space_args));
38926
38927 if (copy_to_user(user_dest, dest_orig, alloc_size))
38928diff -urNp linux-3.0.4/fs/btrfs/relocation.c linux-3.0.4/fs/btrfs/relocation.c
38929--- linux-3.0.4/fs/btrfs/relocation.c 2011-07-21 22:17:23.000000000 -0400
38930+++ linux-3.0.4/fs/btrfs/relocation.c 2011-08-23 21:47:56.000000000 -0400
38931@@ -1242,7 +1242,7 @@ static int __update_reloc_root(struct bt
38932 }
38933 spin_unlock(&rc->reloc_root_tree.lock);
38934
38935- BUG_ON((struct btrfs_root *)node->data != root);
38936+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
38937
38938 if (!del) {
38939 spin_lock(&rc->reloc_root_tree.lock);
38940diff -urNp linux-3.0.4/fs/cachefiles/bind.c linux-3.0.4/fs/cachefiles/bind.c
38941--- linux-3.0.4/fs/cachefiles/bind.c 2011-07-21 22:17:23.000000000 -0400
38942+++ linux-3.0.4/fs/cachefiles/bind.c 2011-08-23 21:47:56.000000000 -0400
38943@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
38944 args);
38945
38946 /* start by checking things over */
38947- ASSERT(cache->fstop_percent >= 0 &&
38948- cache->fstop_percent < cache->fcull_percent &&
38949+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
38950 cache->fcull_percent < cache->frun_percent &&
38951 cache->frun_percent < 100);
38952
38953- ASSERT(cache->bstop_percent >= 0 &&
38954- cache->bstop_percent < cache->bcull_percent &&
38955+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
38956 cache->bcull_percent < cache->brun_percent &&
38957 cache->brun_percent < 100);
38958
38959diff -urNp linux-3.0.4/fs/cachefiles/daemon.c linux-3.0.4/fs/cachefiles/daemon.c
38960--- linux-3.0.4/fs/cachefiles/daemon.c 2011-07-21 22:17:23.000000000 -0400
38961+++ linux-3.0.4/fs/cachefiles/daemon.c 2011-08-23 21:47:56.000000000 -0400
38962@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(st
38963 if (n > buflen)
38964 return -EMSGSIZE;
38965
38966- if (copy_to_user(_buffer, buffer, n) != 0)
38967+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
38968 return -EFAULT;
38969
38970 return n;
38971@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(s
38972 if (test_bit(CACHEFILES_DEAD, &cache->flags))
38973 return -EIO;
38974
38975- if (datalen < 0 || datalen > PAGE_SIZE - 1)
38976+ if (datalen > PAGE_SIZE - 1)
38977 return -EOPNOTSUPP;
38978
38979 /* drag the command string into the kernel so we can parse it */
38980@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struc
38981 if (args[0] != '%' || args[1] != '\0')
38982 return -EINVAL;
38983
38984- if (fstop < 0 || fstop >= cache->fcull_percent)
38985+ if (fstop >= cache->fcull_percent)
38986 return cachefiles_daemon_range_error(cache, args);
38987
38988 cache->fstop_percent = fstop;
38989@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struc
38990 if (args[0] != '%' || args[1] != '\0')
38991 return -EINVAL;
38992
38993- if (bstop < 0 || bstop >= cache->bcull_percent)
38994+ if (bstop >= cache->bcull_percent)
38995 return cachefiles_daemon_range_error(cache, args);
38996
38997 cache->bstop_percent = bstop;
38998diff -urNp linux-3.0.4/fs/cachefiles/internal.h linux-3.0.4/fs/cachefiles/internal.h
38999--- linux-3.0.4/fs/cachefiles/internal.h 2011-07-21 22:17:23.000000000 -0400
39000+++ linux-3.0.4/fs/cachefiles/internal.h 2011-08-23 21:47:56.000000000 -0400
39001@@ -57,7 +57,7 @@ struct cachefiles_cache {
39002 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
39003 struct rb_root active_nodes; /* active nodes (can't be culled) */
39004 rwlock_t active_lock; /* lock for active_nodes */
39005- atomic_t gravecounter; /* graveyard uniquifier */
39006+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
39007 unsigned frun_percent; /* when to stop culling (% files) */
39008 unsigned fcull_percent; /* when to start culling (% files) */
39009 unsigned fstop_percent; /* when to stop allocating (% files) */
39010@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struc
39011 * proc.c
39012 */
39013 #ifdef CONFIG_CACHEFILES_HISTOGRAM
39014-extern atomic_t cachefiles_lookup_histogram[HZ];
39015-extern atomic_t cachefiles_mkdir_histogram[HZ];
39016-extern atomic_t cachefiles_create_histogram[HZ];
39017+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
39018+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
39019+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
39020
39021 extern int __init cachefiles_proc_init(void);
39022 extern void cachefiles_proc_cleanup(void);
39023 static inline
39024-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
39025+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
39026 {
39027 unsigned long jif = jiffies - start_jif;
39028 if (jif >= HZ)
39029 jif = HZ - 1;
39030- atomic_inc(&histogram[jif]);
39031+ atomic_inc_unchecked(&histogram[jif]);
39032 }
39033
39034 #else
39035diff -urNp linux-3.0.4/fs/cachefiles/namei.c linux-3.0.4/fs/cachefiles/namei.c
39036--- linux-3.0.4/fs/cachefiles/namei.c 2011-07-21 22:17:23.000000000 -0400
39037+++ linux-3.0.4/fs/cachefiles/namei.c 2011-08-23 21:47:56.000000000 -0400
39038@@ -318,7 +318,7 @@ try_again:
39039 /* first step is to make up a grave dentry in the graveyard */
39040 sprintf(nbuffer, "%08x%08x",
39041 (uint32_t) get_seconds(),
39042- (uint32_t) atomic_inc_return(&cache->gravecounter));
39043+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
39044
39045 /* do the multiway lock magic */
39046 trap = lock_rename(cache->graveyard, dir);
39047diff -urNp linux-3.0.4/fs/cachefiles/proc.c linux-3.0.4/fs/cachefiles/proc.c
39048--- linux-3.0.4/fs/cachefiles/proc.c 2011-07-21 22:17:23.000000000 -0400
39049+++ linux-3.0.4/fs/cachefiles/proc.c 2011-08-23 21:47:56.000000000 -0400
39050@@ -14,9 +14,9 @@
39051 #include <linux/seq_file.h>
39052 #include "internal.h"
39053
39054-atomic_t cachefiles_lookup_histogram[HZ];
39055-atomic_t cachefiles_mkdir_histogram[HZ];
39056-atomic_t cachefiles_create_histogram[HZ];
39057+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
39058+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
39059+atomic_unchecked_t cachefiles_create_histogram[HZ];
39060
39061 /*
39062 * display the latency histogram
39063@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
39064 return 0;
39065 default:
39066 index = (unsigned long) v - 3;
39067- x = atomic_read(&cachefiles_lookup_histogram[index]);
39068- y = atomic_read(&cachefiles_mkdir_histogram[index]);
39069- z = atomic_read(&cachefiles_create_histogram[index]);
39070+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
39071+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
39072+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
39073 if (x == 0 && y == 0 && z == 0)
39074 return 0;
39075
39076diff -urNp linux-3.0.4/fs/cachefiles/rdwr.c linux-3.0.4/fs/cachefiles/rdwr.c
39077--- linux-3.0.4/fs/cachefiles/rdwr.c 2011-07-21 22:17:23.000000000 -0400
39078+++ linux-3.0.4/fs/cachefiles/rdwr.c 2011-10-06 04:17:55.000000000 -0400
39079@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache
39080 old_fs = get_fs();
39081 set_fs(KERNEL_DS);
39082 ret = file->f_op->write(
39083- file, (const void __user *) data, len, &pos);
39084+ file, (const void __force_user *) data, len, &pos);
39085 set_fs(old_fs);
39086 kunmap(page);
39087 if (ret != len)
39088diff -urNp linux-3.0.4/fs/ceph/dir.c linux-3.0.4/fs/ceph/dir.c
39089--- linux-3.0.4/fs/ceph/dir.c 2011-07-21 22:17:23.000000000 -0400
39090+++ linux-3.0.4/fs/ceph/dir.c 2011-08-23 21:47:56.000000000 -0400
39091@@ -226,7 +226,7 @@ static int ceph_readdir(struct file *fil
39092 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
39093 struct ceph_mds_client *mdsc = fsc->mdsc;
39094 unsigned frag = fpos_frag(filp->f_pos);
39095- int off = fpos_off(filp->f_pos);
39096+ unsigned int off = fpos_off(filp->f_pos);
39097 int err;
39098 u32 ftype;
39099 struct ceph_mds_reply_info_parsed *rinfo;
39100diff -urNp linux-3.0.4/fs/cifs/cifs_debug.c linux-3.0.4/fs/cifs/cifs_debug.c
39101--- linux-3.0.4/fs/cifs/cifs_debug.c 2011-07-21 22:17:23.000000000 -0400
39102+++ linux-3.0.4/fs/cifs/cifs_debug.c 2011-08-25 17:18:05.000000000 -0400
39103@@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(str
39104
39105 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
39106 #ifdef CONFIG_CIFS_STATS2
39107- atomic_set(&totBufAllocCount, 0);
39108- atomic_set(&totSmBufAllocCount, 0);
39109+ atomic_set_unchecked(&totBufAllocCount, 0);
39110+ atomic_set_unchecked(&totSmBufAllocCount, 0);
39111 #endif /* CONFIG_CIFS_STATS2 */
39112 spin_lock(&cifs_tcp_ses_lock);
39113 list_for_each(tmp1, &cifs_tcp_ses_list) {
39114@@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(str
39115 tcon = list_entry(tmp3,
39116 struct cifs_tcon,
39117 tcon_list);
39118- atomic_set(&tcon->num_smbs_sent, 0);
39119- atomic_set(&tcon->num_writes, 0);
39120- atomic_set(&tcon->num_reads, 0);
39121- atomic_set(&tcon->num_oplock_brks, 0);
39122- atomic_set(&tcon->num_opens, 0);
39123- atomic_set(&tcon->num_posixopens, 0);
39124- atomic_set(&tcon->num_posixmkdirs, 0);
39125- atomic_set(&tcon->num_closes, 0);
39126- atomic_set(&tcon->num_deletes, 0);
39127- atomic_set(&tcon->num_mkdirs, 0);
39128- atomic_set(&tcon->num_rmdirs, 0);
39129- atomic_set(&tcon->num_renames, 0);
39130- atomic_set(&tcon->num_t2renames, 0);
39131- atomic_set(&tcon->num_ffirst, 0);
39132- atomic_set(&tcon->num_fnext, 0);
39133- atomic_set(&tcon->num_fclose, 0);
39134- atomic_set(&tcon->num_hardlinks, 0);
39135- atomic_set(&tcon->num_symlinks, 0);
39136- atomic_set(&tcon->num_locks, 0);
39137+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
39138+ atomic_set_unchecked(&tcon->num_writes, 0);
39139+ atomic_set_unchecked(&tcon->num_reads, 0);
39140+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
39141+ atomic_set_unchecked(&tcon->num_opens, 0);
39142+ atomic_set_unchecked(&tcon->num_posixopens, 0);
39143+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
39144+ atomic_set_unchecked(&tcon->num_closes, 0);
39145+ atomic_set_unchecked(&tcon->num_deletes, 0);
39146+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
39147+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
39148+ atomic_set_unchecked(&tcon->num_renames, 0);
39149+ atomic_set_unchecked(&tcon->num_t2renames, 0);
39150+ atomic_set_unchecked(&tcon->num_ffirst, 0);
39151+ atomic_set_unchecked(&tcon->num_fnext, 0);
39152+ atomic_set_unchecked(&tcon->num_fclose, 0);
39153+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
39154+ atomic_set_unchecked(&tcon->num_symlinks, 0);
39155+ atomic_set_unchecked(&tcon->num_locks, 0);
39156 }
39157 }
39158 }
39159@@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct s
39160 smBufAllocCount.counter, cifs_min_small);
39161 #ifdef CONFIG_CIFS_STATS2
39162 seq_printf(m, "Total Large %d Small %d Allocations\n",
39163- atomic_read(&totBufAllocCount),
39164- atomic_read(&totSmBufAllocCount));
39165+ atomic_read_unchecked(&totBufAllocCount),
39166+ atomic_read_unchecked(&totSmBufAllocCount));
39167 #endif /* CONFIG_CIFS_STATS2 */
39168
39169 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
39170@@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct s
39171 if (tcon->need_reconnect)
39172 seq_puts(m, "\tDISCONNECTED ");
39173 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
39174- atomic_read(&tcon->num_smbs_sent),
39175- atomic_read(&tcon->num_oplock_brks));
39176+ atomic_read_unchecked(&tcon->num_smbs_sent),
39177+ atomic_read_unchecked(&tcon->num_oplock_brks));
39178 seq_printf(m, "\nReads: %d Bytes: %lld",
39179- atomic_read(&tcon->num_reads),
39180+ atomic_read_unchecked(&tcon->num_reads),
39181 (long long)(tcon->bytes_read));
39182 seq_printf(m, "\nWrites: %d Bytes: %lld",
39183- atomic_read(&tcon->num_writes),
39184+ atomic_read_unchecked(&tcon->num_writes),
39185 (long long)(tcon->bytes_written));
39186 seq_printf(m, "\nFlushes: %d",
39187- atomic_read(&tcon->num_flushes));
39188+ atomic_read_unchecked(&tcon->num_flushes));
39189 seq_printf(m, "\nLocks: %d HardLinks: %d "
39190 "Symlinks: %d",
39191- atomic_read(&tcon->num_locks),
39192- atomic_read(&tcon->num_hardlinks),
39193- atomic_read(&tcon->num_symlinks));
39194+ atomic_read_unchecked(&tcon->num_locks),
39195+ atomic_read_unchecked(&tcon->num_hardlinks),
39196+ atomic_read_unchecked(&tcon->num_symlinks));
39197 seq_printf(m, "\nOpens: %d Closes: %d "
39198 "Deletes: %d",
39199- atomic_read(&tcon->num_opens),
39200- atomic_read(&tcon->num_closes),
39201- atomic_read(&tcon->num_deletes));
39202+ atomic_read_unchecked(&tcon->num_opens),
39203+ atomic_read_unchecked(&tcon->num_closes),
39204+ atomic_read_unchecked(&tcon->num_deletes));
39205 seq_printf(m, "\nPosix Opens: %d "
39206 "Posix Mkdirs: %d",
39207- atomic_read(&tcon->num_posixopens),
39208- atomic_read(&tcon->num_posixmkdirs));
39209+ atomic_read_unchecked(&tcon->num_posixopens),
39210+ atomic_read_unchecked(&tcon->num_posixmkdirs));
39211 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
39212- atomic_read(&tcon->num_mkdirs),
39213- atomic_read(&tcon->num_rmdirs));
39214+ atomic_read_unchecked(&tcon->num_mkdirs),
39215+ atomic_read_unchecked(&tcon->num_rmdirs));
39216 seq_printf(m, "\nRenames: %d T2 Renames %d",
39217- atomic_read(&tcon->num_renames),
39218- atomic_read(&tcon->num_t2renames));
39219+ atomic_read_unchecked(&tcon->num_renames),
39220+ atomic_read_unchecked(&tcon->num_t2renames));
39221 seq_printf(m, "\nFindFirst: %d FNext %d "
39222 "FClose %d",
39223- atomic_read(&tcon->num_ffirst),
39224- atomic_read(&tcon->num_fnext),
39225- atomic_read(&tcon->num_fclose));
39226+ atomic_read_unchecked(&tcon->num_ffirst),
39227+ atomic_read_unchecked(&tcon->num_fnext),
39228+ atomic_read_unchecked(&tcon->num_fclose));
39229 }
39230 }
39231 }
39232diff -urNp linux-3.0.4/fs/cifs/cifsfs.c linux-3.0.4/fs/cifs/cifsfs.c
39233--- linux-3.0.4/fs/cifs/cifsfs.c 2011-09-02 18:11:21.000000000 -0400
39234+++ linux-3.0.4/fs/cifs/cifsfs.c 2011-08-25 17:18:05.000000000 -0400
39235@@ -994,7 +994,7 @@ cifs_init_request_bufs(void)
39236 cifs_req_cachep = kmem_cache_create("cifs_request",
39237 CIFSMaxBufSize +
39238 MAX_CIFS_HDR_SIZE, 0,
39239- SLAB_HWCACHE_ALIGN, NULL);
39240+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
39241 if (cifs_req_cachep == NULL)
39242 return -ENOMEM;
39243
39244@@ -1021,7 +1021,7 @@ cifs_init_request_bufs(void)
39245 efficient to alloc 1 per page off the slab compared to 17K (5page)
39246 alloc of large cifs buffers even when page debugging is on */
39247 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
39248- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
39249+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
39250 NULL);
39251 if (cifs_sm_req_cachep == NULL) {
39252 mempool_destroy(cifs_req_poolp);
39253@@ -1106,8 +1106,8 @@ init_cifs(void)
39254 atomic_set(&bufAllocCount, 0);
39255 atomic_set(&smBufAllocCount, 0);
39256 #ifdef CONFIG_CIFS_STATS2
39257- atomic_set(&totBufAllocCount, 0);
39258- atomic_set(&totSmBufAllocCount, 0);
39259+ atomic_set_unchecked(&totBufAllocCount, 0);
39260+ atomic_set_unchecked(&totSmBufAllocCount, 0);
39261 #endif /* CONFIG_CIFS_STATS2 */
39262
39263 atomic_set(&midCount, 0);
39264diff -urNp linux-3.0.4/fs/cifs/cifsglob.h linux-3.0.4/fs/cifs/cifsglob.h
39265--- linux-3.0.4/fs/cifs/cifsglob.h 2011-07-21 22:17:23.000000000 -0400
39266+++ linux-3.0.4/fs/cifs/cifsglob.h 2011-08-25 17:18:05.000000000 -0400
39267@@ -381,28 +381,28 @@ struct cifs_tcon {
39268 __u16 Flags; /* optional support bits */
39269 enum statusEnum tidStatus;
39270 #ifdef CONFIG_CIFS_STATS
39271- atomic_t num_smbs_sent;
39272- atomic_t num_writes;
39273- atomic_t num_reads;
39274- atomic_t num_flushes;
39275- atomic_t num_oplock_brks;
39276- atomic_t num_opens;
39277- atomic_t num_closes;
39278- atomic_t num_deletes;
39279- atomic_t num_mkdirs;
39280- atomic_t num_posixopens;
39281- atomic_t num_posixmkdirs;
39282- atomic_t num_rmdirs;
39283- atomic_t num_renames;
39284- atomic_t num_t2renames;
39285- atomic_t num_ffirst;
39286- atomic_t num_fnext;
39287- atomic_t num_fclose;
39288- atomic_t num_hardlinks;
39289- atomic_t num_symlinks;
39290- atomic_t num_locks;
39291- atomic_t num_acl_get;
39292- atomic_t num_acl_set;
39293+ atomic_unchecked_t num_smbs_sent;
39294+ atomic_unchecked_t num_writes;
39295+ atomic_unchecked_t num_reads;
39296+ atomic_unchecked_t num_flushes;
39297+ atomic_unchecked_t num_oplock_brks;
39298+ atomic_unchecked_t num_opens;
39299+ atomic_unchecked_t num_closes;
39300+ atomic_unchecked_t num_deletes;
39301+ atomic_unchecked_t num_mkdirs;
39302+ atomic_unchecked_t num_posixopens;
39303+ atomic_unchecked_t num_posixmkdirs;
39304+ atomic_unchecked_t num_rmdirs;
39305+ atomic_unchecked_t num_renames;
39306+ atomic_unchecked_t num_t2renames;
39307+ atomic_unchecked_t num_ffirst;
39308+ atomic_unchecked_t num_fnext;
39309+ atomic_unchecked_t num_fclose;
39310+ atomic_unchecked_t num_hardlinks;
39311+ atomic_unchecked_t num_symlinks;
39312+ atomic_unchecked_t num_locks;
39313+ atomic_unchecked_t num_acl_get;
39314+ atomic_unchecked_t num_acl_set;
39315 #ifdef CONFIG_CIFS_STATS2
39316 unsigned long long time_writes;
39317 unsigned long long time_reads;
39318@@ -613,7 +613,7 @@ convert_delimiter(char *path, char delim
39319 }
39320
39321 #ifdef CONFIG_CIFS_STATS
39322-#define cifs_stats_inc atomic_inc
39323+#define cifs_stats_inc atomic_inc_unchecked
39324
39325 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
39326 unsigned int bytes)
39327@@ -911,8 +911,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnect
39328 /* Various Debug counters */
39329 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
39330 #ifdef CONFIG_CIFS_STATS2
39331-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
39332-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
39333+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
39334+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
39335 #endif
39336 GLOBAL_EXTERN atomic_t smBufAllocCount;
39337 GLOBAL_EXTERN atomic_t midCount;
39338diff -urNp linux-3.0.4/fs/cifs/link.c linux-3.0.4/fs/cifs/link.c
39339--- linux-3.0.4/fs/cifs/link.c 2011-07-21 22:17:23.000000000 -0400
39340+++ linux-3.0.4/fs/cifs/link.c 2011-08-23 21:47:56.000000000 -0400
39341@@ -587,7 +587,7 @@ symlink_exit:
39342
39343 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
39344 {
39345- char *p = nd_get_link(nd);
39346+ const char *p = nd_get_link(nd);
39347 if (!IS_ERR(p))
39348 kfree(p);
39349 }
39350diff -urNp linux-3.0.4/fs/cifs/misc.c linux-3.0.4/fs/cifs/misc.c
39351--- linux-3.0.4/fs/cifs/misc.c 2011-07-21 22:17:23.000000000 -0400
39352+++ linux-3.0.4/fs/cifs/misc.c 2011-08-25 17:18:05.000000000 -0400
39353@@ -156,7 +156,7 @@ cifs_buf_get(void)
39354 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
39355 atomic_inc(&bufAllocCount);
39356 #ifdef CONFIG_CIFS_STATS2
39357- atomic_inc(&totBufAllocCount);
39358+ atomic_inc_unchecked(&totBufAllocCount);
39359 #endif /* CONFIG_CIFS_STATS2 */
39360 }
39361
39362@@ -191,7 +191,7 @@ cifs_small_buf_get(void)
39363 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
39364 atomic_inc(&smBufAllocCount);
39365 #ifdef CONFIG_CIFS_STATS2
39366- atomic_inc(&totSmBufAllocCount);
39367+ atomic_inc_unchecked(&totSmBufAllocCount);
39368 #endif /* CONFIG_CIFS_STATS2 */
39369
39370 }
39371diff -urNp linux-3.0.4/fs/coda/cache.c linux-3.0.4/fs/coda/cache.c
39372--- linux-3.0.4/fs/coda/cache.c 2011-07-21 22:17:23.000000000 -0400
39373+++ linux-3.0.4/fs/coda/cache.c 2011-08-23 21:47:56.000000000 -0400
39374@@ -24,7 +24,7 @@
39375 #include "coda_linux.h"
39376 #include "coda_cache.h"
39377
39378-static atomic_t permission_epoch = ATOMIC_INIT(0);
39379+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
39380
39381 /* replace or extend an acl cache hit */
39382 void coda_cache_enter(struct inode *inode, int mask)
39383@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inod
39384 struct coda_inode_info *cii = ITOC(inode);
39385
39386 spin_lock(&cii->c_lock);
39387- cii->c_cached_epoch = atomic_read(&permission_epoch);
39388+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
39389 if (cii->c_uid != current_fsuid()) {
39390 cii->c_uid = current_fsuid();
39391 cii->c_cached_perm = mask;
39392@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode
39393 {
39394 struct coda_inode_info *cii = ITOC(inode);
39395 spin_lock(&cii->c_lock);
39396- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
39397+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
39398 spin_unlock(&cii->c_lock);
39399 }
39400
39401 /* remove all acl caches */
39402 void coda_cache_clear_all(struct super_block *sb)
39403 {
39404- atomic_inc(&permission_epoch);
39405+ atomic_inc_unchecked(&permission_epoch);
39406 }
39407
39408
39409@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode
39410 spin_lock(&cii->c_lock);
39411 hit = (mask & cii->c_cached_perm) == mask &&
39412 cii->c_uid == current_fsuid() &&
39413- cii->c_cached_epoch == atomic_read(&permission_epoch);
39414+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
39415 spin_unlock(&cii->c_lock);
39416
39417 return hit;
39418diff -urNp linux-3.0.4/fs/compat_binfmt_elf.c linux-3.0.4/fs/compat_binfmt_elf.c
39419--- linux-3.0.4/fs/compat_binfmt_elf.c 2011-07-21 22:17:23.000000000 -0400
39420+++ linux-3.0.4/fs/compat_binfmt_elf.c 2011-08-23 21:47:56.000000000 -0400
39421@@ -30,11 +30,13 @@
39422 #undef elf_phdr
39423 #undef elf_shdr
39424 #undef elf_note
39425+#undef elf_dyn
39426 #undef elf_addr_t
39427 #define elfhdr elf32_hdr
39428 #define elf_phdr elf32_phdr
39429 #define elf_shdr elf32_shdr
39430 #define elf_note elf32_note
39431+#define elf_dyn Elf32_Dyn
39432 #define elf_addr_t Elf32_Addr
39433
39434 /*
39435diff -urNp linux-3.0.4/fs/compat.c linux-3.0.4/fs/compat.c
39436--- linux-3.0.4/fs/compat.c 2011-07-21 22:17:23.000000000 -0400
39437+++ linux-3.0.4/fs/compat.c 2011-10-06 04:17:55.000000000 -0400
39438@@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(const
39439 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
39440 {
39441 compat_ino_t ino = stat->ino;
39442- typeof(ubuf->st_uid) uid = 0;
39443- typeof(ubuf->st_gid) gid = 0;
39444+ typeof(((struct compat_stat *)0)->st_uid) uid = 0;
39445+ typeof(((struct compat_stat *)0)->st_gid) gid = 0;
39446 int err;
39447
39448 SET_UID(uid, stat->uid);
39449@@ -508,7 +508,7 @@ compat_sys_io_setup(unsigned nr_reqs, u3
39450
39451 set_fs(KERNEL_DS);
39452 /* The __user pointer cast is valid because of the set_fs() */
39453- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
39454+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
39455 set_fs(oldfs);
39456 /* truncating is ok because it's a user address */
39457 if (!ret)
39458@@ -566,7 +566,7 @@ ssize_t compat_rw_copy_check_uvector(int
39459 goto out;
39460
39461 ret = -EINVAL;
39462- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
39463+ if (nr_segs > UIO_MAXIOV)
39464 goto out;
39465 if (nr_segs > fast_segs) {
39466 ret = -ENOMEM;
39467@@ -848,6 +848,7 @@ struct compat_old_linux_dirent {
39468
39469 struct compat_readdir_callback {
39470 struct compat_old_linux_dirent __user *dirent;
39471+ struct file * file;
39472 int result;
39473 };
39474
39475@@ -865,6 +866,10 @@ static int compat_fillonedir(void *__buf
39476 buf->result = -EOVERFLOW;
39477 return -EOVERFLOW;
39478 }
39479+
39480+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
39481+ return 0;
39482+
39483 buf->result++;
39484 dirent = buf->dirent;
39485 if (!access_ok(VERIFY_WRITE, dirent,
39486@@ -897,6 +902,7 @@ asmlinkage long compat_sys_old_readdir(u
39487
39488 buf.result = 0;
39489 buf.dirent = dirent;
39490+ buf.file = file;
39491
39492 error = vfs_readdir(file, compat_fillonedir, &buf);
39493 if (buf.result)
39494@@ -917,6 +923,7 @@ struct compat_linux_dirent {
39495 struct compat_getdents_callback {
39496 struct compat_linux_dirent __user *current_dir;
39497 struct compat_linux_dirent __user *previous;
39498+ struct file * file;
39499 int count;
39500 int error;
39501 };
39502@@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, c
39503 buf->error = -EOVERFLOW;
39504 return -EOVERFLOW;
39505 }
39506+
39507+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
39508+ return 0;
39509+
39510 dirent = buf->previous;
39511 if (dirent) {
39512 if (__put_user(offset, &dirent->d_off))
39513@@ -985,6 +996,7 @@ asmlinkage long compat_sys_getdents(unsi
39514 buf.previous = NULL;
39515 buf.count = count;
39516 buf.error = 0;
39517+ buf.file = file;
39518
39519 error = vfs_readdir(file, compat_filldir, &buf);
39520 if (error >= 0)
39521@@ -1006,6 +1018,7 @@ out:
39522 struct compat_getdents_callback64 {
39523 struct linux_dirent64 __user *current_dir;
39524 struct linux_dirent64 __user *previous;
39525+ struct file * file;
39526 int count;
39527 int error;
39528 };
39529@@ -1022,6 +1035,10 @@ static int compat_filldir64(void * __buf
39530 buf->error = -EINVAL; /* only used if we fail.. */
39531 if (reclen > buf->count)
39532 return -EINVAL;
39533+
39534+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
39535+ return 0;
39536+
39537 dirent = buf->previous;
39538
39539 if (dirent) {
39540@@ -1073,13 +1090,14 @@ asmlinkage long compat_sys_getdents64(un
39541 buf.previous = NULL;
39542 buf.count = count;
39543 buf.error = 0;
39544+ buf.file = file;
39545
39546 error = vfs_readdir(file, compat_filldir64, &buf);
39547 if (error >= 0)
39548 error = buf.error;
39549 lastdirent = buf.previous;
39550 if (lastdirent) {
39551- typeof(lastdirent->d_off) d_off = file->f_pos;
39552+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
39553 if (__put_user_unaligned(d_off, &lastdirent->d_off))
39554 error = -EFAULT;
39555 else
39556@@ -1446,6 +1464,8 @@ int compat_core_sys_select(int n, compat
39557 struct fdtable *fdt;
39558 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
39559
39560+ pax_track_stack();
39561+
39562 if (n < 0)
39563 goto out_nofds;
39564
39565@@ -1904,7 +1924,7 @@ asmlinkage long compat_sys_nfsservctl(in
39566 oldfs = get_fs();
39567 set_fs(KERNEL_DS);
39568 /* The __user pointer casts are valid because of the set_fs() */
39569- err = sys_nfsservctl(cmd, (void __user *) karg, (void __user *) kres);
39570+ err = sys_nfsservctl(cmd, (void __force_user *) karg, (void __force_user *) kres);
39571 set_fs(oldfs);
39572
39573 if (err)
39574diff -urNp linux-3.0.4/fs/compat_ioctl.c linux-3.0.4/fs/compat_ioctl.c
39575--- linux-3.0.4/fs/compat_ioctl.c 2011-07-21 22:17:23.000000000 -0400
39576+++ linux-3.0.4/fs/compat_ioctl.c 2011-10-06 04:17:55.000000000 -0400
39577@@ -208,6 +208,8 @@ static int do_video_set_spu_palette(unsi
39578
39579 err = get_user(palp, &up->palette);
39580 err |= get_user(length, &up->length);
39581+ if (err)
39582+ return -EFAULT;
39583
39584 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
39585 err = put_user(compat_ptr(palp), &up_native->palette);
39586@@ -619,7 +621,7 @@ static int serial_struct_ioctl(unsigned
39587 return -EFAULT;
39588 if (__get_user(udata, &ss32->iomem_base))
39589 return -EFAULT;
39590- ss.iomem_base = compat_ptr(udata);
39591+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
39592 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
39593 __get_user(ss.port_high, &ss32->port_high))
39594 return -EFAULT;
39595@@ -794,7 +796,7 @@ static int compat_ioctl_preallocate(stru
39596 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
39597 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
39598 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
39599- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
39600+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
39601 return -EFAULT;
39602
39603 return ioctl_preallocate(file, p);
39604@@ -1638,8 +1640,8 @@ asmlinkage long compat_sys_ioctl(unsigne
39605 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
39606 {
39607 unsigned int a, b;
39608- a = *(unsigned int *)p;
39609- b = *(unsigned int *)q;
39610+ a = *(const unsigned int *)p;
39611+ b = *(const unsigned int *)q;
39612 if (a > b)
39613 return 1;
39614 if (a < b)
39615diff -urNp linux-3.0.4/fs/configfs/dir.c linux-3.0.4/fs/configfs/dir.c
39616--- linux-3.0.4/fs/configfs/dir.c 2011-07-21 22:17:23.000000000 -0400
39617+++ linux-3.0.4/fs/configfs/dir.c 2011-08-23 21:47:56.000000000 -0400
39618@@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file
39619 }
39620 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
39621 struct configfs_dirent *next;
39622- const char * name;
39623+ const unsigned char * name;
39624+ char d_name[sizeof(next->s_dentry->d_iname)];
39625 int len;
39626 struct inode *inode = NULL;
39627
39628@@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file
39629 continue;
39630
39631 name = configfs_get_name(next);
39632- len = strlen(name);
39633+ if (next->s_dentry && name == next->s_dentry->d_iname) {
39634+ len = next->s_dentry->d_name.len;
39635+ memcpy(d_name, name, len);
39636+ name = d_name;
39637+ } else
39638+ len = strlen(name);
39639
39640 /*
39641 * We'll have a dentry and an inode for
39642diff -urNp linux-3.0.4/fs/dcache.c linux-3.0.4/fs/dcache.c
39643--- linux-3.0.4/fs/dcache.c 2011-07-21 22:17:23.000000000 -0400
39644+++ linux-3.0.4/fs/dcache.c 2011-08-23 21:47:56.000000000 -0400
39645@@ -3089,7 +3089,7 @@ void __init vfs_caches_init(unsigned lon
39646 mempages -= reserve;
39647
39648 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
39649- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
39650+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
39651
39652 dcache_init();
39653 inode_init();
39654diff -urNp linux-3.0.4/fs/ecryptfs/inode.c linux-3.0.4/fs/ecryptfs/inode.c
39655--- linux-3.0.4/fs/ecryptfs/inode.c 2011-09-02 18:11:21.000000000 -0400
39656+++ linux-3.0.4/fs/ecryptfs/inode.c 2011-10-06 04:17:55.000000000 -0400
39657@@ -704,7 +704,7 @@ static int ecryptfs_readlink_lower(struc
39658 old_fs = get_fs();
39659 set_fs(get_ds());
39660 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
39661- (char __user *)lower_buf,
39662+ (char __force_user *)lower_buf,
39663 lower_bufsiz);
39664 set_fs(old_fs);
39665 if (rc < 0)
39666@@ -750,7 +750,7 @@ static void *ecryptfs_follow_link(struct
39667 }
39668 old_fs = get_fs();
39669 set_fs(get_ds());
39670- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
39671+ rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
39672 set_fs(old_fs);
39673 if (rc < 0) {
39674 kfree(buf);
39675@@ -765,7 +765,7 @@ out:
39676 static void
39677 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
39678 {
39679- char *buf = nd_get_link(nd);
39680+ const char *buf = nd_get_link(nd);
39681 if (!IS_ERR(buf)) {
39682 /* Free the char* */
39683 kfree(buf);
39684diff -urNp linux-3.0.4/fs/ecryptfs/miscdev.c linux-3.0.4/fs/ecryptfs/miscdev.c
39685--- linux-3.0.4/fs/ecryptfs/miscdev.c 2011-07-21 22:17:23.000000000 -0400
39686+++ linux-3.0.4/fs/ecryptfs/miscdev.c 2011-08-23 21:47:56.000000000 -0400
39687@@ -328,7 +328,7 @@ check_list:
39688 goto out_unlock_msg_ctx;
39689 i = 5;
39690 if (msg_ctx->msg) {
39691- if (copy_to_user(&buf[i], packet_length, packet_length_size))
39692+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
39693 goto out_unlock_msg_ctx;
39694 i += packet_length_size;
39695 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
39696diff -urNp linux-3.0.4/fs/ecryptfs/read_write.c linux-3.0.4/fs/ecryptfs/read_write.c
39697--- linux-3.0.4/fs/ecryptfs/read_write.c 2011-09-02 18:11:21.000000000 -0400
39698+++ linux-3.0.4/fs/ecryptfs/read_write.c 2011-10-06 04:17:55.000000000 -0400
39699@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *e
39700 return -EIO;
39701 fs_save = get_fs();
39702 set_fs(get_ds());
39703- rc = vfs_write(lower_file, data, size, &offset);
39704+ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
39705 set_fs(fs_save);
39706 mark_inode_dirty_sync(ecryptfs_inode);
39707 return rc;
39708@@ -235,7 +235,7 @@ int ecryptfs_read_lower(char *data, loff
39709 return -EIO;
39710 fs_save = get_fs();
39711 set_fs(get_ds());
39712- rc = vfs_read(lower_file, data, size, &offset);
39713+ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
39714 set_fs(fs_save);
39715 return rc;
39716 }
39717diff -urNp linux-3.0.4/fs/exec.c linux-3.0.4/fs/exec.c
39718--- linux-3.0.4/fs/exec.c 2011-07-21 22:17:23.000000000 -0400
39719+++ linux-3.0.4/fs/exec.c 2011-10-06 04:17:55.000000000 -0400
39720@@ -55,12 +55,24 @@
39721 #include <linux/pipe_fs_i.h>
39722 #include <linux/oom.h>
39723 #include <linux/compat.h>
39724+#include <linux/random.h>
39725+#include <linux/seq_file.h>
39726+
39727+#ifdef CONFIG_PAX_REFCOUNT
39728+#include <linux/kallsyms.h>
39729+#include <linux/kdebug.h>
39730+#endif
39731
39732 #include <asm/uaccess.h>
39733 #include <asm/mmu_context.h>
39734 #include <asm/tlb.h>
39735 #include "internal.h"
39736
39737+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
39738+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
39739+EXPORT_SYMBOL(pax_set_initial_flags_func);
39740+#endif
39741+
39742 int core_uses_pid;
39743 char core_pattern[CORENAME_MAX_SIZE] = "core";
39744 unsigned int core_pipe_limit;
39745@@ -70,7 +82,7 @@ struct core_name {
39746 char *corename;
39747 int used, size;
39748 };
39749-static atomic_t call_count = ATOMIC_INIT(1);
39750+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
39751
39752 /* The maximal length of core_pattern is also specified in sysctl.c */
39753
39754@@ -116,7 +128,7 @@ SYSCALL_DEFINE1(uselib, const char __use
39755 char *tmp = getname(library);
39756 int error = PTR_ERR(tmp);
39757 static const struct open_flags uselib_flags = {
39758- .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
39759+ .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
39760 .acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN,
39761 .intent = LOOKUP_OPEN
39762 };
39763@@ -195,18 +207,10 @@ static struct page *get_arg_page(struct
39764 int write)
39765 {
39766 struct page *page;
39767- int ret;
39768
39769-#ifdef CONFIG_STACK_GROWSUP
39770- if (write) {
39771- ret = expand_downwards(bprm->vma, pos);
39772- if (ret < 0)
39773- return NULL;
39774- }
39775-#endif
39776- ret = get_user_pages(current, bprm->mm, pos,
39777- 1, write, 1, &page, NULL);
39778- if (ret <= 0)
39779+ if (0 > expand_downwards(bprm->vma, pos))
39780+ return NULL;
39781+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
39782 return NULL;
39783
39784 if (write) {
39785@@ -281,6 +285,11 @@ static int __bprm_mm_init(struct linux_b
39786 vma->vm_end = STACK_TOP_MAX;
39787 vma->vm_start = vma->vm_end - PAGE_SIZE;
39788 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
39789+
39790+#ifdef CONFIG_PAX_SEGMEXEC
39791+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
39792+#endif
39793+
39794 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
39795 INIT_LIST_HEAD(&vma->anon_vma_chain);
39796
39797@@ -295,6 +304,12 @@ static int __bprm_mm_init(struct linux_b
39798 mm->stack_vm = mm->total_vm = 1;
39799 up_write(&mm->mmap_sem);
39800 bprm->p = vma->vm_end - sizeof(void *);
39801+
39802+#ifdef CONFIG_PAX_RANDUSTACK
39803+ if (randomize_va_space)
39804+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
39805+#endif
39806+
39807 return 0;
39808 err:
39809 up_write(&mm->mmap_sem);
39810@@ -403,19 +418,7 @@ err:
39811 return err;
39812 }
39813
39814-struct user_arg_ptr {
39815-#ifdef CONFIG_COMPAT
39816- bool is_compat;
39817-#endif
39818- union {
39819- const char __user *const __user *native;
39820-#ifdef CONFIG_COMPAT
39821- compat_uptr_t __user *compat;
39822-#endif
39823- } ptr;
39824-};
39825-
39826-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
39827+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
39828 {
39829 const char __user *native;
39830
39831@@ -424,14 +427,14 @@ static const char __user *get_user_arg_p
39832 compat_uptr_t compat;
39833
39834 if (get_user(compat, argv.ptr.compat + nr))
39835- return ERR_PTR(-EFAULT);
39836+ return (const char __force_user *)ERR_PTR(-EFAULT);
39837
39838 return compat_ptr(compat);
39839 }
39840 #endif
39841
39842 if (get_user(native, argv.ptr.native + nr))
39843- return ERR_PTR(-EFAULT);
39844+ return (const char __force_user *)ERR_PTR(-EFAULT);
39845
39846 return native;
39847 }
39848@@ -450,7 +453,7 @@ static int count(struct user_arg_ptr arg
39849 if (!p)
39850 break;
39851
39852- if (IS_ERR(p))
39853+ if (IS_ERR((const char __force_kernel *)p))
39854 return -EFAULT;
39855
39856 if (i++ >= max)
39857@@ -484,7 +487,7 @@ static int copy_strings(int argc, struct
39858
39859 ret = -EFAULT;
39860 str = get_user_arg_ptr(argv, argc);
39861- if (IS_ERR(str))
39862+ if (IS_ERR((const char __force_kernel *)str))
39863 goto out;
39864
39865 len = strnlen_user(str, MAX_ARG_STRLEN);
39866@@ -566,7 +569,7 @@ int copy_strings_kernel(int argc, const
39867 int r;
39868 mm_segment_t oldfs = get_fs();
39869 struct user_arg_ptr argv = {
39870- .ptr.native = (const char __user *const __user *)__argv,
39871+ .ptr.native = (const char __force_user *const __force_user *)__argv,
39872 };
39873
39874 set_fs(KERNEL_DS);
39875@@ -601,7 +604,8 @@ static int shift_arg_pages(struct vm_are
39876 unsigned long new_end = old_end - shift;
39877 struct mmu_gather tlb;
39878
39879- BUG_ON(new_start > new_end);
39880+ if (new_start >= new_end || new_start < mmap_min_addr)
39881+ return -ENOMEM;
39882
39883 /*
39884 * ensure there are no vmas between where we want to go
39885@@ -610,6 +614,10 @@ static int shift_arg_pages(struct vm_are
39886 if (vma != find_vma(mm, new_start))
39887 return -EFAULT;
39888
39889+#ifdef CONFIG_PAX_SEGMEXEC
39890+ BUG_ON(pax_find_mirror_vma(vma));
39891+#endif
39892+
39893 /*
39894 * cover the whole range: [new_start, old_end)
39895 */
39896@@ -690,10 +698,6 @@ int setup_arg_pages(struct linux_binprm
39897 stack_top = arch_align_stack(stack_top);
39898 stack_top = PAGE_ALIGN(stack_top);
39899
39900- if (unlikely(stack_top < mmap_min_addr) ||
39901- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
39902- return -ENOMEM;
39903-
39904 stack_shift = vma->vm_end - stack_top;
39905
39906 bprm->p -= stack_shift;
39907@@ -705,8 +709,28 @@ int setup_arg_pages(struct linux_binprm
39908 bprm->exec -= stack_shift;
39909
39910 down_write(&mm->mmap_sem);
39911+
39912+ /* Move stack pages down in memory. */
39913+ if (stack_shift) {
39914+ ret = shift_arg_pages(vma, stack_shift);
39915+ if (ret)
39916+ goto out_unlock;
39917+ }
39918+
39919 vm_flags = VM_STACK_FLAGS;
39920
39921+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
39922+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39923+ vm_flags &= ~VM_EXEC;
39924+
39925+#ifdef CONFIG_PAX_MPROTECT
39926+ if (mm->pax_flags & MF_PAX_MPROTECT)
39927+ vm_flags &= ~VM_MAYEXEC;
39928+#endif
39929+
39930+ }
39931+#endif
39932+
39933 /*
39934 * Adjust stack execute permissions; explicitly enable for
39935 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
39936@@ -725,13 +749,6 @@ int setup_arg_pages(struct linux_binprm
39937 goto out_unlock;
39938 BUG_ON(prev != vma);
39939
39940- /* Move stack pages down in memory. */
39941- if (stack_shift) {
39942- ret = shift_arg_pages(vma, stack_shift);
39943- if (ret)
39944- goto out_unlock;
39945- }
39946-
39947 /* mprotect_fixup is overkill to remove the temporary stack flags */
39948 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
39949
39950@@ -771,7 +788,7 @@ struct file *open_exec(const char *name)
39951 struct file *file;
39952 int err;
39953 static const struct open_flags open_exec_flags = {
39954- .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
39955+ .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
39956 .acc_mode = MAY_EXEC | MAY_OPEN,
39957 .intent = LOOKUP_OPEN
39958 };
39959@@ -812,7 +829,7 @@ int kernel_read(struct file *file, loff_
39960 old_fs = get_fs();
39961 set_fs(get_ds());
39962 /* The cast to a user pointer is valid due to the set_fs() */
39963- result = vfs_read(file, (void __user *)addr, count, &pos);
39964+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
39965 set_fs(old_fs);
39966 return result;
39967 }
39968@@ -1236,7 +1253,7 @@ int check_unsafe_exec(struct linux_binpr
39969 }
39970 rcu_read_unlock();
39971
39972- if (p->fs->users > n_fs) {
39973+ if (atomic_read(&p->fs->users) > n_fs) {
39974 bprm->unsafe |= LSM_UNSAFE_SHARE;
39975 } else {
39976 res = -EAGAIN;
39977@@ -1428,11 +1445,35 @@ static int do_execve_common(const char *
39978 struct user_arg_ptr envp,
39979 struct pt_regs *regs)
39980 {
39981+#ifdef CONFIG_GRKERNSEC
39982+ struct file *old_exec_file;
39983+ struct acl_subject_label *old_acl;
39984+ struct rlimit old_rlim[RLIM_NLIMITS];
39985+#endif
39986 struct linux_binprm *bprm;
39987 struct file *file;
39988 struct files_struct *displaced;
39989 bool clear_in_exec;
39990 int retval;
39991+ const struct cred *cred = current_cred();
39992+
39993+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
39994+
39995+ /*
39996+ * We move the actual failure in case of RLIMIT_NPROC excess from
39997+ * set*uid() to execve() because too many poorly written programs
39998+ * don't check setuid() return code. Here we additionally recheck
39999+ * whether NPROC limit is still exceeded.
40000+ */
40001+ if ((current->flags & PF_NPROC_EXCEEDED) &&
40002+ atomic_read(&cred->user->processes) > rlimit(RLIMIT_NPROC)) {
40003+ retval = -EAGAIN;
40004+ goto out_ret;
40005+ }
40006+
40007+ /* We're below the limit (still or again), so we don't want to make
40008+ * further execve() calls fail. */
40009+ current->flags &= ~PF_NPROC_EXCEEDED;
40010
40011 retval = unshare_files(&displaced);
40012 if (retval)
40013@@ -1464,6 +1505,16 @@ static int do_execve_common(const char *
40014 bprm->filename = filename;
40015 bprm->interp = filename;
40016
40017+ if (gr_process_user_ban()) {
40018+ retval = -EPERM;
40019+ goto out_file;
40020+ }
40021+
40022+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
40023+ retval = -EACCES;
40024+ goto out_file;
40025+ }
40026+
40027 retval = bprm_mm_init(bprm);
40028 if (retval)
40029 goto out_file;
40030@@ -1493,9 +1544,40 @@ static int do_execve_common(const char *
40031 if (retval < 0)
40032 goto out;
40033
40034+ if (!gr_tpe_allow(file)) {
40035+ retval = -EACCES;
40036+ goto out;
40037+ }
40038+
40039+ if (gr_check_crash_exec(file)) {
40040+ retval = -EACCES;
40041+ goto out;
40042+ }
40043+
40044+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
40045+
40046+ gr_handle_exec_args(bprm, argv);
40047+
40048+#ifdef CONFIG_GRKERNSEC
40049+ old_acl = current->acl;
40050+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
40051+ old_exec_file = current->exec_file;
40052+ get_file(file);
40053+ current->exec_file = file;
40054+#endif
40055+
40056+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
40057+ bprm->unsafe & LSM_UNSAFE_SHARE);
40058+ if (retval < 0)
40059+ goto out_fail;
40060+
40061 retval = search_binary_handler(bprm,regs);
40062 if (retval < 0)
40063- goto out;
40064+ goto out_fail;
40065+#ifdef CONFIG_GRKERNSEC
40066+ if (old_exec_file)
40067+ fput(old_exec_file);
40068+#endif
40069
40070 /* execve succeeded */
40071 current->fs->in_exec = 0;
40072@@ -1506,6 +1588,14 @@ static int do_execve_common(const char *
40073 put_files_struct(displaced);
40074 return retval;
40075
40076+out_fail:
40077+#ifdef CONFIG_GRKERNSEC
40078+ current->acl = old_acl;
40079+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
40080+ fput(current->exec_file);
40081+ current->exec_file = old_exec_file;
40082+#endif
40083+
40084 out:
40085 if (bprm->mm) {
40086 acct_arg_size(bprm, 0);
40087@@ -1579,7 +1669,7 @@ static int expand_corename(struct core_n
40088 {
40089 char *old_corename = cn->corename;
40090
40091- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
40092+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
40093 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
40094
40095 if (!cn->corename) {
40096@@ -1667,7 +1757,7 @@ static int format_corename(struct core_n
40097 int pid_in_pattern = 0;
40098 int err = 0;
40099
40100- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
40101+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
40102 cn->corename = kmalloc(cn->size, GFP_KERNEL);
40103 cn->used = 0;
40104
40105@@ -1758,6 +1848,219 @@ out:
40106 return ispipe;
40107 }
40108
40109+int pax_check_flags(unsigned long *flags)
40110+{
40111+ int retval = 0;
40112+
40113+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
40114+ if (*flags & MF_PAX_SEGMEXEC)
40115+ {
40116+ *flags &= ~MF_PAX_SEGMEXEC;
40117+ retval = -EINVAL;
40118+ }
40119+#endif
40120+
40121+ if ((*flags & MF_PAX_PAGEEXEC)
40122+
40123+#ifdef CONFIG_PAX_PAGEEXEC
40124+ && (*flags & MF_PAX_SEGMEXEC)
40125+#endif
40126+
40127+ )
40128+ {
40129+ *flags &= ~MF_PAX_PAGEEXEC;
40130+ retval = -EINVAL;
40131+ }
40132+
40133+ if ((*flags & MF_PAX_MPROTECT)
40134+
40135+#ifdef CONFIG_PAX_MPROTECT
40136+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
40137+#endif
40138+
40139+ )
40140+ {
40141+ *flags &= ~MF_PAX_MPROTECT;
40142+ retval = -EINVAL;
40143+ }
40144+
40145+ if ((*flags & MF_PAX_EMUTRAMP)
40146+
40147+#ifdef CONFIG_PAX_EMUTRAMP
40148+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
40149+#endif
40150+
40151+ )
40152+ {
40153+ *flags &= ~MF_PAX_EMUTRAMP;
40154+ retval = -EINVAL;
40155+ }
40156+
40157+ return retval;
40158+}
40159+
40160+EXPORT_SYMBOL(pax_check_flags);
40161+
40162+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
40163+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
40164+{
40165+ struct task_struct *tsk = current;
40166+ struct mm_struct *mm = current->mm;
40167+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
40168+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
40169+ char *path_exec = NULL;
40170+ char *path_fault = NULL;
40171+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
40172+
40173+ if (buffer_exec && buffer_fault) {
40174+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
40175+
40176+ down_read(&mm->mmap_sem);
40177+ vma = mm->mmap;
40178+ while (vma && (!vma_exec || !vma_fault)) {
40179+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
40180+ vma_exec = vma;
40181+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
40182+ vma_fault = vma;
40183+ vma = vma->vm_next;
40184+ }
40185+ if (vma_exec) {
40186+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
40187+ if (IS_ERR(path_exec))
40188+ path_exec = "<path too long>";
40189+ else {
40190+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
40191+ if (path_exec) {
40192+ *path_exec = 0;
40193+ path_exec = buffer_exec;
40194+ } else
40195+ path_exec = "<path too long>";
40196+ }
40197+ }
40198+ if (vma_fault) {
40199+ start = vma_fault->vm_start;
40200+ end = vma_fault->vm_end;
40201+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
40202+ if (vma_fault->vm_file) {
40203+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
40204+ if (IS_ERR(path_fault))
40205+ path_fault = "<path too long>";
40206+ else {
40207+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
40208+ if (path_fault) {
40209+ *path_fault = 0;
40210+ path_fault = buffer_fault;
40211+ } else
40212+ path_fault = "<path too long>";
40213+ }
40214+ } else
40215+ path_fault = "<anonymous mapping>";
40216+ }
40217+ up_read(&mm->mmap_sem);
40218+ }
40219+ if (tsk->signal->curr_ip)
40220+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
40221+ else
40222+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
40223+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
40224+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
40225+ task_uid(tsk), task_euid(tsk), pc, sp);
40226+ free_page((unsigned long)buffer_exec);
40227+ free_page((unsigned long)buffer_fault);
40228+ pax_report_insns(pc, sp);
40229+ do_coredump(SIGKILL, SIGKILL, regs);
40230+}
40231+#endif
40232+
40233+#ifdef CONFIG_PAX_REFCOUNT
40234+void pax_report_refcount_overflow(struct pt_regs *regs)
40235+{
40236+ if (current->signal->curr_ip)
40237+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
40238+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
40239+ else
40240+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
40241+ current->comm, task_pid_nr(current), current_uid(), current_euid());
40242+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
40243+ show_regs(regs);
40244+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
40245+}
40246+#endif
40247+
40248+#ifdef CONFIG_PAX_USERCOPY
40249+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
40250+int object_is_on_stack(const void *obj, unsigned long len)
40251+{
40252+ const void * const stack = task_stack_page(current);
40253+ const void * const stackend = stack + THREAD_SIZE;
40254+
40255+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
40256+ const void *frame = NULL;
40257+ const void *oldframe;
40258+#endif
40259+
40260+ if (obj + len < obj)
40261+ return -1;
40262+
40263+ if (obj + len <= stack || stackend <= obj)
40264+ return 0;
40265+
40266+ if (obj < stack || stackend < obj + len)
40267+ return -1;
40268+
40269+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
40270+ oldframe = __builtin_frame_address(1);
40271+ if (oldframe)
40272+ frame = __builtin_frame_address(2);
40273+ /*
40274+ low ----------------------------------------------> high
40275+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
40276+ ^----------------^
40277+ allow copies only within here
40278+ */
40279+ while (stack <= frame && frame < stackend) {
40280+ /* if obj + len extends past the last frame, this
40281+ check won't pass and the next frame will be 0,
40282+ causing us to bail out and correctly report
40283+ the copy as invalid
40284+ */
40285+ if (obj + len <= frame)
40286+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
40287+ oldframe = frame;
40288+ frame = *(const void * const *)frame;
40289+ }
40290+ return -1;
40291+#else
40292+ return 1;
40293+#endif
40294+}
40295+
40296+
40297+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
40298+{
40299+ if (current->signal->curr_ip)
40300+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
40301+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
40302+ else
40303+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
40304+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
40305+ dump_stack();
40306+ gr_handle_kernel_exploit();
40307+ do_group_exit(SIGKILL);
40308+}
40309+#endif
40310+
40311+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
40312+void pax_track_stack(void)
40313+{
40314+ unsigned long sp = (unsigned long)&sp;
40315+ if (sp < current_thread_info()->lowest_stack &&
40316+ sp > (unsigned long)task_stack_page(current))
40317+ current_thread_info()->lowest_stack = sp;
40318+}
40319+EXPORT_SYMBOL(pax_track_stack);
40320+#endif
40321+
40322 static int zap_process(struct task_struct *start, int exit_code)
40323 {
40324 struct task_struct *t;
40325@@ -1969,17 +2272,17 @@ static void wait_for_dump_helpers(struct
40326 pipe = file->f_path.dentry->d_inode->i_pipe;
40327
40328 pipe_lock(pipe);
40329- pipe->readers++;
40330- pipe->writers--;
40331+ atomic_inc(&pipe->readers);
40332+ atomic_dec(&pipe->writers);
40333
40334- while ((pipe->readers > 1) && (!signal_pending(current))) {
40335+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
40336 wake_up_interruptible_sync(&pipe->wait);
40337 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
40338 pipe_wait(pipe);
40339 }
40340
40341- pipe->readers--;
40342- pipe->writers++;
40343+ atomic_dec(&pipe->readers);
40344+ atomic_inc(&pipe->writers);
40345 pipe_unlock(pipe);
40346
40347 }
40348@@ -2040,7 +2343,7 @@ void do_coredump(long signr, int exit_co
40349 int retval = 0;
40350 int flag = 0;
40351 int ispipe;
40352- static atomic_t core_dump_count = ATOMIC_INIT(0);
40353+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
40354 struct coredump_params cprm = {
40355 .signr = signr,
40356 .regs = regs,
40357@@ -2055,6 +2358,9 @@ void do_coredump(long signr, int exit_co
40358
40359 audit_core_dumps(signr);
40360
40361+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
40362+ gr_handle_brute_attach(current, cprm.mm_flags);
40363+
40364 binfmt = mm->binfmt;
40365 if (!binfmt || !binfmt->core_dump)
40366 goto fail;
40367@@ -2095,6 +2401,8 @@ void do_coredump(long signr, int exit_co
40368 goto fail_corename;
40369 }
40370
40371+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
40372+
40373 if (ispipe) {
40374 int dump_count;
40375 char **helper_argv;
40376@@ -2122,7 +2430,7 @@ void do_coredump(long signr, int exit_co
40377 }
40378 cprm.limit = RLIM_INFINITY;
40379
40380- dump_count = atomic_inc_return(&core_dump_count);
40381+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
40382 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
40383 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
40384 task_tgid_vnr(current), current->comm);
40385@@ -2192,7 +2500,7 @@ close_fail:
40386 filp_close(cprm.file, NULL);
40387 fail_dropcount:
40388 if (ispipe)
40389- atomic_dec(&core_dump_count);
40390+ atomic_dec_unchecked(&core_dump_count);
40391 fail_unlock:
40392 kfree(cn.corename);
40393 fail_corename:
40394@@ -2211,7 +2519,7 @@ fail:
40395 */
40396 int dump_write(struct file *file, const void *addr, int nr)
40397 {
40398- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
40399+ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
40400 }
40401 EXPORT_SYMBOL(dump_write);
40402
40403diff -urNp linux-3.0.4/fs/ext2/balloc.c linux-3.0.4/fs/ext2/balloc.c
40404--- linux-3.0.4/fs/ext2/balloc.c 2011-07-21 22:17:23.000000000 -0400
40405+++ linux-3.0.4/fs/ext2/balloc.c 2011-08-23 21:48:14.000000000 -0400
40406@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
40407
40408 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
40409 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
40410- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
40411+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
40412 sbi->s_resuid != current_fsuid() &&
40413 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
40414 return 0;
40415diff -urNp linux-3.0.4/fs/ext3/balloc.c linux-3.0.4/fs/ext3/balloc.c
40416--- linux-3.0.4/fs/ext3/balloc.c 2011-07-21 22:17:23.000000000 -0400
40417+++ linux-3.0.4/fs/ext3/balloc.c 2011-08-23 21:48:14.000000000 -0400
40418@@ -1441,7 +1441,7 @@ static int ext3_has_free_blocks(struct e
40419
40420 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
40421 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
40422- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
40423+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
40424 sbi->s_resuid != current_fsuid() &&
40425 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
40426 return 0;
40427diff -urNp linux-3.0.4/fs/ext3/ioctl.c linux-3.0.4/fs/ext3/ioctl.c
40428--- linux-3.0.4/fs/ext3/ioctl.c 2011-07-21 22:17:23.000000000 -0400
40429+++ linux-3.0.4/fs/ext3/ioctl.c 2011-10-06 04:17:55.000000000 -0400
40430@@ -285,7 +285,7 @@ group_add_out:
40431 if (!capable(CAP_SYS_ADMIN))
40432 return -EPERM;
40433
40434- if (copy_from_user(&range, (struct fstrim_range *)arg,
40435+ if (copy_from_user(&range, (struct fstrim_range __user *)arg,
40436 sizeof(range)))
40437 return -EFAULT;
40438
40439@@ -293,7 +293,7 @@ group_add_out:
40440 if (ret < 0)
40441 return ret;
40442
40443- if (copy_to_user((struct fstrim_range *)arg, &range,
40444+ if (copy_to_user((struct fstrim_range __user *)arg, &range,
40445 sizeof(range)))
40446 return -EFAULT;
40447
40448diff -urNp linux-3.0.4/fs/ext4/balloc.c linux-3.0.4/fs/ext4/balloc.c
40449--- linux-3.0.4/fs/ext4/balloc.c 2011-07-21 22:17:23.000000000 -0400
40450+++ linux-3.0.4/fs/ext4/balloc.c 2011-08-23 21:48:14.000000000 -0400
40451@@ -394,8 +394,8 @@ static int ext4_has_free_blocks(struct e
40452 /* Hm, nope. Are (enough) root reserved blocks available? */
40453 if (sbi->s_resuid == current_fsuid() ||
40454 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
40455- capable(CAP_SYS_RESOURCE) ||
40456- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
40457+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
40458+ capable_nolog(CAP_SYS_RESOURCE)) {
40459
40460 if (free_blocks >= (nblocks + dirty_blocks))
40461 return 1;
40462diff -urNp linux-3.0.4/fs/ext4/ext4.h linux-3.0.4/fs/ext4/ext4.h
40463--- linux-3.0.4/fs/ext4/ext4.h 2011-09-02 18:11:21.000000000 -0400
40464+++ linux-3.0.4/fs/ext4/ext4.h 2011-08-23 21:47:56.000000000 -0400
40465@@ -1177,19 +1177,19 @@ struct ext4_sb_info {
40466 unsigned long s_mb_last_start;
40467
40468 /* stats for buddy allocator */
40469- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
40470- atomic_t s_bal_success; /* we found long enough chunks */
40471- atomic_t s_bal_allocated; /* in blocks */
40472- atomic_t s_bal_ex_scanned; /* total extents scanned */
40473- atomic_t s_bal_goals; /* goal hits */
40474- atomic_t s_bal_breaks; /* too long searches */
40475- atomic_t s_bal_2orders; /* 2^order hits */
40476+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
40477+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
40478+ atomic_unchecked_t s_bal_allocated; /* in blocks */
40479+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
40480+ atomic_unchecked_t s_bal_goals; /* goal hits */
40481+ atomic_unchecked_t s_bal_breaks; /* too long searches */
40482+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
40483 spinlock_t s_bal_lock;
40484 unsigned long s_mb_buddies_generated;
40485 unsigned long long s_mb_generation_time;
40486- atomic_t s_mb_lost_chunks;
40487- atomic_t s_mb_preallocated;
40488- atomic_t s_mb_discarded;
40489+ atomic_unchecked_t s_mb_lost_chunks;
40490+ atomic_unchecked_t s_mb_preallocated;
40491+ atomic_unchecked_t s_mb_discarded;
40492 atomic_t s_lock_busy;
40493
40494 /* locality groups */
40495diff -urNp linux-3.0.4/fs/ext4/ioctl.c linux-3.0.4/fs/ext4/ioctl.c
40496--- linux-3.0.4/fs/ext4/ioctl.c 2011-07-21 22:17:23.000000000 -0400
40497+++ linux-3.0.4/fs/ext4/ioctl.c 2011-10-06 04:17:55.000000000 -0400
40498@@ -344,7 +344,7 @@ mext_out:
40499 if (!blk_queue_discard(q))
40500 return -EOPNOTSUPP;
40501
40502- if (copy_from_user(&range, (struct fstrim_range *)arg,
40503+ if (copy_from_user(&range, (struct fstrim_range __user *)arg,
40504 sizeof(range)))
40505 return -EFAULT;
40506
40507@@ -354,7 +354,7 @@ mext_out:
40508 if (ret < 0)
40509 return ret;
40510
40511- if (copy_to_user((struct fstrim_range *)arg, &range,
40512+ if (copy_to_user((struct fstrim_range __user *)arg, &range,
40513 sizeof(range)))
40514 return -EFAULT;
40515
40516diff -urNp linux-3.0.4/fs/ext4/mballoc.c linux-3.0.4/fs/ext4/mballoc.c
40517--- linux-3.0.4/fs/ext4/mballoc.c 2011-09-02 18:11:21.000000000 -0400
40518+++ linux-3.0.4/fs/ext4/mballoc.c 2011-08-23 21:48:14.000000000 -0400
40519@@ -1793,7 +1793,7 @@ void ext4_mb_simple_scan_group(struct ex
40520 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
40521
40522 if (EXT4_SB(sb)->s_mb_stats)
40523- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
40524+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
40525
40526 break;
40527 }
40528@@ -2087,7 +2087,7 @@ repeat:
40529 ac->ac_status = AC_STATUS_CONTINUE;
40530 ac->ac_flags |= EXT4_MB_HINT_FIRST;
40531 cr = 3;
40532- atomic_inc(&sbi->s_mb_lost_chunks);
40533+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
40534 goto repeat;
40535 }
40536 }
40537@@ -2130,6 +2130,8 @@ static int ext4_mb_seq_groups_show(struc
40538 ext4_grpblk_t counters[16];
40539 } sg;
40540
40541+ pax_track_stack();
40542+
40543 group--;
40544 if (group == 0)
40545 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
40546@@ -2553,25 +2555,25 @@ int ext4_mb_release(struct super_block *
40547 if (sbi->s_mb_stats) {
40548 printk(KERN_INFO
40549 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
40550- atomic_read(&sbi->s_bal_allocated),
40551- atomic_read(&sbi->s_bal_reqs),
40552- atomic_read(&sbi->s_bal_success));
40553+ atomic_read_unchecked(&sbi->s_bal_allocated),
40554+ atomic_read_unchecked(&sbi->s_bal_reqs),
40555+ atomic_read_unchecked(&sbi->s_bal_success));
40556 printk(KERN_INFO
40557 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
40558 "%u 2^N hits, %u breaks, %u lost\n",
40559- atomic_read(&sbi->s_bal_ex_scanned),
40560- atomic_read(&sbi->s_bal_goals),
40561- atomic_read(&sbi->s_bal_2orders),
40562- atomic_read(&sbi->s_bal_breaks),
40563- atomic_read(&sbi->s_mb_lost_chunks));
40564+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
40565+ atomic_read_unchecked(&sbi->s_bal_goals),
40566+ atomic_read_unchecked(&sbi->s_bal_2orders),
40567+ atomic_read_unchecked(&sbi->s_bal_breaks),
40568+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
40569 printk(KERN_INFO
40570 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
40571 sbi->s_mb_buddies_generated++,
40572 sbi->s_mb_generation_time);
40573 printk(KERN_INFO
40574 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
40575- atomic_read(&sbi->s_mb_preallocated),
40576- atomic_read(&sbi->s_mb_discarded));
40577+ atomic_read_unchecked(&sbi->s_mb_preallocated),
40578+ atomic_read_unchecked(&sbi->s_mb_discarded));
40579 }
40580
40581 free_percpu(sbi->s_locality_groups);
40582@@ -3041,16 +3043,16 @@ static void ext4_mb_collect_stats(struct
40583 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
40584
40585 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
40586- atomic_inc(&sbi->s_bal_reqs);
40587- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
40588+ atomic_inc_unchecked(&sbi->s_bal_reqs);
40589+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
40590 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
40591- atomic_inc(&sbi->s_bal_success);
40592- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
40593+ atomic_inc_unchecked(&sbi->s_bal_success);
40594+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
40595 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
40596 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
40597- atomic_inc(&sbi->s_bal_goals);
40598+ atomic_inc_unchecked(&sbi->s_bal_goals);
40599 if (ac->ac_found > sbi->s_mb_max_to_scan)
40600- atomic_inc(&sbi->s_bal_breaks);
40601+ atomic_inc_unchecked(&sbi->s_bal_breaks);
40602 }
40603
40604 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
40605@@ -3448,7 +3450,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
40606 trace_ext4_mb_new_inode_pa(ac, pa);
40607
40608 ext4_mb_use_inode_pa(ac, pa);
40609- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
40610+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
40611
40612 ei = EXT4_I(ac->ac_inode);
40613 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
40614@@ -3508,7 +3510,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
40615 trace_ext4_mb_new_group_pa(ac, pa);
40616
40617 ext4_mb_use_group_pa(ac, pa);
40618- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
40619+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
40620
40621 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
40622 lg = ac->ac_lg;
40623@@ -3595,7 +3597,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
40624 * from the bitmap and continue.
40625 */
40626 }
40627- atomic_add(free, &sbi->s_mb_discarded);
40628+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
40629
40630 return err;
40631 }
40632@@ -3613,7 +3615,7 @@ ext4_mb_release_group_pa(struct ext4_bud
40633 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
40634 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
40635 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
40636- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
40637+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
40638 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
40639
40640 return 0;
40641diff -urNp linux-3.0.4/fs/fcntl.c linux-3.0.4/fs/fcntl.c
40642--- linux-3.0.4/fs/fcntl.c 2011-07-21 22:17:23.000000000 -0400
40643+++ linux-3.0.4/fs/fcntl.c 2011-10-06 04:17:55.000000000 -0400
40644@@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct
40645 if (err)
40646 return err;
40647
40648+ if (gr_handle_chroot_fowner(pid, type))
40649+ return -ENOENT;
40650+ if (gr_check_protected_task_fowner(pid, type))
40651+ return -EACCES;
40652+
40653 f_modown(filp, pid, type, force);
40654 return 0;
40655 }
40656@@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
40657
40658 static int f_setown_ex(struct file *filp, unsigned long arg)
40659 {
40660- struct f_owner_ex * __user owner_p = (void * __user)arg;
40661+ struct f_owner_ex __user *owner_p = (void __user *)arg;
40662 struct f_owner_ex owner;
40663 struct pid *pid;
40664 int type;
40665@@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp
40666
40667 static int f_getown_ex(struct file *filp, unsigned long arg)
40668 {
40669- struct f_owner_ex * __user owner_p = (void * __user)arg;
40670+ struct f_owner_ex __user *owner_p = (void __user *)arg;
40671 struct f_owner_ex owner;
40672 int ret = 0;
40673
40674@@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned in
40675 switch (cmd) {
40676 case F_DUPFD:
40677 case F_DUPFD_CLOEXEC:
40678+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
40679 if (arg >= rlimit(RLIMIT_NOFILE))
40680 break;
40681 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
40682@@ -835,14 +841,14 @@ static int __init fcntl_init(void)
40683 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
40684 * is defined as O_NONBLOCK on some platforms and not on others.
40685 */
40686- BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
40687+ BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
40688 O_RDONLY | O_WRONLY | O_RDWR |
40689 O_CREAT | O_EXCL | O_NOCTTY |
40690 O_TRUNC | O_APPEND | /* O_NONBLOCK | */
40691 __O_SYNC | O_DSYNC | FASYNC |
40692 O_DIRECT | O_LARGEFILE | O_DIRECTORY |
40693 O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
40694- __FMODE_EXEC | O_PATH
40695+ __FMODE_EXEC | O_PATH | FMODE_GREXEC
40696 ));
40697
40698 fasync_cache = kmem_cache_create("fasync_cache",
40699diff -urNp linux-3.0.4/fs/fifo.c linux-3.0.4/fs/fifo.c
40700--- linux-3.0.4/fs/fifo.c 2011-07-21 22:17:23.000000000 -0400
40701+++ linux-3.0.4/fs/fifo.c 2011-08-23 21:47:56.000000000 -0400
40702@@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode
40703 */
40704 filp->f_op = &read_pipefifo_fops;
40705 pipe->r_counter++;
40706- if (pipe->readers++ == 0)
40707+ if (atomic_inc_return(&pipe->readers) == 1)
40708 wake_up_partner(inode);
40709
40710- if (!pipe->writers) {
40711+ if (!atomic_read(&pipe->writers)) {
40712 if ((filp->f_flags & O_NONBLOCK)) {
40713 /* suppress POLLHUP until we have
40714 * seen a writer */
40715@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode
40716 * errno=ENXIO when there is no process reading the FIFO.
40717 */
40718 ret = -ENXIO;
40719- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
40720+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
40721 goto err;
40722
40723 filp->f_op = &write_pipefifo_fops;
40724 pipe->w_counter++;
40725- if (!pipe->writers++)
40726+ if (atomic_inc_return(&pipe->writers) == 1)
40727 wake_up_partner(inode);
40728
40729- if (!pipe->readers) {
40730+ if (!atomic_read(&pipe->readers)) {
40731 wait_for_partner(inode, &pipe->r_counter);
40732 if (signal_pending(current))
40733 goto err_wr;
40734@@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode
40735 */
40736 filp->f_op = &rdwr_pipefifo_fops;
40737
40738- pipe->readers++;
40739- pipe->writers++;
40740+ atomic_inc(&pipe->readers);
40741+ atomic_inc(&pipe->writers);
40742 pipe->r_counter++;
40743 pipe->w_counter++;
40744- if (pipe->readers == 1 || pipe->writers == 1)
40745+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
40746 wake_up_partner(inode);
40747 break;
40748
40749@@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode
40750 return 0;
40751
40752 err_rd:
40753- if (!--pipe->readers)
40754+ if (atomic_dec_and_test(&pipe->readers))
40755 wake_up_interruptible(&pipe->wait);
40756 ret = -ERESTARTSYS;
40757 goto err;
40758
40759 err_wr:
40760- if (!--pipe->writers)
40761+ if (atomic_dec_and_test(&pipe->writers))
40762 wake_up_interruptible(&pipe->wait);
40763 ret = -ERESTARTSYS;
40764 goto err;
40765
40766 err:
40767- if (!pipe->readers && !pipe->writers)
40768+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
40769 free_pipe_info(inode);
40770
40771 err_nocleanup:
40772diff -urNp linux-3.0.4/fs/file.c linux-3.0.4/fs/file.c
40773--- linux-3.0.4/fs/file.c 2011-07-21 22:17:23.000000000 -0400
40774+++ linux-3.0.4/fs/file.c 2011-08-23 21:48:14.000000000 -0400
40775@@ -15,6 +15,7 @@
40776 #include <linux/slab.h>
40777 #include <linux/vmalloc.h>
40778 #include <linux/file.h>
40779+#include <linux/security.h>
40780 #include <linux/fdtable.h>
40781 #include <linux/bitops.h>
40782 #include <linux/interrupt.h>
40783@@ -254,6 +255,7 @@ int expand_files(struct files_struct *fi
40784 * N.B. For clone tasks sharing a files structure, this test
40785 * will limit the total number of files that can be opened.
40786 */
40787+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
40788 if (nr >= rlimit(RLIMIT_NOFILE))
40789 return -EMFILE;
40790
40791diff -urNp linux-3.0.4/fs/filesystems.c linux-3.0.4/fs/filesystems.c
40792--- linux-3.0.4/fs/filesystems.c 2011-07-21 22:17:23.000000000 -0400
40793+++ linux-3.0.4/fs/filesystems.c 2011-08-23 21:48:14.000000000 -0400
40794@@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(con
40795 int len = dot ? dot - name : strlen(name);
40796
40797 fs = __get_fs_type(name, len);
40798+
40799+#ifdef CONFIG_GRKERNSEC_MODHARDEN
40800+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
40801+#else
40802 if (!fs && (request_module("%.*s", len, name) == 0))
40803+#endif
40804 fs = __get_fs_type(name, len);
40805
40806 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
40807diff -urNp linux-3.0.4/fs/fscache/cookie.c linux-3.0.4/fs/fscache/cookie.c
40808--- linux-3.0.4/fs/fscache/cookie.c 2011-07-21 22:17:23.000000000 -0400
40809+++ linux-3.0.4/fs/fscache/cookie.c 2011-08-23 21:47:56.000000000 -0400
40810@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
40811 parent ? (char *) parent->def->name : "<no-parent>",
40812 def->name, netfs_data);
40813
40814- fscache_stat(&fscache_n_acquires);
40815+ fscache_stat_unchecked(&fscache_n_acquires);
40816
40817 /* if there's no parent cookie, then we don't create one here either */
40818 if (!parent) {
40819- fscache_stat(&fscache_n_acquires_null);
40820+ fscache_stat_unchecked(&fscache_n_acquires_null);
40821 _leave(" [no parent]");
40822 return NULL;
40823 }
40824@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
40825 /* allocate and initialise a cookie */
40826 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
40827 if (!cookie) {
40828- fscache_stat(&fscache_n_acquires_oom);
40829+ fscache_stat_unchecked(&fscache_n_acquires_oom);
40830 _leave(" [ENOMEM]");
40831 return NULL;
40832 }
40833@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
40834
40835 switch (cookie->def->type) {
40836 case FSCACHE_COOKIE_TYPE_INDEX:
40837- fscache_stat(&fscache_n_cookie_index);
40838+ fscache_stat_unchecked(&fscache_n_cookie_index);
40839 break;
40840 case FSCACHE_COOKIE_TYPE_DATAFILE:
40841- fscache_stat(&fscache_n_cookie_data);
40842+ fscache_stat_unchecked(&fscache_n_cookie_data);
40843 break;
40844 default:
40845- fscache_stat(&fscache_n_cookie_special);
40846+ fscache_stat_unchecked(&fscache_n_cookie_special);
40847 break;
40848 }
40849
40850@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
40851 if (fscache_acquire_non_index_cookie(cookie) < 0) {
40852 atomic_dec(&parent->n_children);
40853 __fscache_cookie_put(cookie);
40854- fscache_stat(&fscache_n_acquires_nobufs);
40855+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
40856 _leave(" = NULL");
40857 return NULL;
40858 }
40859 }
40860
40861- fscache_stat(&fscache_n_acquires_ok);
40862+ fscache_stat_unchecked(&fscache_n_acquires_ok);
40863 _leave(" = %p", cookie);
40864 return cookie;
40865 }
40866@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
40867 cache = fscache_select_cache_for_object(cookie->parent);
40868 if (!cache) {
40869 up_read(&fscache_addremove_sem);
40870- fscache_stat(&fscache_n_acquires_no_cache);
40871+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
40872 _leave(" = -ENOMEDIUM [no cache]");
40873 return -ENOMEDIUM;
40874 }
40875@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
40876 object = cache->ops->alloc_object(cache, cookie);
40877 fscache_stat_d(&fscache_n_cop_alloc_object);
40878 if (IS_ERR(object)) {
40879- fscache_stat(&fscache_n_object_no_alloc);
40880+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
40881 ret = PTR_ERR(object);
40882 goto error;
40883 }
40884
40885- fscache_stat(&fscache_n_object_alloc);
40886+ fscache_stat_unchecked(&fscache_n_object_alloc);
40887
40888 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
40889
40890@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
40891 struct fscache_object *object;
40892 struct hlist_node *_p;
40893
40894- fscache_stat(&fscache_n_updates);
40895+ fscache_stat_unchecked(&fscache_n_updates);
40896
40897 if (!cookie) {
40898- fscache_stat(&fscache_n_updates_null);
40899+ fscache_stat_unchecked(&fscache_n_updates_null);
40900 _leave(" [no cookie]");
40901 return;
40902 }
40903@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
40904 struct fscache_object *object;
40905 unsigned long event;
40906
40907- fscache_stat(&fscache_n_relinquishes);
40908+ fscache_stat_unchecked(&fscache_n_relinquishes);
40909 if (retire)
40910- fscache_stat(&fscache_n_relinquishes_retire);
40911+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
40912
40913 if (!cookie) {
40914- fscache_stat(&fscache_n_relinquishes_null);
40915+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
40916 _leave(" [no cookie]");
40917 return;
40918 }
40919@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
40920
40921 /* wait for the cookie to finish being instantiated (or to fail) */
40922 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
40923- fscache_stat(&fscache_n_relinquishes_waitcrt);
40924+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
40925 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
40926 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
40927 }
40928diff -urNp linux-3.0.4/fs/fscache/internal.h linux-3.0.4/fs/fscache/internal.h
40929--- linux-3.0.4/fs/fscache/internal.h 2011-07-21 22:17:23.000000000 -0400
40930+++ linux-3.0.4/fs/fscache/internal.h 2011-08-23 21:47:56.000000000 -0400
40931@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
40932 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
40933 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
40934
40935-extern atomic_t fscache_n_op_pend;
40936-extern atomic_t fscache_n_op_run;
40937-extern atomic_t fscache_n_op_enqueue;
40938-extern atomic_t fscache_n_op_deferred_release;
40939-extern atomic_t fscache_n_op_release;
40940-extern atomic_t fscache_n_op_gc;
40941-extern atomic_t fscache_n_op_cancelled;
40942-extern atomic_t fscache_n_op_rejected;
40943-
40944-extern atomic_t fscache_n_attr_changed;
40945-extern atomic_t fscache_n_attr_changed_ok;
40946-extern atomic_t fscache_n_attr_changed_nobufs;
40947-extern atomic_t fscache_n_attr_changed_nomem;
40948-extern atomic_t fscache_n_attr_changed_calls;
40949-
40950-extern atomic_t fscache_n_allocs;
40951-extern atomic_t fscache_n_allocs_ok;
40952-extern atomic_t fscache_n_allocs_wait;
40953-extern atomic_t fscache_n_allocs_nobufs;
40954-extern atomic_t fscache_n_allocs_intr;
40955-extern atomic_t fscache_n_allocs_object_dead;
40956-extern atomic_t fscache_n_alloc_ops;
40957-extern atomic_t fscache_n_alloc_op_waits;
40958-
40959-extern atomic_t fscache_n_retrievals;
40960-extern atomic_t fscache_n_retrievals_ok;
40961-extern atomic_t fscache_n_retrievals_wait;
40962-extern atomic_t fscache_n_retrievals_nodata;
40963-extern atomic_t fscache_n_retrievals_nobufs;
40964-extern atomic_t fscache_n_retrievals_intr;
40965-extern atomic_t fscache_n_retrievals_nomem;
40966-extern atomic_t fscache_n_retrievals_object_dead;
40967-extern atomic_t fscache_n_retrieval_ops;
40968-extern atomic_t fscache_n_retrieval_op_waits;
40969-
40970-extern atomic_t fscache_n_stores;
40971-extern atomic_t fscache_n_stores_ok;
40972-extern atomic_t fscache_n_stores_again;
40973-extern atomic_t fscache_n_stores_nobufs;
40974-extern atomic_t fscache_n_stores_oom;
40975-extern atomic_t fscache_n_store_ops;
40976-extern atomic_t fscache_n_store_calls;
40977-extern atomic_t fscache_n_store_pages;
40978-extern atomic_t fscache_n_store_radix_deletes;
40979-extern atomic_t fscache_n_store_pages_over_limit;
40980-
40981-extern atomic_t fscache_n_store_vmscan_not_storing;
40982-extern atomic_t fscache_n_store_vmscan_gone;
40983-extern atomic_t fscache_n_store_vmscan_busy;
40984-extern atomic_t fscache_n_store_vmscan_cancelled;
40985-
40986-extern atomic_t fscache_n_marks;
40987-extern atomic_t fscache_n_uncaches;
40988-
40989-extern atomic_t fscache_n_acquires;
40990-extern atomic_t fscache_n_acquires_null;
40991-extern atomic_t fscache_n_acquires_no_cache;
40992-extern atomic_t fscache_n_acquires_ok;
40993-extern atomic_t fscache_n_acquires_nobufs;
40994-extern atomic_t fscache_n_acquires_oom;
40995-
40996-extern atomic_t fscache_n_updates;
40997-extern atomic_t fscache_n_updates_null;
40998-extern atomic_t fscache_n_updates_run;
40999-
41000-extern atomic_t fscache_n_relinquishes;
41001-extern atomic_t fscache_n_relinquishes_null;
41002-extern atomic_t fscache_n_relinquishes_waitcrt;
41003-extern atomic_t fscache_n_relinquishes_retire;
41004-
41005-extern atomic_t fscache_n_cookie_index;
41006-extern atomic_t fscache_n_cookie_data;
41007-extern atomic_t fscache_n_cookie_special;
41008-
41009-extern atomic_t fscache_n_object_alloc;
41010-extern atomic_t fscache_n_object_no_alloc;
41011-extern atomic_t fscache_n_object_lookups;
41012-extern atomic_t fscache_n_object_lookups_negative;
41013-extern atomic_t fscache_n_object_lookups_positive;
41014-extern atomic_t fscache_n_object_lookups_timed_out;
41015-extern atomic_t fscache_n_object_created;
41016-extern atomic_t fscache_n_object_avail;
41017-extern atomic_t fscache_n_object_dead;
41018-
41019-extern atomic_t fscache_n_checkaux_none;
41020-extern atomic_t fscache_n_checkaux_okay;
41021-extern atomic_t fscache_n_checkaux_update;
41022-extern atomic_t fscache_n_checkaux_obsolete;
41023+extern atomic_unchecked_t fscache_n_op_pend;
41024+extern atomic_unchecked_t fscache_n_op_run;
41025+extern atomic_unchecked_t fscache_n_op_enqueue;
41026+extern atomic_unchecked_t fscache_n_op_deferred_release;
41027+extern atomic_unchecked_t fscache_n_op_release;
41028+extern atomic_unchecked_t fscache_n_op_gc;
41029+extern atomic_unchecked_t fscache_n_op_cancelled;
41030+extern atomic_unchecked_t fscache_n_op_rejected;
41031+
41032+extern atomic_unchecked_t fscache_n_attr_changed;
41033+extern atomic_unchecked_t fscache_n_attr_changed_ok;
41034+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
41035+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
41036+extern atomic_unchecked_t fscache_n_attr_changed_calls;
41037+
41038+extern atomic_unchecked_t fscache_n_allocs;
41039+extern atomic_unchecked_t fscache_n_allocs_ok;
41040+extern atomic_unchecked_t fscache_n_allocs_wait;
41041+extern atomic_unchecked_t fscache_n_allocs_nobufs;
41042+extern atomic_unchecked_t fscache_n_allocs_intr;
41043+extern atomic_unchecked_t fscache_n_allocs_object_dead;
41044+extern atomic_unchecked_t fscache_n_alloc_ops;
41045+extern atomic_unchecked_t fscache_n_alloc_op_waits;
41046+
41047+extern atomic_unchecked_t fscache_n_retrievals;
41048+extern atomic_unchecked_t fscache_n_retrievals_ok;
41049+extern atomic_unchecked_t fscache_n_retrievals_wait;
41050+extern atomic_unchecked_t fscache_n_retrievals_nodata;
41051+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
41052+extern atomic_unchecked_t fscache_n_retrievals_intr;
41053+extern atomic_unchecked_t fscache_n_retrievals_nomem;
41054+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
41055+extern atomic_unchecked_t fscache_n_retrieval_ops;
41056+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
41057+
41058+extern atomic_unchecked_t fscache_n_stores;
41059+extern atomic_unchecked_t fscache_n_stores_ok;
41060+extern atomic_unchecked_t fscache_n_stores_again;
41061+extern atomic_unchecked_t fscache_n_stores_nobufs;
41062+extern atomic_unchecked_t fscache_n_stores_oom;
41063+extern atomic_unchecked_t fscache_n_store_ops;
41064+extern atomic_unchecked_t fscache_n_store_calls;
41065+extern atomic_unchecked_t fscache_n_store_pages;
41066+extern atomic_unchecked_t fscache_n_store_radix_deletes;
41067+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
41068+
41069+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
41070+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
41071+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
41072+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
41073+
41074+extern atomic_unchecked_t fscache_n_marks;
41075+extern atomic_unchecked_t fscache_n_uncaches;
41076+
41077+extern atomic_unchecked_t fscache_n_acquires;
41078+extern atomic_unchecked_t fscache_n_acquires_null;
41079+extern atomic_unchecked_t fscache_n_acquires_no_cache;
41080+extern atomic_unchecked_t fscache_n_acquires_ok;
41081+extern atomic_unchecked_t fscache_n_acquires_nobufs;
41082+extern atomic_unchecked_t fscache_n_acquires_oom;
41083+
41084+extern atomic_unchecked_t fscache_n_updates;
41085+extern atomic_unchecked_t fscache_n_updates_null;
41086+extern atomic_unchecked_t fscache_n_updates_run;
41087+
41088+extern atomic_unchecked_t fscache_n_relinquishes;
41089+extern atomic_unchecked_t fscache_n_relinquishes_null;
41090+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
41091+extern atomic_unchecked_t fscache_n_relinquishes_retire;
41092+
41093+extern atomic_unchecked_t fscache_n_cookie_index;
41094+extern atomic_unchecked_t fscache_n_cookie_data;
41095+extern atomic_unchecked_t fscache_n_cookie_special;
41096+
41097+extern atomic_unchecked_t fscache_n_object_alloc;
41098+extern atomic_unchecked_t fscache_n_object_no_alloc;
41099+extern atomic_unchecked_t fscache_n_object_lookups;
41100+extern atomic_unchecked_t fscache_n_object_lookups_negative;
41101+extern atomic_unchecked_t fscache_n_object_lookups_positive;
41102+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
41103+extern atomic_unchecked_t fscache_n_object_created;
41104+extern atomic_unchecked_t fscache_n_object_avail;
41105+extern atomic_unchecked_t fscache_n_object_dead;
41106+
41107+extern atomic_unchecked_t fscache_n_checkaux_none;
41108+extern atomic_unchecked_t fscache_n_checkaux_okay;
41109+extern atomic_unchecked_t fscache_n_checkaux_update;
41110+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
41111
41112 extern atomic_t fscache_n_cop_alloc_object;
41113 extern atomic_t fscache_n_cop_lookup_object;
41114@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t
41115 atomic_inc(stat);
41116 }
41117
41118+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
41119+{
41120+ atomic_inc_unchecked(stat);
41121+}
41122+
41123 static inline void fscache_stat_d(atomic_t *stat)
41124 {
41125 atomic_dec(stat);
41126@@ -267,6 +272,7 @@ extern const struct file_operations fsca
41127
41128 #define __fscache_stat(stat) (NULL)
41129 #define fscache_stat(stat) do {} while (0)
41130+#define fscache_stat_unchecked(stat) do {} while (0)
41131 #define fscache_stat_d(stat) do {} while (0)
41132 #endif
41133
41134diff -urNp linux-3.0.4/fs/fscache/object.c linux-3.0.4/fs/fscache/object.c
41135--- linux-3.0.4/fs/fscache/object.c 2011-07-21 22:17:23.000000000 -0400
41136+++ linux-3.0.4/fs/fscache/object.c 2011-08-23 21:47:56.000000000 -0400
41137@@ -128,7 +128,7 @@ static void fscache_object_state_machine
41138 /* update the object metadata on disk */
41139 case FSCACHE_OBJECT_UPDATING:
41140 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
41141- fscache_stat(&fscache_n_updates_run);
41142+ fscache_stat_unchecked(&fscache_n_updates_run);
41143 fscache_stat(&fscache_n_cop_update_object);
41144 object->cache->ops->update_object(object);
41145 fscache_stat_d(&fscache_n_cop_update_object);
41146@@ -217,7 +217,7 @@ static void fscache_object_state_machine
41147 spin_lock(&object->lock);
41148 object->state = FSCACHE_OBJECT_DEAD;
41149 spin_unlock(&object->lock);
41150- fscache_stat(&fscache_n_object_dead);
41151+ fscache_stat_unchecked(&fscache_n_object_dead);
41152 goto terminal_transit;
41153
41154 /* handle the parent cache of this object being withdrawn from
41155@@ -232,7 +232,7 @@ static void fscache_object_state_machine
41156 spin_lock(&object->lock);
41157 object->state = FSCACHE_OBJECT_DEAD;
41158 spin_unlock(&object->lock);
41159- fscache_stat(&fscache_n_object_dead);
41160+ fscache_stat_unchecked(&fscache_n_object_dead);
41161 goto terminal_transit;
41162
41163 /* complain about the object being woken up once it is
41164@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct
41165 parent->cookie->def->name, cookie->def->name,
41166 object->cache->tag->name);
41167
41168- fscache_stat(&fscache_n_object_lookups);
41169+ fscache_stat_unchecked(&fscache_n_object_lookups);
41170 fscache_stat(&fscache_n_cop_lookup_object);
41171 ret = object->cache->ops->lookup_object(object);
41172 fscache_stat_d(&fscache_n_cop_lookup_object);
41173@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct
41174 if (ret == -ETIMEDOUT) {
41175 /* probably stuck behind another object, so move this one to
41176 * the back of the queue */
41177- fscache_stat(&fscache_n_object_lookups_timed_out);
41178+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
41179 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
41180 }
41181
41182@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(stru
41183
41184 spin_lock(&object->lock);
41185 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
41186- fscache_stat(&fscache_n_object_lookups_negative);
41187+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
41188
41189 /* transit here to allow write requests to begin stacking up
41190 * and read requests to begin returning ENODATA */
41191@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fsca
41192 * result, in which case there may be data available */
41193 spin_lock(&object->lock);
41194 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
41195- fscache_stat(&fscache_n_object_lookups_positive);
41196+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
41197
41198 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
41199
41200@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fsca
41201 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
41202 } else {
41203 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
41204- fscache_stat(&fscache_n_object_created);
41205+ fscache_stat_unchecked(&fscache_n_object_created);
41206
41207 object->state = FSCACHE_OBJECT_AVAILABLE;
41208 spin_unlock(&object->lock);
41209@@ -602,7 +602,7 @@ static void fscache_object_available(str
41210 fscache_enqueue_dependents(object);
41211
41212 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
41213- fscache_stat(&fscache_n_object_avail);
41214+ fscache_stat_unchecked(&fscache_n_object_avail);
41215
41216 _leave("");
41217 }
41218@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
41219 enum fscache_checkaux result;
41220
41221 if (!object->cookie->def->check_aux) {
41222- fscache_stat(&fscache_n_checkaux_none);
41223+ fscache_stat_unchecked(&fscache_n_checkaux_none);
41224 return FSCACHE_CHECKAUX_OKAY;
41225 }
41226
41227@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
41228 switch (result) {
41229 /* entry okay as is */
41230 case FSCACHE_CHECKAUX_OKAY:
41231- fscache_stat(&fscache_n_checkaux_okay);
41232+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
41233 break;
41234
41235 /* entry requires update */
41236 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
41237- fscache_stat(&fscache_n_checkaux_update);
41238+ fscache_stat_unchecked(&fscache_n_checkaux_update);
41239 break;
41240
41241 /* entry requires deletion */
41242 case FSCACHE_CHECKAUX_OBSOLETE:
41243- fscache_stat(&fscache_n_checkaux_obsolete);
41244+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
41245 break;
41246
41247 default:
41248diff -urNp linux-3.0.4/fs/fscache/operation.c linux-3.0.4/fs/fscache/operation.c
41249--- linux-3.0.4/fs/fscache/operation.c 2011-07-21 22:17:23.000000000 -0400
41250+++ linux-3.0.4/fs/fscache/operation.c 2011-08-23 21:47:56.000000000 -0400
41251@@ -17,7 +17,7 @@
41252 #include <linux/slab.h>
41253 #include "internal.h"
41254
41255-atomic_t fscache_op_debug_id;
41256+atomic_unchecked_t fscache_op_debug_id;
41257 EXPORT_SYMBOL(fscache_op_debug_id);
41258
41259 /**
41260@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fs
41261 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
41262 ASSERTCMP(atomic_read(&op->usage), >, 0);
41263
41264- fscache_stat(&fscache_n_op_enqueue);
41265+ fscache_stat_unchecked(&fscache_n_op_enqueue);
41266 switch (op->flags & FSCACHE_OP_TYPE) {
41267 case FSCACHE_OP_ASYNC:
41268 _debug("queue async");
41269@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscach
41270 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
41271 if (op->processor)
41272 fscache_enqueue_operation(op);
41273- fscache_stat(&fscache_n_op_run);
41274+ fscache_stat_unchecked(&fscache_n_op_run);
41275 }
41276
41277 /*
41278@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct f
41279 if (object->n_ops > 1) {
41280 atomic_inc(&op->usage);
41281 list_add_tail(&op->pend_link, &object->pending_ops);
41282- fscache_stat(&fscache_n_op_pend);
41283+ fscache_stat_unchecked(&fscache_n_op_pend);
41284 } else if (!list_empty(&object->pending_ops)) {
41285 atomic_inc(&op->usage);
41286 list_add_tail(&op->pend_link, &object->pending_ops);
41287- fscache_stat(&fscache_n_op_pend);
41288+ fscache_stat_unchecked(&fscache_n_op_pend);
41289 fscache_start_operations(object);
41290 } else {
41291 ASSERTCMP(object->n_in_progress, ==, 0);
41292@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct f
41293 object->n_exclusive++; /* reads and writes must wait */
41294 atomic_inc(&op->usage);
41295 list_add_tail(&op->pend_link, &object->pending_ops);
41296- fscache_stat(&fscache_n_op_pend);
41297+ fscache_stat_unchecked(&fscache_n_op_pend);
41298 ret = 0;
41299 } else {
41300 /* not allowed to submit ops in any other state */
41301@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_obj
41302 if (object->n_exclusive > 0) {
41303 atomic_inc(&op->usage);
41304 list_add_tail(&op->pend_link, &object->pending_ops);
41305- fscache_stat(&fscache_n_op_pend);
41306+ fscache_stat_unchecked(&fscache_n_op_pend);
41307 } else if (!list_empty(&object->pending_ops)) {
41308 atomic_inc(&op->usage);
41309 list_add_tail(&op->pend_link, &object->pending_ops);
41310- fscache_stat(&fscache_n_op_pend);
41311+ fscache_stat_unchecked(&fscache_n_op_pend);
41312 fscache_start_operations(object);
41313 } else {
41314 ASSERTCMP(object->n_exclusive, ==, 0);
41315@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_obj
41316 object->n_ops++;
41317 atomic_inc(&op->usage);
41318 list_add_tail(&op->pend_link, &object->pending_ops);
41319- fscache_stat(&fscache_n_op_pend);
41320+ fscache_stat_unchecked(&fscache_n_op_pend);
41321 ret = 0;
41322 } else if (object->state == FSCACHE_OBJECT_DYING ||
41323 object->state == FSCACHE_OBJECT_LC_DYING ||
41324 object->state == FSCACHE_OBJECT_WITHDRAWING) {
41325- fscache_stat(&fscache_n_op_rejected);
41326+ fscache_stat_unchecked(&fscache_n_op_rejected);
41327 ret = -ENOBUFS;
41328 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
41329 fscache_report_unexpected_submission(object, op, ostate);
41330@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_ope
41331
41332 ret = -EBUSY;
41333 if (!list_empty(&op->pend_link)) {
41334- fscache_stat(&fscache_n_op_cancelled);
41335+ fscache_stat_unchecked(&fscache_n_op_cancelled);
41336 list_del_init(&op->pend_link);
41337 object->n_ops--;
41338 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
41339@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscach
41340 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
41341 BUG();
41342
41343- fscache_stat(&fscache_n_op_release);
41344+ fscache_stat_unchecked(&fscache_n_op_release);
41345
41346 if (op->release) {
41347 op->release(op);
41348@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscach
41349 * lock, and defer it otherwise */
41350 if (!spin_trylock(&object->lock)) {
41351 _debug("defer put");
41352- fscache_stat(&fscache_n_op_deferred_release);
41353+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
41354
41355 cache = object->cache;
41356 spin_lock(&cache->op_gc_list_lock);
41357@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_st
41358
41359 _debug("GC DEFERRED REL OBJ%x OP%x",
41360 object->debug_id, op->debug_id);
41361- fscache_stat(&fscache_n_op_gc);
41362+ fscache_stat_unchecked(&fscache_n_op_gc);
41363
41364 ASSERTCMP(atomic_read(&op->usage), ==, 0);
41365
41366diff -urNp linux-3.0.4/fs/fscache/page.c linux-3.0.4/fs/fscache/page.c
41367--- linux-3.0.4/fs/fscache/page.c 2011-07-21 22:17:23.000000000 -0400
41368+++ linux-3.0.4/fs/fscache/page.c 2011-08-23 21:47:56.000000000 -0400
41369@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct
41370 val = radix_tree_lookup(&cookie->stores, page->index);
41371 if (!val) {
41372 rcu_read_unlock();
41373- fscache_stat(&fscache_n_store_vmscan_not_storing);
41374+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
41375 __fscache_uncache_page(cookie, page);
41376 return true;
41377 }
41378@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct
41379 spin_unlock(&cookie->stores_lock);
41380
41381 if (xpage) {
41382- fscache_stat(&fscache_n_store_vmscan_cancelled);
41383- fscache_stat(&fscache_n_store_radix_deletes);
41384+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
41385+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
41386 ASSERTCMP(xpage, ==, page);
41387 } else {
41388- fscache_stat(&fscache_n_store_vmscan_gone);
41389+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
41390 }
41391
41392 wake_up_bit(&cookie->flags, 0);
41393@@ -107,7 +107,7 @@ page_busy:
41394 /* we might want to wait here, but that could deadlock the allocator as
41395 * the work threads writing to the cache may all end up sleeping
41396 * on memory allocation */
41397- fscache_stat(&fscache_n_store_vmscan_busy);
41398+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
41399 return false;
41400 }
41401 EXPORT_SYMBOL(__fscache_maybe_release_page);
41402@@ -131,7 +131,7 @@ static void fscache_end_page_write(struc
41403 FSCACHE_COOKIE_STORING_TAG);
41404 if (!radix_tree_tag_get(&cookie->stores, page->index,
41405 FSCACHE_COOKIE_PENDING_TAG)) {
41406- fscache_stat(&fscache_n_store_radix_deletes);
41407+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
41408 xpage = radix_tree_delete(&cookie->stores, page->index);
41409 }
41410 spin_unlock(&cookie->stores_lock);
41411@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(stru
41412
41413 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
41414
41415- fscache_stat(&fscache_n_attr_changed_calls);
41416+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
41417
41418 if (fscache_object_is_active(object)) {
41419 fscache_stat(&fscache_n_cop_attr_changed);
41420@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscach
41421
41422 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
41423
41424- fscache_stat(&fscache_n_attr_changed);
41425+ fscache_stat_unchecked(&fscache_n_attr_changed);
41426
41427 op = kzalloc(sizeof(*op), GFP_KERNEL);
41428 if (!op) {
41429- fscache_stat(&fscache_n_attr_changed_nomem);
41430+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
41431 _leave(" = -ENOMEM");
41432 return -ENOMEM;
41433 }
41434@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscach
41435 if (fscache_submit_exclusive_op(object, op) < 0)
41436 goto nobufs;
41437 spin_unlock(&cookie->lock);
41438- fscache_stat(&fscache_n_attr_changed_ok);
41439+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
41440 fscache_put_operation(op);
41441 _leave(" = 0");
41442 return 0;
41443@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscach
41444 nobufs:
41445 spin_unlock(&cookie->lock);
41446 kfree(op);
41447- fscache_stat(&fscache_n_attr_changed_nobufs);
41448+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
41449 _leave(" = %d", -ENOBUFS);
41450 return -ENOBUFS;
41451 }
41452@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache
41453 /* allocate a retrieval operation and attempt to submit it */
41454 op = kzalloc(sizeof(*op), GFP_NOIO);
41455 if (!op) {
41456- fscache_stat(&fscache_n_retrievals_nomem);
41457+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
41458 return NULL;
41459 }
41460
41461@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_loo
41462 return 0;
41463 }
41464
41465- fscache_stat(&fscache_n_retrievals_wait);
41466+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
41467
41468 jif = jiffies;
41469 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
41470 fscache_wait_bit_interruptible,
41471 TASK_INTERRUPTIBLE) != 0) {
41472- fscache_stat(&fscache_n_retrievals_intr);
41473+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
41474 _leave(" = -ERESTARTSYS");
41475 return -ERESTARTSYS;
41476 }
41477@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_loo
41478 */
41479 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
41480 struct fscache_retrieval *op,
41481- atomic_t *stat_op_waits,
41482- atomic_t *stat_object_dead)
41483+ atomic_unchecked_t *stat_op_waits,
41484+ atomic_unchecked_t *stat_object_dead)
41485 {
41486 int ret;
41487
41488@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_ac
41489 goto check_if_dead;
41490
41491 _debug(">>> WT");
41492- fscache_stat(stat_op_waits);
41493+ fscache_stat_unchecked(stat_op_waits);
41494 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
41495 fscache_wait_bit_interruptible,
41496 TASK_INTERRUPTIBLE) < 0) {
41497@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_ac
41498
41499 check_if_dead:
41500 if (unlikely(fscache_object_is_dead(object))) {
41501- fscache_stat(stat_object_dead);
41502+ fscache_stat_unchecked(stat_object_dead);
41503 return -ENOBUFS;
41504 }
41505 return 0;
41506@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct
41507
41508 _enter("%p,%p,,,", cookie, page);
41509
41510- fscache_stat(&fscache_n_retrievals);
41511+ fscache_stat_unchecked(&fscache_n_retrievals);
41512
41513 if (hlist_empty(&cookie->backing_objects))
41514 goto nobufs;
41515@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct
41516 goto nobufs_unlock;
41517 spin_unlock(&cookie->lock);
41518
41519- fscache_stat(&fscache_n_retrieval_ops);
41520+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
41521
41522 /* pin the netfs read context in case we need to do the actual netfs
41523 * read because we've encountered a cache read failure */
41524@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct
41525
41526 error:
41527 if (ret == -ENOMEM)
41528- fscache_stat(&fscache_n_retrievals_nomem);
41529+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
41530 else if (ret == -ERESTARTSYS)
41531- fscache_stat(&fscache_n_retrievals_intr);
41532+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
41533 else if (ret == -ENODATA)
41534- fscache_stat(&fscache_n_retrievals_nodata);
41535+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
41536 else if (ret < 0)
41537- fscache_stat(&fscache_n_retrievals_nobufs);
41538+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
41539 else
41540- fscache_stat(&fscache_n_retrievals_ok);
41541+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
41542
41543 fscache_put_retrieval(op);
41544 _leave(" = %d", ret);
41545@@ -429,7 +429,7 @@ nobufs_unlock:
41546 spin_unlock(&cookie->lock);
41547 kfree(op);
41548 nobufs:
41549- fscache_stat(&fscache_n_retrievals_nobufs);
41550+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
41551 _leave(" = -ENOBUFS");
41552 return -ENOBUFS;
41553 }
41554@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct
41555
41556 _enter("%p,,%d,,,", cookie, *nr_pages);
41557
41558- fscache_stat(&fscache_n_retrievals);
41559+ fscache_stat_unchecked(&fscache_n_retrievals);
41560
41561 if (hlist_empty(&cookie->backing_objects))
41562 goto nobufs;
41563@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct
41564 goto nobufs_unlock;
41565 spin_unlock(&cookie->lock);
41566
41567- fscache_stat(&fscache_n_retrieval_ops);
41568+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
41569
41570 /* pin the netfs read context in case we need to do the actual netfs
41571 * read because we've encountered a cache read failure */
41572@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct
41573
41574 error:
41575 if (ret == -ENOMEM)
41576- fscache_stat(&fscache_n_retrievals_nomem);
41577+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
41578 else if (ret == -ERESTARTSYS)
41579- fscache_stat(&fscache_n_retrievals_intr);
41580+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
41581 else if (ret == -ENODATA)
41582- fscache_stat(&fscache_n_retrievals_nodata);
41583+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
41584 else if (ret < 0)
41585- fscache_stat(&fscache_n_retrievals_nobufs);
41586+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
41587 else
41588- fscache_stat(&fscache_n_retrievals_ok);
41589+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
41590
41591 fscache_put_retrieval(op);
41592 _leave(" = %d", ret);
41593@@ -545,7 +545,7 @@ nobufs_unlock:
41594 spin_unlock(&cookie->lock);
41595 kfree(op);
41596 nobufs:
41597- fscache_stat(&fscache_n_retrievals_nobufs);
41598+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
41599 _leave(" = -ENOBUFS");
41600 return -ENOBUFS;
41601 }
41602@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_
41603
41604 _enter("%p,%p,,,", cookie, page);
41605
41606- fscache_stat(&fscache_n_allocs);
41607+ fscache_stat_unchecked(&fscache_n_allocs);
41608
41609 if (hlist_empty(&cookie->backing_objects))
41610 goto nobufs;
41611@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_
41612 goto nobufs_unlock;
41613 spin_unlock(&cookie->lock);
41614
41615- fscache_stat(&fscache_n_alloc_ops);
41616+ fscache_stat_unchecked(&fscache_n_alloc_ops);
41617
41618 ret = fscache_wait_for_retrieval_activation(
41619 object, op,
41620@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_
41621
41622 error:
41623 if (ret == -ERESTARTSYS)
41624- fscache_stat(&fscache_n_allocs_intr);
41625+ fscache_stat_unchecked(&fscache_n_allocs_intr);
41626 else if (ret < 0)
41627- fscache_stat(&fscache_n_allocs_nobufs);
41628+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
41629 else
41630- fscache_stat(&fscache_n_allocs_ok);
41631+ fscache_stat_unchecked(&fscache_n_allocs_ok);
41632
41633 fscache_put_retrieval(op);
41634 _leave(" = %d", ret);
41635@@ -625,7 +625,7 @@ nobufs_unlock:
41636 spin_unlock(&cookie->lock);
41637 kfree(op);
41638 nobufs:
41639- fscache_stat(&fscache_n_allocs_nobufs);
41640+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
41641 _leave(" = -ENOBUFS");
41642 return -ENOBUFS;
41643 }
41644@@ -666,7 +666,7 @@ static void fscache_write_op(struct fsca
41645
41646 spin_lock(&cookie->stores_lock);
41647
41648- fscache_stat(&fscache_n_store_calls);
41649+ fscache_stat_unchecked(&fscache_n_store_calls);
41650
41651 /* find a page to store */
41652 page = NULL;
41653@@ -677,7 +677,7 @@ static void fscache_write_op(struct fsca
41654 page = results[0];
41655 _debug("gang %d [%lx]", n, page->index);
41656 if (page->index > op->store_limit) {
41657- fscache_stat(&fscache_n_store_pages_over_limit);
41658+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
41659 goto superseded;
41660 }
41661
41662@@ -689,7 +689,7 @@ static void fscache_write_op(struct fsca
41663 spin_unlock(&cookie->stores_lock);
41664 spin_unlock(&object->lock);
41665
41666- fscache_stat(&fscache_n_store_pages);
41667+ fscache_stat_unchecked(&fscache_n_store_pages);
41668 fscache_stat(&fscache_n_cop_write_page);
41669 ret = object->cache->ops->write_page(op, page);
41670 fscache_stat_d(&fscache_n_cop_write_page);
41671@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_
41672 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
41673 ASSERT(PageFsCache(page));
41674
41675- fscache_stat(&fscache_n_stores);
41676+ fscache_stat_unchecked(&fscache_n_stores);
41677
41678 op = kzalloc(sizeof(*op), GFP_NOIO);
41679 if (!op)
41680@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_
41681 spin_unlock(&cookie->stores_lock);
41682 spin_unlock(&object->lock);
41683
41684- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
41685+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
41686 op->store_limit = object->store_limit;
41687
41688 if (fscache_submit_op(object, &op->op) < 0)
41689@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_
41690
41691 spin_unlock(&cookie->lock);
41692 radix_tree_preload_end();
41693- fscache_stat(&fscache_n_store_ops);
41694- fscache_stat(&fscache_n_stores_ok);
41695+ fscache_stat_unchecked(&fscache_n_store_ops);
41696+ fscache_stat_unchecked(&fscache_n_stores_ok);
41697
41698 /* the work queue now carries its own ref on the object */
41699 fscache_put_operation(&op->op);
41700@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_
41701 return 0;
41702
41703 already_queued:
41704- fscache_stat(&fscache_n_stores_again);
41705+ fscache_stat_unchecked(&fscache_n_stores_again);
41706 already_pending:
41707 spin_unlock(&cookie->stores_lock);
41708 spin_unlock(&object->lock);
41709 spin_unlock(&cookie->lock);
41710 radix_tree_preload_end();
41711 kfree(op);
41712- fscache_stat(&fscache_n_stores_ok);
41713+ fscache_stat_unchecked(&fscache_n_stores_ok);
41714 _leave(" = 0");
41715 return 0;
41716
41717@@ -851,14 +851,14 @@ nobufs:
41718 spin_unlock(&cookie->lock);
41719 radix_tree_preload_end();
41720 kfree(op);
41721- fscache_stat(&fscache_n_stores_nobufs);
41722+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
41723 _leave(" = -ENOBUFS");
41724 return -ENOBUFS;
41725
41726 nomem_free:
41727 kfree(op);
41728 nomem:
41729- fscache_stat(&fscache_n_stores_oom);
41730+ fscache_stat_unchecked(&fscache_n_stores_oom);
41731 _leave(" = -ENOMEM");
41732 return -ENOMEM;
41733 }
41734@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscac
41735 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
41736 ASSERTCMP(page, !=, NULL);
41737
41738- fscache_stat(&fscache_n_uncaches);
41739+ fscache_stat_unchecked(&fscache_n_uncaches);
41740
41741 /* cache withdrawal may beat us to it */
41742 if (!PageFsCache(page))
41743@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fs
41744 unsigned long loop;
41745
41746 #ifdef CONFIG_FSCACHE_STATS
41747- atomic_add(pagevec->nr, &fscache_n_marks);
41748+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
41749 #endif
41750
41751 for (loop = 0; loop < pagevec->nr; loop++) {
41752diff -urNp linux-3.0.4/fs/fscache/stats.c linux-3.0.4/fs/fscache/stats.c
41753--- linux-3.0.4/fs/fscache/stats.c 2011-07-21 22:17:23.000000000 -0400
41754+++ linux-3.0.4/fs/fscache/stats.c 2011-08-23 21:47:56.000000000 -0400
41755@@ -18,95 +18,95 @@
41756 /*
41757 * operation counters
41758 */
41759-atomic_t fscache_n_op_pend;
41760-atomic_t fscache_n_op_run;
41761-atomic_t fscache_n_op_enqueue;
41762-atomic_t fscache_n_op_requeue;
41763-atomic_t fscache_n_op_deferred_release;
41764-atomic_t fscache_n_op_release;
41765-atomic_t fscache_n_op_gc;
41766-atomic_t fscache_n_op_cancelled;
41767-atomic_t fscache_n_op_rejected;
41768-
41769-atomic_t fscache_n_attr_changed;
41770-atomic_t fscache_n_attr_changed_ok;
41771-atomic_t fscache_n_attr_changed_nobufs;
41772-atomic_t fscache_n_attr_changed_nomem;
41773-atomic_t fscache_n_attr_changed_calls;
41774-
41775-atomic_t fscache_n_allocs;
41776-atomic_t fscache_n_allocs_ok;
41777-atomic_t fscache_n_allocs_wait;
41778-atomic_t fscache_n_allocs_nobufs;
41779-atomic_t fscache_n_allocs_intr;
41780-atomic_t fscache_n_allocs_object_dead;
41781-atomic_t fscache_n_alloc_ops;
41782-atomic_t fscache_n_alloc_op_waits;
41783-
41784-atomic_t fscache_n_retrievals;
41785-atomic_t fscache_n_retrievals_ok;
41786-atomic_t fscache_n_retrievals_wait;
41787-atomic_t fscache_n_retrievals_nodata;
41788-atomic_t fscache_n_retrievals_nobufs;
41789-atomic_t fscache_n_retrievals_intr;
41790-atomic_t fscache_n_retrievals_nomem;
41791-atomic_t fscache_n_retrievals_object_dead;
41792-atomic_t fscache_n_retrieval_ops;
41793-atomic_t fscache_n_retrieval_op_waits;
41794-
41795-atomic_t fscache_n_stores;
41796-atomic_t fscache_n_stores_ok;
41797-atomic_t fscache_n_stores_again;
41798-atomic_t fscache_n_stores_nobufs;
41799-atomic_t fscache_n_stores_oom;
41800-atomic_t fscache_n_store_ops;
41801-atomic_t fscache_n_store_calls;
41802-atomic_t fscache_n_store_pages;
41803-atomic_t fscache_n_store_radix_deletes;
41804-atomic_t fscache_n_store_pages_over_limit;
41805-
41806-atomic_t fscache_n_store_vmscan_not_storing;
41807-atomic_t fscache_n_store_vmscan_gone;
41808-atomic_t fscache_n_store_vmscan_busy;
41809-atomic_t fscache_n_store_vmscan_cancelled;
41810-
41811-atomic_t fscache_n_marks;
41812-atomic_t fscache_n_uncaches;
41813-
41814-atomic_t fscache_n_acquires;
41815-atomic_t fscache_n_acquires_null;
41816-atomic_t fscache_n_acquires_no_cache;
41817-atomic_t fscache_n_acquires_ok;
41818-atomic_t fscache_n_acquires_nobufs;
41819-atomic_t fscache_n_acquires_oom;
41820-
41821-atomic_t fscache_n_updates;
41822-atomic_t fscache_n_updates_null;
41823-atomic_t fscache_n_updates_run;
41824-
41825-atomic_t fscache_n_relinquishes;
41826-atomic_t fscache_n_relinquishes_null;
41827-atomic_t fscache_n_relinquishes_waitcrt;
41828-atomic_t fscache_n_relinquishes_retire;
41829-
41830-atomic_t fscache_n_cookie_index;
41831-atomic_t fscache_n_cookie_data;
41832-atomic_t fscache_n_cookie_special;
41833-
41834-atomic_t fscache_n_object_alloc;
41835-atomic_t fscache_n_object_no_alloc;
41836-atomic_t fscache_n_object_lookups;
41837-atomic_t fscache_n_object_lookups_negative;
41838-atomic_t fscache_n_object_lookups_positive;
41839-atomic_t fscache_n_object_lookups_timed_out;
41840-atomic_t fscache_n_object_created;
41841-atomic_t fscache_n_object_avail;
41842-atomic_t fscache_n_object_dead;
41843-
41844-atomic_t fscache_n_checkaux_none;
41845-atomic_t fscache_n_checkaux_okay;
41846-atomic_t fscache_n_checkaux_update;
41847-atomic_t fscache_n_checkaux_obsolete;
41848+atomic_unchecked_t fscache_n_op_pend;
41849+atomic_unchecked_t fscache_n_op_run;
41850+atomic_unchecked_t fscache_n_op_enqueue;
41851+atomic_unchecked_t fscache_n_op_requeue;
41852+atomic_unchecked_t fscache_n_op_deferred_release;
41853+atomic_unchecked_t fscache_n_op_release;
41854+atomic_unchecked_t fscache_n_op_gc;
41855+atomic_unchecked_t fscache_n_op_cancelled;
41856+atomic_unchecked_t fscache_n_op_rejected;
41857+
41858+atomic_unchecked_t fscache_n_attr_changed;
41859+atomic_unchecked_t fscache_n_attr_changed_ok;
41860+atomic_unchecked_t fscache_n_attr_changed_nobufs;
41861+atomic_unchecked_t fscache_n_attr_changed_nomem;
41862+atomic_unchecked_t fscache_n_attr_changed_calls;
41863+
41864+atomic_unchecked_t fscache_n_allocs;
41865+atomic_unchecked_t fscache_n_allocs_ok;
41866+atomic_unchecked_t fscache_n_allocs_wait;
41867+atomic_unchecked_t fscache_n_allocs_nobufs;
41868+atomic_unchecked_t fscache_n_allocs_intr;
41869+atomic_unchecked_t fscache_n_allocs_object_dead;
41870+atomic_unchecked_t fscache_n_alloc_ops;
41871+atomic_unchecked_t fscache_n_alloc_op_waits;
41872+
41873+atomic_unchecked_t fscache_n_retrievals;
41874+atomic_unchecked_t fscache_n_retrievals_ok;
41875+atomic_unchecked_t fscache_n_retrievals_wait;
41876+atomic_unchecked_t fscache_n_retrievals_nodata;
41877+atomic_unchecked_t fscache_n_retrievals_nobufs;
41878+atomic_unchecked_t fscache_n_retrievals_intr;
41879+atomic_unchecked_t fscache_n_retrievals_nomem;
41880+atomic_unchecked_t fscache_n_retrievals_object_dead;
41881+atomic_unchecked_t fscache_n_retrieval_ops;
41882+atomic_unchecked_t fscache_n_retrieval_op_waits;
41883+
41884+atomic_unchecked_t fscache_n_stores;
41885+atomic_unchecked_t fscache_n_stores_ok;
41886+atomic_unchecked_t fscache_n_stores_again;
41887+atomic_unchecked_t fscache_n_stores_nobufs;
41888+atomic_unchecked_t fscache_n_stores_oom;
41889+atomic_unchecked_t fscache_n_store_ops;
41890+atomic_unchecked_t fscache_n_store_calls;
41891+atomic_unchecked_t fscache_n_store_pages;
41892+atomic_unchecked_t fscache_n_store_radix_deletes;
41893+atomic_unchecked_t fscache_n_store_pages_over_limit;
41894+
41895+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
41896+atomic_unchecked_t fscache_n_store_vmscan_gone;
41897+atomic_unchecked_t fscache_n_store_vmscan_busy;
41898+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
41899+
41900+atomic_unchecked_t fscache_n_marks;
41901+atomic_unchecked_t fscache_n_uncaches;
41902+
41903+atomic_unchecked_t fscache_n_acquires;
41904+atomic_unchecked_t fscache_n_acquires_null;
41905+atomic_unchecked_t fscache_n_acquires_no_cache;
41906+atomic_unchecked_t fscache_n_acquires_ok;
41907+atomic_unchecked_t fscache_n_acquires_nobufs;
41908+atomic_unchecked_t fscache_n_acquires_oom;
41909+
41910+atomic_unchecked_t fscache_n_updates;
41911+atomic_unchecked_t fscache_n_updates_null;
41912+atomic_unchecked_t fscache_n_updates_run;
41913+
41914+atomic_unchecked_t fscache_n_relinquishes;
41915+atomic_unchecked_t fscache_n_relinquishes_null;
41916+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
41917+atomic_unchecked_t fscache_n_relinquishes_retire;
41918+
41919+atomic_unchecked_t fscache_n_cookie_index;
41920+atomic_unchecked_t fscache_n_cookie_data;
41921+atomic_unchecked_t fscache_n_cookie_special;
41922+
41923+atomic_unchecked_t fscache_n_object_alloc;
41924+atomic_unchecked_t fscache_n_object_no_alloc;
41925+atomic_unchecked_t fscache_n_object_lookups;
41926+atomic_unchecked_t fscache_n_object_lookups_negative;
41927+atomic_unchecked_t fscache_n_object_lookups_positive;
41928+atomic_unchecked_t fscache_n_object_lookups_timed_out;
41929+atomic_unchecked_t fscache_n_object_created;
41930+atomic_unchecked_t fscache_n_object_avail;
41931+atomic_unchecked_t fscache_n_object_dead;
41932+
41933+atomic_unchecked_t fscache_n_checkaux_none;
41934+atomic_unchecked_t fscache_n_checkaux_okay;
41935+atomic_unchecked_t fscache_n_checkaux_update;
41936+atomic_unchecked_t fscache_n_checkaux_obsolete;
41937
41938 atomic_t fscache_n_cop_alloc_object;
41939 atomic_t fscache_n_cop_lookup_object;
41940@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
41941 seq_puts(m, "FS-Cache statistics\n");
41942
41943 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
41944- atomic_read(&fscache_n_cookie_index),
41945- atomic_read(&fscache_n_cookie_data),
41946- atomic_read(&fscache_n_cookie_special));
41947+ atomic_read_unchecked(&fscache_n_cookie_index),
41948+ atomic_read_unchecked(&fscache_n_cookie_data),
41949+ atomic_read_unchecked(&fscache_n_cookie_special));
41950
41951 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
41952- atomic_read(&fscache_n_object_alloc),
41953- atomic_read(&fscache_n_object_no_alloc),
41954- atomic_read(&fscache_n_object_avail),
41955- atomic_read(&fscache_n_object_dead));
41956+ atomic_read_unchecked(&fscache_n_object_alloc),
41957+ atomic_read_unchecked(&fscache_n_object_no_alloc),
41958+ atomic_read_unchecked(&fscache_n_object_avail),
41959+ atomic_read_unchecked(&fscache_n_object_dead));
41960 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
41961- atomic_read(&fscache_n_checkaux_none),
41962- atomic_read(&fscache_n_checkaux_okay),
41963- atomic_read(&fscache_n_checkaux_update),
41964- atomic_read(&fscache_n_checkaux_obsolete));
41965+ atomic_read_unchecked(&fscache_n_checkaux_none),
41966+ atomic_read_unchecked(&fscache_n_checkaux_okay),
41967+ atomic_read_unchecked(&fscache_n_checkaux_update),
41968+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
41969
41970 seq_printf(m, "Pages : mrk=%u unc=%u\n",
41971- atomic_read(&fscache_n_marks),
41972- atomic_read(&fscache_n_uncaches));
41973+ atomic_read_unchecked(&fscache_n_marks),
41974+ atomic_read_unchecked(&fscache_n_uncaches));
41975
41976 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
41977 " oom=%u\n",
41978- atomic_read(&fscache_n_acquires),
41979- atomic_read(&fscache_n_acquires_null),
41980- atomic_read(&fscache_n_acquires_no_cache),
41981- atomic_read(&fscache_n_acquires_ok),
41982- atomic_read(&fscache_n_acquires_nobufs),
41983- atomic_read(&fscache_n_acquires_oom));
41984+ atomic_read_unchecked(&fscache_n_acquires),
41985+ atomic_read_unchecked(&fscache_n_acquires_null),
41986+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
41987+ atomic_read_unchecked(&fscache_n_acquires_ok),
41988+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
41989+ atomic_read_unchecked(&fscache_n_acquires_oom));
41990
41991 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
41992- atomic_read(&fscache_n_object_lookups),
41993- atomic_read(&fscache_n_object_lookups_negative),
41994- atomic_read(&fscache_n_object_lookups_positive),
41995- atomic_read(&fscache_n_object_created),
41996- atomic_read(&fscache_n_object_lookups_timed_out));
41997+ atomic_read_unchecked(&fscache_n_object_lookups),
41998+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
41999+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
42000+ atomic_read_unchecked(&fscache_n_object_created),
42001+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
42002
42003 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
42004- atomic_read(&fscache_n_updates),
42005- atomic_read(&fscache_n_updates_null),
42006- atomic_read(&fscache_n_updates_run));
42007+ atomic_read_unchecked(&fscache_n_updates),
42008+ atomic_read_unchecked(&fscache_n_updates_null),
42009+ atomic_read_unchecked(&fscache_n_updates_run));
42010
42011 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
42012- atomic_read(&fscache_n_relinquishes),
42013- atomic_read(&fscache_n_relinquishes_null),
42014- atomic_read(&fscache_n_relinquishes_waitcrt),
42015- atomic_read(&fscache_n_relinquishes_retire));
42016+ atomic_read_unchecked(&fscache_n_relinquishes),
42017+ atomic_read_unchecked(&fscache_n_relinquishes_null),
42018+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
42019+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
42020
42021 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
42022- atomic_read(&fscache_n_attr_changed),
42023- atomic_read(&fscache_n_attr_changed_ok),
42024- atomic_read(&fscache_n_attr_changed_nobufs),
42025- atomic_read(&fscache_n_attr_changed_nomem),
42026- atomic_read(&fscache_n_attr_changed_calls));
42027+ atomic_read_unchecked(&fscache_n_attr_changed),
42028+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
42029+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
42030+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
42031+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
42032
42033 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
42034- atomic_read(&fscache_n_allocs),
42035- atomic_read(&fscache_n_allocs_ok),
42036- atomic_read(&fscache_n_allocs_wait),
42037- atomic_read(&fscache_n_allocs_nobufs),
42038- atomic_read(&fscache_n_allocs_intr));
42039+ atomic_read_unchecked(&fscache_n_allocs),
42040+ atomic_read_unchecked(&fscache_n_allocs_ok),
42041+ atomic_read_unchecked(&fscache_n_allocs_wait),
42042+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
42043+ atomic_read_unchecked(&fscache_n_allocs_intr));
42044 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
42045- atomic_read(&fscache_n_alloc_ops),
42046- atomic_read(&fscache_n_alloc_op_waits),
42047- atomic_read(&fscache_n_allocs_object_dead));
42048+ atomic_read_unchecked(&fscache_n_alloc_ops),
42049+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
42050+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
42051
42052 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
42053 " int=%u oom=%u\n",
42054- atomic_read(&fscache_n_retrievals),
42055- atomic_read(&fscache_n_retrievals_ok),
42056- atomic_read(&fscache_n_retrievals_wait),
42057- atomic_read(&fscache_n_retrievals_nodata),
42058- atomic_read(&fscache_n_retrievals_nobufs),
42059- atomic_read(&fscache_n_retrievals_intr),
42060- atomic_read(&fscache_n_retrievals_nomem));
42061+ atomic_read_unchecked(&fscache_n_retrievals),
42062+ atomic_read_unchecked(&fscache_n_retrievals_ok),
42063+ atomic_read_unchecked(&fscache_n_retrievals_wait),
42064+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
42065+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
42066+ atomic_read_unchecked(&fscache_n_retrievals_intr),
42067+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
42068 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
42069- atomic_read(&fscache_n_retrieval_ops),
42070- atomic_read(&fscache_n_retrieval_op_waits),
42071- atomic_read(&fscache_n_retrievals_object_dead));
42072+ atomic_read_unchecked(&fscache_n_retrieval_ops),
42073+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
42074+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
42075
42076 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
42077- atomic_read(&fscache_n_stores),
42078- atomic_read(&fscache_n_stores_ok),
42079- atomic_read(&fscache_n_stores_again),
42080- atomic_read(&fscache_n_stores_nobufs),
42081- atomic_read(&fscache_n_stores_oom));
42082+ atomic_read_unchecked(&fscache_n_stores),
42083+ atomic_read_unchecked(&fscache_n_stores_ok),
42084+ atomic_read_unchecked(&fscache_n_stores_again),
42085+ atomic_read_unchecked(&fscache_n_stores_nobufs),
42086+ atomic_read_unchecked(&fscache_n_stores_oom));
42087 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
42088- atomic_read(&fscache_n_store_ops),
42089- atomic_read(&fscache_n_store_calls),
42090- atomic_read(&fscache_n_store_pages),
42091- atomic_read(&fscache_n_store_radix_deletes),
42092- atomic_read(&fscache_n_store_pages_over_limit));
42093+ atomic_read_unchecked(&fscache_n_store_ops),
42094+ atomic_read_unchecked(&fscache_n_store_calls),
42095+ atomic_read_unchecked(&fscache_n_store_pages),
42096+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
42097+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
42098
42099 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
42100- atomic_read(&fscache_n_store_vmscan_not_storing),
42101- atomic_read(&fscache_n_store_vmscan_gone),
42102- atomic_read(&fscache_n_store_vmscan_busy),
42103- atomic_read(&fscache_n_store_vmscan_cancelled));
42104+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
42105+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
42106+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
42107+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
42108
42109 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
42110- atomic_read(&fscache_n_op_pend),
42111- atomic_read(&fscache_n_op_run),
42112- atomic_read(&fscache_n_op_enqueue),
42113- atomic_read(&fscache_n_op_cancelled),
42114- atomic_read(&fscache_n_op_rejected));
42115+ atomic_read_unchecked(&fscache_n_op_pend),
42116+ atomic_read_unchecked(&fscache_n_op_run),
42117+ atomic_read_unchecked(&fscache_n_op_enqueue),
42118+ atomic_read_unchecked(&fscache_n_op_cancelled),
42119+ atomic_read_unchecked(&fscache_n_op_rejected));
42120 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
42121- atomic_read(&fscache_n_op_deferred_release),
42122- atomic_read(&fscache_n_op_release),
42123- atomic_read(&fscache_n_op_gc));
42124+ atomic_read_unchecked(&fscache_n_op_deferred_release),
42125+ atomic_read_unchecked(&fscache_n_op_release),
42126+ atomic_read_unchecked(&fscache_n_op_gc));
42127
42128 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
42129 atomic_read(&fscache_n_cop_alloc_object),
42130diff -urNp linux-3.0.4/fs/fs_struct.c linux-3.0.4/fs/fs_struct.c
42131--- linux-3.0.4/fs/fs_struct.c 2011-07-21 22:17:23.000000000 -0400
42132+++ linux-3.0.4/fs/fs_struct.c 2011-08-23 21:48:14.000000000 -0400
42133@@ -4,6 +4,7 @@
42134 #include <linux/path.h>
42135 #include <linux/slab.h>
42136 #include <linux/fs_struct.h>
42137+#include <linux/grsecurity.h>
42138 #include "internal.h"
42139
42140 static inline void path_get_longterm(struct path *path)
42141@@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, s
42142 old_root = fs->root;
42143 fs->root = *path;
42144 path_get_longterm(path);
42145+ gr_set_chroot_entries(current, path);
42146 write_seqcount_end(&fs->seq);
42147 spin_unlock(&fs->lock);
42148 if (old_root.dentry)
42149@@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_roo
42150 && fs->root.mnt == old_root->mnt) {
42151 path_get_longterm(new_root);
42152 fs->root = *new_root;
42153+ gr_set_chroot_entries(p, new_root);
42154 count++;
42155 }
42156 if (fs->pwd.dentry == old_root->dentry
42157@@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
42158 spin_lock(&fs->lock);
42159 write_seqcount_begin(&fs->seq);
42160 tsk->fs = NULL;
42161- kill = !--fs->users;
42162+ gr_clear_chroot_entries(tsk);
42163+ kill = !atomic_dec_return(&fs->users);
42164 write_seqcount_end(&fs->seq);
42165 spin_unlock(&fs->lock);
42166 task_unlock(tsk);
42167@@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct
42168 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
42169 /* We don't need to lock fs - think why ;-) */
42170 if (fs) {
42171- fs->users = 1;
42172+ atomic_set(&fs->users, 1);
42173 fs->in_exec = 0;
42174 spin_lock_init(&fs->lock);
42175 seqcount_init(&fs->seq);
42176@@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct
42177 spin_lock(&old->lock);
42178 fs->root = old->root;
42179 path_get_longterm(&fs->root);
42180+ /* instead of calling gr_set_chroot_entries here,
42181+ we call it from every caller of this function
42182+ */
42183 fs->pwd = old->pwd;
42184 path_get_longterm(&fs->pwd);
42185 spin_unlock(&old->lock);
42186@@ -150,8 +157,9 @@ int unshare_fs_struct(void)
42187
42188 task_lock(current);
42189 spin_lock(&fs->lock);
42190- kill = !--fs->users;
42191+ kill = !atomic_dec_return(&fs->users);
42192 current->fs = new_fs;
42193+ gr_set_chroot_entries(current, &new_fs->root);
42194 spin_unlock(&fs->lock);
42195 task_unlock(current);
42196
42197@@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
42198
42199 /* to be mentioned only in INIT_TASK */
42200 struct fs_struct init_fs = {
42201- .users = 1,
42202+ .users = ATOMIC_INIT(1),
42203 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
42204 .seq = SEQCNT_ZERO,
42205 .umask = 0022,
42206@@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
42207 task_lock(current);
42208
42209 spin_lock(&init_fs.lock);
42210- init_fs.users++;
42211+ atomic_inc(&init_fs.users);
42212 spin_unlock(&init_fs.lock);
42213
42214 spin_lock(&fs->lock);
42215 current->fs = &init_fs;
42216- kill = !--fs->users;
42217+ gr_set_chroot_entries(current, &current->fs->root);
42218+ kill = !atomic_dec_return(&fs->users);
42219 spin_unlock(&fs->lock);
42220
42221 task_unlock(current);
42222diff -urNp linux-3.0.4/fs/fuse/cuse.c linux-3.0.4/fs/fuse/cuse.c
42223--- linux-3.0.4/fs/fuse/cuse.c 2011-07-21 22:17:23.000000000 -0400
42224+++ linux-3.0.4/fs/fuse/cuse.c 2011-08-23 21:47:56.000000000 -0400
42225@@ -586,10 +586,12 @@ static int __init cuse_init(void)
42226 INIT_LIST_HEAD(&cuse_conntbl[i]);
42227
42228 /* inherit and extend fuse_dev_operations */
42229- cuse_channel_fops = fuse_dev_operations;
42230- cuse_channel_fops.owner = THIS_MODULE;
42231- cuse_channel_fops.open = cuse_channel_open;
42232- cuse_channel_fops.release = cuse_channel_release;
42233+ pax_open_kernel();
42234+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
42235+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
42236+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
42237+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
42238+ pax_close_kernel();
42239
42240 cuse_class = class_create(THIS_MODULE, "cuse");
42241 if (IS_ERR(cuse_class))
42242diff -urNp linux-3.0.4/fs/fuse/dev.c linux-3.0.4/fs/fuse/dev.c
42243--- linux-3.0.4/fs/fuse/dev.c 2011-09-02 18:11:26.000000000 -0400
42244+++ linux-3.0.4/fs/fuse/dev.c 2011-08-29 23:26:27.000000000 -0400
42245@@ -1238,7 +1238,7 @@ static ssize_t fuse_dev_splice_read(stru
42246 ret = 0;
42247 pipe_lock(pipe);
42248
42249- if (!pipe->readers) {
42250+ if (!atomic_read(&pipe->readers)) {
42251 send_sig(SIGPIPE, current, 0);
42252 if (!ret)
42253 ret = -EPIPE;
42254diff -urNp linux-3.0.4/fs/fuse/dir.c linux-3.0.4/fs/fuse/dir.c
42255--- linux-3.0.4/fs/fuse/dir.c 2011-07-21 22:17:23.000000000 -0400
42256+++ linux-3.0.4/fs/fuse/dir.c 2011-08-23 21:47:56.000000000 -0400
42257@@ -1148,7 +1148,7 @@ static char *read_link(struct dentry *de
42258 return link;
42259 }
42260
42261-static void free_link(char *link)
42262+static void free_link(const char *link)
42263 {
42264 if (!IS_ERR(link))
42265 free_page((unsigned long) link);
42266diff -urNp linux-3.0.4/fs/gfs2/inode.c linux-3.0.4/fs/gfs2/inode.c
42267--- linux-3.0.4/fs/gfs2/inode.c 2011-07-21 22:17:23.000000000 -0400
42268+++ linux-3.0.4/fs/gfs2/inode.c 2011-08-23 21:47:56.000000000 -0400
42269@@ -1525,7 +1525,7 @@ out:
42270
42271 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
42272 {
42273- char *s = nd_get_link(nd);
42274+ const char *s = nd_get_link(nd);
42275 if (!IS_ERR(s))
42276 kfree(s);
42277 }
42278diff -urNp linux-3.0.4/fs/hfsplus/catalog.c linux-3.0.4/fs/hfsplus/catalog.c
42279--- linux-3.0.4/fs/hfsplus/catalog.c 2011-07-21 22:17:23.000000000 -0400
42280+++ linux-3.0.4/fs/hfsplus/catalog.c 2011-08-23 21:48:14.000000000 -0400
42281@@ -179,6 +179,8 @@ int hfsplus_find_cat(struct super_block
42282 int err;
42283 u16 type;
42284
42285+ pax_track_stack();
42286+
42287 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
42288 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
42289 if (err)
42290@@ -210,6 +212,8 @@ int hfsplus_create_cat(u32 cnid, struct
42291 int entry_size;
42292 int err;
42293
42294+ pax_track_stack();
42295+
42296 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n",
42297 str->name, cnid, inode->i_nlink);
42298 hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
42299@@ -349,6 +353,8 @@ int hfsplus_rename_cat(u32 cnid,
42300 int entry_size, type;
42301 int err = 0;
42302
42303+ pax_track_stack();
42304+
42305 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
42306 cnid, src_dir->i_ino, src_name->name,
42307 dst_dir->i_ino, dst_name->name);
42308diff -urNp linux-3.0.4/fs/hfsplus/dir.c linux-3.0.4/fs/hfsplus/dir.c
42309--- linux-3.0.4/fs/hfsplus/dir.c 2011-07-21 22:17:23.000000000 -0400
42310+++ linux-3.0.4/fs/hfsplus/dir.c 2011-08-23 21:48:14.000000000 -0400
42311@@ -129,6 +129,8 @@ static int hfsplus_readdir(struct file *
42312 struct hfsplus_readdir_data *rd;
42313 u16 type;
42314
42315+ pax_track_stack();
42316+
42317 if (filp->f_pos >= inode->i_size)
42318 return 0;
42319
42320diff -urNp linux-3.0.4/fs/hfsplus/inode.c linux-3.0.4/fs/hfsplus/inode.c
42321--- linux-3.0.4/fs/hfsplus/inode.c 2011-07-21 22:17:23.000000000 -0400
42322+++ linux-3.0.4/fs/hfsplus/inode.c 2011-08-23 21:48:14.000000000 -0400
42323@@ -489,6 +489,8 @@ int hfsplus_cat_read_inode(struct inode
42324 int res = 0;
42325 u16 type;
42326
42327+ pax_track_stack();
42328+
42329 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
42330
42331 HFSPLUS_I(inode)->linkid = 0;
42332@@ -552,6 +554,8 @@ int hfsplus_cat_write_inode(struct inode
42333 struct hfs_find_data fd;
42334 hfsplus_cat_entry entry;
42335
42336+ pax_track_stack();
42337+
42338 if (HFSPLUS_IS_RSRC(inode))
42339 main_inode = HFSPLUS_I(inode)->rsrc_inode;
42340
42341diff -urNp linux-3.0.4/fs/hfsplus/ioctl.c linux-3.0.4/fs/hfsplus/ioctl.c
42342--- linux-3.0.4/fs/hfsplus/ioctl.c 2011-07-21 22:17:23.000000000 -0400
42343+++ linux-3.0.4/fs/hfsplus/ioctl.c 2011-08-23 21:48:14.000000000 -0400
42344@@ -122,6 +122,8 @@ int hfsplus_setxattr(struct dentry *dent
42345 struct hfsplus_cat_file *file;
42346 int res;
42347
42348+ pax_track_stack();
42349+
42350 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
42351 return -EOPNOTSUPP;
42352
42353@@ -166,6 +168,8 @@ ssize_t hfsplus_getxattr(struct dentry *
42354 struct hfsplus_cat_file *file;
42355 ssize_t res = 0;
42356
42357+ pax_track_stack();
42358+
42359 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
42360 return -EOPNOTSUPP;
42361
42362diff -urNp linux-3.0.4/fs/hfsplus/super.c linux-3.0.4/fs/hfsplus/super.c
42363--- linux-3.0.4/fs/hfsplus/super.c 2011-07-21 22:17:23.000000000 -0400
42364+++ linux-3.0.4/fs/hfsplus/super.c 2011-08-23 21:48:14.000000000 -0400
42365@@ -340,6 +340,8 @@ static int hfsplus_fill_super(struct sup
42366 struct nls_table *nls = NULL;
42367 int err;
42368
42369+ pax_track_stack();
42370+
42371 err = -EINVAL;
42372 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
42373 if (!sbi)
42374diff -urNp linux-3.0.4/fs/hugetlbfs/inode.c linux-3.0.4/fs/hugetlbfs/inode.c
42375--- linux-3.0.4/fs/hugetlbfs/inode.c 2011-07-21 22:17:23.000000000 -0400
42376+++ linux-3.0.4/fs/hugetlbfs/inode.c 2011-08-23 21:48:14.000000000 -0400
42377@@ -914,7 +914,7 @@ static struct file_system_type hugetlbfs
42378 .kill_sb = kill_litter_super,
42379 };
42380
42381-static struct vfsmount *hugetlbfs_vfsmount;
42382+struct vfsmount *hugetlbfs_vfsmount;
42383
42384 static int can_do_hugetlb_shm(void)
42385 {
42386diff -urNp linux-3.0.4/fs/inode.c linux-3.0.4/fs/inode.c
42387--- linux-3.0.4/fs/inode.c 2011-07-21 22:17:23.000000000 -0400
42388+++ linux-3.0.4/fs/inode.c 2011-08-23 21:47:56.000000000 -0400
42389@@ -829,8 +829,8 @@ unsigned int get_next_ino(void)
42390
42391 #ifdef CONFIG_SMP
42392 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
42393- static atomic_t shared_last_ino;
42394- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
42395+ static atomic_unchecked_t shared_last_ino;
42396+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
42397
42398 res = next - LAST_INO_BATCH;
42399 }
42400diff -urNp linux-3.0.4/fs/jbd/checkpoint.c linux-3.0.4/fs/jbd/checkpoint.c
42401--- linux-3.0.4/fs/jbd/checkpoint.c 2011-07-21 22:17:23.000000000 -0400
42402+++ linux-3.0.4/fs/jbd/checkpoint.c 2011-08-23 21:48:14.000000000 -0400
42403@@ -350,6 +350,8 @@ int log_do_checkpoint(journal_t *journal
42404 tid_t this_tid;
42405 int result;
42406
42407+ pax_track_stack();
42408+
42409 jbd_debug(1, "Start checkpoint\n");
42410
42411 /*
42412diff -urNp linux-3.0.4/fs/jffs2/compr_rtime.c linux-3.0.4/fs/jffs2/compr_rtime.c
42413--- linux-3.0.4/fs/jffs2/compr_rtime.c 2011-07-21 22:17:23.000000000 -0400
42414+++ linux-3.0.4/fs/jffs2/compr_rtime.c 2011-08-23 21:48:14.000000000 -0400
42415@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
42416 int outpos = 0;
42417 int pos=0;
42418
42419+ pax_track_stack();
42420+
42421 memset(positions,0,sizeof(positions));
42422
42423 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
42424@@ -78,6 +80,8 @@ static int jffs2_rtime_decompress(unsign
42425 int outpos = 0;
42426 int pos=0;
42427
42428+ pax_track_stack();
42429+
42430 memset(positions,0,sizeof(positions));
42431
42432 while (outpos<destlen) {
42433diff -urNp linux-3.0.4/fs/jffs2/compr_rubin.c linux-3.0.4/fs/jffs2/compr_rubin.c
42434--- linux-3.0.4/fs/jffs2/compr_rubin.c 2011-07-21 22:17:23.000000000 -0400
42435+++ linux-3.0.4/fs/jffs2/compr_rubin.c 2011-08-23 21:48:14.000000000 -0400
42436@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
42437 int ret;
42438 uint32_t mysrclen, mydstlen;
42439
42440+ pax_track_stack();
42441+
42442 mysrclen = *sourcelen;
42443 mydstlen = *dstlen - 8;
42444
42445diff -urNp linux-3.0.4/fs/jffs2/erase.c linux-3.0.4/fs/jffs2/erase.c
42446--- linux-3.0.4/fs/jffs2/erase.c 2011-07-21 22:17:23.000000000 -0400
42447+++ linux-3.0.4/fs/jffs2/erase.c 2011-08-23 21:47:56.000000000 -0400
42448@@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(stru
42449 struct jffs2_unknown_node marker = {
42450 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
42451 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
42452- .totlen = cpu_to_je32(c->cleanmarker_size)
42453+ .totlen = cpu_to_je32(c->cleanmarker_size),
42454+ .hdr_crc = cpu_to_je32(0)
42455 };
42456
42457 jffs2_prealloc_raw_node_refs(c, jeb, 1);
42458diff -urNp linux-3.0.4/fs/jffs2/wbuf.c linux-3.0.4/fs/jffs2/wbuf.c
42459--- linux-3.0.4/fs/jffs2/wbuf.c 2011-07-21 22:17:23.000000000 -0400
42460+++ linux-3.0.4/fs/jffs2/wbuf.c 2011-08-23 21:47:56.000000000 -0400
42461@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
42462 {
42463 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
42464 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
42465- .totlen = constant_cpu_to_je32(8)
42466+ .totlen = constant_cpu_to_je32(8),
42467+ .hdr_crc = constant_cpu_to_je32(0)
42468 };
42469
42470 /*
42471diff -urNp linux-3.0.4/fs/jffs2/xattr.c linux-3.0.4/fs/jffs2/xattr.c
42472--- linux-3.0.4/fs/jffs2/xattr.c 2011-07-21 22:17:23.000000000 -0400
42473+++ linux-3.0.4/fs/jffs2/xattr.c 2011-08-23 21:48:14.000000000 -0400
42474@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
42475
42476 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
42477
42478+ pax_track_stack();
42479+
42480 /* Phase.1 : Merge same xref */
42481 for (i=0; i < XREF_TMPHASH_SIZE; i++)
42482 xref_tmphash[i] = NULL;
42483diff -urNp linux-3.0.4/fs/jfs/super.c linux-3.0.4/fs/jfs/super.c
42484--- linux-3.0.4/fs/jfs/super.c 2011-07-21 22:17:23.000000000 -0400
42485+++ linux-3.0.4/fs/jfs/super.c 2011-08-23 21:47:56.000000000 -0400
42486@@ -803,7 +803,7 @@ static int __init init_jfs_fs(void)
42487
42488 jfs_inode_cachep =
42489 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
42490- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
42491+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
42492 init_once);
42493 if (jfs_inode_cachep == NULL)
42494 return -ENOMEM;
42495diff -urNp linux-3.0.4/fs/Kconfig.binfmt linux-3.0.4/fs/Kconfig.binfmt
42496--- linux-3.0.4/fs/Kconfig.binfmt 2011-07-21 22:17:23.000000000 -0400
42497+++ linux-3.0.4/fs/Kconfig.binfmt 2011-08-23 21:47:56.000000000 -0400
42498@@ -86,7 +86,7 @@ config HAVE_AOUT
42499
42500 config BINFMT_AOUT
42501 tristate "Kernel support for a.out and ECOFF binaries"
42502- depends on HAVE_AOUT
42503+ depends on HAVE_AOUT && BROKEN
42504 ---help---
42505 A.out (Assembler.OUTput) is a set of formats for libraries and
42506 executables used in the earliest versions of UNIX. Linux used
42507diff -urNp linux-3.0.4/fs/libfs.c linux-3.0.4/fs/libfs.c
42508--- linux-3.0.4/fs/libfs.c 2011-07-21 22:17:23.000000000 -0400
42509+++ linux-3.0.4/fs/libfs.c 2011-08-23 21:47:56.000000000 -0400
42510@@ -163,6 +163,9 @@ int dcache_readdir(struct file * filp, v
42511
42512 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
42513 struct dentry *next;
42514+ char d_name[sizeof(next->d_iname)];
42515+ const unsigned char *name;
42516+
42517 next = list_entry(p, struct dentry, d_u.d_child);
42518 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
42519 if (!simple_positive(next)) {
42520@@ -172,7 +175,12 @@ int dcache_readdir(struct file * filp, v
42521
42522 spin_unlock(&next->d_lock);
42523 spin_unlock(&dentry->d_lock);
42524- if (filldir(dirent, next->d_name.name,
42525+ name = next->d_name.name;
42526+ if (name == next->d_iname) {
42527+ memcpy(d_name, name, next->d_name.len);
42528+ name = d_name;
42529+ }
42530+ if (filldir(dirent, name,
42531 next->d_name.len, filp->f_pos,
42532 next->d_inode->i_ino,
42533 dt_type(next->d_inode)) < 0)
42534diff -urNp linux-3.0.4/fs/lockd/clntproc.c linux-3.0.4/fs/lockd/clntproc.c
42535--- linux-3.0.4/fs/lockd/clntproc.c 2011-07-21 22:17:23.000000000 -0400
42536+++ linux-3.0.4/fs/lockd/clntproc.c 2011-08-23 21:48:14.000000000 -0400
42537@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
42538 /*
42539 * Cookie counter for NLM requests
42540 */
42541-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
42542+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
42543
42544 void nlmclnt_next_cookie(struct nlm_cookie *c)
42545 {
42546- u32 cookie = atomic_inc_return(&nlm_cookie);
42547+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
42548
42549 memcpy(c->data, &cookie, 4);
42550 c->len=4;
42551@@ -620,6 +620,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
42552 struct nlm_rqst reqst, *req;
42553 int status;
42554
42555+ pax_track_stack();
42556+
42557 req = &reqst;
42558 memset(req, 0, sizeof(*req));
42559 locks_init_lock(&req->a_args.lock.fl);
42560diff -urNp linux-3.0.4/fs/locks.c linux-3.0.4/fs/locks.c
42561--- linux-3.0.4/fs/locks.c 2011-07-21 22:17:23.000000000 -0400
42562+++ linux-3.0.4/fs/locks.c 2011-08-23 21:47:56.000000000 -0400
42563@@ -2043,16 +2043,16 @@ void locks_remove_flock(struct file *fil
42564 return;
42565
42566 if (filp->f_op && filp->f_op->flock) {
42567- struct file_lock fl = {
42568+ struct file_lock flock = {
42569 .fl_pid = current->tgid,
42570 .fl_file = filp,
42571 .fl_flags = FL_FLOCK,
42572 .fl_type = F_UNLCK,
42573 .fl_end = OFFSET_MAX,
42574 };
42575- filp->f_op->flock(filp, F_SETLKW, &fl);
42576- if (fl.fl_ops && fl.fl_ops->fl_release_private)
42577- fl.fl_ops->fl_release_private(&fl);
42578+ filp->f_op->flock(filp, F_SETLKW, &flock);
42579+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
42580+ flock.fl_ops->fl_release_private(&flock);
42581 }
42582
42583 lock_flocks();
42584diff -urNp linux-3.0.4/fs/logfs/super.c linux-3.0.4/fs/logfs/super.c
42585--- linux-3.0.4/fs/logfs/super.c 2011-07-21 22:17:23.000000000 -0400
42586+++ linux-3.0.4/fs/logfs/super.c 2011-08-23 21:48:14.000000000 -0400
42587@@ -266,6 +266,8 @@ static int logfs_recover_sb(struct super
42588 struct logfs_disk_super _ds1, *ds1 = &_ds1;
42589 int err, valid0, valid1;
42590
42591+ pax_track_stack();
42592+
42593 /* read first superblock */
42594 err = wbuf_read(sb, super->s_sb_ofs[0], sizeof(*ds0), ds0);
42595 if (err)
42596diff -urNp linux-3.0.4/fs/namei.c linux-3.0.4/fs/namei.c
42597--- linux-3.0.4/fs/namei.c 2011-07-21 22:17:23.000000000 -0400
42598+++ linux-3.0.4/fs/namei.c 2011-10-06 03:40:11.000000000 -0400
42599@@ -237,21 +237,31 @@ int generic_permission(struct inode *ino
42600 return ret;
42601
42602 /*
42603- * Read/write DACs are always overridable.
42604- * Executable DACs are overridable for all directories and
42605- * for non-directories that have least one exec bit set.
42606+ * Searching includes executable on directories, else just read.
42607 */
42608- if (!(mask & MAY_EXEC) || execute_ok(inode))
42609- if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
42610+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
42611+ if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE))) {
42612+#ifdef CONFIG_GRKERNSEC
42613+ if (flags & IPERM_FLAG_RCU)
42614+ return -ECHILD;
42615+#endif
42616+ if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
42617 return 0;
42618+ }
42619
42620 /*
42621- * Searching includes executable on directories, else just read.
42622+ * Read/write DACs are always overridable.
42623+ * Executable DACs are overridable for all directories and
42624+ * for non-directories that have least one exec bit set.
42625 */
42626- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
42627- if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
42628- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
42629+ if (!(mask & MAY_EXEC) || execute_ok(inode)) {
42630+#ifdef CONFIG_GRKERNSEC
42631+ if (flags & IPERM_FLAG_RCU)
42632+ return -ECHILD;
42633+#endif
42634+ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
42635 return 0;
42636+ }
42637
42638 return -EACCES;
42639 }
42640@@ -547,6 +557,9 @@ static int complete_walk(struct nameidat
42641 br_read_unlock(vfsmount_lock);
42642 }
42643
42644+ if (!(nd->flags & LOOKUP_PARENT) && !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
42645+ return -ENOENT;
42646+
42647 if (likely(!(nd->flags & LOOKUP_JUMPED)))
42648 return 0;
42649
42650@@ -593,9 +606,16 @@ static inline int exec_permission(struct
42651 if (ret == -ECHILD)
42652 return ret;
42653
42654- if (ns_capable(ns, CAP_DAC_OVERRIDE) ||
42655- ns_capable(ns, CAP_DAC_READ_SEARCH))
42656+ if (ns_capable_nolog(ns, CAP_DAC_OVERRIDE))
42657 goto ok;
42658+ else {
42659+#ifdef CONFIG_GRKERNSEC
42660+ if (flags & IPERM_FLAG_RCU)
42661+ return -ECHILD;
42662+#endif
42663+ if (ns_capable(ns, CAP_DAC_READ_SEARCH) || ns_capable(ns, CAP_DAC_OVERRIDE))
42664+ goto ok;
42665+ }
42666
42667 return ret;
42668 ok:
42669@@ -703,11 +723,26 @@ follow_link(struct path *link, struct na
42670 return error;
42671 }
42672
42673+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
42674+ dentry->d_inode, dentry, nd->path.mnt)) {
42675+ error = -EACCES;
42676+ *p = ERR_PTR(error); /* no ->put_link(), please */
42677+ path_put(&nd->path);
42678+ return error;
42679+ }
42680+
42681+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
42682+ error = -ENOENT;
42683+ *p = ERR_PTR(error); /* no ->put_link(), please */
42684+ path_put(&nd->path);
42685+ return error;
42686+ }
42687+
42688 nd->last_type = LAST_BIND;
42689 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
42690 error = PTR_ERR(*p);
42691 if (!IS_ERR(*p)) {
42692- char *s = nd_get_link(nd);
42693+ const char *s = nd_get_link(nd);
42694 error = 0;
42695 if (s)
42696 error = __vfs_follow_link(nd, s);
42697@@ -1625,6 +1660,9 @@ static int do_path_lookup(int dfd, const
42698 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
42699
42700 if (likely(!retval)) {
42701+ if (*name != '/' && nd->path.dentry && nd->inode && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
42702+ return -ENOENT;
42703+
42704 if (unlikely(!audit_dummy_context())) {
42705 if (nd->path.dentry && nd->inode)
42706 audit_inode(name, nd->path.dentry);
42707@@ -1935,6 +1973,30 @@ int vfs_create(struct inode *dir, struct
42708 return error;
42709 }
42710
42711+/*
42712+ * Note that while the flag value (low two bits) for sys_open means:
42713+ * 00 - read-only
42714+ * 01 - write-only
42715+ * 10 - read-write
42716+ * 11 - special
42717+ * it is changed into
42718+ * 00 - no permissions needed
42719+ * 01 - read-permission
42720+ * 10 - write-permission
42721+ * 11 - read-write
42722+ * for the internal routines (ie open_namei()/follow_link() etc)
42723+ * This is more logical, and also allows the 00 "no perm needed"
42724+ * to be used for symlinks (where the permissions are checked
42725+ * later).
42726+ *
42727+*/
42728+static inline int open_to_namei_flags(int flag)
42729+{
42730+ if ((flag+1) & O_ACCMODE)
42731+ flag++;
42732+ return flag;
42733+}
42734+
42735 static int may_open(struct path *path, int acc_mode, int flag)
42736 {
42737 struct dentry *dentry = path->dentry;
42738@@ -1987,7 +2049,27 @@ static int may_open(struct path *path, i
42739 /*
42740 * Ensure there are no outstanding leases on the file.
42741 */
42742- return break_lease(inode, flag);
42743+ error = break_lease(inode, flag);
42744+
42745+ if (error)
42746+ return error;
42747+
42748+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
42749+ error = -EPERM;
42750+ goto exit;
42751+ }
42752+
42753+ if (gr_handle_rawio(inode)) {
42754+ error = -EPERM;
42755+ goto exit;
42756+ }
42757+
42758+ if (!gr_acl_handle_open(dentry, path->mnt, open_to_namei_flags(flag))) {
42759+ error = -EACCES;
42760+ goto exit;
42761+ }
42762+exit:
42763+ return error;
42764 }
42765
42766 static int handle_truncate(struct file *filp)
42767@@ -2013,30 +2095,6 @@ static int handle_truncate(struct file *
42768 }
42769
42770 /*
42771- * Note that while the flag value (low two bits) for sys_open means:
42772- * 00 - read-only
42773- * 01 - write-only
42774- * 10 - read-write
42775- * 11 - special
42776- * it is changed into
42777- * 00 - no permissions needed
42778- * 01 - read-permission
42779- * 10 - write-permission
42780- * 11 - read-write
42781- * for the internal routines (ie open_namei()/follow_link() etc)
42782- * This is more logical, and also allows the 00 "no perm needed"
42783- * to be used for symlinks (where the permissions are checked
42784- * later).
42785- *
42786-*/
42787-static inline int open_to_namei_flags(int flag)
42788-{
42789- if ((flag+1) & O_ACCMODE)
42790- flag++;
42791- return flag;
42792-}
42793-
42794-/*
42795 * Handle the last step of open()
42796 */
42797 static struct file *do_last(struct nameidata *nd, struct path *path,
42798@@ -2045,6 +2103,7 @@ static struct file *do_last(struct namei
42799 struct dentry *dir = nd->path.dentry;
42800 struct dentry *dentry;
42801 int open_flag = op->open_flag;
42802+ int flag = open_to_namei_flags(open_flag);
42803 int will_truncate = open_flag & O_TRUNC;
42804 int want_write = 0;
42805 int acc_mode = op->acc_mode;
42806@@ -2132,6 +2191,12 @@ static struct file *do_last(struct namei
42807 /* Negative dentry, just create the file */
42808 if (!dentry->d_inode) {
42809 int mode = op->mode;
42810+
42811+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, flag, mode)) {
42812+ error = -EACCES;
42813+ goto exit_mutex_unlock;
42814+ }
42815+
42816 if (!IS_POSIXACL(dir->d_inode))
42817 mode &= ~current_umask();
42818 /*
42819@@ -2155,6 +2220,8 @@ static struct file *do_last(struct namei
42820 error = vfs_create(dir->d_inode, dentry, mode, nd);
42821 if (error)
42822 goto exit_mutex_unlock;
42823+ else
42824+ gr_handle_create(path->dentry, path->mnt);
42825 mutex_unlock(&dir->d_inode->i_mutex);
42826 dput(nd->path.dentry);
42827 nd->path.dentry = dentry;
42828@@ -2164,6 +2231,14 @@ static struct file *do_last(struct namei
42829 /*
42830 * It already exists.
42831 */
42832+
42833+ /* only check if O_CREAT is specified, all other checks need to go
42834+ into may_open */
42835+ if (gr_handle_fifo(path->dentry, path->mnt, dir, flag, acc_mode)) {
42836+ error = -EACCES;
42837+ goto exit_mutex_unlock;
42838+ }
42839+
42840 mutex_unlock(&dir->d_inode->i_mutex);
42841 audit_inode(pathname, path->dentry);
42842
42843@@ -2450,6 +2525,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
42844 error = may_mknod(mode);
42845 if (error)
42846 goto out_dput;
42847+
42848+ if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
42849+ error = -EPERM;
42850+ goto out_dput;
42851+ }
42852+
42853+ if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
42854+ error = -EACCES;
42855+ goto out_dput;
42856+ }
42857+
42858 error = mnt_want_write(nd.path.mnt);
42859 if (error)
42860 goto out_dput;
42861@@ -2470,6 +2556,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
42862 }
42863 out_drop_write:
42864 mnt_drop_write(nd.path.mnt);
42865+
42866+ if (!error)
42867+ gr_handle_create(dentry, nd.path.mnt);
42868 out_dput:
42869 dput(dentry);
42870 out_unlock:
42871@@ -2522,6 +2611,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
42872 if (IS_ERR(dentry))
42873 goto out_unlock;
42874
42875+ if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
42876+ error = -EACCES;
42877+ goto out_dput;
42878+ }
42879+
42880 if (!IS_POSIXACL(nd.path.dentry->d_inode))
42881 mode &= ~current_umask();
42882 error = mnt_want_write(nd.path.mnt);
42883@@ -2533,6 +2627,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
42884 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
42885 out_drop_write:
42886 mnt_drop_write(nd.path.mnt);
42887+
42888+ if (!error)
42889+ gr_handle_create(dentry, nd.path.mnt);
42890+
42891 out_dput:
42892 dput(dentry);
42893 out_unlock:
42894@@ -2613,6 +2711,8 @@ static long do_rmdir(int dfd, const char
42895 char * name;
42896 struct dentry *dentry;
42897 struct nameidata nd;
42898+ ino_t saved_ino = 0;
42899+ dev_t saved_dev = 0;
42900
42901 error = user_path_parent(dfd, pathname, &nd, &name);
42902 if (error)
42903@@ -2641,6 +2741,17 @@ static long do_rmdir(int dfd, const char
42904 error = -ENOENT;
42905 goto exit3;
42906 }
42907+
42908+ if (dentry->d_inode->i_nlink <= 1) {
42909+ saved_ino = dentry->d_inode->i_ino;
42910+ saved_dev = gr_get_dev_from_dentry(dentry);
42911+ }
42912+
42913+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
42914+ error = -EACCES;
42915+ goto exit3;
42916+ }
42917+
42918 error = mnt_want_write(nd.path.mnt);
42919 if (error)
42920 goto exit3;
42921@@ -2648,6 +2759,8 @@ static long do_rmdir(int dfd, const char
42922 if (error)
42923 goto exit4;
42924 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
42925+ if (!error && (saved_dev || saved_ino))
42926+ gr_handle_delete(saved_ino, saved_dev);
42927 exit4:
42928 mnt_drop_write(nd.path.mnt);
42929 exit3:
42930@@ -2710,6 +2823,8 @@ static long do_unlinkat(int dfd, const c
42931 struct dentry *dentry;
42932 struct nameidata nd;
42933 struct inode *inode = NULL;
42934+ ino_t saved_ino = 0;
42935+ dev_t saved_dev = 0;
42936
42937 error = user_path_parent(dfd, pathname, &nd, &name);
42938 if (error)
42939@@ -2732,6 +2847,16 @@ static long do_unlinkat(int dfd, const c
42940 if (!inode)
42941 goto slashes;
42942 ihold(inode);
42943+
42944+ if (inode->i_nlink <= 1) {
42945+ saved_ino = inode->i_ino;
42946+ saved_dev = gr_get_dev_from_dentry(dentry);
42947+ }
42948+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
42949+ error = -EACCES;
42950+ goto exit2;
42951+ }
42952+
42953 error = mnt_want_write(nd.path.mnt);
42954 if (error)
42955 goto exit2;
42956@@ -2739,6 +2864,8 @@ static long do_unlinkat(int dfd, const c
42957 if (error)
42958 goto exit3;
42959 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
42960+ if (!error && (saved_ino || saved_dev))
42961+ gr_handle_delete(saved_ino, saved_dev);
42962 exit3:
42963 mnt_drop_write(nd.path.mnt);
42964 exit2:
42965@@ -2816,6 +2943,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
42966 if (IS_ERR(dentry))
42967 goto out_unlock;
42968
42969+ if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
42970+ error = -EACCES;
42971+ goto out_dput;
42972+ }
42973+
42974 error = mnt_want_write(nd.path.mnt);
42975 if (error)
42976 goto out_dput;
42977@@ -2823,6 +2955,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
42978 if (error)
42979 goto out_drop_write;
42980 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
42981+ if (!error)
42982+ gr_handle_create(dentry, nd.path.mnt);
42983 out_drop_write:
42984 mnt_drop_write(nd.path.mnt);
42985 out_dput:
42986@@ -2931,6 +3065,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
42987 error = PTR_ERR(new_dentry);
42988 if (IS_ERR(new_dentry))
42989 goto out_unlock;
42990+
42991+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
42992+ old_path.dentry->d_inode,
42993+ old_path.dentry->d_inode->i_mode, to)) {
42994+ error = -EACCES;
42995+ goto out_dput;
42996+ }
42997+
42998+ if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
42999+ old_path.dentry, old_path.mnt, to)) {
43000+ error = -EACCES;
43001+ goto out_dput;
43002+ }
43003+
43004 error = mnt_want_write(nd.path.mnt);
43005 if (error)
43006 goto out_dput;
43007@@ -2938,6 +3086,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
43008 if (error)
43009 goto out_drop_write;
43010 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
43011+ if (!error)
43012+ gr_handle_create(new_dentry, nd.path.mnt);
43013 out_drop_write:
43014 mnt_drop_write(nd.path.mnt);
43015 out_dput:
43016@@ -3113,6 +3263,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
43017 char *to;
43018 int error;
43019
43020+ pax_track_stack();
43021+
43022 error = user_path_parent(olddfd, oldname, &oldnd, &from);
43023 if (error)
43024 goto exit;
43025@@ -3169,6 +3321,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
43026 if (new_dentry == trap)
43027 goto exit5;
43028
43029+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
43030+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
43031+ to);
43032+ if (error)
43033+ goto exit5;
43034+
43035 error = mnt_want_write(oldnd.path.mnt);
43036 if (error)
43037 goto exit5;
43038@@ -3178,6 +3336,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
43039 goto exit6;
43040 error = vfs_rename(old_dir->d_inode, old_dentry,
43041 new_dir->d_inode, new_dentry);
43042+ if (!error)
43043+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
43044+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
43045 exit6:
43046 mnt_drop_write(oldnd.path.mnt);
43047 exit5:
43048@@ -3203,6 +3364,8 @@ SYSCALL_DEFINE2(rename, const char __use
43049
43050 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
43051 {
43052+ char tmpbuf[64];
43053+ const char *newlink;
43054 int len;
43055
43056 len = PTR_ERR(link);
43057@@ -3212,7 +3375,14 @@ int vfs_readlink(struct dentry *dentry,
43058 len = strlen(link);
43059 if (len > (unsigned) buflen)
43060 len = buflen;
43061- if (copy_to_user(buffer, link, len))
43062+
43063+ if (len < sizeof(tmpbuf)) {
43064+ memcpy(tmpbuf, link, len);
43065+ newlink = tmpbuf;
43066+ } else
43067+ newlink = link;
43068+
43069+ if (copy_to_user(buffer, newlink, len))
43070 len = -EFAULT;
43071 out:
43072 return len;
43073diff -urNp linux-3.0.4/fs/namespace.c linux-3.0.4/fs/namespace.c
43074--- linux-3.0.4/fs/namespace.c 2011-07-21 22:17:23.000000000 -0400
43075+++ linux-3.0.4/fs/namespace.c 2011-08-23 21:48:14.000000000 -0400
43076@@ -1328,6 +1328,9 @@ static int do_umount(struct vfsmount *mn
43077 if (!(sb->s_flags & MS_RDONLY))
43078 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
43079 up_write(&sb->s_umount);
43080+
43081+ gr_log_remount(mnt->mnt_devname, retval);
43082+
43083 return retval;
43084 }
43085
43086@@ -1347,6 +1350,9 @@ static int do_umount(struct vfsmount *mn
43087 br_write_unlock(vfsmount_lock);
43088 up_write(&namespace_sem);
43089 release_mounts(&umount_list);
43090+
43091+ gr_log_unmount(mnt->mnt_devname, retval);
43092+
43093 return retval;
43094 }
43095
43096@@ -2338,6 +2344,16 @@ long do_mount(char *dev_name, char *dir_
43097 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
43098 MS_STRICTATIME);
43099
43100+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
43101+ retval = -EPERM;
43102+ goto dput_out;
43103+ }
43104+
43105+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
43106+ retval = -EPERM;
43107+ goto dput_out;
43108+ }
43109+
43110 if (flags & MS_REMOUNT)
43111 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
43112 data_page);
43113@@ -2352,6 +2368,9 @@ long do_mount(char *dev_name, char *dir_
43114 dev_name, data_page);
43115 dput_out:
43116 path_put(&path);
43117+
43118+ gr_log_mount(dev_name, dir_name, retval);
43119+
43120 return retval;
43121 }
43122
43123@@ -2575,6 +2594,11 @@ SYSCALL_DEFINE2(pivot_root, const char _
43124 if (error)
43125 goto out2;
43126
43127+ if (gr_handle_chroot_pivot()) {
43128+ error = -EPERM;
43129+ goto out2;
43130+ }
43131+
43132 get_fs_root(current->fs, &root);
43133 error = lock_mount(&old);
43134 if (error)
43135diff -urNp linux-3.0.4/fs/ncpfs/dir.c linux-3.0.4/fs/ncpfs/dir.c
43136--- linux-3.0.4/fs/ncpfs/dir.c 2011-07-21 22:17:23.000000000 -0400
43137+++ linux-3.0.4/fs/ncpfs/dir.c 2011-08-23 21:48:14.000000000 -0400
43138@@ -299,6 +299,8 @@ ncp_lookup_validate(struct dentry *dentr
43139 int res, val = 0, len;
43140 __u8 __name[NCP_MAXPATHLEN + 1];
43141
43142+ pax_track_stack();
43143+
43144 if (dentry == dentry->d_sb->s_root)
43145 return 1;
43146
43147@@ -844,6 +846,8 @@ static struct dentry *ncp_lookup(struct
43148 int error, res, len;
43149 __u8 __name[NCP_MAXPATHLEN + 1];
43150
43151+ pax_track_stack();
43152+
43153 error = -EIO;
43154 if (!ncp_conn_valid(server))
43155 goto finished;
43156@@ -931,6 +935,8 @@ int ncp_create_new(struct inode *dir, st
43157 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
43158 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
43159
43160+ pax_track_stack();
43161+
43162 ncp_age_dentry(server, dentry);
43163 len = sizeof(__name);
43164 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
43165@@ -992,6 +998,8 @@ static int ncp_mkdir(struct inode *dir,
43166 int error, len;
43167 __u8 __name[NCP_MAXPATHLEN + 1];
43168
43169+ pax_track_stack();
43170+
43171 DPRINTK("ncp_mkdir: making %s/%s\n",
43172 dentry->d_parent->d_name.name, dentry->d_name.name);
43173
43174@@ -1140,6 +1148,8 @@ static int ncp_rename(struct inode *old_
43175 int old_len, new_len;
43176 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
43177
43178+ pax_track_stack();
43179+
43180 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
43181 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
43182 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
43183diff -urNp linux-3.0.4/fs/ncpfs/inode.c linux-3.0.4/fs/ncpfs/inode.c
43184--- linux-3.0.4/fs/ncpfs/inode.c 2011-07-21 22:17:23.000000000 -0400
43185+++ linux-3.0.4/fs/ncpfs/inode.c 2011-08-23 21:48:14.000000000 -0400
43186@@ -461,6 +461,8 @@ static int ncp_fill_super(struct super_b
43187 #endif
43188 struct ncp_entry_info finfo;
43189
43190+ pax_track_stack();
43191+
43192 memset(&data, 0, sizeof(data));
43193 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
43194 if (!server)
43195diff -urNp linux-3.0.4/fs/nfs/inode.c linux-3.0.4/fs/nfs/inode.c
43196--- linux-3.0.4/fs/nfs/inode.c 2011-07-21 22:17:23.000000000 -0400
43197+++ linux-3.0.4/fs/nfs/inode.c 2011-08-23 21:47:56.000000000 -0400
43198@@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct
43199 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
43200 nfsi->attrtimeo_timestamp = jiffies;
43201
43202- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
43203+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
43204 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
43205 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
43206 else
43207@@ -1000,16 +1000,16 @@ static int nfs_size_need_update(const st
43208 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
43209 }
43210
43211-static atomic_long_t nfs_attr_generation_counter;
43212+static atomic_long_unchecked_t nfs_attr_generation_counter;
43213
43214 static unsigned long nfs_read_attr_generation_counter(void)
43215 {
43216- return atomic_long_read(&nfs_attr_generation_counter);
43217+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
43218 }
43219
43220 unsigned long nfs_inc_attr_generation_counter(void)
43221 {
43222- return atomic_long_inc_return(&nfs_attr_generation_counter);
43223+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
43224 }
43225
43226 void nfs_fattr_init(struct nfs_fattr *fattr)
43227diff -urNp linux-3.0.4/fs/nfsd/nfs4state.c linux-3.0.4/fs/nfsd/nfs4state.c
43228--- linux-3.0.4/fs/nfsd/nfs4state.c 2011-09-02 18:11:21.000000000 -0400
43229+++ linux-3.0.4/fs/nfsd/nfs4state.c 2011-08-23 21:48:14.000000000 -0400
43230@@ -3794,6 +3794,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
43231 unsigned int strhashval;
43232 int err;
43233
43234+ pax_track_stack();
43235+
43236 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
43237 (long long) lock->lk_offset,
43238 (long long) lock->lk_length);
43239diff -urNp linux-3.0.4/fs/nfsd/nfs4xdr.c linux-3.0.4/fs/nfsd/nfs4xdr.c
43240--- linux-3.0.4/fs/nfsd/nfs4xdr.c 2011-07-21 22:17:23.000000000 -0400
43241+++ linux-3.0.4/fs/nfsd/nfs4xdr.c 2011-08-23 21:48:14.000000000 -0400
43242@@ -1788,6 +1788,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
43243 .dentry = dentry,
43244 };
43245
43246+ pax_track_stack();
43247+
43248 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
43249 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
43250 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
43251diff -urNp linux-3.0.4/fs/nfsd/vfs.c linux-3.0.4/fs/nfsd/vfs.c
43252--- linux-3.0.4/fs/nfsd/vfs.c 2011-07-21 22:17:23.000000000 -0400
43253+++ linux-3.0.4/fs/nfsd/vfs.c 2011-10-06 04:17:55.000000000 -0400
43254@@ -896,7 +896,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
43255 } else {
43256 oldfs = get_fs();
43257 set_fs(KERNEL_DS);
43258- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
43259+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
43260 set_fs(oldfs);
43261 }
43262
43263@@ -1000,7 +1000,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
43264
43265 /* Write the data. */
43266 oldfs = get_fs(); set_fs(KERNEL_DS);
43267- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
43268+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
43269 set_fs(oldfs);
43270 if (host_err < 0)
43271 goto out_nfserr;
43272@@ -1535,7 +1535,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
43273 */
43274
43275 oldfs = get_fs(); set_fs(KERNEL_DS);
43276- host_err = inode->i_op->readlink(dentry, buf, *lenp);
43277+ host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
43278 set_fs(oldfs);
43279
43280 if (host_err < 0)
43281diff -urNp linux-3.0.4/fs/notify/fanotify/fanotify_user.c linux-3.0.4/fs/notify/fanotify/fanotify_user.c
43282--- linux-3.0.4/fs/notify/fanotify/fanotify_user.c 2011-07-21 22:17:23.000000000 -0400
43283+++ linux-3.0.4/fs/notify/fanotify/fanotify_user.c 2011-08-23 21:48:14.000000000 -0400
43284@@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct
43285 goto out_close_fd;
43286
43287 ret = -EFAULT;
43288- if (copy_to_user(buf, &fanotify_event_metadata,
43289+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
43290+ copy_to_user(buf, &fanotify_event_metadata,
43291 fanotify_event_metadata.event_len))
43292 goto out_kill_access_response;
43293
43294diff -urNp linux-3.0.4/fs/notify/notification.c linux-3.0.4/fs/notify/notification.c
43295--- linux-3.0.4/fs/notify/notification.c 2011-07-21 22:17:23.000000000 -0400
43296+++ linux-3.0.4/fs/notify/notification.c 2011-08-23 21:47:56.000000000 -0400
43297@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
43298 * get set to 0 so it will never get 'freed'
43299 */
43300 static struct fsnotify_event *q_overflow_event;
43301-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
43302+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
43303
43304 /**
43305 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
43306@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
43307 */
43308 u32 fsnotify_get_cookie(void)
43309 {
43310- return atomic_inc_return(&fsnotify_sync_cookie);
43311+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
43312 }
43313 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
43314
43315diff -urNp linux-3.0.4/fs/ntfs/dir.c linux-3.0.4/fs/ntfs/dir.c
43316--- linux-3.0.4/fs/ntfs/dir.c 2011-07-21 22:17:23.000000000 -0400
43317+++ linux-3.0.4/fs/ntfs/dir.c 2011-08-23 21:47:56.000000000 -0400
43318@@ -1329,7 +1329,7 @@ find_next_index_buffer:
43319 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
43320 ~(s64)(ndir->itype.index.block_size - 1)));
43321 /* Bounds checks. */
43322- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
43323+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
43324 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
43325 "inode 0x%lx or driver bug.", vdir->i_ino);
43326 goto err_out;
43327diff -urNp linux-3.0.4/fs/ntfs/file.c linux-3.0.4/fs/ntfs/file.c
43328--- linux-3.0.4/fs/ntfs/file.c 2011-07-21 22:17:23.000000000 -0400
43329+++ linux-3.0.4/fs/ntfs/file.c 2011-08-23 21:47:56.000000000 -0400
43330@@ -2222,6 +2222,6 @@ const struct inode_operations ntfs_file_
43331 #endif /* NTFS_RW */
43332 };
43333
43334-const struct file_operations ntfs_empty_file_ops = {};
43335+const struct file_operations ntfs_empty_file_ops __read_only;
43336
43337-const struct inode_operations ntfs_empty_inode_ops = {};
43338+const struct inode_operations ntfs_empty_inode_ops __read_only;
43339diff -urNp linux-3.0.4/fs/ocfs2/localalloc.c linux-3.0.4/fs/ocfs2/localalloc.c
43340--- linux-3.0.4/fs/ocfs2/localalloc.c 2011-07-21 22:17:23.000000000 -0400
43341+++ linux-3.0.4/fs/ocfs2/localalloc.c 2011-08-23 21:47:56.000000000 -0400
43342@@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_windo
43343 goto bail;
43344 }
43345
43346- atomic_inc(&osb->alloc_stats.moves);
43347+ atomic_inc_unchecked(&osb->alloc_stats.moves);
43348
43349 bail:
43350 if (handle)
43351diff -urNp linux-3.0.4/fs/ocfs2/namei.c linux-3.0.4/fs/ocfs2/namei.c
43352--- linux-3.0.4/fs/ocfs2/namei.c 2011-07-21 22:17:23.000000000 -0400
43353+++ linux-3.0.4/fs/ocfs2/namei.c 2011-08-23 21:48:14.000000000 -0400
43354@@ -1063,6 +1063,8 @@ static int ocfs2_rename(struct inode *ol
43355 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
43356 struct ocfs2_dir_lookup_result target_insert = { NULL, };
43357
43358+ pax_track_stack();
43359+
43360 /* At some point it might be nice to break this function up a
43361 * bit. */
43362
43363diff -urNp linux-3.0.4/fs/ocfs2/ocfs2.h linux-3.0.4/fs/ocfs2/ocfs2.h
43364--- linux-3.0.4/fs/ocfs2/ocfs2.h 2011-07-21 22:17:23.000000000 -0400
43365+++ linux-3.0.4/fs/ocfs2/ocfs2.h 2011-08-23 21:47:56.000000000 -0400
43366@@ -235,11 +235,11 @@ enum ocfs2_vol_state
43367
43368 struct ocfs2_alloc_stats
43369 {
43370- atomic_t moves;
43371- atomic_t local_data;
43372- atomic_t bitmap_data;
43373- atomic_t bg_allocs;
43374- atomic_t bg_extends;
43375+ atomic_unchecked_t moves;
43376+ atomic_unchecked_t local_data;
43377+ atomic_unchecked_t bitmap_data;
43378+ atomic_unchecked_t bg_allocs;
43379+ atomic_unchecked_t bg_extends;
43380 };
43381
43382 enum ocfs2_local_alloc_state
43383diff -urNp linux-3.0.4/fs/ocfs2/suballoc.c linux-3.0.4/fs/ocfs2/suballoc.c
43384--- linux-3.0.4/fs/ocfs2/suballoc.c 2011-07-21 22:17:23.000000000 -0400
43385+++ linux-3.0.4/fs/ocfs2/suballoc.c 2011-08-23 21:47:56.000000000 -0400
43386@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(s
43387 mlog_errno(status);
43388 goto bail;
43389 }
43390- atomic_inc(&osb->alloc_stats.bg_extends);
43391+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
43392
43393 /* You should never ask for this much metadata */
43394 BUG_ON(bits_wanted >
43395@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handl
43396 mlog_errno(status);
43397 goto bail;
43398 }
43399- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
43400+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
43401
43402 *suballoc_loc = res.sr_bg_blkno;
43403 *suballoc_bit_start = res.sr_bit_offset;
43404@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_
43405 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
43406 res->sr_bits);
43407
43408- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
43409+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
43410
43411 BUG_ON(res->sr_bits != 1);
43412
43413@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *hand
43414 mlog_errno(status);
43415 goto bail;
43416 }
43417- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
43418+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
43419
43420 BUG_ON(res.sr_bits != 1);
43421
43422@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *han
43423 cluster_start,
43424 num_clusters);
43425 if (!status)
43426- atomic_inc(&osb->alloc_stats.local_data);
43427+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
43428 } else {
43429 if (min_clusters > (osb->bitmap_cpg - 1)) {
43430 /* The only paths asking for contiguousness
43431@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *han
43432 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
43433 res.sr_bg_blkno,
43434 res.sr_bit_offset);
43435- atomic_inc(&osb->alloc_stats.bitmap_data);
43436+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
43437 *num_clusters = res.sr_bits;
43438 }
43439 }
43440diff -urNp linux-3.0.4/fs/ocfs2/super.c linux-3.0.4/fs/ocfs2/super.c
43441--- linux-3.0.4/fs/ocfs2/super.c 2011-07-21 22:17:23.000000000 -0400
43442+++ linux-3.0.4/fs/ocfs2/super.c 2011-08-23 21:47:56.000000000 -0400
43443@@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
43444 "%10s => GlobalAllocs: %d LocalAllocs: %d "
43445 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
43446 "Stats",
43447- atomic_read(&osb->alloc_stats.bitmap_data),
43448- atomic_read(&osb->alloc_stats.local_data),
43449- atomic_read(&osb->alloc_stats.bg_allocs),
43450- atomic_read(&osb->alloc_stats.moves),
43451- atomic_read(&osb->alloc_stats.bg_extends));
43452+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
43453+ atomic_read_unchecked(&osb->alloc_stats.local_data),
43454+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
43455+ atomic_read_unchecked(&osb->alloc_stats.moves),
43456+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
43457
43458 out += snprintf(buf + out, len - out,
43459 "%10s => State: %u Descriptor: %llu Size: %u bits "
43460@@ -2112,11 +2112,11 @@ static int ocfs2_initialize_super(struct
43461 spin_lock_init(&osb->osb_xattr_lock);
43462 ocfs2_init_steal_slots(osb);
43463
43464- atomic_set(&osb->alloc_stats.moves, 0);
43465- atomic_set(&osb->alloc_stats.local_data, 0);
43466- atomic_set(&osb->alloc_stats.bitmap_data, 0);
43467- atomic_set(&osb->alloc_stats.bg_allocs, 0);
43468- atomic_set(&osb->alloc_stats.bg_extends, 0);
43469+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
43470+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
43471+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
43472+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
43473+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
43474
43475 /* Copy the blockcheck stats from the superblock probe */
43476 osb->osb_ecc_stats = *stats;
43477diff -urNp linux-3.0.4/fs/ocfs2/symlink.c linux-3.0.4/fs/ocfs2/symlink.c
43478--- linux-3.0.4/fs/ocfs2/symlink.c 2011-07-21 22:17:23.000000000 -0400
43479+++ linux-3.0.4/fs/ocfs2/symlink.c 2011-08-23 21:47:56.000000000 -0400
43480@@ -142,7 +142,7 @@ bail:
43481
43482 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
43483 {
43484- char *link = nd_get_link(nd);
43485+ const char *link = nd_get_link(nd);
43486 if (!IS_ERR(link))
43487 kfree(link);
43488 }
43489diff -urNp linux-3.0.4/fs/open.c linux-3.0.4/fs/open.c
43490--- linux-3.0.4/fs/open.c 2011-07-21 22:17:23.000000000 -0400
43491+++ linux-3.0.4/fs/open.c 2011-09-14 09:16:46.000000000 -0400
43492@@ -112,6 +112,10 @@ static long do_sys_truncate(const char _
43493 error = locks_verify_truncate(inode, NULL, length);
43494 if (!error)
43495 error = security_path_truncate(&path);
43496+
43497+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
43498+ error = -EACCES;
43499+
43500 if (!error)
43501 error = do_truncate(path.dentry, length, 0, NULL);
43502
43503@@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
43504 if (__mnt_is_readonly(path.mnt))
43505 res = -EROFS;
43506
43507+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
43508+ res = -EACCES;
43509+
43510 out_path_release:
43511 path_put(&path);
43512 out:
43513@@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user
43514 if (error)
43515 goto dput_and_out;
43516
43517+ gr_log_chdir(path.dentry, path.mnt);
43518+
43519 set_fs_pwd(current->fs, &path);
43520
43521 dput_and_out:
43522@@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
43523 goto out_putf;
43524
43525 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
43526+
43527+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
43528+ error = -EPERM;
43529+
43530+ if (!error)
43531+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
43532+
43533 if (!error)
43534 set_fs_pwd(current->fs, &file->f_path);
43535 out_putf:
43536@@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __use
43537 if (error)
43538 goto dput_and_out;
43539
43540+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
43541+ goto dput_and_out;
43542+
43543 set_fs_root(current->fs, &path);
43544+
43545+ gr_handle_chroot_chdir(&path);
43546+
43547 error = 0;
43548 dput_and_out:
43549 path_put(&path);
43550@@ -466,12 +488,25 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
43551 err = mnt_want_write_file(file);
43552 if (err)
43553 goto out_putf;
43554+
43555 mutex_lock(&inode->i_mutex);
43556+
43557+ if (!gr_acl_handle_fchmod(dentry, file->f_vfsmnt, mode)) {
43558+ err = -EACCES;
43559+ goto out_unlock;
43560+ }
43561+
43562 err = security_path_chmod(dentry, file->f_vfsmnt, mode);
43563 if (err)
43564 goto out_unlock;
43565 if (mode == (mode_t) -1)
43566 mode = inode->i_mode;
43567+
43568+ if (gr_handle_chroot_chmod(dentry, file->f_vfsmnt, mode)) {
43569+ err = -EACCES;
43570+ goto out_unlock;
43571+ }
43572+
43573 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
43574 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
43575 err = notify_change(dentry, &newattrs);
43576@@ -499,12 +534,25 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
43577 error = mnt_want_write(path.mnt);
43578 if (error)
43579 goto dput_and_out;
43580+
43581 mutex_lock(&inode->i_mutex);
43582+
43583+ if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
43584+ error = -EACCES;
43585+ goto out_unlock;
43586+ }
43587+
43588 error = security_path_chmod(path.dentry, path.mnt, mode);
43589 if (error)
43590 goto out_unlock;
43591 if (mode == (mode_t) -1)
43592 mode = inode->i_mode;
43593+
43594+ if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
43595+ error = -EACCES;
43596+ goto out_unlock;
43597+ }
43598+
43599 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
43600 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
43601 error = notify_change(path.dentry, &newattrs);
43602@@ -528,6 +576,9 @@ static int chown_common(struct path *pat
43603 int error;
43604 struct iattr newattrs;
43605
43606+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
43607+ return -EACCES;
43608+
43609 newattrs.ia_valid = ATTR_CTIME;
43610 if (user != (uid_t) -1) {
43611 newattrs.ia_valid |= ATTR_UID;
43612@@ -998,7 +1049,10 @@ long do_sys_open(int dfd, const char __u
43613 if (!IS_ERR(tmp)) {
43614 fd = get_unused_fd_flags(flags);
43615 if (fd >= 0) {
43616- struct file *f = do_filp_open(dfd, tmp, &op, lookup);
43617+ struct file *f;
43618+ /* don't allow to be set by userland */
43619+ flags &= ~FMODE_GREXEC;
43620+ f = do_filp_open(dfd, tmp, &op, lookup);
43621 if (IS_ERR(f)) {
43622 put_unused_fd(fd);
43623 fd = PTR_ERR(f);
43624diff -urNp linux-3.0.4/fs/partitions/ldm.c linux-3.0.4/fs/partitions/ldm.c
43625--- linux-3.0.4/fs/partitions/ldm.c 2011-07-21 22:17:23.000000000 -0400
43626+++ linux-3.0.4/fs/partitions/ldm.c 2011-08-23 21:48:14.000000000 -0400
43627@@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
43628 ldm_error ("A VBLK claims to have %d parts.", num);
43629 return false;
43630 }
43631+
43632 if (rec >= num) {
43633 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
43634 return false;
43635@@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
43636 goto found;
43637 }
43638
43639- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
43640+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
43641 if (!f) {
43642 ldm_crit ("Out of memory.");
43643 return false;
43644diff -urNp linux-3.0.4/fs/pipe.c linux-3.0.4/fs/pipe.c
43645--- linux-3.0.4/fs/pipe.c 2011-07-21 22:17:23.000000000 -0400
43646+++ linux-3.0.4/fs/pipe.c 2011-08-23 21:48:14.000000000 -0400
43647@@ -420,9 +420,9 @@ redo:
43648 }
43649 if (bufs) /* More to do? */
43650 continue;
43651- if (!pipe->writers)
43652+ if (!atomic_read(&pipe->writers))
43653 break;
43654- if (!pipe->waiting_writers) {
43655+ if (!atomic_read(&pipe->waiting_writers)) {
43656 /* syscall merging: Usually we must not sleep
43657 * if O_NONBLOCK is set, or if we got some data.
43658 * But if a writer sleeps in kernel space, then
43659@@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const str
43660 mutex_lock(&inode->i_mutex);
43661 pipe = inode->i_pipe;
43662
43663- if (!pipe->readers) {
43664+ if (!atomic_read(&pipe->readers)) {
43665 send_sig(SIGPIPE, current, 0);
43666 ret = -EPIPE;
43667 goto out;
43668@@ -530,7 +530,7 @@ redo1:
43669 for (;;) {
43670 int bufs;
43671
43672- if (!pipe->readers) {
43673+ if (!atomic_read(&pipe->readers)) {
43674 send_sig(SIGPIPE, current, 0);
43675 if (!ret)
43676 ret = -EPIPE;
43677@@ -616,9 +616,9 @@ redo2:
43678 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
43679 do_wakeup = 0;
43680 }
43681- pipe->waiting_writers++;
43682+ atomic_inc(&pipe->waiting_writers);
43683 pipe_wait(pipe);
43684- pipe->waiting_writers--;
43685+ atomic_dec(&pipe->waiting_writers);
43686 }
43687 out:
43688 mutex_unlock(&inode->i_mutex);
43689@@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table
43690 mask = 0;
43691 if (filp->f_mode & FMODE_READ) {
43692 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
43693- if (!pipe->writers && filp->f_version != pipe->w_counter)
43694+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
43695 mask |= POLLHUP;
43696 }
43697
43698@@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table
43699 * Most Unices do not set POLLERR for FIFOs but on Linux they
43700 * behave exactly like pipes for poll().
43701 */
43702- if (!pipe->readers)
43703+ if (!atomic_read(&pipe->readers))
43704 mask |= POLLERR;
43705 }
43706
43707@@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int de
43708
43709 mutex_lock(&inode->i_mutex);
43710 pipe = inode->i_pipe;
43711- pipe->readers -= decr;
43712- pipe->writers -= decw;
43713+ atomic_sub(decr, &pipe->readers);
43714+ atomic_sub(decw, &pipe->writers);
43715
43716- if (!pipe->readers && !pipe->writers) {
43717+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
43718 free_pipe_info(inode);
43719 } else {
43720 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
43721@@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, stru
43722
43723 if (inode->i_pipe) {
43724 ret = 0;
43725- inode->i_pipe->readers++;
43726+ atomic_inc(&inode->i_pipe->readers);
43727 }
43728
43729 mutex_unlock(&inode->i_mutex);
43730@@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, str
43731
43732 if (inode->i_pipe) {
43733 ret = 0;
43734- inode->i_pipe->writers++;
43735+ atomic_inc(&inode->i_pipe->writers);
43736 }
43737
43738 mutex_unlock(&inode->i_mutex);
43739@@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, stru
43740 if (inode->i_pipe) {
43741 ret = 0;
43742 if (filp->f_mode & FMODE_READ)
43743- inode->i_pipe->readers++;
43744+ atomic_inc(&inode->i_pipe->readers);
43745 if (filp->f_mode & FMODE_WRITE)
43746- inode->i_pipe->writers++;
43747+ atomic_inc(&inode->i_pipe->writers);
43748 }
43749
43750 mutex_unlock(&inode->i_mutex);
43751@@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
43752 inode->i_pipe = NULL;
43753 }
43754
43755-static struct vfsmount *pipe_mnt __read_mostly;
43756+struct vfsmount *pipe_mnt __read_mostly;
43757
43758 /*
43759 * pipefs_dname() is called from d_path().
43760@@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(voi
43761 goto fail_iput;
43762 inode->i_pipe = pipe;
43763
43764- pipe->readers = pipe->writers = 1;
43765+ atomic_set(&pipe->readers, 1);
43766+ atomic_set(&pipe->writers, 1);
43767 inode->i_fop = &rdwr_pipefifo_fops;
43768
43769 /*
43770diff -urNp linux-3.0.4/fs/proc/array.c linux-3.0.4/fs/proc/array.c
43771--- linux-3.0.4/fs/proc/array.c 2011-07-21 22:17:23.000000000 -0400
43772+++ linux-3.0.4/fs/proc/array.c 2011-08-23 21:48:14.000000000 -0400
43773@@ -60,6 +60,7 @@
43774 #include <linux/tty.h>
43775 #include <linux/string.h>
43776 #include <linux/mman.h>
43777+#include <linux/grsecurity.h>
43778 #include <linux/proc_fs.h>
43779 #include <linux/ioport.h>
43780 #include <linux/uaccess.h>
43781@@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq
43782 seq_putc(m, '\n');
43783 }
43784
43785+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
43786+static inline void task_pax(struct seq_file *m, struct task_struct *p)
43787+{
43788+ if (p->mm)
43789+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
43790+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
43791+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
43792+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
43793+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
43794+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
43795+ else
43796+ seq_printf(m, "PaX:\t-----\n");
43797+}
43798+#endif
43799+
43800 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
43801 struct pid *pid, struct task_struct *task)
43802 {
43803@@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m,
43804 task_cpus_allowed(m, task);
43805 cpuset_task_status_allowed(m, task);
43806 task_context_switch_counts(m, task);
43807+
43808+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
43809+ task_pax(m, task);
43810+#endif
43811+
43812+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
43813+ task_grsec_rbac(m, task);
43814+#endif
43815+
43816 return 0;
43817 }
43818
43819+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43820+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
43821+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
43822+ _mm->pax_flags & MF_PAX_SEGMEXEC))
43823+#endif
43824+
43825 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
43826 struct pid *pid, struct task_struct *task, int whole)
43827 {
43828@@ -375,9 +406,11 @@ static int do_task_stat(struct seq_file
43829 cputime_t cutime, cstime, utime, stime;
43830 cputime_t cgtime, gtime;
43831 unsigned long rsslim = 0;
43832- char tcomm[sizeof(task->comm)];
43833+ char tcomm[sizeof(task->comm)] = { 0 };
43834 unsigned long flags;
43835
43836+ pax_track_stack();
43837+
43838 state = *get_task_state(task);
43839 vsize = eip = esp = 0;
43840 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
43841@@ -449,6 +482,19 @@ static int do_task_stat(struct seq_file
43842 gtime = task->gtime;
43843 }
43844
43845+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43846+ if (PAX_RAND_FLAGS(mm)) {
43847+ eip = 0;
43848+ esp = 0;
43849+ wchan = 0;
43850+ }
43851+#endif
43852+#ifdef CONFIG_GRKERNSEC_HIDESYM
43853+ wchan = 0;
43854+ eip =0;
43855+ esp =0;
43856+#endif
43857+
43858 /* scale priority and nice values from timeslices to -20..20 */
43859 /* to make it look like a "normal" Unix priority/nice value */
43860 priority = task_prio(task);
43861@@ -489,9 +535,15 @@ static int do_task_stat(struct seq_file
43862 vsize,
43863 mm ? get_mm_rss(mm) : 0,
43864 rsslim,
43865+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43866+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
43867+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
43868+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
43869+#else
43870 mm ? (permitted ? mm->start_code : 1) : 0,
43871 mm ? (permitted ? mm->end_code : 1) : 0,
43872 (permitted && mm) ? mm->start_stack : 0,
43873+#endif
43874 esp,
43875 eip,
43876 /* The signal information here is obsolete.
43877@@ -544,3 +596,18 @@ int proc_pid_statm(struct seq_file *m, s
43878
43879 return 0;
43880 }
43881+
43882+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
43883+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
43884+{
43885+ u32 curr_ip = 0;
43886+ unsigned long flags;
43887+
43888+ if (lock_task_sighand(task, &flags)) {
43889+ curr_ip = task->signal->curr_ip;
43890+ unlock_task_sighand(task, &flags);
43891+ }
43892+
43893+ return sprintf(buffer, "%pI4\n", &curr_ip);
43894+}
43895+#endif
43896diff -urNp linux-3.0.4/fs/proc/base.c linux-3.0.4/fs/proc/base.c
43897--- linux-3.0.4/fs/proc/base.c 2011-09-02 18:11:21.000000000 -0400
43898+++ linux-3.0.4/fs/proc/base.c 2011-09-13 14:50:28.000000000 -0400
43899@@ -107,6 +107,22 @@ struct pid_entry {
43900 union proc_op op;
43901 };
43902
43903+struct getdents_callback {
43904+ struct linux_dirent __user * current_dir;
43905+ struct linux_dirent __user * previous;
43906+ struct file * file;
43907+ int count;
43908+ int error;
43909+};
43910+
43911+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
43912+ loff_t offset, u64 ino, unsigned int d_type)
43913+{
43914+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
43915+ buf->error = -EINVAL;
43916+ return 0;
43917+}
43918+
43919 #define NOD(NAME, MODE, IOP, FOP, OP) { \
43920 .name = (NAME), \
43921 .len = sizeof(NAME) - 1, \
43922@@ -209,6 +225,9 @@ static struct mm_struct *__check_mem_per
43923 if (task == current)
43924 return mm;
43925
43926+ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
43927+ return ERR_PTR(-EPERM);
43928+
43929 /*
43930 * If current is actively ptrace'ing, and would also be
43931 * permitted to freshly attach with ptrace now, permit it.
43932@@ -282,6 +301,9 @@ static int proc_pid_cmdline(struct task_
43933 if (!mm->arg_end)
43934 goto out_mm; /* Shh! No looking before we're done */
43935
43936+ if (gr_acl_handle_procpidmem(task))
43937+ goto out_mm;
43938+
43939 len = mm->arg_end - mm->arg_start;
43940
43941 if (len > PAGE_SIZE)
43942@@ -309,12 +331,28 @@ out:
43943 return res;
43944 }
43945
43946+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43947+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
43948+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
43949+ _mm->pax_flags & MF_PAX_SEGMEXEC))
43950+#endif
43951+
43952 static int proc_pid_auxv(struct task_struct *task, char *buffer)
43953 {
43954 struct mm_struct *mm = mm_for_maps(task);
43955 int res = PTR_ERR(mm);
43956 if (mm && !IS_ERR(mm)) {
43957 unsigned int nwords = 0;
43958+
43959+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43960+ /* allow if we're currently ptracing this task */
43961+ if (PAX_RAND_FLAGS(mm) &&
43962+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
43963+ mmput(mm);
43964+ return 0;
43965+ }
43966+#endif
43967+
43968 do {
43969 nwords += 2;
43970 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
43971@@ -328,7 +366,7 @@ static int proc_pid_auxv(struct task_str
43972 }
43973
43974
43975-#ifdef CONFIG_KALLSYMS
43976+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
43977 /*
43978 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
43979 * Returns the resolved symbol. If that fails, simply return the address.
43980@@ -367,7 +405,7 @@ static void unlock_trace(struct task_str
43981 mutex_unlock(&task->signal->cred_guard_mutex);
43982 }
43983
43984-#ifdef CONFIG_STACKTRACE
43985+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
43986
43987 #define MAX_STACK_TRACE_DEPTH 64
43988
43989@@ -558,7 +596,7 @@ static int proc_pid_limits(struct task_s
43990 return count;
43991 }
43992
43993-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
43994+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
43995 static int proc_pid_syscall(struct task_struct *task, char *buffer)
43996 {
43997 long nr;
43998@@ -587,7 +625,7 @@ static int proc_pid_syscall(struct task_
43999 /************************************************************************/
44000
44001 /* permission checks */
44002-static int proc_fd_access_allowed(struct inode *inode)
44003+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
44004 {
44005 struct task_struct *task;
44006 int allowed = 0;
44007@@ -597,7 +635,10 @@ static int proc_fd_access_allowed(struct
44008 */
44009 task = get_proc_task(inode);
44010 if (task) {
44011- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
44012+ if (log)
44013+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
44014+ else
44015+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
44016 put_task_struct(task);
44017 }
44018 return allowed;
44019@@ -978,6 +1019,9 @@ static ssize_t environ_read(struct file
44020 if (!task)
44021 goto out_no_task;
44022
44023+ if (gr_acl_handle_procpidmem(task))
44024+ goto out;
44025+
44026 ret = -ENOMEM;
44027 page = (char *)__get_free_page(GFP_TEMPORARY);
44028 if (!page)
44029@@ -1614,7 +1658,7 @@ static void *proc_pid_follow_link(struct
44030 path_put(&nd->path);
44031
44032 /* Are we allowed to snoop on the tasks file descriptors? */
44033- if (!proc_fd_access_allowed(inode))
44034+ if (!proc_fd_access_allowed(inode,0))
44035 goto out;
44036
44037 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
44038@@ -1653,8 +1697,18 @@ static int proc_pid_readlink(struct dent
44039 struct path path;
44040
44041 /* Are we allowed to snoop on the tasks file descriptors? */
44042- if (!proc_fd_access_allowed(inode))
44043- goto out;
44044+ /* logging this is needed for learning on chromium to work properly,
44045+ but we don't want to flood the logs from 'ps' which does a readlink
44046+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
44047+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
44048+ */
44049+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
44050+ if (!proc_fd_access_allowed(inode,0))
44051+ goto out;
44052+ } else {
44053+ if (!proc_fd_access_allowed(inode,1))
44054+ goto out;
44055+ }
44056
44057 error = PROC_I(inode)->op.proc_get_link(inode, &path);
44058 if (error)
44059@@ -1719,7 +1773,11 @@ struct inode *proc_pid_make_inode(struct
44060 rcu_read_lock();
44061 cred = __task_cred(task);
44062 inode->i_uid = cred->euid;
44063+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
44064+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
44065+#else
44066 inode->i_gid = cred->egid;
44067+#endif
44068 rcu_read_unlock();
44069 }
44070 security_task_to_inode(task, inode);
44071@@ -1737,6 +1795,9 @@ int pid_getattr(struct vfsmount *mnt, st
44072 struct inode *inode = dentry->d_inode;
44073 struct task_struct *task;
44074 const struct cred *cred;
44075+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44076+ const struct cred *tmpcred = current_cred();
44077+#endif
44078
44079 generic_fillattr(inode, stat);
44080
44081@@ -1744,13 +1805,41 @@ int pid_getattr(struct vfsmount *mnt, st
44082 stat->uid = 0;
44083 stat->gid = 0;
44084 task = pid_task(proc_pid(inode), PIDTYPE_PID);
44085+
44086+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
44087+ rcu_read_unlock();
44088+ return -ENOENT;
44089+ }
44090+
44091 if (task) {
44092+ cred = __task_cred(task);
44093+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44094+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
44095+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
44096+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
44097+#endif
44098+ ) {
44099+#endif
44100 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
44101+#ifdef CONFIG_GRKERNSEC_PROC_USER
44102+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
44103+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44104+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
44105+#endif
44106 task_dumpable(task)) {
44107- cred = __task_cred(task);
44108 stat->uid = cred->euid;
44109+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
44110+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
44111+#else
44112 stat->gid = cred->egid;
44113+#endif
44114 }
44115+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44116+ } else {
44117+ rcu_read_unlock();
44118+ return -ENOENT;
44119+ }
44120+#endif
44121 }
44122 rcu_read_unlock();
44123 return 0;
44124@@ -1787,11 +1876,20 @@ int pid_revalidate(struct dentry *dentry
44125
44126 if (task) {
44127 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
44128+#ifdef CONFIG_GRKERNSEC_PROC_USER
44129+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
44130+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44131+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
44132+#endif
44133 task_dumpable(task)) {
44134 rcu_read_lock();
44135 cred = __task_cred(task);
44136 inode->i_uid = cred->euid;
44137+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
44138+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
44139+#else
44140 inode->i_gid = cred->egid;
44141+#endif
44142 rcu_read_unlock();
44143 } else {
44144 inode->i_uid = 0;
44145@@ -1909,7 +2007,8 @@ static int proc_fd_info(struct inode *in
44146 int fd = proc_fd(inode);
44147
44148 if (task) {
44149- files = get_files_struct(task);
44150+ if (!gr_acl_handle_procpidmem(task))
44151+ files = get_files_struct(task);
44152 put_task_struct(task);
44153 }
44154 if (files) {
44155@@ -2169,11 +2268,21 @@ static const struct file_operations proc
44156 */
44157 static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags)
44158 {
44159+ struct task_struct *task;
44160 int rv = generic_permission(inode, mask, flags, NULL);
44161- if (rv == 0)
44162- return 0;
44163+
44164 if (task_pid(current) == proc_pid(inode))
44165 rv = 0;
44166+
44167+ task = get_proc_task(inode);
44168+ if (task == NULL)
44169+ return rv;
44170+
44171+ if (gr_acl_handle_procpidmem(task))
44172+ rv = -EACCES;
44173+
44174+ put_task_struct(task);
44175+
44176 return rv;
44177 }
44178
44179@@ -2283,6 +2392,9 @@ static struct dentry *proc_pident_lookup
44180 if (!task)
44181 goto out_no_task;
44182
44183+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
44184+ goto out;
44185+
44186 /*
44187 * Yes, it does not scale. And it should not. Don't add
44188 * new entries into /proc/<tgid>/ without very good reasons.
44189@@ -2327,6 +2439,9 @@ static int proc_pident_readdir(struct fi
44190 if (!task)
44191 goto out_no_task;
44192
44193+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
44194+ goto out;
44195+
44196 ret = 0;
44197 i = filp->f_pos;
44198 switch (i) {
44199@@ -2597,7 +2712,7 @@ static void *proc_self_follow_link(struc
44200 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
44201 void *cookie)
44202 {
44203- char *s = nd_get_link(nd);
44204+ const char *s = nd_get_link(nd);
44205 if (!IS_ERR(s))
44206 __putname(s);
44207 }
44208@@ -2795,7 +2910,7 @@ static const struct pid_entry tgid_base_
44209 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
44210 #endif
44211 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
44212-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
44213+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
44214 INF("syscall", S_IRUGO, proc_pid_syscall),
44215 #endif
44216 INF("cmdline", S_IRUGO, proc_pid_cmdline),
44217@@ -2820,10 +2935,10 @@ static const struct pid_entry tgid_base_
44218 #ifdef CONFIG_SECURITY
44219 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
44220 #endif
44221-#ifdef CONFIG_KALLSYMS
44222+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
44223 INF("wchan", S_IRUGO, proc_pid_wchan),
44224 #endif
44225-#ifdef CONFIG_STACKTRACE
44226+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
44227 ONE("stack", S_IRUGO, proc_pid_stack),
44228 #endif
44229 #ifdef CONFIG_SCHEDSTATS
44230@@ -2857,6 +2972,9 @@ static const struct pid_entry tgid_base_
44231 #ifdef CONFIG_HARDWALL
44232 INF("hardwall", S_IRUGO, proc_pid_hardwall),
44233 #endif
44234+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
44235+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
44236+#endif
44237 };
44238
44239 static int proc_tgid_base_readdir(struct file * filp,
44240@@ -2982,7 +3100,14 @@ static struct dentry *proc_pid_instantia
44241 if (!inode)
44242 goto out;
44243
44244+#ifdef CONFIG_GRKERNSEC_PROC_USER
44245+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
44246+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44247+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
44248+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
44249+#else
44250 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
44251+#endif
44252 inode->i_op = &proc_tgid_base_inode_operations;
44253 inode->i_fop = &proc_tgid_base_operations;
44254 inode->i_flags|=S_IMMUTABLE;
44255@@ -3024,7 +3149,11 @@ struct dentry *proc_pid_lookup(struct in
44256 if (!task)
44257 goto out;
44258
44259+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
44260+ goto out_put_task;
44261+
44262 result = proc_pid_instantiate(dir, dentry, task, NULL);
44263+out_put_task:
44264 put_task_struct(task);
44265 out:
44266 return result;
44267@@ -3089,6 +3218,11 @@ int proc_pid_readdir(struct file * filp,
44268 {
44269 unsigned int nr;
44270 struct task_struct *reaper;
44271+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44272+ const struct cred *tmpcred = current_cred();
44273+ const struct cred *itercred;
44274+#endif
44275+ filldir_t __filldir = filldir;
44276 struct tgid_iter iter;
44277 struct pid_namespace *ns;
44278
44279@@ -3112,8 +3246,27 @@ int proc_pid_readdir(struct file * filp,
44280 for (iter = next_tgid(ns, iter);
44281 iter.task;
44282 iter.tgid += 1, iter = next_tgid(ns, iter)) {
44283+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44284+ rcu_read_lock();
44285+ itercred = __task_cred(iter.task);
44286+#endif
44287+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
44288+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44289+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
44290+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
44291+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
44292+#endif
44293+ )
44294+#endif
44295+ )
44296+ __filldir = &gr_fake_filldir;
44297+ else
44298+ __filldir = filldir;
44299+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44300+ rcu_read_unlock();
44301+#endif
44302 filp->f_pos = iter.tgid + TGID_OFFSET;
44303- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
44304+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
44305 put_task_struct(iter.task);
44306 goto out;
44307 }
44308@@ -3141,7 +3294,7 @@ static const struct pid_entry tid_base_s
44309 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
44310 #endif
44311 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
44312-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
44313+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
44314 INF("syscall", S_IRUGO, proc_pid_syscall),
44315 #endif
44316 INF("cmdline", S_IRUGO, proc_pid_cmdline),
44317@@ -3165,10 +3318,10 @@ static const struct pid_entry tid_base_s
44318 #ifdef CONFIG_SECURITY
44319 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
44320 #endif
44321-#ifdef CONFIG_KALLSYMS
44322+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
44323 INF("wchan", S_IRUGO, proc_pid_wchan),
44324 #endif
44325-#ifdef CONFIG_STACKTRACE
44326+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
44327 ONE("stack", S_IRUGO, proc_pid_stack),
44328 #endif
44329 #ifdef CONFIG_SCHEDSTATS
44330diff -urNp linux-3.0.4/fs/proc/cmdline.c linux-3.0.4/fs/proc/cmdline.c
44331--- linux-3.0.4/fs/proc/cmdline.c 2011-07-21 22:17:23.000000000 -0400
44332+++ linux-3.0.4/fs/proc/cmdline.c 2011-08-23 21:48:14.000000000 -0400
44333@@ -23,7 +23,11 @@ static const struct file_operations cmdl
44334
44335 static int __init proc_cmdline_init(void)
44336 {
44337+#ifdef CONFIG_GRKERNSEC_PROC_ADD
44338+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
44339+#else
44340 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
44341+#endif
44342 return 0;
44343 }
44344 module_init(proc_cmdline_init);
44345diff -urNp linux-3.0.4/fs/proc/devices.c linux-3.0.4/fs/proc/devices.c
44346--- linux-3.0.4/fs/proc/devices.c 2011-07-21 22:17:23.000000000 -0400
44347+++ linux-3.0.4/fs/proc/devices.c 2011-08-23 21:48:14.000000000 -0400
44348@@ -64,7 +64,11 @@ static const struct file_operations proc
44349
44350 static int __init proc_devices_init(void)
44351 {
44352+#ifdef CONFIG_GRKERNSEC_PROC_ADD
44353+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
44354+#else
44355 proc_create("devices", 0, NULL, &proc_devinfo_operations);
44356+#endif
44357 return 0;
44358 }
44359 module_init(proc_devices_init);
44360diff -urNp linux-3.0.4/fs/proc/inode.c linux-3.0.4/fs/proc/inode.c
44361--- linux-3.0.4/fs/proc/inode.c 2011-07-21 22:17:23.000000000 -0400
44362+++ linux-3.0.4/fs/proc/inode.c 2011-08-23 21:48:14.000000000 -0400
44363@@ -440,7 +440,11 @@ struct inode *proc_get_inode(struct supe
44364 if (de->mode) {
44365 inode->i_mode = de->mode;
44366 inode->i_uid = de->uid;
44367+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
44368+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
44369+#else
44370 inode->i_gid = de->gid;
44371+#endif
44372 }
44373 if (de->size)
44374 inode->i_size = de->size;
44375diff -urNp linux-3.0.4/fs/proc/internal.h linux-3.0.4/fs/proc/internal.h
44376--- linux-3.0.4/fs/proc/internal.h 2011-07-21 22:17:23.000000000 -0400
44377+++ linux-3.0.4/fs/proc/internal.h 2011-08-23 21:48:14.000000000 -0400
44378@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
44379 struct pid *pid, struct task_struct *task);
44380 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
44381 struct pid *pid, struct task_struct *task);
44382+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
44383+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
44384+#endif
44385 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
44386
44387 extern const struct file_operations proc_maps_operations;
44388diff -urNp linux-3.0.4/fs/proc/Kconfig linux-3.0.4/fs/proc/Kconfig
44389--- linux-3.0.4/fs/proc/Kconfig 2011-07-21 22:17:23.000000000 -0400
44390+++ linux-3.0.4/fs/proc/Kconfig 2011-08-23 21:48:14.000000000 -0400
44391@@ -30,12 +30,12 @@ config PROC_FS
44392
44393 config PROC_KCORE
44394 bool "/proc/kcore support" if !ARM
44395- depends on PROC_FS && MMU
44396+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
44397
44398 config PROC_VMCORE
44399 bool "/proc/vmcore support"
44400- depends on PROC_FS && CRASH_DUMP
44401- default y
44402+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
44403+ default n
44404 help
44405 Exports the dump image of crashed kernel in ELF format.
44406
44407@@ -59,8 +59,8 @@ config PROC_SYSCTL
44408 limited in memory.
44409
44410 config PROC_PAGE_MONITOR
44411- default y
44412- depends on PROC_FS && MMU
44413+ default n
44414+ depends on PROC_FS && MMU && !GRKERNSEC
44415 bool "Enable /proc page monitoring" if EXPERT
44416 help
44417 Various /proc files exist to monitor process memory utilization:
44418diff -urNp linux-3.0.4/fs/proc/kcore.c linux-3.0.4/fs/proc/kcore.c
44419--- linux-3.0.4/fs/proc/kcore.c 2011-07-21 22:17:23.000000000 -0400
44420+++ linux-3.0.4/fs/proc/kcore.c 2011-08-23 21:48:14.000000000 -0400
44421@@ -321,6 +321,8 @@ static void elf_kcore_store_hdr(char *bu
44422 off_t offset = 0;
44423 struct kcore_list *m;
44424
44425+ pax_track_stack();
44426+
44427 /* setup ELF header */
44428 elf = (struct elfhdr *) bufp;
44429 bufp += sizeof(struct elfhdr);
44430@@ -478,9 +480,10 @@ read_kcore(struct file *file, char __use
44431 * the addresses in the elf_phdr on our list.
44432 */
44433 start = kc_offset_to_vaddr(*fpos - elf_buflen);
44434- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
44435+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
44436+ if (tsz > buflen)
44437 tsz = buflen;
44438-
44439+
44440 while (buflen) {
44441 struct kcore_list *m;
44442
44443@@ -509,20 +512,23 @@ read_kcore(struct file *file, char __use
44444 kfree(elf_buf);
44445 } else {
44446 if (kern_addr_valid(start)) {
44447- unsigned long n;
44448+ char *elf_buf;
44449+ mm_segment_t oldfs;
44450
44451- n = copy_to_user(buffer, (char *)start, tsz);
44452- /*
44453- * We cannot distingush between fault on source
44454- * and fault on destination. When this happens
44455- * we clear too and hope it will trigger the
44456- * EFAULT again.
44457- */
44458- if (n) {
44459- if (clear_user(buffer + tsz - n,
44460- n))
44461+ elf_buf = kmalloc(tsz, GFP_KERNEL);
44462+ if (!elf_buf)
44463+ return -ENOMEM;
44464+ oldfs = get_fs();
44465+ set_fs(KERNEL_DS);
44466+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
44467+ set_fs(oldfs);
44468+ if (copy_to_user(buffer, elf_buf, tsz)) {
44469+ kfree(elf_buf);
44470 return -EFAULT;
44471+ }
44472 }
44473+ set_fs(oldfs);
44474+ kfree(elf_buf);
44475 } else {
44476 if (clear_user(buffer, tsz))
44477 return -EFAULT;
44478@@ -542,6 +548,9 @@ read_kcore(struct file *file, char __use
44479
44480 static int open_kcore(struct inode *inode, struct file *filp)
44481 {
44482+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
44483+ return -EPERM;
44484+#endif
44485 if (!capable(CAP_SYS_RAWIO))
44486 return -EPERM;
44487 if (kcore_need_update)
44488diff -urNp linux-3.0.4/fs/proc/meminfo.c linux-3.0.4/fs/proc/meminfo.c
44489--- linux-3.0.4/fs/proc/meminfo.c 2011-07-21 22:17:23.000000000 -0400
44490+++ linux-3.0.4/fs/proc/meminfo.c 2011-08-23 21:48:14.000000000 -0400
44491@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
44492 unsigned long pages[NR_LRU_LISTS];
44493 int lru;
44494
44495+ pax_track_stack();
44496+
44497 /*
44498 * display in kilobytes.
44499 */
44500@@ -157,7 +159,7 @@ static int meminfo_proc_show(struct seq_
44501 vmi.used >> 10,
44502 vmi.largest_chunk >> 10
44503 #ifdef CONFIG_MEMORY_FAILURE
44504- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
44505+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
44506 #endif
44507 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
44508 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
44509diff -urNp linux-3.0.4/fs/proc/nommu.c linux-3.0.4/fs/proc/nommu.c
44510--- linux-3.0.4/fs/proc/nommu.c 2011-07-21 22:17:23.000000000 -0400
44511+++ linux-3.0.4/fs/proc/nommu.c 2011-08-23 21:47:56.000000000 -0400
44512@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_
44513 if (len < 1)
44514 len = 1;
44515 seq_printf(m, "%*c", len, ' ');
44516- seq_path(m, &file->f_path, "");
44517+ seq_path(m, &file->f_path, "\n\\");
44518 }
44519
44520 seq_putc(m, '\n');
44521diff -urNp linux-3.0.4/fs/proc/proc_net.c linux-3.0.4/fs/proc/proc_net.c
44522--- linux-3.0.4/fs/proc/proc_net.c 2011-07-21 22:17:23.000000000 -0400
44523+++ linux-3.0.4/fs/proc/proc_net.c 2011-08-23 21:48:14.000000000 -0400
44524@@ -105,6 +105,17 @@ static struct net *get_proc_task_net(str
44525 struct task_struct *task;
44526 struct nsproxy *ns;
44527 struct net *net = NULL;
44528+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44529+ const struct cred *cred = current_cred();
44530+#endif
44531+
44532+#ifdef CONFIG_GRKERNSEC_PROC_USER
44533+ if (cred->fsuid)
44534+ return net;
44535+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44536+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
44537+ return net;
44538+#endif
44539
44540 rcu_read_lock();
44541 task = pid_task(proc_pid(dir), PIDTYPE_PID);
44542diff -urNp linux-3.0.4/fs/proc/proc_sysctl.c linux-3.0.4/fs/proc/proc_sysctl.c
44543--- linux-3.0.4/fs/proc/proc_sysctl.c 2011-07-21 22:17:23.000000000 -0400
44544+++ linux-3.0.4/fs/proc/proc_sysctl.c 2011-08-23 21:48:14.000000000 -0400
44545@@ -8,6 +8,8 @@
44546 #include <linux/namei.h>
44547 #include "internal.h"
44548
44549+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
44550+
44551 static const struct dentry_operations proc_sys_dentry_operations;
44552 static const struct file_operations proc_sys_file_operations;
44553 static const struct inode_operations proc_sys_inode_operations;
44554@@ -111,6 +113,9 @@ static struct dentry *proc_sys_lookup(st
44555 if (!p)
44556 goto out;
44557
44558+ if (gr_handle_sysctl(p, MAY_EXEC))
44559+ goto out;
44560+
44561 err = ERR_PTR(-ENOMEM);
44562 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
44563 if (h)
44564@@ -230,6 +235,9 @@ static int scan(struct ctl_table_header
44565 if (*pos < file->f_pos)
44566 continue;
44567
44568+ if (gr_handle_sysctl(table, 0))
44569+ continue;
44570+
44571 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
44572 if (res)
44573 return res;
44574@@ -355,6 +363,9 @@ static int proc_sys_getattr(struct vfsmo
44575 if (IS_ERR(head))
44576 return PTR_ERR(head);
44577
44578+ if (table && gr_handle_sysctl(table, MAY_EXEC))
44579+ return -ENOENT;
44580+
44581 generic_fillattr(inode, stat);
44582 if (table)
44583 stat->mode = (stat->mode & S_IFMT) | table->mode;
44584diff -urNp linux-3.0.4/fs/proc/root.c linux-3.0.4/fs/proc/root.c
44585--- linux-3.0.4/fs/proc/root.c 2011-07-21 22:17:23.000000000 -0400
44586+++ linux-3.0.4/fs/proc/root.c 2011-08-23 21:48:14.000000000 -0400
44587@@ -123,7 +123,15 @@ void __init proc_root_init(void)
44588 #ifdef CONFIG_PROC_DEVICETREE
44589 proc_device_tree_init();
44590 #endif
44591+#ifdef CONFIG_GRKERNSEC_PROC_ADD
44592+#ifdef CONFIG_GRKERNSEC_PROC_USER
44593+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
44594+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44595+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
44596+#endif
44597+#else
44598 proc_mkdir("bus", NULL);
44599+#endif
44600 proc_sys_init();
44601 }
44602
44603diff -urNp linux-3.0.4/fs/proc/task_mmu.c linux-3.0.4/fs/proc/task_mmu.c
44604--- linux-3.0.4/fs/proc/task_mmu.c 2011-07-21 22:17:23.000000000 -0400
44605+++ linux-3.0.4/fs/proc/task_mmu.c 2011-08-23 21:48:14.000000000 -0400
44606@@ -51,8 +51,13 @@ void task_mem(struct seq_file *m, struct
44607 "VmExe:\t%8lu kB\n"
44608 "VmLib:\t%8lu kB\n"
44609 "VmPTE:\t%8lu kB\n"
44610- "VmSwap:\t%8lu kB\n",
44611- hiwater_vm << (PAGE_SHIFT-10),
44612+ "VmSwap:\t%8lu kB\n"
44613+
44614+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
44615+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
44616+#endif
44617+
44618+ ,hiwater_vm << (PAGE_SHIFT-10),
44619 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
44620 mm->locked_vm << (PAGE_SHIFT-10),
44621 hiwater_rss << (PAGE_SHIFT-10),
44622@@ -60,7 +65,13 @@ void task_mem(struct seq_file *m, struct
44623 data << (PAGE_SHIFT-10),
44624 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
44625 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
44626- swap << (PAGE_SHIFT-10));
44627+ swap << (PAGE_SHIFT-10)
44628+
44629+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
44630+ , mm->context.user_cs_base, mm->context.user_cs_limit
44631+#endif
44632+
44633+ );
44634 }
44635
44636 unsigned long task_vsize(struct mm_struct *mm)
44637@@ -207,6 +218,12 @@ static int do_maps_open(struct inode *in
44638 return ret;
44639 }
44640
44641+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44642+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
44643+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
44644+ _mm->pax_flags & MF_PAX_SEGMEXEC))
44645+#endif
44646+
44647 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
44648 {
44649 struct mm_struct *mm = vma->vm_mm;
44650@@ -225,13 +242,13 @@ static void show_map_vma(struct seq_file
44651 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
44652 }
44653
44654- /* We don't show the stack guard page in /proc/maps */
44655+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44656+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
44657+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
44658+#else
44659 start = vma->vm_start;
44660- if (stack_guard_page_start(vma, start))
44661- start += PAGE_SIZE;
44662 end = vma->vm_end;
44663- if (stack_guard_page_end(vma, end))
44664- end -= PAGE_SIZE;
44665+#endif
44666
44667 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
44668 start,
44669@@ -240,7 +257,11 @@ static void show_map_vma(struct seq_file
44670 flags & VM_WRITE ? 'w' : '-',
44671 flags & VM_EXEC ? 'x' : '-',
44672 flags & VM_MAYSHARE ? 's' : 'p',
44673+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44674+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
44675+#else
44676 pgoff,
44677+#endif
44678 MAJOR(dev), MINOR(dev), ino, &len);
44679
44680 /*
44681@@ -249,7 +270,7 @@ static void show_map_vma(struct seq_file
44682 */
44683 if (file) {
44684 pad_len_spaces(m, len);
44685- seq_path(m, &file->f_path, "\n");
44686+ seq_path(m, &file->f_path, "\n\\");
44687 } else {
44688 const char *name = arch_vma_name(vma);
44689 if (!name) {
44690@@ -257,8 +278,9 @@ static void show_map_vma(struct seq_file
44691 if (vma->vm_start <= mm->brk &&
44692 vma->vm_end >= mm->start_brk) {
44693 name = "[heap]";
44694- } else if (vma->vm_start <= mm->start_stack &&
44695- vma->vm_end >= mm->start_stack) {
44696+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
44697+ (vma->vm_start <= mm->start_stack &&
44698+ vma->vm_end >= mm->start_stack)) {
44699 name = "[stack]";
44700 }
44701 } else {
44702@@ -433,11 +455,16 @@ static int show_smap(struct seq_file *m,
44703 };
44704
44705 memset(&mss, 0, sizeof mss);
44706- mss.vma = vma;
44707- /* mmap_sem is held in m_start */
44708- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
44709- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
44710-
44711+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44712+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
44713+#endif
44714+ mss.vma = vma;
44715+ /* mmap_sem is held in m_start */
44716+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
44717+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
44718+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44719+ }
44720+#endif
44721 show_map_vma(m, vma);
44722
44723 seq_printf(m,
44724@@ -455,7 +482,11 @@ static int show_smap(struct seq_file *m,
44725 "KernelPageSize: %8lu kB\n"
44726 "MMUPageSize: %8lu kB\n"
44727 "Locked: %8lu kB\n",
44728+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44729+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
44730+#else
44731 (vma->vm_end - vma->vm_start) >> 10,
44732+#endif
44733 mss.resident >> 10,
44734 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
44735 mss.shared_clean >> 10,
44736@@ -1001,7 +1032,7 @@ static int show_numa_map(struct seq_file
44737
44738 if (file) {
44739 seq_printf(m, " file=");
44740- seq_path(m, &file->f_path, "\n\t= ");
44741+ seq_path(m, &file->f_path, "\n\t\\= ");
44742 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
44743 seq_printf(m, " heap");
44744 } else if (vma->vm_start <= mm->start_stack &&
44745diff -urNp linux-3.0.4/fs/proc/task_nommu.c linux-3.0.4/fs/proc/task_nommu.c
44746--- linux-3.0.4/fs/proc/task_nommu.c 2011-07-21 22:17:23.000000000 -0400
44747+++ linux-3.0.4/fs/proc/task_nommu.c 2011-08-23 21:47:56.000000000 -0400
44748@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct
44749 else
44750 bytes += kobjsize(mm);
44751
44752- if (current->fs && current->fs->users > 1)
44753+ if (current->fs && atomic_read(&current->fs->users) > 1)
44754 sbytes += kobjsize(current->fs);
44755 else
44756 bytes += kobjsize(current->fs);
44757@@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_fil
44758
44759 if (file) {
44760 pad_len_spaces(m, len);
44761- seq_path(m, &file->f_path, "");
44762+ seq_path(m, &file->f_path, "\n\\");
44763 } else if (mm) {
44764 if (vma->vm_start <= mm->start_stack &&
44765 vma->vm_end >= mm->start_stack) {
44766diff -urNp linux-3.0.4/fs/quota/netlink.c linux-3.0.4/fs/quota/netlink.c
44767--- linux-3.0.4/fs/quota/netlink.c 2011-07-21 22:17:23.000000000 -0400
44768+++ linux-3.0.4/fs/quota/netlink.c 2011-08-23 21:47:56.000000000 -0400
44769@@ -33,7 +33,7 @@ static struct genl_family quota_genl_fam
44770 void quota_send_warning(short type, unsigned int id, dev_t dev,
44771 const char warntype)
44772 {
44773- static atomic_t seq;
44774+ static atomic_unchecked_t seq;
44775 struct sk_buff *skb;
44776 void *msg_head;
44777 int ret;
44778@@ -49,7 +49,7 @@ void quota_send_warning(short type, unsi
44779 "VFS: Not enough memory to send quota warning.\n");
44780 return;
44781 }
44782- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
44783+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
44784 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
44785 if (!msg_head) {
44786 printk(KERN_ERR
44787diff -urNp linux-3.0.4/fs/readdir.c linux-3.0.4/fs/readdir.c
44788--- linux-3.0.4/fs/readdir.c 2011-07-21 22:17:23.000000000 -0400
44789+++ linux-3.0.4/fs/readdir.c 2011-10-06 04:17:55.000000000 -0400
44790@@ -17,6 +17,7 @@
44791 #include <linux/security.h>
44792 #include <linux/syscalls.h>
44793 #include <linux/unistd.h>
44794+#include <linux/namei.h>
44795
44796 #include <asm/uaccess.h>
44797
44798@@ -67,6 +68,7 @@ struct old_linux_dirent {
44799
44800 struct readdir_callback {
44801 struct old_linux_dirent __user * dirent;
44802+ struct file * file;
44803 int result;
44804 };
44805
44806@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
44807 buf->result = -EOVERFLOW;
44808 return -EOVERFLOW;
44809 }
44810+
44811+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
44812+ return 0;
44813+
44814 buf->result++;
44815 dirent = buf->dirent;
44816 if (!access_ok(VERIFY_WRITE, dirent,
44817@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
44818
44819 buf.result = 0;
44820 buf.dirent = dirent;
44821+ buf.file = file;
44822
44823 error = vfs_readdir(file, fillonedir, &buf);
44824 if (buf.result)
44825@@ -142,6 +149,7 @@ struct linux_dirent {
44826 struct getdents_callback {
44827 struct linux_dirent __user * current_dir;
44828 struct linux_dirent __user * previous;
44829+ struct file * file;
44830 int count;
44831 int error;
44832 };
44833@@ -163,6 +171,10 @@ static int filldir(void * __buf, const c
44834 buf->error = -EOVERFLOW;
44835 return -EOVERFLOW;
44836 }
44837+
44838+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
44839+ return 0;
44840+
44841 dirent = buf->previous;
44842 if (dirent) {
44843 if (__put_user(offset, &dirent->d_off))
44844@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
44845 buf.previous = NULL;
44846 buf.count = count;
44847 buf.error = 0;
44848+ buf.file = file;
44849
44850 error = vfs_readdir(file, filldir, &buf);
44851 if (error >= 0)
44852@@ -229,6 +242,7 @@ out:
44853 struct getdents_callback64 {
44854 struct linux_dirent64 __user * current_dir;
44855 struct linux_dirent64 __user * previous;
44856+ struct file *file;
44857 int count;
44858 int error;
44859 };
44860@@ -244,6 +258,10 @@ static int filldir64(void * __buf, const
44861 buf->error = -EINVAL; /* only used if we fail.. */
44862 if (reclen > buf->count)
44863 return -EINVAL;
44864+
44865+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
44866+ return 0;
44867+
44868 dirent = buf->previous;
44869 if (dirent) {
44870 if (__put_user(offset, &dirent->d_off))
44871@@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
44872
44873 buf.current_dir = dirent;
44874 buf.previous = NULL;
44875+ buf.file = file;
44876 buf.count = count;
44877 buf.error = 0;
44878
44879@@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
44880 error = buf.error;
44881 lastdirent = buf.previous;
44882 if (lastdirent) {
44883- typeof(lastdirent->d_off) d_off = file->f_pos;
44884+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
44885 if (__put_user(d_off, &lastdirent->d_off))
44886 error = -EFAULT;
44887 else
44888diff -urNp linux-3.0.4/fs/reiserfs/dir.c linux-3.0.4/fs/reiserfs/dir.c
44889--- linux-3.0.4/fs/reiserfs/dir.c 2011-07-21 22:17:23.000000000 -0400
44890+++ linux-3.0.4/fs/reiserfs/dir.c 2011-08-23 21:48:14.000000000 -0400
44891@@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
44892 struct reiserfs_dir_entry de;
44893 int ret = 0;
44894
44895+ pax_track_stack();
44896+
44897 reiserfs_write_lock(inode->i_sb);
44898
44899 reiserfs_check_lock_depth(inode->i_sb, "readdir");
44900diff -urNp linux-3.0.4/fs/reiserfs/do_balan.c linux-3.0.4/fs/reiserfs/do_balan.c
44901--- linux-3.0.4/fs/reiserfs/do_balan.c 2011-07-21 22:17:23.000000000 -0400
44902+++ linux-3.0.4/fs/reiserfs/do_balan.c 2011-08-23 21:47:56.000000000 -0400
44903@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb,
44904 return;
44905 }
44906
44907- atomic_inc(&(fs_generation(tb->tb_sb)));
44908+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
44909 do_balance_starts(tb);
44910
44911 /* balance leaf returns 0 except if combining L R and S into
44912diff -urNp linux-3.0.4/fs/reiserfs/journal.c linux-3.0.4/fs/reiserfs/journal.c
44913--- linux-3.0.4/fs/reiserfs/journal.c 2011-07-21 22:17:23.000000000 -0400
44914+++ linux-3.0.4/fs/reiserfs/journal.c 2011-08-23 21:48:14.000000000 -0400
44915@@ -2299,6 +2299,8 @@ static struct buffer_head *reiserfs_brea
44916 struct buffer_head *bh;
44917 int i, j;
44918
44919+ pax_track_stack();
44920+
44921 bh = __getblk(dev, block, bufsize);
44922 if (buffer_uptodate(bh))
44923 return (bh);
44924diff -urNp linux-3.0.4/fs/reiserfs/namei.c linux-3.0.4/fs/reiserfs/namei.c
44925--- linux-3.0.4/fs/reiserfs/namei.c 2011-07-21 22:17:23.000000000 -0400
44926+++ linux-3.0.4/fs/reiserfs/namei.c 2011-08-23 21:48:14.000000000 -0400
44927@@ -1225,6 +1225,8 @@ static int reiserfs_rename(struct inode
44928 unsigned long savelink = 1;
44929 struct timespec ctime;
44930
44931+ pax_track_stack();
44932+
44933 /* three balancings: (1) old name removal, (2) new name insertion
44934 and (3) maybe "save" link insertion
44935 stat data updates: (1) old directory,
44936diff -urNp linux-3.0.4/fs/reiserfs/procfs.c linux-3.0.4/fs/reiserfs/procfs.c
44937--- linux-3.0.4/fs/reiserfs/procfs.c 2011-07-21 22:17:23.000000000 -0400
44938+++ linux-3.0.4/fs/reiserfs/procfs.c 2011-08-23 21:48:14.000000000 -0400
44939@@ -113,7 +113,7 @@ static int show_super(struct seq_file *m
44940 "SMALL_TAILS " : "NO_TAILS ",
44941 replay_only(sb) ? "REPLAY_ONLY " : "",
44942 convert_reiserfs(sb) ? "CONV " : "",
44943- atomic_read(&r->s_generation_counter),
44944+ atomic_read_unchecked(&r->s_generation_counter),
44945 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
44946 SF(s_do_balance), SF(s_unneeded_left_neighbor),
44947 SF(s_good_search_by_key_reada), SF(s_bmaps),
44948@@ -299,6 +299,8 @@ static int show_journal(struct seq_file
44949 struct journal_params *jp = &rs->s_v1.s_journal;
44950 char b[BDEVNAME_SIZE];
44951
44952+ pax_track_stack();
44953+
44954 seq_printf(m, /* on-disk fields */
44955 "jp_journal_1st_block: \t%i\n"
44956 "jp_journal_dev: \t%s[%x]\n"
44957diff -urNp linux-3.0.4/fs/reiserfs/stree.c linux-3.0.4/fs/reiserfs/stree.c
44958--- linux-3.0.4/fs/reiserfs/stree.c 2011-07-21 22:17:23.000000000 -0400
44959+++ linux-3.0.4/fs/reiserfs/stree.c 2011-08-23 21:48:14.000000000 -0400
44960@@ -1196,6 +1196,8 @@ int reiserfs_delete_item(struct reiserfs
44961 int iter = 0;
44962 #endif
44963
44964+ pax_track_stack();
44965+
44966 BUG_ON(!th->t_trans_id);
44967
44968 init_tb_struct(th, &s_del_balance, sb, path,
44969@@ -1333,6 +1335,8 @@ void reiserfs_delete_solid_item(struct r
44970 int retval;
44971 int quota_cut_bytes = 0;
44972
44973+ pax_track_stack();
44974+
44975 BUG_ON(!th->t_trans_id);
44976
44977 le_key2cpu_key(&cpu_key, key);
44978@@ -1562,6 +1566,8 @@ int reiserfs_cut_from_item(struct reiser
44979 int quota_cut_bytes;
44980 loff_t tail_pos = 0;
44981
44982+ pax_track_stack();
44983+
44984 BUG_ON(!th->t_trans_id);
44985
44986 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
44987@@ -1957,6 +1963,8 @@ int reiserfs_paste_into_item(struct reis
44988 int retval;
44989 int fs_gen;
44990
44991+ pax_track_stack();
44992+
44993 BUG_ON(!th->t_trans_id);
44994
44995 fs_gen = get_generation(inode->i_sb);
44996@@ -2045,6 +2053,8 @@ int reiserfs_insert_item(struct reiserfs
44997 int fs_gen = 0;
44998 int quota_bytes = 0;
44999
45000+ pax_track_stack();
45001+
45002 BUG_ON(!th->t_trans_id);
45003
45004 if (inode) { /* Do we count quotas for item? */
45005diff -urNp linux-3.0.4/fs/reiserfs/super.c linux-3.0.4/fs/reiserfs/super.c
45006--- linux-3.0.4/fs/reiserfs/super.c 2011-07-21 22:17:23.000000000 -0400
45007+++ linux-3.0.4/fs/reiserfs/super.c 2011-08-23 21:48:14.000000000 -0400
45008@@ -927,6 +927,8 @@ static int reiserfs_parse_options(struct
45009 {.option_name = NULL}
45010 };
45011
45012+ pax_track_stack();
45013+
45014 *blocks = 0;
45015 if (!options || !*options)
45016 /* use default configuration: create tails, journaling on, no
45017diff -urNp linux-3.0.4/fs/select.c linux-3.0.4/fs/select.c
45018--- linux-3.0.4/fs/select.c 2011-07-21 22:17:23.000000000 -0400
45019+++ linux-3.0.4/fs/select.c 2011-08-23 21:48:14.000000000 -0400
45020@@ -20,6 +20,7 @@
45021 #include <linux/module.h>
45022 #include <linux/slab.h>
45023 #include <linux/poll.h>
45024+#include <linux/security.h>
45025 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
45026 #include <linux/file.h>
45027 #include <linux/fdtable.h>
45028@@ -403,6 +404,8 @@ int do_select(int n, fd_set_bits *fds, s
45029 int retval, i, timed_out = 0;
45030 unsigned long slack = 0;
45031
45032+ pax_track_stack();
45033+
45034 rcu_read_lock();
45035 retval = max_select_fd(n, fds);
45036 rcu_read_unlock();
45037@@ -528,6 +531,8 @@ int core_sys_select(int n, fd_set __user
45038 /* Allocate small arguments on the stack to save memory and be faster */
45039 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
45040
45041+ pax_track_stack();
45042+
45043 ret = -EINVAL;
45044 if (n < 0)
45045 goto out_nofds;
45046@@ -837,6 +842,9 @@ int do_sys_poll(struct pollfd __user *uf
45047 struct poll_list *walk = head;
45048 unsigned long todo = nfds;
45049
45050+ pax_track_stack();
45051+
45052+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
45053 if (nfds > rlimit(RLIMIT_NOFILE))
45054 return -EINVAL;
45055
45056diff -urNp linux-3.0.4/fs/seq_file.c linux-3.0.4/fs/seq_file.c
45057--- linux-3.0.4/fs/seq_file.c 2011-07-21 22:17:23.000000000 -0400
45058+++ linux-3.0.4/fs/seq_file.c 2011-08-23 21:47:56.000000000 -0400
45059@@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
45060 return 0;
45061 }
45062 if (!m->buf) {
45063- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
45064+ m->size = PAGE_SIZE;
45065+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
45066 if (!m->buf)
45067 return -ENOMEM;
45068 }
45069@@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
45070 Eoverflow:
45071 m->op->stop(m, p);
45072 kfree(m->buf);
45073- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
45074+ m->size <<= 1;
45075+ m->buf = kmalloc(m->size, GFP_KERNEL);
45076 return !m->buf ? -ENOMEM : -EAGAIN;
45077 }
45078
45079@@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
45080 m->version = file->f_version;
45081 /* grab buffer if we didn't have one */
45082 if (!m->buf) {
45083- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
45084+ m->size = PAGE_SIZE;
45085+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
45086 if (!m->buf)
45087 goto Enomem;
45088 }
45089@@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
45090 goto Fill;
45091 m->op->stop(m, p);
45092 kfree(m->buf);
45093- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
45094+ m->size <<= 1;
45095+ m->buf = kmalloc(m->size, GFP_KERNEL);
45096 if (!m->buf)
45097 goto Enomem;
45098 m->count = 0;
45099@@ -549,7 +553,7 @@ static void single_stop(struct seq_file
45100 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
45101 void *data)
45102 {
45103- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
45104+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
45105 int res = -ENOMEM;
45106
45107 if (op) {
45108diff -urNp linux-3.0.4/fs/splice.c linux-3.0.4/fs/splice.c
45109--- linux-3.0.4/fs/splice.c 2011-07-21 22:17:23.000000000 -0400
45110+++ linux-3.0.4/fs/splice.c 2011-10-06 04:17:55.000000000 -0400
45111@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode
45112 pipe_lock(pipe);
45113
45114 for (;;) {
45115- if (!pipe->readers) {
45116+ if (!atomic_read(&pipe->readers)) {
45117 send_sig(SIGPIPE, current, 0);
45118 if (!ret)
45119 ret = -EPIPE;
45120@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode
45121 do_wakeup = 0;
45122 }
45123
45124- pipe->waiting_writers++;
45125+ atomic_inc(&pipe->waiting_writers);
45126 pipe_wait(pipe);
45127- pipe->waiting_writers--;
45128+ atomic_dec(&pipe->waiting_writers);
45129 }
45130
45131 pipe_unlock(pipe);
45132@@ -320,6 +320,8 @@ __generic_file_splice_read(struct file *
45133 .spd_release = spd_release_page,
45134 };
45135
45136+ pax_track_stack();
45137+
45138 if (splice_grow_spd(pipe, &spd))
45139 return -ENOMEM;
45140
45141@@ -560,7 +562,7 @@ static ssize_t kernel_readv(struct file
45142 old_fs = get_fs();
45143 set_fs(get_ds());
45144 /* The cast to a user pointer is valid due to the set_fs() */
45145- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
45146+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
45147 set_fs(old_fs);
45148
45149 return res;
45150@@ -575,7 +577,7 @@ static ssize_t kernel_write(struct file
45151 old_fs = get_fs();
45152 set_fs(get_ds());
45153 /* The cast to a user pointer is valid due to the set_fs() */
45154- res = vfs_write(file, (const char __user *)buf, count, &pos);
45155+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
45156 set_fs(old_fs);
45157
45158 return res;
45159@@ -603,6 +605,8 @@ ssize_t default_file_splice_read(struct
45160 .spd_release = spd_release_page,
45161 };
45162
45163+ pax_track_stack();
45164+
45165 if (splice_grow_spd(pipe, &spd))
45166 return -ENOMEM;
45167
45168@@ -626,7 +630,7 @@ ssize_t default_file_splice_read(struct
45169 goto err;
45170
45171 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
45172- vec[i].iov_base = (void __user *) page_address(page);
45173+ vec[i].iov_base = (void __force_user *) page_address(page);
45174 vec[i].iov_len = this_len;
45175 spd.pages[i] = page;
45176 spd.nr_pages++;
45177@@ -846,10 +850,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
45178 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
45179 {
45180 while (!pipe->nrbufs) {
45181- if (!pipe->writers)
45182+ if (!atomic_read(&pipe->writers))
45183 return 0;
45184
45185- if (!pipe->waiting_writers && sd->num_spliced)
45186+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
45187 return 0;
45188
45189 if (sd->flags & SPLICE_F_NONBLOCK)
45190@@ -1182,7 +1186,7 @@ ssize_t splice_direct_to_actor(struct fi
45191 * out of the pipe right after the splice_to_pipe(). So set
45192 * PIPE_READERS appropriately.
45193 */
45194- pipe->readers = 1;
45195+ atomic_set(&pipe->readers, 1);
45196
45197 current->splice_pipe = pipe;
45198 }
45199@@ -1619,6 +1623,8 @@ static long vmsplice_to_pipe(struct file
45200 };
45201 long ret;
45202
45203+ pax_track_stack();
45204+
45205 pipe = get_pipe_info(file);
45206 if (!pipe)
45207 return -EBADF;
45208@@ -1734,9 +1740,9 @@ static int ipipe_prep(struct pipe_inode_
45209 ret = -ERESTARTSYS;
45210 break;
45211 }
45212- if (!pipe->writers)
45213+ if (!atomic_read(&pipe->writers))
45214 break;
45215- if (!pipe->waiting_writers) {
45216+ if (!atomic_read(&pipe->waiting_writers)) {
45217 if (flags & SPLICE_F_NONBLOCK) {
45218 ret = -EAGAIN;
45219 break;
45220@@ -1768,7 +1774,7 @@ static int opipe_prep(struct pipe_inode_
45221 pipe_lock(pipe);
45222
45223 while (pipe->nrbufs >= pipe->buffers) {
45224- if (!pipe->readers) {
45225+ if (!atomic_read(&pipe->readers)) {
45226 send_sig(SIGPIPE, current, 0);
45227 ret = -EPIPE;
45228 break;
45229@@ -1781,9 +1787,9 @@ static int opipe_prep(struct pipe_inode_
45230 ret = -ERESTARTSYS;
45231 break;
45232 }
45233- pipe->waiting_writers++;
45234+ atomic_inc(&pipe->waiting_writers);
45235 pipe_wait(pipe);
45236- pipe->waiting_writers--;
45237+ atomic_dec(&pipe->waiting_writers);
45238 }
45239
45240 pipe_unlock(pipe);
45241@@ -1819,14 +1825,14 @@ retry:
45242 pipe_double_lock(ipipe, opipe);
45243
45244 do {
45245- if (!opipe->readers) {
45246+ if (!atomic_read(&opipe->readers)) {
45247 send_sig(SIGPIPE, current, 0);
45248 if (!ret)
45249 ret = -EPIPE;
45250 break;
45251 }
45252
45253- if (!ipipe->nrbufs && !ipipe->writers)
45254+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
45255 break;
45256
45257 /*
45258@@ -1923,7 +1929,7 @@ static int link_pipe(struct pipe_inode_i
45259 pipe_double_lock(ipipe, opipe);
45260
45261 do {
45262- if (!opipe->readers) {
45263+ if (!atomic_read(&opipe->readers)) {
45264 send_sig(SIGPIPE, current, 0);
45265 if (!ret)
45266 ret = -EPIPE;
45267@@ -1968,7 +1974,7 @@ static int link_pipe(struct pipe_inode_i
45268 * return EAGAIN if we have the potential of some data in the
45269 * future, otherwise just return 0
45270 */
45271- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
45272+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
45273 ret = -EAGAIN;
45274
45275 pipe_unlock(ipipe);
45276diff -urNp linux-3.0.4/fs/sysfs/file.c linux-3.0.4/fs/sysfs/file.c
45277--- linux-3.0.4/fs/sysfs/file.c 2011-07-21 22:17:23.000000000 -0400
45278+++ linux-3.0.4/fs/sysfs/file.c 2011-08-23 21:47:56.000000000 -0400
45279@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
45280
45281 struct sysfs_open_dirent {
45282 atomic_t refcnt;
45283- atomic_t event;
45284+ atomic_unchecked_t event;
45285 wait_queue_head_t poll;
45286 struct list_head buffers; /* goes through sysfs_buffer.list */
45287 };
45288@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentr
45289 if (!sysfs_get_active(attr_sd))
45290 return -ENODEV;
45291
45292- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
45293+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
45294 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
45295
45296 sysfs_put_active(attr_sd);
45297@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct
45298 return -ENOMEM;
45299
45300 atomic_set(&new_od->refcnt, 0);
45301- atomic_set(&new_od->event, 1);
45302+ atomic_set_unchecked(&new_od->event, 1);
45303 init_waitqueue_head(&new_od->poll);
45304 INIT_LIST_HEAD(&new_od->buffers);
45305 goto retry;
45306@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct fi
45307
45308 sysfs_put_active(attr_sd);
45309
45310- if (buffer->event != atomic_read(&od->event))
45311+ if (buffer->event != atomic_read_unchecked(&od->event))
45312 goto trigger;
45313
45314 return DEFAULT_POLLMASK;
45315@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_di
45316
45317 od = sd->s_attr.open;
45318 if (od) {
45319- atomic_inc(&od->event);
45320+ atomic_inc_unchecked(&od->event);
45321 wake_up_interruptible(&od->poll);
45322 }
45323
45324diff -urNp linux-3.0.4/fs/sysfs/mount.c linux-3.0.4/fs/sysfs/mount.c
45325--- linux-3.0.4/fs/sysfs/mount.c 2011-07-21 22:17:23.000000000 -0400
45326+++ linux-3.0.4/fs/sysfs/mount.c 2011-08-23 21:48:14.000000000 -0400
45327@@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
45328 .s_name = "",
45329 .s_count = ATOMIC_INIT(1),
45330 .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
45331+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
45332+ .s_mode = S_IFDIR | S_IRWXU,
45333+#else
45334 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
45335+#endif
45336 .s_ino = 1,
45337 };
45338
45339diff -urNp linux-3.0.4/fs/sysfs/symlink.c linux-3.0.4/fs/sysfs/symlink.c
45340--- linux-3.0.4/fs/sysfs/symlink.c 2011-07-21 22:17:23.000000000 -0400
45341+++ linux-3.0.4/fs/sysfs/symlink.c 2011-08-23 21:47:56.000000000 -0400
45342@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct de
45343
45344 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
45345 {
45346- char *page = nd_get_link(nd);
45347+ const char *page = nd_get_link(nd);
45348 if (!IS_ERR(page))
45349 free_page((unsigned long)page);
45350 }
45351diff -urNp linux-3.0.4/fs/udf/inode.c linux-3.0.4/fs/udf/inode.c
45352--- linux-3.0.4/fs/udf/inode.c 2011-07-21 22:17:23.000000000 -0400
45353+++ linux-3.0.4/fs/udf/inode.c 2011-08-23 21:48:14.000000000 -0400
45354@@ -560,6 +560,8 @@ static struct buffer_head *inode_getblk(
45355 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
45356 int lastblock = 0;
45357
45358+ pax_track_stack();
45359+
45360 prev_epos.offset = udf_file_entry_alloc_offset(inode);
45361 prev_epos.block = iinfo->i_location;
45362 prev_epos.bh = NULL;
45363diff -urNp linux-3.0.4/fs/udf/misc.c linux-3.0.4/fs/udf/misc.c
45364--- linux-3.0.4/fs/udf/misc.c 2011-07-21 22:17:23.000000000 -0400
45365+++ linux-3.0.4/fs/udf/misc.c 2011-08-23 21:47:56.000000000 -0400
45366@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
45367
45368 u8 udf_tag_checksum(const struct tag *t)
45369 {
45370- u8 *data = (u8 *)t;
45371+ const u8 *data = (const u8 *)t;
45372 u8 checksum = 0;
45373 int i;
45374 for (i = 0; i < sizeof(struct tag); ++i)
45375diff -urNp linux-3.0.4/fs/utimes.c linux-3.0.4/fs/utimes.c
45376--- linux-3.0.4/fs/utimes.c 2011-07-21 22:17:23.000000000 -0400
45377+++ linux-3.0.4/fs/utimes.c 2011-08-23 21:48:14.000000000 -0400
45378@@ -1,6 +1,7 @@
45379 #include <linux/compiler.h>
45380 #include <linux/file.h>
45381 #include <linux/fs.h>
45382+#include <linux/security.h>
45383 #include <linux/linkage.h>
45384 #include <linux/mount.h>
45385 #include <linux/namei.h>
45386@@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
45387 goto mnt_drop_write_and_out;
45388 }
45389 }
45390+
45391+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
45392+ error = -EACCES;
45393+ goto mnt_drop_write_and_out;
45394+ }
45395+
45396 mutex_lock(&inode->i_mutex);
45397 error = notify_change(path->dentry, &newattrs);
45398 mutex_unlock(&inode->i_mutex);
45399diff -urNp linux-3.0.4/fs/xattr_acl.c linux-3.0.4/fs/xattr_acl.c
45400--- linux-3.0.4/fs/xattr_acl.c 2011-07-21 22:17:23.000000000 -0400
45401+++ linux-3.0.4/fs/xattr_acl.c 2011-08-23 21:47:56.000000000 -0400
45402@@ -17,8 +17,8 @@
45403 struct posix_acl *
45404 posix_acl_from_xattr(const void *value, size_t size)
45405 {
45406- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
45407- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
45408+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
45409+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
45410 int count;
45411 struct posix_acl *acl;
45412 struct posix_acl_entry *acl_e;
45413diff -urNp linux-3.0.4/fs/xattr.c linux-3.0.4/fs/xattr.c
45414--- linux-3.0.4/fs/xattr.c 2011-07-21 22:17:23.000000000 -0400
45415+++ linux-3.0.4/fs/xattr.c 2011-08-23 21:48:14.000000000 -0400
45416@@ -254,7 +254,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
45417 * Extended attribute SET operations
45418 */
45419 static long
45420-setxattr(struct dentry *d, const char __user *name, const void __user *value,
45421+setxattr(struct path *path, const char __user *name, const void __user *value,
45422 size_t size, int flags)
45423 {
45424 int error;
45425@@ -278,7 +278,13 @@ setxattr(struct dentry *d, const char __
45426 return PTR_ERR(kvalue);
45427 }
45428
45429- error = vfs_setxattr(d, kname, kvalue, size, flags);
45430+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
45431+ error = -EACCES;
45432+ goto out;
45433+ }
45434+
45435+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
45436+out:
45437 kfree(kvalue);
45438 return error;
45439 }
45440@@ -295,7 +301,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
45441 return error;
45442 error = mnt_want_write(path.mnt);
45443 if (!error) {
45444- error = setxattr(path.dentry, name, value, size, flags);
45445+ error = setxattr(&path, name, value, size, flags);
45446 mnt_drop_write(path.mnt);
45447 }
45448 path_put(&path);
45449@@ -314,7 +320,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
45450 return error;
45451 error = mnt_want_write(path.mnt);
45452 if (!error) {
45453- error = setxattr(path.dentry, name, value, size, flags);
45454+ error = setxattr(&path, name, value, size, flags);
45455 mnt_drop_write(path.mnt);
45456 }
45457 path_put(&path);
45458@@ -325,17 +331,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
45459 const void __user *,value, size_t, size, int, flags)
45460 {
45461 struct file *f;
45462- struct dentry *dentry;
45463 int error = -EBADF;
45464
45465 f = fget(fd);
45466 if (!f)
45467 return error;
45468- dentry = f->f_path.dentry;
45469- audit_inode(NULL, dentry);
45470+ audit_inode(NULL, f->f_path.dentry);
45471 error = mnt_want_write_file(f);
45472 if (!error) {
45473- error = setxattr(dentry, name, value, size, flags);
45474+ error = setxattr(&f->f_path, name, value, size, flags);
45475 mnt_drop_write(f->f_path.mnt);
45476 }
45477 fput(f);
45478diff -urNp linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl32.c linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl32.c
45479--- linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-07-21 22:17:23.000000000 -0400
45480+++ linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-08-23 21:48:14.000000000 -0400
45481@@ -73,6 +73,7 @@ xfs_compat_ioc_fsgeometry_v1(
45482 xfs_fsop_geom_t fsgeo;
45483 int error;
45484
45485+ memset(&fsgeo, 0, sizeof(fsgeo));
45486 error = xfs_fs_geometry(mp, &fsgeo, 3);
45487 if (error)
45488 return -error;
45489diff -urNp linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl.c linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl.c
45490--- linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl.c 2011-07-21 22:17:23.000000000 -0400
45491+++ linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl.c 2011-08-23 21:47:56.000000000 -0400
45492@@ -128,7 +128,7 @@ xfs_find_handle(
45493 }
45494
45495 error = -EFAULT;
45496- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
45497+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
45498 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
45499 goto out_put;
45500
45501diff -urNp linux-3.0.4/fs/xfs/linux-2.6/xfs_iops.c linux-3.0.4/fs/xfs/linux-2.6/xfs_iops.c
45502--- linux-3.0.4/fs/xfs/linux-2.6/xfs_iops.c 2011-07-21 22:17:23.000000000 -0400
45503+++ linux-3.0.4/fs/xfs/linux-2.6/xfs_iops.c 2011-08-23 21:47:56.000000000 -0400
45504@@ -437,7 +437,7 @@ xfs_vn_put_link(
45505 struct nameidata *nd,
45506 void *p)
45507 {
45508- char *s = nd_get_link(nd);
45509+ const char *s = nd_get_link(nd);
45510
45511 if (!IS_ERR(s))
45512 kfree(s);
45513diff -urNp linux-3.0.4/fs/xfs/xfs_bmap.c linux-3.0.4/fs/xfs/xfs_bmap.c
45514--- linux-3.0.4/fs/xfs/xfs_bmap.c 2011-07-21 22:17:23.000000000 -0400
45515+++ linux-3.0.4/fs/xfs/xfs_bmap.c 2011-08-23 21:47:56.000000000 -0400
45516@@ -253,7 +253,7 @@ xfs_bmap_validate_ret(
45517 int nmap,
45518 int ret_nmap);
45519 #else
45520-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
45521+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
45522 #endif /* DEBUG */
45523
45524 STATIC int
45525diff -urNp linux-3.0.4/fs/xfs/xfs_dir2_sf.c linux-3.0.4/fs/xfs/xfs_dir2_sf.c
45526--- linux-3.0.4/fs/xfs/xfs_dir2_sf.c 2011-07-21 22:17:23.000000000 -0400
45527+++ linux-3.0.4/fs/xfs/xfs_dir2_sf.c 2011-08-23 21:47:56.000000000 -0400
45528@@ -780,7 +780,15 @@ xfs_dir2_sf_getdents(
45529 }
45530
45531 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
45532- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
45533+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
45534+ char name[sfep->namelen];
45535+ memcpy(name, sfep->name, sfep->namelen);
45536+ if (filldir(dirent, name, sfep->namelen,
45537+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
45538+ *offset = off & 0x7fffffff;
45539+ return 0;
45540+ }
45541+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
45542 off & 0x7fffffff, ino, DT_UNKNOWN)) {
45543 *offset = off & 0x7fffffff;
45544 return 0;
45545diff -urNp linux-3.0.4/grsecurity/gracl_alloc.c linux-3.0.4/grsecurity/gracl_alloc.c
45546--- linux-3.0.4/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
45547+++ linux-3.0.4/grsecurity/gracl_alloc.c 2011-08-23 21:48:14.000000000 -0400
45548@@ -0,0 +1,105 @@
45549+#include <linux/kernel.h>
45550+#include <linux/mm.h>
45551+#include <linux/slab.h>
45552+#include <linux/vmalloc.h>
45553+#include <linux/gracl.h>
45554+#include <linux/grsecurity.h>
45555+
45556+static unsigned long alloc_stack_next = 1;
45557+static unsigned long alloc_stack_size = 1;
45558+static void **alloc_stack;
45559+
45560+static __inline__ int
45561+alloc_pop(void)
45562+{
45563+ if (alloc_stack_next == 1)
45564+ return 0;
45565+
45566+ kfree(alloc_stack[alloc_stack_next - 2]);
45567+
45568+ alloc_stack_next--;
45569+
45570+ return 1;
45571+}
45572+
45573+static __inline__ int
45574+alloc_push(void *buf)
45575+{
45576+ if (alloc_stack_next >= alloc_stack_size)
45577+ return 1;
45578+
45579+ alloc_stack[alloc_stack_next - 1] = buf;
45580+
45581+ alloc_stack_next++;
45582+
45583+ return 0;
45584+}
45585+
45586+void *
45587+acl_alloc(unsigned long len)
45588+{
45589+ void *ret = NULL;
45590+
45591+ if (!len || len > PAGE_SIZE)
45592+ goto out;
45593+
45594+ ret = kmalloc(len, GFP_KERNEL);
45595+
45596+ if (ret) {
45597+ if (alloc_push(ret)) {
45598+ kfree(ret);
45599+ ret = NULL;
45600+ }
45601+ }
45602+
45603+out:
45604+ return ret;
45605+}
45606+
45607+void *
45608+acl_alloc_num(unsigned long num, unsigned long len)
45609+{
45610+ if (!len || (num > (PAGE_SIZE / len)))
45611+ return NULL;
45612+
45613+ return acl_alloc(num * len);
45614+}
45615+
45616+void
45617+acl_free_all(void)
45618+{
45619+ if (gr_acl_is_enabled() || !alloc_stack)
45620+ return;
45621+
45622+ while (alloc_pop()) ;
45623+
45624+ if (alloc_stack) {
45625+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
45626+ kfree(alloc_stack);
45627+ else
45628+ vfree(alloc_stack);
45629+ }
45630+
45631+ alloc_stack = NULL;
45632+ alloc_stack_size = 1;
45633+ alloc_stack_next = 1;
45634+
45635+ return;
45636+}
45637+
45638+int
45639+acl_alloc_stack_init(unsigned long size)
45640+{
45641+ if ((size * sizeof (void *)) <= PAGE_SIZE)
45642+ alloc_stack =
45643+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
45644+ else
45645+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
45646+
45647+ alloc_stack_size = size;
45648+
45649+ if (!alloc_stack)
45650+ return 0;
45651+ else
45652+ return 1;
45653+}
45654diff -urNp linux-3.0.4/grsecurity/gracl.c linux-3.0.4/grsecurity/gracl.c
45655--- linux-3.0.4/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
45656+++ linux-3.0.4/grsecurity/gracl.c 2011-08-23 21:48:14.000000000 -0400
45657@@ -0,0 +1,4106 @@
45658+#include <linux/kernel.h>
45659+#include <linux/module.h>
45660+#include <linux/sched.h>
45661+#include <linux/mm.h>
45662+#include <linux/file.h>
45663+#include <linux/fs.h>
45664+#include <linux/namei.h>
45665+#include <linux/mount.h>
45666+#include <linux/tty.h>
45667+#include <linux/proc_fs.h>
45668+#include <linux/lglock.h>
45669+#include <linux/slab.h>
45670+#include <linux/vmalloc.h>
45671+#include <linux/types.h>
45672+#include <linux/sysctl.h>
45673+#include <linux/netdevice.h>
45674+#include <linux/ptrace.h>
45675+#include <linux/gracl.h>
45676+#include <linux/gralloc.h>
45677+#include <linux/grsecurity.h>
45678+#include <linux/grinternal.h>
45679+#include <linux/pid_namespace.h>
45680+#include <linux/fdtable.h>
45681+#include <linux/percpu.h>
45682+
45683+#include <asm/uaccess.h>
45684+#include <asm/errno.h>
45685+#include <asm/mman.h>
45686+
45687+static struct acl_role_db acl_role_set;
45688+static struct name_db name_set;
45689+static struct inodev_db inodev_set;
45690+
45691+/* for keeping track of userspace pointers used for subjects, so we
45692+ can share references in the kernel as well
45693+*/
45694+
45695+static struct path real_root;
45696+
45697+static struct acl_subj_map_db subj_map_set;
45698+
45699+static struct acl_role_label *default_role;
45700+
45701+static struct acl_role_label *role_list;
45702+
45703+static u16 acl_sp_role_value;
45704+
45705+extern char *gr_shared_page[4];
45706+static DEFINE_MUTEX(gr_dev_mutex);
45707+DEFINE_RWLOCK(gr_inode_lock);
45708+
45709+struct gr_arg *gr_usermode;
45710+
45711+static unsigned int gr_status __read_only = GR_STATUS_INIT;
45712+
45713+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
45714+extern void gr_clear_learn_entries(void);
45715+
45716+#ifdef CONFIG_GRKERNSEC_RESLOG
45717+extern void gr_log_resource(const struct task_struct *task,
45718+ const int res, const unsigned long wanted, const int gt);
45719+#endif
45720+
45721+unsigned char *gr_system_salt;
45722+unsigned char *gr_system_sum;
45723+
45724+static struct sprole_pw **acl_special_roles = NULL;
45725+static __u16 num_sprole_pws = 0;
45726+
45727+static struct acl_role_label *kernel_role = NULL;
45728+
45729+static unsigned int gr_auth_attempts = 0;
45730+static unsigned long gr_auth_expires = 0UL;
45731+
45732+#ifdef CONFIG_NET
45733+extern struct vfsmount *sock_mnt;
45734+#endif
45735+
45736+extern struct vfsmount *pipe_mnt;
45737+extern struct vfsmount *shm_mnt;
45738+#ifdef CONFIG_HUGETLBFS
45739+extern struct vfsmount *hugetlbfs_vfsmount;
45740+#endif
45741+
45742+static struct acl_object_label *fakefs_obj_rw;
45743+static struct acl_object_label *fakefs_obj_rwx;
45744+
45745+extern int gr_init_uidset(void);
45746+extern void gr_free_uidset(void);
45747+extern void gr_remove_uid(uid_t uid);
45748+extern int gr_find_uid(uid_t uid);
45749+
45750+DECLARE_BRLOCK(vfsmount_lock);
45751+
45752+__inline__ int
45753+gr_acl_is_enabled(void)
45754+{
45755+ return (gr_status & GR_READY);
45756+}
45757+
45758+#ifdef CONFIG_BTRFS_FS
45759+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
45760+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
45761+#endif
45762+
45763+static inline dev_t __get_dev(const struct dentry *dentry)
45764+{
45765+#ifdef CONFIG_BTRFS_FS
45766+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
45767+ return get_btrfs_dev_from_inode(dentry->d_inode);
45768+ else
45769+#endif
45770+ return dentry->d_inode->i_sb->s_dev;
45771+}
45772+
45773+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
45774+{
45775+ return __get_dev(dentry);
45776+}
45777+
45778+static char gr_task_roletype_to_char(struct task_struct *task)
45779+{
45780+ switch (task->role->roletype &
45781+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
45782+ GR_ROLE_SPECIAL)) {
45783+ case GR_ROLE_DEFAULT:
45784+ return 'D';
45785+ case GR_ROLE_USER:
45786+ return 'U';
45787+ case GR_ROLE_GROUP:
45788+ return 'G';
45789+ case GR_ROLE_SPECIAL:
45790+ return 'S';
45791+ }
45792+
45793+ return 'X';
45794+}
45795+
45796+char gr_roletype_to_char(void)
45797+{
45798+ return gr_task_roletype_to_char(current);
45799+}
45800+
45801+__inline__ int
45802+gr_acl_tpe_check(void)
45803+{
45804+ if (unlikely(!(gr_status & GR_READY)))
45805+ return 0;
45806+ if (current->role->roletype & GR_ROLE_TPE)
45807+ return 1;
45808+ else
45809+ return 0;
45810+}
45811+
45812+int
45813+gr_handle_rawio(const struct inode *inode)
45814+{
45815+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
45816+ if (inode && S_ISBLK(inode->i_mode) &&
45817+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
45818+ !capable(CAP_SYS_RAWIO))
45819+ return 1;
45820+#endif
45821+ return 0;
45822+}
45823+
45824+static int
45825+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
45826+{
45827+ if (likely(lena != lenb))
45828+ return 0;
45829+
45830+ return !memcmp(a, b, lena);
45831+}
45832+
45833+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
45834+{
45835+ *buflen -= namelen;
45836+ if (*buflen < 0)
45837+ return -ENAMETOOLONG;
45838+ *buffer -= namelen;
45839+ memcpy(*buffer, str, namelen);
45840+ return 0;
45841+}
45842+
45843+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
45844+{
45845+ return prepend(buffer, buflen, name->name, name->len);
45846+}
45847+
45848+static int prepend_path(const struct path *path, struct path *root,
45849+ char **buffer, int *buflen)
45850+{
45851+ struct dentry *dentry = path->dentry;
45852+ struct vfsmount *vfsmnt = path->mnt;
45853+ bool slash = false;
45854+ int error = 0;
45855+
45856+ while (dentry != root->dentry || vfsmnt != root->mnt) {
45857+ struct dentry * parent;
45858+
45859+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
45860+ /* Global root? */
45861+ if (vfsmnt->mnt_parent == vfsmnt) {
45862+ goto out;
45863+ }
45864+ dentry = vfsmnt->mnt_mountpoint;
45865+ vfsmnt = vfsmnt->mnt_parent;
45866+ continue;
45867+ }
45868+ parent = dentry->d_parent;
45869+ prefetch(parent);
45870+ spin_lock(&dentry->d_lock);
45871+ error = prepend_name(buffer, buflen, &dentry->d_name);
45872+ spin_unlock(&dentry->d_lock);
45873+ if (!error)
45874+ error = prepend(buffer, buflen, "/", 1);
45875+ if (error)
45876+ break;
45877+
45878+ slash = true;
45879+ dentry = parent;
45880+ }
45881+
45882+out:
45883+ if (!error && !slash)
45884+ error = prepend(buffer, buflen, "/", 1);
45885+
45886+ return error;
45887+}
45888+
45889+/* this must be called with vfsmount_lock and rename_lock held */
45890+
45891+static char *__our_d_path(const struct path *path, struct path *root,
45892+ char *buf, int buflen)
45893+{
45894+ char *res = buf + buflen;
45895+ int error;
45896+
45897+ prepend(&res, &buflen, "\0", 1);
45898+ error = prepend_path(path, root, &res, &buflen);
45899+ if (error)
45900+ return ERR_PTR(error);
45901+
45902+ return res;
45903+}
45904+
45905+static char *
45906+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
45907+{
45908+ char *retval;
45909+
45910+ retval = __our_d_path(path, root, buf, buflen);
45911+ if (unlikely(IS_ERR(retval)))
45912+ retval = strcpy(buf, "<path too long>");
45913+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
45914+ retval[1] = '\0';
45915+
45916+ return retval;
45917+}
45918+
45919+static char *
45920+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
45921+ char *buf, int buflen)
45922+{
45923+ struct path path;
45924+ char *res;
45925+
45926+ path.dentry = (struct dentry *)dentry;
45927+ path.mnt = (struct vfsmount *)vfsmnt;
45928+
45929+ /* we can use real_root.dentry, real_root.mnt, because this is only called
45930+ by the RBAC system */
45931+ res = gen_full_path(&path, &real_root, buf, buflen);
45932+
45933+ return res;
45934+}
45935+
45936+static char *
45937+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
45938+ char *buf, int buflen)
45939+{
45940+ char *res;
45941+ struct path path;
45942+ struct path root;
45943+ struct task_struct *reaper = &init_task;
45944+
45945+ path.dentry = (struct dentry *)dentry;
45946+ path.mnt = (struct vfsmount *)vfsmnt;
45947+
45948+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
45949+ get_fs_root(reaper->fs, &root);
45950+
45951+ write_seqlock(&rename_lock);
45952+ br_read_lock(vfsmount_lock);
45953+ res = gen_full_path(&path, &root, buf, buflen);
45954+ br_read_unlock(vfsmount_lock);
45955+ write_sequnlock(&rename_lock);
45956+
45957+ path_put(&root);
45958+ return res;
45959+}
45960+
45961+static char *
45962+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
45963+{
45964+ char *ret;
45965+ write_seqlock(&rename_lock);
45966+ br_read_lock(vfsmount_lock);
45967+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
45968+ PAGE_SIZE);
45969+ br_read_unlock(vfsmount_lock);
45970+ write_sequnlock(&rename_lock);
45971+ return ret;
45972+}
45973+
45974+char *
45975+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
45976+{
45977+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
45978+ PAGE_SIZE);
45979+}
45980+
45981+char *
45982+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
45983+{
45984+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
45985+ PAGE_SIZE);
45986+}
45987+
45988+char *
45989+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
45990+{
45991+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
45992+ PAGE_SIZE);
45993+}
45994+
45995+char *
45996+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
45997+{
45998+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
45999+ PAGE_SIZE);
46000+}
46001+
46002+char *
46003+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
46004+{
46005+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
46006+ PAGE_SIZE);
46007+}
46008+
46009+__inline__ __u32
46010+to_gr_audit(const __u32 reqmode)
46011+{
46012+ /* masks off auditable permission flags, then shifts them to create
46013+ auditing flags, and adds the special case of append auditing if
46014+ we're requesting write */
46015+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
46016+}
46017+
46018+struct acl_subject_label *
46019+lookup_subject_map(const struct acl_subject_label *userp)
46020+{
46021+ unsigned int index = shash(userp, subj_map_set.s_size);
46022+ struct subject_map *match;
46023+
46024+ match = subj_map_set.s_hash[index];
46025+
46026+ while (match && match->user != userp)
46027+ match = match->next;
46028+
46029+ if (match != NULL)
46030+ return match->kernel;
46031+ else
46032+ return NULL;
46033+}
46034+
46035+static void
46036+insert_subj_map_entry(struct subject_map *subjmap)
46037+{
46038+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
46039+ struct subject_map **curr;
46040+
46041+ subjmap->prev = NULL;
46042+
46043+ curr = &subj_map_set.s_hash[index];
46044+ if (*curr != NULL)
46045+ (*curr)->prev = subjmap;
46046+
46047+ subjmap->next = *curr;
46048+ *curr = subjmap;
46049+
46050+ return;
46051+}
46052+
46053+static struct acl_role_label *
46054+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
46055+ const gid_t gid)
46056+{
46057+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
46058+ struct acl_role_label *match;
46059+ struct role_allowed_ip *ipp;
46060+ unsigned int x;
46061+ u32 curr_ip = task->signal->curr_ip;
46062+
46063+ task->signal->saved_ip = curr_ip;
46064+
46065+ match = acl_role_set.r_hash[index];
46066+
46067+ while (match) {
46068+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
46069+ for (x = 0; x < match->domain_child_num; x++) {
46070+ if (match->domain_children[x] == uid)
46071+ goto found;
46072+ }
46073+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
46074+ break;
46075+ match = match->next;
46076+ }
46077+found:
46078+ if (match == NULL) {
46079+ try_group:
46080+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
46081+ match = acl_role_set.r_hash[index];
46082+
46083+ while (match) {
46084+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
46085+ for (x = 0; x < match->domain_child_num; x++) {
46086+ if (match->domain_children[x] == gid)
46087+ goto found2;
46088+ }
46089+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
46090+ break;
46091+ match = match->next;
46092+ }
46093+found2:
46094+ if (match == NULL)
46095+ match = default_role;
46096+ if (match->allowed_ips == NULL)
46097+ return match;
46098+ else {
46099+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
46100+ if (likely
46101+ ((ntohl(curr_ip) & ipp->netmask) ==
46102+ (ntohl(ipp->addr) & ipp->netmask)))
46103+ return match;
46104+ }
46105+ match = default_role;
46106+ }
46107+ } else if (match->allowed_ips == NULL) {
46108+ return match;
46109+ } else {
46110+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
46111+ if (likely
46112+ ((ntohl(curr_ip) & ipp->netmask) ==
46113+ (ntohl(ipp->addr) & ipp->netmask)))
46114+ return match;
46115+ }
46116+ goto try_group;
46117+ }
46118+
46119+ return match;
46120+}
46121+
46122+struct acl_subject_label *
46123+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
46124+ const struct acl_role_label *role)
46125+{
46126+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
46127+ struct acl_subject_label *match;
46128+
46129+ match = role->subj_hash[index];
46130+
46131+ while (match && (match->inode != ino || match->device != dev ||
46132+ (match->mode & GR_DELETED))) {
46133+ match = match->next;
46134+ }
46135+
46136+ if (match && !(match->mode & GR_DELETED))
46137+ return match;
46138+ else
46139+ return NULL;
46140+}
46141+
46142+struct acl_subject_label *
46143+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
46144+ const struct acl_role_label *role)
46145+{
46146+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
46147+ struct acl_subject_label *match;
46148+
46149+ match = role->subj_hash[index];
46150+
46151+ while (match && (match->inode != ino || match->device != dev ||
46152+ !(match->mode & GR_DELETED))) {
46153+ match = match->next;
46154+ }
46155+
46156+ if (match && (match->mode & GR_DELETED))
46157+ return match;
46158+ else
46159+ return NULL;
46160+}
46161+
46162+static struct acl_object_label *
46163+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
46164+ const struct acl_subject_label *subj)
46165+{
46166+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
46167+ struct acl_object_label *match;
46168+
46169+ match = subj->obj_hash[index];
46170+
46171+ while (match && (match->inode != ino || match->device != dev ||
46172+ (match->mode & GR_DELETED))) {
46173+ match = match->next;
46174+ }
46175+
46176+ if (match && !(match->mode & GR_DELETED))
46177+ return match;
46178+ else
46179+ return NULL;
46180+}
46181+
46182+static struct acl_object_label *
46183+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
46184+ const struct acl_subject_label *subj)
46185+{
46186+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
46187+ struct acl_object_label *match;
46188+
46189+ match = subj->obj_hash[index];
46190+
46191+ while (match && (match->inode != ino || match->device != dev ||
46192+ !(match->mode & GR_DELETED))) {
46193+ match = match->next;
46194+ }
46195+
46196+ if (match && (match->mode & GR_DELETED))
46197+ return match;
46198+
46199+ match = subj->obj_hash[index];
46200+
46201+ while (match && (match->inode != ino || match->device != dev ||
46202+ (match->mode & GR_DELETED))) {
46203+ match = match->next;
46204+ }
46205+
46206+ if (match && !(match->mode & GR_DELETED))
46207+ return match;
46208+ else
46209+ return NULL;
46210+}
46211+
46212+static struct name_entry *
46213+lookup_name_entry(const char *name)
46214+{
46215+ unsigned int len = strlen(name);
46216+ unsigned int key = full_name_hash(name, len);
46217+ unsigned int index = key % name_set.n_size;
46218+ struct name_entry *match;
46219+
46220+ match = name_set.n_hash[index];
46221+
46222+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
46223+ match = match->next;
46224+
46225+ return match;
46226+}
46227+
46228+static struct name_entry *
46229+lookup_name_entry_create(const char *name)
46230+{
46231+ unsigned int len = strlen(name);
46232+ unsigned int key = full_name_hash(name, len);
46233+ unsigned int index = key % name_set.n_size;
46234+ struct name_entry *match;
46235+
46236+ match = name_set.n_hash[index];
46237+
46238+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
46239+ !match->deleted))
46240+ match = match->next;
46241+
46242+ if (match && match->deleted)
46243+ return match;
46244+
46245+ match = name_set.n_hash[index];
46246+
46247+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
46248+ match->deleted))
46249+ match = match->next;
46250+
46251+ if (match && !match->deleted)
46252+ return match;
46253+ else
46254+ return NULL;
46255+}
46256+
46257+static struct inodev_entry *
46258+lookup_inodev_entry(const ino_t ino, const dev_t dev)
46259+{
46260+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
46261+ struct inodev_entry *match;
46262+
46263+ match = inodev_set.i_hash[index];
46264+
46265+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
46266+ match = match->next;
46267+
46268+ return match;
46269+}
46270+
46271+static void
46272+insert_inodev_entry(struct inodev_entry *entry)
46273+{
46274+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
46275+ inodev_set.i_size);
46276+ struct inodev_entry **curr;
46277+
46278+ entry->prev = NULL;
46279+
46280+ curr = &inodev_set.i_hash[index];
46281+ if (*curr != NULL)
46282+ (*curr)->prev = entry;
46283+
46284+ entry->next = *curr;
46285+ *curr = entry;
46286+
46287+ return;
46288+}
46289+
46290+static void
46291+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
46292+{
46293+ unsigned int index =
46294+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
46295+ struct acl_role_label **curr;
46296+ struct acl_role_label *tmp;
46297+
46298+ curr = &acl_role_set.r_hash[index];
46299+
46300+ /* if role was already inserted due to domains and already has
46301+ a role in the same bucket as it attached, then we need to
46302+ combine these two buckets
46303+ */
46304+ if (role->next) {
46305+ tmp = role->next;
46306+ while (tmp->next)
46307+ tmp = tmp->next;
46308+ tmp->next = *curr;
46309+ } else
46310+ role->next = *curr;
46311+ *curr = role;
46312+
46313+ return;
46314+}
46315+
46316+static void
46317+insert_acl_role_label(struct acl_role_label *role)
46318+{
46319+ int i;
46320+
46321+ if (role_list == NULL) {
46322+ role_list = role;
46323+ role->prev = NULL;
46324+ } else {
46325+ role->prev = role_list;
46326+ role_list = role;
46327+ }
46328+
46329+ /* used for hash chains */
46330+ role->next = NULL;
46331+
46332+ if (role->roletype & GR_ROLE_DOMAIN) {
46333+ for (i = 0; i < role->domain_child_num; i++)
46334+ __insert_acl_role_label(role, role->domain_children[i]);
46335+ } else
46336+ __insert_acl_role_label(role, role->uidgid);
46337+}
46338+
46339+static int
46340+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
46341+{
46342+ struct name_entry **curr, *nentry;
46343+ struct inodev_entry *ientry;
46344+ unsigned int len = strlen(name);
46345+ unsigned int key = full_name_hash(name, len);
46346+ unsigned int index = key % name_set.n_size;
46347+
46348+ curr = &name_set.n_hash[index];
46349+
46350+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
46351+ curr = &((*curr)->next);
46352+
46353+ if (*curr != NULL)
46354+ return 1;
46355+
46356+ nentry = acl_alloc(sizeof (struct name_entry));
46357+ if (nentry == NULL)
46358+ return 0;
46359+ ientry = acl_alloc(sizeof (struct inodev_entry));
46360+ if (ientry == NULL)
46361+ return 0;
46362+ ientry->nentry = nentry;
46363+
46364+ nentry->key = key;
46365+ nentry->name = name;
46366+ nentry->inode = inode;
46367+ nentry->device = device;
46368+ nentry->len = len;
46369+ nentry->deleted = deleted;
46370+
46371+ nentry->prev = NULL;
46372+ curr = &name_set.n_hash[index];
46373+ if (*curr != NULL)
46374+ (*curr)->prev = nentry;
46375+ nentry->next = *curr;
46376+ *curr = nentry;
46377+
46378+ /* insert us into the table searchable by inode/dev */
46379+ insert_inodev_entry(ientry);
46380+
46381+ return 1;
46382+}
46383+
46384+static void
46385+insert_acl_obj_label(struct acl_object_label *obj,
46386+ struct acl_subject_label *subj)
46387+{
46388+ unsigned int index =
46389+ fhash(obj->inode, obj->device, subj->obj_hash_size);
46390+ struct acl_object_label **curr;
46391+
46392+
46393+ obj->prev = NULL;
46394+
46395+ curr = &subj->obj_hash[index];
46396+ if (*curr != NULL)
46397+ (*curr)->prev = obj;
46398+
46399+ obj->next = *curr;
46400+ *curr = obj;
46401+
46402+ return;
46403+}
46404+
46405+static void
46406+insert_acl_subj_label(struct acl_subject_label *obj,
46407+ struct acl_role_label *role)
46408+{
46409+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
46410+ struct acl_subject_label **curr;
46411+
46412+ obj->prev = NULL;
46413+
46414+ curr = &role->subj_hash[index];
46415+ if (*curr != NULL)
46416+ (*curr)->prev = obj;
46417+
46418+ obj->next = *curr;
46419+ *curr = obj;
46420+
46421+ return;
46422+}
46423+
46424+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
46425+
46426+static void *
46427+create_table(__u32 * len, int elementsize)
46428+{
46429+ unsigned int table_sizes[] = {
46430+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
46431+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
46432+ 4194301, 8388593, 16777213, 33554393, 67108859
46433+ };
46434+ void *newtable = NULL;
46435+ unsigned int pwr = 0;
46436+
46437+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
46438+ table_sizes[pwr] <= *len)
46439+ pwr++;
46440+
46441+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
46442+ return newtable;
46443+
46444+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
46445+ newtable =
46446+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
46447+ else
46448+ newtable = vmalloc(table_sizes[pwr] * elementsize);
46449+
46450+ *len = table_sizes[pwr];
46451+
46452+ return newtable;
46453+}
46454+
46455+static int
46456+init_variables(const struct gr_arg *arg)
46457+{
46458+ struct task_struct *reaper = &init_task;
46459+ unsigned int stacksize;
46460+
46461+ subj_map_set.s_size = arg->role_db.num_subjects;
46462+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
46463+ name_set.n_size = arg->role_db.num_objects;
46464+ inodev_set.i_size = arg->role_db.num_objects;
46465+
46466+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
46467+ !name_set.n_size || !inodev_set.i_size)
46468+ return 1;
46469+
46470+ if (!gr_init_uidset())
46471+ return 1;
46472+
46473+ /* set up the stack that holds allocation info */
46474+
46475+ stacksize = arg->role_db.num_pointers + 5;
46476+
46477+ if (!acl_alloc_stack_init(stacksize))
46478+ return 1;
46479+
46480+ /* grab reference for the real root dentry and vfsmount */
46481+ get_fs_root(reaper->fs, &real_root);
46482+
46483+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46484+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
46485+#endif
46486+
46487+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
46488+ if (fakefs_obj_rw == NULL)
46489+ return 1;
46490+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
46491+
46492+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
46493+ if (fakefs_obj_rwx == NULL)
46494+ return 1;
46495+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
46496+
46497+ subj_map_set.s_hash =
46498+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
46499+ acl_role_set.r_hash =
46500+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
46501+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
46502+ inodev_set.i_hash =
46503+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
46504+
46505+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
46506+ !name_set.n_hash || !inodev_set.i_hash)
46507+ return 1;
46508+
46509+ memset(subj_map_set.s_hash, 0,
46510+ sizeof(struct subject_map *) * subj_map_set.s_size);
46511+ memset(acl_role_set.r_hash, 0,
46512+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
46513+ memset(name_set.n_hash, 0,
46514+ sizeof (struct name_entry *) * name_set.n_size);
46515+ memset(inodev_set.i_hash, 0,
46516+ sizeof (struct inodev_entry *) * inodev_set.i_size);
46517+
46518+ return 0;
46519+}
46520+
46521+/* free information not needed after startup
46522+ currently contains user->kernel pointer mappings for subjects
46523+*/
46524+
46525+static void
46526+free_init_variables(void)
46527+{
46528+ __u32 i;
46529+
46530+ if (subj_map_set.s_hash) {
46531+ for (i = 0; i < subj_map_set.s_size; i++) {
46532+ if (subj_map_set.s_hash[i]) {
46533+ kfree(subj_map_set.s_hash[i]);
46534+ subj_map_set.s_hash[i] = NULL;
46535+ }
46536+ }
46537+
46538+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
46539+ PAGE_SIZE)
46540+ kfree(subj_map_set.s_hash);
46541+ else
46542+ vfree(subj_map_set.s_hash);
46543+ }
46544+
46545+ return;
46546+}
46547+
46548+static void
46549+free_variables(void)
46550+{
46551+ struct acl_subject_label *s;
46552+ struct acl_role_label *r;
46553+ struct task_struct *task, *task2;
46554+ unsigned int x;
46555+
46556+ gr_clear_learn_entries();
46557+
46558+ read_lock(&tasklist_lock);
46559+ do_each_thread(task2, task) {
46560+ task->acl_sp_role = 0;
46561+ task->acl_role_id = 0;
46562+ task->acl = NULL;
46563+ task->role = NULL;
46564+ } while_each_thread(task2, task);
46565+ read_unlock(&tasklist_lock);
46566+
46567+ /* release the reference to the real root dentry and vfsmount */
46568+ path_put(&real_root);
46569+
46570+ /* free all object hash tables */
46571+
46572+ FOR_EACH_ROLE_START(r)
46573+ if (r->subj_hash == NULL)
46574+ goto next_role;
46575+ FOR_EACH_SUBJECT_START(r, s, x)
46576+ if (s->obj_hash == NULL)
46577+ break;
46578+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
46579+ kfree(s->obj_hash);
46580+ else
46581+ vfree(s->obj_hash);
46582+ FOR_EACH_SUBJECT_END(s, x)
46583+ FOR_EACH_NESTED_SUBJECT_START(r, s)
46584+ if (s->obj_hash == NULL)
46585+ break;
46586+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
46587+ kfree(s->obj_hash);
46588+ else
46589+ vfree(s->obj_hash);
46590+ FOR_EACH_NESTED_SUBJECT_END(s)
46591+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
46592+ kfree(r->subj_hash);
46593+ else
46594+ vfree(r->subj_hash);
46595+ r->subj_hash = NULL;
46596+next_role:
46597+ FOR_EACH_ROLE_END(r)
46598+
46599+ acl_free_all();
46600+
46601+ if (acl_role_set.r_hash) {
46602+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
46603+ PAGE_SIZE)
46604+ kfree(acl_role_set.r_hash);
46605+ else
46606+ vfree(acl_role_set.r_hash);
46607+ }
46608+ if (name_set.n_hash) {
46609+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
46610+ PAGE_SIZE)
46611+ kfree(name_set.n_hash);
46612+ else
46613+ vfree(name_set.n_hash);
46614+ }
46615+
46616+ if (inodev_set.i_hash) {
46617+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
46618+ PAGE_SIZE)
46619+ kfree(inodev_set.i_hash);
46620+ else
46621+ vfree(inodev_set.i_hash);
46622+ }
46623+
46624+ gr_free_uidset();
46625+
46626+ memset(&name_set, 0, sizeof (struct name_db));
46627+ memset(&inodev_set, 0, sizeof (struct inodev_db));
46628+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
46629+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
46630+
46631+ default_role = NULL;
46632+ role_list = NULL;
46633+
46634+ return;
46635+}
46636+
46637+static __u32
46638+count_user_objs(struct acl_object_label *userp)
46639+{
46640+ struct acl_object_label o_tmp;
46641+ __u32 num = 0;
46642+
46643+ while (userp) {
46644+ if (copy_from_user(&o_tmp, userp,
46645+ sizeof (struct acl_object_label)))
46646+ break;
46647+
46648+ userp = o_tmp.prev;
46649+ num++;
46650+ }
46651+
46652+ return num;
46653+}
46654+
46655+static struct acl_subject_label *
46656+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
46657+
46658+static int
46659+copy_user_glob(struct acl_object_label *obj)
46660+{
46661+ struct acl_object_label *g_tmp, **guser;
46662+ unsigned int len;
46663+ char *tmp;
46664+
46665+ if (obj->globbed == NULL)
46666+ return 0;
46667+
46668+ guser = &obj->globbed;
46669+ while (*guser) {
46670+ g_tmp = (struct acl_object_label *)
46671+ acl_alloc(sizeof (struct acl_object_label));
46672+ if (g_tmp == NULL)
46673+ return -ENOMEM;
46674+
46675+ if (copy_from_user(g_tmp, *guser,
46676+ sizeof (struct acl_object_label)))
46677+ return -EFAULT;
46678+
46679+ len = strnlen_user(g_tmp->filename, PATH_MAX);
46680+
46681+ if (!len || len >= PATH_MAX)
46682+ return -EINVAL;
46683+
46684+ if ((tmp = (char *) acl_alloc(len)) == NULL)
46685+ return -ENOMEM;
46686+
46687+ if (copy_from_user(tmp, g_tmp->filename, len))
46688+ return -EFAULT;
46689+ tmp[len-1] = '\0';
46690+ g_tmp->filename = tmp;
46691+
46692+ *guser = g_tmp;
46693+ guser = &(g_tmp->next);
46694+ }
46695+
46696+ return 0;
46697+}
46698+
46699+static int
46700+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
46701+ struct acl_role_label *role)
46702+{
46703+ struct acl_object_label *o_tmp;
46704+ unsigned int len;
46705+ int ret;
46706+ char *tmp;
46707+
46708+ while (userp) {
46709+ if ((o_tmp = (struct acl_object_label *)
46710+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
46711+ return -ENOMEM;
46712+
46713+ if (copy_from_user(o_tmp, userp,
46714+ sizeof (struct acl_object_label)))
46715+ return -EFAULT;
46716+
46717+ userp = o_tmp->prev;
46718+
46719+ len = strnlen_user(o_tmp->filename, PATH_MAX);
46720+
46721+ if (!len || len >= PATH_MAX)
46722+ return -EINVAL;
46723+
46724+ if ((tmp = (char *) acl_alloc(len)) == NULL)
46725+ return -ENOMEM;
46726+
46727+ if (copy_from_user(tmp, o_tmp->filename, len))
46728+ return -EFAULT;
46729+ tmp[len-1] = '\0';
46730+ o_tmp->filename = tmp;
46731+
46732+ insert_acl_obj_label(o_tmp, subj);
46733+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
46734+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
46735+ return -ENOMEM;
46736+
46737+ ret = copy_user_glob(o_tmp);
46738+ if (ret)
46739+ return ret;
46740+
46741+ if (o_tmp->nested) {
46742+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
46743+ if (IS_ERR(o_tmp->nested))
46744+ return PTR_ERR(o_tmp->nested);
46745+
46746+ /* insert into nested subject list */
46747+ o_tmp->nested->next = role->hash->first;
46748+ role->hash->first = o_tmp->nested;
46749+ }
46750+ }
46751+
46752+ return 0;
46753+}
46754+
46755+static __u32
46756+count_user_subjs(struct acl_subject_label *userp)
46757+{
46758+ struct acl_subject_label s_tmp;
46759+ __u32 num = 0;
46760+
46761+ while (userp) {
46762+ if (copy_from_user(&s_tmp, userp,
46763+ sizeof (struct acl_subject_label)))
46764+ break;
46765+
46766+ userp = s_tmp.prev;
46767+ /* do not count nested subjects against this count, since
46768+ they are not included in the hash table, but are
46769+ attached to objects. We have already counted
46770+ the subjects in userspace for the allocation
46771+ stack
46772+ */
46773+ if (!(s_tmp.mode & GR_NESTED))
46774+ num++;
46775+ }
46776+
46777+ return num;
46778+}
46779+
46780+static int
46781+copy_user_allowedips(struct acl_role_label *rolep)
46782+{
46783+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
46784+
46785+ ruserip = rolep->allowed_ips;
46786+
46787+ while (ruserip) {
46788+ rlast = rtmp;
46789+
46790+ if ((rtmp = (struct role_allowed_ip *)
46791+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
46792+ return -ENOMEM;
46793+
46794+ if (copy_from_user(rtmp, ruserip,
46795+ sizeof (struct role_allowed_ip)))
46796+ return -EFAULT;
46797+
46798+ ruserip = rtmp->prev;
46799+
46800+ if (!rlast) {
46801+ rtmp->prev = NULL;
46802+ rolep->allowed_ips = rtmp;
46803+ } else {
46804+ rlast->next = rtmp;
46805+ rtmp->prev = rlast;
46806+ }
46807+
46808+ if (!ruserip)
46809+ rtmp->next = NULL;
46810+ }
46811+
46812+ return 0;
46813+}
46814+
46815+static int
46816+copy_user_transitions(struct acl_role_label *rolep)
46817+{
46818+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
46819+
46820+ unsigned int len;
46821+ char *tmp;
46822+
46823+ rusertp = rolep->transitions;
46824+
46825+ while (rusertp) {
46826+ rlast = rtmp;
46827+
46828+ if ((rtmp = (struct role_transition *)
46829+ acl_alloc(sizeof (struct role_transition))) == NULL)
46830+ return -ENOMEM;
46831+
46832+ if (copy_from_user(rtmp, rusertp,
46833+ sizeof (struct role_transition)))
46834+ return -EFAULT;
46835+
46836+ rusertp = rtmp->prev;
46837+
46838+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
46839+
46840+ if (!len || len >= GR_SPROLE_LEN)
46841+ return -EINVAL;
46842+
46843+ if ((tmp = (char *) acl_alloc(len)) == NULL)
46844+ return -ENOMEM;
46845+
46846+ if (copy_from_user(tmp, rtmp->rolename, len))
46847+ return -EFAULT;
46848+ tmp[len-1] = '\0';
46849+ rtmp->rolename = tmp;
46850+
46851+ if (!rlast) {
46852+ rtmp->prev = NULL;
46853+ rolep->transitions = rtmp;
46854+ } else {
46855+ rlast->next = rtmp;
46856+ rtmp->prev = rlast;
46857+ }
46858+
46859+ if (!rusertp)
46860+ rtmp->next = NULL;
46861+ }
46862+
46863+ return 0;
46864+}
46865+
46866+static struct acl_subject_label *
46867+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
46868+{
46869+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
46870+ unsigned int len;
46871+ char *tmp;
46872+ __u32 num_objs;
46873+ struct acl_ip_label **i_tmp, *i_utmp2;
46874+ struct gr_hash_struct ghash;
46875+ struct subject_map *subjmap;
46876+ unsigned int i_num;
46877+ int err;
46878+
46879+ s_tmp = lookup_subject_map(userp);
46880+
46881+ /* we've already copied this subject into the kernel, just return
46882+ the reference to it, and don't copy it over again
46883+ */
46884+ if (s_tmp)
46885+ return(s_tmp);
46886+
46887+ if ((s_tmp = (struct acl_subject_label *)
46888+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
46889+ return ERR_PTR(-ENOMEM);
46890+
46891+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
46892+ if (subjmap == NULL)
46893+ return ERR_PTR(-ENOMEM);
46894+
46895+ subjmap->user = userp;
46896+ subjmap->kernel = s_tmp;
46897+ insert_subj_map_entry(subjmap);
46898+
46899+ if (copy_from_user(s_tmp, userp,
46900+ sizeof (struct acl_subject_label)))
46901+ return ERR_PTR(-EFAULT);
46902+
46903+ len = strnlen_user(s_tmp->filename, PATH_MAX);
46904+
46905+ if (!len || len >= PATH_MAX)
46906+ return ERR_PTR(-EINVAL);
46907+
46908+ if ((tmp = (char *) acl_alloc(len)) == NULL)
46909+ return ERR_PTR(-ENOMEM);
46910+
46911+ if (copy_from_user(tmp, s_tmp->filename, len))
46912+ return ERR_PTR(-EFAULT);
46913+ tmp[len-1] = '\0';
46914+ s_tmp->filename = tmp;
46915+
46916+ if (!strcmp(s_tmp->filename, "/"))
46917+ role->root_label = s_tmp;
46918+
46919+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
46920+ return ERR_PTR(-EFAULT);
46921+
46922+ /* copy user and group transition tables */
46923+
46924+ if (s_tmp->user_trans_num) {
46925+ uid_t *uidlist;
46926+
46927+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
46928+ if (uidlist == NULL)
46929+ return ERR_PTR(-ENOMEM);
46930+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
46931+ return ERR_PTR(-EFAULT);
46932+
46933+ s_tmp->user_transitions = uidlist;
46934+ }
46935+
46936+ if (s_tmp->group_trans_num) {
46937+ gid_t *gidlist;
46938+
46939+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
46940+ if (gidlist == NULL)
46941+ return ERR_PTR(-ENOMEM);
46942+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
46943+ return ERR_PTR(-EFAULT);
46944+
46945+ s_tmp->group_transitions = gidlist;
46946+ }
46947+
46948+ /* set up object hash table */
46949+ num_objs = count_user_objs(ghash.first);
46950+
46951+ s_tmp->obj_hash_size = num_objs;
46952+ s_tmp->obj_hash =
46953+ (struct acl_object_label **)
46954+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
46955+
46956+ if (!s_tmp->obj_hash)
46957+ return ERR_PTR(-ENOMEM);
46958+
46959+ memset(s_tmp->obj_hash, 0,
46960+ s_tmp->obj_hash_size *
46961+ sizeof (struct acl_object_label *));
46962+
46963+ /* add in objects */
46964+ err = copy_user_objs(ghash.first, s_tmp, role);
46965+
46966+ if (err)
46967+ return ERR_PTR(err);
46968+
46969+ /* set pointer for parent subject */
46970+ if (s_tmp->parent_subject) {
46971+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
46972+
46973+ if (IS_ERR(s_tmp2))
46974+ return s_tmp2;
46975+
46976+ s_tmp->parent_subject = s_tmp2;
46977+ }
46978+
46979+ /* add in ip acls */
46980+
46981+ if (!s_tmp->ip_num) {
46982+ s_tmp->ips = NULL;
46983+ goto insert;
46984+ }
46985+
46986+ i_tmp =
46987+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
46988+ sizeof (struct acl_ip_label *));
46989+
46990+ if (!i_tmp)
46991+ return ERR_PTR(-ENOMEM);
46992+
46993+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
46994+ *(i_tmp + i_num) =
46995+ (struct acl_ip_label *)
46996+ acl_alloc(sizeof (struct acl_ip_label));
46997+ if (!*(i_tmp + i_num))
46998+ return ERR_PTR(-ENOMEM);
46999+
47000+ if (copy_from_user
47001+ (&i_utmp2, s_tmp->ips + i_num,
47002+ sizeof (struct acl_ip_label *)))
47003+ return ERR_PTR(-EFAULT);
47004+
47005+ if (copy_from_user
47006+ (*(i_tmp + i_num), i_utmp2,
47007+ sizeof (struct acl_ip_label)))
47008+ return ERR_PTR(-EFAULT);
47009+
47010+ if ((*(i_tmp + i_num))->iface == NULL)
47011+ continue;
47012+
47013+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
47014+ if (!len || len >= IFNAMSIZ)
47015+ return ERR_PTR(-EINVAL);
47016+ tmp = acl_alloc(len);
47017+ if (tmp == NULL)
47018+ return ERR_PTR(-ENOMEM);
47019+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
47020+ return ERR_PTR(-EFAULT);
47021+ (*(i_tmp + i_num))->iface = tmp;
47022+ }
47023+
47024+ s_tmp->ips = i_tmp;
47025+
47026+insert:
47027+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
47028+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
47029+ return ERR_PTR(-ENOMEM);
47030+
47031+ return s_tmp;
47032+}
47033+
47034+static int
47035+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
47036+{
47037+ struct acl_subject_label s_pre;
47038+ struct acl_subject_label * ret;
47039+ int err;
47040+
47041+ while (userp) {
47042+ if (copy_from_user(&s_pre, userp,
47043+ sizeof (struct acl_subject_label)))
47044+ return -EFAULT;
47045+
47046+ /* do not add nested subjects here, add
47047+ while parsing objects
47048+ */
47049+
47050+ if (s_pre.mode & GR_NESTED) {
47051+ userp = s_pre.prev;
47052+ continue;
47053+ }
47054+
47055+ ret = do_copy_user_subj(userp, role);
47056+
47057+ err = PTR_ERR(ret);
47058+ if (IS_ERR(ret))
47059+ return err;
47060+
47061+ insert_acl_subj_label(ret, role);
47062+
47063+ userp = s_pre.prev;
47064+ }
47065+
47066+ return 0;
47067+}
47068+
47069+static int
47070+copy_user_acl(struct gr_arg *arg)
47071+{
47072+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
47073+ struct sprole_pw *sptmp;
47074+ struct gr_hash_struct *ghash;
47075+ uid_t *domainlist;
47076+ unsigned int r_num;
47077+ unsigned int len;
47078+ char *tmp;
47079+ int err = 0;
47080+ __u16 i;
47081+ __u32 num_subjs;
47082+
47083+ /* we need a default and kernel role */
47084+ if (arg->role_db.num_roles < 2)
47085+ return -EINVAL;
47086+
47087+ /* copy special role authentication info from userspace */
47088+
47089+ num_sprole_pws = arg->num_sprole_pws;
47090+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
47091+
47092+ if (!acl_special_roles) {
47093+ err = -ENOMEM;
47094+ goto cleanup;
47095+ }
47096+
47097+ for (i = 0; i < num_sprole_pws; i++) {
47098+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
47099+ if (!sptmp) {
47100+ err = -ENOMEM;
47101+ goto cleanup;
47102+ }
47103+ if (copy_from_user(sptmp, arg->sprole_pws + i,
47104+ sizeof (struct sprole_pw))) {
47105+ err = -EFAULT;
47106+ goto cleanup;
47107+ }
47108+
47109+ len =
47110+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
47111+
47112+ if (!len || len >= GR_SPROLE_LEN) {
47113+ err = -EINVAL;
47114+ goto cleanup;
47115+ }
47116+
47117+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
47118+ err = -ENOMEM;
47119+ goto cleanup;
47120+ }
47121+
47122+ if (copy_from_user(tmp, sptmp->rolename, len)) {
47123+ err = -EFAULT;
47124+ goto cleanup;
47125+ }
47126+ tmp[len-1] = '\0';
47127+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47128+ printk(KERN_ALERT "Copying special role %s\n", tmp);
47129+#endif
47130+ sptmp->rolename = tmp;
47131+ acl_special_roles[i] = sptmp;
47132+ }
47133+
47134+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
47135+
47136+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
47137+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
47138+
47139+ if (!r_tmp) {
47140+ err = -ENOMEM;
47141+ goto cleanup;
47142+ }
47143+
47144+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
47145+ sizeof (struct acl_role_label *))) {
47146+ err = -EFAULT;
47147+ goto cleanup;
47148+ }
47149+
47150+ if (copy_from_user(r_tmp, r_utmp2,
47151+ sizeof (struct acl_role_label))) {
47152+ err = -EFAULT;
47153+ goto cleanup;
47154+ }
47155+
47156+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
47157+
47158+ if (!len || len >= PATH_MAX) {
47159+ err = -EINVAL;
47160+ goto cleanup;
47161+ }
47162+
47163+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
47164+ err = -ENOMEM;
47165+ goto cleanup;
47166+ }
47167+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
47168+ err = -EFAULT;
47169+ goto cleanup;
47170+ }
47171+ tmp[len-1] = '\0';
47172+ r_tmp->rolename = tmp;
47173+
47174+ if (!strcmp(r_tmp->rolename, "default")
47175+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
47176+ default_role = r_tmp;
47177+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
47178+ kernel_role = r_tmp;
47179+ }
47180+
47181+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
47182+ err = -ENOMEM;
47183+ goto cleanup;
47184+ }
47185+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
47186+ err = -EFAULT;
47187+ goto cleanup;
47188+ }
47189+
47190+ r_tmp->hash = ghash;
47191+
47192+ num_subjs = count_user_subjs(r_tmp->hash->first);
47193+
47194+ r_tmp->subj_hash_size = num_subjs;
47195+ r_tmp->subj_hash =
47196+ (struct acl_subject_label **)
47197+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
47198+
47199+ if (!r_tmp->subj_hash) {
47200+ err = -ENOMEM;
47201+ goto cleanup;
47202+ }
47203+
47204+ err = copy_user_allowedips(r_tmp);
47205+ if (err)
47206+ goto cleanup;
47207+
47208+ /* copy domain info */
47209+ if (r_tmp->domain_children != NULL) {
47210+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
47211+ if (domainlist == NULL) {
47212+ err = -ENOMEM;
47213+ goto cleanup;
47214+ }
47215+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
47216+ err = -EFAULT;
47217+ goto cleanup;
47218+ }
47219+ r_tmp->domain_children = domainlist;
47220+ }
47221+
47222+ err = copy_user_transitions(r_tmp);
47223+ if (err)
47224+ goto cleanup;
47225+
47226+ memset(r_tmp->subj_hash, 0,
47227+ r_tmp->subj_hash_size *
47228+ sizeof (struct acl_subject_label *));
47229+
47230+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
47231+
47232+ if (err)
47233+ goto cleanup;
47234+
47235+ /* set nested subject list to null */
47236+ r_tmp->hash->first = NULL;
47237+
47238+ insert_acl_role_label(r_tmp);
47239+ }
47240+
47241+ goto return_err;
47242+ cleanup:
47243+ free_variables();
47244+ return_err:
47245+ return err;
47246+
47247+}
47248+
47249+static int
47250+gracl_init(struct gr_arg *args)
47251+{
47252+ int error = 0;
47253+
47254+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
47255+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
47256+
47257+ if (init_variables(args)) {
47258+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
47259+ error = -ENOMEM;
47260+ free_variables();
47261+ goto out;
47262+ }
47263+
47264+ error = copy_user_acl(args);
47265+ free_init_variables();
47266+ if (error) {
47267+ free_variables();
47268+ goto out;
47269+ }
47270+
47271+ if ((error = gr_set_acls(0))) {
47272+ free_variables();
47273+ goto out;
47274+ }
47275+
47276+ pax_open_kernel();
47277+ gr_status |= GR_READY;
47278+ pax_close_kernel();
47279+
47280+ out:
47281+ return error;
47282+}
47283+
47284+/* derived from glibc fnmatch() 0: match, 1: no match*/
47285+
47286+static int
47287+glob_match(const char *p, const char *n)
47288+{
47289+ char c;
47290+
47291+ while ((c = *p++) != '\0') {
47292+ switch (c) {
47293+ case '?':
47294+ if (*n == '\0')
47295+ return 1;
47296+ else if (*n == '/')
47297+ return 1;
47298+ break;
47299+ case '\\':
47300+ if (*n != c)
47301+ return 1;
47302+ break;
47303+ case '*':
47304+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
47305+ if (*n == '/')
47306+ return 1;
47307+ else if (c == '?') {
47308+ if (*n == '\0')
47309+ return 1;
47310+ else
47311+ ++n;
47312+ }
47313+ }
47314+ if (c == '\0') {
47315+ return 0;
47316+ } else {
47317+ const char *endp;
47318+
47319+ if ((endp = strchr(n, '/')) == NULL)
47320+ endp = n + strlen(n);
47321+
47322+ if (c == '[') {
47323+ for (--p; n < endp; ++n)
47324+ if (!glob_match(p, n))
47325+ return 0;
47326+ } else if (c == '/') {
47327+ while (*n != '\0' && *n != '/')
47328+ ++n;
47329+ if (*n == '/' && !glob_match(p, n + 1))
47330+ return 0;
47331+ } else {
47332+ for (--p; n < endp; ++n)
47333+ if (*n == c && !glob_match(p, n))
47334+ return 0;
47335+ }
47336+
47337+ return 1;
47338+ }
47339+ case '[':
47340+ {
47341+ int not;
47342+ char cold;
47343+
47344+ if (*n == '\0' || *n == '/')
47345+ return 1;
47346+
47347+ not = (*p == '!' || *p == '^');
47348+ if (not)
47349+ ++p;
47350+
47351+ c = *p++;
47352+ for (;;) {
47353+ unsigned char fn = (unsigned char)*n;
47354+
47355+ if (c == '\0')
47356+ return 1;
47357+ else {
47358+ if (c == fn)
47359+ goto matched;
47360+ cold = c;
47361+ c = *p++;
47362+
47363+ if (c == '-' && *p != ']') {
47364+ unsigned char cend = *p++;
47365+
47366+ if (cend == '\0')
47367+ return 1;
47368+
47369+ if (cold <= fn && fn <= cend)
47370+ goto matched;
47371+
47372+ c = *p++;
47373+ }
47374+ }
47375+
47376+ if (c == ']')
47377+ break;
47378+ }
47379+ if (!not)
47380+ return 1;
47381+ break;
47382+ matched:
47383+ while (c != ']') {
47384+ if (c == '\0')
47385+ return 1;
47386+
47387+ c = *p++;
47388+ }
47389+ if (not)
47390+ return 1;
47391+ }
47392+ break;
47393+ default:
47394+ if (c != *n)
47395+ return 1;
47396+ }
47397+
47398+ ++n;
47399+ }
47400+
47401+ if (*n == '\0')
47402+ return 0;
47403+
47404+ if (*n == '/')
47405+ return 0;
47406+
47407+ return 1;
47408+}
47409+
47410+static struct acl_object_label *
47411+chk_glob_label(struct acl_object_label *globbed,
47412+ struct dentry *dentry, struct vfsmount *mnt, char **path)
47413+{
47414+ struct acl_object_label *tmp;
47415+
47416+ if (*path == NULL)
47417+ *path = gr_to_filename_nolock(dentry, mnt);
47418+
47419+ tmp = globbed;
47420+
47421+ while (tmp) {
47422+ if (!glob_match(tmp->filename, *path))
47423+ return tmp;
47424+ tmp = tmp->next;
47425+ }
47426+
47427+ return NULL;
47428+}
47429+
47430+static struct acl_object_label *
47431+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
47432+ const ino_t curr_ino, const dev_t curr_dev,
47433+ const struct acl_subject_label *subj, char **path, const int checkglob)
47434+{
47435+ struct acl_subject_label *tmpsubj;
47436+ struct acl_object_label *retval;
47437+ struct acl_object_label *retval2;
47438+
47439+ tmpsubj = (struct acl_subject_label *) subj;
47440+ read_lock(&gr_inode_lock);
47441+ do {
47442+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
47443+ if (retval) {
47444+ if (checkglob && retval->globbed) {
47445+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
47446+ (struct vfsmount *)orig_mnt, path);
47447+ if (retval2)
47448+ retval = retval2;
47449+ }
47450+ break;
47451+ }
47452+ } while ((tmpsubj = tmpsubj->parent_subject));
47453+ read_unlock(&gr_inode_lock);
47454+
47455+ return retval;
47456+}
47457+
47458+static __inline__ struct acl_object_label *
47459+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
47460+ struct dentry *curr_dentry,
47461+ const struct acl_subject_label *subj, char **path, const int checkglob)
47462+{
47463+ int newglob = checkglob;
47464+ ino_t inode;
47465+ dev_t device;
47466+
47467+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
47468+ as we don't want a / * rule to match instead of the / object
47469+ don't do this for create lookups that call this function though, since they're looking up
47470+ on the parent and thus need globbing checks on all paths
47471+ */
47472+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
47473+ newglob = GR_NO_GLOB;
47474+
47475+ spin_lock(&curr_dentry->d_lock);
47476+ inode = curr_dentry->d_inode->i_ino;
47477+ device = __get_dev(curr_dentry);
47478+ spin_unlock(&curr_dentry->d_lock);
47479+
47480+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
47481+}
47482+
47483+static struct acl_object_label *
47484+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
47485+ const struct acl_subject_label *subj, char *path, const int checkglob)
47486+{
47487+ struct dentry *dentry = (struct dentry *) l_dentry;
47488+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
47489+ struct acl_object_label *retval;
47490+ struct dentry *parent;
47491+
47492+ write_seqlock(&rename_lock);
47493+ br_read_lock(vfsmount_lock);
47494+
47495+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
47496+#ifdef CONFIG_NET
47497+ mnt == sock_mnt ||
47498+#endif
47499+#ifdef CONFIG_HUGETLBFS
47500+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
47501+#endif
47502+ /* ignore Eric Biederman */
47503+ IS_PRIVATE(l_dentry->d_inode))) {
47504+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
47505+ goto out;
47506+ }
47507+
47508+ for (;;) {
47509+ if (dentry == real_root.dentry && mnt == real_root.mnt)
47510+ break;
47511+
47512+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
47513+ if (mnt->mnt_parent == mnt)
47514+ break;
47515+
47516+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
47517+ if (retval != NULL)
47518+ goto out;
47519+
47520+ dentry = mnt->mnt_mountpoint;
47521+ mnt = mnt->mnt_parent;
47522+ continue;
47523+ }
47524+
47525+ parent = dentry->d_parent;
47526+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
47527+ if (retval != NULL)
47528+ goto out;
47529+
47530+ dentry = parent;
47531+ }
47532+
47533+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
47534+
47535+ /* real_root is pinned so we don't have to hold a reference */
47536+ if (retval == NULL)
47537+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
47538+out:
47539+ br_read_unlock(vfsmount_lock);
47540+ write_sequnlock(&rename_lock);
47541+
47542+ BUG_ON(retval == NULL);
47543+
47544+ return retval;
47545+}
47546+
47547+static __inline__ struct acl_object_label *
47548+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
47549+ const struct acl_subject_label *subj)
47550+{
47551+ char *path = NULL;
47552+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
47553+}
47554+
47555+static __inline__ struct acl_object_label *
47556+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
47557+ const struct acl_subject_label *subj)
47558+{
47559+ char *path = NULL;
47560+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
47561+}
47562+
47563+static __inline__ struct acl_object_label *
47564+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
47565+ const struct acl_subject_label *subj, char *path)
47566+{
47567+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
47568+}
47569+
47570+static struct acl_subject_label *
47571+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
47572+ const struct acl_role_label *role)
47573+{
47574+ struct dentry *dentry = (struct dentry *) l_dentry;
47575+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
47576+ struct acl_subject_label *retval;
47577+ struct dentry *parent;
47578+
47579+ write_seqlock(&rename_lock);
47580+ br_read_lock(vfsmount_lock);
47581+
47582+ for (;;) {
47583+ if (dentry == real_root.dentry && mnt == real_root.mnt)
47584+ break;
47585+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
47586+ if (mnt->mnt_parent == mnt)
47587+ break;
47588+
47589+ spin_lock(&dentry->d_lock);
47590+ read_lock(&gr_inode_lock);
47591+ retval =
47592+ lookup_acl_subj_label(dentry->d_inode->i_ino,
47593+ __get_dev(dentry), role);
47594+ read_unlock(&gr_inode_lock);
47595+ spin_unlock(&dentry->d_lock);
47596+ if (retval != NULL)
47597+ goto out;
47598+
47599+ dentry = mnt->mnt_mountpoint;
47600+ mnt = mnt->mnt_parent;
47601+ continue;
47602+ }
47603+
47604+ spin_lock(&dentry->d_lock);
47605+ read_lock(&gr_inode_lock);
47606+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
47607+ __get_dev(dentry), role);
47608+ read_unlock(&gr_inode_lock);
47609+ parent = dentry->d_parent;
47610+ spin_unlock(&dentry->d_lock);
47611+
47612+ if (retval != NULL)
47613+ goto out;
47614+
47615+ dentry = parent;
47616+ }
47617+
47618+ spin_lock(&dentry->d_lock);
47619+ read_lock(&gr_inode_lock);
47620+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
47621+ __get_dev(dentry), role);
47622+ read_unlock(&gr_inode_lock);
47623+ spin_unlock(&dentry->d_lock);
47624+
47625+ if (unlikely(retval == NULL)) {
47626+ /* real_root is pinned, we don't need to hold a reference */
47627+ read_lock(&gr_inode_lock);
47628+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
47629+ __get_dev(real_root.dentry), role);
47630+ read_unlock(&gr_inode_lock);
47631+ }
47632+out:
47633+ br_read_unlock(vfsmount_lock);
47634+ write_sequnlock(&rename_lock);
47635+
47636+ BUG_ON(retval == NULL);
47637+
47638+ return retval;
47639+}
47640+
47641+static void
47642+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
47643+{
47644+ struct task_struct *task = current;
47645+ const struct cred *cred = current_cred();
47646+
47647+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
47648+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
47649+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
47650+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
47651+
47652+ return;
47653+}
47654+
47655+static void
47656+gr_log_learn_sysctl(const char *path, const __u32 mode)
47657+{
47658+ struct task_struct *task = current;
47659+ const struct cred *cred = current_cred();
47660+
47661+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
47662+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
47663+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
47664+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
47665+
47666+ return;
47667+}
47668+
47669+static void
47670+gr_log_learn_id_change(const char type, const unsigned int real,
47671+ const unsigned int effective, const unsigned int fs)
47672+{
47673+ struct task_struct *task = current;
47674+ const struct cred *cred = current_cred();
47675+
47676+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
47677+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
47678+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
47679+ type, real, effective, fs, &task->signal->saved_ip);
47680+
47681+ return;
47682+}
47683+
47684+__u32
47685+gr_check_link(const struct dentry * new_dentry,
47686+ const struct dentry * parent_dentry,
47687+ const struct vfsmount * parent_mnt,
47688+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
47689+{
47690+ struct acl_object_label *obj;
47691+ __u32 oldmode, newmode;
47692+ __u32 needmode;
47693+
47694+ if (unlikely(!(gr_status & GR_READY)))
47695+ return (GR_CREATE | GR_LINK);
47696+
47697+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
47698+ oldmode = obj->mode;
47699+
47700+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
47701+ oldmode |= (GR_CREATE | GR_LINK);
47702+
47703+ needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
47704+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
47705+ needmode |= GR_SETID | GR_AUDIT_SETID;
47706+
47707+ newmode =
47708+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
47709+ oldmode | needmode);
47710+
47711+ needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
47712+ GR_SETID | GR_READ | GR_FIND | GR_DELETE |
47713+ GR_INHERIT | GR_AUDIT_INHERIT);
47714+
47715+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
47716+ goto bad;
47717+
47718+ if ((oldmode & needmode) != needmode)
47719+ goto bad;
47720+
47721+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
47722+ if ((newmode & needmode) != needmode)
47723+ goto bad;
47724+
47725+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
47726+ return newmode;
47727+bad:
47728+ needmode = oldmode;
47729+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
47730+ needmode |= GR_SETID;
47731+
47732+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
47733+ gr_log_learn(old_dentry, old_mnt, needmode);
47734+ return (GR_CREATE | GR_LINK);
47735+ } else if (newmode & GR_SUPPRESS)
47736+ return GR_SUPPRESS;
47737+ else
47738+ return 0;
47739+}
47740+
47741+__u32
47742+gr_search_file(const struct dentry * dentry, const __u32 mode,
47743+ const struct vfsmount * mnt)
47744+{
47745+ __u32 retval = mode;
47746+ struct acl_subject_label *curracl;
47747+ struct acl_object_label *currobj;
47748+
47749+ if (unlikely(!(gr_status & GR_READY)))
47750+ return (mode & ~GR_AUDITS);
47751+
47752+ curracl = current->acl;
47753+
47754+ currobj = chk_obj_label(dentry, mnt, curracl);
47755+ retval = currobj->mode & mode;
47756+
47757+ /* if we're opening a specified transfer file for writing
47758+ (e.g. /dev/initctl), then transfer our role to init
47759+ */
47760+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
47761+ current->role->roletype & GR_ROLE_PERSIST)) {
47762+ struct task_struct *task = init_pid_ns.child_reaper;
47763+
47764+ if (task->role != current->role) {
47765+ task->acl_sp_role = 0;
47766+ task->acl_role_id = current->acl_role_id;
47767+ task->role = current->role;
47768+ rcu_read_lock();
47769+ read_lock(&grsec_exec_file_lock);
47770+ gr_apply_subject_to_task(task);
47771+ read_unlock(&grsec_exec_file_lock);
47772+ rcu_read_unlock();
47773+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
47774+ }
47775+ }
47776+
47777+ if (unlikely
47778+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
47779+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
47780+ __u32 new_mode = mode;
47781+
47782+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
47783+
47784+ retval = new_mode;
47785+
47786+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
47787+ new_mode |= GR_INHERIT;
47788+
47789+ if (!(mode & GR_NOLEARN))
47790+ gr_log_learn(dentry, mnt, new_mode);
47791+ }
47792+
47793+ return retval;
47794+}
47795+
47796+__u32
47797+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
47798+ const struct vfsmount * mnt, const __u32 mode)
47799+{
47800+ struct name_entry *match;
47801+ struct acl_object_label *matchpo;
47802+ struct acl_subject_label *curracl;
47803+ char *path;
47804+ __u32 retval;
47805+
47806+ if (unlikely(!(gr_status & GR_READY)))
47807+ return (mode & ~GR_AUDITS);
47808+
47809+ preempt_disable();
47810+ path = gr_to_filename_rbac(new_dentry, mnt);
47811+ match = lookup_name_entry_create(path);
47812+
47813+ if (!match)
47814+ goto check_parent;
47815+
47816+ curracl = current->acl;
47817+
47818+ read_lock(&gr_inode_lock);
47819+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
47820+ read_unlock(&gr_inode_lock);
47821+
47822+ if (matchpo) {
47823+ if ((matchpo->mode & mode) !=
47824+ (mode & ~(GR_AUDITS | GR_SUPPRESS))
47825+ && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
47826+ __u32 new_mode = mode;
47827+
47828+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
47829+
47830+ gr_log_learn(new_dentry, mnt, new_mode);
47831+
47832+ preempt_enable();
47833+ return new_mode;
47834+ }
47835+ preempt_enable();
47836+ return (matchpo->mode & mode);
47837+ }
47838+
47839+ check_parent:
47840+ curracl = current->acl;
47841+
47842+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
47843+ retval = matchpo->mode & mode;
47844+
47845+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
47846+ && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
47847+ __u32 new_mode = mode;
47848+
47849+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
47850+
47851+ gr_log_learn(new_dentry, mnt, new_mode);
47852+ preempt_enable();
47853+ return new_mode;
47854+ }
47855+
47856+ preempt_enable();
47857+ return retval;
47858+}
47859+
47860+int
47861+gr_check_hidden_task(const struct task_struct *task)
47862+{
47863+ if (unlikely(!(gr_status & GR_READY)))
47864+ return 0;
47865+
47866+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
47867+ return 1;
47868+
47869+ return 0;
47870+}
47871+
47872+int
47873+gr_check_protected_task(const struct task_struct *task)
47874+{
47875+ if (unlikely(!(gr_status & GR_READY) || !task))
47876+ return 0;
47877+
47878+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
47879+ task->acl != current->acl)
47880+ return 1;
47881+
47882+ return 0;
47883+}
47884+
47885+int
47886+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
47887+{
47888+ struct task_struct *p;
47889+ int ret = 0;
47890+
47891+ if (unlikely(!(gr_status & GR_READY) || !pid))
47892+ return ret;
47893+
47894+ read_lock(&tasklist_lock);
47895+ do_each_pid_task(pid, type, p) {
47896+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
47897+ p->acl != current->acl) {
47898+ ret = 1;
47899+ goto out;
47900+ }
47901+ } while_each_pid_task(pid, type, p);
47902+out:
47903+ read_unlock(&tasklist_lock);
47904+
47905+ return ret;
47906+}
47907+
47908+void
47909+gr_copy_label(struct task_struct *tsk)
47910+{
47911+ tsk->signal->used_accept = 0;
47912+ tsk->acl_sp_role = 0;
47913+ tsk->acl_role_id = current->acl_role_id;
47914+ tsk->acl = current->acl;
47915+ tsk->role = current->role;
47916+ tsk->signal->curr_ip = current->signal->curr_ip;
47917+ tsk->signal->saved_ip = current->signal->saved_ip;
47918+ if (current->exec_file)
47919+ get_file(current->exec_file);
47920+ tsk->exec_file = current->exec_file;
47921+ tsk->is_writable = current->is_writable;
47922+ if (unlikely(current->signal->used_accept)) {
47923+ current->signal->curr_ip = 0;
47924+ current->signal->saved_ip = 0;
47925+ }
47926+
47927+ return;
47928+}
47929+
47930+static void
47931+gr_set_proc_res(struct task_struct *task)
47932+{
47933+ struct acl_subject_label *proc;
47934+ unsigned short i;
47935+
47936+ proc = task->acl;
47937+
47938+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
47939+ return;
47940+
47941+ for (i = 0; i < RLIM_NLIMITS; i++) {
47942+ if (!(proc->resmask & (1 << i)))
47943+ continue;
47944+
47945+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
47946+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
47947+ }
47948+
47949+ return;
47950+}
47951+
47952+extern int __gr_process_user_ban(struct user_struct *user);
47953+
47954+int
47955+gr_check_user_change(int real, int effective, int fs)
47956+{
47957+ unsigned int i;
47958+ __u16 num;
47959+ uid_t *uidlist;
47960+ int curuid;
47961+ int realok = 0;
47962+ int effectiveok = 0;
47963+ int fsok = 0;
47964+
47965+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
47966+ struct user_struct *user;
47967+
47968+ if (real == -1)
47969+ goto skipit;
47970+
47971+ user = find_user(real);
47972+ if (user == NULL)
47973+ goto skipit;
47974+
47975+ if (__gr_process_user_ban(user)) {
47976+ /* for find_user */
47977+ free_uid(user);
47978+ return 1;
47979+ }
47980+
47981+ /* for find_user */
47982+ free_uid(user);
47983+
47984+skipit:
47985+#endif
47986+
47987+ if (unlikely(!(gr_status & GR_READY)))
47988+ return 0;
47989+
47990+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
47991+ gr_log_learn_id_change('u', real, effective, fs);
47992+
47993+ num = current->acl->user_trans_num;
47994+ uidlist = current->acl->user_transitions;
47995+
47996+ if (uidlist == NULL)
47997+ return 0;
47998+
47999+ if (real == -1)
48000+ realok = 1;
48001+ if (effective == -1)
48002+ effectiveok = 1;
48003+ if (fs == -1)
48004+ fsok = 1;
48005+
48006+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
48007+ for (i = 0; i < num; i++) {
48008+ curuid = (int)uidlist[i];
48009+ if (real == curuid)
48010+ realok = 1;
48011+ if (effective == curuid)
48012+ effectiveok = 1;
48013+ if (fs == curuid)
48014+ fsok = 1;
48015+ }
48016+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
48017+ for (i = 0; i < num; i++) {
48018+ curuid = (int)uidlist[i];
48019+ if (real == curuid)
48020+ break;
48021+ if (effective == curuid)
48022+ break;
48023+ if (fs == curuid)
48024+ break;
48025+ }
48026+ /* not in deny list */
48027+ if (i == num) {
48028+ realok = 1;
48029+ effectiveok = 1;
48030+ fsok = 1;
48031+ }
48032+ }
48033+
48034+ if (realok && effectiveok && fsok)
48035+ return 0;
48036+ else {
48037+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
48038+ return 1;
48039+ }
48040+}
48041+
48042+int
48043+gr_check_group_change(int real, int effective, int fs)
48044+{
48045+ unsigned int i;
48046+ __u16 num;
48047+ gid_t *gidlist;
48048+ int curgid;
48049+ int realok = 0;
48050+ int effectiveok = 0;
48051+ int fsok = 0;
48052+
48053+ if (unlikely(!(gr_status & GR_READY)))
48054+ return 0;
48055+
48056+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
48057+ gr_log_learn_id_change('g', real, effective, fs);
48058+
48059+ num = current->acl->group_trans_num;
48060+ gidlist = current->acl->group_transitions;
48061+
48062+ if (gidlist == NULL)
48063+ return 0;
48064+
48065+ if (real == -1)
48066+ realok = 1;
48067+ if (effective == -1)
48068+ effectiveok = 1;
48069+ if (fs == -1)
48070+ fsok = 1;
48071+
48072+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
48073+ for (i = 0; i < num; i++) {
48074+ curgid = (int)gidlist[i];
48075+ if (real == curgid)
48076+ realok = 1;
48077+ if (effective == curgid)
48078+ effectiveok = 1;
48079+ if (fs == curgid)
48080+ fsok = 1;
48081+ }
48082+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
48083+ for (i = 0; i < num; i++) {
48084+ curgid = (int)gidlist[i];
48085+ if (real == curgid)
48086+ break;
48087+ if (effective == curgid)
48088+ break;
48089+ if (fs == curgid)
48090+ break;
48091+ }
48092+ /* not in deny list */
48093+ if (i == num) {
48094+ realok = 1;
48095+ effectiveok = 1;
48096+ fsok = 1;
48097+ }
48098+ }
48099+
48100+ if (realok && effectiveok && fsok)
48101+ return 0;
48102+ else {
48103+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
48104+ return 1;
48105+ }
48106+}
48107+
48108+void
48109+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
48110+{
48111+ struct acl_role_label *role = task->role;
48112+ struct acl_subject_label *subj = NULL;
48113+ struct acl_object_label *obj;
48114+ struct file *filp;
48115+
48116+ if (unlikely(!(gr_status & GR_READY)))
48117+ return;
48118+
48119+ filp = task->exec_file;
48120+
48121+ /* kernel process, we'll give them the kernel role */
48122+ if (unlikely(!filp)) {
48123+ task->role = kernel_role;
48124+ task->acl = kernel_role->root_label;
48125+ return;
48126+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
48127+ role = lookup_acl_role_label(task, uid, gid);
48128+
48129+ /* perform subject lookup in possibly new role
48130+ we can use this result below in the case where role == task->role
48131+ */
48132+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
48133+
48134+ /* if we changed uid/gid, but result in the same role
48135+ and are using inheritance, don't lose the inherited subject
48136+ if current subject is other than what normal lookup
48137+ would result in, we arrived via inheritance, don't
48138+ lose subject
48139+ */
48140+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
48141+ (subj == task->acl)))
48142+ task->acl = subj;
48143+
48144+ task->role = role;
48145+
48146+ task->is_writable = 0;
48147+
48148+ /* ignore additional mmap checks for processes that are writable
48149+ by the default ACL */
48150+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
48151+ if (unlikely(obj->mode & GR_WRITE))
48152+ task->is_writable = 1;
48153+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
48154+ if (unlikely(obj->mode & GR_WRITE))
48155+ task->is_writable = 1;
48156+
48157+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
48158+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
48159+#endif
48160+
48161+ gr_set_proc_res(task);
48162+
48163+ return;
48164+}
48165+
48166+int
48167+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
48168+ const int unsafe_share)
48169+{
48170+ struct task_struct *task = current;
48171+ struct acl_subject_label *newacl;
48172+ struct acl_object_label *obj;
48173+ __u32 retmode;
48174+
48175+ if (unlikely(!(gr_status & GR_READY)))
48176+ return 0;
48177+
48178+ newacl = chk_subj_label(dentry, mnt, task->role);
48179+
48180+ task_lock(task);
48181+ if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
48182+ !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
48183+ !(task->role->roletype & GR_ROLE_GOD) &&
48184+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
48185+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
48186+ task_unlock(task);
48187+ if (unsafe_share)
48188+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
48189+ else
48190+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
48191+ return -EACCES;
48192+ }
48193+ task_unlock(task);
48194+
48195+ obj = chk_obj_label(dentry, mnt, task->acl);
48196+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
48197+
48198+ if (!(task->acl->mode & GR_INHERITLEARN) &&
48199+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
48200+ if (obj->nested)
48201+ task->acl = obj->nested;
48202+ else
48203+ task->acl = newacl;
48204+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
48205+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
48206+
48207+ task->is_writable = 0;
48208+
48209+ /* ignore additional mmap checks for processes that are writable
48210+ by the default ACL */
48211+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
48212+ if (unlikely(obj->mode & GR_WRITE))
48213+ task->is_writable = 1;
48214+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
48215+ if (unlikely(obj->mode & GR_WRITE))
48216+ task->is_writable = 1;
48217+
48218+ gr_set_proc_res(task);
48219+
48220+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
48221+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
48222+#endif
48223+ return 0;
48224+}
48225+
48226+/* always called with valid inodev ptr */
48227+static void
48228+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
48229+{
48230+ struct acl_object_label *matchpo;
48231+ struct acl_subject_label *matchps;
48232+ struct acl_subject_label *subj;
48233+ struct acl_role_label *role;
48234+ unsigned int x;
48235+
48236+ FOR_EACH_ROLE_START(role)
48237+ FOR_EACH_SUBJECT_START(role, subj, x)
48238+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
48239+ matchpo->mode |= GR_DELETED;
48240+ FOR_EACH_SUBJECT_END(subj,x)
48241+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
48242+ if (subj->inode == ino && subj->device == dev)
48243+ subj->mode |= GR_DELETED;
48244+ FOR_EACH_NESTED_SUBJECT_END(subj)
48245+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
48246+ matchps->mode |= GR_DELETED;
48247+ FOR_EACH_ROLE_END(role)
48248+
48249+ inodev->nentry->deleted = 1;
48250+
48251+ return;
48252+}
48253+
48254+void
48255+gr_handle_delete(const ino_t ino, const dev_t dev)
48256+{
48257+ struct inodev_entry *inodev;
48258+
48259+ if (unlikely(!(gr_status & GR_READY)))
48260+ return;
48261+
48262+ write_lock(&gr_inode_lock);
48263+ inodev = lookup_inodev_entry(ino, dev);
48264+ if (inodev != NULL)
48265+ do_handle_delete(inodev, ino, dev);
48266+ write_unlock(&gr_inode_lock);
48267+
48268+ return;
48269+}
48270+
48271+static void
48272+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
48273+ const ino_t newinode, const dev_t newdevice,
48274+ struct acl_subject_label *subj)
48275+{
48276+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
48277+ struct acl_object_label *match;
48278+
48279+ match = subj->obj_hash[index];
48280+
48281+ while (match && (match->inode != oldinode ||
48282+ match->device != olddevice ||
48283+ !(match->mode & GR_DELETED)))
48284+ match = match->next;
48285+
48286+ if (match && (match->inode == oldinode)
48287+ && (match->device == olddevice)
48288+ && (match->mode & GR_DELETED)) {
48289+ if (match->prev == NULL) {
48290+ subj->obj_hash[index] = match->next;
48291+ if (match->next != NULL)
48292+ match->next->prev = NULL;
48293+ } else {
48294+ match->prev->next = match->next;
48295+ if (match->next != NULL)
48296+ match->next->prev = match->prev;
48297+ }
48298+ match->prev = NULL;
48299+ match->next = NULL;
48300+ match->inode = newinode;
48301+ match->device = newdevice;
48302+ match->mode &= ~GR_DELETED;
48303+
48304+ insert_acl_obj_label(match, subj);
48305+ }
48306+
48307+ return;
48308+}
48309+
48310+static void
48311+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
48312+ const ino_t newinode, const dev_t newdevice,
48313+ struct acl_role_label *role)
48314+{
48315+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
48316+ struct acl_subject_label *match;
48317+
48318+ match = role->subj_hash[index];
48319+
48320+ while (match && (match->inode != oldinode ||
48321+ match->device != olddevice ||
48322+ !(match->mode & GR_DELETED)))
48323+ match = match->next;
48324+
48325+ if (match && (match->inode == oldinode)
48326+ && (match->device == olddevice)
48327+ && (match->mode & GR_DELETED)) {
48328+ if (match->prev == NULL) {
48329+ role->subj_hash[index] = match->next;
48330+ if (match->next != NULL)
48331+ match->next->prev = NULL;
48332+ } else {
48333+ match->prev->next = match->next;
48334+ if (match->next != NULL)
48335+ match->next->prev = match->prev;
48336+ }
48337+ match->prev = NULL;
48338+ match->next = NULL;
48339+ match->inode = newinode;
48340+ match->device = newdevice;
48341+ match->mode &= ~GR_DELETED;
48342+
48343+ insert_acl_subj_label(match, role);
48344+ }
48345+
48346+ return;
48347+}
48348+
48349+static void
48350+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
48351+ const ino_t newinode, const dev_t newdevice)
48352+{
48353+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
48354+ struct inodev_entry *match;
48355+
48356+ match = inodev_set.i_hash[index];
48357+
48358+ while (match && (match->nentry->inode != oldinode ||
48359+ match->nentry->device != olddevice || !match->nentry->deleted))
48360+ match = match->next;
48361+
48362+ if (match && (match->nentry->inode == oldinode)
48363+ && (match->nentry->device == olddevice) &&
48364+ match->nentry->deleted) {
48365+ if (match->prev == NULL) {
48366+ inodev_set.i_hash[index] = match->next;
48367+ if (match->next != NULL)
48368+ match->next->prev = NULL;
48369+ } else {
48370+ match->prev->next = match->next;
48371+ if (match->next != NULL)
48372+ match->next->prev = match->prev;
48373+ }
48374+ match->prev = NULL;
48375+ match->next = NULL;
48376+ match->nentry->inode = newinode;
48377+ match->nentry->device = newdevice;
48378+ match->nentry->deleted = 0;
48379+
48380+ insert_inodev_entry(match);
48381+ }
48382+
48383+ return;
48384+}
48385+
48386+static void
48387+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
48388+ const struct vfsmount *mnt)
48389+{
48390+ struct acl_subject_label *subj;
48391+ struct acl_role_label *role;
48392+ unsigned int x;
48393+ ino_t ino = dentry->d_inode->i_ino;
48394+ dev_t dev = __get_dev(dentry);
48395+
48396+ FOR_EACH_ROLE_START(role)
48397+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
48398+
48399+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
48400+ if ((subj->inode == ino) && (subj->device == dev)) {
48401+ subj->inode = ino;
48402+ subj->device = dev;
48403+ }
48404+ FOR_EACH_NESTED_SUBJECT_END(subj)
48405+ FOR_EACH_SUBJECT_START(role, subj, x)
48406+ update_acl_obj_label(matchn->inode, matchn->device,
48407+ ino, dev, subj);
48408+ FOR_EACH_SUBJECT_END(subj,x)
48409+ FOR_EACH_ROLE_END(role)
48410+
48411+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
48412+
48413+ return;
48414+}
48415+
48416+void
48417+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
48418+{
48419+ struct name_entry *matchn;
48420+
48421+ if (unlikely(!(gr_status & GR_READY)))
48422+ return;
48423+
48424+ preempt_disable();
48425+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
48426+
48427+ if (unlikely((unsigned long)matchn)) {
48428+ write_lock(&gr_inode_lock);
48429+ do_handle_create(matchn, dentry, mnt);
48430+ write_unlock(&gr_inode_lock);
48431+ }
48432+ preempt_enable();
48433+
48434+ return;
48435+}
48436+
48437+void
48438+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
48439+ struct dentry *old_dentry,
48440+ struct dentry *new_dentry,
48441+ struct vfsmount *mnt, const __u8 replace)
48442+{
48443+ struct name_entry *matchn;
48444+ struct inodev_entry *inodev;
48445+ ino_t old_ino = old_dentry->d_inode->i_ino;
48446+ dev_t old_dev = __get_dev(old_dentry);
48447+
48448+ /* vfs_rename swaps the name and parent link for old_dentry and
48449+ new_dentry
48450+ at this point, old_dentry has the new name, parent link, and inode
48451+ for the renamed file
48452+ if a file is being replaced by a rename, new_dentry has the inode
48453+ and name for the replaced file
48454+ */
48455+
48456+ if (unlikely(!(gr_status & GR_READY)))
48457+ return;
48458+
48459+ preempt_disable();
48460+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
48461+
48462+ /* we wouldn't have to check d_inode if it weren't for
48463+ NFS silly-renaming
48464+ */
48465+
48466+ write_lock(&gr_inode_lock);
48467+ if (unlikely(replace && new_dentry->d_inode)) {
48468+ ino_t new_ino = new_dentry->d_inode->i_ino;
48469+ dev_t new_dev = __get_dev(new_dentry);
48470+
48471+ inodev = lookup_inodev_entry(new_ino, new_dev);
48472+ if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
48473+ do_handle_delete(inodev, new_ino, new_dev);
48474+ }
48475+
48476+ inodev = lookup_inodev_entry(old_ino, old_dev);
48477+ if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
48478+ do_handle_delete(inodev, old_ino, old_dev);
48479+
48480+ if (unlikely((unsigned long)matchn))
48481+ do_handle_create(matchn, old_dentry, mnt);
48482+
48483+ write_unlock(&gr_inode_lock);
48484+ preempt_enable();
48485+
48486+ return;
48487+}
48488+
48489+static int
48490+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
48491+ unsigned char **sum)
48492+{
48493+ struct acl_role_label *r;
48494+ struct role_allowed_ip *ipp;
48495+ struct role_transition *trans;
48496+ unsigned int i;
48497+ int found = 0;
48498+ u32 curr_ip = current->signal->curr_ip;
48499+
48500+ current->signal->saved_ip = curr_ip;
48501+
48502+ /* check transition table */
48503+
48504+ for (trans = current->role->transitions; trans; trans = trans->next) {
48505+ if (!strcmp(rolename, trans->rolename)) {
48506+ found = 1;
48507+ break;
48508+ }
48509+ }
48510+
48511+ if (!found)
48512+ return 0;
48513+
48514+ /* handle special roles that do not require authentication
48515+ and check ip */
48516+
48517+ FOR_EACH_ROLE_START(r)
48518+ if (!strcmp(rolename, r->rolename) &&
48519+ (r->roletype & GR_ROLE_SPECIAL)) {
48520+ found = 0;
48521+ if (r->allowed_ips != NULL) {
48522+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
48523+ if ((ntohl(curr_ip) & ipp->netmask) ==
48524+ (ntohl(ipp->addr) & ipp->netmask))
48525+ found = 1;
48526+ }
48527+ } else
48528+ found = 2;
48529+ if (!found)
48530+ return 0;
48531+
48532+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
48533+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
48534+ *salt = NULL;
48535+ *sum = NULL;
48536+ return 1;
48537+ }
48538+ }
48539+ FOR_EACH_ROLE_END(r)
48540+
48541+ for (i = 0; i < num_sprole_pws; i++) {
48542+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
48543+ *salt = acl_special_roles[i]->salt;
48544+ *sum = acl_special_roles[i]->sum;
48545+ return 1;
48546+ }
48547+ }
48548+
48549+ return 0;
48550+}
48551+
48552+static void
48553+assign_special_role(char *rolename)
48554+{
48555+ struct acl_object_label *obj;
48556+ struct acl_role_label *r;
48557+ struct acl_role_label *assigned = NULL;
48558+ struct task_struct *tsk;
48559+ struct file *filp;
48560+
48561+ FOR_EACH_ROLE_START(r)
48562+ if (!strcmp(rolename, r->rolename) &&
48563+ (r->roletype & GR_ROLE_SPECIAL)) {
48564+ assigned = r;
48565+ break;
48566+ }
48567+ FOR_EACH_ROLE_END(r)
48568+
48569+ if (!assigned)
48570+ return;
48571+
48572+ read_lock(&tasklist_lock);
48573+ read_lock(&grsec_exec_file_lock);
48574+
48575+ tsk = current->real_parent;
48576+ if (tsk == NULL)
48577+ goto out_unlock;
48578+
48579+ filp = tsk->exec_file;
48580+ if (filp == NULL)
48581+ goto out_unlock;
48582+
48583+ tsk->is_writable = 0;
48584+
48585+ tsk->acl_sp_role = 1;
48586+ tsk->acl_role_id = ++acl_sp_role_value;
48587+ tsk->role = assigned;
48588+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
48589+
48590+ /* ignore additional mmap checks for processes that are writable
48591+ by the default ACL */
48592+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
48593+ if (unlikely(obj->mode & GR_WRITE))
48594+ tsk->is_writable = 1;
48595+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
48596+ if (unlikely(obj->mode & GR_WRITE))
48597+ tsk->is_writable = 1;
48598+
48599+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
48600+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
48601+#endif
48602+
48603+out_unlock:
48604+ read_unlock(&grsec_exec_file_lock);
48605+ read_unlock(&tasklist_lock);
48606+ return;
48607+}
48608+
48609+int gr_check_secure_terminal(struct task_struct *task)
48610+{
48611+ struct task_struct *p, *p2, *p3;
48612+ struct files_struct *files;
48613+ struct fdtable *fdt;
48614+ struct file *our_file = NULL, *file;
48615+ int i;
48616+
48617+ if (task->signal->tty == NULL)
48618+ return 1;
48619+
48620+ files = get_files_struct(task);
48621+ if (files != NULL) {
48622+ rcu_read_lock();
48623+ fdt = files_fdtable(files);
48624+ for (i=0; i < fdt->max_fds; i++) {
48625+ file = fcheck_files(files, i);
48626+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
48627+ get_file(file);
48628+ our_file = file;
48629+ }
48630+ }
48631+ rcu_read_unlock();
48632+ put_files_struct(files);
48633+ }
48634+
48635+ if (our_file == NULL)
48636+ return 1;
48637+
48638+ read_lock(&tasklist_lock);
48639+ do_each_thread(p2, p) {
48640+ files = get_files_struct(p);
48641+ if (files == NULL ||
48642+ (p->signal && p->signal->tty == task->signal->tty)) {
48643+ if (files != NULL)
48644+ put_files_struct(files);
48645+ continue;
48646+ }
48647+ rcu_read_lock();
48648+ fdt = files_fdtable(files);
48649+ for (i=0; i < fdt->max_fds; i++) {
48650+ file = fcheck_files(files, i);
48651+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
48652+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
48653+ p3 = task;
48654+ while (p3->pid > 0) {
48655+ if (p3 == p)
48656+ break;
48657+ p3 = p3->real_parent;
48658+ }
48659+ if (p3 == p)
48660+ break;
48661+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
48662+ gr_handle_alertkill(p);
48663+ rcu_read_unlock();
48664+ put_files_struct(files);
48665+ read_unlock(&tasklist_lock);
48666+ fput(our_file);
48667+ return 0;
48668+ }
48669+ }
48670+ rcu_read_unlock();
48671+ put_files_struct(files);
48672+ } while_each_thread(p2, p);
48673+ read_unlock(&tasklist_lock);
48674+
48675+ fput(our_file);
48676+ return 1;
48677+}
48678+
48679+ssize_t
48680+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
48681+{
48682+ struct gr_arg_wrapper uwrap;
48683+ unsigned char *sprole_salt = NULL;
48684+ unsigned char *sprole_sum = NULL;
48685+ int error = sizeof (struct gr_arg_wrapper);
48686+ int error2 = 0;
48687+
48688+ mutex_lock(&gr_dev_mutex);
48689+
48690+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
48691+ error = -EPERM;
48692+ goto out;
48693+ }
48694+
48695+ if (count != sizeof (struct gr_arg_wrapper)) {
48696+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
48697+ error = -EINVAL;
48698+ goto out;
48699+ }
48700+
48701+
48702+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
48703+ gr_auth_expires = 0;
48704+ gr_auth_attempts = 0;
48705+ }
48706+
48707+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
48708+ error = -EFAULT;
48709+ goto out;
48710+ }
48711+
48712+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
48713+ error = -EINVAL;
48714+ goto out;
48715+ }
48716+
48717+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
48718+ error = -EFAULT;
48719+ goto out;
48720+ }
48721+
48722+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
48723+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
48724+ time_after(gr_auth_expires, get_seconds())) {
48725+ error = -EBUSY;
48726+ goto out;
48727+ }
48728+
48729+ /* if non-root trying to do anything other than use a special role,
48730+ do not attempt authentication, do not count towards authentication
48731+ locking
48732+ */
48733+
48734+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
48735+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
48736+ current_uid()) {
48737+ error = -EPERM;
48738+ goto out;
48739+ }
48740+
48741+ /* ensure pw and special role name are null terminated */
48742+
48743+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
48744+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
48745+
48746+ /* Okay.
48747+ * We have our enough of the argument structure..(we have yet
48748+ * to copy_from_user the tables themselves) . Copy the tables
48749+ * only if we need them, i.e. for loading operations. */
48750+
48751+ switch (gr_usermode->mode) {
48752+ case GR_STATUS:
48753+ if (gr_status & GR_READY) {
48754+ error = 1;
48755+ if (!gr_check_secure_terminal(current))
48756+ error = 3;
48757+ } else
48758+ error = 2;
48759+ goto out;
48760+ case GR_SHUTDOWN:
48761+ if ((gr_status & GR_READY)
48762+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
48763+ pax_open_kernel();
48764+ gr_status &= ~GR_READY;
48765+ pax_close_kernel();
48766+
48767+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
48768+ free_variables();
48769+ memset(gr_usermode, 0, sizeof (struct gr_arg));
48770+ memset(gr_system_salt, 0, GR_SALT_LEN);
48771+ memset(gr_system_sum, 0, GR_SHA_LEN);
48772+ } else if (gr_status & GR_READY) {
48773+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
48774+ error = -EPERM;
48775+ } else {
48776+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
48777+ error = -EAGAIN;
48778+ }
48779+ break;
48780+ case GR_ENABLE:
48781+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
48782+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
48783+ else {
48784+ if (gr_status & GR_READY)
48785+ error = -EAGAIN;
48786+ else
48787+ error = error2;
48788+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
48789+ }
48790+ break;
48791+ case GR_RELOAD:
48792+ if (!(gr_status & GR_READY)) {
48793+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
48794+ error = -EAGAIN;
48795+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
48796+ preempt_disable();
48797+
48798+ pax_open_kernel();
48799+ gr_status &= ~GR_READY;
48800+ pax_close_kernel();
48801+
48802+ free_variables();
48803+ if (!(error2 = gracl_init(gr_usermode))) {
48804+ preempt_enable();
48805+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
48806+ } else {
48807+ preempt_enable();
48808+ error = error2;
48809+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
48810+ }
48811+ } else {
48812+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
48813+ error = -EPERM;
48814+ }
48815+ break;
48816+ case GR_SEGVMOD:
48817+ if (unlikely(!(gr_status & GR_READY))) {
48818+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
48819+ error = -EAGAIN;
48820+ break;
48821+ }
48822+
48823+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
48824+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
48825+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
48826+ struct acl_subject_label *segvacl;
48827+ segvacl =
48828+ lookup_acl_subj_label(gr_usermode->segv_inode,
48829+ gr_usermode->segv_device,
48830+ current->role);
48831+ if (segvacl) {
48832+ segvacl->crashes = 0;
48833+ segvacl->expires = 0;
48834+ }
48835+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
48836+ gr_remove_uid(gr_usermode->segv_uid);
48837+ }
48838+ } else {
48839+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
48840+ error = -EPERM;
48841+ }
48842+ break;
48843+ case GR_SPROLE:
48844+ case GR_SPROLEPAM:
48845+ if (unlikely(!(gr_status & GR_READY))) {
48846+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
48847+ error = -EAGAIN;
48848+ break;
48849+ }
48850+
48851+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
48852+ current->role->expires = 0;
48853+ current->role->auth_attempts = 0;
48854+ }
48855+
48856+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
48857+ time_after(current->role->expires, get_seconds())) {
48858+ error = -EBUSY;
48859+ goto out;
48860+ }
48861+
48862+ if (lookup_special_role_auth
48863+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
48864+ && ((!sprole_salt && !sprole_sum)
48865+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
48866+ char *p = "";
48867+ assign_special_role(gr_usermode->sp_role);
48868+ read_lock(&tasklist_lock);
48869+ if (current->real_parent)
48870+ p = current->real_parent->role->rolename;
48871+ read_unlock(&tasklist_lock);
48872+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
48873+ p, acl_sp_role_value);
48874+ } else {
48875+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
48876+ error = -EPERM;
48877+ if(!(current->role->auth_attempts++))
48878+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
48879+
48880+ goto out;
48881+ }
48882+ break;
48883+ case GR_UNSPROLE:
48884+ if (unlikely(!(gr_status & GR_READY))) {
48885+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
48886+ error = -EAGAIN;
48887+ break;
48888+ }
48889+
48890+ if (current->role->roletype & GR_ROLE_SPECIAL) {
48891+ char *p = "";
48892+ int i = 0;
48893+
48894+ read_lock(&tasklist_lock);
48895+ if (current->real_parent) {
48896+ p = current->real_parent->role->rolename;
48897+ i = current->real_parent->acl_role_id;
48898+ }
48899+ read_unlock(&tasklist_lock);
48900+
48901+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
48902+ gr_set_acls(1);
48903+ } else {
48904+ error = -EPERM;
48905+ goto out;
48906+ }
48907+ break;
48908+ default:
48909+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
48910+ error = -EINVAL;
48911+ break;
48912+ }
48913+
48914+ if (error != -EPERM)
48915+ goto out;
48916+
48917+ if(!(gr_auth_attempts++))
48918+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
48919+
48920+ out:
48921+ mutex_unlock(&gr_dev_mutex);
48922+ return error;
48923+}
48924+
48925+/* must be called with
48926+ rcu_read_lock();
48927+ read_lock(&tasklist_lock);
48928+ read_lock(&grsec_exec_file_lock);
48929+*/
48930+int gr_apply_subject_to_task(struct task_struct *task)
48931+{
48932+ struct acl_object_label *obj;
48933+ char *tmpname;
48934+ struct acl_subject_label *tmpsubj;
48935+ struct file *filp;
48936+ struct name_entry *nmatch;
48937+
48938+ filp = task->exec_file;
48939+ if (filp == NULL)
48940+ return 0;
48941+
48942+ /* the following is to apply the correct subject
48943+ on binaries running when the RBAC system
48944+ is enabled, when the binaries have been
48945+ replaced or deleted since their execution
48946+ -----
48947+ when the RBAC system starts, the inode/dev
48948+ from exec_file will be one the RBAC system
48949+ is unaware of. It only knows the inode/dev
48950+ of the present file on disk, or the absence
48951+ of it.
48952+ */
48953+ preempt_disable();
48954+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
48955+
48956+ nmatch = lookup_name_entry(tmpname);
48957+ preempt_enable();
48958+ tmpsubj = NULL;
48959+ if (nmatch) {
48960+ if (nmatch->deleted)
48961+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
48962+ else
48963+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
48964+ if (tmpsubj != NULL)
48965+ task->acl = tmpsubj;
48966+ }
48967+ if (tmpsubj == NULL)
48968+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
48969+ task->role);
48970+ if (task->acl) {
48971+ task->is_writable = 0;
48972+ /* ignore additional mmap checks for processes that are writable
48973+ by the default ACL */
48974+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
48975+ if (unlikely(obj->mode & GR_WRITE))
48976+ task->is_writable = 1;
48977+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
48978+ if (unlikely(obj->mode & GR_WRITE))
48979+ task->is_writable = 1;
48980+
48981+ gr_set_proc_res(task);
48982+
48983+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
48984+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
48985+#endif
48986+ } else {
48987+ return 1;
48988+ }
48989+
48990+ return 0;
48991+}
48992+
48993+int
48994+gr_set_acls(const int type)
48995+{
48996+ struct task_struct *task, *task2;
48997+ struct acl_role_label *role = current->role;
48998+ __u16 acl_role_id = current->acl_role_id;
48999+ const struct cred *cred;
49000+ int ret;
49001+
49002+ rcu_read_lock();
49003+ read_lock(&tasklist_lock);
49004+ read_lock(&grsec_exec_file_lock);
49005+ do_each_thread(task2, task) {
49006+ /* check to see if we're called from the exit handler,
49007+ if so, only replace ACLs that have inherited the admin
49008+ ACL */
49009+
49010+ if (type && (task->role != role ||
49011+ task->acl_role_id != acl_role_id))
49012+ continue;
49013+
49014+ task->acl_role_id = 0;
49015+ task->acl_sp_role = 0;
49016+
49017+ if (task->exec_file) {
49018+ cred = __task_cred(task);
49019+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
49020+ ret = gr_apply_subject_to_task(task);
49021+ if (ret) {
49022+ read_unlock(&grsec_exec_file_lock);
49023+ read_unlock(&tasklist_lock);
49024+ rcu_read_unlock();
49025+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
49026+ return ret;
49027+ }
49028+ } else {
49029+ // it's a kernel process
49030+ task->role = kernel_role;
49031+ task->acl = kernel_role->root_label;
49032+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
49033+ task->acl->mode &= ~GR_PROCFIND;
49034+#endif
49035+ }
49036+ } while_each_thread(task2, task);
49037+ read_unlock(&grsec_exec_file_lock);
49038+ read_unlock(&tasklist_lock);
49039+ rcu_read_unlock();
49040+
49041+ return 0;
49042+}
49043+
49044+void
49045+gr_learn_resource(const struct task_struct *task,
49046+ const int res, const unsigned long wanted, const int gt)
49047+{
49048+ struct acl_subject_label *acl;
49049+ const struct cred *cred;
49050+
49051+ if (unlikely((gr_status & GR_READY) &&
49052+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
49053+ goto skip_reslog;
49054+
49055+#ifdef CONFIG_GRKERNSEC_RESLOG
49056+ gr_log_resource(task, res, wanted, gt);
49057+#endif
49058+ skip_reslog:
49059+
49060+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
49061+ return;
49062+
49063+ acl = task->acl;
49064+
49065+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
49066+ !(acl->resmask & (1 << (unsigned short) res))))
49067+ return;
49068+
49069+ if (wanted >= acl->res[res].rlim_cur) {
49070+ unsigned long res_add;
49071+
49072+ res_add = wanted;
49073+ switch (res) {
49074+ case RLIMIT_CPU:
49075+ res_add += GR_RLIM_CPU_BUMP;
49076+ break;
49077+ case RLIMIT_FSIZE:
49078+ res_add += GR_RLIM_FSIZE_BUMP;
49079+ break;
49080+ case RLIMIT_DATA:
49081+ res_add += GR_RLIM_DATA_BUMP;
49082+ break;
49083+ case RLIMIT_STACK:
49084+ res_add += GR_RLIM_STACK_BUMP;
49085+ break;
49086+ case RLIMIT_CORE:
49087+ res_add += GR_RLIM_CORE_BUMP;
49088+ break;
49089+ case RLIMIT_RSS:
49090+ res_add += GR_RLIM_RSS_BUMP;
49091+ break;
49092+ case RLIMIT_NPROC:
49093+ res_add += GR_RLIM_NPROC_BUMP;
49094+ break;
49095+ case RLIMIT_NOFILE:
49096+ res_add += GR_RLIM_NOFILE_BUMP;
49097+ break;
49098+ case RLIMIT_MEMLOCK:
49099+ res_add += GR_RLIM_MEMLOCK_BUMP;
49100+ break;
49101+ case RLIMIT_AS:
49102+ res_add += GR_RLIM_AS_BUMP;
49103+ break;
49104+ case RLIMIT_LOCKS:
49105+ res_add += GR_RLIM_LOCKS_BUMP;
49106+ break;
49107+ case RLIMIT_SIGPENDING:
49108+ res_add += GR_RLIM_SIGPENDING_BUMP;
49109+ break;
49110+ case RLIMIT_MSGQUEUE:
49111+ res_add += GR_RLIM_MSGQUEUE_BUMP;
49112+ break;
49113+ case RLIMIT_NICE:
49114+ res_add += GR_RLIM_NICE_BUMP;
49115+ break;
49116+ case RLIMIT_RTPRIO:
49117+ res_add += GR_RLIM_RTPRIO_BUMP;
49118+ break;
49119+ case RLIMIT_RTTIME:
49120+ res_add += GR_RLIM_RTTIME_BUMP;
49121+ break;
49122+ }
49123+
49124+ acl->res[res].rlim_cur = res_add;
49125+
49126+ if (wanted > acl->res[res].rlim_max)
49127+ acl->res[res].rlim_max = res_add;
49128+
49129+ /* only log the subject filename, since resource logging is supported for
49130+ single-subject learning only */
49131+ rcu_read_lock();
49132+ cred = __task_cred(task);
49133+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
49134+ task->role->roletype, cred->uid, cred->gid, acl->filename,
49135+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
49136+ "", (unsigned long) res, &task->signal->saved_ip);
49137+ rcu_read_unlock();
49138+ }
49139+
49140+ return;
49141+}
49142+
49143+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
49144+void
49145+pax_set_initial_flags(struct linux_binprm *bprm)
49146+{
49147+ struct task_struct *task = current;
49148+ struct acl_subject_label *proc;
49149+ unsigned long flags;
49150+
49151+ if (unlikely(!(gr_status & GR_READY)))
49152+ return;
49153+
49154+ flags = pax_get_flags(task);
49155+
49156+ proc = task->acl;
49157+
49158+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
49159+ flags &= ~MF_PAX_PAGEEXEC;
49160+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
49161+ flags &= ~MF_PAX_SEGMEXEC;
49162+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
49163+ flags &= ~MF_PAX_RANDMMAP;
49164+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
49165+ flags &= ~MF_PAX_EMUTRAMP;
49166+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
49167+ flags &= ~MF_PAX_MPROTECT;
49168+
49169+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
49170+ flags |= MF_PAX_PAGEEXEC;
49171+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
49172+ flags |= MF_PAX_SEGMEXEC;
49173+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
49174+ flags |= MF_PAX_RANDMMAP;
49175+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
49176+ flags |= MF_PAX_EMUTRAMP;
49177+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
49178+ flags |= MF_PAX_MPROTECT;
49179+
49180+ pax_set_flags(task, flags);
49181+
49182+ return;
49183+}
49184+#endif
49185+
49186+#ifdef CONFIG_SYSCTL
49187+/* Eric Biederman likes breaking userland ABI and every inode-based security
49188+ system to save 35kb of memory */
49189+
49190+/* we modify the passed in filename, but adjust it back before returning */
49191+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
49192+{
49193+ struct name_entry *nmatch;
49194+ char *p, *lastp = NULL;
49195+ struct acl_object_label *obj = NULL, *tmp;
49196+ struct acl_subject_label *tmpsubj;
49197+ char c = '\0';
49198+
49199+ read_lock(&gr_inode_lock);
49200+
49201+ p = name + len - 1;
49202+ do {
49203+ nmatch = lookup_name_entry(name);
49204+ if (lastp != NULL)
49205+ *lastp = c;
49206+
49207+ if (nmatch == NULL)
49208+ goto next_component;
49209+ tmpsubj = current->acl;
49210+ do {
49211+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
49212+ if (obj != NULL) {
49213+ tmp = obj->globbed;
49214+ while (tmp) {
49215+ if (!glob_match(tmp->filename, name)) {
49216+ obj = tmp;
49217+ goto found_obj;
49218+ }
49219+ tmp = tmp->next;
49220+ }
49221+ goto found_obj;
49222+ }
49223+ } while ((tmpsubj = tmpsubj->parent_subject));
49224+next_component:
49225+ /* end case */
49226+ if (p == name)
49227+ break;
49228+
49229+ while (*p != '/')
49230+ p--;
49231+ if (p == name)
49232+ lastp = p + 1;
49233+ else {
49234+ lastp = p;
49235+ p--;
49236+ }
49237+ c = *lastp;
49238+ *lastp = '\0';
49239+ } while (1);
49240+found_obj:
49241+ read_unlock(&gr_inode_lock);
49242+ /* obj returned will always be non-null */
49243+ return obj;
49244+}
49245+
49246+/* returns 0 when allowing, non-zero on error
49247+ op of 0 is used for readdir, so we don't log the names of hidden files
49248+*/
49249+__u32
49250+gr_handle_sysctl(const struct ctl_table *table, const int op)
49251+{
49252+ struct ctl_table *tmp;
49253+ const char *proc_sys = "/proc/sys";
49254+ char *path;
49255+ struct acl_object_label *obj;
49256+ unsigned short len = 0, pos = 0, depth = 0, i;
49257+ __u32 err = 0;
49258+ __u32 mode = 0;
49259+
49260+ if (unlikely(!(gr_status & GR_READY)))
49261+ return 0;
49262+
49263+ /* for now, ignore operations on non-sysctl entries if it's not a
49264+ readdir*/
49265+ if (table->child != NULL && op != 0)
49266+ return 0;
49267+
49268+ mode |= GR_FIND;
49269+ /* it's only a read if it's an entry, read on dirs is for readdir */
49270+ if (op & MAY_READ)
49271+ mode |= GR_READ;
49272+ if (op & MAY_WRITE)
49273+ mode |= GR_WRITE;
49274+
49275+ preempt_disable();
49276+
49277+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
49278+
49279+ /* it's only a read/write if it's an actual entry, not a dir
49280+ (which are opened for readdir)
49281+ */
49282+
49283+ /* convert the requested sysctl entry into a pathname */
49284+
49285+ for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
49286+ len += strlen(tmp->procname);
49287+ len++;
49288+ depth++;
49289+ }
49290+
49291+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
49292+ /* deny */
49293+ goto out;
49294+ }
49295+
49296+ memset(path, 0, PAGE_SIZE);
49297+
49298+ memcpy(path, proc_sys, strlen(proc_sys));
49299+
49300+ pos += strlen(proc_sys);
49301+
49302+ for (; depth > 0; depth--) {
49303+ path[pos] = '/';
49304+ pos++;
49305+ for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
49306+ if (depth == i) {
49307+ memcpy(path + pos, tmp->procname,
49308+ strlen(tmp->procname));
49309+ pos += strlen(tmp->procname);
49310+ }
49311+ i++;
49312+ }
49313+ }
49314+
49315+ obj = gr_lookup_by_name(path, pos);
49316+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
49317+
49318+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
49319+ ((err & mode) != mode))) {
49320+ __u32 new_mode = mode;
49321+
49322+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
49323+
49324+ err = 0;
49325+ gr_log_learn_sysctl(path, new_mode);
49326+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
49327+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
49328+ err = -ENOENT;
49329+ } else if (!(err & GR_FIND)) {
49330+ err = -ENOENT;
49331+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
49332+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
49333+ path, (mode & GR_READ) ? " reading" : "",
49334+ (mode & GR_WRITE) ? " writing" : "");
49335+ err = -EACCES;
49336+ } else if ((err & mode) != mode) {
49337+ err = -EACCES;
49338+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
49339+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
49340+ path, (mode & GR_READ) ? " reading" : "",
49341+ (mode & GR_WRITE) ? " writing" : "");
49342+ err = 0;
49343+ } else
49344+ err = 0;
49345+
49346+ out:
49347+ preempt_enable();
49348+
49349+ return err;
49350+}
49351+#endif
49352+
49353+int
49354+gr_handle_proc_ptrace(struct task_struct *task)
49355+{
49356+ struct file *filp;
49357+ struct task_struct *tmp = task;
49358+ struct task_struct *curtemp = current;
49359+ __u32 retmode;
49360+
49361+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
49362+ if (unlikely(!(gr_status & GR_READY)))
49363+ return 0;
49364+#endif
49365+
49366+ read_lock(&tasklist_lock);
49367+ read_lock(&grsec_exec_file_lock);
49368+ filp = task->exec_file;
49369+
49370+ while (tmp->pid > 0) {
49371+ if (tmp == curtemp)
49372+ break;
49373+ tmp = tmp->real_parent;
49374+ }
49375+
49376+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
49377+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
49378+ read_unlock(&grsec_exec_file_lock);
49379+ read_unlock(&tasklist_lock);
49380+ return 1;
49381+ }
49382+
49383+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
49384+ if (!(gr_status & GR_READY)) {
49385+ read_unlock(&grsec_exec_file_lock);
49386+ read_unlock(&tasklist_lock);
49387+ return 0;
49388+ }
49389+#endif
49390+
49391+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
49392+ read_unlock(&grsec_exec_file_lock);
49393+ read_unlock(&tasklist_lock);
49394+
49395+ if (retmode & GR_NOPTRACE)
49396+ return 1;
49397+
49398+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
49399+ && (current->acl != task->acl || (current->acl != current->role->root_label
49400+ && current->pid != task->pid)))
49401+ return 1;
49402+
49403+ return 0;
49404+}
49405+
49406+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
49407+{
49408+ if (unlikely(!(gr_status & GR_READY)))
49409+ return;
49410+
49411+ if (!(current->role->roletype & GR_ROLE_GOD))
49412+ return;
49413+
49414+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
49415+ p->role->rolename, gr_task_roletype_to_char(p),
49416+ p->acl->filename);
49417+}
49418+
49419+int
49420+gr_handle_ptrace(struct task_struct *task, const long request)
49421+{
49422+ struct task_struct *tmp = task;
49423+ struct task_struct *curtemp = current;
49424+ __u32 retmode;
49425+
49426+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
49427+ if (unlikely(!(gr_status & GR_READY)))
49428+ return 0;
49429+#endif
49430+
49431+ read_lock(&tasklist_lock);
49432+ while (tmp->pid > 0) {
49433+ if (tmp == curtemp)
49434+ break;
49435+ tmp = tmp->real_parent;
49436+ }
49437+
49438+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
49439+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
49440+ read_unlock(&tasklist_lock);
49441+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
49442+ return 1;
49443+ }
49444+ read_unlock(&tasklist_lock);
49445+
49446+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
49447+ if (!(gr_status & GR_READY))
49448+ return 0;
49449+#endif
49450+
49451+ read_lock(&grsec_exec_file_lock);
49452+ if (unlikely(!task->exec_file)) {
49453+ read_unlock(&grsec_exec_file_lock);
49454+ return 0;
49455+ }
49456+
49457+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
49458+ read_unlock(&grsec_exec_file_lock);
49459+
49460+ if (retmode & GR_NOPTRACE) {
49461+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
49462+ return 1;
49463+ }
49464+
49465+ if (retmode & GR_PTRACERD) {
49466+ switch (request) {
49467+ case PTRACE_POKETEXT:
49468+ case PTRACE_POKEDATA:
49469+ case PTRACE_POKEUSR:
49470+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
49471+ case PTRACE_SETREGS:
49472+ case PTRACE_SETFPREGS:
49473+#endif
49474+#ifdef CONFIG_X86
49475+ case PTRACE_SETFPXREGS:
49476+#endif
49477+#ifdef CONFIG_ALTIVEC
49478+ case PTRACE_SETVRREGS:
49479+#endif
49480+ return 1;
49481+ default:
49482+ return 0;
49483+ }
49484+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
49485+ !(current->role->roletype & GR_ROLE_GOD) &&
49486+ (current->acl != task->acl)) {
49487+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
49488+ return 1;
49489+ }
49490+
49491+ return 0;
49492+}
49493+
49494+static int is_writable_mmap(const struct file *filp)
49495+{
49496+ struct task_struct *task = current;
49497+ struct acl_object_label *obj, *obj2;
49498+
49499+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
49500+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
49501+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
49502+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
49503+ task->role->root_label);
49504+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
49505+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
49506+ return 1;
49507+ }
49508+ }
49509+ return 0;
49510+}
49511+
49512+int
49513+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
49514+{
49515+ __u32 mode;
49516+
49517+ if (unlikely(!file || !(prot & PROT_EXEC)))
49518+ return 1;
49519+
49520+ if (is_writable_mmap(file))
49521+ return 0;
49522+
49523+ mode =
49524+ gr_search_file(file->f_path.dentry,
49525+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
49526+ file->f_path.mnt);
49527+
49528+ if (!gr_tpe_allow(file))
49529+ return 0;
49530+
49531+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
49532+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
49533+ return 0;
49534+ } else if (unlikely(!(mode & GR_EXEC))) {
49535+ return 0;
49536+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
49537+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
49538+ return 1;
49539+ }
49540+
49541+ return 1;
49542+}
49543+
49544+int
49545+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
49546+{
49547+ __u32 mode;
49548+
49549+ if (unlikely(!file || !(prot & PROT_EXEC)))
49550+ return 1;
49551+
49552+ if (is_writable_mmap(file))
49553+ return 0;
49554+
49555+ mode =
49556+ gr_search_file(file->f_path.dentry,
49557+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
49558+ file->f_path.mnt);
49559+
49560+ if (!gr_tpe_allow(file))
49561+ return 0;
49562+
49563+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
49564+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
49565+ return 0;
49566+ } else if (unlikely(!(mode & GR_EXEC))) {
49567+ return 0;
49568+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
49569+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
49570+ return 1;
49571+ }
49572+
49573+ return 1;
49574+}
49575+
49576+void
49577+gr_acl_handle_psacct(struct task_struct *task, const long code)
49578+{
49579+ unsigned long runtime;
49580+ unsigned long cputime;
49581+ unsigned int wday, cday;
49582+ __u8 whr, chr;
49583+ __u8 wmin, cmin;
49584+ __u8 wsec, csec;
49585+ struct timespec timeval;
49586+
49587+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
49588+ !(task->acl->mode & GR_PROCACCT)))
49589+ return;
49590+
49591+ do_posix_clock_monotonic_gettime(&timeval);
49592+ runtime = timeval.tv_sec - task->start_time.tv_sec;
49593+ wday = runtime / (3600 * 24);
49594+ runtime -= wday * (3600 * 24);
49595+ whr = runtime / 3600;
49596+ runtime -= whr * 3600;
49597+ wmin = runtime / 60;
49598+ runtime -= wmin * 60;
49599+ wsec = runtime;
49600+
49601+ cputime = (task->utime + task->stime) / HZ;
49602+ cday = cputime / (3600 * 24);
49603+ cputime -= cday * (3600 * 24);
49604+ chr = cputime / 3600;
49605+ cputime -= chr * 3600;
49606+ cmin = cputime / 60;
49607+ cputime -= cmin * 60;
49608+ csec = cputime;
49609+
49610+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
49611+
49612+ return;
49613+}
49614+
49615+void gr_set_kernel_label(struct task_struct *task)
49616+{
49617+ if (gr_status & GR_READY) {
49618+ task->role = kernel_role;
49619+ task->acl = kernel_role->root_label;
49620+ }
49621+ return;
49622+}
49623+
49624+#ifdef CONFIG_TASKSTATS
49625+int gr_is_taskstats_denied(int pid)
49626+{
49627+ struct task_struct *task;
49628+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49629+ const struct cred *cred;
49630+#endif
49631+ int ret = 0;
49632+
49633+ /* restrict taskstats viewing to un-chrooted root users
49634+ who have the 'view' subject flag if the RBAC system is enabled
49635+ */
49636+
49637+ rcu_read_lock();
49638+ read_lock(&tasklist_lock);
49639+ task = find_task_by_vpid(pid);
49640+ if (task) {
49641+#ifdef CONFIG_GRKERNSEC_CHROOT
49642+ if (proc_is_chrooted(task))
49643+ ret = -EACCES;
49644+#endif
49645+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49646+ cred = __task_cred(task);
49647+#ifdef CONFIG_GRKERNSEC_PROC_USER
49648+ if (cred->uid != 0)
49649+ ret = -EACCES;
49650+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49651+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
49652+ ret = -EACCES;
49653+#endif
49654+#endif
49655+ if (gr_status & GR_READY) {
49656+ if (!(task->acl->mode & GR_VIEW))
49657+ ret = -EACCES;
49658+ }
49659+ } else
49660+ ret = -ENOENT;
49661+
49662+ read_unlock(&tasklist_lock);
49663+ rcu_read_unlock();
49664+
49665+ return ret;
49666+}
49667+#endif
49668+
49669+/* AUXV entries are filled via a descendant of search_binary_handler
49670+ after we've already applied the subject for the target
49671+*/
49672+int gr_acl_enable_at_secure(void)
49673+{
49674+ if (unlikely(!(gr_status & GR_READY)))
49675+ return 0;
49676+
49677+ if (current->acl->mode & GR_ATSECURE)
49678+ return 1;
49679+
49680+ return 0;
49681+}
49682+
49683+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
49684+{
49685+ struct task_struct *task = current;
49686+ struct dentry *dentry = file->f_path.dentry;
49687+ struct vfsmount *mnt = file->f_path.mnt;
49688+ struct acl_object_label *obj, *tmp;
49689+ struct acl_subject_label *subj;
49690+ unsigned int bufsize;
49691+ int is_not_root;
49692+ char *path;
49693+ dev_t dev = __get_dev(dentry);
49694+
49695+ if (unlikely(!(gr_status & GR_READY)))
49696+ return 1;
49697+
49698+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
49699+ return 1;
49700+
49701+ /* ignore Eric Biederman */
49702+ if (IS_PRIVATE(dentry->d_inode))
49703+ return 1;
49704+
49705+ subj = task->acl;
49706+ do {
49707+ obj = lookup_acl_obj_label(ino, dev, subj);
49708+ if (obj != NULL)
49709+ return (obj->mode & GR_FIND) ? 1 : 0;
49710+ } while ((subj = subj->parent_subject));
49711+
49712+ /* this is purely an optimization since we're looking for an object
49713+ for the directory we're doing a readdir on
49714+ if it's possible for any globbed object to match the entry we're
49715+ filling into the directory, then the object we find here will be
49716+ an anchor point with attached globbed objects
49717+ */
49718+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
49719+ if (obj->globbed == NULL)
49720+ return (obj->mode & GR_FIND) ? 1 : 0;
49721+
49722+ is_not_root = ((obj->filename[0] == '/') &&
49723+ (obj->filename[1] == '\0')) ? 0 : 1;
49724+ bufsize = PAGE_SIZE - namelen - is_not_root;
49725+
49726+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
49727+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
49728+ return 1;
49729+
49730+ preempt_disable();
49731+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
49732+ bufsize);
49733+
49734+ bufsize = strlen(path);
49735+
49736+ /* if base is "/", don't append an additional slash */
49737+ if (is_not_root)
49738+ *(path + bufsize) = '/';
49739+ memcpy(path + bufsize + is_not_root, name, namelen);
49740+ *(path + bufsize + namelen + is_not_root) = '\0';
49741+
49742+ tmp = obj->globbed;
49743+ while (tmp) {
49744+ if (!glob_match(tmp->filename, path)) {
49745+ preempt_enable();
49746+ return (tmp->mode & GR_FIND) ? 1 : 0;
49747+ }
49748+ tmp = tmp->next;
49749+ }
49750+ preempt_enable();
49751+ return (obj->mode & GR_FIND) ? 1 : 0;
49752+}
49753+
49754+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
49755+EXPORT_SYMBOL(gr_acl_is_enabled);
49756+#endif
49757+EXPORT_SYMBOL(gr_learn_resource);
49758+EXPORT_SYMBOL(gr_set_kernel_label);
49759+#ifdef CONFIG_SECURITY
49760+EXPORT_SYMBOL(gr_check_user_change);
49761+EXPORT_SYMBOL(gr_check_group_change);
49762+#endif
49763+
49764diff -urNp linux-3.0.4/grsecurity/gracl_cap.c linux-3.0.4/grsecurity/gracl_cap.c
49765--- linux-3.0.4/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
49766+++ linux-3.0.4/grsecurity/gracl_cap.c 2011-09-14 09:21:24.000000000 -0400
49767@@ -0,0 +1,101 @@
49768+#include <linux/kernel.h>
49769+#include <linux/module.h>
49770+#include <linux/sched.h>
49771+#include <linux/gracl.h>
49772+#include <linux/grsecurity.h>
49773+#include <linux/grinternal.h>
49774+
49775+extern const char *captab_log[];
49776+extern int captab_log_entries;
49777+
49778+int
49779+gr_acl_is_capable(const int cap)
49780+{
49781+ struct task_struct *task = current;
49782+ const struct cred *cred = current_cred();
49783+ struct acl_subject_label *curracl;
49784+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
49785+ kernel_cap_t cap_audit = __cap_empty_set;
49786+
49787+ if (!gr_acl_is_enabled())
49788+ return 1;
49789+
49790+ curracl = task->acl;
49791+
49792+ cap_drop = curracl->cap_lower;
49793+ cap_mask = curracl->cap_mask;
49794+ cap_audit = curracl->cap_invert_audit;
49795+
49796+ while ((curracl = curracl->parent_subject)) {
49797+ /* if the cap isn't specified in the current computed mask but is specified in the
49798+ current level subject, and is lowered in the current level subject, then add
49799+ it to the set of dropped capabilities
49800+ otherwise, add the current level subject's mask to the current computed mask
49801+ */
49802+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
49803+ cap_raise(cap_mask, cap);
49804+ if (cap_raised(curracl->cap_lower, cap))
49805+ cap_raise(cap_drop, cap);
49806+ if (cap_raised(curracl->cap_invert_audit, cap))
49807+ cap_raise(cap_audit, cap);
49808+ }
49809+ }
49810+
49811+ if (!cap_raised(cap_drop, cap)) {
49812+ if (cap_raised(cap_audit, cap))
49813+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
49814+ return 1;
49815+ }
49816+
49817+ curracl = task->acl;
49818+
49819+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
49820+ && cap_raised(cred->cap_effective, cap)) {
49821+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
49822+ task->role->roletype, cred->uid,
49823+ cred->gid, task->exec_file ?
49824+ gr_to_filename(task->exec_file->f_path.dentry,
49825+ task->exec_file->f_path.mnt) : curracl->filename,
49826+ curracl->filename, 0UL,
49827+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
49828+ return 1;
49829+ }
49830+
49831+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
49832+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
49833+ return 0;
49834+}
49835+
49836+int
49837+gr_acl_is_capable_nolog(const int cap)
49838+{
49839+ struct acl_subject_label *curracl;
49840+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
49841+
49842+ if (!gr_acl_is_enabled())
49843+ return 1;
49844+
49845+ curracl = current->acl;
49846+
49847+ cap_drop = curracl->cap_lower;
49848+ cap_mask = curracl->cap_mask;
49849+
49850+ while ((curracl = curracl->parent_subject)) {
49851+ /* if the cap isn't specified in the current computed mask but is specified in the
49852+ current level subject, and is lowered in the current level subject, then add
49853+ it to the set of dropped capabilities
49854+ otherwise, add the current level subject's mask to the current computed mask
49855+ */
49856+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
49857+ cap_raise(cap_mask, cap);
49858+ if (cap_raised(curracl->cap_lower, cap))
49859+ cap_raise(cap_drop, cap);
49860+ }
49861+ }
49862+
49863+ if (!cap_raised(cap_drop, cap))
49864+ return 1;
49865+
49866+ return 0;
49867+}
49868+
49869diff -urNp linux-3.0.4/grsecurity/gracl_fs.c linux-3.0.4/grsecurity/gracl_fs.c
49870--- linux-3.0.4/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
49871+++ linux-3.0.4/grsecurity/gracl_fs.c 2011-08-23 21:48:14.000000000 -0400
49872@@ -0,0 +1,431 @@
49873+#include <linux/kernel.h>
49874+#include <linux/sched.h>
49875+#include <linux/types.h>
49876+#include <linux/fs.h>
49877+#include <linux/file.h>
49878+#include <linux/stat.h>
49879+#include <linux/grsecurity.h>
49880+#include <linux/grinternal.h>
49881+#include <linux/gracl.h>
49882+
49883+__u32
49884+gr_acl_handle_hidden_file(const struct dentry * dentry,
49885+ const struct vfsmount * mnt)
49886+{
49887+ __u32 mode;
49888+
49889+ if (unlikely(!dentry->d_inode))
49890+ return GR_FIND;
49891+
49892+ mode =
49893+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
49894+
49895+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
49896+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
49897+ return mode;
49898+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
49899+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
49900+ return 0;
49901+ } else if (unlikely(!(mode & GR_FIND)))
49902+ return 0;
49903+
49904+ return GR_FIND;
49905+}
49906+
49907+__u32
49908+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
49909+ const int fmode)
49910+{
49911+ __u32 reqmode = GR_FIND;
49912+ __u32 mode;
49913+
49914+ if (unlikely(!dentry->d_inode))
49915+ return reqmode;
49916+
49917+ if (unlikely(fmode & O_APPEND))
49918+ reqmode |= GR_APPEND;
49919+ else if (unlikely(fmode & FMODE_WRITE))
49920+ reqmode |= GR_WRITE;
49921+ if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
49922+ reqmode |= GR_READ;
49923+ if ((fmode & FMODE_GREXEC) && (fmode & __FMODE_EXEC))
49924+ reqmode &= ~GR_READ;
49925+ mode =
49926+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
49927+ mnt);
49928+
49929+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
49930+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
49931+ reqmode & GR_READ ? " reading" : "",
49932+ reqmode & GR_WRITE ? " writing" : reqmode &
49933+ GR_APPEND ? " appending" : "");
49934+ return reqmode;
49935+ } else
49936+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
49937+ {
49938+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
49939+ reqmode & GR_READ ? " reading" : "",
49940+ reqmode & GR_WRITE ? " writing" : reqmode &
49941+ GR_APPEND ? " appending" : "");
49942+ return 0;
49943+ } else if (unlikely((mode & reqmode) != reqmode))
49944+ return 0;
49945+
49946+ return reqmode;
49947+}
49948+
49949+__u32
49950+gr_acl_handle_creat(const struct dentry * dentry,
49951+ const struct dentry * p_dentry,
49952+ const struct vfsmount * p_mnt, const int fmode,
49953+ const int imode)
49954+{
49955+ __u32 reqmode = GR_WRITE | GR_CREATE;
49956+ __u32 mode;
49957+
49958+ if (unlikely(fmode & O_APPEND))
49959+ reqmode |= GR_APPEND;
49960+ if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
49961+ reqmode |= GR_READ;
49962+ if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
49963+ reqmode |= GR_SETID;
49964+
49965+ mode =
49966+ gr_check_create(dentry, p_dentry, p_mnt,
49967+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
49968+
49969+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
49970+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
49971+ reqmode & GR_READ ? " reading" : "",
49972+ reqmode & GR_WRITE ? " writing" : reqmode &
49973+ GR_APPEND ? " appending" : "");
49974+ return reqmode;
49975+ } else
49976+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
49977+ {
49978+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
49979+ reqmode & GR_READ ? " reading" : "",
49980+ reqmode & GR_WRITE ? " writing" : reqmode &
49981+ GR_APPEND ? " appending" : "");
49982+ return 0;
49983+ } else if (unlikely((mode & reqmode) != reqmode))
49984+ return 0;
49985+
49986+ return reqmode;
49987+}
49988+
49989+__u32
49990+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
49991+ const int fmode)
49992+{
49993+ __u32 mode, reqmode = GR_FIND;
49994+
49995+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
49996+ reqmode |= GR_EXEC;
49997+ if (fmode & S_IWOTH)
49998+ reqmode |= GR_WRITE;
49999+ if (fmode & S_IROTH)
50000+ reqmode |= GR_READ;
50001+
50002+ mode =
50003+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
50004+ mnt);
50005+
50006+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
50007+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
50008+ reqmode & GR_READ ? " reading" : "",
50009+ reqmode & GR_WRITE ? " writing" : "",
50010+ reqmode & GR_EXEC ? " executing" : "");
50011+ return reqmode;
50012+ } else
50013+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
50014+ {
50015+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
50016+ reqmode & GR_READ ? " reading" : "",
50017+ reqmode & GR_WRITE ? " writing" : "",
50018+ reqmode & GR_EXEC ? " executing" : "");
50019+ return 0;
50020+ } else if (unlikely((mode & reqmode) != reqmode))
50021+ return 0;
50022+
50023+ return reqmode;
50024+}
50025+
50026+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
50027+{
50028+ __u32 mode;
50029+
50030+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
50031+
50032+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
50033+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
50034+ return mode;
50035+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
50036+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
50037+ return 0;
50038+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
50039+ return 0;
50040+
50041+ return (reqmode);
50042+}
50043+
50044+__u32
50045+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
50046+{
50047+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
50048+}
50049+
50050+__u32
50051+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
50052+{
50053+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
50054+}
50055+
50056+__u32
50057+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
50058+{
50059+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
50060+}
50061+
50062+__u32
50063+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
50064+{
50065+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
50066+}
50067+
50068+__u32
50069+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
50070+ mode_t mode)
50071+{
50072+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
50073+ return 1;
50074+
50075+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
50076+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
50077+ GR_FCHMOD_ACL_MSG);
50078+ } else {
50079+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
50080+ }
50081+}
50082+
50083+__u32
50084+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
50085+ mode_t mode)
50086+{
50087+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
50088+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
50089+ GR_CHMOD_ACL_MSG);
50090+ } else {
50091+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
50092+ }
50093+}
50094+
50095+__u32
50096+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
50097+{
50098+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
50099+}
50100+
50101+__u32
50102+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
50103+{
50104+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
50105+}
50106+
50107+__u32
50108+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
50109+{
50110+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
50111+}
50112+
50113+__u32
50114+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
50115+{
50116+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
50117+ GR_UNIXCONNECT_ACL_MSG);
50118+}
50119+
50120+/* hardlinks require at minimum create permission,
50121+ any additional privilege required is based on the
50122+ privilege of the file being linked to
50123+*/
50124+__u32
50125+gr_acl_handle_link(const struct dentry * new_dentry,
50126+ const struct dentry * parent_dentry,
50127+ const struct vfsmount * parent_mnt,
50128+ const struct dentry * old_dentry,
50129+ const struct vfsmount * old_mnt, const char *to)
50130+{
50131+ __u32 mode;
50132+ __u32 needmode = GR_CREATE | GR_LINK;
50133+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
50134+
50135+ mode =
50136+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
50137+ old_mnt);
50138+
50139+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
50140+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
50141+ return mode;
50142+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
50143+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
50144+ return 0;
50145+ } else if (unlikely((mode & needmode) != needmode))
50146+ return 0;
50147+
50148+ return 1;
50149+}
50150+
50151+__u32
50152+gr_acl_handle_symlink(const struct dentry * new_dentry,
50153+ const struct dentry * parent_dentry,
50154+ const struct vfsmount * parent_mnt, const char *from)
50155+{
50156+ __u32 needmode = GR_WRITE | GR_CREATE;
50157+ __u32 mode;
50158+
50159+ mode =
50160+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
50161+ GR_CREATE | GR_AUDIT_CREATE |
50162+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
50163+
50164+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
50165+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
50166+ return mode;
50167+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
50168+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
50169+ return 0;
50170+ } else if (unlikely((mode & needmode) != needmode))
50171+ return 0;
50172+
50173+ return (GR_WRITE | GR_CREATE);
50174+}
50175+
50176+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
50177+{
50178+ __u32 mode;
50179+
50180+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
50181+
50182+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
50183+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
50184+ return mode;
50185+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
50186+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
50187+ return 0;
50188+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
50189+ return 0;
50190+
50191+ return (reqmode);
50192+}
50193+
50194+__u32
50195+gr_acl_handle_mknod(const struct dentry * new_dentry,
50196+ const struct dentry * parent_dentry,
50197+ const struct vfsmount * parent_mnt,
50198+ const int mode)
50199+{
50200+ __u32 reqmode = GR_WRITE | GR_CREATE;
50201+ if (unlikely(mode & (S_ISUID | S_ISGID)))
50202+ reqmode |= GR_SETID;
50203+
50204+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
50205+ reqmode, GR_MKNOD_ACL_MSG);
50206+}
50207+
50208+__u32
50209+gr_acl_handle_mkdir(const struct dentry *new_dentry,
50210+ const struct dentry *parent_dentry,
50211+ const struct vfsmount *parent_mnt)
50212+{
50213+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
50214+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
50215+}
50216+
50217+#define RENAME_CHECK_SUCCESS(old, new) \
50218+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
50219+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
50220+
50221+int
50222+gr_acl_handle_rename(struct dentry *new_dentry,
50223+ struct dentry *parent_dentry,
50224+ const struct vfsmount *parent_mnt,
50225+ struct dentry *old_dentry,
50226+ struct inode *old_parent_inode,
50227+ struct vfsmount *old_mnt, const char *newname)
50228+{
50229+ __u32 comp1, comp2;
50230+ int error = 0;
50231+
50232+ if (unlikely(!gr_acl_is_enabled()))
50233+ return 0;
50234+
50235+ if (!new_dentry->d_inode) {
50236+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
50237+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
50238+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
50239+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
50240+ GR_DELETE | GR_AUDIT_DELETE |
50241+ GR_AUDIT_READ | GR_AUDIT_WRITE |
50242+ GR_SUPPRESS, old_mnt);
50243+ } else {
50244+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
50245+ GR_CREATE | GR_DELETE |
50246+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
50247+ GR_AUDIT_READ | GR_AUDIT_WRITE |
50248+ GR_SUPPRESS, parent_mnt);
50249+ comp2 =
50250+ gr_search_file(old_dentry,
50251+ GR_READ | GR_WRITE | GR_AUDIT_READ |
50252+ GR_DELETE | GR_AUDIT_DELETE |
50253+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
50254+ }
50255+
50256+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
50257+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
50258+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
50259+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
50260+ && !(comp2 & GR_SUPPRESS)) {
50261+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
50262+ error = -EACCES;
50263+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
50264+ error = -EACCES;
50265+
50266+ return error;
50267+}
50268+
50269+void
50270+gr_acl_handle_exit(void)
50271+{
50272+ u16 id;
50273+ char *rolename;
50274+ struct file *exec_file;
50275+
50276+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
50277+ !(current->role->roletype & GR_ROLE_PERSIST))) {
50278+ id = current->acl_role_id;
50279+ rolename = current->role->rolename;
50280+ gr_set_acls(1);
50281+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
50282+ }
50283+
50284+ write_lock(&grsec_exec_file_lock);
50285+ exec_file = current->exec_file;
50286+ current->exec_file = NULL;
50287+ write_unlock(&grsec_exec_file_lock);
50288+
50289+ if (exec_file)
50290+ fput(exec_file);
50291+}
50292+
50293+int
50294+gr_acl_handle_procpidmem(const struct task_struct *task)
50295+{
50296+ if (unlikely(!gr_acl_is_enabled()))
50297+ return 0;
50298+
50299+ if (task != current && task->acl->mode & GR_PROTPROCFD)
50300+ return -EACCES;
50301+
50302+ return 0;
50303+}
50304diff -urNp linux-3.0.4/grsecurity/gracl_ip.c linux-3.0.4/grsecurity/gracl_ip.c
50305--- linux-3.0.4/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
50306+++ linux-3.0.4/grsecurity/gracl_ip.c 2011-08-23 21:48:14.000000000 -0400
50307@@ -0,0 +1,381 @@
50308+#include <linux/kernel.h>
50309+#include <asm/uaccess.h>
50310+#include <asm/errno.h>
50311+#include <net/sock.h>
50312+#include <linux/file.h>
50313+#include <linux/fs.h>
50314+#include <linux/net.h>
50315+#include <linux/in.h>
50316+#include <linux/skbuff.h>
50317+#include <linux/ip.h>
50318+#include <linux/udp.h>
50319+#include <linux/types.h>
50320+#include <linux/sched.h>
50321+#include <linux/netdevice.h>
50322+#include <linux/inetdevice.h>
50323+#include <linux/gracl.h>
50324+#include <linux/grsecurity.h>
50325+#include <linux/grinternal.h>
50326+
50327+#define GR_BIND 0x01
50328+#define GR_CONNECT 0x02
50329+#define GR_INVERT 0x04
50330+#define GR_BINDOVERRIDE 0x08
50331+#define GR_CONNECTOVERRIDE 0x10
50332+#define GR_SOCK_FAMILY 0x20
50333+
50334+static const char * gr_protocols[IPPROTO_MAX] = {
50335+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
50336+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
50337+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
50338+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
50339+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
50340+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
50341+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
50342+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
50343+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
50344+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
50345+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
50346+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
50347+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
50348+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
50349+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
50350+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
50351+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
50352+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
50353+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
50354+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
50355+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
50356+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
50357+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
50358+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
50359+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
50360+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
50361+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
50362+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
50363+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
50364+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
50365+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
50366+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
50367+ };
50368+
50369+static const char * gr_socktypes[SOCK_MAX] = {
50370+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
50371+ "unknown:7", "unknown:8", "unknown:9", "packet"
50372+ };
50373+
50374+static const char * gr_sockfamilies[AF_MAX+1] = {
50375+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
50376+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
50377+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
50378+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
50379+ };
50380+
50381+const char *
50382+gr_proto_to_name(unsigned char proto)
50383+{
50384+ return gr_protocols[proto];
50385+}
50386+
50387+const char *
50388+gr_socktype_to_name(unsigned char type)
50389+{
50390+ return gr_socktypes[type];
50391+}
50392+
50393+const char *
50394+gr_sockfamily_to_name(unsigned char family)
50395+{
50396+ return gr_sockfamilies[family];
50397+}
50398+
50399+int
50400+gr_search_socket(const int domain, const int type, const int protocol)
50401+{
50402+ struct acl_subject_label *curr;
50403+ const struct cred *cred = current_cred();
50404+
50405+ if (unlikely(!gr_acl_is_enabled()))
50406+ goto exit;
50407+
50408+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
50409+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
50410+ goto exit; // let the kernel handle it
50411+
50412+ curr = current->acl;
50413+
50414+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
50415+ /* the family is allowed, if this is PF_INET allow it only if
50416+ the extra sock type/protocol checks pass */
50417+ if (domain == PF_INET)
50418+ goto inet_check;
50419+ goto exit;
50420+ } else {
50421+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
50422+ __u32 fakeip = 0;
50423+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
50424+ current->role->roletype, cred->uid,
50425+ cred->gid, current->exec_file ?
50426+ gr_to_filename(current->exec_file->f_path.dentry,
50427+ current->exec_file->f_path.mnt) :
50428+ curr->filename, curr->filename,
50429+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
50430+ &current->signal->saved_ip);
50431+ goto exit;
50432+ }
50433+ goto exit_fail;
50434+ }
50435+
50436+inet_check:
50437+ /* the rest of this checking is for IPv4 only */
50438+ if (!curr->ips)
50439+ goto exit;
50440+
50441+ if ((curr->ip_type & (1 << type)) &&
50442+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
50443+ goto exit;
50444+
50445+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
50446+ /* we don't place acls on raw sockets , and sometimes
50447+ dgram/ip sockets are opened for ioctl and not
50448+ bind/connect, so we'll fake a bind learn log */
50449+ if (type == SOCK_RAW || type == SOCK_PACKET) {
50450+ __u32 fakeip = 0;
50451+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
50452+ current->role->roletype, cred->uid,
50453+ cred->gid, current->exec_file ?
50454+ gr_to_filename(current->exec_file->f_path.dentry,
50455+ current->exec_file->f_path.mnt) :
50456+ curr->filename, curr->filename,
50457+ &fakeip, 0, type,
50458+ protocol, GR_CONNECT, &current->signal->saved_ip);
50459+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
50460+ __u32 fakeip = 0;
50461+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
50462+ current->role->roletype, cred->uid,
50463+ cred->gid, current->exec_file ?
50464+ gr_to_filename(current->exec_file->f_path.dentry,
50465+ current->exec_file->f_path.mnt) :
50466+ curr->filename, curr->filename,
50467+ &fakeip, 0, type,
50468+ protocol, GR_BIND, &current->signal->saved_ip);
50469+ }
50470+ /* we'll log when they use connect or bind */
50471+ goto exit;
50472+ }
50473+
50474+exit_fail:
50475+ if (domain == PF_INET)
50476+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
50477+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
50478+ else
50479+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
50480+ gr_socktype_to_name(type), protocol);
50481+
50482+ return 0;
50483+exit:
50484+ return 1;
50485+}
50486+
50487+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
50488+{
50489+ if ((ip->mode & mode) &&
50490+ (ip_port >= ip->low) &&
50491+ (ip_port <= ip->high) &&
50492+ ((ntohl(ip_addr) & our_netmask) ==
50493+ (ntohl(our_addr) & our_netmask))
50494+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
50495+ && (ip->type & (1 << type))) {
50496+ if (ip->mode & GR_INVERT)
50497+ return 2; // specifically denied
50498+ else
50499+ return 1; // allowed
50500+ }
50501+
50502+ return 0; // not specifically allowed, may continue parsing
50503+}
50504+
50505+static int
50506+gr_search_connectbind(const int full_mode, struct sock *sk,
50507+ struct sockaddr_in *addr, const int type)
50508+{
50509+ char iface[IFNAMSIZ] = {0};
50510+ struct acl_subject_label *curr;
50511+ struct acl_ip_label *ip;
50512+ struct inet_sock *isk;
50513+ struct net_device *dev;
50514+ struct in_device *idev;
50515+ unsigned long i;
50516+ int ret;
50517+ int mode = full_mode & (GR_BIND | GR_CONNECT);
50518+ __u32 ip_addr = 0;
50519+ __u32 our_addr;
50520+ __u32 our_netmask;
50521+ char *p;
50522+ __u16 ip_port = 0;
50523+ const struct cred *cred = current_cred();
50524+
50525+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
50526+ return 0;
50527+
50528+ curr = current->acl;
50529+ isk = inet_sk(sk);
50530+
50531+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
50532+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
50533+ addr->sin_addr.s_addr = curr->inaddr_any_override;
50534+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
50535+ struct sockaddr_in saddr;
50536+ int err;
50537+
50538+ saddr.sin_family = AF_INET;
50539+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
50540+ saddr.sin_port = isk->inet_sport;
50541+
50542+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
50543+ if (err)
50544+ return err;
50545+
50546+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
50547+ if (err)
50548+ return err;
50549+ }
50550+
50551+ if (!curr->ips)
50552+ return 0;
50553+
50554+ ip_addr = addr->sin_addr.s_addr;
50555+ ip_port = ntohs(addr->sin_port);
50556+
50557+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
50558+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
50559+ current->role->roletype, cred->uid,
50560+ cred->gid, current->exec_file ?
50561+ gr_to_filename(current->exec_file->f_path.dentry,
50562+ current->exec_file->f_path.mnt) :
50563+ curr->filename, curr->filename,
50564+ &ip_addr, ip_port, type,
50565+ sk->sk_protocol, mode, &current->signal->saved_ip);
50566+ return 0;
50567+ }
50568+
50569+ for (i = 0; i < curr->ip_num; i++) {
50570+ ip = *(curr->ips + i);
50571+ if (ip->iface != NULL) {
50572+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
50573+ p = strchr(iface, ':');
50574+ if (p != NULL)
50575+ *p = '\0';
50576+ dev = dev_get_by_name(sock_net(sk), iface);
50577+ if (dev == NULL)
50578+ continue;
50579+ idev = in_dev_get(dev);
50580+ if (idev == NULL) {
50581+ dev_put(dev);
50582+ continue;
50583+ }
50584+ rcu_read_lock();
50585+ for_ifa(idev) {
50586+ if (!strcmp(ip->iface, ifa->ifa_label)) {
50587+ our_addr = ifa->ifa_address;
50588+ our_netmask = 0xffffffff;
50589+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
50590+ if (ret == 1) {
50591+ rcu_read_unlock();
50592+ in_dev_put(idev);
50593+ dev_put(dev);
50594+ return 0;
50595+ } else if (ret == 2) {
50596+ rcu_read_unlock();
50597+ in_dev_put(idev);
50598+ dev_put(dev);
50599+ goto denied;
50600+ }
50601+ }
50602+ } endfor_ifa(idev);
50603+ rcu_read_unlock();
50604+ in_dev_put(idev);
50605+ dev_put(dev);
50606+ } else {
50607+ our_addr = ip->addr;
50608+ our_netmask = ip->netmask;
50609+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
50610+ if (ret == 1)
50611+ return 0;
50612+ else if (ret == 2)
50613+ goto denied;
50614+ }
50615+ }
50616+
50617+denied:
50618+ if (mode == GR_BIND)
50619+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
50620+ else if (mode == GR_CONNECT)
50621+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
50622+
50623+ return -EACCES;
50624+}
50625+
50626+int
50627+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
50628+{
50629+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
50630+}
50631+
50632+int
50633+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
50634+{
50635+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
50636+}
50637+
50638+int gr_search_listen(struct socket *sock)
50639+{
50640+ struct sock *sk = sock->sk;
50641+ struct sockaddr_in addr;
50642+
50643+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
50644+ addr.sin_port = inet_sk(sk)->inet_sport;
50645+
50646+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
50647+}
50648+
50649+int gr_search_accept(struct socket *sock)
50650+{
50651+ struct sock *sk = sock->sk;
50652+ struct sockaddr_in addr;
50653+
50654+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
50655+ addr.sin_port = inet_sk(sk)->inet_sport;
50656+
50657+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
50658+}
50659+
50660+int
50661+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
50662+{
50663+ if (addr)
50664+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
50665+ else {
50666+ struct sockaddr_in sin;
50667+ const struct inet_sock *inet = inet_sk(sk);
50668+
50669+ sin.sin_addr.s_addr = inet->inet_daddr;
50670+ sin.sin_port = inet->inet_dport;
50671+
50672+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
50673+ }
50674+}
50675+
50676+int
50677+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
50678+{
50679+ struct sockaddr_in sin;
50680+
50681+ if (unlikely(skb->len < sizeof (struct udphdr)))
50682+ return 0; // skip this packet
50683+
50684+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
50685+ sin.sin_port = udp_hdr(skb)->source;
50686+
50687+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
50688+}
50689diff -urNp linux-3.0.4/grsecurity/gracl_learn.c linux-3.0.4/grsecurity/gracl_learn.c
50690--- linux-3.0.4/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
50691+++ linux-3.0.4/grsecurity/gracl_learn.c 2011-08-23 21:48:14.000000000 -0400
50692@@ -0,0 +1,207 @@
50693+#include <linux/kernel.h>
50694+#include <linux/mm.h>
50695+#include <linux/sched.h>
50696+#include <linux/poll.h>
50697+#include <linux/string.h>
50698+#include <linux/file.h>
50699+#include <linux/types.h>
50700+#include <linux/vmalloc.h>
50701+#include <linux/grinternal.h>
50702+
50703+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
50704+ size_t count, loff_t *ppos);
50705+extern int gr_acl_is_enabled(void);
50706+
50707+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
50708+static int gr_learn_attached;
50709+
50710+/* use a 512k buffer */
50711+#define LEARN_BUFFER_SIZE (512 * 1024)
50712+
50713+static DEFINE_SPINLOCK(gr_learn_lock);
50714+static DEFINE_MUTEX(gr_learn_user_mutex);
50715+
50716+/* we need to maintain two buffers, so that the kernel context of grlearn
50717+ uses a semaphore around the userspace copying, and the other kernel contexts
50718+ use a spinlock when copying into the buffer, since they cannot sleep
50719+*/
50720+static char *learn_buffer;
50721+static char *learn_buffer_user;
50722+static int learn_buffer_len;
50723+static int learn_buffer_user_len;
50724+
50725+static ssize_t
50726+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
50727+{
50728+ DECLARE_WAITQUEUE(wait, current);
50729+ ssize_t retval = 0;
50730+
50731+ add_wait_queue(&learn_wait, &wait);
50732+ set_current_state(TASK_INTERRUPTIBLE);
50733+ do {
50734+ mutex_lock(&gr_learn_user_mutex);
50735+ spin_lock(&gr_learn_lock);
50736+ if (learn_buffer_len)
50737+ break;
50738+ spin_unlock(&gr_learn_lock);
50739+ mutex_unlock(&gr_learn_user_mutex);
50740+ if (file->f_flags & O_NONBLOCK) {
50741+ retval = -EAGAIN;
50742+ goto out;
50743+ }
50744+ if (signal_pending(current)) {
50745+ retval = -ERESTARTSYS;
50746+ goto out;
50747+ }
50748+
50749+ schedule();
50750+ } while (1);
50751+
50752+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
50753+ learn_buffer_user_len = learn_buffer_len;
50754+ retval = learn_buffer_len;
50755+ learn_buffer_len = 0;
50756+
50757+ spin_unlock(&gr_learn_lock);
50758+
50759+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
50760+ retval = -EFAULT;
50761+
50762+ mutex_unlock(&gr_learn_user_mutex);
50763+out:
50764+ set_current_state(TASK_RUNNING);
50765+ remove_wait_queue(&learn_wait, &wait);
50766+ return retval;
50767+}
50768+
50769+static unsigned int
50770+poll_learn(struct file * file, poll_table * wait)
50771+{
50772+ poll_wait(file, &learn_wait, wait);
50773+
50774+ if (learn_buffer_len)
50775+ return (POLLIN | POLLRDNORM);
50776+
50777+ return 0;
50778+}
50779+
50780+void
50781+gr_clear_learn_entries(void)
50782+{
50783+ char *tmp;
50784+
50785+ mutex_lock(&gr_learn_user_mutex);
50786+ spin_lock(&gr_learn_lock);
50787+ tmp = learn_buffer;
50788+ learn_buffer = NULL;
50789+ spin_unlock(&gr_learn_lock);
50790+ if (tmp)
50791+ vfree(tmp);
50792+ if (learn_buffer_user != NULL) {
50793+ vfree(learn_buffer_user);
50794+ learn_buffer_user = NULL;
50795+ }
50796+ learn_buffer_len = 0;
50797+ mutex_unlock(&gr_learn_user_mutex);
50798+
50799+ return;
50800+}
50801+
50802+void
50803+gr_add_learn_entry(const char *fmt, ...)
50804+{
50805+ va_list args;
50806+ unsigned int len;
50807+
50808+ if (!gr_learn_attached)
50809+ return;
50810+
50811+ spin_lock(&gr_learn_lock);
50812+
50813+ /* leave a gap at the end so we know when it's "full" but don't have to
50814+ compute the exact length of the string we're trying to append
50815+ */
50816+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
50817+ spin_unlock(&gr_learn_lock);
50818+ wake_up_interruptible(&learn_wait);
50819+ return;
50820+ }
50821+ if (learn_buffer == NULL) {
50822+ spin_unlock(&gr_learn_lock);
50823+ return;
50824+ }
50825+
50826+ va_start(args, fmt);
50827+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
50828+ va_end(args);
50829+
50830+ learn_buffer_len += len + 1;
50831+
50832+ spin_unlock(&gr_learn_lock);
50833+ wake_up_interruptible(&learn_wait);
50834+
50835+ return;
50836+}
50837+
50838+static int
50839+open_learn(struct inode *inode, struct file *file)
50840+{
50841+ if (file->f_mode & FMODE_READ && gr_learn_attached)
50842+ return -EBUSY;
50843+ if (file->f_mode & FMODE_READ) {
50844+ int retval = 0;
50845+ mutex_lock(&gr_learn_user_mutex);
50846+ if (learn_buffer == NULL)
50847+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
50848+ if (learn_buffer_user == NULL)
50849+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
50850+ if (learn_buffer == NULL) {
50851+ retval = -ENOMEM;
50852+ goto out_error;
50853+ }
50854+ if (learn_buffer_user == NULL) {
50855+ retval = -ENOMEM;
50856+ goto out_error;
50857+ }
50858+ learn_buffer_len = 0;
50859+ learn_buffer_user_len = 0;
50860+ gr_learn_attached = 1;
50861+out_error:
50862+ mutex_unlock(&gr_learn_user_mutex);
50863+ return retval;
50864+ }
50865+ return 0;
50866+}
50867+
50868+static int
50869+close_learn(struct inode *inode, struct file *file)
50870+{
50871+ if (file->f_mode & FMODE_READ) {
50872+ char *tmp = NULL;
50873+ mutex_lock(&gr_learn_user_mutex);
50874+ spin_lock(&gr_learn_lock);
50875+ tmp = learn_buffer;
50876+ learn_buffer = NULL;
50877+ spin_unlock(&gr_learn_lock);
50878+ if (tmp)
50879+ vfree(tmp);
50880+ if (learn_buffer_user != NULL) {
50881+ vfree(learn_buffer_user);
50882+ learn_buffer_user = NULL;
50883+ }
50884+ learn_buffer_len = 0;
50885+ learn_buffer_user_len = 0;
50886+ gr_learn_attached = 0;
50887+ mutex_unlock(&gr_learn_user_mutex);
50888+ }
50889+
50890+ return 0;
50891+}
50892+
50893+const struct file_operations grsec_fops = {
50894+ .read = read_learn,
50895+ .write = write_grsec_handler,
50896+ .open = open_learn,
50897+ .release = close_learn,
50898+ .poll = poll_learn,
50899+};
50900diff -urNp linux-3.0.4/grsecurity/gracl_res.c linux-3.0.4/grsecurity/gracl_res.c
50901--- linux-3.0.4/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
50902+++ linux-3.0.4/grsecurity/gracl_res.c 2011-08-23 21:48:14.000000000 -0400
50903@@ -0,0 +1,68 @@
50904+#include <linux/kernel.h>
50905+#include <linux/sched.h>
50906+#include <linux/gracl.h>
50907+#include <linux/grinternal.h>
50908+
50909+static const char *restab_log[] = {
50910+ [RLIMIT_CPU] = "RLIMIT_CPU",
50911+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
50912+ [RLIMIT_DATA] = "RLIMIT_DATA",
50913+ [RLIMIT_STACK] = "RLIMIT_STACK",
50914+ [RLIMIT_CORE] = "RLIMIT_CORE",
50915+ [RLIMIT_RSS] = "RLIMIT_RSS",
50916+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
50917+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
50918+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
50919+ [RLIMIT_AS] = "RLIMIT_AS",
50920+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
50921+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
50922+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
50923+ [RLIMIT_NICE] = "RLIMIT_NICE",
50924+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
50925+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
50926+ [GR_CRASH_RES] = "RLIMIT_CRASH"
50927+};
50928+
50929+void
50930+gr_log_resource(const struct task_struct *task,
50931+ const int res, const unsigned long wanted, const int gt)
50932+{
50933+ const struct cred *cred;
50934+ unsigned long rlim;
50935+
50936+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
50937+ return;
50938+
50939+ // not yet supported resource
50940+ if (unlikely(!restab_log[res]))
50941+ return;
50942+
50943+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
50944+ rlim = task_rlimit_max(task, res);
50945+ else
50946+ rlim = task_rlimit(task, res);
50947+
50948+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
50949+ return;
50950+
50951+ rcu_read_lock();
50952+ cred = __task_cred(task);
50953+
50954+ if (res == RLIMIT_NPROC &&
50955+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
50956+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
50957+ goto out_rcu_unlock;
50958+ else if (res == RLIMIT_MEMLOCK &&
50959+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
50960+ goto out_rcu_unlock;
50961+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
50962+ goto out_rcu_unlock;
50963+ rcu_read_unlock();
50964+
50965+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
50966+
50967+ return;
50968+out_rcu_unlock:
50969+ rcu_read_unlock();
50970+ return;
50971+}
50972diff -urNp linux-3.0.4/grsecurity/gracl_segv.c linux-3.0.4/grsecurity/gracl_segv.c
50973--- linux-3.0.4/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
50974+++ linux-3.0.4/grsecurity/gracl_segv.c 2011-08-23 21:48:14.000000000 -0400
50975@@ -0,0 +1,299 @@
50976+#include <linux/kernel.h>
50977+#include <linux/mm.h>
50978+#include <asm/uaccess.h>
50979+#include <asm/errno.h>
50980+#include <asm/mman.h>
50981+#include <net/sock.h>
50982+#include <linux/file.h>
50983+#include <linux/fs.h>
50984+#include <linux/net.h>
50985+#include <linux/in.h>
50986+#include <linux/slab.h>
50987+#include <linux/types.h>
50988+#include <linux/sched.h>
50989+#include <linux/timer.h>
50990+#include <linux/gracl.h>
50991+#include <linux/grsecurity.h>
50992+#include <linux/grinternal.h>
50993+
50994+static struct crash_uid *uid_set;
50995+static unsigned short uid_used;
50996+static DEFINE_SPINLOCK(gr_uid_lock);
50997+extern rwlock_t gr_inode_lock;
50998+extern struct acl_subject_label *
50999+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
51000+ struct acl_role_label *role);
51001+
51002+#ifdef CONFIG_BTRFS_FS
51003+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
51004+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
51005+#endif
51006+
51007+static inline dev_t __get_dev(const struct dentry *dentry)
51008+{
51009+#ifdef CONFIG_BTRFS_FS
51010+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
51011+ return get_btrfs_dev_from_inode(dentry->d_inode);
51012+ else
51013+#endif
51014+ return dentry->d_inode->i_sb->s_dev;
51015+}
51016+
51017+int
51018+gr_init_uidset(void)
51019+{
51020+ uid_set =
51021+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
51022+ uid_used = 0;
51023+
51024+ return uid_set ? 1 : 0;
51025+}
51026+
51027+void
51028+gr_free_uidset(void)
51029+{
51030+ if (uid_set)
51031+ kfree(uid_set);
51032+
51033+ return;
51034+}
51035+
51036+int
51037+gr_find_uid(const uid_t uid)
51038+{
51039+ struct crash_uid *tmp = uid_set;
51040+ uid_t buid;
51041+ int low = 0, high = uid_used - 1, mid;
51042+
51043+ while (high >= low) {
51044+ mid = (low + high) >> 1;
51045+ buid = tmp[mid].uid;
51046+ if (buid == uid)
51047+ return mid;
51048+ if (buid > uid)
51049+ high = mid - 1;
51050+ if (buid < uid)
51051+ low = mid + 1;
51052+ }
51053+
51054+ return -1;
51055+}
51056+
51057+static __inline__ void
51058+gr_insertsort(void)
51059+{
51060+ unsigned short i, j;
51061+ struct crash_uid index;
51062+
51063+ for (i = 1; i < uid_used; i++) {
51064+ index = uid_set[i];
51065+ j = i;
51066+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
51067+ uid_set[j] = uid_set[j - 1];
51068+ j--;
51069+ }
51070+ uid_set[j] = index;
51071+ }
51072+
51073+ return;
51074+}
51075+
51076+static __inline__ void
51077+gr_insert_uid(const uid_t uid, const unsigned long expires)
51078+{
51079+ int loc;
51080+
51081+ if (uid_used == GR_UIDTABLE_MAX)
51082+ return;
51083+
51084+ loc = gr_find_uid(uid);
51085+
51086+ if (loc >= 0) {
51087+ uid_set[loc].expires = expires;
51088+ return;
51089+ }
51090+
51091+ uid_set[uid_used].uid = uid;
51092+ uid_set[uid_used].expires = expires;
51093+ uid_used++;
51094+
51095+ gr_insertsort();
51096+
51097+ return;
51098+}
51099+
51100+void
51101+gr_remove_uid(const unsigned short loc)
51102+{
51103+ unsigned short i;
51104+
51105+ for (i = loc + 1; i < uid_used; i++)
51106+ uid_set[i - 1] = uid_set[i];
51107+
51108+ uid_used--;
51109+
51110+ return;
51111+}
51112+
51113+int
51114+gr_check_crash_uid(const uid_t uid)
51115+{
51116+ int loc;
51117+ int ret = 0;
51118+
51119+ if (unlikely(!gr_acl_is_enabled()))
51120+ return 0;
51121+
51122+ spin_lock(&gr_uid_lock);
51123+ loc = gr_find_uid(uid);
51124+
51125+ if (loc < 0)
51126+ goto out_unlock;
51127+
51128+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
51129+ gr_remove_uid(loc);
51130+ else
51131+ ret = 1;
51132+
51133+out_unlock:
51134+ spin_unlock(&gr_uid_lock);
51135+ return ret;
51136+}
51137+
51138+static __inline__ int
51139+proc_is_setxid(const struct cred *cred)
51140+{
51141+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
51142+ cred->uid != cred->fsuid)
51143+ return 1;
51144+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
51145+ cred->gid != cred->fsgid)
51146+ return 1;
51147+
51148+ return 0;
51149+}
51150+
51151+extern int gr_fake_force_sig(int sig, struct task_struct *t);
51152+
51153+void
51154+gr_handle_crash(struct task_struct *task, const int sig)
51155+{
51156+ struct acl_subject_label *curr;
51157+ struct acl_subject_label *curr2;
51158+ struct task_struct *tsk, *tsk2;
51159+ const struct cred *cred;
51160+ const struct cred *cred2;
51161+
51162+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
51163+ return;
51164+
51165+ if (unlikely(!gr_acl_is_enabled()))
51166+ return;
51167+
51168+ curr = task->acl;
51169+
51170+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
51171+ return;
51172+
51173+ if (time_before_eq(curr->expires, get_seconds())) {
51174+ curr->expires = 0;
51175+ curr->crashes = 0;
51176+ }
51177+
51178+ curr->crashes++;
51179+
51180+ if (!curr->expires)
51181+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
51182+
51183+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
51184+ time_after(curr->expires, get_seconds())) {
51185+ rcu_read_lock();
51186+ cred = __task_cred(task);
51187+ if (cred->uid && proc_is_setxid(cred)) {
51188+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
51189+ spin_lock(&gr_uid_lock);
51190+ gr_insert_uid(cred->uid, curr->expires);
51191+ spin_unlock(&gr_uid_lock);
51192+ curr->expires = 0;
51193+ curr->crashes = 0;
51194+ read_lock(&tasklist_lock);
51195+ do_each_thread(tsk2, tsk) {
51196+ cred2 = __task_cred(tsk);
51197+ if (tsk != task && cred2->uid == cred->uid)
51198+ gr_fake_force_sig(SIGKILL, tsk);
51199+ } while_each_thread(tsk2, tsk);
51200+ read_unlock(&tasklist_lock);
51201+ } else {
51202+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
51203+ read_lock(&tasklist_lock);
51204+ do_each_thread(tsk2, tsk) {
51205+ if (likely(tsk != task)) {
51206+ curr2 = tsk->acl;
51207+
51208+ if (curr2->device == curr->device &&
51209+ curr2->inode == curr->inode)
51210+ gr_fake_force_sig(SIGKILL, tsk);
51211+ }
51212+ } while_each_thread(tsk2, tsk);
51213+ read_unlock(&tasklist_lock);
51214+ }
51215+ rcu_read_unlock();
51216+ }
51217+
51218+ return;
51219+}
51220+
51221+int
51222+gr_check_crash_exec(const struct file *filp)
51223+{
51224+ struct acl_subject_label *curr;
51225+
51226+ if (unlikely(!gr_acl_is_enabled()))
51227+ return 0;
51228+
51229+ read_lock(&gr_inode_lock);
51230+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
51231+ __get_dev(filp->f_path.dentry),
51232+ current->role);
51233+ read_unlock(&gr_inode_lock);
51234+
51235+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
51236+ (!curr->crashes && !curr->expires))
51237+ return 0;
51238+
51239+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
51240+ time_after(curr->expires, get_seconds()))
51241+ return 1;
51242+ else if (time_before_eq(curr->expires, get_seconds())) {
51243+ curr->crashes = 0;
51244+ curr->expires = 0;
51245+ }
51246+
51247+ return 0;
51248+}
51249+
51250+void
51251+gr_handle_alertkill(struct task_struct *task)
51252+{
51253+ struct acl_subject_label *curracl;
51254+ __u32 curr_ip;
51255+ struct task_struct *p, *p2;
51256+
51257+ if (unlikely(!gr_acl_is_enabled()))
51258+ return;
51259+
51260+ curracl = task->acl;
51261+ curr_ip = task->signal->curr_ip;
51262+
51263+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
51264+ read_lock(&tasklist_lock);
51265+ do_each_thread(p2, p) {
51266+ if (p->signal->curr_ip == curr_ip)
51267+ gr_fake_force_sig(SIGKILL, p);
51268+ } while_each_thread(p2, p);
51269+ read_unlock(&tasklist_lock);
51270+ } else if (curracl->mode & GR_KILLPROC)
51271+ gr_fake_force_sig(SIGKILL, task);
51272+
51273+ return;
51274+}
51275diff -urNp linux-3.0.4/grsecurity/gracl_shm.c linux-3.0.4/grsecurity/gracl_shm.c
51276--- linux-3.0.4/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
51277+++ linux-3.0.4/grsecurity/gracl_shm.c 2011-08-23 21:48:14.000000000 -0400
51278@@ -0,0 +1,40 @@
51279+#include <linux/kernel.h>
51280+#include <linux/mm.h>
51281+#include <linux/sched.h>
51282+#include <linux/file.h>
51283+#include <linux/ipc.h>
51284+#include <linux/gracl.h>
51285+#include <linux/grsecurity.h>
51286+#include <linux/grinternal.h>
51287+
51288+int
51289+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
51290+ const time_t shm_createtime, const uid_t cuid, const int shmid)
51291+{
51292+ struct task_struct *task;
51293+
51294+ if (!gr_acl_is_enabled())
51295+ return 1;
51296+
51297+ rcu_read_lock();
51298+ read_lock(&tasklist_lock);
51299+
51300+ task = find_task_by_vpid(shm_cprid);
51301+
51302+ if (unlikely(!task))
51303+ task = find_task_by_vpid(shm_lapid);
51304+
51305+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
51306+ (task->pid == shm_lapid)) &&
51307+ (task->acl->mode & GR_PROTSHM) &&
51308+ (task->acl != current->acl))) {
51309+ read_unlock(&tasklist_lock);
51310+ rcu_read_unlock();
51311+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
51312+ return 0;
51313+ }
51314+ read_unlock(&tasklist_lock);
51315+ rcu_read_unlock();
51316+
51317+ return 1;
51318+}
51319diff -urNp linux-3.0.4/grsecurity/grsec_chdir.c linux-3.0.4/grsecurity/grsec_chdir.c
51320--- linux-3.0.4/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
51321+++ linux-3.0.4/grsecurity/grsec_chdir.c 2011-08-23 21:48:14.000000000 -0400
51322@@ -0,0 +1,19 @@
51323+#include <linux/kernel.h>
51324+#include <linux/sched.h>
51325+#include <linux/fs.h>
51326+#include <linux/file.h>
51327+#include <linux/grsecurity.h>
51328+#include <linux/grinternal.h>
51329+
51330+void
51331+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
51332+{
51333+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
51334+ if ((grsec_enable_chdir && grsec_enable_group &&
51335+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
51336+ !grsec_enable_group)) {
51337+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
51338+ }
51339+#endif
51340+ return;
51341+}
51342diff -urNp linux-3.0.4/grsecurity/grsec_chroot.c linux-3.0.4/grsecurity/grsec_chroot.c
51343--- linux-3.0.4/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
51344+++ linux-3.0.4/grsecurity/grsec_chroot.c 2011-09-15 06:47:48.000000000 -0400
51345@@ -0,0 +1,351 @@
51346+#include <linux/kernel.h>
51347+#include <linux/module.h>
51348+#include <linux/sched.h>
51349+#include <linux/file.h>
51350+#include <linux/fs.h>
51351+#include <linux/mount.h>
51352+#include <linux/types.h>
51353+#include <linux/pid_namespace.h>
51354+#include <linux/grsecurity.h>
51355+#include <linux/grinternal.h>
51356+
51357+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
51358+{
51359+#ifdef CONFIG_GRKERNSEC
51360+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
51361+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
51362+ task->gr_is_chrooted = 1;
51363+ else
51364+ task->gr_is_chrooted = 0;
51365+
51366+ task->gr_chroot_dentry = path->dentry;
51367+#endif
51368+ return;
51369+}
51370+
51371+void gr_clear_chroot_entries(struct task_struct *task)
51372+{
51373+#ifdef CONFIG_GRKERNSEC
51374+ task->gr_is_chrooted = 0;
51375+ task->gr_chroot_dentry = NULL;
51376+#endif
51377+ return;
51378+}
51379+
51380+int
51381+gr_handle_chroot_unix(const pid_t pid)
51382+{
51383+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
51384+ struct task_struct *p;
51385+
51386+ if (unlikely(!grsec_enable_chroot_unix))
51387+ return 1;
51388+
51389+ if (likely(!proc_is_chrooted(current)))
51390+ return 1;
51391+
51392+ rcu_read_lock();
51393+ read_lock(&tasklist_lock);
51394+ p = find_task_by_vpid_unrestricted(pid);
51395+ if (unlikely(p && !have_same_root(current, p))) {
51396+ read_unlock(&tasklist_lock);
51397+ rcu_read_unlock();
51398+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
51399+ return 0;
51400+ }
51401+ read_unlock(&tasklist_lock);
51402+ rcu_read_unlock();
51403+#endif
51404+ return 1;
51405+}
51406+
51407+int
51408+gr_handle_chroot_nice(void)
51409+{
51410+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
51411+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
51412+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
51413+ return -EPERM;
51414+ }
51415+#endif
51416+ return 0;
51417+}
51418+
51419+int
51420+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
51421+{
51422+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
51423+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
51424+ && proc_is_chrooted(current)) {
51425+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
51426+ return -EACCES;
51427+ }
51428+#endif
51429+ return 0;
51430+}
51431+
51432+int
51433+gr_handle_chroot_rawio(const struct inode *inode)
51434+{
51435+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
51436+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
51437+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
51438+ return 1;
51439+#endif
51440+ return 0;
51441+}
51442+
51443+int
51444+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
51445+{
51446+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
51447+ struct task_struct *p;
51448+ int ret = 0;
51449+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
51450+ return ret;
51451+
51452+ read_lock(&tasklist_lock);
51453+ do_each_pid_task(pid, type, p) {
51454+ if (!have_same_root(current, p)) {
51455+ ret = 1;
51456+ goto out;
51457+ }
51458+ } while_each_pid_task(pid, type, p);
51459+out:
51460+ read_unlock(&tasklist_lock);
51461+ return ret;
51462+#endif
51463+ return 0;
51464+}
51465+
51466+int
51467+gr_pid_is_chrooted(struct task_struct *p)
51468+{
51469+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
51470+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
51471+ return 0;
51472+
51473+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
51474+ !have_same_root(current, p)) {
51475+ return 1;
51476+ }
51477+#endif
51478+ return 0;
51479+}
51480+
51481+EXPORT_SYMBOL(gr_pid_is_chrooted);
51482+
51483+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
51484+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
51485+{
51486+ struct path path, currentroot;
51487+ int ret = 0;
51488+
51489+ path.dentry = (struct dentry *)u_dentry;
51490+ path.mnt = (struct vfsmount *)u_mnt;
51491+ get_fs_root(current->fs, &currentroot);
51492+ if (path_is_under(&path, &currentroot))
51493+ ret = 1;
51494+ path_put(&currentroot);
51495+
51496+ return ret;
51497+}
51498+#endif
51499+
51500+int
51501+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
51502+{
51503+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
51504+ if (!grsec_enable_chroot_fchdir)
51505+ return 1;
51506+
51507+ if (!proc_is_chrooted(current))
51508+ return 1;
51509+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
51510+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
51511+ return 0;
51512+ }
51513+#endif
51514+ return 1;
51515+}
51516+
51517+int
51518+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
51519+ const time_t shm_createtime)
51520+{
51521+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
51522+ struct task_struct *p;
51523+ time_t starttime;
51524+
51525+ if (unlikely(!grsec_enable_chroot_shmat))
51526+ return 1;
51527+
51528+ if (likely(!proc_is_chrooted(current)))
51529+ return 1;
51530+
51531+ rcu_read_lock();
51532+ read_lock(&tasklist_lock);
51533+
51534+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
51535+ starttime = p->start_time.tv_sec;
51536+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
51537+ if (have_same_root(current, p)) {
51538+ goto allow;
51539+ } else {
51540+ read_unlock(&tasklist_lock);
51541+ rcu_read_unlock();
51542+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
51543+ return 0;
51544+ }
51545+ }
51546+ /* creator exited, pid reuse, fall through to next check */
51547+ }
51548+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
51549+ if (unlikely(!have_same_root(current, p))) {
51550+ read_unlock(&tasklist_lock);
51551+ rcu_read_unlock();
51552+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
51553+ return 0;
51554+ }
51555+ }
51556+
51557+allow:
51558+ read_unlock(&tasklist_lock);
51559+ rcu_read_unlock();
51560+#endif
51561+ return 1;
51562+}
51563+
51564+void
51565+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
51566+{
51567+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
51568+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
51569+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
51570+#endif
51571+ return;
51572+}
51573+
51574+int
51575+gr_handle_chroot_mknod(const struct dentry *dentry,
51576+ const struct vfsmount *mnt, const int mode)
51577+{
51578+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
51579+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
51580+ proc_is_chrooted(current)) {
51581+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
51582+ return -EPERM;
51583+ }
51584+#endif
51585+ return 0;
51586+}
51587+
51588+int
51589+gr_handle_chroot_mount(const struct dentry *dentry,
51590+ const struct vfsmount *mnt, const char *dev_name)
51591+{
51592+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
51593+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
51594+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
51595+ return -EPERM;
51596+ }
51597+#endif
51598+ return 0;
51599+}
51600+
51601+int
51602+gr_handle_chroot_pivot(void)
51603+{
51604+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
51605+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
51606+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
51607+ return -EPERM;
51608+ }
51609+#endif
51610+ return 0;
51611+}
51612+
51613+int
51614+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
51615+{
51616+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
51617+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
51618+ !gr_is_outside_chroot(dentry, mnt)) {
51619+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
51620+ return -EPERM;
51621+ }
51622+#endif
51623+ return 0;
51624+}
51625+
51626+extern const char *captab_log[];
51627+extern int captab_log_entries;
51628+
51629+int
51630+gr_chroot_is_capable(const int cap)
51631+{
51632+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
51633+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
51634+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
51635+ if (cap_raised(chroot_caps, cap)) {
51636+ const struct cred *creds = current_cred();
51637+ if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
51638+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
51639+ }
51640+ return 0;
51641+ }
51642+ }
51643+#endif
51644+ return 1;
51645+}
51646+
51647+int
51648+gr_chroot_is_capable_nolog(const int cap)
51649+{
51650+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
51651+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
51652+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
51653+ if (cap_raised(chroot_caps, cap)) {
51654+ return 0;
51655+ }
51656+ }
51657+#endif
51658+ return 1;
51659+}
51660+
51661+int
51662+gr_handle_chroot_sysctl(const int op)
51663+{
51664+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
51665+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
51666+ proc_is_chrooted(current))
51667+ return -EACCES;
51668+#endif
51669+ return 0;
51670+}
51671+
51672+void
51673+gr_handle_chroot_chdir(struct path *path)
51674+{
51675+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
51676+ if (grsec_enable_chroot_chdir)
51677+ set_fs_pwd(current->fs, path);
51678+#endif
51679+ return;
51680+}
51681+
51682+int
51683+gr_handle_chroot_chmod(const struct dentry *dentry,
51684+ const struct vfsmount *mnt, const int mode)
51685+{
51686+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
51687+ /* allow chmod +s on directories, but not files */
51688+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
51689+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
51690+ proc_is_chrooted(current)) {
51691+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
51692+ return -EPERM;
51693+ }
51694+#endif
51695+ return 0;
51696+}
51697diff -urNp linux-3.0.4/grsecurity/grsec_disabled.c linux-3.0.4/grsecurity/grsec_disabled.c
51698--- linux-3.0.4/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
51699+++ linux-3.0.4/grsecurity/grsec_disabled.c 2011-09-24 08:13:01.000000000 -0400
51700@@ -0,0 +1,433 @@
51701+#include <linux/kernel.h>
51702+#include <linux/module.h>
51703+#include <linux/sched.h>
51704+#include <linux/file.h>
51705+#include <linux/fs.h>
51706+#include <linux/kdev_t.h>
51707+#include <linux/net.h>
51708+#include <linux/in.h>
51709+#include <linux/ip.h>
51710+#include <linux/skbuff.h>
51711+#include <linux/sysctl.h>
51712+
51713+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
51714+void
51715+pax_set_initial_flags(struct linux_binprm *bprm)
51716+{
51717+ return;
51718+}
51719+#endif
51720+
51721+#ifdef CONFIG_SYSCTL
51722+__u32
51723+gr_handle_sysctl(const struct ctl_table * table, const int op)
51724+{
51725+ return 0;
51726+}
51727+#endif
51728+
51729+#ifdef CONFIG_TASKSTATS
51730+int gr_is_taskstats_denied(int pid)
51731+{
51732+ return 0;
51733+}
51734+#endif
51735+
51736+int
51737+gr_acl_is_enabled(void)
51738+{
51739+ return 0;
51740+}
51741+
51742+int
51743+gr_handle_rawio(const struct inode *inode)
51744+{
51745+ return 0;
51746+}
51747+
51748+void
51749+gr_acl_handle_psacct(struct task_struct *task, const long code)
51750+{
51751+ return;
51752+}
51753+
51754+int
51755+gr_handle_ptrace(struct task_struct *task, const long request)
51756+{
51757+ return 0;
51758+}
51759+
51760+int
51761+gr_handle_proc_ptrace(struct task_struct *task)
51762+{
51763+ return 0;
51764+}
51765+
51766+void
51767+gr_learn_resource(const struct task_struct *task,
51768+ const int res, const unsigned long wanted, const int gt)
51769+{
51770+ return;
51771+}
51772+
51773+int
51774+gr_set_acls(const int type)
51775+{
51776+ return 0;
51777+}
51778+
51779+int
51780+gr_check_hidden_task(const struct task_struct *tsk)
51781+{
51782+ return 0;
51783+}
51784+
51785+int
51786+gr_check_protected_task(const struct task_struct *task)
51787+{
51788+ return 0;
51789+}
51790+
51791+int
51792+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
51793+{
51794+ return 0;
51795+}
51796+
51797+void
51798+gr_copy_label(struct task_struct *tsk)
51799+{
51800+ return;
51801+}
51802+
51803+void
51804+gr_set_pax_flags(struct task_struct *task)
51805+{
51806+ return;
51807+}
51808+
51809+int
51810+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
51811+ const int unsafe_share)
51812+{
51813+ return 0;
51814+}
51815+
51816+void
51817+gr_handle_delete(const ino_t ino, const dev_t dev)
51818+{
51819+ return;
51820+}
51821+
51822+void
51823+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
51824+{
51825+ return;
51826+}
51827+
51828+void
51829+gr_handle_crash(struct task_struct *task, const int sig)
51830+{
51831+ return;
51832+}
51833+
51834+int
51835+gr_check_crash_exec(const struct file *filp)
51836+{
51837+ return 0;
51838+}
51839+
51840+int
51841+gr_check_crash_uid(const uid_t uid)
51842+{
51843+ return 0;
51844+}
51845+
51846+void
51847+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
51848+ struct dentry *old_dentry,
51849+ struct dentry *new_dentry,
51850+ struct vfsmount *mnt, const __u8 replace)
51851+{
51852+ return;
51853+}
51854+
51855+int
51856+gr_search_socket(const int family, const int type, const int protocol)
51857+{
51858+ return 1;
51859+}
51860+
51861+int
51862+gr_search_connectbind(const int mode, const struct socket *sock,
51863+ const struct sockaddr_in *addr)
51864+{
51865+ return 0;
51866+}
51867+
51868+void
51869+gr_handle_alertkill(struct task_struct *task)
51870+{
51871+ return;
51872+}
51873+
51874+__u32
51875+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
51876+{
51877+ return 1;
51878+}
51879+
51880+__u32
51881+gr_acl_handle_hidden_file(const struct dentry * dentry,
51882+ const struct vfsmount * mnt)
51883+{
51884+ return 1;
51885+}
51886+
51887+__u32
51888+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
51889+ const int fmode)
51890+{
51891+ return 1;
51892+}
51893+
51894+__u32
51895+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
51896+{
51897+ return 1;
51898+}
51899+
51900+__u32
51901+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
51902+{
51903+ return 1;
51904+}
51905+
51906+int
51907+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
51908+ unsigned int *vm_flags)
51909+{
51910+ return 1;
51911+}
51912+
51913+__u32
51914+gr_acl_handle_truncate(const struct dentry * dentry,
51915+ const struct vfsmount * mnt)
51916+{
51917+ return 1;
51918+}
51919+
51920+__u32
51921+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
51922+{
51923+ return 1;
51924+}
51925+
51926+__u32
51927+gr_acl_handle_access(const struct dentry * dentry,
51928+ const struct vfsmount * mnt, const int fmode)
51929+{
51930+ return 1;
51931+}
51932+
51933+__u32
51934+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
51935+ mode_t mode)
51936+{
51937+ return 1;
51938+}
51939+
51940+__u32
51941+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
51942+ mode_t mode)
51943+{
51944+ return 1;
51945+}
51946+
51947+__u32
51948+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
51949+{
51950+ return 1;
51951+}
51952+
51953+__u32
51954+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
51955+{
51956+ return 1;
51957+}
51958+
51959+void
51960+grsecurity_init(void)
51961+{
51962+ return;
51963+}
51964+
51965+__u32
51966+gr_acl_handle_mknod(const struct dentry * new_dentry,
51967+ const struct dentry * parent_dentry,
51968+ const struct vfsmount * parent_mnt,
51969+ const int mode)
51970+{
51971+ return 1;
51972+}
51973+
51974+__u32
51975+gr_acl_handle_mkdir(const struct dentry * new_dentry,
51976+ const struct dentry * parent_dentry,
51977+ const struct vfsmount * parent_mnt)
51978+{
51979+ return 1;
51980+}
51981+
51982+__u32
51983+gr_acl_handle_symlink(const struct dentry * new_dentry,
51984+ const struct dentry * parent_dentry,
51985+ const struct vfsmount * parent_mnt, const char *from)
51986+{
51987+ return 1;
51988+}
51989+
51990+__u32
51991+gr_acl_handle_link(const struct dentry * new_dentry,
51992+ const struct dentry * parent_dentry,
51993+ const struct vfsmount * parent_mnt,
51994+ const struct dentry * old_dentry,
51995+ const struct vfsmount * old_mnt, const char *to)
51996+{
51997+ return 1;
51998+}
51999+
52000+int
52001+gr_acl_handle_rename(const struct dentry *new_dentry,
52002+ const struct dentry *parent_dentry,
52003+ const struct vfsmount *parent_mnt,
52004+ const struct dentry *old_dentry,
52005+ const struct inode *old_parent_inode,
52006+ const struct vfsmount *old_mnt, const char *newname)
52007+{
52008+ return 0;
52009+}
52010+
52011+int
52012+gr_acl_handle_filldir(const struct file *file, const char *name,
52013+ const int namelen, const ino_t ino)
52014+{
52015+ return 1;
52016+}
52017+
52018+int
52019+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
52020+ const time_t shm_createtime, const uid_t cuid, const int shmid)
52021+{
52022+ return 1;
52023+}
52024+
52025+int
52026+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
52027+{
52028+ return 0;
52029+}
52030+
52031+int
52032+gr_search_accept(const struct socket *sock)
52033+{
52034+ return 0;
52035+}
52036+
52037+int
52038+gr_search_listen(const struct socket *sock)
52039+{
52040+ return 0;
52041+}
52042+
52043+int
52044+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
52045+{
52046+ return 0;
52047+}
52048+
52049+__u32
52050+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
52051+{
52052+ return 1;
52053+}
52054+
52055+__u32
52056+gr_acl_handle_creat(const struct dentry * dentry,
52057+ const struct dentry * p_dentry,
52058+ const struct vfsmount * p_mnt, const int fmode,
52059+ const int imode)
52060+{
52061+ return 1;
52062+}
52063+
52064+void
52065+gr_acl_handle_exit(void)
52066+{
52067+ return;
52068+}
52069+
52070+int
52071+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
52072+{
52073+ return 1;
52074+}
52075+
52076+void
52077+gr_set_role_label(const uid_t uid, const gid_t gid)
52078+{
52079+ return;
52080+}
52081+
52082+int
52083+gr_acl_handle_procpidmem(const struct task_struct *task)
52084+{
52085+ return 0;
52086+}
52087+
52088+int
52089+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
52090+{
52091+ return 0;
52092+}
52093+
52094+int
52095+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
52096+{
52097+ return 0;
52098+}
52099+
52100+void
52101+gr_set_kernel_label(struct task_struct *task)
52102+{
52103+ return;
52104+}
52105+
52106+int
52107+gr_check_user_change(int real, int effective, int fs)
52108+{
52109+ return 0;
52110+}
52111+
52112+int
52113+gr_check_group_change(int real, int effective, int fs)
52114+{
52115+ return 0;
52116+}
52117+
52118+int gr_acl_enable_at_secure(void)
52119+{
52120+ return 0;
52121+}
52122+
52123+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
52124+{
52125+ return dentry->d_inode->i_sb->s_dev;
52126+}
52127+
52128+EXPORT_SYMBOL(gr_learn_resource);
52129+EXPORT_SYMBOL(gr_set_kernel_label);
52130+#ifdef CONFIG_SECURITY
52131+EXPORT_SYMBOL(gr_check_user_change);
52132+EXPORT_SYMBOL(gr_check_group_change);
52133+#endif
52134diff -urNp linux-3.0.4/grsecurity/grsec_exec.c linux-3.0.4/grsecurity/grsec_exec.c
52135--- linux-3.0.4/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
52136+++ linux-3.0.4/grsecurity/grsec_exec.c 2011-09-14 09:20:28.000000000 -0400
52137@@ -0,0 +1,145 @@
52138+#include <linux/kernel.h>
52139+#include <linux/sched.h>
52140+#include <linux/file.h>
52141+#include <linux/binfmts.h>
52142+#include <linux/fs.h>
52143+#include <linux/types.h>
52144+#include <linux/grdefs.h>
52145+#include <linux/grsecurity.h>
52146+#include <linux/grinternal.h>
52147+#include <linux/capability.h>
52148+#include <linux/module.h>
52149+
52150+#include <asm/uaccess.h>
52151+
52152+#ifdef CONFIG_GRKERNSEC_EXECLOG
52153+static char gr_exec_arg_buf[132];
52154+static DEFINE_MUTEX(gr_exec_arg_mutex);
52155+#endif
52156+
52157+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
52158+
52159+void
52160+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
52161+{
52162+#ifdef CONFIG_GRKERNSEC_EXECLOG
52163+ char *grarg = gr_exec_arg_buf;
52164+ unsigned int i, x, execlen = 0;
52165+ char c;
52166+
52167+ if (!((grsec_enable_execlog && grsec_enable_group &&
52168+ in_group_p(grsec_audit_gid))
52169+ || (grsec_enable_execlog && !grsec_enable_group)))
52170+ return;
52171+
52172+ mutex_lock(&gr_exec_arg_mutex);
52173+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
52174+
52175+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
52176+ const char __user *p;
52177+ unsigned int len;
52178+
52179+ p = get_user_arg_ptr(argv, i);
52180+ if (IS_ERR(p))
52181+ goto log;
52182+
52183+ len = strnlen_user(p, 128 - execlen);
52184+ if (len > 128 - execlen)
52185+ len = 128 - execlen;
52186+ else if (len > 0)
52187+ len--;
52188+ if (copy_from_user(grarg + execlen, p, len))
52189+ goto log;
52190+
52191+ /* rewrite unprintable characters */
52192+ for (x = 0; x < len; x++) {
52193+ c = *(grarg + execlen + x);
52194+ if (c < 32 || c > 126)
52195+ *(grarg + execlen + x) = ' ';
52196+ }
52197+
52198+ execlen += len;
52199+ *(grarg + execlen) = ' ';
52200+ *(grarg + execlen + 1) = '\0';
52201+ execlen++;
52202+ }
52203+
52204+ log:
52205+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
52206+ bprm->file->f_path.mnt, grarg);
52207+ mutex_unlock(&gr_exec_arg_mutex);
52208+#endif
52209+ return;
52210+}
52211+
52212+#ifdef CONFIG_GRKERNSEC
52213+extern int gr_acl_is_capable(const int cap);
52214+extern int gr_acl_is_capable_nolog(const int cap);
52215+extern int gr_chroot_is_capable(const int cap);
52216+extern int gr_chroot_is_capable_nolog(const int cap);
52217+#endif
52218+
52219+const char *captab_log[] = {
52220+ "CAP_CHOWN",
52221+ "CAP_DAC_OVERRIDE",
52222+ "CAP_DAC_READ_SEARCH",
52223+ "CAP_FOWNER",
52224+ "CAP_FSETID",
52225+ "CAP_KILL",
52226+ "CAP_SETGID",
52227+ "CAP_SETUID",
52228+ "CAP_SETPCAP",
52229+ "CAP_LINUX_IMMUTABLE",
52230+ "CAP_NET_BIND_SERVICE",
52231+ "CAP_NET_BROADCAST",
52232+ "CAP_NET_ADMIN",
52233+ "CAP_NET_RAW",
52234+ "CAP_IPC_LOCK",
52235+ "CAP_IPC_OWNER",
52236+ "CAP_SYS_MODULE",
52237+ "CAP_SYS_RAWIO",
52238+ "CAP_SYS_CHROOT",
52239+ "CAP_SYS_PTRACE",
52240+ "CAP_SYS_PACCT",
52241+ "CAP_SYS_ADMIN",
52242+ "CAP_SYS_BOOT",
52243+ "CAP_SYS_NICE",
52244+ "CAP_SYS_RESOURCE",
52245+ "CAP_SYS_TIME",
52246+ "CAP_SYS_TTY_CONFIG",
52247+ "CAP_MKNOD",
52248+ "CAP_LEASE",
52249+ "CAP_AUDIT_WRITE",
52250+ "CAP_AUDIT_CONTROL",
52251+ "CAP_SETFCAP",
52252+ "CAP_MAC_OVERRIDE",
52253+ "CAP_MAC_ADMIN",
52254+ "CAP_SYSLOG"
52255+};
52256+
52257+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
52258+
52259+int gr_is_capable(const int cap)
52260+{
52261+#ifdef CONFIG_GRKERNSEC
52262+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
52263+ return 1;
52264+ return 0;
52265+#else
52266+ return 1;
52267+#endif
52268+}
52269+
52270+int gr_is_capable_nolog(const int cap)
52271+{
52272+#ifdef CONFIG_GRKERNSEC
52273+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
52274+ return 1;
52275+ return 0;
52276+#else
52277+ return 1;
52278+#endif
52279+}
52280+
52281+EXPORT_SYMBOL(gr_is_capable);
52282+EXPORT_SYMBOL(gr_is_capable_nolog);
52283diff -urNp linux-3.0.4/grsecurity/grsec_fifo.c linux-3.0.4/grsecurity/grsec_fifo.c
52284--- linux-3.0.4/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
52285+++ linux-3.0.4/grsecurity/grsec_fifo.c 2011-08-23 21:48:14.000000000 -0400
52286@@ -0,0 +1,24 @@
52287+#include <linux/kernel.h>
52288+#include <linux/sched.h>
52289+#include <linux/fs.h>
52290+#include <linux/file.h>
52291+#include <linux/grinternal.h>
52292+
52293+int
52294+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
52295+ const struct dentry *dir, const int flag, const int acc_mode)
52296+{
52297+#ifdef CONFIG_GRKERNSEC_FIFO
52298+ const struct cred *cred = current_cred();
52299+
52300+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
52301+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
52302+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
52303+ (cred->fsuid != dentry->d_inode->i_uid)) {
52304+ if (!inode_permission(dentry->d_inode, acc_mode))
52305+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
52306+ return -EACCES;
52307+ }
52308+#endif
52309+ return 0;
52310+}
52311diff -urNp linux-3.0.4/grsecurity/grsec_fork.c linux-3.0.4/grsecurity/grsec_fork.c
52312--- linux-3.0.4/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
52313+++ linux-3.0.4/grsecurity/grsec_fork.c 2011-08-23 21:48:14.000000000 -0400
52314@@ -0,0 +1,23 @@
52315+#include <linux/kernel.h>
52316+#include <linux/sched.h>
52317+#include <linux/grsecurity.h>
52318+#include <linux/grinternal.h>
52319+#include <linux/errno.h>
52320+
52321+void
52322+gr_log_forkfail(const int retval)
52323+{
52324+#ifdef CONFIG_GRKERNSEC_FORKFAIL
52325+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
52326+ switch (retval) {
52327+ case -EAGAIN:
52328+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
52329+ break;
52330+ case -ENOMEM:
52331+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
52332+ break;
52333+ }
52334+ }
52335+#endif
52336+ return;
52337+}
52338diff -urNp linux-3.0.4/grsecurity/grsec_init.c linux-3.0.4/grsecurity/grsec_init.c
52339--- linux-3.0.4/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
52340+++ linux-3.0.4/grsecurity/grsec_init.c 2011-08-25 17:25:12.000000000 -0400
52341@@ -0,0 +1,269 @@
52342+#include <linux/kernel.h>
52343+#include <linux/sched.h>
52344+#include <linux/mm.h>
52345+#include <linux/gracl.h>
52346+#include <linux/slab.h>
52347+#include <linux/vmalloc.h>
52348+#include <linux/percpu.h>
52349+#include <linux/module.h>
52350+
52351+int grsec_enable_brute;
52352+int grsec_enable_link;
52353+int grsec_enable_dmesg;
52354+int grsec_enable_harden_ptrace;
52355+int grsec_enable_fifo;
52356+int grsec_enable_execlog;
52357+int grsec_enable_signal;
52358+int grsec_enable_forkfail;
52359+int grsec_enable_audit_ptrace;
52360+int grsec_enable_time;
52361+int grsec_enable_audit_textrel;
52362+int grsec_enable_group;
52363+int grsec_audit_gid;
52364+int grsec_enable_chdir;
52365+int grsec_enable_mount;
52366+int grsec_enable_rofs;
52367+int grsec_enable_chroot_findtask;
52368+int grsec_enable_chroot_mount;
52369+int grsec_enable_chroot_shmat;
52370+int grsec_enable_chroot_fchdir;
52371+int grsec_enable_chroot_double;
52372+int grsec_enable_chroot_pivot;
52373+int grsec_enable_chroot_chdir;
52374+int grsec_enable_chroot_chmod;
52375+int grsec_enable_chroot_mknod;
52376+int grsec_enable_chroot_nice;
52377+int grsec_enable_chroot_execlog;
52378+int grsec_enable_chroot_caps;
52379+int grsec_enable_chroot_sysctl;
52380+int grsec_enable_chroot_unix;
52381+int grsec_enable_tpe;
52382+int grsec_tpe_gid;
52383+int grsec_enable_blackhole;
52384+#ifdef CONFIG_IPV6_MODULE
52385+EXPORT_SYMBOL(grsec_enable_blackhole);
52386+#endif
52387+int grsec_lastack_retries;
52388+int grsec_enable_tpe_all;
52389+int grsec_enable_tpe_invert;
52390+int grsec_enable_socket_all;
52391+int grsec_socket_all_gid;
52392+int grsec_enable_socket_client;
52393+int grsec_socket_client_gid;
52394+int grsec_enable_socket_server;
52395+int grsec_socket_server_gid;
52396+int grsec_resource_logging;
52397+int grsec_disable_privio;
52398+int grsec_enable_log_rwxmaps;
52399+int grsec_lock;
52400+
52401+DEFINE_SPINLOCK(grsec_alert_lock);
52402+unsigned long grsec_alert_wtime = 0;
52403+unsigned long grsec_alert_fyet = 0;
52404+
52405+DEFINE_SPINLOCK(grsec_audit_lock);
52406+
52407+DEFINE_RWLOCK(grsec_exec_file_lock);
52408+
52409+char *gr_shared_page[4];
52410+
52411+char *gr_alert_log_fmt;
52412+char *gr_audit_log_fmt;
52413+char *gr_alert_log_buf;
52414+char *gr_audit_log_buf;
52415+
52416+extern struct gr_arg *gr_usermode;
52417+extern unsigned char *gr_system_salt;
52418+extern unsigned char *gr_system_sum;
52419+
52420+void __init
52421+grsecurity_init(void)
52422+{
52423+ int j;
52424+ /* create the per-cpu shared pages */
52425+
52426+#ifdef CONFIG_X86
52427+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
52428+#endif
52429+
52430+ for (j = 0; j < 4; j++) {
52431+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
52432+ if (gr_shared_page[j] == NULL) {
52433+ panic("Unable to allocate grsecurity shared page");
52434+ return;
52435+ }
52436+ }
52437+
52438+ /* allocate log buffers */
52439+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
52440+ if (!gr_alert_log_fmt) {
52441+ panic("Unable to allocate grsecurity alert log format buffer");
52442+ return;
52443+ }
52444+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
52445+ if (!gr_audit_log_fmt) {
52446+ panic("Unable to allocate grsecurity audit log format buffer");
52447+ return;
52448+ }
52449+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
52450+ if (!gr_alert_log_buf) {
52451+ panic("Unable to allocate grsecurity alert log buffer");
52452+ return;
52453+ }
52454+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
52455+ if (!gr_audit_log_buf) {
52456+ panic("Unable to allocate grsecurity audit log buffer");
52457+ return;
52458+ }
52459+
52460+ /* allocate memory for authentication structure */
52461+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
52462+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
52463+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
52464+
52465+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
52466+ panic("Unable to allocate grsecurity authentication structure");
52467+ return;
52468+ }
52469+
52470+
52471+#ifdef CONFIG_GRKERNSEC_IO
52472+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
52473+ grsec_disable_privio = 1;
52474+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
52475+ grsec_disable_privio = 1;
52476+#else
52477+ grsec_disable_privio = 0;
52478+#endif
52479+#endif
52480+
52481+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
52482+ /* for backward compatibility, tpe_invert always defaults to on if
52483+ enabled in the kernel
52484+ */
52485+ grsec_enable_tpe_invert = 1;
52486+#endif
52487+
52488+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
52489+#ifndef CONFIG_GRKERNSEC_SYSCTL
52490+ grsec_lock = 1;
52491+#endif
52492+
52493+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
52494+ grsec_enable_audit_textrel = 1;
52495+#endif
52496+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
52497+ grsec_enable_log_rwxmaps = 1;
52498+#endif
52499+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
52500+ grsec_enable_group = 1;
52501+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
52502+#endif
52503+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
52504+ grsec_enable_chdir = 1;
52505+#endif
52506+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
52507+ grsec_enable_harden_ptrace = 1;
52508+#endif
52509+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
52510+ grsec_enable_mount = 1;
52511+#endif
52512+#ifdef CONFIG_GRKERNSEC_LINK
52513+ grsec_enable_link = 1;
52514+#endif
52515+#ifdef CONFIG_GRKERNSEC_BRUTE
52516+ grsec_enable_brute = 1;
52517+#endif
52518+#ifdef CONFIG_GRKERNSEC_DMESG
52519+ grsec_enable_dmesg = 1;
52520+#endif
52521+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
52522+ grsec_enable_blackhole = 1;
52523+ grsec_lastack_retries = 4;
52524+#endif
52525+#ifdef CONFIG_GRKERNSEC_FIFO
52526+ grsec_enable_fifo = 1;
52527+#endif
52528+#ifdef CONFIG_GRKERNSEC_EXECLOG
52529+ grsec_enable_execlog = 1;
52530+#endif
52531+#ifdef CONFIG_GRKERNSEC_SIGNAL
52532+ grsec_enable_signal = 1;
52533+#endif
52534+#ifdef CONFIG_GRKERNSEC_FORKFAIL
52535+ grsec_enable_forkfail = 1;
52536+#endif
52537+#ifdef CONFIG_GRKERNSEC_TIME
52538+ grsec_enable_time = 1;
52539+#endif
52540+#ifdef CONFIG_GRKERNSEC_RESLOG
52541+ grsec_resource_logging = 1;
52542+#endif
52543+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52544+ grsec_enable_chroot_findtask = 1;
52545+#endif
52546+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
52547+ grsec_enable_chroot_unix = 1;
52548+#endif
52549+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
52550+ grsec_enable_chroot_mount = 1;
52551+#endif
52552+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
52553+ grsec_enable_chroot_fchdir = 1;
52554+#endif
52555+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
52556+ grsec_enable_chroot_shmat = 1;
52557+#endif
52558+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
52559+ grsec_enable_audit_ptrace = 1;
52560+#endif
52561+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
52562+ grsec_enable_chroot_double = 1;
52563+#endif
52564+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
52565+ grsec_enable_chroot_pivot = 1;
52566+#endif
52567+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
52568+ grsec_enable_chroot_chdir = 1;
52569+#endif
52570+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
52571+ grsec_enable_chroot_chmod = 1;
52572+#endif
52573+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
52574+ grsec_enable_chroot_mknod = 1;
52575+#endif
52576+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
52577+ grsec_enable_chroot_nice = 1;
52578+#endif
52579+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
52580+ grsec_enable_chroot_execlog = 1;
52581+#endif
52582+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52583+ grsec_enable_chroot_caps = 1;
52584+#endif
52585+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
52586+ grsec_enable_chroot_sysctl = 1;
52587+#endif
52588+#ifdef CONFIG_GRKERNSEC_TPE
52589+ grsec_enable_tpe = 1;
52590+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
52591+#ifdef CONFIG_GRKERNSEC_TPE_ALL
52592+ grsec_enable_tpe_all = 1;
52593+#endif
52594+#endif
52595+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
52596+ grsec_enable_socket_all = 1;
52597+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
52598+#endif
52599+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
52600+ grsec_enable_socket_client = 1;
52601+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
52602+#endif
52603+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
52604+ grsec_enable_socket_server = 1;
52605+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
52606+#endif
52607+#endif
52608+
52609+ return;
52610+}
52611diff -urNp linux-3.0.4/grsecurity/grsec_link.c linux-3.0.4/grsecurity/grsec_link.c
52612--- linux-3.0.4/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
52613+++ linux-3.0.4/grsecurity/grsec_link.c 2011-08-23 21:48:14.000000000 -0400
52614@@ -0,0 +1,43 @@
52615+#include <linux/kernel.h>
52616+#include <linux/sched.h>
52617+#include <linux/fs.h>
52618+#include <linux/file.h>
52619+#include <linux/grinternal.h>
52620+
52621+int
52622+gr_handle_follow_link(const struct inode *parent,
52623+ const struct inode *inode,
52624+ const struct dentry *dentry, const struct vfsmount *mnt)
52625+{
52626+#ifdef CONFIG_GRKERNSEC_LINK
52627+ const struct cred *cred = current_cred();
52628+
52629+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
52630+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
52631+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
52632+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
52633+ return -EACCES;
52634+ }
52635+#endif
52636+ return 0;
52637+}
52638+
52639+int
52640+gr_handle_hardlink(const struct dentry *dentry,
52641+ const struct vfsmount *mnt,
52642+ struct inode *inode, const int mode, const char *to)
52643+{
52644+#ifdef CONFIG_GRKERNSEC_LINK
52645+ const struct cred *cred = current_cred();
52646+
52647+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
52648+ (!S_ISREG(mode) || (mode & S_ISUID) ||
52649+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
52650+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
52651+ !capable(CAP_FOWNER) && cred->uid) {
52652+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
52653+ return -EPERM;
52654+ }
52655+#endif
52656+ return 0;
52657+}
52658diff -urNp linux-3.0.4/grsecurity/grsec_log.c linux-3.0.4/grsecurity/grsec_log.c
52659--- linux-3.0.4/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
52660+++ linux-3.0.4/grsecurity/grsec_log.c 2011-09-26 10:46:21.000000000 -0400
52661@@ -0,0 +1,315 @@
52662+#include <linux/kernel.h>
52663+#include <linux/sched.h>
52664+#include <linux/file.h>
52665+#include <linux/tty.h>
52666+#include <linux/fs.h>
52667+#include <linux/grinternal.h>
52668+
52669+#ifdef CONFIG_TREE_PREEMPT_RCU
52670+#define DISABLE_PREEMPT() preempt_disable()
52671+#define ENABLE_PREEMPT() preempt_enable()
52672+#else
52673+#define DISABLE_PREEMPT()
52674+#define ENABLE_PREEMPT()
52675+#endif
52676+
52677+#define BEGIN_LOCKS(x) \
52678+ DISABLE_PREEMPT(); \
52679+ rcu_read_lock(); \
52680+ read_lock(&tasklist_lock); \
52681+ read_lock(&grsec_exec_file_lock); \
52682+ if (x != GR_DO_AUDIT) \
52683+ spin_lock(&grsec_alert_lock); \
52684+ else \
52685+ spin_lock(&grsec_audit_lock)
52686+
52687+#define END_LOCKS(x) \
52688+ if (x != GR_DO_AUDIT) \
52689+ spin_unlock(&grsec_alert_lock); \
52690+ else \
52691+ spin_unlock(&grsec_audit_lock); \
52692+ read_unlock(&grsec_exec_file_lock); \
52693+ read_unlock(&tasklist_lock); \
52694+ rcu_read_unlock(); \
52695+ ENABLE_PREEMPT(); \
52696+ if (x == GR_DONT_AUDIT) \
52697+ gr_handle_alertkill(current)
52698+
52699+enum {
52700+ FLOODING,
52701+ NO_FLOODING
52702+};
52703+
52704+extern char *gr_alert_log_fmt;
52705+extern char *gr_audit_log_fmt;
52706+extern char *gr_alert_log_buf;
52707+extern char *gr_audit_log_buf;
52708+
52709+static int gr_log_start(int audit)
52710+{
52711+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
52712+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
52713+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
52714+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
52715+ unsigned long curr_secs = get_seconds();
52716+
52717+ if (audit == GR_DO_AUDIT)
52718+ goto set_fmt;
52719+
52720+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
52721+ grsec_alert_wtime = curr_secs;
52722+ grsec_alert_fyet = 0;
52723+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
52724+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
52725+ grsec_alert_fyet++;
52726+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
52727+ grsec_alert_wtime = curr_secs;
52728+ grsec_alert_fyet++;
52729+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
52730+ return FLOODING;
52731+ }
52732+ else return FLOODING;
52733+
52734+set_fmt:
52735+#endif
52736+ memset(buf, 0, PAGE_SIZE);
52737+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
52738+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
52739+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
52740+ } else if (current->signal->curr_ip) {
52741+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
52742+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
52743+ } else if (gr_acl_is_enabled()) {
52744+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
52745+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
52746+ } else {
52747+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
52748+ strcpy(buf, fmt);
52749+ }
52750+
52751+ return NO_FLOODING;
52752+}
52753+
52754+static void gr_log_middle(int audit, const char *msg, va_list ap)
52755+ __attribute__ ((format (printf, 2, 0)));
52756+
52757+static void gr_log_middle(int audit, const char *msg, va_list ap)
52758+{
52759+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
52760+ unsigned int len = strlen(buf);
52761+
52762+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
52763+
52764+ return;
52765+}
52766+
52767+static void gr_log_middle_varargs(int audit, const char *msg, ...)
52768+ __attribute__ ((format (printf, 2, 3)));
52769+
52770+static void gr_log_middle_varargs(int audit, const char *msg, ...)
52771+{
52772+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
52773+ unsigned int len = strlen(buf);
52774+ va_list ap;
52775+
52776+ va_start(ap, msg);
52777+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
52778+ va_end(ap);
52779+
52780+ return;
52781+}
52782+
52783+static void gr_log_end(int audit)
52784+{
52785+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
52786+ unsigned int len = strlen(buf);
52787+
52788+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
52789+ printk("%s\n", buf);
52790+
52791+ return;
52792+}
52793+
52794+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
52795+{
52796+ int logtype;
52797+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
52798+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
52799+ void *voidptr = NULL;
52800+ int num1 = 0, num2 = 0;
52801+ unsigned long ulong1 = 0, ulong2 = 0;
52802+ struct dentry *dentry = NULL;
52803+ struct vfsmount *mnt = NULL;
52804+ struct file *file = NULL;
52805+ struct task_struct *task = NULL;
52806+ const struct cred *cred, *pcred;
52807+ va_list ap;
52808+
52809+ BEGIN_LOCKS(audit);
52810+ logtype = gr_log_start(audit);
52811+ if (logtype == FLOODING) {
52812+ END_LOCKS(audit);
52813+ return;
52814+ }
52815+ va_start(ap, argtypes);
52816+ switch (argtypes) {
52817+ case GR_TTYSNIFF:
52818+ task = va_arg(ap, struct task_struct *);
52819+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
52820+ break;
52821+ case GR_SYSCTL_HIDDEN:
52822+ str1 = va_arg(ap, char *);
52823+ gr_log_middle_varargs(audit, msg, result, str1);
52824+ break;
52825+ case GR_RBAC:
52826+ dentry = va_arg(ap, struct dentry *);
52827+ mnt = va_arg(ap, struct vfsmount *);
52828+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
52829+ break;
52830+ case GR_RBAC_STR:
52831+ dentry = va_arg(ap, struct dentry *);
52832+ mnt = va_arg(ap, struct vfsmount *);
52833+ str1 = va_arg(ap, char *);
52834+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
52835+ break;
52836+ case GR_STR_RBAC:
52837+ str1 = va_arg(ap, char *);
52838+ dentry = va_arg(ap, struct dentry *);
52839+ mnt = va_arg(ap, struct vfsmount *);
52840+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
52841+ break;
52842+ case GR_RBAC_MODE2:
52843+ dentry = va_arg(ap, struct dentry *);
52844+ mnt = va_arg(ap, struct vfsmount *);
52845+ str1 = va_arg(ap, char *);
52846+ str2 = va_arg(ap, char *);
52847+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
52848+ break;
52849+ case GR_RBAC_MODE3:
52850+ dentry = va_arg(ap, struct dentry *);
52851+ mnt = va_arg(ap, struct vfsmount *);
52852+ str1 = va_arg(ap, char *);
52853+ str2 = va_arg(ap, char *);
52854+ str3 = va_arg(ap, char *);
52855+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
52856+ break;
52857+ case GR_FILENAME:
52858+ dentry = va_arg(ap, struct dentry *);
52859+ mnt = va_arg(ap, struct vfsmount *);
52860+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
52861+ break;
52862+ case GR_STR_FILENAME:
52863+ str1 = va_arg(ap, char *);
52864+ dentry = va_arg(ap, struct dentry *);
52865+ mnt = va_arg(ap, struct vfsmount *);
52866+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
52867+ break;
52868+ case GR_FILENAME_STR:
52869+ dentry = va_arg(ap, struct dentry *);
52870+ mnt = va_arg(ap, struct vfsmount *);
52871+ str1 = va_arg(ap, char *);
52872+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
52873+ break;
52874+ case GR_FILENAME_TWO_INT:
52875+ dentry = va_arg(ap, struct dentry *);
52876+ mnt = va_arg(ap, struct vfsmount *);
52877+ num1 = va_arg(ap, int);
52878+ num2 = va_arg(ap, int);
52879+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
52880+ break;
52881+ case GR_FILENAME_TWO_INT_STR:
52882+ dentry = va_arg(ap, struct dentry *);
52883+ mnt = va_arg(ap, struct vfsmount *);
52884+ num1 = va_arg(ap, int);
52885+ num2 = va_arg(ap, int);
52886+ str1 = va_arg(ap, char *);
52887+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
52888+ break;
52889+ case GR_TEXTREL:
52890+ file = va_arg(ap, struct file *);
52891+ ulong1 = va_arg(ap, unsigned long);
52892+ ulong2 = va_arg(ap, unsigned long);
52893+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
52894+ break;
52895+ case GR_PTRACE:
52896+ task = va_arg(ap, struct task_struct *);
52897+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
52898+ break;
52899+ case GR_RESOURCE:
52900+ task = va_arg(ap, struct task_struct *);
52901+ cred = __task_cred(task);
52902+ pcred = __task_cred(task->real_parent);
52903+ ulong1 = va_arg(ap, unsigned long);
52904+ str1 = va_arg(ap, char *);
52905+ ulong2 = va_arg(ap, unsigned long);
52906+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
52907+ break;
52908+ case GR_CAP:
52909+ task = va_arg(ap, struct task_struct *);
52910+ cred = __task_cred(task);
52911+ pcred = __task_cred(task->real_parent);
52912+ str1 = va_arg(ap, char *);
52913+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
52914+ break;
52915+ case GR_SIG:
52916+ str1 = va_arg(ap, char *);
52917+ voidptr = va_arg(ap, void *);
52918+ gr_log_middle_varargs(audit, msg, str1, voidptr);
52919+ break;
52920+ case GR_SIG2:
52921+ task = va_arg(ap, struct task_struct *);
52922+ cred = __task_cred(task);
52923+ pcred = __task_cred(task->real_parent);
52924+ num1 = va_arg(ap, int);
52925+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
52926+ break;
52927+ case GR_CRASH1:
52928+ task = va_arg(ap, struct task_struct *);
52929+ cred = __task_cred(task);
52930+ pcred = __task_cred(task->real_parent);
52931+ ulong1 = va_arg(ap, unsigned long);
52932+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
52933+ break;
52934+ case GR_CRASH2:
52935+ task = va_arg(ap, struct task_struct *);
52936+ cred = __task_cred(task);
52937+ pcred = __task_cred(task->real_parent);
52938+ ulong1 = va_arg(ap, unsigned long);
52939+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
52940+ break;
52941+ case GR_RWXMAP:
52942+ file = va_arg(ap, struct file *);
52943+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
52944+ break;
52945+ case GR_PSACCT:
52946+ {
52947+ unsigned int wday, cday;
52948+ __u8 whr, chr;
52949+ __u8 wmin, cmin;
52950+ __u8 wsec, csec;
52951+ char cur_tty[64] = { 0 };
52952+ char parent_tty[64] = { 0 };
52953+
52954+ task = va_arg(ap, struct task_struct *);
52955+ wday = va_arg(ap, unsigned int);
52956+ cday = va_arg(ap, unsigned int);
52957+ whr = va_arg(ap, int);
52958+ chr = va_arg(ap, int);
52959+ wmin = va_arg(ap, int);
52960+ cmin = va_arg(ap, int);
52961+ wsec = va_arg(ap, int);
52962+ csec = va_arg(ap, int);
52963+ ulong1 = va_arg(ap, unsigned long);
52964+ cred = __task_cred(task);
52965+ pcred = __task_cred(task->real_parent);
52966+
52967+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
52968+ }
52969+ break;
52970+ default:
52971+ gr_log_middle(audit, msg, ap);
52972+ }
52973+ va_end(ap);
52974+ gr_log_end(audit);
52975+ END_LOCKS(audit);
52976+}
52977diff -urNp linux-3.0.4/grsecurity/grsec_mem.c linux-3.0.4/grsecurity/grsec_mem.c
52978--- linux-3.0.4/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
52979+++ linux-3.0.4/grsecurity/grsec_mem.c 2011-08-23 21:48:14.000000000 -0400
52980@@ -0,0 +1,33 @@
52981+#include <linux/kernel.h>
52982+#include <linux/sched.h>
52983+#include <linux/mm.h>
52984+#include <linux/mman.h>
52985+#include <linux/grinternal.h>
52986+
52987+void
52988+gr_handle_ioperm(void)
52989+{
52990+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
52991+ return;
52992+}
52993+
52994+void
52995+gr_handle_iopl(void)
52996+{
52997+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
52998+ return;
52999+}
53000+
53001+void
53002+gr_handle_mem_readwrite(u64 from, u64 to)
53003+{
53004+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
53005+ return;
53006+}
53007+
53008+void
53009+gr_handle_vm86(void)
53010+{
53011+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
53012+ return;
53013+}
53014diff -urNp linux-3.0.4/grsecurity/grsec_mount.c linux-3.0.4/grsecurity/grsec_mount.c
53015--- linux-3.0.4/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
53016+++ linux-3.0.4/grsecurity/grsec_mount.c 2011-08-23 21:48:14.000000000 -0400
53017@@ -0,0 +1,62 @@
53018+#include <linux/kernel.h>
53019+#include <linux/sched.h>
53020+#include <linux/mount.h>
53021+#include <linux/grsecurity.h>
53022+#include <linux/grinternal.h>
53023+
53024+void
53025+gr_log_remount(const char *devname, const int retval)
53026+{
53027+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
53028+ if (grsec_enable_mount && (retval >= 0))
53029+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
53030+#endif
53031+ return;
53032+}
53033+
53034+void
53035+gr_log_unmount(const char *devname, const int retval)
53036+{
53037+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
53038+ if (grsec_enable_mount && (retval >= 0))
53039+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
53040+#endif
53041+ return;
53042+}
53043+
53044+void
53045+gr_log_mount(const char *from, const char *to, const int retval)
53046+{
53047+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
53048+ if (grsec_enable_mount && (retval >= 0))
53049+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
53050+#endif
53051+ return;
53052+}
53053+
53054+int
53055+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
53056+{
53057+#ifdef CONFIG_GRKERNSEC_ROFS
53058+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
53059+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
53060+ return -EPERM;
53061+ } else
53062+ return 0;
53063+#endif
53064+ return 0;
53065+}
53066+
53067+int
53068+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
53069+{
53070+#ifdef CONFIG_GRKERNSEC_ROFS
53071+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
53072+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
53073+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
53074+ return -EPERM;
53075+ } else
53076+ return 0;
53077+#endif
53078+ return 0;
53079+}
53080diff -urNp linux-3.0.4/grsecurity/grsec_pax.c linux-3.0.4/grsecurity/grsec_pax.c
53081--- linux-3.0.4/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
53082+++ linux-3.0.4/grsecurity/grsec_pax.c 2011-08-23 21:48:14.000000000 -0400
53083@@ -0,0 +1,36 @@
53084+#include <linux/kernel.h>
53085+#include <linux/sched.h>
53086+#include <linux/mm.h>
53087+#include <linux/file.h>
53088+#include <linux/grinternal.h>
53089+#include <linux/grsecurity.h>
53090+
53091+void
53092+gr_log_textrel(struct vm_area_struct * vma)
53093+{
53094+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
53095+ if (grsec_enable_audit_textrel)
53096+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
53097+#endif
53098+ return;
53099+}
53100+
53101+void
53102+gr_log_rwxmmap(struct file *file)
53103+{
53104+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
53105+ if (grsec_enable_log_rwxmaps)
53106+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
53107+#endif
53108+ return;
53109+}
53110+
53111+void
53112+gr_log_rwxmprotect(struct file *file)
53113+{
53114+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
53115+ if (grsec_enable_log_rwxmaps)
53116+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
53117+#endif
53118+ return;
53119+}
53120diff -urNp linux-3.0.4/grsecurity/grsec_ptrace.c linux-3.0.4/grsecurity/grsec_ptrace.c
53121--- linux-3.0.4/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
53122+++ linux-3.0.4/grsecurity/grsec_ptrace.c 2011-08-23 21:48:14.000000000 -0400
53123@@ -0,0 +1,14 @@
53124+#include <linux/kernel.h>
53125+#include <linux/sched.h>
53126+#include <linux/grinternal.h>
53127+#include <linux/grsecurity.h>
53128+
53129+void
53130+gr_audit_ptrace(struct task_struct *task)
53131+{
53132+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
53133+ if (grsec_enable_audit_ptrace)
53134+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
53135+#endif
53136+ return;
53137+}
53138diff -urNp linux-3.0.4/grsecurity/grsec_sig.c linux-3.0.4/grsecurity/grsec_sig.c
53139--- linux-3.0.4/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
53140+++ linux-3.0.4/grsecurity/grsec_sig.c 2011-08-23 21:48:14.000000000 -0400
53141@@ -0,0 +1,206 @@
53142+#include <linux/kernel.h>
53143+#include <linux/sched.h>
53144+#include <linux/delay.h>
53145+#include <linux/grsecurity.h>
53146+#include <linux/grinternal.h>
53147+#include <linux/hardirq.h>
53148+
53149+char *signames[] = {
53150+ [SIGSEGV] = "Segmentation fault",
53151+ [SIGILL] = "Illegal instruction",
53152+ [SIGABRT] = "Abort",
53153+ [SIGBUS] = "Invalid alignment/Bus error"
53154+};
53155+
53156+void
53157+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
53158+{
53159+#ifdef CONFIG_GRKERNSEC_SIGNAL
53160+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
53161+ (sig == SIGABRT) || (sig == SIGBUS))) {
53162+ if (t->pid == current->pid) {
53163+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
53164+ } else {
53165+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
53166+ }
53167+ }
53168+#endif
53169+ return;
53170+}
53171+
53172+int
53173+gr_handle_signal(const struct task_struct *p, const int sig)
53174+{
53175+#ifdef CONFIG_GRKERNSEC
53176+ if (current->pid > 1 && gr_check_protected_task(p)) {
53177+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
53178+ return -EPERM;
53179+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
53180+ return -EPERM;
53181+ }
53182+#endif
53183+ return 0;
53184+}
53185+
53186+#ifdef CONFIG_GRKERNSEC
53187+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
53188+
53189+int gr_fake_force_sig(int sig, struct task_struct *t)
53190+{
53191+ unsigned long int flags;
53192+ int ret, blocked, ignored;
53193+ struct k_sigaction *action;
53194+
53195+ spin_lock_irqsave(&t->sighand->siglock, flags);
53196+ action = &t->sighand->action[sig-1];
53197+ ignored = action->sa.sa_handler == SIG_IGN;
53198+ blocked = sigismember(&t->blocked, sig);
53199+ if (blocked || ignored) {
53200+ action->sa.sa_handler = SIG_DFL;
53201+ if (blocked) {
53202+ sigdelset(&t->blocked, sig);
53203+ recalc_sigpending_and_wake(t);
53204+ }
53205+ }
53206+ if (action->sa.sa_handler == SIG_DFL)
53207+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
53208+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
53209+
53210+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
53211+
53212+ return ret;
53213+}
53214+#endif
53215+
53216+#ifdef CONFIG_GRKERNSEC_BRUTE
53217+#define GR_USER_BAN_TIME (15 * 60)
53218+
53219+static int __get_dumpable(unsigned long mm_flags)
53220+{
53221+ int ret;
53222+
53223+ ret = mm_flags & MMF_DUMPABLE_MASK;
53224+ return (ret >= 2) ? 2 : ret;
53225+}
53226+#endif
53227+
53228+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
53229+{
53230+#ifdef CONFIG_GRKERNSEC_BRUTE
53231+ uid_t uid = 0;
53232+
53233+ if (!grsec_enable_brute)
53234+ return;
53235+
53236+ rcu_read_lock();
53237+ read_lock(&tasklist_lock);
53238+ read_lock(&grsec_exec_file_lock);
53239+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
53240+ p->real_parent->brute = 1;
53241+ else {
53242+ const struct cred *cred = __task_cred(p), *cred2;
53243+ struct task_struct *tsk, *tsk2;
53244+
53245+ if (!__get_dumpable(mm_flags) && cred->uid) {
53246+ struct user_struct *user;
53247+
53248+ uid = cred->uid;
53249+
53250+ /* this is put upon execution past expiration */
53251+ user = find_user(uid);
53252+ if (user == NULL)
53253+ goto unlock;
53254+ user->banned = 1;
53255+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
53256+ if (user->ban_expires == ~0UL)
53257+ user->ban_expires--;
53258+
53259+ do_each_thread(tsk2, tsk) {
53260+ cred2 = __task_cred(tsk);
53261+ if (tsk != p && cred2->uid == uid)
53262+ gr_fake_force_sig(SIGKILL, tsk);
53263+ } while_each_thread(tsk2, tsk);
53264+ }
53265+ }
53266+unlock:
53267+ read_unlock(&grsec_exec_file_lock);
53268+ read_unlock(&tasklist_lock);
53269+ rcu_read_unlock();
53270+
53271+ if (uid)
53272+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
53273+
53274+#endif
53275+ return;
53276+}
53277+
53278+void gr_handle_brute_check(void)
53279+{
53280+#ifdef CONFIG_GRKERNSEC_BRUTE
53281+ if (current->brute)
53282+ msleep(30 * 1000);
53283+#endif
53284+ return;
53285+}
53286+
53287+void gr_handle_kernel_exploit(void)
53288+{
53289+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
53290+ const struct cred *cred;
53291+ struct task_struct *tsk, *tsk2;
53292+ struct user_struct *user;
53293+ uid_t uid;
53294+
53295+ if (in_irq() || in_serving_softirq() || in_nmi())
53296+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
53297+
53298+ uid = current_uid();
53299+
53300+ if (uid == 0)
53301+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
53302+ else {
53303+ /* kill all the processes of this user, hold a reference
53304+ to their creds struct, and prevent them from creating
53305+ another process until system reset
53306+ */
53307+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
53308+ /* we intentionally leak this ref */
53309+ user = get_uid(current->cred->user);
53310+ if (user) {
53311+ user->banned = 1;
53312+ user->ban_expires = ~0UL;
53313+ }
53314+
53315+ read_lock(&tasklist_lock);
53316+ do_each_thread(tsk2, tsk) {
53317+ cred = __task_cred(tsk);
53318+ if (cred->uid == uid)
53319+ gr_fake_force_sig(SIGKILL, tsk);
53320+ } while_each_thread(tsk2, tsk);
53321+ read_unlock(&tasklist_lock);
53322+ }
53323+#endif
53324+}
53325+
53326+int __gr_process_user_ban(struct user_struct *user)
53327+{
53328+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
53329+ if (unlikely(user->banned)) {
53330+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
53331+ user->banned = 0;
53332+ user->ban_expires = 0;
53333+ free_uid(user);
53334+ } else
53335+ return -EPERM;
53336+ }
53337+#endif
53338+ return 0;
53339+}
53340+
53341+int gr_process_user_ban(void)
53342+{
53343+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
53344+ return __gr_process_user_ban(current->cred->user);
53345+#endif
53346+ return 0;
53347+}
53348diff -urNp linux-3.0.4/grsecurity/grsec_sock.c linux-3.0.4/grsecurity/grsec_sock.c
53349--- linux-3.0.4/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
53350+++ linux-3.0.4/grsecurity/grsec_sock.c 2011-08-23 21:48:14.000000000 -0400
53351@@ -0,0 +1,244 @@
53352+#include <linux/kernel.h>
53353+#include <linux/module.h>
53354+#include <linux/sched.h>
53355+#include <linux/file.h>
53356+#include <linux/net.h>
53357+#include <linux/in.h>
53358+#include <linux/ip.h>
53359+#include <net/sock.h>
53360+#include <net/inet_sock.h>
53361+#include <linux/grsecurity.h>
53362+#include <linux/grinternal.h>
53363+#include <linux/gracl.h>
53364+
53365+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
53366+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
53367+
53368+EXPORT_SYMBOL(gr_search_udp_recvmsg);
53369+EXPORT_SYMBOL(gr_search_udp_sendmsg);
53370+
53371+#ifdef CONFIG_UNIX_MODULE
53372+EXPORT_SYMBOL(gr_acl_handle_unix);
53373+EXPORT_SYMBOL(gr_acl_handle_mknod);
53374+EXPORT_SYMBOL(gr_handle_chroot_unix);
53375+EXPORT_SYMBOL(gr_handle_create);
53376+#endif
53377+
53378+#ifdef CONFIG_GRKERNSEC
53379+#define gr_conn_table_size 32749
53380+struct conn_table_entry {
53381+ struct conn_table_entry *next;
53382+ struct signal_struct *sig;
53383+};
53384+
53385+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
53386+DEFINE_SPINLOCK(gr_conn_table_lock);
53387+
53388+extern const char * gr_socktype_to_name(unsigned char type);
53389+extern const char * gr_proto_to_name(unsigned char proto);
53390+extern const char * gr_sockfamily_to_name(unsigned char family);
53391+
53392+static __inline__ int
53393+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
53394+{
53395+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
53396+}
53397+
53398+static __inline__ int
53399+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
53400+ __u16 sport, __u16 dport)
53401+{
53402+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
53403+ sig->gr_sport == sport && sig->gr_dport == dport))
53404+ return 1;
53405+ else
53406+ return 0;
53407+}
53408+
53409+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
53410+{
53411+ struct conn_table_entry **match;
53412+ unsigned int index;
53413+
53414+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
53415+ sig->gr_sport, sig->gr_dport,
53416+ gr_conn_table_size);
53417+
53418+ newent->sig = sig;
53419+
53420+ match = &gr_conn_table[index];
53421+ newent->next = *match;
53422+ *match = newent;
53423+
53424+ return;
53425+}
53426+
53427+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
53428+{
53429+ struct conn_table_entry *match, *last = NULL;
53430+ unsigned int index;
53431+
53432+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
53433+ sig->gr_sport, sig->gr_dport,
53434+ gr_conn_table_size);
53435+
53436+ match = gr_conn_table[index];
53437+ while (match && !conn_match(match->sig,
53438+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
53439+ sig->gr_dport)) {
53440+ last = match;
53441+ match = match->next;
53442+ }
53443+
53444+ if (match) {
53445+ if (last)
53446+ last->next = match->next;
53447+ else
53448+ gr_conn_table[index] = NULL;
53449+ kfree(match);
53450+ }
53451+
53452+ return;
53453+}
53454+
53455+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
53456+ __u16 sport, __u16 dport)
53457+{
53458+ struct conn_table_entry *match;
53459+ unsigned int index;
53460+
53461+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
53462+
53463+ match = gr_conn_table[index];
53464+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
53465+ match = match->next;
53466+
53467+ if (match)
53468+ return match->sig;
53469+ else
53470+ return NULL;
53471+}
53472+
53473+#endif
53474+
53475+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
53476+{
53477+#ifdef CONFIG_GRKERNSEC
53478+ struct signal_struct *sig = task->signal;
53479+ struct conn_table_entry *newent;
53480+
53481+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
53482+ if (newent == NULL)
53483+ return;
53484+ /* no bh lock needed since we are called with bh disabled */
53485+ spin_lock(&gr_conn_table_lock);
53486+ gr_del_task_from_ip_table_nolock(sig);
53487+ sig->gr_saddr = inet->inet_rcv_saddr;
53488+ sig->gr_daddr = inet->inet_daddr;
53489+ sig->gr_sport = inet->inet_sport;
53490+ sig->gr_dport = inet->inet_dport;
53491+ gr_add_to_task_ip_table_nolock(sig, newent);
53492+ spin_unlock(&gr_conn_table_lock);
53493+#endif
53494+ return;
53495+}
53496+
53497+void gr_del_task_from_ip_table(struct task_struct *task)
53498+{
53499+#ifdef CONFIG_GRKERNSEC
53500+ spin_lock_bh(&gr_conn_table_lock);
53501+ gr_del_task_from_ip_table_nolock(task->signal);
53502+ spin_unlock_bh(&gr_conn_table_lock);
53503+#endif
53504+ return;
53505+}
53506+
53507+void
53508+gr_attach_curr_ip(const struct sock *sk)
53509+{
53510+#ifdef CONFIG_GRKERNSEC
53511+ struct signal_struct *p, *set;
53512+ const struct inet_sock *inet = inet_sk(sk);
53513+
53514+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
53515+ return;
53516+
53517+ set = current->signal;
53518+
53519+ spin_lock_bh(&gr_conn_table_lock);
53520+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
53521+ inet->inet_dport, inet->inet_sport);
53522+ if (unlikely(p != NULL)) {
53523+ set->curr_ip = p->curr_ip;
53524+ set->used_accept = 1;
53525+ gr_del_task_from_ip_table_nolock(p);
53526+ spin_unlock_bh(&gr_conn_table_lock);
53527+ return;
53528+ }
53529+ spin_unlock_bh(&gr_conn_table_lock);
53530+
53531+ set->curr_ip = inet->inet_daddr;
53532+ set->used_accept = 1;
53533+#endif
53534+ return;
53535+}
53536+
53537+int
53538+gr_handle_sock_all(const int family, const int type, const int protocol)
53539+{
53540+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
53541+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
53542+ (family != AF_UNIX)) {
53543+ if (family == AF_INET)
53544+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
53545+ else
53546+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
53547+ return -EACCES;
53548+ }
53549+#endif
53550+ return 0;
53551+}
53552+
53553+int
53554+gr_handle_sock_server(const struct sockaddr *sck)
53555+{
53556+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
53557+ if (grsec_enable_socket_server &&
53558+ in_group_p(grsec_socket_server_gid) &&
53559+ sck && (sck->sa_family != AF_UNIX) &&
53560+ (sck->sa_family != AF_LOCAL)) {
53561+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
53562+ return -EACCES;
53563+ }
53564+#endif
53565+ return 0;
53566+}
53567+
53568+int
53569+gr_handle_sock_server_other(const struct sock *sck)
53570+{
53571+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
53572+ if (grsec_enable_socket_server &&
53573+ in_group_p(grsec_socket_server_gid) &&
53574+ sck && (sck->sk_family != AF_UNIX) &&
53575+ (sck->sk_family != AF_LOCAL)) {
53576+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
53577+ return -EACCES;
53578+ }
53579+#endif
53580+ return 0;
53581+}
53582+
53583+int
53584+gr_handle_sock_client(const struct sockaddr *sck)
53585+{
53586+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
53587+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
53588+ sck && (sck->sa_family != AF_UNIX) &&
53589+ (sck->sa_family != AF_LOCAL)) {
53590+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
53591+ return -EACCES;
53592+ }
53593+#endif
53594+ return 0;
53595+}
53596diff -urNp linux-3.0.4/grsecurity/grsec_sysctl.c linux-3.0.4/grsecurity/grsec_sysctl.c
53597--- linux-3.0.4/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
53598+++ linux-3.0.4/grsecurity/grsec_sysctl.c 2011-08-25 17:26:15.000000000 -0400
53599@@ -0,0 +1,433 @@
53600+#include <linux/kernel.h>
53601+#include <linux/sched.h>
53602+#include <linux/sysctl.h>
53603+#include <linux/grsecurity.h>
53604+#include <linux/grinternal.h>
53605+
53606+int
53607+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
53608+{
53609+#ifdef CONFIG_GRKERNSEC_SYSCTL
53610+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
53611+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
53612+ return -EACCES;
53613+ }
53614+#endif
53615+ return 0;
53616+}
53617+
53618+#ifdef CONFIG_GRKERNSEC_ROFS
53619+static int __maybe_unused one = 1;
53620+#endif
53621+
53622+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
53623+struct ctl_table grsecurity_table[] = {
53624+#ifdef CONFIG_GRKERNSEC_SYSCTL
53625+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
53626+#ifdef CONFIG_GRKERNSEC_IO
53627+ {
53628+ .procname = "disable_priv_io",
53629+ .data = &grsec_disable_privio,
53630+ .maxlen = sizeof(int),
53631+ .mode = 0600,
53632+ .proc_handler = &proc_dointvec,
53633+ },
53634+#endif
53635+#endif
53636+#ifdef CONFIG_GRKERNSEC_LINK
53637+ {
53638+ .procname = "linking_restrictions",
53639+ .data = &grsec_enable_link,
53640+ .maxlen = sizeof(int),
53641+ .mode = 0600,
53642+ .proc_handler = &proc_dointvec,
53643+ },
53644+#endif
53645+#ifdef CONFIG_GRKERNSEC_BRUTE
53646+ {
53647+ .procname = "deter_bruteforce",
53648+ .data = &grsec_enable_brute,
53649+ .maxlen = sizeof(int),
53650+ .mode = 0600,
53651+ .proc_handler = &proc_dointvec,
53652+ },
53653+#endif
53654+#ifdef CONFIG_GRKERNSEC_FIFO
53655+ {
53656+ .procname = "fifo_restrictions",
53657+ .data = &grsec_enable_fifo,
53658+ .maxlen = sizeof(int),
53659+ .mode = 0600,
53660+ .proc_handler = &proc_dointvec,
53661+ },
53662+#endif
53663+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
53664+ {
53665+ .procname = "ip_blackhole",
53666+ .data = &grsec_enable_blackhole,
53667+ .maxlen = sizeof(int),
53668+ .mode = 0600,
53669+ .proc_handler = &proc_dointvec,
53670+ },
53671+ {
53672+ .procname = "lastack_retries",
53673+ .data = &grsec_lastack_retries,
53674+ .maxlen = sizeof(int),
53675+ .mode = 0600,
53676+ .proc_handler = &proc_dointvec,
53677+ },
53678+#endif
53679+#ifdef CONFIG_GRKERNSEC_EXECLOG
53680+ {
53681+ .procname = "exec_logging",
53682+ .data = &grsec_enable_execlog,
53683+ .maxlen = sizeof(int),
53684+ .mode = 0600,
53685+ .proc_handler = &proc_dointvec,
53686+ },
53687+#endif
53688+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
53689+ {
53690+ .procname = "rwxmap_logging",
53691+ .data = &grsec_enable_log_rwxmaps,
53692+ .maxlen = sizeof(int),
53693+ .mode = 0600,
53694+ .proc_handler = &proc_dointvec,
53695+ },
53696+#endif
53697+#ifdef CONFIG_GRKERNSEC_SIGNAL
53698+ {
53699+ .procname = "signal_logging",
53700+ .data = &grsec_enable_signal,
53701+ .maxlen = sizeof(int),
53702+ .mode = 0600,
53703+ .proc_handler = &proc_dointvec,
53704+ },
53705+#endif
53706+#ifdef CONFIG_GRKERNSEC_FORKFAIL
53707+ {
53708+ .procname = "forkfail_logging",
53709+ .data = &grsec_enable_forkfail,
53710+ .maxlen = sizeof(int),
53711+ .mode = 0600,
53712+ .proc_handler = &proc_dointvec,
53713+ },
53714+#endif
53715+#ifdef CONFIG_GRKERNSEC_TIME
53716+ {
53717+ .procname = "timechange_logging",
53718+ .data = &grsec_enable_time,
53719+ .maxlen = sizeof(int),
53720+ .mode = 0600,
53721+ .proc_handler = &proc_dointvec,
53722+ },
53723+#endif
53724+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
53725+ {
53726+ .procname = "chroot_deny_shmat",
53727+ .data = &grsec_enable_chroot_shmat,
53728+ .maxlen = sizeof(int),
53729+ .mode = 0600,
53730+ .proc_handler = &proc_dointvec,
53731+ },
53732+#endif
53733+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
53734+ {
53735+ .procname = "chroot_deny_unix",
53736+ .data = &grsec_enable_chroot_unix,
53737+ .maxlen = sizeof(int),
53738+ .mode = 0600,
53739+ .proc_handler = &proc_dointvec,
53740+ },
53741+#endif
53742+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
53743+ {
53744+ .procname = "chroot_deny_mount",
53745+ .data = &grsec_enable_chroot_mount,
53746+ .maxlen = sizeof(int),
53747+ .mode = 0600,
53748+ .proc_handler = &proc_dointvec,
53749+ },
53750+#endif
53751+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
53752+ {
53753+ .procname = "chroot_deny_fchdir",
53754+ .data = &grsec_enable_chroot_fchdir,
53755+ .maxlen = sizeof(int),
53756+ .mode = 0600,
53757+ .proc_handler = &proc_dointvec,
53758+ },
53759+#endif
53760+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
53761+ {
53762+ .procname = "chroot_deny_chroot",
53763+ .data = &grsec_enable_chroot_double,
53764+ .maxlen = sizeof(int),
53765+ .mode = 0600,
53766+ .proc_handler = &proc_dointvec,
53767+ },
53768+#endif
53769+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
53770+ {
53771+ .procname = "chroot_deny_pivot",
53772+ .data = &grsec_enable_chroot_pivot,
53773+ .maxlen = sizeof(int),
53774+ .mode = 0600,
53775+ .proc_handler = &proc_dointvec,
53776+ },
53777+#endif
53778+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
53779+ {
53780+ .procname = "chroot_enforce_chdir",
53781+ .data = &grsec_enable_chroot_chdir,
53782+ .maxlen = sizeof(int),
53783+ .mode = 0600,
53784+ .proc_handler = &proc_dointvec,
53785+ },
53786+#endif
53787+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
53788+ {
53789+ .procname = "chroot_deny_chmod",
53790+ .data = &grsec_enable_chroot_chmod,
53791+ .maxlen = sizeof(int),
53792+ .mode = 0600,
53793+ .proc_handler = &proc_dointvec,
53794+ },
53795+#endif
53796+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
53797+ {
53798+ .procname = "chroot_deny_mknod",
53799+ .data = &grsec_enable_chroot_mknod,
53800+ .maxlen = sizeof(int),
53801+ .mode = 0600,
53802+ .proc_handler = &proc_dointvec,
53803+ },
53804+#endif
53805+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
53806+ {
53807+ .procname = "chroot_restrict_nice",
53808+ .data = &grsec_enable_chroot_nice,
53809+ .maxlen = sizeof(int),
53810+ .mode = 0600,
53811+ .proc_handler = &proc_dointvec,
53812+ },
53813+#endif
53814+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
53815+ {
53816+ .procname = "chroot_execlog",
53817+ .data = &grsec_enable_chroot_execlog,
53818+ .maxlen = sizeof(int),
53819+ .mode = 0600,
53820+ .proc_handler = &proc_dointvec,
53821+ },
53822+#endif
53823+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
53824+ {
53825+ .procname = "chroot_caps",
53826+ .data = &grsec_enable_chroot_caps,
53827+ .maxlen = sizeof(int),
53828+ .mode = 0600,
53829+ .proc_handler = &proc_dointvec,
53830+ },
53831+#endif
53832+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
53833+ {
53834+ .procname = "chroot_deny_sysctl",
53835+ .data = &grsec_enable_chroot_sysctl,
53836+ .maxlen = sizeof(int),
53837+ .mode = 0600,
53838+ .proc_handler = &proc_dointvec,
53839+ },
53840+#endif
53841+#ifdef CONFIG_GRKERNSEC_TPE
53842+ {
53843+ .procname = "tpe",
53844+ .data = &grsec_enable_tpe,
53845+ .maxlen = sizeof(int),
53846+ .mode = 0600,
53847+ .proc_handler = &proc_dointvec,
53848+ },
53849+ {
53850+ .procname = "tpe_gid",
53851+ .data = &grsec_tpe_gid,
53852+ .maxlen = sizeof(int),
53853+ .mode = 0600,
53854+ .proc_handler = &proc_dointvec,
53855+ },
53856+#endif
53857+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
53858+ {
53859+ .procname = "tpe_invert",
53860+ .data = &grsec_enable_tpe_invert,
53861+ .maxlen = sizeof(int),
53862+ .mode = 0600,
53863+ .proc_handler = &proc_dointvec,
53864+ },
53865+#endif
53866+#ifdef CONFIG_GRKERNSEC_TPE_ALL
53867+ {
53868+ .procname = "tpe_restrict_all",
53869+ .data = &grsec_enable_tpe_all,
53870+ .maxlen = sizeof(int),
53871+ .mode = 0600,
53872+ .proc_handler = &proc_dointvec,
53873+ },
53874+#endif
53875+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
53876+ {
53877+ .procname = "socket_all",
53878+ .data = &grsec_enable_socket_all,
53879+ .maxlen = sizeof(int),
53880+ .mode = 0600,
53881+ .proc_handler = &proc_dointvec,
53882+ },
53883+ {
53884+ .procname = "socket_all_gid",
53885+ .data = &grsec_socket_all_gid,
53886+ .maxlen = sizeof(int),
53887+ .mode = 0600,
53888+ .proc_handler = &proc_dointvec,
53889+ },
53890+#endif
53891+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
53892+ {
53893+ .procname = "socket_client",
53894+ .data = &grsec_enable_socket_client,
53895+ .maxlen = sizeof(int),
53896+ .mode = 0600,
53897+ .proc_handler = &proc_dointvec,
53898+ },
53899+ {
53900+ .procname = "socket_client_gid",
53901+ .data = &grsec_socket_client_gid,
53902+ .maxlen = sizeof(int),
53903+ .mode = 0600,
53904+ .proc_handler = &proc_dointvec,
53905+ },
53906+#endif
53907+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
53908+ {
53909+ .procname = "socket_server",
53910+ .data = &grsec_enable_socket_server,
53911+ .maxlen = sizeof(int),
53912+ .mode = 0600,
53913+ .proc_handler = &proc_dointvec,
53914+ },
53915+ {
53916+ .procname = "socket_server_gid",
53917+ .data = &grsec_socket_server_gid,
53918+ .maxlen = sizeof(int),
53919+ .mode = 0600,
53920+ .proc_handler = &proc_dointvec,
53921+ },
53922+#endif
53923+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
53924+ {
53925+ .procname = "audit_group",
53926+ .data = &grsec_enable_group,
53927+ .maxlen = sizeof(int),
53928+ .mode = 0600,
53929+ .proc_handler = &proc_dointvec,
53930+ },
53931+ {
53932+ .procname = "audit_gid",
53933+ .data = &grsec_audit_gid,
53934+ .maxlen = sizeof(int),
53935+ .mode = 0600,
53936+ .proc_handler = &proc_dointvec,
53937+ },
53938+#endif
53939+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
53940+ {
53941+ .procname = "audit_chdir",
53942+ .data = &grsec_enable_chdir,
53943+ .maxlen = sizeof(int),
53944+ .mode = 0600,
53945+ .proc_handler = &proc_dointvec,
53946+ },
53947+#endif
53948+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
53949+ {
53950+ .procname = "audit_mount",
53951+ .data = &grsec_enable_mount,
53952+ .maxlen = sizeof(int),
53953+ .mode = 0600,
53954+ .proc_handler = &proc_dointvec,
53955+ },
53956+#endif
53957+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
53958+ {
53959+ .procname = "audit_textrel",
53960+ .data = &grsec_enable_audit_textrel,
53961+ .maxlen = sizeof(int),
53962+ .mode = 0600,
53963+ .proc_handler = &proc_dointvec,
53964+ },
53965+#endif
53966+#ifdef CONFIG_GRKERNSEC_DMESG
53967+ {
53968+ .procname = "dmesg",
53969+ .data = &grsec_enable_dmesg,
53970+ .maxlen = sizeof(int),
53971+ .mode = 0600,
53972+ .proc_handler = &proc_dointvec,
53973+ },
53974+#endif
53975+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53976+ {
53977+ .procname = "chroot_findtask",
53978+ .data = &grsec_enable_chroot_findtask,
53979+ .maxlen = sizeof(int),
53980+ .mode = 0600,
53981+ .proc_handler = &proc_dointvec,
53982+ },
53983+#endif
53984+#ifdef CONFIG_GRKERNSEC_RESLOG
53985+ {
53986+ .procname = "resource_logging",
53987+ .data = &grsec_resource_logging,
53988+ .maxlen = sizeof(int),
53989+ .mode = 0600,
53990+ .proc_handler = &proc_dointvec,
53991+ },
53992+#endif
53993+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
53994+ {
53995+ .procname = "audit_ptrace",
53996+ .data = &grsec_enable_audit_ptrace,
53997+ .maxlen = sizeof(int),
53998+ .mode = 0600,
53999+ .proc_handler = &proc_dointvec,
54000+ },
54001+#endif
54002+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
54003+ {
54004+ .procname = "harden_ptrace",
54005+ .data = &grsec_enable_harden_ptrace,
54006+ .maxlen = sizeof(int),
54007+ .mode = 0600,
54008+ .proc_handler = &proc_dointvec,
54009+ },
54010+#endif
54011+ {
54012+ .procname = "grsec_lock",
54013+ .data = &grsec_lock,
54014+ .maxlen = sizeof(int),
54015+ .mode = 0600,
54016+ .proc_handler = &proc_dointvec,
54017+ },
54018+#endif
54019+#ifdef CONFIG_GRKERNSEC_ROFS
54020+ {
54021+ .procname = "romount_protect",
54022+ .data = &grsec_enable_rofs,
54023+ .maxlen = sizeof(int),
54024+ .mode = 0600,
54025+ .proc_handler = &proc_dointvec_minmax,
54026+ .extra1 = &one,
54027+ .extra2 = &one,
54028+ },
54029+#endif
54030+ { }
54031+};
54032+#endif
54033diff -urNp linux-3.0.4/grsecurity/grsec_time.c linux-3.0.4/grsecurity/grsec_time.c
54034--- linux-3.0.4/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
54035+++ linux-3.0.4/grsecurity/grsec_time.c 2011-08-23 21:48:14.000000000 -0400
54036@@ -0,0 +1,16 @@
54037+#include <linux/kernel.h>
54038+#include <linux/sched.h>
54039+#include <linux/grinternal.h>
54040+#include <linux/module.h>
54041+
54042+void
54043+gr_log_timechange(void)
54044+{
54045+#ifdef CONFIG_GRKERNSEC_TIME
54046+ if (grsec_enable_time)
54047+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
54048+#endif
54049+ return;
54050+}
54051+
54052+EXPORT_SYMBOL(gr_log_timechange);
54053diff -urNp linux-3.0.4/grsecurity/grsec_tpe.c linux-3.0.4/grsecurity/grsec_tpe.c
54054--- linux-3.0.4/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
54055+++ linux-3.0.4/grsecurity/grsec_tpe.c 2011-08-23 21:48:14.000000000 -0400
54056@@ -0,0 +1,39 @@
54057+#include <linux/kernel.h>
54058+#include <linux/sched.h>
54059+#include <linux/file.h>
54060+#include <linux/fs.h>
54061+#include <linux/grinternal.h>
54062+
54063+extern int gr_acl_tpe_check(void);
54064+
54065+int
54066+gr_tpe_allow(const struct file *file)
54067+{
54068+#ifdef CONFIG_GRKERNSEC
54069+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
54070+ const struct cred *cred = current_cred();
54071+
54072+ if (cred->uid && ((grsec_enable_tpe &&
54073+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
54074+ ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
54075+ (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
54076+#else
54077+ in_group_p(grsec_tpe_gid)
54078+#endif
54079+ ) || gr_acl_tpe_check()) &&
54080+ (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
54081+ (inode->i_mode & S_IWOTH))))) {
54082+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
54083+ return 0;
54084+ }
54085+#ifdef CONFIG_GRKERNSEC_TPE_ALL
54086+ if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
54087+ ((inode->i_uid && (inode->i_uid != cred->uid)) ||
54088+ (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
54089+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
54090+ return 0;
54091+ }
54092+#endif
54093+#endif
54094+ return 1;
54095+}
54096diff -urNp linux-3.0.4/grsecurity/grsum.c linux-3.0.4/grsecurity/grsum.c
54097--- linux-3.0.4/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
54098+++ linux-3.0.4/grsecurity/grsum.c 2011-08-23 21:48:14.000000000 -0400
54099@@ -0,0 +1,61 @@
54100+#include <linux/err.h>
54101+#include <linux/kernel.h>
54102+#include <linux/sched.h>
54103+#include <linux/mm.h>
54104+#include <linux/scatterlist.h>
54105+#include <linux/crypto.h>
54106+#include <linux/gracl.h>
54107+
54108+
54109+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
54110+#error "crypto and sha256 must be built into the kernel"
54111+#endif
54112+
54113+int
54114+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
54115+{
54116+ char *p;
54117+ struct crypto_hash *tfm;
54118+ struct hash_desc desc;
54119+ struct scatterlist sg;
54120+ unsigned char temp_sum[GR_SHA_LEN];
54121+ volatile int retval = 0;
54122+ volatile int dummy = 0;
54123+ unsigned int i;
54124+
54125+ sg_init_table(&sg, 1);
54126+
54127+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
54128+ if (IS_ERR(tfm)) {
54129+ /* should never happen, since sha256 should be built in */
54130+ return 1;
54131+ }
54132+
54133+ desc.tfm = tfm;
54134+ desc.flags = 0;
54135+
54136+ crypto_hash_init(&desc);
54137+
54138+ p = salt;
54139+ sg_set_buf(&sg, p, GR_SALT_LEN);
54140+ crypto_hash_update(&desc, &sg, sg.length);
54141+
54142+ p = entry->pw;
54143+ sg_set_buf(&sg, p, strlen(p));
54144+
54145+ crypto_hash_update(&desc, &sg, sg.length);
54146+
54147+ crypto_hash_final(&desc, temp_sum);
54148+
54149+ memset(entry->pw, 0, GR_PW_LEN);
54150+
54151+ for (i = 0; i < GR_SHA_LEN; i++)
54152+ if (sum[i] != temp_sum[i])
54153+ retval = 1;
54154+ else
54155+ dummy = 1; // waste a cycle
54156+
54157+ crypto_free_hash(tfm);
54158+
54159+ return retval;
54160+}
54161diff -urNp linux-3.0.4/grsecurity/Kconfig linux-3.0.4/grsecurity/Kconfig
54162--- linux-3.0.4/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
54163+++ linux-3.0.4/grsecurity/Kconfig 2011-09-15 00:00:57.000000000 -0400
54164@@ -0,0 +1,1038 @@
54165+#
54166+# grecurity configuration
54167+#
54168+
54169+menu "Grsecurity"
54170+
54171+config GRKERNSEC
54172+ bool "Grsecurity"
54173+ select CRYPTO
54174+ select CRYPTO_SHA256
54175+ help
54176+ If you say Y here, you will be able to configure many features
54177+ that will enhance the security of your system. It is highly
54178+ recommended that you say Y here and read through the help
54179+ for each option so that you fully understand the features and
54180+ can evaluate their usefulness for your machine.
54181+
54182+choice
54183+ prompt "Security Level"
54184+ depends on GRKERNSEC
54185+ default GRKERNSEC_CUSTOM
54186+
54187+config GRKERNSEC_LOW
54188+ bool "Low"
54189+ select GRKERNSEC_LINK
54190+ select GRKERNSEC_FIFO
54191+ select GRKERNSEC_RANDNET
54192+ select GRKERNSEC_DMESG
54193+ select GRKERNSEC_CHROOT
54194+ select GRKERNSEC_CHROOT_CHDIR
54195+
54196+ help
54197+ If you choose this option, several of the grsecurity options will
54198+ be enabled that will give you greater protection against a number
54199+ of attacks, while assuring that none of your software will have any
54200+ conflicts with the additional security measures. If you run a lot
54201+ of unusual software, or you are having problems with the higher
54202+ security levels, you should say Y here. With this option, the
54203+ following features are enabled:
54204+
54205+ - Linking restrictions
54206+ - FIFO restrictions
54207+ - Restricted dmesg
54208+ - Enforced chdir("/") on chroot
54209+ - Runtime module disabling
54210+
54211+config GRKERNSEC_MEDIUM
54212+ bool "Medium"
54213+ select PAX
54214+ select PAX_EI_PAX
54215+ select PAX_PT_PAX_FLAGS
54216+ select PAX_HAVE_ACL_FLAGS
54217+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
54218+ select GRKERNSEC_CHROOT
54219+ select GRKERNSEC_CHROOT_SYSCTL
54220+ select GRKERNSEC_LINK
54221+ select GRKERNSEC_FIFO
54222+ select GRKERNSEC_DMESG
54223+ select GRKERNSEC_RANDNET
54224+ select GRKERNSEC_FORKFAIL
54225+ select GRKERNSEC_TIME
54226+ select GRKERNSEC_SIGNAL
54227+ select GRKERNSEC_CHROOT
54228+ select GRKERNSEC_CHROOT_UNIX
54229+ select GRKERNSEC_CHROOT_MOUNT
54230+ select GRKERNSEC_CHROOT_PIVOT
54231+ select GRKERNSEC_CHROOT_DOUBLE
54232+ select GRKERNSEC_CHROOT_CHDIR
54233+ select GRKERNSEC_CHROOT_MKNOD
54234+ select GRKERNSEC_PROC
54235+ select GRKERNSEC_PROC_USERGROUP
54236+ select PAX_RANDUSTACK
54237+ select PAX_ASLR
54238+ select PAX_RANDMMAP
54239+ select PAX_REFCOUNT if (X86 || SPARC64)
54240+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
54241+
54242+ help
54243+ If you say Y here, several features in addition to those included
54244+ in the low additional security level will be enabled. These
54245+ features provide even more security to your system, though in rare
54246+ cases they may be incompatible with very old or poorly written
54247+ software. If you enable this option, make sure that your auth
54248+ service (identd) is running as gid 1001. With this option,
54249+ the following features (in addition to those provided in the
54250+ low additional security level) will be enabled:
54251+
54252+ - Failed fork logging
54253+ - Time change logging
54254+ - Signal logging
54255+ - Deny mounts in chroot
54256+ - Deny double chrooting
54257+ - Deny sysctl writes in chroot
54258+ - Deny mknod in chroot
54259+ - Deny access to abstract AF_UNIX sockets out of chroot
54260+ - Deny pivot_root in chroot
54261+ - Denied writes of /dev/kmem, /dev/mem, and /dev/port
54262+ - /proc restrictions with special GID set to 10 (usually wheel)
54263+ - Address Space Layout Randomization (ASLR)
54264+ - Prevent exploitation of most refcount overflows
54265+ - Bounds checking of copying between the kernel and userland
54266+
54267+config GRKERNSEC_HIGH
54268+ bool "High"
54269+ select GRKERNSEC_LINK
54270+ select GRKERNSEC_FIFO
54271+ select GRKERNSEC_DMESG
54272+ select GRKERNSEC_FORKFAIL
54273+ select GRKERNSEC_TIME
54274+ select GRKERNSEC_SIGNAL
54275+ select GRKERNSEC_CHROOT
54276+ select GRKERNSEC_CHROOT_SHMAT
54277+ select GRKERNSEC_CHROOT_UNIX
54278+ select GRKERNSEC_CHROOT_MOUNT
54279+ select GRKERNSEC_CHROOT_FCHDIR
54280+ select GRKERNSEC_CHROOT_PIVOT
54281+ select GRKERNSEC_CHROOT_DOUBLE
54282+ select GRKERNSEC_CHROOT_CHDIR
54283+ select GRKERNSEC_CHROOT_MKNOD
54284+ select GRKERNSEC_CHROOT_CAPS
54285+ select GRKERNSEC_CHROOT_SYSCTL
54286+ select GRKERNSEC_CHROOT_FINDTASK
54287+ select GRKERNSEC_SYSFS_RESTRICT
54288+ select GRKERNSEC_PROC
54289+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
54290+ select GRKERNSEC_HIDESYM
54291+ select GRKERNSEC_BRUTE
54292+ select GRKERNSEC_PROC_USERGROUP
54293+ select GRKERNSEC_KMEM
54294+ select GRKERNSEC_RESLOG
54295+ select GRKERNSEC_RANDNET
54296+ select GRKERNSEC_PROC_ADD
54297+ select GRKERNSEC_CHROOT_CHMOD
54298+ select GRKERNSEC_CHROOT_NICE
54299+ select GRKERNSEC_AUDIT_MOUNT
54300+ select GRKERNSEC_MODHARDEN if (MODULES)
54301+ select GRKERNSEC_HARDEN_PTRACE
54302+ select GRKERNSEC_VM86 if (X86_32)
54303+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
54304+ select PAX
54305+ select PAX_RANDUSTACK
54306+ select PAX_ASLR
54307+ select PAX_RANDMMAP
54308+ select PAX_NOEXEC
54309+ select PAX_MPROTECT
54310+ select PAX_EI_PAX
54311+ select PAX_PT_PAX_FLAGS
54312+ select PAX_HAVE_ACL_FLAGS
54313+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
54314+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
54315+ select PAX_RANDKSTACK if (X86_TSC && X86)
54316+ select PAX_SEGMEXEC if (X86_32)
54317+ select PAX_PAGEEXEC
54318+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
54319+ select PAX_EMUTRAMP if (PARISC)
54320+ select PAX_EMUSIGRT if (PARISC)
54321+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
54322+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
54323+ select PAX_REFCOUNT if (X86 || SPARC64)
54324+ select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
54325+ help
54326+ If you say Y here, many of the features of grsecurity will be
54327+ enabled, which will protect you against many kinds of attacks
54328+ against your system. The heightened security comes at a cost
54329+ of an increased chance of incompatibilities with rare software
54330+ on your machine. Since this security level enables PaX, you should
54331+ view <http://pax.grsecurity.net> and read about the PaX
54332+ project. While you are there, download chpax and run it on
54333+ binaries that cause problems with PaX. Also remember that
54334+ since the /proc restrictions are enabled, you must run your
54335+ identd as gid 1001. This security level enables the following
54336+ features in addition to those listed in the low and medium
54337+ security levels:
54338+
54339+ - Additional /proc restrictions
54340+ - Chmod restrictions in chroot
54341+ - No signals, ptrace, or viewing of processes outside of chroot
54342+ - Capability restrictions in chroot
54343+ - Deny fchdir out of chroot
54344+ - Priority restrictions in chroot
54345+ - Segmentation-based implementation of PaX
54346+ - Mprotect restrictions
54347+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
54348+ - Kernel stack randomization
54349+ - Mount/unmount/remount logging
54350+ - Kernel symbol hiding
54351+ - Prevention of memory exhaustion-based exploits
54352+ - Hardening of module auto-loading
54353+ - Ptrace restrictions
54354+ - Restricted vm86 mode
54355+ - Restricted sysfs/debugfs
54356+ - Active kernel exploit response
54357+
54358+config GRKERNSEC_CUSTOM
54359+ bool "Custom"
54360+ help
54361+ If you say Y here, you will be able to configure every grsecurity
54362+ option, which allows you to enable many more features that aren't
54363+ covered in the basic security levels. These additional features
54364+ include TPE, socket restrictions, and the sysctl system for
54365+ grsecurity. It is advised that you read through the help for
54366+ each option to determine its usefulness in your situation.
54367+
54368+endchoice
54369+
54370+menu "Address Space Protection"
54371+depends on GRKERNSEC
54372+
54373+config GRKERNSEC_KMEM
54374+ bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
54375+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
54376+ help
54377+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
54378+ be written to via mmap or otherwise to modify the running kernel.
54379+ /dev/port will also not be allowed to be opened. If you have module
54380+ support disabled, enabling this will close up four ways that are
54381+ currently used to insert malicious code into the running kernel.
54382+ Even with all these features enabled, we still highly recommend that
54383+ you use the RBAC system, as it is still possible for an attacker to
54384+ modify the running kernel through privileged I/O granted by ioperm/iopl.
54385+ If you are not using XFree86, you may be able to stop this additional
54386+ case by enabling the 'Disable privileged I/O' option. Though nothing
54387+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
54388+ but only to video memory, which is the only writing we allow in this
54389+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
54390+ not be allowed to mprotect it with PROT_WRITE later.
54391+ It is highly recommended that you say Y here if you meet all the
54392+ conditions above.
54393+
54394+config GRKERNSEC_VM86
54395+ bool "Restrict VM86 mode"
54396+ depends on X86_32
54397+
54398+ help
54399+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
54400+ make use of a special execution mode on 32bit x86 processors called
54401+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
54402+ video cards and will still work with this option enabled. The purpose
54403+ of the option is to prevent exploitation of emulation errors in
54404+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
54405+ Nearly all users should be able to enable this option.
54406+
54407+config GRKERNSEC_IO
54408+ bool "Disable privileged I/O"
54409+ depends on X86
54410+ select RTC_CLASS
54411+ select RTC_INTF_DEV
54412+ select RTC_DRV_CMOS
54413+
54414+ help
54415+ If you say Y here, all ioperm and iopl calls will return an error.
54416+ Ioperm and iopl can be used to modify the running kernel.
54417+ Unfortunately, some programs need this access to operate properly,
54418+ the most notable of which are XFree86 and hwclock. hwclock can be
54419+ remedied by having RTC support in the kernel, so real-time
54420+ clock support is enabled if this option is enabled, to ensure
54421+ that hwclock operates correctly. XFree86 still will not
54422+ operate correctly with this option enabled, so DO NOT CHOOSE Y
54423+ IF YOU USE XFree86. If you use XFree86 and you still want to
54424+ protect your kernel against modification, use the RBAC system.
54425+
54426+config GRKERNSEC_PROC_MEMMAP
54427+ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
54428+ default y if (PAX_NOEXEC || PAX_ASLR)
54429+ depends on PAX_NOEXEC || PAX_ASLR
54430+ help
54431+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
54432+ give no information about the addresses of its mappings if
54433+ PaX features that rely on random addresses are enabled on the task.
54434+ If you use PaX it is greatly recommended that you say Y here as it
54435+ closes up a hole that makes the full ASLR useless for suid
54436+ binaries.
54437+
54438+config GRKERNSEC_BRUTE
54439+ bool "Deter exploit bruteforcing"
54440+ help
54441+ If you say Y here, attempts to bruteforce exploits against forking
54442+ daemons such as apache or sshd, as well as against suid/sgid binaries
54443+ will be deterred. When a child of a forking daemon is killed by PaX
54444+ or crashes due to an illegal instruction or other suspicious signal,
54445+ the parent process will be delayed 30 seconds upon every subsequent
54446+ fork until the administrator is able to assess the situation and
54447+ restart the daemon.
54448+ In the suid/sgid case, the attempt is logged, the user has all their
54449+ processes terminated, and they are prevented from executing any further
54450+ processes for 15 minutes.
54451+ It is recommended that you also enable signal logging in the auditing
54452+ section so that logs are generated when a process triggers a suspicious
54453+ signal.
54454+ If the sysctl option is enabled, a sysctl option with name
54455+ "deter_bruteforce" is created.
54456+
54457+
54458+config GRKERNSEC_MODHARDEN
54459+ bool "Harden module auto-loading"
54460+ depends on MODULES
54461+ help
54462+ If you say Y here, module auto-loading in response to use of some
54463+ feature implemented by an unloaded module will be restricted to
54464+ root users. Enabling this option helps defend against attacks
54465+ by unprivileged users who abuse the auto-loading behavior to
54466+ cause a vulnerable module to load that is then exploited.
54467+
54468+ If this option prevents a legitimate use of auto-loading for a
54469+ non-root user, the administrator can execute modprobe manually
54470+ with the exact name of the module mentioned in the alert log.
54471+ Alternatively, the administrator can add the module to the list
54472+ of modules loaded at boot by modifying init scripts.
54473+
54474+ Modification of init scripts will most likely be needed on
54475+ Ubuntu servers with encrypted home directory support enabled,
54476+ as the first non-root user logging in will cause the ecb(aes),
54477+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
54478+
54479+config GRKERNSEC_HIDESYM
54480+ bool "Hide kernel symbols"
54481+ help
54482+ If you say Y here, getting information on loaded modules, and
54483+ displaying all kernel symbols through a syscall will be restricted
54484+ to users with CAP_SYS_MODULE. For software compatibility reasons,
54485+ /proc/kallsyms will be restricted to the root user. The RBAC
54486+ system can hide that entry even from root.
54487+
54488+ This option also prevents leaking of kernel addresses through
54489+ several /proc entries.
54490+
54491+ Note that this option is only effective provided the following
54492+ conditions are met:
54493+ 1) The kernel using grsecurity is not precompiled by some distribution
54494+ 2) You have also enabled GRKERNSEC_DMESG
54495+ 3) You are using the RBAC system and hiding other files such as your
54496+ kernel image and System.map. Alternatively, enabling this option
54497+ causes the permissions on /boot, /lib/modules, and the kernel
54498+ source directory to change at compile time to prevent
54499+ reading by non-root users.
54500+ If the above conditions are met, this option will aid in providing a
54501+ useful protection against local kernel exploitation of overflows
54502+ and arbitrary read/write vulnerabilities.
54503+
54504+config GRKERNSEC_KERN_LOCKOUT
54505+ bool "Active kernel exploit response"
54506+ depends on X86 || ARM || PPC || SPARC
54507+ help
54508+ If you say Y here, when a PaX alert is triggered due to suspicious
54509+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
54510+ or an OOPs occurs due to bad memory accesses, instead of just
54511+ terminating the offending process (and potentially allowing
54512+ a subsequent exploit from the same user), we will take one of two
54513+ actions:
54514+ If the user was root, we will panic the system
54515+ If the user was non-root, we will log the attempt, terminate
54516+ all processes owned by the user, then prevent them from creating
54517+ any new processes until the system is restarted
54518+ This deters repeated kernel exploitation/bruteforcing attempts
54519+ and is useful for later forensics.
54520+
54521+endmenu
54522+menu "Role Based Access Control Options"
54523+depends on GRKERNSEC
54524+
54525+config GRKERNSEC_RBAC_DEBUG
54526+ bool
54527+
54528+config GRKERNSEC_NO_RBAC
54529+ bool "Disable RBAC system"
54530+ help
54531+ If you say Y here, the /dev/grsec device will be removed from the kernel,
54532+ preventing the RBAC system from being enabled. You should only say Y
54533+ here if you have no intention of using the RBAC system, so as to prevent
54534+ an attacker with root access from misusing the RBAC system to hide files
54535+ and processes when loadable module support and /dev/[k]mem have been
54536+ locked down.
54537+
54538+config GRKERNSEC_ACL_HIDEKERN
54539+ bool "Hide kernel processes"
54540+ help
54541+ If you say Y here, all kernel threads will be hidden to all
54542+ processes but those whose subject has the "view hidden processes"
54543+ flag.
54544+
54545+config GRKERNSEC_ACL_MAXTRIES
54546+ int "Maximum tries before password lockout"
54547+ default 3
54548+ help
54549+ This option enforces the maximum number of times a user can attempt
54550+ to authorize themselves with the grsecurity RBAC system before being
54551+ denied the ability to attempt authorization again for a specified time.
54552+ The lower the number, the harder it will be to brute-force a password.
54553+
54554+config GRKERNSEC_ACL_TIMEOUT
54555+ int "Time to wait after max password tries, in seconds"
54556+ default 30
54557+ help
54558+ This option specifies the time the user must wait after attempting to
54559+ authorize to the RBAC system with the maximum number of invalid
54560+ passwords. The higher the number, the harder it will be to brute-force
54561+ a password.
54562+
54563+endmenu
54564+menu "Filesystem Protections"
54565+depends on GRKERNSEC
54566+
54567+config GRKERNSEC_PROC
54568+ bool "Proc restrictions"
54569+ help
54570+ If you say Y here, the permissions of the /proc filesystem
54571+ will be altered to enhance system security and privacy. You MUST
54572+ choose either a user only restriction or a user and group restriction.
54573+ Depending upon the option you choose, you can either restrict users to
54574+ see only the processes they themselves run, or choose a group that can
54575+ view all processes and files normally restricted to root if you choose
54576+ the "restrict to user only" option. NOTE: If you're running identd as
54577+ a non-root user, you will have to run it as the group you specify here.
54578+
54579+config GRKERNSEC_PROC_USER
54580+ bool "Restrict /proc to user only"
54581+ depends on GRKERNSEC_PROC
54582+ help
54583+ If you say Y here, non-root users will only be able to view their own
54584+ processes, and restricts them from viewing network-related information,
54585+ and viewing kernel symbol and module information.
54586+
54587+config GRKERNSEC_PROC_USERGROUP
54588+ bool "Allow special group"
54589+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
54590+ help
54591+ If you say Y here, you will be able to select a group that will be
54592+ able to view all processes and network-related information. If you've
54593+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
54594+ remain hidden. This option is useful if you want to run identd as
54595+ a non-root user.
54596+
54597+config GRKERNSEC_PROC_GID
54598+ int "GID for special group"
54599+ depends on GRKERNSEC_PROC_USERGROUP
54600+ default 1001
54601+
54602+config GRKERNSEC_PROC_ADD
54603+ bool "Additional restrictions"
54604+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
54605+ help
54606+ If you say Y here, additional restrictions will be placed on
54607+ /proc that keep normal users from viewing device information and
54608+ slabinfo information that could be useful for exploits.
54609+
54610+config GRKERNSEC_LINK
54611+ bool "Linking restrictions"
54612+ help
54613+ If you say Y here, /tmp race exploits will be prevented, since users
54614+ will no longer be able to follow symlinks owned by other users in
54615+ world-writable +t directories (e.g. /tmp), unless the owner of the
54616+ symlink is the owner of the directory. users will also not be
54617+ able to hardlink to files they do not own. If the sysctl option is
54618+ enabled, a sysctl option with name "linking_restrictions" is created.
54619+
54620+config GRKERNSEC_FIFO
54621+ bool "FIFO restrictions"
54622+ help
54623+ If you say Y here, users will not be able to write to FIFOs they don't
54624+ own in world-writable +t directories (e.g. /tmp), unless the owner of
54625+ the FIFO is the same owner of the directory it's held in. If the sysctl
54626+ option is enabled, a sysctl option with name "fifo_restrictions" is
54627+ created.
54628+
54629+config GRKERNSEC_SYSFS_RESTRICT
54630+ bool "Sysfs/debugfs restriction"
54631+ depends on SYSFS
54632+ help
54633+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
54634+ any filesystem normally mounted under it (e.g. debugfs) will only
54635+ be accessible by root. These filesystems generally provide access
54636+ to hardware and debug information that isn't appropriate for unprivileged
54637+ users of the system. Sysfs and debugfs have also become a large source
54638+ of new vulnerabilities, ranging from infoleaks to local compromise.
54639+ There has been very little oversight with an eye toward security involved
54640+ in adding new exporters of information to these filesystems, so their
54641+ use is discouraged.
54642+ This option is equivalent to a chmod 0700 of the mount paths.
54643+
54644+config GRKERNSEC_ROFS
54645+ bool "Runtime read-only mount protection"
54646+ help
54647+ If you say Y here, a sysctl option with name "romount_protect" will
54648+ be created. By setting this option to 1 at runtime, filesystems
54649+ will be protected in the following ways:
54650+ * No new writable mounts will be allowed
54651+ * Existing read-only mounts won't be able to be remounted read/write
54652+ * Write operations will be denied on all block devices
54653+ This option acts independently of grsec_lock: once it is set to 1,
54654+ it cannot be turned off. Therefore, please be mindful of the resulting
54655+ behavior if this option is enabled in an init script on a read-only
54656+ filesystem. This feature is mainly intended for secure embedded systems.
54657+
54658+config GRKERNSEC_CHROOT
54659+ bool "Chroot jail restrictions"
54660+ help
54661+ If you say Y here, you will be able to choose several options that will
54662+ make breaking out of a chrooted jail much more difficult. If you
54663+ encounter no software incompatibilities with the following options, it
54664+ is recommended that you enable each one.
54665+
54666+config GRKERNSEC_CHROOT_MOUNT
54667+ bool "Deny mounts"
54668+ depends on GRKERNSEC_CHROOT
54669+ help
54670+ If you say Y here, processes inside a chroot will not be able to
54671+ mount or remount filesystems. If the sysctl option is enabled, a
54672+ sysctl option with name "chroot_deny_mount" is created.
54673+
54674+config GRKERNSEC_CHROOT_DOUBLE
54675+ bool "Deny double-chroots"
54676+ depends on GRKERNSEC_CHROOT
54677+ help
54678+ If you say Y here, processes inside a chroot will not be able to chroot
54679+ again outside the chroot. This is a widely used method of breaking
54680+ out of a chroot jail and should not be allowed. If the sysctl
54681+ option is enabled, a sysctl option with name
54682+ "chroot_deny_chroot" is created.
54683+
54684+config GRKERNSEC_CHROOT_PIVOT
54685+ bool "Deny pivot_root in chroot"
54686+ depends on GRKERNSEC_CHROOT
54687+ help
54688+ If you say Y here, processes inside a chroot will not be able to use
54689+ a function called pivot_root() that was introduced in Linux 2.3.41. It
54690+ works similar to chroot in that it changes the root filesystem. This
54691+ function could be misused in a chrooted process to attempt to break out
54692+ of the chroot, and therefore should not be allowed. If the sysctl
54693+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
54694+ created.
54695+
54696+config GRKERNSEC_CHROOT_CHDIR
54697+ bool "Enforce chdir(\"/\") on all chroots"
54698+ depends on GRKERNSEC_CHROOT
54699+ help
54700+ If you say Y here, the current working directory of all newly-chrooted
54701+ applications will be set to the the root directory of the chroot.
54702+ The man page on chroot(2) states:
54703+ Note that this call does not change the current working
54704+ directory, so that `.' can be outside the tree rooted at
54705+ `/'. In particular, the super-user can escape from a
54706+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
54707+
54708+ It is recommended that you say Y here, since it's not known to break
54709+ any software. If the sysctl option is enabled, a sysctl option with
54710+ name "chroot_enforce_chdir" is created.
54711+
54712+config GRKERNSEC_CHROOT_CHMOD
54713+ bool "Deny (f)chmod +s"
54714+ depends on GRKERNSEC_CHROOT
54715+ help
54716+ If you say Y here, processes inside a chroot will not be able to chmod
54717+ or fchmod files to make them have suid or sgid bits. This protects
54718+ against another published method of breaking a chroot. If the sysctl
54719+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
54720+ created.
54721+
54722+config GRKERNSEC_CHROOT_FCHDIR
54723+ bool "Deny fchdir out of chroot"
54724+ depends on GRKERNSEC_CHROOT
54725+ help
54726+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
54727+ to a file descriptor of the chrooting process that points to a directory
54728+ outside the filesystem will be stopped. If the sysctl option
54729+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
54730+
54731+config GRKERNSEC_CHROOT_MKNOD
54732+ bool "Deny mknod"
54733+ depends on GRKERNSEC_CHROOT
54734+ help
54735+ If you say Y here, processes inside a chroot will not be allowed to
54736+ mknod. The problem with using mknod inside a chroot is that it
54737+ would allow an attacker to create a device entry that is the same
54738+ as one on the physical root of your system, which could be
54739+ anything from the console device to a device for your harddrive (which
54740+ they could then use to wipe the drive or steal data). It is recommended
54741+ that you say Y here, unless you run into software incompatibilities.
54742+ If the sysctl option is enabled, a sysctl option with name
54743+ "chroot_deny_mknod" is created.
54744+
54745+config GRKERNSEC_CHROOT_SHMAT
54746+ bool "Deny shmat() out of chroot"
54747+ depends on GRKERNSEC_CHROOT
54748+ help
54749+ If you say Y here, processes inside a chroot will not be able to attach
54750+ to shared memory segments that were created outside of the chroot jail.
54751+ It is recommended that you say Y here. If the sysctl option is enabled,
54752+ a sysctl option with name "chroot_deny_shmat" is created.
54753+
54754+config GRKERNSEC_CHROOT_UNIX
54755+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
54756+ depends on GRKERNSEC_CHROOT
54757+ help
54758+ If you say Y here, processes inside a chroot will not be able to
54759+ connect to abstract (meaning not belonging to a filesystem) Unix
54760+ domain sockets that were bound outside of a chroot. It is recommended
54761+ that you say Y here. If the sysctl option is enabled, a sysctl option
54762+ with name "chroot_deny_unix" is created.
54763+
54764+config GRKERNSEC_CHROOT_FINDTASK
54765+ bool "Protect outside processes"
54766+ depends on GRKERNSEC_CHROOT
54767+ help
54768+ If you say Y here, processes inside a chroot will not be able to
54769+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
54770+ getsid, or view any process outside of the chroot. If the sysctl
54771+ option is enabled, a sysctl option with name "chroot_findtask" is
54772+ created.
54773+
54774+config GRKERNSEC_CHROOT_NICE
54775+ bool "Restrict priority changes"
54776+ depends on GRKERNSEC_CHROOT
54777+ help
54778+ If you say Y here, processes inside a chroot will not be able to raise
54779+ the priority of processes in the chroot, or alter the priority of
54780+ processes outside the chroot. This provides more security than simply
54781+ removing CAP_SYS_NICE from the process' capability set. If the
54782+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
54783+ is created.
54784+
54785+config GRKERNSEC_CHROOT_SYSCTL
54786+ bool "Deny sysctl writes"
54787+ depends on GRKERNSEC_CHROOT
54788+ help
54789+ If you say Y here, an attacker in a chroot will not be able to
54790+ write to sysctl entries, either by sysctl(2) or through a /proc
54791+ interface. It is strongly recommended that you say Y here. If the
54792+ sysctl option is enabled, a sysctl option with name
54793+ "chroot_deny_sysctl" is created.
54794+
54795+config GRKERNSEC_CHROOT_CAPS
54796+ bool "Capability restrictions"
54797+ depends on GRKERNSEC_CHROOT
54798+ help
54799+ If you say Y here, the capabilities on all processes within a
54800+ chroot jail will be lowered to stop module insertion, raw i/o,
54801+ system and net admin tasks, rebooting the system, modifying immutable
54802+ files, modifying IPC owned by another, and changing the system time.
54803+ This is left an option because it can break some apps. Disable this
54804+ if your chrooted apps are having problems performing those kinds of
54805+ tasks. If the sysctl option is enabled, a sysctl option with
54806+ name "chroot_caps" is created.
54807+
54808+endmenu
54809+menu "Kernel Auditing"
54810+depends on GRKERNSEC
54811+
54812+config GRKERNSEC_AUDIT_GROUP
54813+ bool "Single group for auditing"
54814+ help
54815+ If you say Y here, the exec, chdir, and (un)mount logging features
54816+ will only operate on a group you specify. This option is recommended
54817+ if you only want to watch certain users instead of having a large
54818+ amount of logs from the entire system. If the sysctl option is enabled,
54819+ a sysctl option with name "audit_group" is created.
54820+
54821+config GRKERNSEC_AUDIT_GID
54822+ int "GID for auditing"
54823+ depends on GRKERNSEC_AUDIT_GROUP
54824+ default 1007
54825+
54826+config GRKERNSEC_EXECLOG
54827+ bool "Exec logging"
54828+ help
54829+ If you say Y here, all execve() calls will be logged (since the
54830+ other exec*() calls are frontends to execve(), all execution
54831+ will be logged). Useful for shell-servers that like to keep track
54832+ of their users. If the sysctl option is enabled, a sysctl option with
54833+ name "exec_logging" is created.
54834+ WARNING: This option when enabled will produce a LOT of logs, especially
54835+ on an active system.
54836+
54837+config GRKERNSEC_RESLOG
54838+ bool "Resource logging"
54839+ help
54840+ If you say Y here, all attempts to overstep resource limits will
54841+ be logged with the resource name, the requested size, and the current
54842+ limit. It is highly recommended that you say Y here. If the sysctl
54843+ option is enabled, a sysctl option with name "resource_logging" is
54844+ created. If the RBAC system is enabled, the sysctl value is ignored.
54845+
54846+config GRKERNSEC_CHROOT_EXECLOG
54847+ bool "Log execs within chroot"
54848+ help
54849+ If you say Y here, all executions inside a chroot jail will be logged
54850+ to syslog. This can cause a large amount of logs if certain
54851+ applications (eg. djb's daemontools) are installed on the system, and
54852+ is therefore left as an option. If the sysctl option is enabled, a
54853+ sysctl option with name "chroot_execlog" is created.
54854+
54855+config GRKERNSEC_AUDIT_PTRACE
54856+ bool "Ptrace logging"
54857+ help
54858+ If you say Y here, all attempts to attach to a process via ptrace
54859+ will be logged. If the sysctl option is enabled, a sysctl option
54860+ with name "audit_ptrace" is created.
54861+
54862+config GRKERNSEC_AUDIT_CHDIR
54863+ bool "Chdir logging"
54864+ help
54865+ If you say Y here, all chdir() calls will be logged. If the sysctl
54866+ option is enabled, a sysctl option with name "audit_chdir" is created.
54867+
54868+config GRKERNSEC_AUDIT_MOUNT
54869+ bool "(Un)Mount logging"
54870+ help
54871+ If you say Y here, all mounts and unmounts will be logged. If the
54872+ sysctl option is enabled, a sysctl option with name "audit_mount" is
54873+ created.
54874+
54875+config GRKERNSEC_SIGNAL
54876+ bool "Signal logging"
54877+ help
54878+ If you say Y here, certain important signals will be logged, such as
54879+ SIGSEGV, which will as a result inform you of when an error in a program
54880+ occurred, which in some cases could mean a possible exploit attempt.
54881+ If the sysctl option is enabled, a sysctl option with name
54882+ "signal_logging" is created.
54883+
54884+config GRKERNSEC_FORKFAIL
54885+ bool "Fork failure logging"
54886+ help
54887+ If you say Y here, all failed fork() attempts will be logged.
54888+ This could suggest a fork bomb, or someone attempting to overstep
54889+ their process limit. If the sysctl option is enabled, a sysctl option
54890+ with name "forkfail_logging" is created.
54891+
54892+config GRKERNSEC_TIME
54893+ bool "Time change logging"
54894+ help
54895+ If you say Y here, any changes of the system clock will be logged.
54896+ If the sysctl option is enabled, a sysctl option with name
54897+ "timechange_logging" is created.
54898+
54899+config GRKERNSEC_PROC_IPADDR
54900+ bool "/proc/<pid>/ipaddr support"
54901+ help
54902+ If you say Y here, a new entry will be added to each /proc/<pid>
54903+ directory that contains the IP address of the person using the task.
54904+ The IP is carried across local TCP and AF_UNIX stream sockets.
54905+ This information can be useful for IDS/IPSes to perform remote response
54906+ to a local attack. The entry is readable by only the owner of the
54907+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
54908+ the RBAC system), and thus does not create privacy concerns.
54909+
54910+config GRKERNSEC_RWXMAP_LOG
54911+ bool 'Denied RWX mmap/mprotect logging'
54912+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
54913+ help
54914+ If you say Y here, calls to mmap() and mprotect() with explicit
54915+ usage of PROT_WRITE and PROT_EXEC together will be logged when
54916+ denied by the PAX_MPROTECT feature. If the sysctl option is
54917+ enabled, a sysctl option with name "rwxmap_logging" is created.
54918+
54919+config GRKERNSEC_AUDIT_TEXTREL
54920+ bool 'ELF text relocations logging (READ HELP)'
54921+ depends on PAX_MPROTECT
54922+ help
54923+ If you say Y here, text relocations will be logged with the filename
54924+ of the offending library or binary. The purpose of the feature is
54925+ to help Linux distribution developers get rid of libraries and
54926+ binaries that need text relocations which hinder the future progress
54927+ of PaX. Only Linux distribution developers should say Y here, and
54928+ never on a production machine, as this option creates an information
54929+ leak that could aid an attacker in defeating the randomization of
54930+ a single memory region. If the sysctl option is enabled, a sysctl
54931+ option with name "audit_textrel" is created.
54932+
54933+endmenu
54934+
54935+menu "Executable Protections"
54936+depends on GRKERNSEC
54937+
54938+config GRKERNSEC_DMESG
54939+ bool "Dmesg(8) restriction"
54940+ help
54941+ If you say Y here, non-root users will not be able to use dmesg(8)
54942+ to view up to the last 4kb of messages in the kernel's log buffer.
54943+ The kernel's log buffer often contains kernel addresses and other
54944+ identifying information useful to an attacker in fingerprinting a
54945+ system for a targeted exploit.
54946+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
54947+ created.
54948+
54949+config GRKERNSEC_HARDEN_PTRACE
54950+ bool "Deter ptrace-based process snooping"
54951+ help
54952+ If you say Y here, TTY sniffers and other malicious monitoring
54953+ programs implemented through ptrace will be defeated. If you
54954+ have been using the RBAC system, this option has already been
54955+ enabled for several years for all users, with the ability to make
54956+ fine-grained exceptions.
54957+
54958+ This option only affects the ability of non-root users to ptrace
54959+ processes that are not a descendent of the ptracing process.
54960+ This means that strace ./binary and gdb ./binary will still work,
54961+ but attaching to arbitrary processes will not. If the sysctl
54962+ option is enabled, a sysctl option with name "harden_ptrace" is
54963+ created.
54964+
54965+config GRKERNSEC_TPE
54966+ bool "Trusted Path Execution (TPE)"
54967+ help
54968+ If you say Y here, you will be able to choose a gid to add to the
54969+ supplementary groups of users you want to mark as "untrusted."
54970+ These users will not be able to execute any files that are not in
54971+ root-owned directories writable only by root. If the sysctl option
54972+ is enabled, a sysctl option with name "tpe" is created.
54973+
54974+config GRKERNSEC_TPE_ALL
54975+ bool "Partially restrict all non-root users"
54976+ depends on GRKERNSEC_TPE
54977+ help
54978+ If you say Y here, all non-root users will be covered under
54979+ a weaker TPE restriction. This is separate from, and in addition to,
54980+ the main TPE options that you have selected elsewhere. Thus, if a
54981+ "trusted" GID is chosen, this restriction applies to even that GID.
54982+ Under this restriction, all non-root users will only be allowed to
54983+ execute files in directories they own that are not group or
54984+ world-writable, or in directories owned by root and writable only by
54985+ root. If the sysctl option is enabled, a sysctl option with name
54986+ "tpe_restrict_all" is created.
54987+
54988+config GRKERNSEC_TPE_INVERT
54989+ bool "Invert GID option"
54990+ depends on GRKERNSEC_TPE
54991+ help
54992+ If you say Y here, the group you specify in the TPE configuration will
54993+ decide what group TPE restrictions will be *disabled* for. This
54994+ option is useful if you want TPE restrictions to be applied to most
54995+ users on the system. If the sysctl option is enabled, a sysctl option
54996+ with name "tpe_invert" is created. Unlike other sysctl options, this
54997+ entry will default to on for backward-compatibility.
54998+
54999+config GRKERNSEC_TPE_GID
55000+ int "GID for untrusted users"
55001+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
55002+ default 1005
55003+ help
55004+ Setting this GID determines what group TPE restrictions will be
55005+ *enabled* for. If the sysctl option is enabled, a sysctl option
55006+ with name "tpe_gid" is created.
55007+
55008+config GRKERNSEC_TPE_GID
55009+ int "GID for trusted users"
55010+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
55011+ default 1005
55012+ help
55013+ Setting this GID determines what group TPE restrictions will be
55014+ *disabled* for. If the sysctl option is enabled, a sysctl option
55015+ with name "tpe_gid" is created.
55016+
55017+endmenu
55018+menu "Network Protections"
55019+depends on GRKERNSEC
55020+
55021+config GRKERNSEC_RANDNET
55022+ bool "Larger entropy pools"
55023+ help
55024+ If you say Y here, the entropy pools used for many features of Linux
55025+ and grsecurity will be doubled in size. Since several grsecurity
55026+ features use additional randomness, it is recommended that you say Y
55027+ here. Saying Y here has a similar effect as modifying
55028+ /proc/sys/kernel/random/poolsize.
55029+
55030+config GRKERNSEC_BLACKHOLE
55031+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
55032+ depends on NET
55033+ help
55034+ If you say Y here, neither TCP resets nor ICMP
55035+ destination-unreachable packets will be sent in response to packets
55036+ sent to ports for which no associated listening process exists.
55037+ This feature supports both IPV4 and IPV6 and exempts the
55038+ loopback interface from blackholing. Enabling this feature
55039+ makes a host more resilient to DoS attacks and reduces network
55040+ visibility against scanners.
55041+
55042+ The blackhole feature as-implemented is equivalent to the FreeBSD
55043+ blackhole feature, as it prevents RST responses to all packets, not
55044+ just SYNs. Under most application behavior this causes no
55045+ problems, but applications (like haproxy) may not close certain
55046+ connections in a way that cleanly terminates them on the remote
55047+ end, leaving the remote host in LAST_ACK state. Because of this
55048+ side-effect and to prevent intentional LAST_ACK DoSes, this
55049+ feature also adds automatic mitigation against such attacks.
55050+ The mitigation drastically reduces the amount of time a socket
55051+ can spend in LAST_ACK state. If you're using haproxy and not
55052+ all servers it connects to have this option enabled, consider
55053+ disabling this feature on the haproxy host.
55054+
55055+ If the sysctl option is enabled, two sysctl options with names
55056+ "ip_blackhole" and "lastack_retries" will be created.
55057+ While "ip_blackhole" takes the standard zero/non-zero on/off
55058+ toggle, "lastack_retries" uses the same kinds of values as
55059+ "tcp_retries1" and "tcp_retries2". The default value of 4
55060+ prevents a socket from lasting more than 45 seconds in LAST_ACK
55061+ state.
55062+
55063+config GRKERNSEC_SOCKET
55064+ bool "Socket restrictions"
55065+ depends on NET
55066+ help
55067+ If you say Y here, you will be able to choose from several options.
55068+ If you assign a GID on your system and add it to the supplementary
55069+ groups of users you want to restrict socket access to, this patch
55070+ will perform up to three things, based on the option(s) you choose.
55071+
55072+config GRKERNSEC_SOCKET_ALL
55073+ bool "Deny any sockets to group"
55074+ depends on GRKERNSEC_SOCKET
55075+ help
55076+ If you say Y here, you will be able to choose a GID whose users will
55077+ be unable to connect to other hosts from your machine or run server
55078+ applications from your machine. If the sysctl option is enabled, a
55079+ sysctl option with name "socket_all" is created.
55080+
55081+config GRKERNSEC_SOCKET_ALL_GID
55082+ int "GID to deny all sockets for"
55083+ depends on GRKERNSEC_SOCKET_ALL
55084+ default 1004
55085+ help
55086+ Here you can choose the GID to disable socket access for. Remember to
55087+ add the users you want socket access disabled for to the GID
55088+ specified here. If the sysctl option is enabled, a sysctl option
55089+ with name "socket_all_gid" is created.
55090+
55091+config GRKERNSEC_SOCKET_CLIENT
55092+ bool "Deny client sockets to group"
55093+ depends on GRKERNSEC_SOCKET
55094+ help
55095+ If you say Y here, you will be able to choose a GID whose users will
55096+ be unable to connect to other hosts from your machine, but will be
55097+ able to run servers. If this option is enabled, all users in the group
55098+ you specify will have to use passive mode when initiating ftp transfers
55099+ from the shell on your machine. If the sysctl option is enabled, a
55100+ sysctl option with name "socket_client" is created.
55101+
55102+config GRKERNSEC_SOCKET_CLIENT_GID
55103+ int "GID to deny client sockets for"
55104+ depends on GRKERNSEC_SOCKET_CLIENT
55105+ default 1003
55106+ help
55107+ Here you can choose the GID to disable client socket access for.
55108+ Remember to add the users you want client socket access disabled for to
55109+ the GID specified here. If the sysctl option is enabled, a sysctl
55110+ option with name "socket_client_gid" is created.
55111+
55112+config GRKERNSEC_SOCKET_SERVER
55113+ bool "Deny server sockets to group"
55114+ depends on GRKERNSEC_SOCKET
55115+ help
55116+ If you say Y here, you will be able to choose a GID whose users will
55117+ be unable to run server applications from your machine. If the sysctl
55118+ option is enabled, a sysctl option with name "socket_server" is created.
55119+
55120+config GRKERNSEC_SOCKET_SERVER_GID
55121+ int "GID to deny server sockets for"
55122+ depends on GRKERNSEC_SOCKET_SERVER
55123+ default 1002
55124+ help
55125+ Here you can choose the GID to disable server socket access for.
55126+ Remember to add the users you want server socket access disabled for to
55127+ the GID specified here. If the sysctl option is enabled, a sysctl
55128+ option with name "socket_server_gid" is created.
55129+
55130+endmenu
55131+menu "Sysctl support"
55132+depends on GRKERNSEC && SYSCTL
55133+
55134+config GRKERNSEC_SYSCTL
55135+ bool "Sysctl support"
55136+ help
55137+ If you say Y here, you will be able to change the options that
55138+ grsecurity runs with at bootup, without having to recompile your
55139+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
55140+ to enable (1) or disable (0) various features. All the sysctl entries
55141+ are mutable until the "grsec_lock" entry is set to a non-zero value.
55142+ All features enabled in the kernel configuration are disabled at boot
55143+ if you do not say Y to the "Turn on features by default" option.
55144+ All options should be set at startup, and the grsec_lock entry should
55145+ be set to a non-zero value after all the options are set.
55146+ *THIS IS EXTREMELY IMPORTANT*
55147+
55148+config GRKERNSEC_SYSCTL_DISTRO
55149+ bool "Extra sysctl support for distro makers (READ HELP)"
55150+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
55151+ help
55152+ If you say Y here, additional sysctl options will be created
55153+ for features that affect processes running as root. Therefore,
55154+ it is critical when using this option that the grsec_lock entry be
55155+ enabled after boot. Only distros with prebuilt kernel packages
55156+ with this option enabled that can ensure grsec_lock is enabled
55157+ after boot should use this option.
55158+ *Failure to set grsec_lock after boot makes all grsec features
55159+ this option covers useless*
55160+
55161+ Currently this option creates the following sysctl entries:
55162+ "Disable Privileged I/O": "disable_priv_io"
55163+
55164+config GRKERNSEC_SYSCTL_ON
55165+ bool "Turn on features by default"
55166+ depends on GRKERNSEC_SYSCTL
55167+ help
55168+ If you say Y here, instead of having all features enabled in the
55169+ kernel configuration disabled at boot time, the features will be
55170+ enabled at boot time. It is recommended you say Y here unless
55171+ there is some reason you would want all sysctl-tunable features to
55172+ be disabled by default. As mentioned elsewhere, it is important
55173+ to enable the grsec_lock entry once you have finished modifying
55174+ the sysctl entries.
55175+
55176+endmenu
55177+menu "Logging Options"
55178+depends on GRKERNSEC
55179+
55180+config GRKERNSEC_FLOODTIME
55181+ int "Seconds in between log messages (minimum)"
55182+ default 10
55183+ help
55184+ This option allows you to enforce the number of seconds between
55185+ grsecurity log messages. The default should be suitable for most
55186+ people, however, if you choose to change it, choose a value small enough
55187+ to allow informative logs to be produced, but large enough to
55188+ prevent flooding.
55189+
55190+config GRKERNSEC_FLOODBURST
55191+ int "Number of messages in a burst (maximum)"
55192+ default 6
55193+ help
55194+ This option allows you to choose the maximum number of messages allowed
55195+ within the flood time interval you chose in a separate option. The
55196+ default should be suitable for most people, however if you find that
55197+ many of your logs are being interpreted as flooding, you may want to
55198+ raise this value.
55199+
55200+endmenu
55201+
55202+endmenu
55203diff -urNp linux-3.0.4/grsecurity/Makefile linux-3.0.4/grsecurity/Makefile
55204--- linux-3.0.4/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
55205+++ linux-3.0.4/grsecurity/Makefile 2011-09-14 23:29:56.000000000 -0400
55206@@ -0,0 +1,35 @@
55207+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
55208+# during 2001-2009 it has been completely redesigned by Brad Spengler
55209+# into an RBAC system
55210+#
55211+# All code in this directory and various hooks inserted throughout the kernel
55212+# are copyright Brad Spengler - Open Source Security, Inc., and released
55213+# under the GPL v2 or higher
55214+
55215+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
55216+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
55217+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
55218+
55219+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
55220+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
55221+ gracl_learn.o grsec_log.o
55222+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
55223+
55224+ifdef CONFIG_NET
55225+obj-y += grsec_sock.o
55226+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
55227+endif
55228+
55229+ifndef CONFIG_GRKERNSEC
55230+obj-y += grsec_disabled.o
55231+endif
55232+
55233+ifdef CONFIG_GRKERNSEC_HIDESYM
55234+extra-y := grsec_hidesym.o
55235+$(obj)/grsec_hidesym.o:
55236+ @-chmod -f 500 /boot
55237+ @-chmod -f 500 /lib/modules
55238+ @-chmod -f 500 /lib64/modules
55239+ @-chmod -f 700 .
55240+ @echo ' grsec: protected kernel image paths'
55241+endif
55242diff -urNp linux-3.0.4/include/acpi/acpi_bus.h linux-3.0.4/include/acpi/acpi_bus.h
55243--- linux-3.0.4/include/acpi/acpi_bus.h 2011-07-21 22:17:23.000000000 -0400
55244+++ linux-3.0.4/include/acpi/acpi_bus.h 2011-08-23 21:47:56.000000000 -0400
55245@@ -107,7 +107,7 @@ struct acpi_device_ops {
55246 acpi_op_bind bind;
55247 acpi_op_unbind unbind;
55248 acpi_op_notify notify;
55249-};
55250+} __no_const;
55251
55252 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
55253
55254diff -urNp linux-3.0.4/include/asm-generic/atomic-long.h linux-3.0.4/include/asm-generic/atomic-long.h
55255--- linux-3.0.4/include/asm-generic/atomic-long.h 2011-07-21 22:17:23.000000000 -0400
55256+++ linux-3.0.4/include/asm-generic/atomic-long.h 2011-08-23 21:47:56.000000000 -0400
55257@@ -22,6 +22,12 @@
55258
55259 typedef atomic64_t atomic_long_t;
55260
55261+#ifdef CONFIG_PAX_REFCOUNT
55262+typedef atomic64_unchecked_t atomic_long_unchecked_t;
55263+#else
55264+typedef atomic64_t atomic_long_unchecked_t;
55265+#endif
55266+
55267 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
55268
55269 static inline long atomic_long_read(atomic_long_t *l)
55270@@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
55271 return (long)atomic64_read(v);
55272 }
55273
55274+#ifdef CONFIG_PAX_REFCOUNT
55275+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
55276+{
55277+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
55278+
55279+ return (long)atomic64_read_unchecked(v);
55280+}
55281+#endif
55282+
55283 static inline void atomic_long_set(atomic_long_t *l, long i)
55284 {
55285 atomic64_t *v = (atomic64_t *)l;
55286@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
55287 atomic64_set(v, i);
55288 }
55289
55290+#ifdef CONFIG_PAX_REFCOUNT
55291+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
55292+{
55293+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
55294+
55295+ atomic64_set_unchecked(v, i);
55296+}
55297+#endif
55298+
55299 static inline void atomic_long_inc(atomic_long_t *l)
55300 {
55301 atomic64_t *v = (atomic64_t *)l;
55302@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
55303 atomic64_inc(v);
55304 }
55305
55306+#ifdef CONFIG_PAX_REFCOUNT
55307+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
55308+{
55309+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
55310+
55311+ atomic64_inc_unchecked(v);
55312+}
55313+#endif
55314+
55315 static inline void atomic_long_dec(atomic_long_t *l)
55316 {
55317 atomic64_t *v = (atomic64_t *)l;
55318@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
55319 atomic64_dec(v);
55320 }
55321
55322+#ifdef CONFIG_PAX_REFCOUNT
55323+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
55324+{
55325+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
55326+
55327+ atomic64_dec_unchecked(v);
55328+}
55329+#endif
55330+
55331 static inline void atomic_long_add(long i, atomic_long_t *l)
55332 {
55333 atomic64_t *v = (atomic64_t *)l;
55334@@ -59,6 +101,15 @@ static inline void atomic_long_add(long
55335 atomic64_add(i, v);
55336 }
55337
55338+#ifdef CONFIG_PAX_REFCOUNT
55339+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
55340+{
55341+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
55342+
55343+ atomic64_add_unchecked(i, v);
55344+}
55345+#endif
55346+
55347 static inline void atomic_long_sub(long i, atomic_long_t *l)
55348 {
55349 atomic64_t *v = (atomic64_t *)l;
55350@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long
55351 atomic64_sub(i, v);
55352 }
55353
55354+#ifdef CONFIG_PAX_REFCOUNT
55355+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
55356+{
55357+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
55358+
55359+ atomic64_sub_unchecked(i, v);
55360+}
55361+#endif
55362+
55363 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
55364 {
55365 atomic64_t *v = (atomic64_t *)l;
55366@@ -115,6 +175,15 @@ static inline long atomic_long_inc_retur
55367 return (long)atomic64_inc_return(v);
55368 }
55369
55370+#ifdef CONFIG_PAX_REFCOUNT
55371+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
55372+{
55373+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
55374+
55375+ return (long)atomic64_inc_return_unchecked(v);
55376+}
55377+#endif
55378+
55379 static inline long atomic_long_dec_return(atomic_long_t *l)
55380 {
55381 atomic64_t *v = (atomic64_t *)l;
55382@@ -140,6 +209,12 @@ static inline long atomic_long_add_unles
55383
55384 typedef atomic_t atomic_long_t;
55385
55386+#ifdef CONFIG_PAX_REFCOUNT
55387+typedef atomic_unchecked_t atomic_long_unchecked_t;
55388+#else
55389+typedef atomic_t atomic_long_unchecked_t;
55390+#endif
55391+
55392 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
55393 static inline long atomic_long_read(atomic_long_t *l)
55394 {
55395@@ -148,6 +223,15 @@ static inline long atomic_long_read(atom
55396 return (long)atomic_read(v);
55397 }
55398
55399+#ifdef CONFIG_PAX_REFCOUNT
55400+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
55401+{
55402+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
55403+
55404+ return (long)atomic_read_unchecked(v);
55405+}
55406+#endif
55407+
55408 static inline void atomic_long_set(atomic_long_t *l, long i)
55409 {
55410 atomic_t *v = (atomic_t *)l;
55411@@ -155,6 +239,15 @@ static inline void atomic_long_set(atomi
55412 atomic_set(v, i);
55413 }
55414
55415+#ifdef CONFIG_PAX_REFCOUNT
55416+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
55417+{
55418+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
55419+
55420+ atomic_set_unchecked(v, i);
55421+}
55422+#endif
55423+
55424 static inline void atomic_long_inc(atomic_long_t *l)
55425 {
55426 atomic_t *v = (atomic_t *)l;
55427@@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomi
55428 atomic_inc(v);
55429 }
55430
55431+#ifdef CONFIG_PAX_REFCOUNT
55432+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
55433+{
55434+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
55435+
55436+ atomic_inc_unchecked(v);
55437+}
55438+#endif
55439+
55440 static inline void atomic_long_dec(atomic_long_t *l)
55441 {
55442 atomic_t *v = (atomic_t *)l;
55443@@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomi
55444 atomic_dec(v);
55445 }
55446
55447+#ifdef CONFIG_PAX_REFCOUNT
55448+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
55449+{
55450+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
55451+
55452+ atomic_dec_unchecked(v);
55453+}
55454+#endif
55455+
55456 static inline void atomic_long_add(long i, atomic_long_t *l)
55457 {
55458 atomic_t *v = (atomic_t *)l;
55459@@ -176,6 +287,15 @@ static inline void atomic_long_add(long
55460 atomic_add(i, v);
55461 }
55462
55463+#ifdef CONFIG_PAX_REFCOUNT
55464+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
55465+{
55466+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
55467+
55468+ atomic_add_unchecked(i, v);
55469+}
55470+#endif
55471+
55472 static inline void atomic_long_sub(long i, atomic_long_t *l)
55473 {
55474 atomic_t *v = (atomic_t *)l;
55475@@ -183,6 +303,15 @@ static inline void atomic_long_sub(long
55476 atomic_sub(i, v);
55477 }
55478
55479+#ifdef CONFIG_PAX_REFCOUNT
55480+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
55481+{
55482+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
55483+
55484+ atomic_sub_unchecked(i, v);
55485+}
55486+#endif
55487+
55488 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
55489 {
55490 atomic_t *v = (atomic_t *)l;
55491@@ -232,6 +361,15 @@ static inline long atomic_long_inc_retur
55492 return (long)atomic_inc_return(v);
55493 }
55494
55495+#ifdef CONFIG_PAX_REFCOUNT
55496+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
55497+{
55498+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
55499+
55500+ return (long)atomic_inc_return_unchecked(v);
55501+}
55502+#endif
55503+
55504 static inline long atomic_long_dec_return(atomic_long_t *l)
55505 {
55506 atomic_t *v = (atomic_t *)l;
55507@@ -255,4 +393,49 @@ static inline long atomic_long_add_unles
55508
55509 #endif /* BITS_PER_LONG == 64 */
55510
55511+#ifdef CONFIG_PAX_REFCOUNT
55512+static inline void pax_refcount_needs_these_functions(void)
55513+{
55514+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
55515+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
55516+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
55517+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
55518+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
55519+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
55520+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
55521+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
55522+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
55523+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
55524+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
55525+
55526+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
55527+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
55528+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
55529+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
55530+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
55531+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
55532+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
55533+}
55534+#else
55535+#define atomic_read_unchecked(v) atomic_read(v)
55536+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
55537+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
55538+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
55539+#define atomic_inc_unchecked(v) atomic_inc(v)
55540+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
55541+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
55542+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
55543+#define atomic_dec_unchecked(v) atomic_dec(v)
55544+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
55545+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
55546+
55547+#define atomic_long_read_unchecked(v) atomic_long_read(v)
55548+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
55549+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
55550+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
55551+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
55552+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
55553+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
55554+#endif
55555+
55556 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
55557diff -urNp linux-3.0.4/include/asm-generic/cache.h linux-3.0.4/include/asm-generic/cache.h
55558--- linux-3.0.4/include/asm-generic/cache.h 2011-07-21 22:17:23.000000000 -0400
55559+++ linux-3.0.4/include/asm-generic/cache.h 2011-08-23 21:47:56.000000000 -0400
55560@@ -6,7 +6,7 @@
55561 * cache lines need to provide their own cache.h.
55562 */
55563
55564-#define L1_CACHE_SHIFT 5
55565-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
55566+#define L1_CACHE_SHIFT 5UL
55567+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
55568
55569 #endif /* __ASM_GENERIC_CACHE_H */
55570diff -urNp linux-3.0.4/include/asm-generic/int-l64.h linux-3.0.4/include/asm-generic/int-l64.h
55571--- linux-3.0.4/include/asm-generic/int-l64.h 2011-07-21 22:17:23.000000000 -0400
55572+++ linux-3.0.4/include/asm-generic/int-l64.h 2011-08-23 21:47:56.000000000 -0400
55573@@ -46,6 +46,8 @@ typedef unsigned int u32;
55574 typedef signed long s64;
55575 typedef unsigned long u64;
55576
55577+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
55578+
55579 #define S8_C(x) x
55580 #define U8_C(x) x ## U
55581 #define S16_C(x) x
55582diff -urNp linux-3.0.4/include/asm-generic/int-ll64.h linux-3.0.4/include/asm-generic/int-ll64.h
55583--- linux-3.0.4/include/asm-generic/int-ll64.h 2011-07-21 22:17:23.000000000 -0400
55584+++ linux-3.0.4/include/asm-generic/int-ll64.h 2011-08-23 21:47:56.000000000 -0400
55585@@ -51,6 +51,8 @@ typedef unsigned int u32;
55586 typedef signed long long s64;
55587 typedef unsigned long long u64;
55588
55589+typedef unsigned long long intoverflow_t;
55590+
55591 #define S8_C(x) x
55592 #define U8_C(x) x ## U
55593 #define S16_C(x) x
55594diff -urNp linux-3.0.4/include/asm-generic/kmap_types.h linux-3.0.4/include/asm-generic/kmap_types.h
55595--- linux-3.0.4/include/asm-generic/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
55596+++ linux-3.0.4/include/asm-generic/kmap_types.h 2011-08-23 21:47:56.000000000 -0400
55597@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
55598 KMAP_D(17) KM_NMI,
55599 KMAP_D(18) KM_NMI_PTE,
55600 KMAP_D(19) KM_KDB,
55601+KMAP_D(20) KM_CLEARPAGE,
55602 /*
55603 * Remember to update debug_kmap_atomic() when adding new kmap types!
55604 */
55605-KMAP_D(20) KM_TYPE_NR
55606+KMAP_D(21) KM_TYPE_NR
55607 };
55608
55609 #undef KMAP_D
55610diff -urNp linux-3.0.4/include/asm-generic/pgtable.h linux-3.0.4/include/asm-generic/pgtable.h
55611--- linux-3.0.4/include/asm-generic/pgtable.h 2011-07-21 22:17:23.000000000 -0400
55612+++ linux-3.0.4/include/asm-generic/pgtable.h 2011-08-23 21:47:56.000000000 -0400
55613@@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
55614 #endif /* __HAVE_ARCH_PMD_WRITE */
55615 #endif
55616
55617+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
55618+static inline unsigned long pax_open_kernel(void) { return 0; }
55619+#endif
55620+
55621+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
55622+static inline unsigned long pax_close_kernel(void) { return 0; }
55623+#endif
55624+
55625 #endif /* !__ASSEMBLY__ */
55626
55627 #endif /* _ASM_GENERIC_PGTABLE_H */
55628diff -urNp linux-3.0.4/include/asm-generic/pgtable-nopmd.h linux-3.0.4/include/asm-generic/pgtable-nopmd.h
55629--- linux-3.0.4/include/asm-generic/pgtable-nopmd.h 2011-07-21 22:17:23.000000000 -0400
55630+++ linux-3.0.4/include/asm-generic/pgtable-nopmd.h 2011-08-23 21:47:56.000000000 -0400
55631@@ -1,14 +1,19 @@
55632 #ifndef _PGTABLE_NOPMD_H
55633 #define _PGTABLE_NOPMD_H
55634
55635-#ifndef __ASSEMBLY__
55636-
55637 #include <asm-generic/pgtable-nopud.h>
55638
55639-struct mm_struct;
55640-
55641 #define __PAGETABLE_PMD_FOLDED
55642
55643+#define PMD_SHIFT PUD_SHIFT
55644+#define PTRS_PER_PMD 1
55645+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
55646+#define PMD_MASK (~(PMD_SIZE-1))
55647+
55648+#ifndef __ASSEMBLY__
55649+
55650+struct mm_struct;
55651+
55652 /*
55653 * Having the pmd type consist of a pud gets the size right, and allows
55654 * us to conceptually access the pud entry that this pmd is folded into
55655@@ -16,11 +21,6 @@ struct mm_struct;
55656 */
55657 typedef struct { pud_t pud; } pmd_t;
55658
55659-#define PMD_SHIFT PUD_SHIFT
55660-#define PTRS_PER_PMD 1
55661-#define PMD_SIZE (1UL << PMD_SHIFT)
55662-#define PMD_MASK (~(PMD_SIZE-1))
55663-
55664 /*
55665 * The "pud_xxx()" functions here are trivial for a folded two-level
55666 * setup: the pmd is never bad, and a pmd always exists (as it's folded
55667diff -urNp linux-3.0.4/include/asm-generic/pgtable-nopud.h linux-3.0.4/include/asm-generic/pgtable-nopud.h
55668--- linux-3.0.4/include/asm-generic/pgtable-nopud.h 2011-07-21 22:17:23.000000000 -0400
55669+++ linux-3.0.4/include/asm-generic/pgtable-nopud.h 2011-08-23 21:47:56.000000000 -0400
55670@@ -1,10 +1,15 @@
55671 #ifndef _PGTABLE_NOPUD_H
55672 #define _PGTABLE_NOPUD_H
55673
55674-#ifndef __ASSEMBLY__
55675-
55676 #define __PAGETABLE_PUD_FOLDED
55677
55678+#define PUD_SHIFT PGDIR_SHIFT
55679+#define PTRS_PER_PUD 1
55680+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
55681+#define PUD_MASK (~(PUD_SIZE-1))
55682+
55683+#ifndef __ASSEMBLY__
55684+
55685 /*
55686 * Having the pud type consist of a pgd gets the size right, and allows
55687 * us to conceptually access the pgd entry that this pud is folded into
55688@@ -12,11 +17,6 @@
55689 */
55690 typedef struct { pgd_t pgd; } pud_t;
55691
55692-#define PUD_SHIFT PGDIR_SHIFT
55693-#define PTRS_PER_PUD 1
55694-#define PUD_SIZE (1UL << PUD_SHIFT)
55695-#define PUD_MASK (~(PUD_SIZE-1))
55696-
55697 /*
55698 * The "pgd_xxx()" functions here are trivial for a folded two-level
55699 * setup: the pud is never bad, and a pud always exists (as it's folded
55700diff -urNp linux-3.0.4/include/asm-generic/vmlinux.lds.h linux-3.0.4/include/asm-generic/vmlinux.lds.h
55701--- linux-3.0.4/include/asm-generic/vmlinux.lds.h 2011-07-21 22:17:23.000000000 -0400
55702+++ linux-3.0.4/include/asm-generic/vmlinux.lds.h 2011-08-23 21:47:56.000000000 -0400
55703@@ -217,6 +217,7 @@
55704 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
55705 VMLINUX_SYMBOL(__start_rodata) = .; \
55706 *(.rodata) *(.rodata.*) \
55707+ *(.data..read_only) \
55708 *(__vermagic) /* Kernel version magic */ \
55709 . = ALIGN(8); \
55710 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
55711@@ -723,17 +724,18 @@
55712 * section in the linker script will go there too. @phdr should have
55713 * a leading colon.
55714 *
55715- * Note that this macros defines __per_cpu_load as an absolute symbol.
55716+ * Note that this macros defines per_cpu_load as an absolute symbol.
55717 * If there is no need to put the percpu section at a predetermined
55718 * address, use PERCPU_SECTION.
55719 */
55720 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
55721- VMLINUX_SYMBOL(__per_cpu_load) = .; \
55722- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
55723+ per_cpu_load = .; \
55724+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
55725 - LOAD_OFFSET) { \
55726+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
55727 PERCPU_INPUT(cacheline) \
55728 } phdr \
55729- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
55730+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
55731
55732 /**
55733 * PERCPU_SECTION - define output section for percpu area, simple version
55734diff -urNp linux-3.0.4/include/drm/drm_crtc_helper.h linux-3.0.4/include/drm/drm_crtc_helper.h
55735--- linux-3.0.4/include/drm/drm_crtc_helper.h 2011-07-21 22:17:23.000000000 -0400
55736+++ linux-3.0.4/include/drm/drm_crtc_helper.h 2011-08-23 21:47:56.000000000 -0400
55737@@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
55738
55739 /* disable crtc when not in use - more explicit than dpms off */
55740 void (*disable)(struct drm_crtc *crtc);
55741-};
55742+} __no_const;
55743
55744 struct drm_encoder_helper_funcs {
55745 void (*dpms)(struct drm_encoder *encoder, int mode);
55746@@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
55747 struct drm_connector *connector);
55748 /* disable encoder when not in use - more explicit than dpms off */
55749 void (*disable)(struct drm_encoder *encoder);
55750-};
55751+} __no_const;
55752
55753 struct drm_connector_helper_funcs {
55754 int (*get_modes)(struct drm_connector *connector);
55755diff -urNp linux-3.0.4/include/drm/drmP.h linux-3.0.4/include/drm/drmP.h
55756--- linux-3.0.4/include/drm/drmP.h 2011-07-21 22:17:23.000000000 -0400
55757+++ linux-3.0.4/include/drm/drmP.h 2011-08-23 21:47:56.000000000 -0400
55758@@ -73,6 +73,7 @@
55759 #include <linux/workqueue.h>
55760 #include <linux/poll.h>
55761 #include <asm/pgalloc.h>
55762+#include <asm/local.h>
55763 #include "drm.h"
55764
55765 #include <linux/idr.h>
55766@@ -1033,7 +1034,7 @@ struct drm_device {
55767
55768 /** \name Usage Counters */
55769 /*@{ */
55770- int open_count; /**< Outstanding files open */
55771+ local_t open_count; /**< Outstanding files open */
55772 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
55773 atomic_t vma_count; /**< Outstanding vma areas open */
55774 int buf_use; /**< Buffers in use -- cannot alloc */
55775@@ -1044,7 +1045,7 @@ struct drm_device {
55776 /*@{ */
55777 unsigned long counters;
55778 enum drm_stat_type types[15];
55779- atomic_t counts[15];
55780+ atomic_unchecked_t counts[15];
55781 /*@} */
55782
55783 struct list_head filelist;
55784diff -urNp linux-3.0.4/include/drm/ttm/ttm_memory.h linux-3.0.4/include/drm/ttm/ttm_memory.h
55785--- linux-3.0.4/include/drm/ttm/ttm_memory.h 2011-07-21 22:17:23.000000000 -0400
55786+++ linux-3.0.4/include/drm/ttm/ttm_memory.h 2011-08-23 21:47:56.000000000 -0400
55787@@ -47,7 +47,7 @@
55788
55789 struct ttm_mem_shrink {
55790 int (*do_shrink) (struct ttm_mem_shrink *);
55791-};
55792+} __no_const;
55793
55794 /**
55795 * struct ttm_mem_global - Global memory accounting structure.
55796diff -urNp linux-3.0.4/include/linux/a.out.h linux-3.0.4/include/linux/a.out.h
55797--- linux-3.0.4/include/linux/a.out.h 2011-07-21 22:17:23.000000000 -0400
55798+++ linux-3.0.4/include/linux/a.out.h 2011-08-23 21:47:56.000000000 -0400
55799@@ -39,6 +39,14 @@ enum machine_type {
55800 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
55801 };
55802
55803+/* Constants for the N_FLAGS field */
55804+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
55805+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
55806+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
55807+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
55808+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
55809+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
55810+
55811 #if !defined (N_MAGIC)
55812 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
55813 #endif
55814diff -urNp linux-3.0.4/include/linux/atmdev.h linux-3.0.4/include/linux/atmdev.h
55815--- linux-3.0.4/include/linux/atmdev.h 2011-07-21 22:17:23.000000000 -0400
55816+++ linux-3.0.4/include/linux/atmdev.h 2011-08-23 21:47:56.000000000 -0400
55817@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
55818 #endif
55819
55820 struct k_atm_aal_stats {
55821-#define __HANDLE_ITEM(i) atomic_t i
55822+#define __HANDLE_ITEM(i) atomic_unchecked_t i
55823 __AAL_STAT_ITEMS
55824 #undef __HANDLE_ITEM
55825 };
55826diff -urNp linux-3.0.4/include/linux/binfmts.h linux-3.0.4/include/linux/binfmts.h
55827--- linux-3.0.4/include/linux/binfmts.h 2011-07-21 22:17:23.000000000 -0400
55828+++ linux-3.0.4/include/linux/binfmts.h 2011-08-23 21:47:56.000000000 -0400
55829@@ -88,6 +88,7 @@ struct linux_binfmt {
55830 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
55831 int (*load_shlib)(struct file *);
55832 int (*core_dump)(struct coredump_params *cprm);
55833+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
55834 unsigned long min_coredump; /* minimal dump size */
55835 };
55836
55837diff -urNp linux-3.0.4/include/linux/blkdev.h linux-3.0.4/include/linux/blkdev.h
55838--- linux-3.0.4/include/linux/blkdev.h 2011-07-21 22:17:23.000000000 -0400
55839+++ linux-3.0.4/include/linux/blkdev.h 2011-08-26 19:49:56.000000000 -0400
55840@@ -1308,7 +1308,7 @@ struct block_device_operations {
55841 /* this callback is with swap_lock and sometimes page table lock held */
55842 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
55843 struct module *owner;
55844-};
55845+} __do_const;
55846
55847 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
55848 unsigned long);
55849diff -urNp linux-3.0.4/include/linux/blktrace_api.h linux-3.0.4/include/linux/blktrace_api.h
55850--- linux-3.0.4/include/linux/blktrace_api.h 2011-07-21 22:17:23.000000000 -0400
55851+++ linux-3.0.4/include/linux/blktrace_api.h 2011-08-23 21:47:56.000000000 -0400
55852@@ -161,7 +161,7 @@ struct blk_trace {
55853 struct dentry *dir;
55854 struct dentry *dropped_file;
55855 struct dentry *msg_file;
55856- atomic_t dropped;
55857+ atomic_unchecked_t dropped;
55858 };
55859
55860 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
55861diff -urNp linux-3.0.4/include/linux/byteorder/little_endian.h linux-3.0.4/include/linux/byteorder/little_endian.h
55862--- linux-3.0.4/include/linux/byteorder/little_endian.h 2011-07-21 22:17:23.000000000 -0400
55863+++ linux-3.0.4/include/linux/byteorder/little_endian.h 2011-08-23 21:47:56.000000000 -0400
55864@@ -42,51 +42,51 @@
55865
55866 static inline __le64 __cpu_to_le64p(const __u64 *p)
55867 {
55868- return (__force __le64)*p;
55869+ return (__force const __le64)*p;
55870 }
55871 static inline __u64 __le64_to_cpup(const __le64 *p)
55872 {
55873- return (__force __u64)*p;
55874+ return (__force const __u64)*p;
55875 }
55876 static inline __le32 __cpu_to_le32p(const __u32 *p)
55877 {
55878- return (__force __le32)*p;
55879+ return (__force const __le32)*p;
55880 }
55881 static inline __u32 __le32_to_cpup(const __le32 *p)
55882 {
55883- return (__force __u32)*p;
55884+ return (__force const __u32)*p;
55885 }
55886 static inline __le16 __cpu_to_le16p(const __u16 *p)
55887 {
55888- return (__force __le16)*p;
55889+ return (__force const __le16)*p;
55890 }
55891 static inline __u16 __le16_to_cpup(const __le16 *p)
55892 {
55893- return (__force __u16)*p;
55894+ return (__force const __u16)*p;
55895 }
55896 static inline __be64 __cpu_to_be64p(const __u64 *p)
55897 {
55898- return (__force __be64)__swab64p(p);
55899+ return (__force const __be64)__swab64p(p);
55900 }
55901 static inline __u64 __be64_to_cpup(const __be64 *p)
55902 {
55903- return __swab64p((__u64 *)p);
55904+ return __swab64p((const __u64 *)p);
55905 }
55906 static inline __be32 __cpu_to_be32p(const __u32 *p)
55907 {
55908- return (__force __be32)__swab32p(p);
55909+ return (__force const __be32)__swab32p(p);
55910 }
55911 static inline __u32 __be32_to_cpup(const __be32 *p)
55912 {
55913- return __swab32p((__u32 *)p);
55914+ return __swab32p((const __u32 *)p);
55915 }
55916 static inline __be16 __cpu_to_be16p(const __u16 *p)
55917 {
55918- return (__force __be16)__swab16p(p);
55919+ return (__force const __be16)__swab16p(p);
55920 }
55921 static inline __u16 __be16_to_cpup(const __be16 *p)
55922 {
55923- return __swab16p((__u16 *)p);
55924+ return __swab16p((const __u16 *)p);
55925 }
55926 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
55927 #define __le64_to_cpus(x) do { (void)(x); } while (0)
55928diff -urNp linux-3.0.4/include/linux/cache.h linux-3.0.4/include/linux/cache.h
55929--- linux-3.0.4/include/linux/cache.h 2011-07-21 22:17:23.000000000 -0400
55930+++ linux-3.0.4/include/linux/cache.h 2011-08-23 21:47:56.000000000 -0400
55931@@ -16,6 +16,10 @@
55932 #define __read_mostly
55933 #endif
55934
55935+#ifndef __read_only
55936+#define __read_only __read_mostly
55937+#endif
55938+
55939 #ifndef ____cacheline_aligned
55940 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
55941 #endif
55942diff -urNp linux-3.0.4/include/linux/capability.h linux-3.0.4/include/linux/capability.h
55943--- linux-3.0.4/include/linux/capability.h 2011-07-21 22:17:23.000000000 -0400
55944+++ linux-3.0.4/include/linux/capability.h 2011-08-23 21:48:14.000000000 -0400
55945@@ -547,6 +547,9 @@ extern bool capable(int cap);
55946 extern bool ns_capable(struct user_namespace *ns, int cap);
55947 extern bool task_ns_capable(struct task_struct *t, int cap);
55948 extern bool nsown_capable(int cap);
55949+extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
55950+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
55951+extern bool capable_nolog(int cap);
55952
55953 /* audit system wants to get cap info from files as well */
55954 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
55955diff -urNp linux-3.0.4/include/linux/cleancache.h linux-3.0.4/include/linux/cleancache.h
55956--- linux-3.0.4/include/linux/cleancache.h 2011-07-21 22:17:23.000000000 -0400
55957+++ linux-3.0.4/include/linux/cleancache.h 2011-08-23 21:47:56.000000000 -0400
55958@@ -31,7 +31,7 @@ struct cleancache_ops {
55959 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
55960 void (*flush_inode)(int, struct cleancache_filekey);
55961 void (*flush_fs)(int);
55962-};
55963+} __no_const;
55964
55965 extern struct cleancache_ops
55966 cleancache_register_ops(struct cleancache_ops *ops);
55967diff -urNp linux-3.0.4/include/linux/compiler-gcc4.h linux-3.0.4/include/linux/compiler-gcc4.h
55968--- linux-3.0.4/include/linux/compiler-gcc4.h 2011-07-21 22:17:23.000000000 -0400
55969+++ linux-3.0.4/include/linux/compiler-gcc4.h 2011-08-26 19:49:56.000000000 -0400
55970@@ -31,6 +31,12 @@
55971
55972
55973 #if __GNUC_MINOR__ >= 5
55974+
55975+#ifdef CONSTIFY_PLUGIN
55976+#define __no_const __attribute__((no_const))
55977+#define __do_const __attribute__((do_const))
55978+#endif
55979+
55980 /*
55981 * Mark a position in code as unreachable. This can be used to
55982 * suppress control flow warnings after asm blocks that transfer
55983@@ -46,6 +52,11 @@
55984 #define __noclone __attribute__((__noclone__))
55985
55986 #endif
55987+
55988+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
55989+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
55990+#define __bos0(ptr) __bos((ptr), 0)
55991+#define __bos1(ptr) __bos((ptr), 1)
55992 #endif
55993
55994 #if __GNUC_MINOR__ > 0
55995diff -urNp linux-3.0.4/include/linux/compiler.h linux-3.0.4/include/linux/compiler.h
55996--- linux-3.0.4/include/linux/compiler.h 2011-07-21 22:17:23.000000000 -0400
55997+++ linux-3.0.4/include/linux/compiler.h 2011-10-06 04:17:55.000000000 -0400
55998@@ -5,31 +5,62 @@
55999
56000 #ifdef __CHECKER__
56001 # define __user __attribute__((noderef, address_space(1)))
56002+# define __force_user __force __user
56003 # define __kernel __attribute__((address_space(0)))
56004+# define __force_kernel __force __kernel
56005 # define __safe __attribute__((safe))
56006 # define __force __attribute__((force))
56007 # define __nocast __attribute__((nocast))
56008 # define __iomem __attribute__((noderef, address_space(2)))
56009+# define __force_iomem __force __iomem
56010 # define __acquires(x) __attribute__((context(x,0,1)))
56011 # define __releases(x) __attribute__((context(x,1,0)))
56012 # define __acquire(x) __context__(x,1)
56013 # define __release(x) __context__(x,-1)
56014 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
56015 # define __percpu __attribute__((noderef, address_space(3)))
56016+# define __force_percpu __force __percpu
56017 #ifdef CONFIG_SPARSE_RCU_POINTER
56018 # define __rcu __attribute__((noderef, address_space(4)))
56019+# define __force_rcu __force __rcu
56020 #else
56021 # define __rcu
56022+# define __force_rcu
56023 #endif
56024 extern void __chk_user_ptr(const volatile void __user *);
56025 extern void __chk_io_ptr(const volatile void __iomem *);
56026+#elif defined(CHECKER_PLUGIN)
56027+//# define __user
56028+//# define __force_user
56029+//# define __kernel
56030+//# define __force_kernel
56031+# define __safe
56032+# define __force
56033+# define __nocast
56034+# define __iomem
56035+# define __force_iomem
56036+# define __chk_user_ptr(x) (void)0
56037+# define __chk_io_ptr(x) (void)0
56038+# define __builtin_warning(x, y...) (1)
56039+# define __acquires(x)
56040+# define __releases(x)
56041+# define __acquire(x) (void)0
56042+# define __release(x) (void)0
56043+# define __cond_lock(x,c) (c)
56044+# define __percpu
56045+# define __force_percpu
56046+# define __rcu
56047+# define __force_rcu
56048 #else
56049 # define __user
56050+# define __force_user
56051 # define __kernel
56052+# define __force_kernel
56053 # define __safe
56054 # define __force
56055 # define __nocast
56056 # define __iomem
56057+# define __force_iomem
56058 # define __chk_user_ptr(x) (void)0
56059 # define __chk_io_ptr(x) (void)0
56060 # define __builtin_warning(x, y...) (1)
56061@@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile
56062 # define __release(x) (void)0
56063 # define __cond_lock(x,c) (c)
56064 # define __percpu
56065+# define __force_percpu
56066 # define __rcu
56067+# define __force_rcu
56068 #endif
56069
56070 #ifdef __KERNEL__
56071@@ -264,6 +297,14 @@ void ftrace_likely_update(struct ftrace_
56072 # define __attribute_const__ /* unimplemented */
56073 #endif
56074
56075+#ifndef __no_const
56076+# define __no_const
56077+#endif
56078+
56079+#ifndef __do_const
56080+# define __do_const
56081+#endif
56082+
56083 /*
56084 * Tell gcc if a function is cold. The compiler will assume any path
56085 * directly leading to the call is unlikely.
56086@@ -273,6 +314,22 @@ void ftrace_likely_update(struct ftrace_
56087 #define __cold
56088 #endif
56089
56090+#ifndef __alloc_size
56091+#define __alloc_size(...)
56092+#endif
56093+
56094+#ifndef __bos
56095+#define __bos(ptr, arg)
56096+#endif
56097+
56098+#ifndef __bos0
56099+#define __bos0(ptr)
56100+#endif
56101+
56102+#ifndef __bos1
56103+#define __bos1(ptr)
56104+#endif
56105+
56106 /* Simple shorthand for a section definition */
56107 #ifndef __section
56108 # define __section(S) __attribute__ ((__section__(#S)))
56109@@ -306,6 +363,7 @@ void ftrace_likely_update(struct ftrace_
56110 * use is to mediate communication between process-level code and irq/NMI
56111 * handlers, all running on the same CPU.
56112 */
56113-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
56114+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
56115+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
56116
56117 #endif /* __LINUX_COMPILER_H */
56118diff -urNp linux-3.0.4/include/linux/cpuset.h linux-3.0.4/include/linux/cpuset.h
56119--- linux-3.0.4/include/linux/cpuset.h 2011-07-21 22:17:23.000000000 -0400
56120+++ linux-3.0.4/include/linux/cpuset.h 2011-08-23 21:47:56.000000000 -0400
56121@@ -118,7 +118,7 @@ static inline void put_mems_allowed(void
56122 * nodemask.
56123 */
56124 smp_mb();
56125- --ACCESS_ONCE(current->mems_allowed_change_disable);
56126+ --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
56127 }
56128
56129 static inline void set_mems_allowed(nodemask_t nodemask)
56130diff -urNp linux-3.0.4/include/linux/crypto.h linux-3.0.4/include/linux/crypto.h
56131--- linux-3.0.4/include/linux/crypto.h 2011-07-21 22:17:23.000000000 -0400
56132+++ linux-3.0.4/include/linux/crypto.h 2011-08-23 21:47:56.000000000 -0400
56133@@ -361,7 +361,7 @@ struct cipher_tfm {
56134 const u8 *key, unsigned int keylen);
56135 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
56136 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
56137-};
56138+} __no_const;
56139
56140 struct hash_tfm {
56141 int (*init)(struct hash_desc *desc);
56142@@ -382,13 +382,13 @@ struct compress_tfm {
56143 int (*cot_decompress)(struct crypto_tfm *tfm,
56144 const u8 *src, unsigned int slen,
56145 u8 *dst, unsigned int *dlen);
56146-};
56147+} __no_const;
56148
56149 struct rng_tfm {
56150 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
56151 unsigned int dlen);
56152 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
56153-};
56154+} __no_const;
56155
56156 #define crt_ablkcipher crt_u.ablkcipher
56157 #define crt_aead crt_u.aead
56158diff -urNp linux-3.0.4/include/linux/decompress/mm.h linux-3.0.4/include/linux/decompress/mm.h
56159--- linux-3.0.4/include/linux/decompress/mm.h 2011-07-21 22:17:23.000000000 -0400
56160+++ linux-3.0.4/include/linux/decompress/mm.h 2011-08-23 21:47:56.000000000 -0400
56161@@ -77,7 +77,7 @@ static void free(void *where)
56162 * warnings when not needed (indeed large_malloc / large_free are not
56163 * needed by inflate */
56164
56165-#define malloc(a) kmalloc(a, GFP_KERNEL)
56166+#define malloc(a) kmalloc((a), GFP_KERNEL)
56167 #define free(a) kfree(a)
56168
56169 #define large_malloc(a) vmalloc(a)
56170diff -urNp linux-3.0.4/include/linux/dma-mapping.h linux-3.0.4/include/linux/dma-mapping.h
56171--- linux-3.0.4/include/linux/dma-mapping.h 2011-07-21 22:17:23.000000000 -0400
56172+++ linux-3.0.4/include/linux/dma-mapping.h 2011-08-26 19:49:56.000000000 -0400
56173@@ -50,7 +50,7 @@ struct dma_map_ops {
56174 int (*dma_supported)(struct device *dev, u64 mask);
56175 int (*set_dma_mask)(struct device *dev, u64 mask);
56176 int is_phys;
56177-};
56178+} __do_const;
56179
56180 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
56181
56182diff -urNp linux-3.0.4/include/linux/efi.h linux-3.0.4/include/linux/efi.h
56183--- linux-3.0.4/include/linux/efi.h 2011-07-21 22:17:23.000000000 -0400
56184+++ linux-3.0.4/include/linux/efi.h 2011-08-23 21:47:56.000000000 -0400
56185@@ -410,7 +410,7 @@ struct efivar_operations {
56186 efi_get_variable_t *get_variable;
56187 efi_get_next_variable_t *get_next_variable;
56188 efi_set_variable_t *set_variable;
56189-};
56190+} __no_const;
56191
56192 struct efivars {
56193 /*
56194diff -urNp linux-3.0.4/include/linux/elf.h linux-3.0.4/include/linux/elf.h
56195--- linux-3.0.4/include/linux/elf.h 2011-07-21 22:17:23.000000000 -0400
56196+++ linux-3.0.4/include/linux/elf.h 2011-08-23 21:47:56.000000000 -0400
56197@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
56198 #define PT_GNU_EH_FRAME 0x6474e550
56199
56200 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
56201+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
56202+
56203+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
56204+
56205+/* Constants for the e_flags field */
56206+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
56207+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
56208+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
56209+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
56210+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
56211+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
56212
56213 /*
56214 * Extended Numbering
56215@@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
56216 #define DT_DEBUG 21
56217 #define DT_TEXTREL 22
56218 #define DT_JMPREL 23
56219+#define DT_FLAGS 30
56220+ #define DF_TEXTREL 0x00000004
56221 #define DT_ENCODING 32
56222 #define OLD_DT_LOOS 0x60000000
56223 #define DT_LOOS 0x6000000d
56224@@ -252,6 +265,19 @@ typedef struct elf64_hdr {
56225 #define PF_W 0x2
56226 #define PF_X 0x1
56227
56228+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
56229+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
56230+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
56231+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
56232+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
56233+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
56234+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
56235+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
56236+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
56237+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
56238+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
56239+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
56240+
56241 typedef struct elf32_phdr{
56242 Elf32_Word p_type;
56243 Elf32_Off p_offset;
56244@@ -344,6 +370,8 @@ typedef struct elf64_shdr {
56245 #define EI_OSABI 7
56246 #define EI_PAD 8
56247
56248+#define EI_PAX 14
56249+
56250 #define ELFMAG0 0x7f /* EI_MAG */
56251 #define ELFMAG1 'E'
56252 #define ELFMAG2 'L'
56253@@ -422,6 +450,7 @@ extern Elf32_Dyn _DYNAMIC [];
56254 #define elf_note elf32_note
56255 #define elf_addr_t Elf32_Off
56256 #define Elf_Half Elf32_Half
56257+#define elf_dyn Elf32_Dyn
56258
56259 #else
56260
56261@@ -432,6 +461,7 @@ extern Elf64_Dyn _DYNAMIC [];
56262 #define elf_note elf64_note
56263 #define elf_addr_t Elf64_Off
56264 #define Elf_Half Elf64_Half
56265+#define elf_dyn Elf64_Dyn
56266
56267 #endif
56268
56269diff -urNp linux-3.0.4/include/linux/firewire.h linux-3.0.4/include/linux/firewire.h
56270--- linux-3.0.4/include/linux/firewire.h 2011-07-21 22:17:23.000000000 -0400
56271+++ linux-3.0.4/include/linux/firewire.h 2011-08-23 21:47:56.000000000 -0400
56272@@ -428,7 +428,7 @@ struct fw_iso_context {
56273 union {
56274 fw_iso_callback_t sc;
56275 fw_iso_mc_callback_t mc;
56276- } callback;
56277+ } __no_const callback;
56278 void *callback_data;
56279 };
56280
56281diff -urNp linux-3.0.4/include/linux/fscache-cache.h linux-3.0.4/include/linux/fscache-cache.h
56282--- linux-3.0.4/include/linux/fscache-cache.h 2011-07-21 22:17:23.000000000 -0400
56283+++ linux-3.0.4/include/linux/fscache-cache.h 2011-08-23 21:47:56.000000000 -0400
56284@@ -102,7 +102,7 @@ struct fscache_operation {
56285 fscache_operation_release_t release;
56286 };
56287
56288-extern atomic_t fscache_op_debug_id;
56289+extern atomic_unchecked_t fscache_op_debug_id;
56290 extern void fscache_op_work_func(struct work_struct *work);
56291
56292 extern void fscache_enqueue_operation(struct fscache_operation *);
56293@@ -122,7 +122,7 @@ static inline void fscache_operation_ini
56294 {
56295 INIT_WORK(&op->work, fscache_op_work_func);
56296 atomic_set(&op->usage, 1);
56297- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
56298+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
56299 op->processor = processor;
56300 op->release = release;
56301 INIT_LIST_HEAD(&op->pend_link);
56302diff -urNp linux-3.0.4/include/linux/fs.h linux-3.0.4/include/linux/fs.h
56303--- linux-3.0.4/include/linux/fs.h 2011-07-21 22:17:23.000000000 -0400
56304+++ linux-3.0.4/include/linux/fs.h 2011-08-26 19:49:56.000000000 -0400
56305@@ -109,6 +109,11 @@ struct inodes_stat_t {
56306 /* File was opened by fanotify and shouldn't generate fanotify events */
56307 #define FMODE_NONOTIFY ((__force fmode_t)0x1000000)
56308
56309+/* Hack for grsec so as not to require read permission simply to execute
56310+ * a binary
56311+ */
56312+#define FMODE_GREXEC ((__force fmode_t)0x2000000)
56313+
56314 /*
56315 * The below are the various read and write types that we support. Some of
56316 * them include behavioral modifiers that send information down to the
56317@@ -1571,7 +1576,8 @@ struct file_operations {
56318 int (*setlease)(struct file *, long, struct file_lock **);
56319 long (*fallocate)(struct file *file, int mode, loff_t offset,
56320 loff_t len);
56321-};
56322+} __do_const;
56323+typedef struct file_operations __no_const file_operations_no_const;
56324
56325 #define IPERM_FLAG_RCU 0x0001
56326
56327diff -urNp linux-3.0.4/include/linux/fsnotify.h linux-3.0.4/include/linux/fsnotify.h
56328--- linux-3.0.4/include/linux/fsnotify.h 2011-07-21 22:17:23.000000000 -0400
56329+++ linux-3.0.4/include/linux/fsnotify.h 2011-08-24 18:10:29.000000000 -0400
56330@@ -314,7 +314,7 @@ static inline void fsnotify_change(struc
56331 */
56332 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
56333 {
56334- return kstrdup(name, GFP_KERNEL);
56335+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
56336 }
56337
56338 /*
56339diff -urNp linux-3.0.4/include/linux/fs_struct.h linux-3.0.4/include/linux/fs_struct.h
56340--- linux-3.0.4/include/linux/fs_struct.h 2011-07-21 22:17:23.000000000 -0400
56341+++ linux-3.0.4/include/linux/fs_struct.h 2011-08-23 21:47:56.000000000 -0400
56342@@ -6,7 +6,7 @@
56343 #include <linux/seqlock.h>
56344
56345 struct fs_struct {
56346- int users;
56347+ atomic_t users;
56348 spinlock_t lock;
56349 seqcount_t seq;
56350 int umask;
56351diff -urNp linux-3.0.4/include/linux/ftrace_event.h linux-3.0.4/include/linux/ftrace_event.h
56352--- linux-3.0.4/include/linux/ftrace_event.h 2011-07-21 22:17:23.000000000 -0400
56353+++ linux-3.0.4/include/linux/ftrace_event.h 2011-08-23 21:47:56.000000000 -0400
56354@@ -96,7 +96,7 @@ struct trace_event_functions {
56355 trace_print_func raw;
56356 trace_print_func hex;
56357 trace_print_func binary;
56358-};
56359+} __no_const;
56360
56361 struct trace_event {
56362 struct hlist_node node;
56363@@ -247,7 +247,7 @@ extern int trace_define_field(struct ftr
56364 extern int trace_add_event_call(struct ftrace_event_call *call);
56365 extern void trace_remove_event_call(struct ftrace_event_call *call);
56366
56367-#define is_signed_type(type) (((type)(-1)) < 0)
56368+#define is_signed_type(type) (((type)(-1)) < (type)1)
56369
56370 int trace_set_clr_event(const char *system, const char *event, int set);
56371
56372diff -urNp linux-3.0.4/include/linux/genhd.h linux-3.0.4/include/linux/genhd.h
56373--- linux-3.0.4/include/linux/genhd.h 2011-07-21 22:17:23.000000000 -0400
56374+++ linux-3.0.4/include/linux/genhd.h 2011-08-23 21:47:56.000000000 -0400
56375@@ -184,7 +184,7 @@ struct gendisk {
56376 struct kobject *slave_dir;
56377
56378 struct timer_rand_state *random;
56379- atomic_t sync_io; /* RAID */
56380+ atomic_unchecked_t sync_io; /* RAID */
56381 struct disk_events *ev;
56382 #ifdef CONFIG_BLK_DEV_INTEGRITY
56383 struct blk_integrity *integrity;
56384diff -urNp linux-3.0.4/include/linux/gracl.h linux-3.0.4/include/linux/gracl.h
56385--- linux-3.0.4/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
56386+++ linux-3.0.4/include/linux/gracl.h 2011-08-23 21:48:14.000000000 -0400
56387@@ -0,0 +1,317 @@
56388+#ifndef GR_ACL_H
56389+#define GR_ACL_H
56390+
56391+#include <linux/grdefs.h>
56392+#include <linux/resource.h>
56393+#include <linux/capability.h>
56394+#include <linux/dcache.h>
56395+#include <asm/resource.h>
56396+
56397+/* Major status information */
56398+
56399+#define GR_VERSION "grsecurity 2.2.2"
56400+#define GRSECURITY_VERSION 0x2202
56401+
56402+enum {
56403+ GR_SHUTDOWN = 0,
56404+ GR_ENABLE = 1,
56405+ GR_SPROLE = 2,
56406+ GR_RELOAD = 3,
56407+ GR_SEGVMOD = 4,
56408+ GR_STATUS = 5,
56409+ GR_UNSPROLE = 6,
56410+ GR_PASSSET = 7,
56411+ GR_SPROLEPAM = 8,
56412+};
56413+
56414+/* Password setup definitions
56415+ * kernel/grhash.c */
56416+enum {
56417+ GR_PW_LEN = 128,
56418+ GR_SALT_LEN = 16,
56419+ GR_SHA_LEN = 32,
56420+};
56421+
56422+enum {
56423+ GR_SPROLE_LEN = 64,
56424+};
56425+
56426+enum {
56427+ GR_NO_GLOB = 0,
56428+ GR_REG_GLOB,
56429+ GR_CREATE_GLOB
56430+};
56431+
56432+#define GR_NLIMITS 32
56433+
56434+/* Begin Data Structures */
56435+
56436+struct sprole_pw {
56437+ unsigned char *rolename;
56438+ unsigned char salt[GR_SALT_LEN];
56439+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
56440+};
56441+
56442+struct name_entry {
56443+ __u32 key;
56444+ ino_t inode;
56445+ dev_t device;
56446+ char *name;
56447+ __u16 len;
56448+ __u8 deleted;
56449+ struct name_entry *prev;
56450+ struct name_entry *next;
56451+};
56452+
56453+struct inodev_entry {
56454+ struct name_entry *nentry;
56455+ struct inodev_entry *prev;
56456+ struct inodev_entry *next;
56457+};
56458+
56459+struct acl_role_db {
56460+ struct acl_role_label **r_hash;
56461+ __u32 r_size;
56462+};
56463+
56464+struct inodev_db {
56465+ struct inodev_entry **i_hash;
56466+ __u32 i_size;
56467+};
56468+
56469+struct name_db {
56470+ struct name_entry **n_hash;
56471+ __u32 n_size;
56472+};
56473+
56474+struct crash_uid {
56475+ uid_t uid;
56476+ unsigned long expires;
56477+};
56478+
56479+struct gr_hash_struct {
56480+ void **table;
56481+ void **nametable;
56482+ void *first;
56483+ __u32 table_size;
56484+ __u32 used_size;
56485+ int type;
56486+};
56487+
56488+/* Userspace Grsecurity ACL data structures */
56489+
56490+struct acl_subject_label {
56491+ char *filename;
56492+ ino_t inode;
56493+ dev_t device;
56494+ __u32 mode;
56495+ kernel_cap_t cap_mask;
56496+ kernel_cap_t cap_lower;
56497+ kernel_cap_t cap_invert_audit;
56498+
56499+ struct rlimit res[GR_NLIMITS];
56500+ __u32 resmask;
56501+
56502+ __u8 user_trans_type;
56503+ __u8 group_trans_type;
56504+ uid_t *user_transitions;
56505+ gid_t *group_transitions;
56506+ __u16 user_trans_num;
56507+ __u16 group_trans_num;
56508+
56509+ __u32 sock_families[2];
56510+ __u32 ip_proto[8];
56511+ __u32 ip_type;
56512+ struct acl_ip_label **ips;
56513+ __u32 ip_num;
56514+ __u32 inaddr_any_override;
56515+
56516+ __u32 crashes;
56517+ unsigned long expires;
56518+
56519+ struct acl_subject_label *parent_subject;
56520+ struct gr_hash_struct *hash;
56521+ struct acl_subject_label *prev;
56522+ struct acl_subject_label *next;
56523+
56524+ struct acl_object_label **obj_hash;
56525+ __u32 obj_hash_size;
56526+ __u16 pax_flags;
56527+};
56528+
56529+struct role_allowed_ip {
56530+ __u32 addr;
56531+ __u32 netmask;
56532+
56533+ struct role_allowed_ip *prev;
56534+ struct role_allowed_ip *next;
56535+};
56536+
56537+struct role_transition {
56538+ char *rolename;
56539+
56540+ struct role_transition *prev;
56541+ struct role_transition *next;
56542+};
56543+
56544+struct acl_role_label {
56545+ char *rolename;
56546+ uid_t uidgid;
56547+ __u16 roletype;
56548+
56549+ __u16 auth_attempts;
56550+ unsigned long expires;
56551+
56552+ struct acl_subject_label *root_label;
56553+ struct gr_hash_struct *hash;
56554+
56555+ struct acl_role_label *prev;
56556+ struct acl_role_label *next;
56557+
56558+ struct role_transition *transitions;
56559+ struct role_allowed_ip *allowed_ips;
56560+ uid_t *domain_children;
56561+ __u16 domain_child_num;
56562+
56563+ struct acl_subject_label **subj_hash;
56564+ __u32 subj_hash_size;
56565+};
56566+
56567+struct user_acl_role_db {
56568+ struct acl_role_label **r_table;
56569+ __u32 num_pointers; /* Number of allocations to track */
56570+ __u32 num_roles; /* Number of roles */
56571+ __u32 num_domain_children; /* Number of domain children */
56572+ __u32 num_subjects; /* Number of subjects */
56573+ __u32 num_objects; /* Number of objects */
56574+};
56575+
56576+struct acl_object_label {
56577+ char *filename;
56578+ ino_t inode;
56579+ dev_t device;
56580+ __u32 mode;
56581+
56582+ struct acl_subject_label *nested;
56583+ struct acl_object_label *globbed;
56584+
56585+ /* next two structures not used */
56586+
56587+ struct acl_object_label *prev;
56588+ struct acl_object_label *next;
56589+};
56590+
56591+struct acl_ip_label {
56592+ char *iface;
56593+ __u32 addr;
56594+ __u32 netmask;
56595+ __u16 low, high;
56596+ __u8 mode;
56597+ __u32 type;
56598+ __u32 proto[8];
56599+
56600+ /* next two structures not used */
56601+
56602+ struct acl_ip_label *prev;
56603+ struct acl_ip_label *next;
56604+};
56605+
56606+struct gr_arg {
56607+ struct user_acl_role_db role_db;
56608+ unsigned char pw[GR_PW_LEN];
56609+ unsigned char salt[GR_SALT_LEN];
56610+ unsigned char sum[GR_SHA_LEN];
56611+ unsigned char sp_role[GR_SPROLE_LEN];
56612+ struct sprole_pw *sprole_pws;
56613+ dev_t segv_device;
56614+ ino_t segv_inode;
56615+ uid_t segv_uid;
56616+ __u16 num_sprole_pws;
56617+ __u16 mode;
56618+};
56619+
56620+struct gr_arg_wrapper {
56621+ struct gr_arg *arg;
56622+ __u32 version;
56623+ __u32 size;
56624+};
56625+
56626+struct subject_map {
56627+ struct acl_subject_label *user;
56628+ struct acl_subject_label *kernel;
56629+ struct subject_map *prev;
56630+ struct subject_map *next;
56631+};
56632+
56633+struct acl_subj_map_db {
56634+ struct subject_map **s_hash;
56635+ __u32 s_size;
56636+};
56637+
56638+/* End Data Structures Section */
56639+
56640+/* Hash functions generated by empirical testing by Brad Spengler
56641+ Makes good use of the low bits of the inode. Generally 0-1 times
56642+ in loop for successful match. 0-3 for unsuccessful match.
56643+ Shift/add algorithm with modulus of table size and an XOR*/
56644+
56645+static __inline__ unsigned int
56646+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
56647+{
56648+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
56649+}
56650+
56651+ static __inline__ unsigned int
56652+shash(const struct acl_subject_label *userp, const unsigned int sz)
56653+{
56654+ return ((const unsigned long)userp % sz);
56655+}
56656+
56657+static __inline__ unsigned int
56658+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
56659+{
56660+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
56661+}
56662+
56663+static __inline__ unsigned int
56664+nhash(const char *name, const __u16 len, const unsigned int sz)
56665+{
56666+ return full_name_hash((const unsigned char *)name, len) % sz;
56667+}
56668+
56669+#define FOR_EACH_ROLE_START(role) \
56670+ role = role_list; \
56671+ while (role) {
56672+
56673+#define FOR_EACH_ROLE_END(role) \
56674+ role = role->prev; \
56675+ }
56676+
56677+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
56678+ subj = NULL; \
56679+ iter = 0; \
56680+ while (iter < role->subj_hash_size) { \
56681+ if (subj == NULL) \
56682+ subj = role->subj_hash[iter]; \
56683+ if (subj == NULL) { \
56684+ iter++; \
56685+ continue; \
56686+ }
56687+
56688+#define FOR_EACH_SUBJECT_END(subj,iter) \
56689+ subj = subj->next; \
56690+ if (subj == NULL) \
56691+ iter++; \
56692+ }
56693+
56694+
56695+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
56696+ subj = role->hash->first; \
56697+ while (subj != NULL) {
56698+
56699+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
56700+ subj = subj->next; \
56701+ }
56702+
56703+#endif
56704+
56705diff -urNp linux-3.0.4/include/linux/gralloc.h linux-3.0.4/include/linux/gralloc.h
56706--- linux-3.0.4/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
56707+++ linux-3.0.4/include/linux/gralloc.h 2011-08-23 21:48:14.000000000 -0400
56708@@ -0,0 +1,9 @@
56709+#ifndef __GRALLOC_H
56710+#define __GRALLOC_H
56711+
56712+void acl_free_all(void);
56713+int acl_alloc_stack_init(unsigned long size);
56714+void *acl_alloc(unsigned long len);
56715+void *acl_alloc_num(unsigned long num, unsigned long len);
56716+
56717+#endif
56718diff -urNp linux-3.0.4/include/linux/grdefs.h linux-3.0.4/include/linux/grdefs.h
56719--- linux-3.0.4/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
56720+++ linux-3.0.4/include/linux/grdefs.h 2011-08-23 21:48:14.000000000 -0400
56721@@ -0,0 +1,140 @@
56722+#ifndef GRDEFS_H
56723+#define GRDEFS_H
56724+
56725+/* Begin grsecurity status declarations */
56726+
56727+enum {
56728+ GR_READY = 0x01,
56729+ GR_STATUS_INIT = 0x00 // disabled state
56730+};
56731+
56732+/* Begin ACL declarations */
56733+
56734+/* Role flags */
56735+
56736+enum {
56737+ GR_ROLE_USER = 0x0001,
56738+ GR_ROLE_GROUP = 0x0002,
56739+ GR_ROLE_DEFAULT = 0x0004,
56740+ GR_ROLE_SPECIAL = 0x0008,
56741+ GR_ROLE_AUTH = 0x0010,
56742+ GR_ROLE_NOPW = 0x0020,
56743+ GR_ROLE_GOD = 0x0040,
56744+ GR_ROLE_LEARN = 0x0080,
56745+ GR_ROLE_TPE = 0x0100,
56746+ GR_ROLE_DOMAIN = 0x0200,
56747+ GR_ROLE_PAM = 0x0400,
56748+ GR_ROLE_PERSIST = 0x0800
56749+};
56750+
56751+/* ACL Subject and Object mode flags */
56752+enum {
56753+ GR_DELETED = 0x80000000
56754+};
56755+
56756+/* ACL Object-only mode flags */
56757+enum {
56758+ GR_READ = 0x00000001,
56759+ GR_APPEND = 0x00000002,
56760+ GR_WRITE = 0x00000004,
56761+ GR_EXEC = 0x00000008,
56762+ GR_FIND = 0x00000010,
56763+ GR_INHERIT = 0x00000020,
56764+ GR_SETID = 0x00000040,
56765+ GR_CREATE = 0x00000080,
56766+ GR_DELETE = 0x00000100,
56767+ GR_LINK = 0x00000200,
56768+ GR_AUDIT_READ = 0x00000400,
56769+ GR_AUDIT_APPEND = 0x00000800,
56770+ GR_AUDIT_WRITE = 0x00001000,
56771+ GR_AUDIT_EXEC = 0x00002000,
56772+ GR_AUDIT_FIND = 0x00004000,
56773+ GR_AUDIT_INHERIT= 0x00008000,
56774+ GR_AUDIT_SETID = 0x00010000,
56775+ GR_AUDIT_CREATE = 0x00020000,
56776+ GR_AUDIT_DELETE = 0x00040000,
56777+ GR_AUDIT_LINK = 0x00080000,
56778+ GR_PTRACERD = 0x00100000,
56779+ GR_NOPTRACE = 0x00200000,
56780+ GR_SUPPRESS = 0x00400000,
56781+ GR_NOLEARN = 0x00800000,
56782+ GR_INIT_TRANSFER= 0x01000000
56783+};
56784+
56785+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
56786+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
56787+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
56788+
56789+/* ACL subject-only mode flags */
56790+enum {
56791+ GR_KILL = 0x00000001,
56792+ GR_VIEW = 0x00000002,
56793+ GR_PROTECTED = 0x00000004,
56794+ GR_LEARN = 0x00000008,
56795+ GR_OVERRIDE = 0x00000010,
56796+ /* just a placeholder, this mode is only used in userspace */
56797+ GR_DUMMY = 0x00000020,
56798+ GR_PROTSHM = 0x00000040,
56799+ GR_KILLPROC = 0x00000080,
56800+ GR_KILLIPPROC = 0x00000100,
56801+ /* just a placeholder, this mode is only used in userspace */
56802+ GR_NOTROJAN = 0x00000200,
56803+ GR_PROTPROCFD = 0x00000400,
56804+ GR_PROCACCT = 0x00000800,
56805+ GR_RELAXPTRACE = 0x00001000,
56806+ GR_NESTED = 0x00002000,
56807+ GR_INHERITLEARN = 0x00004000,
56808+ GR_PROCFIND = 0x00008000,
56809+ GR_POVERRIDE = 0x00010000,
56810+ GR_KERNELAUTH = 0x00020000,
56811+ GR_ATSECURE = 0x00040000,
56812+ GR_SHMEXEC = 0x00080000
56813+};
56814+
56815+enum {
56816+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
56817+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
56818+ GR_PAX_ENABLE_MPROTECT = 0x0004,
56819+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
56820+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
56821+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
56822+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
56823+ GR_PAX_DISABLE_MPROTECT = 0x0400,
56824+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
56825+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
56826+};
56827+
56828+enum {
56829+ GR_ID_USER = 0x01,
56830+ GR_ID_GROUP = 0x02,
56831+};
56832+
56833+enum {
56834+ GR_ID_ALLOW = 0x01,
56835+ GR_ID_DENY = 0x02,
56836+};
56837+
56838+#define GR_CRASH_RES 31
56839+#define GR_UIDTABLE_MAX 500
56840+
56841+/* begin resource learning section */
56842+enum {
56843+ GR_RLIM_CPU_BUMP = 60,
56844+ GR_RLIM_FSIZE_BUMP = 50000,
56845+ GR_RLIM_DATA_BUMP = 10000,
56846+ GR_RLIM_STACK_BUMP = 1000,
56847+ GR_RLIM_CORE_BUMP = 10000,
56848+ GR_RLIM_RSS_BUMP = 500000,
56849+ GR_RLIM_NPROC_BUMP = 1,
56850+ GR_RLIM_NOFILE_BUMP = 5,
56851+ GR_RLIM_MEMLOCK_BUMP = 50000,
56852+ GR_RLIM_AS_BUMP = 500000,
56853+ GR_RLIM_LOCKS_BUMP = 2,
56854+ GR_RLIM_SIGPENDING_BUMP = 5,
56855+ GR_RLIM_MSGQUEUE_BUMP = 10000,
56856+ GR_RLIM_NICE_BUMP = 1,
56857+ GR_RLIM_RTPRIO_BUMP = 1,
56858+ GR_RLIM_RTTIME_BUMP = 1000000
56859+};
56860+
56861+#endif
56862diff -urNp linux-3.0.4/include/linux/grinternal.h linux-3.0.4/include/linux/grinternal.h
56863--- linux-3.0.4/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
56864+++ linux-3.0.4/include/linux/grinternal.h 2011-09-24 08:43:45.000000000 -0400
56865@@ -0,0 +1,219 @@
56866+#ifndef __GRINTERNAL_H
56867+#define __GRINTERNAL_H
56868+
56869+#ifdef CONFIG_GRKERNSEC
56870+
56871+#include <linux/fs.h>
56872+#include <linux/mnt_namespace.h>
56873+#include <linux/nsproxy.h>
56874+#include <linux/gracl.h>
56875+#include <linux/grdefs.h>
56876+#include <linux/grmsg.h>
56877+
56878+void gr_add_learn_entry(const char *fmt, ...)
56879+ __attribute__ ((format (printf, 1, 2)));
56880+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
56881+ const struct vfsmount *mnt);
56882+__u32 gr_check_create(const struct dentry *new_dentry,
56883+ const struct dentry *parent,
56884+ const struct vfsmount *mnt, const __u32 mode);
56885+int gr_check_protected_task(const struct task_struct *task);
56886+__u32 to_gr_audit(const __u32 reqmode);
56887+int gr_set_acls(const int type);
56888+int gr_apply_subject_to_task(struct task_struct *task);
56889+int gr_acl_is_enabled(void);
56890+char gr_roletype_to_char(void);
56891+
56892+void gr_handle_alertkill(struct task_struct *task);
56893+char *gr_to_filename(const struct dentry *dentry,
56894+ const struct vfsmount *mnt);
56895+char *gr_to_filename1(const struct dentry *dentry,
56896+ const struct vfsmount *mnt);
56897+char *gr_to_filename2(const struct dentry *dentry,
56898+ const struct vfsmount *mnt);
56899+char *gr_to_filename3(const struct dentry *dentry,
56900+ const struct vfsmount *mnt);
56901+
56902+extern int grsec_enable_harden_ptrace;
56903+extern int grsec_enable_link;
56904+extern int grsec_enable_fifo;
56905+extern int grsec_enable_execve;
56906+extern int grsec_enable_shm;
56907+extern int grsec_enable_execlog;
56908+extern int grsec_enable_signal;
56909+extern int grsec_enable_audit_ptrace;
56910+extern int grsec_enable_forkfail;
56911+extern int grsec_enable_time;
56912+extern int grsec_enable_rofs;
56913+extern int grsec_enable_chroot_shmat;
56914+extern int grsec_enable_chroot_mount;
56915+extern int grsec_enable_chroot_double;
56916+extern int grsec_enable_chroot_pivot;
56917+extern int grsec_enable_chroot_chdir;
56918+extern int grsec_enable_chroot_chmod;
56919+extern int grsec_enable_chroot_mknod;
56920+extern int grsec_enable_chroot_fchdir;
56921+extern int grsec_enable_chroot_nice;
56922+extern int grsec_enable_chroot_execlog;
56923+extern int grsec_enable_chroot_caps;
56924+extern int grsec_enable_chroot_sysctl;
56925+extern int grsec_enable_chroot_unix;
56926+extern int grsec_enable_tpe;
56927+extern int grsec_tpe_gid;
56928+extern int grsec_enable_tpe_all;
56929+extern int grsec_enable_tpe_invert;
56930+extern int grsec_enable_socket_all;
56931+extern int grsec_socket_all_gid;
56932+extern int grsec_enable_socket_client;
56933+extern int grsec_socket_client_gid;
56934+extern int grsec_enable_socket_server;
56935+extern int grsec_socket_server_gid;
56936+extern int grsec_audit_gid;
56937+extern int grsec_enable_group;
56938+extern int grsec_enable_audit_textrel;
56939+extern int grsec_enable_log_rwxmaps;
56940+extern int grsec_enable_mount;
56941+extern int grsec_enable_chdir;
56942+extern int grsec_resource_logging;
56943+extern int grsec_enable_blackhole;
56944+extern int grsec_lastack_retries;
56945+extern int grsec_enable_brute;
56946+extern int grsec_lock;
56947+
56948+extern spinlock_t grsec_alert_lock;
56949+extern unsigned long grsec_alert_wtime;
56950+extern unsigned long grsec_alert_fyet;
56951+
56952+extern spinlock_t grsec_audit_lock;
56953+
56954+extern rwlock_t grsec_exec_file_lock;
56955+
56956+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
56957+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
56958+ (tsk)->exec_file->f_vfsmnt) : "/")
56959+
56960+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
56961+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
56962+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
56963+
56964+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
56965+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
56966+ (tsk)->exec_file->f_vfsmnt) : "/")
56967+
56968+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
56969+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
56970+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
56971+
56972+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
56973+
56974+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
56975+
56976+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
56977+ (task)->pid, (cred)->uid, \
56978+ (cred)->euid, (cred)->gid, (cred)->egid, \
56979+ gr_parent_task_fullpath(task), \
56980+ (task)->real_parent->comm, (task)->real_parent->pid, \
56981+ (pcred)->uid, (pcred)->euid, \
56982+ (pcred)->gid, (pcred)->egid
56983+
56984+#define GR_CHROOT_CAPS {{ \
56985+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
56986+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
56987+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
56988+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
56989+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
56990+ CAP_TO_MASK(CAP_IPC_OWNER) , CAP_TO_MASK(CAP_SYSLOG) }}
56991+
56992+#define security_learn(normal_msg,args...) \
56993+({ \
56994+ read_lock(&grsec_exec_file_lock); \
56995+ gr_add_learn_entry(normal_msg "\n", ## args); \
56996+ read_unlock(&grsec_exec_file_lock); \
56997+})
56998+
56999+enum {
57000+ GR_DO_AUDIT,
57001+ GR_DONT_AUDIT,
57002+ /* used for non-audit messages that we shouldn't kill the task on */
57003+ GR_DONT_AUDIT_GOOD
57004+};
57005+
57006+enum {
57007+ GR_TTYSNIFF,
57008+ GR_RBAC,
57009+ GR_RBAC_STR,
57010+ GR_STR_RBAC,
57011+ GR_RBAC_MODE2,
57012+ GR_RBAC_MODE3,
57013+ GR_FILENAME,
57014+ GR_SYSCTL_HIDDEN,
57015+ GR_NOARGS,
57016+ GR_ONE_INT,
57017+ GR_ONE_INT_TWO_STR,
57018+ GR_ONE_STR,
57019+ GR_STR_INT,
57020+ GR_TWO_STR_INT,
57021+ GR_TWO_INT,
57022+ GR_TWO_U64,
57023+ GR_THREE_INT,
57024+ GR_FIVE_INT_TWO_STR,
57025+ GR_TWO_STR,
57026+ GR_THREE_STR,
57027+ GR_FOUR_STR,
57028+ GR_STR_FILENAME,
57029+ GR_FILENAME_STR,
57030+ GR_FILENAME_TWO_INT,
57031+ GR_FILENAME_TWO_INT_STR,
57032+ GR_TEXTREL,
57033+ GR_PTRACE,
57034+ GR_RESOURCE,
57035+ GR_CAP,
57036+ GR_SIG,
57037+ GR_SIG2,
57038+ GR_CRASH1,
57039+ GR_CRASH2,
57040+ GR_PSACCT,
57041+ GR_RWXMAP
57042+};
57043+
57044+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
57045+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
57046+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
57047+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
57048+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
57049+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
57050+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
57051+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
57052+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
57053+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
57054+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
57055+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
57056+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
57057+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
57058+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
57059+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
57060+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
57061+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
57062+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
57063+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
57064+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
57065+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
57066+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
57067+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
57068+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
57069+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
57070+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
57071+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
57072+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
57073+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
57074+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
57075+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
57076+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
57077+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
57078+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
57079+
57080+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
57081+
57082+#endif
57083+
57084+#endif
57085diff -urNp linux-3.0.4/include/linux/grmsg.h linux-3.0.4/include/linux/grmsg.h
57086--- linux-3.0.4/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
57087+++ linux-3.0.4/include/linux/grmsg.h 2011-09-14 09:16:54.000000000 -0400
57088@@ -0,0 +1,108 @@
57089+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
57090+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
57091+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
57092+#define GR_STOPMOD_MSG "denied modification of module state by "
57093+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
57094+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
57095+#define GR_IOPERM_MSG "denied use of ioperm() by "
57096+#define GR_IOPL_MSG "denied use of iopl() by "
57097+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
57098+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
57099+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
57100+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
57101+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
57102+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
57103+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
57104+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
57105+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
57106+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
57107+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
57108+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
57109+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
57110+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
57111+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
57112+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
57113+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
57114+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
57115+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
57116+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
57117+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
57118+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
57119+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
57120+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
57121+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
57122+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
57123+#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
57124+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
57125+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
57126+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
57127+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
57128+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
57129+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
57130+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
57131+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
57132+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
57133+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
57134+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
57135+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
57136+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
57137+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
57138+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
57139+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
57140+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
57141+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
57142+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
57143+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
57144+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
57145+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
57146+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
57147+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
57148+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
57149+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
57150+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
57151+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
57152+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
57153+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
57154+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
57155+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
57156+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
57157+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
57158+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
57159+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
57160+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
57161+#define GR_FAILFORK_MSG "failed fork with errno %s by "
57162+#define GR_NICE_CHROOT_MSG "denied priority change by "
57163+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
57164+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
57165+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
57166+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
57167+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
57168+#define GR_TIME_MSG "time set by "
57169+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
57170+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
57171+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
57172+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
57173+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
57174+#define GR_BIND_MSG "denied bind() by "
57175+#define GR_CONNECT_MSG "denied connect() by "
57176+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
57177+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
57178+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
57179+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
57180+#define GR_CAP_ACL_MSG "use of %s denied for "
57181+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
57182+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
57183+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
57184+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
57185+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
57186+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
57187+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
57188+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
57189+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
57190+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
57191+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
57192+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
57193+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
57194+#define GR_VM86_MSG "denied use of vm86 by "
57195+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
57196+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
57197diff -urNp linux-3.0.4/include/linux/grsecurity.h linux-3.0.4/include/linux/grsecurity.h
57198--- linux-3.0.4/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
57199+++ linux-3.0.4/include/linux/grsecurity.h 2011-09-14 09:16:54.000000000 -0400
57200@@ -0,0 +1,226 @@
57201+#ifndef GR_SECURITY_H
57202+#define GR_SECURITY_H
57203+#include <linux/fs.h>
57204+#include <linux/fs_struct.h>
57205+#include <linux/binfmts.h>
57206+#include <linux/gracl.h>
57207+
57208+/* notify of brain-dead configs */
57209+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
57210+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
57211+#endif
57212+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
57213+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
57214+#endif
57215+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
57216+#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
57217+#endif
57218+#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
57219+#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
57220+#endif
57221+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
57222+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
57223+#endif
57224+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
57225+#error "CONFIG_PAX enabled, but no PaX options are enabled."
57226+#endif
57227+
57228+#include <linux/compat.h>
57229+
57230+struct user_arg_ptr {
57231+#ifdef CONFIG_COMPAT
57232+ bool is_compat;
57233+#endif
57234+ union {
57235+ const char __user *const __user *native;
57236+#ifdef CONFIG_COMPAT
57237+ compat_uptr_t __user *compat;
57238+#endif
57239+ } ptr;
57240+};
57241+
57242+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
57243+void gr_handle_brute_check(void);
57244+void gr_handle_kernel_exploit(void);
57245+int gr_process_user_ban(void);
57246+
57247+char gr_roletype_to_char(void);
57248+
57249+int gr_acl_enable_at_secure(void);
57250+
57251+int gr_check_user_change(int real, int effective, int fs);
57252+int gr_check_group_change(int real, int effective, int fs);
57253+
57254+void gr_del_task_from_ip_table(struct task_struct *p);
57255+
57256+int gr_pid_is_chrooted(struct task_struct *p);
57257+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
57258+int gr_handle_chroot_nice(void);
57259+int gr_handle_chroot_sysctl(const int op);
57260+int gr_handle_chroot_setpriority(struct task_struct *p,
57261+ const int niceval);
57262+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
57263+int gr_handle_chroot_chroot(const struct dentry *dentry,
57264+ const struct vfsmount *mnt);
57265+void gr_handle_chroot_chdir(struct path *path);
57266+int gr_handle_chroot_chmod(const struct dentry *dentry,
57267+ const struct vfsmount *mnt, const int mode);
57268+int gr_handle_chroot_mknod(const struct dentry *dentry,
57269+ const struct vfsmount *mnt, const int mode);
57270+int gr_handle_chroot_mount(const struct dentry *dentry,
57271+ const struct vfsmount *mnt,
57272+ const char *dev_name);
57273+int gr_handle_chroot_pivot(void);
57274+int gr_handle_chroot_unix(const pid_t pid);
57275+
57276+int gr_handle_rawio(const struct inode *inode);
57277+
57278+void gr_handle_ioperm(void);
57279+void gr_handle_iopl(void);
57280+
57281+int gr_tpe_allow(const struct file *file);
57282+
57283+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
57284+void gr_clear_chroot_entries(struct task_struct *task);
57285+
57286+void gr_log_forkfail(const int retval);
57287+void gr_log_timechange(void);
57288+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
57289+void gr_log_chdir(const struct dentry *dentry,
57290+ const struct vfsmount *mnt);
57291+void gr_log_chroot_exec(const struct dentry *dentry,
57292+ const struct vfsmount *mnt);
57293+void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
57294+void gr_log_remount(const char *devname, const int retval);
57295+void gr_log_unmount(const char *devname, const int retval);
57296+void gr_log_mount(const char *from, const char *to, const int retval);
57297+void gr_log_textrel(struct vm_area_struct *vma);
57298+void gr_log_rwxmmap(struct file *file);
57299+void gr_log_rwxmprotect(struct file *file);
57300+
57301+int gr_handle_follow_link(const struct inode *parent,
57302+ const struct inode *inode,
57303+ const struct dentry *dentry,
57304+ const struct vfsmount *mnt);
57305+int gr_handle_fifo(const struct dentry *dentry,
57306+ const struct vfsmount *mnt,
57307+ const struct dentry *dir, const int flag,
57308+ const int acc_mode);
57309+int gr_handle_hardlink(const struct dentry *dentry,
57310+ const struct vfsmount *mnt,
57311+ struct inode *inode,
57312+ const int mode, const char *to);
57313+
57314+int gr_is_capable(const int cap);
57315+int gr_is_capable_nolog(const int cap);
57316+void gr_learn_resource(const struct task_struct *task, const int limit,
57317+ const unsigned long wanted, const int gt);
57318+void gr_copy_label(struct task_struct *tsk);
57319+void gr_handle_crash(struct task_struct *task, const int sig);
57320+int gr_handle_signal(const struct task_struct *p, const int sig);
57321+int gr_check_crash_uid(const uid_t uid);
57322+int gr_check_protected_task(const struct task_struct *task);
57323+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
57324+int gr_acl_handle_mmap(const struct file *file,
57325+ const unsigned long prot);
57326+int gr_acl_handle_mprotect(const struct file *file,
57327+ const unsigned long prot);
57328+int gr_check_hidden_task(const struct task_struct *tsk);
57329+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
57330+ const struct vfsmount *mnt);
57331+__u32 gr_acl_handle_utime(const struct dentry *dentry,
57332+ const struct vfsmount *mnt);
57333+__u32 gr_acl_handle_access(const struct dentry *dentry,
57334+ const struct vfsmount *mnt, const int fmode);
57335+__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
57336+ const struct vfsmount *mnt, mode_t mode);
57337+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
57338+ const struct vfsmount *mnt, mode_t mode);
57339+__u32 gr_acl_handle_chown(const struct dentry *dentry,
57340+ const struct vfsmount *mnt);
57341+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
57342+ const struct vfsmount *mnt);
57343+int gr_handle_ptrace(struct task_struct *task, const long request);
57344+int gr_handle_proc_ptrace(struct task_struct *task);
57345+__u32 gr_acl_handle_execve(const struct dentry *dentry,
57346+ const struct vfsmount *mnt);
57347+int gr_check_crash_exec(const struct file *filp);
57348+int gr_acl_is_enabled(void);
57349+void gr_set_kernel_label(struct task_struct *task);
57350+void gr_set_role_label(struct task_struct *task, const uid_t uid,
57351+ const gid_t gid);
57352+int gr_set_proc_label(const struct dentry *dentry,
57353+ const struct vfsmount *mnt,
57354+ const int unsafe_share);
57355+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
57356+ const struct vfsmount *mnt);
57357+__u32 gr_acl_handle_open(const struct dentry *dentry,
57358+ const struct vfsmount *mnt, const int fmode);
57359+__u32 gr_acl_handle_creat(const struct dentry *dentry,
57360+ const struct dentry *p_dentry,
57361+ const struct vfsmount *p_mnt, const int fmode,
57362+ const int imode);
57363+void gr_handle_create(const struct dentry *dentry,
57364+ const struct vfsmount *mnt);
57365+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
57366+ const struct dentry *parent_dentry,
57367+ const struct vfsmount *parent_mnt,
57368+ const int mode);
57369+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
57370+ const struct dentry *parent_dentry,
57371+ const struct vfsmount *parent_mnt);
57372+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
57373+ const struct vfsmount *mnt);
57374+void gr_handle_delete(const ino_t ino, const dev_t dev);
57375+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
57376+ const struct vfsmount *mnt);
57377+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
57378+ const struct dentry *parent_dentry,
57379+ const struct vfsmount *parent_mnt,
57380+ const char *from);
57381+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
57382+ const struct dentry *parent_dentry,
57383+ const struct vfsmount *parent_mnt,
57384+ const struct dentry *old_dentry,
57385+ const struct vfsmount *old_mnt, const char *to);
57386+int gr_acl_handle_rename(struct dentry *new_dentry,
57387+ struct dentry *parent_dentry,
57388+ const struct vfsmount *parent_mnt,
57389+ struct dentry *old_dentry,
57390+ struct inode *old_parent_inode,
57391+ struct vfsmount *old_mnt, const char *newname);
57392+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
57393+ struct dentry *old_dentry,
57394+ struct dentry *new_dentry,
57395+ struct vfsmount *mnt, const __u8 replace);
57396+__u32 gr_check_link(const struct dentry *new_dentry,
57397+ const struct dentry *parent_dentry,
57398+ const struct vfsmount *parent_mnt,
57399+ const struct dentry *old_dentry,
57400+ const struct vfsmount *old_mnt);
57401+int gr_acl_handle_filldir(const struct file *file, const char *name,
57402+ const unsigned int namelen, const ino_t ino);
57403+
57404+__u32 gr_acl_handle_unix(const struct dentry *dentry,
57405+ const struct vfsmount *mnt);
57406+void gr_acl_handle_exit(void);
57407+void gr_acl_handle_psacct(struct task_struct *task, const long code);
57408+int gr_acl_handle_procpidmem(const struct task_struct *task);
57409+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
57410+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
57411+void gr_audit_ptrace(struct task_struct *task);
57412+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
57413+
57414+#ifdef CONFIG_GRKERNSEC
57415+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
57416+void gr_handle_vm86(void);
57417+void gr_handle_mem_readwrite(u64 from, u64 to);
57418+
57419+extern int grsec_enable_dmesg;
57420+extern int grsec_disable_privio;
57421+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
57422+extern int grsec_enable_chroot_findtask;
57423+#endif
57424+#endif
57425+
57426+#endif
57427diff -urNp linux-3.0.4/include/linux/grsock.h linux-3.0.4/include/linux/grsock.h
57428--- linux-3.0.4/include/linux/grsock.h 1969-12-31 19:00:00.000000000 -0500
57429+++ linux-3.0.4/include/linux/grsock.h 2011-08-23 21:48:14.000000000 -0400
57430@@ -0,0 +1,19 @@
57431+#ifndef __GRSOCK_H
57432+#define __GRSOCK_H
57433+
57434+extern void gr_attach_curr_ip(const struct sock *sk);
57435+extern int gr_handle_sock_all(const int family, const int type,
57436+ const int protocol);
57437+extern int gr_handle_sock_server(const struct sockaddr *sck);
57438+extern int gr_handle_sock_server_other(const struct sock *sck);
57439+extern int gr_handle_sock_client(const struct sockaddr *sck);
57440+extern int gr_search_connect(struct socket * sock,
57441+ struct sockaddr_in * addr);
57442+extern int gr_search_bind(struct socket * sock,
57443+ struct sockaddr_in * addr);
57444+extern int gr_search_listen(struct socket * sock);
57445+extern int gr_search_accept(struct socket * sock);
57446+extern int gr_search_socket(const int domain, const int type,
57447+ const int protocol);
57448+
57449+#endif
57450diff -urNp linux-3.0.4/include/linux/hid.h linux-3.0.4/include/linux/hid.h
57451--- linux-3.0.4/include/linux/hid.h 2011-07-21 22:17:23.000000000 -0400
57452+++ linux-3.0.4/include/linux/hid.h 2011-08-23 21:47:56.000000000 -0400
57453@@ -675,7 +675,7 @@ struct hid_ll_driver {
57454 unsigned int code, int value);
57455
57456 int (*parse)(struct hid_device *hdev);
57457-};
57458+} __no_const;
57459
57460 #define PM_HINT_FULLON 1<<5
57461 #define PM_HINT_NORMAL 1<<1
57462diff -urNp linux-3.0.4/include/linux/highmem.h linux-3.0.4/include/linux/highmem.h
57463--- linux-3.0.4/include/linux/highmem.h 2011-07-21 22:17:23.000000000 -0400
57464+++ linux-3.0.4/include/linux/highmem.h 2011-08-23 21:47:56.000000000 -0400
57465@@ -185,6 +185,18 @@ static inline void clear_highpage(struct
57466 kunmap_atomic(kaddr, KM_USER0);
57467 }
57468
57469+static inline void sanitize_highpage(struct page *page)
57470+{
57471+ void *kaddr;
57472+ unsigned long flags;
57473+
57474+ local_irq_save(flags);
57475+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
57476+ clear_page(kaddr);
57477+ kunmap_atomic(kaddr, KM_CLEARPAGE);
57478+ local_irq_restore(flags);
57479+}
57480+
57481 static inline void zero_user_segments(struct page *page,
57482 unsigned start1, unsigned end1,
57483 unsigned start2, unsigned end2)
57484diff -urNp linux-3.0.4/include/linux/i2c.h linux-3.0.4/include/linux/i2c.h
57485--- linux-3.0.4/include/linux/i2c.h 2011-07-21 22:17:23.000000000 -0400
57486+++ linux-3.0.4/include/linux/i2c.h 2011-08-23 21:47:56.000000000 -0400
57487@@ -346,6 +346,7 @@ struct i2c_algorithm {
57488 /* To determine what the adapter supports */
57489 u32 (*functionality) (struct i2c_adapter *);
57490 };
57491+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
57492
57493 /*
57494 * i2c_adapter is the structure used to identify a physical i2c bus along
57495diff -urNp linux-3.0.4/include/linux/i2o.h linux-3.0.4/include/linux/i2o.h
57496--- linux-3.0.4/include/linux/i2o.h 2011-07-21 22:17:23.000000000 -0400
57497+++ linux-3.0.4/include/linux/i2o.h 2011-08-23 21:47:56.000000000 -0400
57498@@ -564,7 +564,7 @@ struct i2o_controller {
57499 struct i2o_device *exec; /* Executive */
57500 #if BITS_PER_LONG == 64
57501 spinlock_t context_list_lock; /* lock for context_list */
57502- atomic_t context_list_counter; /* needed for unique contexts */
57503+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
57504 struct list_head context_list; /* list of context id's
57505 and pointers */
57506 #endif
57507diff -urNp linux-3.0.4/include/linux/init.h linux-3.0.4/include/linux/init.h
57508--- linux-3.0.4/include/linux/init.h 2011-07-21 22:17:23.000000000 -0400
57509+++ linux-3.0.4/include/linux/init.h 2011-08-23 21:47:56.000000000 -0400
57510@@ -293,13 +293,13 @@ void __init parse_early_options(char *cm
57511
57512 /* Each module must use one module_init(). */
57513 #define module_init(initfn) \
57514- static inline initcall_t __inittest(void) \
57515+ static inline __used initcall_t __inittest(void) \
57516 { return initfn; } \
57517 int init_module(void) __attribute__((alias(#initfn)));
57518
57519 /* This is only required if you want to be unloadable. */
57520 #define module_exit(exitfn) \
57521- static inline exitcall_t __exittest(void) \
57522+ static inline __used exitcall_t __exittest(void) \
57523 { return exitfn; } \
57524 void cleanup_module(void) __attribute__((alias(#exitfn)));
57525
57526diff -urNp linux-3.0.4/include/linux/init_task.h linux-3.0.4/include/linux/init_task.h
57527--- linux-3.0.4/include/linux/init_task.h 2011-07-21 22:17:23.000000000 -0400
57528+++ linux-3.0.4/include/linux/init_task.h 2011-08-23 21:47:56.000000000 -0400
57529@@ -126,6 +126,12 @@ extern struct cred init_cred;
57530 # define INIT_PERF_EVENTS(tsk)
57531 #endif
57532
57533+#ifdef CONFIG_X86
57534+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
57535+#else
57536+#define INIT_TASK_THREAD_INFO
57537+#endif
57538+
57539 /*
57540 * INIT_TASK is used to set up the first task table, touch at
57541 * your own risk!. Base=0, limit=0x1fffff (=2MB)
57542@@ -164,6 +170,7 @@ extern struct cred init_cred;
57543 RCU_INIT_POINTER(.cred, &init_cred), \
57544 .comm = "swapper", \
57545 .thread = INIT_THREAD, \
57546+ INIT_TASK_THREAD_INFO \
57547 .fs = &init_fs, \
57548 .files = &init_files, \
57549 .signal = &init_signals, \
57550diff -urNp linux-3.0.4/include/linux/intel-iommu.h linux-3.0.4/include/linux/intel-iommu.h
57551--- linux-3.0.4/include/linux/intel-iommu.h 2011-07-21 22:17:23.000000000 -0400
57552+++ linux-3.0.4/include/linux/intel-iommu.h 2011-08-23 21:47:56.000000000 -0400
57553@@ -296,7 +296,7 @@ struct iommu_flush {
57554 u8 fm, u64 type);
57555 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
57556 unsigned int size_order, u64 type);
57557-};
57558+} __no_const;
57559
57560 enum {
57561 SR_DMAR_FECTL_REG,
57562diff -urNp linux-3.0.4/include/linux/interrupt.h linux-3.0.4/include/linux/interrupt.h
57563--- linux-3.0.4/include/linux/interrupt.h 2011-07-21 22:17:23.000000000 -0400
57564+++ linux-3.0.4/include/linux/interrupt.h 2011-08-23 21:47:56.000000000 -0400
57565@@ -422,7 +422,7 @@ enum
57566 /* map softirq index to softirq name. update 'softirq_to_name' in
57567 * kernel/softirq.c when adding a new softirq.
57568 */
57569-extern char *softirq_to_name[NR_SOFTIRQS];
57570+extern const char * const softirq_to_name[NR_SOFTIRQS];
57571
57572 /* softirq mask and active fields moved to irq_cpustat_t in
57573 * asm/hardirq.h to get better cache usage. KAO
57574@@ -430,12 +430,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
57575
57576 struct softirq_action
57577 {
57578- void (*action)(struct softirq_action *);
57579+ void (*action)(void);
57580 };
57581
57582 asmlinkage void do_softirq(void);
57583 asmlinkage void __do_softirq(void);
57584-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
57585+extern void open_softirq(int nr, void (*action)(void));
57586 extern void softirq_init(void);
57587 static inline void __raise_softirq_irqoff(unsigned int nr)
57588 {
57589diff -urNp linux-3.0.4/include/linux/kallsyms.h linux-3.0.4/include/linux/kallsyms.h
57590--- linux-3.0.4/include/linux/kallsyms.h 2011-07-21 22:17:23.000000000 -0400
57591+++ linux-3.0.4/include/linux/kallsyms.h 2011-08-23 21:48:14.000000000 -0400
57592@@ -15,7 +15,8 @@
57593
57594 struct module;
57595
57596-#ifdef CONFIG_KALLSYMS
57597+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
57598+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
57599 /* Lookup the address for a symbol. Returns 0 if not found. */
57600 unsigned long kallsyms_lookup_name(const char *name);
57601
57602@@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(un
57603 /* Stupid that this does nothing, but I didn't create this mess. */
57604 #define __print_symbol(fmt, addr)
57605 #endif /*CONFIG_KALLSYMS*/
57606+#else /* when included by kallsyms.c, vsnprintf.c, or
57607+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
57608+extern void __print_symbol(const char *fmt, unsigned long address);
57609+extern int sprint_backtrace(char *buffer, unsigned long address);
57610+extern int sprint_symbol(char *buffer, unsigned long address);
57611+const char *kallsyms_lookup(unsigned long addr,
57612+ unsigned long *symbolsize,
57613+ unsigned long *offset,
57614+ char **modname, char *namebuf);
57615+#endif
57616
57617 /* This macro allows us to keep printk typechecking */
57618 static void __check_printsym_format(const char *fmt, ...)
57619diff -urNp linux-3.0.4/include/linux/kgdb.h linux-3.0.4/include/linux/kgdb.h
57620--- linux-3.0.4/include/linux/kgdb.h 2011-07-21 22:17:23.000000000 -0400
57621+++ linux-3.0.4/include/linux/kgdb.h 2011-08-26 19:49:56.000000000 -0400
57622@@ -53,7 +53,7 @@ extern int kgdb_connected;
57623 extern int kgdb_io_module_registered;
57624
57625 extern atomic_t kgdb_setting_breakpoint;
57626-extern atomic_t kgdb_cpu_doing_single_step;
57627+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
57628
57629 extern struct task_struct *kgdb_usethread;
57630 extern struct task_struct *kgdb_contthread;
57631@@ -251,7 +251,7 @@ struct kgdb_arch {
57632 void (*disable_hw_break)(struct pt_regs *regs);
57633 void (*remove_all_hw_break)(void);
57634 void (*correct_hw_break)(void);
57635-};
57636+} __do_const;
57637
57638 /**
57639 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
57640@@ -276,7 +276,7 @@ struct kgdb_io {
57641 void (*pre_exception) (void);
57642 void (*post_exception) (void);
57643 int is_console;
57644-};
57645+} __do_const;
57646
57647 extern struct kgdb_arch arch_kgdb_ops;
57648
57649diff -urNp linux-3.0.4/include/linux/kmod.h linux-3.0.4/include/linux/kmod.h
57650--- linux-3.0.4/include/linux/kmod.h 2011-07-21 22:17:23.000000000 -0400
57651+++ linux-3.0.4/include/linux/kmod.h 2011-08-23 21:48:14.000000000 -0400
57652@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysc
57653 * usually useless though. */
57654 extern int __request_module(bool wait, const char *name, ...) \
57655 __attribute__((format(printf, 2, 3)));
57656+extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
57657+ __attribute__((format(printf, 3, 4)));
57658 #define request_module(mod...) __request_module(true, mod)
57659 #define request_module_nowait(mod...) __request_module(false, mod)
57660 #define try_then_request_module(x, mod...) \
57661diff -urNp linux-3.0.4/include/linux/kvm_host.h linux-3.0.4/include/linux/kvm_host.h
57662--- linux-3.0.4/include/linux/kvm_host.h 2011-07-21 22:17:23.000000000 -0400
57663+++ linux-3.0.4/include/linux/kvm_host.h 2011-08-23 21:47:56.000000000 -0400
57664@@ -307,7 +307,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
57665 void vcpu_load(struct kvm_vcpu *vcpu);
57666 void vcpu_put(struct kvm_vcpu *vcpu);
57667
57668-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
57669+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
57670 struct module *module);
57671 void kvm_exit(void);
57672
57673@@ -446,7 +446,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
57674 struct kvm_guest_debug *dbg);
57675 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
57676
57677-int kvm_arch_init(void *opaque);
57678+int kvm_arch_init(const void *opaque);
57679 void kvm_arch_exit(void);
57680
57681 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
57682diff -urNp linux-3.0.4/include/linux/libata.h linux-3.0.4/include/linux/libata.h
57683--- linux-3.0.4/include/linux/libata.h 2011-07-21 22:17:23.000000000 -0400
57684+++ linux-3.0.4/include/linux/libata.h 2011-08-26 19:49:56.000000000 -0400
57685@@ -899,7 +899,7 @@ struct ata_port_operations {
57686 * fields must be pointers.
57687 */
57688 const struct ata_port_operations *inherits;
57689-};
57690+} __do_const;
57691
57692 struct ata_port_info {
57693 unsigned long flags;
57694diff -urNp linux-3.0.4/include/linux/mca.h linux-3.0.4/include/linux/mca.h
57695--- linux-3.0.4/include/linux/mca.h 2011-07-21 22:17:23.000000000 -0400
57696+++ linux-3.0.4/include/linux/mca.h 2011-08-23 21:47:56.000000000 -0400
57697@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
57698 int region);
57699 void * (*mca_transform_memory)(struct mca_device *,
57700 void *memory);
57701-};
57702+} __no_const;
57703
57704 struct mca_bus {
57705 u64 default_dma_mask;
57706diff -urNp linux-3.0.4/include/linux/memory.h linux-3.0.4/include/linux/memory.h
57707--- linux-3.0.4/include/linux/memory.h 2011-07-21 22:17:23.000000000 -0400
57708+++ linux-3.0.4/include/linux/memory.h 2011-08-23 21:47:56.000000000 -0400
57709@@ -144,7 +144,7 @@ struct memory_accessor {
57710 size_t count);
57711 ssize_t (*write)(struct memory_accessor *, const char *buf,
57712 off_t offset, size_t count);
57713-};
57714+} __no_const;
57715
57716 /*
57717 * Kernel text modification mutex, used for code patching. Users of this lock
57718diff -urNp linux-3.0.4/include/linux/mfd/abx500.h linux-3.0.4/include/linux/mfd/abx500.h
57719--- linux-3.0.4/include/linux/mfd/abx500.h 2011-07-21 22:17:23.000000000 -0400
57720+++ linux-3.0.4/include/linux/mfd/abx500.h 2011-08-23 21:47:56.000000000 -0400
57721@@ -234,6 +234,7 @@ struct abx500_ops {
57722 int (*event_registers_startup_state_get) (struct device *, u8 *);
57723 int (*startup_irq_enabled) (struct device *, unsigned int);
57724 };
57725+typedef struct abx500_ops __no_const abx500_ops_no_const;
57726
57727 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
57728 void abx500_remove_ops(struct device *dev);
57729diff -urNp linux-3.0.4/include/linux/mm.h linux-3.0.4/include/linux/mm.h
57730--- linux-3.0.4/include/linux/mm.h 2011-09-02 18:11:21.000000000 -0400
57731+++ linux-3.0.4/include/linux/mm.h 2011-08-23 21:47:56.000000000 -0400
57732@@ -113,7 +113,14 @@ extern unsigned int kobjsize(const void
57733
57734 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
57735 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
57736+
57737+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
57738+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
57739+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
57740+#else
57741 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
57742+#endif
57743+
57744 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
57745 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
57746
57747@@ -1009,34 +1016,6 @@ int set_page_dirty(struct page *page);
57748 int set_page_dirty_lock(struct page *page);
57749 int clear_page_dirty_for_io(struct page *page);
57750
57751-/* Is the vma a continuation of the stack vma above it? */
57752-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
57753-{
57754- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
57755-}
57756-
57757-static inline int stack_guard_page_start(struct vm_area_struct *vma,
57758- unsigned long addr)
57759-{
57760- return (vma->vm_flags & VM_GROWSDOWN) &&
57761- (vma->vm_start == addr) &&
57762- !vma_growsdown(vma->vm_prev, addr);
57763-}
57764-
57765-/* Is the vma a continuation of the stack vma below it? */
57766-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
57767-{
57768- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
57769-}
57770-
57771-static inline int stack_guard_page_end(struct vm_area_struct *vma,
57772- unsigned long addr)
57773-{
57774- return (vma->vm_flags & VM_GROWSUP) &&
57775- (vma->vm_end == addr) &&
57776- !vma_growsup(vma->vm_next, addr);
57777-}
57778-
57779 extern unsigned long move_page_tables(struct vm_area_struct *vma,
57780 unsigned long old_addr, struct vm_area_struct *new_vma,
57781 unsigned long new_addr, unsigned long len);
57782@@ -1169,6 +1148,15 @@ struct shrinker {
57783 extern void register_shrinker(struct shrinker *);
57784 extern void unregister_shrinker(struct shrinker *);
57785
57786+#ifdef CONFIG_MMU
57787+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
57788+#else
57789+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
57790+{
57791+ return __pgprot(0);
57792+}
57793+#endif
57794+
57795 int vma_wants_writenotify(struct vm_area_struct *vma);
57796
57797 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
57798@@ -1452,6 +1440,7 @@ out:
57799 }
57800
57801 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
57802+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
57803
57804 extern unsigned long do_brk(unsigned long, unsigned long);
57805
57806@@ -1510,6 +1499,10 @@ extern struct vm_area_struct * find_vma(
57807 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
57808 struct vm_area_struct **pprev);
57809
57810+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
57811+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
57812+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
57813+
57814 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
57815 NULL if none. Assume start_addr < end_addr. */
57816 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
57817@@ -1526,15 +1519,6 @@ static inline unsigned long vma_pages(st
57818 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
57819 }
57820
57821-#ifdef CONFIG_MMU
57822-pgprot_t vm_get_page_prot(unsigned long vm_flags);
57823-#else
57824-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
57825-{
57826- return __pgprot(0);
57827-}
57828-#endif
57829-
57830 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
57831 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
57832 unsigned long pfn, unsigned long size, pgprot_t);
57833@@ -1647,7 +1631,7 @@ extern int unpoison_memory(unsigned long
57834 extern int sysctl_memory_failure_early_kill;
57835 extern int sysctl_memory_failure_recovery;
57836 extern void shake_page(struct page *p, int access);
57837-extern atomic_long_t mce_bad_pages;
57838+extern atomic_long_unchecked_t mce_bad_pages;
57839 extern int soft_offline_page(struct page *page, int flags);
57840
57841 extern void dump_page(struct page *page);
57842@@ -1661,5 +1645,11 @@ extern void copy_user_huge_page(struct p
57843 unsigned int pages_per_huge_page);
57844 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
57845
57846+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
57847+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
57848+#else
57849+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
57850+#endif
57851+
57852 #endif /* __KERNEL__ */
57853 #endif /* _LINUX_MM_H */
57854diff -urNp linux-3.0.4/include/linux/mm_types.h linux-3.0.4/include/linux/mm_types.h
57855--- linux-3.0.4/include/linux/mm_types.h 2011-07-21 22:17:23.000000000 -0400
57856+++ linux-3.0.4/include/linux/mm_types.h 2011-08-23 21:47:56.000000000 -0400
57857@@ -184,6 +184,8 @@ struct vm_area_struct {
57858 #ifdef CONFIG_NUMA
57859 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
57860 #endif
57861+
57862+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
57863 };
57864
57865 struct core_thread {
57866@@ -316,6 +318,24 @@ struct mm_struct {
57867 #ifdef CONFIG_CPUMASK_OFFSTACK
57868 struct cpumask cpumask_allocation;
57869 #endif
57870+
57871+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
57872+ unsigned long pax_flags;
57873+#endif
57874+
57875+#ifdef CONFIG_PAX_DLRESOLVE
57876+ unsigned long call_dl_resolve;
57877+#endif
57878+
57879+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
57880+ unsigned long call_syscall;
57881+#endif
57882+
57883+#ifdef CONFIG_PAX_ASLR
57884+ unsigned long delta_mmap; /* randomized offset */
57885+ unsigned long delta_stack; /* randomized offset */
57886+#endif
57887+
57888 };
57889
57890 static inline void mm_init_cpumask(struct mm_struct *mm)
57891diff -urNp linux-3.0.4/include/linux/mmu_notifier.h linux-3.0.4/include/linux/mmu_notifier.h
57892--- linux-3.0.4/include/linux/mmu_notifier.h 2011-07-21 22:17:23.000000000 -0400
57893+++ linux-3.0.4/include/linux/mmu_notifier.h 2011-08-23 21:47:56.000000000 -0400
57894@@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destr
57895 */
57896 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
57897 ({ \
57898- pte_t __pte; \
57899+ pte_t ___pte; \
57900 struct vm_area_struct *___vma = __vma; \
57901 unsigned long ___address = __address; \
57902- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
57903+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
57904 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
57905- __pte; \
57906+ ___pte; \
57907 })
57908
57909 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
57910diff -urNp linux-3.0.4/include/linux/mmzone.h linux-3.0.4/include/linux/mmzone.h
57911--- linux-3.0.4/include/linux/mmzone.h 2011-07-21 22:17:23.000000000 -0400
57912+++ linux-3.0.4/include/linux/mmzone.h 2011-08-23 21:47:56.000000000 -0400
57913@@ -350,7 +350,7 @@ struct zone {
57914 unsigned long flags; /* zone flags, see below */
57915
57916 /* Zone statistics */
57917- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
57918+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
57919
57920 /*
57921 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
57922diff -urNp linux-3.0.4/include/linux/mod_devicetable.h linux-3.0.4/include/linux/mod_devicetable.h
57923--- linux-3.0.4/include/linux/mod_devicetable.h 2011-07-21 22:17:23.000000000 -0400
57924+++ linux-3.0.4/include/linux/mod_devicetable.h 2011-08-23 21:47:56.000000000 -0400
57925@@ -12,7 +12,7 @@
57926 typedef unsigned long kernel_ulong_t;
57927 #endif
57928
57929-#define PCI_ANY_ID (~0)
57930+#define PCI_ANY_ID ((__u16)~0)
57931
57932 struct pci_device_id {
57933 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
57934@@ -131,7 +131,7 @@ struct usb_device_id {
57935 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
57936 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
57937
57938-#define HID_ANY_ID (~0)
57939+#define HID_ANY_ID (~0U)
57940
57941 struct hid_device_id {
57942 __u16 bus;
57943diff -urNp linux-3.0.4/include/linux/module.h linux-3.0.4/include/linux/module.h
57944--- linux-3.0.4/include/linux/module.h 2011-07-21 22:17:23.000000000 -0400
57945+++ linux-3.0.4/include/linux/module.h 2011-08-23 21:47:56.000000000 -0400
57946@@ -16,6 +16,7 @@
57947 #include <linux/kobject.h>
57948 #include <linux/moduleparam.h>
57949 #include <linux/tracepoint.h>
57950+#include <linux/fs.h>
57951
57952 #include <linux/percpu.h>
57953 #include <asm/module.h>
57954@@ -325,19 +326,16 @@ struct module
57955 int (*init)(void);
57956
57957 /* If this is non-NULL, vfree after init() returns */
57958- void *module_init;
57959+ void *module_init_rx, *module_init_rw;
57960
57961 /* Here is the actual code + data, vfree'd on unload. */
57962- void *module_core;
57963+ void *module_core_rx, *module_core_rw;
57964
57965 /* Here are the sizes of the init and core sections */
57966- unsigned int init_size, core_size;
57967+ unsigned int init_size_rw, core_size_rw;
57968
57969 /* The size of the executable code in each section. */
57970- unsigned int init_text_size, core_text_size;
57971-
57972- /* Size of RO sections of the module (text+rodata) */
57973- unsigned int init_ro_size, core_ro_size;
57974+ unsigned int init_size_rx, core_size_rx;
57975
57976 /* Arch-specific module values */
57977 struct mod_arch_specific arch;
57978@@ -393,6 +391,10 @@ struct module
57979 #ifdef CONFIG_EVENT_TRACING
57980 struct ftrace_event_call **trace_events;
57981 unsigned int num_trace_events;
57982+ struct file_operations trace_id;
57983+ struct file_operations trace_enable;
57984+ struct file_operations trace_format;
57985+ struct file_operations trace_filter;
57986 #endif
57987 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
57988 unsigned int num_ftrace_callsites;
57989@@ -443,16 +445,46 @@ bool is_module_address(unsigned long add
57990 bool is_module_percpu_address(unsigned long addr);
57991 bool is_module_text_address(unsigned long addr);
57992
57993+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
57994+{
57995+
57996+#ifdef CONFIG_PAX_KERNEXEC
57997+ if (ktla_ktva(addr) >= (unsigned long)start &&
57998+ ktla_ktva(addr) < (unsigned long)start + size)
57999+ return 1;
58000+#endif
58001+
58002+ return ((void *)addr >= start && (void *)addr < start + size);
58003+}
58004+
58005+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
58006+{
58007+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
58008+}
58009+
58010+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
58011+{
58012+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
58013+}
58014+
58015+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
58016+{
58017+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
58018+}
58019+
58020+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
58021+{
58022+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
58023+}
58024+
58025 static inline int within_module_core(unsigned long addr, struct module *mod)
58026 {
58027- return (unsigned long)mod->module_core <= addr &&
58028- addr < (unsigned long)mod->module_core + mod->core_size;
58029+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
58030 }
58031
58032 static inline int within_module_init(unsigned long addr, struct module *mod)
58033 {
58034- return (unsigned long)mod->module_init <= addr &&
58035- addr < (unsigned long)mod->module_init + mod->init_size;
58036+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
58037 }
58038
58039 /* Search for module by name: must hold module_mutex. */
58040diff -urNp linux-3.0.4/include/linux/moduleloader.h linux-3.0.4/include/linux/moduleloader.h
58041--- linux-3.0.4/include/linux/moduleloader.h 2011-07-21 22:17:23.000000000 -0400
58042+++ linux-3.0.4/include/linux/moduleloader.h 2011-08-23 21:47:56.000000000 -0400
58043@@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
58044 sections. Returns NULL on failure. */
58045 void *module_alloc(unsigned long size);
58046
58047+#ifdef CONFIG_PAX_KERNEXEC
58048+void *module_alloc_exec(unsigned long size);
58049+#else
58050+#define module_alloc_exec(x) module_alloc(x)
58051+#endif
58052+
58053 /* Free memory returned from module_alloc. */
58054 void module_free(struct module *mod, void *module_region);
58055
58056+#ifdef CONFIG_PAX_KERNEXEC
58057+void module_free_exec(struct module *mod, void *module_region);
58058+#else
58059+#define module_free_exec(x, y) module_free((x), (y))
58060+#endif
58061+
58062 /* Apply the given relocation to the (simplified) ELF. Return -error
58063 or 0. */
58064 int apply_relocate(Elf_Shdr *sechdrs,
58065diff -urNp linux-3.0.4/include/linux/moduleparam.h linux-3.0.4/include/linux/moduleparam.h
58066--- linux-3.0.4/include/linux/moduleparam.h 2011-07-21 22:17:23.000000000 -0400
58067+++ linux-3.0.4/include/linux/moduleparam.h 2011-08-23 21:47:56.000000000 -0400
58068@@ -255,7 +255,7 @@ static inline void __kernel_param_unlock
58069 * @len is usually just sizeof(string).
58070 */
58071 #define module_param_string(name, string, len, perm) \
58072- static const struct kparam_string __param_string_##name \
58073+ static const struct kparam_string __param_string_##name __used \
58074 = { len, string }; \
58075 __module_param_call(MODULE_PARAM_PREFIX, name, \
58076 &param_ops_string, \
58077@@ -370,7 +370,7 @@ extern int param_get_invbool(char *buffe
58078 * module_param_named() for why this might be necessary.
58079 */
58080 #define module_param_array_named(name, array, type, nump, perm) \
58081- static const struct kparam_array __param_arr_##name \
58082+ static const struct kparam_array __param_arr_##name __used \
58083 = { .max = ARRAY_SIZE(array), .num = nump, \
58084 .ops = &param_ops_##type, \
58085 .elemsize = sizeof(array[0]), .elem = array }; \
58086diff -urNp linux-3.0.4/include/linux/namei.h linux-3.0.4/include/linux/namei.h
58087--- linux-3.0.4/include/linux/namei.h 2011-07-21 22:17:23.000000000 -0400
58088+++ linux-3.0.4/include/linux/namei.h 2011-08-23 21:47:56.000000000 -0400
58089@@ -24,7 +24,7 @@ struct nameidata {
58090 unsigned seq;
58091 int last_type;
58092 unsigned depth;
58093- char *saved_names[MAX_NESTED_LINKS + 1];
58094+ const char *saved_names[MAX_NESTED_LINKS + 1];
58095
58096 /* Intent data */
58097 union {
58098@@ -91,12 +91,12 @@ extern int follow_up(struct path *);
58099 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
58100 extern void unlock_rename(struct dentry *, struct dentry *);
58101
58102-static inline void nd_set_link(struct nameidata *nd, char *path)
58103+static inline void nd_set_link(struct nameidata *nd, const char *path)
58104 {
58105 nd->saved_names[nd->depth] = path;
58106 }
58107
58108-static inline char *nd_get_link(struct nameidata *nd)
58109+static inline const char *nd_get_link(const struct nameidata *nd)
58110 {
58111 return nd->saved_names[nd->depth];
58112 }
58113diff -urNp linux-3.0.4/include/linux/netdevice.h linux-3.0.4/include/linux/netdevice.h
58114--- linux-3.0.4/include/linux/netdevice.h 2011-09-02 18:11:21.000000000 -0400
58115+++ linux-3.0.4/include/linux/netdevice.h 2011-08-23 21:47:56.000000000 -0400
58116@@ -979,6 +979,7 @@ struct net_device_ops {
58117 int (*ndo_set_features)(struct net_device *dev,
58118 u32 features);
58119 };
58120+typedef struct net_device_ops __no_const net_device_ops_no_const;
58121
58122 /*
58123 * The DEVICE structure.
58124diff -urNp linux-3.0.4/include/linux/netfilter/xt_gradm.h linux-3.0.4/include/linux/netfilter/xt_gradm.h
58125--- linux-3.0.4/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
58126+++ linux-3.0.4/include/linux/netfilter/xt_gradm.h 2011-08-23 21:48:14.000000000 -0400
58127@@ -0,0 +1,9 @@
58128+#ifndef _LINUX_NETFILTER_XT_GRADM_H
58129+#define _LINUX_NETFILTER_XT_GRADM_H 1
58130+
58131+struct xt_gradm_mtinfo {
58132+ __u16 flags;
58133+ __u16 invflags;
58134+};
58135+
58136+#endif
58137diff -urNp linux-3.0.4/include/linux/of_pdt.h linux-3.0.4/include/linux/of_pdt.h
58138--- linux-3.0.4/include/linux/of_pdt.h 2011-07-21 22:17:23.000000000 -0400
58139+++ linux-3.0.4/include/linux/of_pdt.h 2011-08-30 06:20:11.000000000 -0400
58140@@ -32,7 +32,7 @@ struct of_pdt_ops {
58141
58142 /* return 0 on success; fill in 'len' with number of bytes in path */
58143 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
58144-};
58145+} __no_const;
58146
58147 extern void *prom_early_alloc(unsigned long size);
58148
58149diff -urNp linux-3.0.4/include/linux/oprofile.h linux-3.0.4/include/linux/oprofile.h
58150--- linux-3.0.4/include/linux/oprofile.h 2011-07-21 22:17:23.000000000 -0400
58151+++ linux-3.0.4/include/linux/oprofile.h 2011-08-23 21:47:56.000000000 -0400
58152@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super
58153 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
58154 char const * name, ulong * val);
58155
58156-/** Create a file for read-only access to an atomic_t. */
58157+/** Create a file for read-only access to an atomic_unchecked_t. */
58158 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
58159- char const * name, atomic_t * val);
58160+ char const * name, atomic_unchecked_t * val);
58161
58162 /** create a directory */
58163 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
58164diff -urNp linux-3.0.4/include/linux/padata.h linux-3.0.4/include/linux/padata.h
58165--- linux-3.0.4/include/linux/padata.h 2011-07-21 22:17:23.000000000 -0400
58166+++ linux-3.0.4/include/linux/padata.h 2011-08-23 21:47:56.000000000 -0400
58167@@ -129,7 +129,7 @@ struct parallel_data {
58168 struct padata_instance *pinst;
58169 struct padata_parallel_queue __percpu *pqueue;
58170 struct padata_serial_queue __percpu *squeue;
58171- atomic_t seq_nr;
58172+ atomic_unchecked_t seq_nr;
58173 atomic_t reorder_objects;
58174 atomic_t refcnt;
58175 unsigned int max_seq_nr;
58176diff -urNp linux-3.0.4/include/linux/perf_event.h linux-3.0.4/include/linux/perf_event.h
58177--- linux-3.0.4/include/linux/perf_event.h 2011-07-21 22:17:23.000000000 -0400
58178+++ linux-3.0.4/include/linux/perf_event.h 2011-08-23 21:47:56.000000000 -0400
58179@@ -761,8 +761,8 @@ struct perf_event {
58180
58181 enum perf_event_active_state state;
58182 unsigned int attach_state;
58183- local64_t count;
58184- atomic64_t child_count;
58185+ local64_t count; /* PaX: fix it one day */
58186+ atomic64_unchecked_t child_count;
58187
58188 /*
58189 * These are the total time in nanoseconds that the event
58190@@ -813,8 +813,8 @@ struct perf_event {
58191 * These accumulate total time (in nanoseconds) that children
58192 * events have been enabled and running, respectively.
58193 */
58194- atomic64_t child_total_time_enabled;
58195- atomic64_t child_total_time_running;
58196+ atomic64_unchecked_t child_total_time_enabled;
58197+ atomic64_unchecked_t child_total_time_running;
58198
58199 /*
58200 * Protect attach/detach and child_list:
58201diff -urNp linux-3.0.4/include/linux/pipe_fs_i.h linux-3.0.4/include/linux/pipe_fs_i.h
58202--- linux-3.0.4/include/linux/pipe_fs_i.h 2011-07-21 22:17:23.000000000 -0400
58203+++ linux-3.0.4/include/linux/pipe_fs_i.h 2011-08-23 21:47:56.000000000 -0400
58204@@ -46,9 +46,9 @@ struct pipe_buffer {
58205 struct pipe_inode_info {
58206 wait_queue_head_t wait;
58207 unsigned int nrbufs, curbuf, buffers;
58208- unsigned int readers;
58209- unsigned int writers;
58210- unsigned int waiting_writers;
58211+ atomic_t readers;
58212+ atomic_t writers;
58213+ atomic_t waiting_writers;
58214 unsigned int r_counter;
58215 unsigned int w_counter;
58216 struct page *tmp_page;
58217diff -urNp linux-3.0.4/include/linux/pm_runtime.h linux-3.0.4/include/linux/pm_runtime.h
58218--- linux-3.0.4/include/linux/pm_runtime.h 2011-07-21 22:17:23.000000000 -0400
58219+++ linux-3.0.4/include/linux/pm_runtime.h 2011-08-23 21:47:56.000000000 -0400
58220@@ -94,7 +94,7 @@ static inline bool pm_runtime_callbacks_
58221
58222 static inline void pm_runtime_mark_last_busy(struct device *dev)
58223 {
58224- ACCESS_ONCE(dev->power.last_busy) = jiffies;
58225+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
58226 }
58227
58228 #else /* !CONFIG_PM_RUNTIME */
58229diff -urNp linux-3.0.4/include/linux/poison.h linux-3.0.4/include/linux/poison.h
58230--- linux-3.0.4/include/linux/poison.h 2011-07-21 22:17:23.000000000 -0400
58231+++ linux-3.0.4/include/linux/poison.h 2011-08-23 21:47:56.000000000 -0400
58232@@ -19,8 +19,8 @@
58233 * under normal circumstances, used to verify that nobody uses
58234 * non-initialized list entries.
58235 */
58236-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
58237-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
58238+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
58239+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
58240
58241 /********** include/linux/timer.h **********/
58242 /*
58243diff -urNp linux-3.0.4/include/linux/preempt.h linux-3.0.4/include/linux/preempt.h
58244--- linux-3.0.4/include/linux/preempt.h 2011-07-21 22:17:23.000000000 -0400
58245+++ linux-3.0.4/include/linux/preempt.h 2011-08-23 21:47:56.000000000 -0400
58246@@ -115,7 +115,7 @@ struct preempt_ops {
58247 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
58248 void (*sched_out)(struct preempt_notifier *notifier,
58249 struct task_struct *next);
58250-};
58251+} __no_const;
58252
58253 /**
58254 * preempt_notifier - key for installing preemption notifiers
58255diff -urNp linux-3.0.4/include/linux/proc_fs.h linux-3.0.4/include/linux/proc_fs.h
58256--- linux-3.0.4/include/linux/proc_fs.h 2011-07-21 22:17:23.000000000 -0400
58257+++ linux-3.0.4/include/linux/proc_fs.h 2011-08-23 21:48:14.000000000 -0400
58258@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
58259 return proc_create_data(name, mode, parent, proc_fops, NULL);
58260 }
58261
58262+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
58263+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
58264+{
58265+#ifdef CONFIG_GRKERNSEC_PROC_USER
58266+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
58267+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58268+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
58269+#else
58270+ return proc_create_data(name, mode, parent, proc_fops, NULL);
58271+#endif
58272+}
58273+
58274+
58275 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
58276 mode_t mode, struct proc_dir_entry *base,
58277 read_proc_t *read_proc, void * data)
58278@@ -258,7 +271,7 @@ union proc_op {
58279 int (*proc_show)(struct seq_file *m,
58280 struct pid_namespace *ns, struct pid *pid,
58281 struct task_struct *task);
58282-};
58283+} __no_const;
58284
58285 struct ctl_table_header;
58286 struct ctl_table;
58287diff -urNp linux-3.0.4/include/linux/ptrace.h linux-3.0.4/include/linux/ptrace.h
58288--- linux-3.0.4/include/linux/ptrace.h 2011-07-21 22:17:23.000000000 -0400
58289+++ linux-3.0.4/include/linux/ptrace.h 2011-08-23 21:48:14.000000000 -0400
58290@@ -115,10 +115,10 @@ extern void __ptrace_unlink(struct task_
58291 extern void exit_ptrace(struct task_struct *tracer);
58292 #define PTRACE_MODE_READ 1
58293 #define PTRACE_MODE_ATTACH 2
58294-/* Returns 0 on success, -errno on denial. */
58295-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
58296 /* Returns true on success, false on denial. */
58297 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
58298+/* Returns true on success, false on denial. */
58299+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
58300
58301 static inline int ptrace_reparented(struct task_struct *child)
58302 {
58303diff -urNp linux-3.0.4/include/linux/random.h linux-3.0.4/include/linux/random.h
58304--- linux-3.0.4/include/linux/random.h 2011-09-02 18:11:21.000000000 -0400
58305+++ linux-3.0.4/include/linux/random.h 2011-08-23 21:47:56.000000000 -0400
58306@@ -69,12 +69,17 @@ void srandom32(u32 seed);
58307
58308 u32 prandom32(struct rnd_state *);
58309
58310+static inline unsigned long pax_get_random_long(void)
58311+{
58312+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
58313+}
58314+
58315 /*
58316 * Handle minimum values for seeds
58317 */
58318 static inline u32 __seed(u32 x, u32 m)
58319 {
58320- return (x < m) ? x + m : x;
58321+ return (x <= m) ? x + m + 1 : x;
58322 }
58323
58324 /**
58325diff -urNp linux-3.0.4/include/linux/reboot.h linux-3.0.4/include/linux/reboot.h
58326--- linux-3.0.4/include/linux/reboot.h 2011-07-21 22:17:23.000000000 -0400
58327+++ linux-3.0.4/include/linux/reboot.h 2011-08-23 21:47:56.000000000 -0400
58328@@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
58329 * Architecture-specific implementations of sys_reboot commands.
58330 */
58331
58332-extern void machine_restart(char *cmd);
58333-extern void machine_halt(void);
58334-extern void machine_power_off(void);
58335+extern void machine_restart(char *cmd) __noreturn;
58336+extern void machine_halt(void) __noreturn;
58337+extern void machine_power_off(void) __noreturn;
58338
58339 extern void machine_shutdown(void);
58340 struct pt_regs;
58341@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
58342 */
58343
58344 extern void kernel_restart_prepare(char *cmd);
58345-extern void kernel_restart(char *cmd);
58346-extern void kernel_halt(void);
58347-extern void kernel_power_off(void);
58348+extern void kernel_restart(char *cmd) __noreturn;
58349+extern void kernel_halt(void) __noreturn;
58350+extern void kernel_power_off(void) __noreturn;
58351
58352 extern int C_A_D; /* for sysctl */
58353 void ctrl_alt_del(void);
58354@@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
58355 * Emergency restart, callable from an interrupt handler.
58356 */
58357
58358-extern void emergency_restart(void);
58359+extern void emergency_restart(void) __noreturn;
58360 #include <asm/emergency-restart.h>
58361
58362 #endif
58363diff -urNp linux-3.0.4/include/linux/reiserfs_fs.h linux-3.0.4/include/linux/reiserfs_fs.h
58364--- linux-3.0.4/include/linux/reiserfs_fs.h 2011-07-21 22:17:23.000000000 -0400
58365+++ linux-3.0.4/include/linux/reiserfs_fs.h 2011-08-23 21:47:56.000000000 -0400
58366@@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset
58367 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
58368
58369 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
58370-#define get_generation(s) atomic_read (&fs_generation(s))
58371+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
58372 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
58373 #define __fs_changed(gen,s) (gen != get_generation (s))
58374 #define fs_changed(gen,s) \
58375diff -urNp linux-3.0.4/include/linux/reiserfs_fs_sb.h linux-3.0.4/include/linux/reiserfs_fs_sb.h
58376--- linux-3.0.4/include/linux/reiserfs_fs_sb.h 2011-07-21 22:17:23.000000000 -0400
58377+++ linux-3.0.4/include/linux/reiserfs_fs_sb.h 2011-08-23 21:47:56.000000000 -0400
58378@@ -386,7 +386,7 @@ struct reiserfs_sb_info {
58379 /* Comment? -Hans */
58380 wait_queue_head_t s_wait;
58381 /* To be obsoleted soon by per buffer seals.. -Hans */
58382- atomic_t s_generation_counter; // increased by one every time the
58383+ atomic_unchecked_t s_generation_counter; // increased by one every time the
58384 // tree gets re-balanced
58385 unsigned long s_properties; /* File system properties. Currently holds
58386 on-disk FS format */
58387diff -urNp linux-3.0.4/include/linux/relay.h linux-3.0.4/include/linux/relay.h
58388--- linux-3.0.4/include/linux/relay.h 2011-07-21 22:17:23.000000000 -0400
58389+++ linux-3.0.4/include/linux/relay.h 2011-08-23 21:47:56.000000000 -0400
58390@@ -159,7 +159,7 @@ struct rchan_callbacks
58391 * The callback should return 0 if successful, negative if not.
58392 */
58393 int (*remove_buf_file)(struct dentry *dentry);
58394-};
58395+} __no_const;
58396
58397 /*
58398 * CONFIG_RELAY kernel API, kernel/relay.c
58399diff -urNp linux-3.0.4/include/linux/rfkill.h linux-3.0.4/include/linux/rfkill.h
58400--- linux-3.0.4/include/linux/rfkill.h 2011-07-21 22:17:23.000000000 -0400
58401+++ linux-3.0.4/include/linux/rfkill.h 2011-08-23 21:47:56.000000000 -0400
58402@@ -147,6 +147,7 @@ struct rfkill_ops {
58403 void (*query)(struct rfkill *rfkill, void *data);
58404 int (*set_block)(void *data, bool blocked);
58405 };
58406+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
58407
58408 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
58409 /**
58410diff -urNp linux-3.0.4/include/linux/rmap.h linux-3.0.4/include/linux/rmap.h
58411--- linux-3.0.4/include/linux/rmap.h 2011-07-21 22:17:23.000000000 -0400
58412+++ linux-3.0.4/include/linux/rmap.h 2011-08-23 21:47:56.000000000 -0400
58413@@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struc
58414 void anon_vma_init(void); /* create anon_vma_cachep */
58415 int anon_vma_prepare(struct vm_area_struct *);
58416 void unlink_anon_vmas(struct vm_area_struct *);
58417-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
58418-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
58419+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
58420+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
58421 void __anon_vma_link(struct vm_area_struct *);
58422
58423 static inline void anon_vma_merge(struct vm_area_struct *vma,
58424diff -urNp linux-3.0.4/include/linux/sched.h linux-3.0.4/include/linux/sched.h
58425--- linux-3.0.4/include/linux/sched.h 2011-07-21 22:17:23.000000000 -0400
58426+++ linux-3.0.4/include/linux/sched.h 2011-08-25 17:22:27.000000000 -0400
58427@@ -100,6 +100,7 @@ struct bio_list;
58428 struct fs_struct;
58429 struct perf_event_context;
58430 struct blk_plug;
58431+struct linux_binprm;
58432
58433 /*
58434 * List of flags we want to share for kernel threads,
58435@@ -380,10 +381,13 @@ struct user_namespace;
58436 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
58437
58438 extern int sysctl_max_map_count;
58439+extern unsigned long sysctl_heap_stack_gap;
58440
58441 #include <linux/aio.h>
58442
58443 #ifdef CONFIG_MMU
58444+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
58445+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
58446 extern void arch_pick_mmap_layout(struct mm_struct *mm);
58447 extern unsigned long
58448 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
58449@@ -629,6 +633,17 @@ struct signal_struct {
58450 #ifdef CONFIG_TASKSTATS
58451 struct taskstats *stats;
58452 #endif
58453+
58454+#ifdef CONFIG_GRKERNSEC
58455+ u32 curr_ip;
58456+ u32 saved_ip;
58457+ u32 gr_saddr;
58458+ u32 gr_daddr;
58459+ u16 gr_sport;
58460+ u16 gr_dport;
58461+ u8 used_accept:1;
58462+#endif
58463+
58464 #ifdef CONFIG_AUDIT
58465 unsigned audit_tty;
58466 struct tty_audit_buf *tty_audit_buf;
58467@@ -710,6 +725,11 @@ struct user_struct {
58468 struct key *session_keyring; /* UID's default session keyring */
58469 #endif
58470
58471+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
58472+ unsigned int banned;
58473+ unsigned long ban_expires;
58474+#endif
58475+
58476 /* Hash table maintenance information */
58477 struct hlist_node uidhash_node;
58478 uid_t uid;
58479@@ -1340,8 +1360,8 @@ struct task_struct {
58480 struct list_head thread_group;
58481
58482 struct completion *vfork_done; /* for vfork() */
58483- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
58484- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
58485+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
58486+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
58487
58488 cputime_t utime, stime, utimescaled, stimescaled;
58489 cputime_t gtime;
58490@@ -1357,13 +1377,6 @@ struct task_struct {
58491 struct task_cputime cputime_expires;
58492 struct list_head cpu_timers[3];
58493
58494-/* process credentials */
58495- const struct cred __rcu *real_cred; /* objective and real subjective task
58496- * credentials (COW) */
58497- const struct cred __rcu *cred; /* effective (overridable) subjective task
58498- * credentials (COW) */
58499- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
58500-
58501 char comm[TASK_COMM_LEN]; /* executable name excluding path
58502 - access with [gs]et_task_comm (which lock
58503 it with task_lock())
58504@@ -1380,8 +1393,16 @@ struct task_struct {
58505 #endif
58506 /* CPU-specific state of this task */
58507 struct thread_struct thread;
58508+/* thread_info moved to task_struct */
58509+#ifdef CONFIG_X86
58510+ struct thread_info tinfo;
58511+#endif
58512 /* filesystem information */
58513 struct fs_struct *fs;
58514+
58515+ const struct cred __rcu *cred; /* effective (overridable) subjective task
58516+ * credentials (COW) */
58517+
58518 /* open file information */
58519 struct files_struct *files;
58520 /* namespaces */
58521@@ -1428,6 +1449,11 @@ struct task_struct {
58522 struct rt_mutex_waiter *pi_blocked_on;
58523 #endif
58524
58525+/* process credentials */
58526+ const struct cred __rcu *real_cred; /* objective and real subjective task
58527+ * credentials (COW) */
58528+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
58529+
58530 #ifdef CONFIG_DEBUG_MUTEXES
58531 /* mutex deadlock detection */
58532 struct mutex_waiter *blocked_on;
58533@@ -1538,6 +1564,21 @@ struct task_struct {
58534 unsigned long default_timer_slack_ns;
58535
58536 struct list_head *scm_work_list;
58537+
58538+#ifdef CONFIG_GRKERNSEC
58539+ /* grsecurity */
58540+ struct dentry *gr_chroot_dentry;
58541+ struct acl_subject_label *acl;
58542+ struct acl_role_label *role;
58543+ struct file *exec_file;
58544+ u16 acl_role_id;
58545+ /* is this the task that authenticated to the special role */
58546+ u8 acl_sp_role;
58547+ u8 is_writable;
58548+ u8 brute;
58549+ u8 gr_is_chrooted;
58550+#endif
58551+
58552 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
58553 /* Index of current stored address in ret_stack */
58554 int curr_ret_stack;
58555@@ -1572,6 +1613,57 @@ struct task_struct {
58556 #endif
58557 };
58558
58559+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
58560+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
58561+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
58562+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
58563+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
58564+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
58565+
58566+#ifdef CONFIG_PAX_SOFTMODE
58567+extern int pax_softmode;
58568+#endif
58569+
58570+extern int pax_check_flags(unsigned long *);
58571+
58572+/* if tsk != current then task_lock must be held on it */
58573+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58574+static inline unsigned long pax_get_flags(struct task_struct *tsk)
58575+{
58576+ if (likely(tsk->mm))
58577+ return tsk->mm->pax_flags;
58578+ else
58579+ return 0UL;
58580+}
58581+
58582+/* if tsk != current then task_lock must be held on it */
58583+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
58584+{
58585+ if (likely(tsk->mm)) {
58586+ tsk->mm->pax_flags = flags;
58587+ return 0;
58588+ }
58589+ return -EINVAL;
58590+}
58591+#endif
58592+
58593+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
58594+extern void pax_set_initial_flags(struct linux_binprm *bprm);
58595+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
58596+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
58597+#endif
58598+
58599+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
58600+extern void pax_report_insns(void *pc, void *sp);
58601+extern void pax_report_refcount_overflow(struct pt_regs *regs);
58602+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
58603+
58604+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
58605+extern void pax_track_stack(void);
58606+#else
58607+static inline void pax_track_stack(void) {}
58608+#endif
58609+
58610 /* Future-safe accessor for struct task_struct's cpus_allowed. */
58611 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
58612
58613@@ -1768,6 +1860,7 @@ extern void thread_group_times(struct ta
58614 #define PF_DUMPCORE 0x00000200 /* dumped core */
58615 #define PF_SIGNALED 0x00000400 /* killed by a signal */
58616 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
58617+#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
58618 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
58619 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
58620 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
58621@@ -2056,7 +2149,9 @@ void yield(void);
58622 extern struct exec_domain default_exec_domain;
58623
58624 union thread_union {
58625+#ifndef CONFIG_X86
58626 struct thread_info thread_info;
58627+#endif
58628 unsigned long stack[THREAD_SIZE/sizeof(long)];
58629 };
58630
58631@@ -2089,6 +2184,7 @@ extern struct pid_namespace init_pid_ns;
58632 */
58633
58634 extern struct task_struct *find_task_by_vpid(pid_t nr);
58635+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
58636 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
58637 struct pid_namespace *ns);
58638
58639@@ -2225,7 +2321,7 @@ extern void __cleanup_sighand(struct sig
58640 extern void exit_itimers(struct signal_struct *);
58641 extern void flush_itimer_signals(void);
58642
58643-extern NORET_TYPE void do_group_exit(int);
58644+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
58645
58646 extern void daemonize(const char *, ...);
58647 extern int allow_signal(int);
58648@@ -2393,13 +2489,17 @@ static inline unsigned long *end_of_stac
58649
58650 #endif
58651
58652-static inline int object_is_on_stack(void *obj)
58653+static inline int object_starts_on_stack(void *obj)
58654 {
58655- void *stack = task_stack_page(current);
58656+ const void *stack = task_stack_page(current);
58657
58658 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
58659 }
58660
58661+#ifdef CONFIG_PAX_USERCOPY
58662+extern int object_is_on_stack(const void *obj, unsigned long len);
58663+#endif
58664+
58665 extern void thread_info_cache_init(void);
58666
58667 #ifdef CONFIG_DEBUG_STACK_USAGE
58668diff -urNp linux-3.0.4/include/linux/screen_info.h linux-3.0.4/include/linux/screen_info.h
58669--- linux-3.0.4/include/linux/screen_info.h 2011-07-21 22:17:23.000000000 -0400
58670+++ linux-3.0.4/include/linux/screen_info.h 2011-08-23 21:47:56.000000000 -0400
58671@@ -43,7 +43,8 @@ struct screen_info {
58672 __u16 pages; /* 0x32 */
58673 __u16 vesa_attributes; /* 0x34 */
58674 __u32 capabilities; /* 0x36 */
58675- __u8 _reserved[6]; /* 0x3a */
58676+ __u16 vesapm_size; /* 0x3a */
58677+ __u8 _reserved[4]; /* 0x3c */
58678 } __attribute__((packed));
58679
58680 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
58681diff -urNp linux-3.0.4/include/linux/security.h linux-3.0.4/include/linux/security.h
58682--- linux-3.0.4/include/linux/security.h 2011-07-21 22:17:23.000000000 -0400
58683+++ linux-3.0.4/include/linux/security.h 2011-08-23 21:48:14.000000000 -0400
58684@@ -36,6 +36,7 @@
58685 #include <linux/key.h>
58686 #include <linux/xfrm.h>
58687 #include <linux/slab.h>
58688+#include <linux/grsecurity.h>
58689 #include <net/flow.h>
58690
58691 /* Maximum number of letters for an LSM name string */
58692diff -urNp linux-3.0.4/include/linux/seq_file.h linux-3.0.4/include/linux/seq_file.h
58693--- linux-3.0.4/include/linux/seq_file.h 2011-07-21 22:17:23.000000000 -0400
58694+++ linux-3.0.4/include/linux/seq_file.h 2011-08-23 21:47:56.000000000 -0400
58695@@ -32,6 +32,7 @@ struct seq_operations {
58696 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
58697 int (*show) (struct seq_file *m, void *v);
58698 };
58699+typedef struct seq_operations __no_const seq_operations_no_const;
58700
58701 #define SEQ_SKIP 1
58702
58703diff -urNp linux-3.0.4/include/linux/shmem_fs.h linux-3.0.4/include/linux/shmem_fs.h
58704--- linux-3.0.4/include/linux/shmem_fs.h 2011-07-21 22:17:23.000000000 -0400
58705+++ linux-3.0.4/include/linux/shmem_fs.h 2011-08-23 21:47:56.000000000 -0400
58706@@ -10,7 +10,7 @@
58707
58708 #define SHMEM_NR_DIRECT 16
58709
58710-#define SHMEM_SYMLINK_INLINE_LEN (SHMEM_NR_DIRECT * sizeof(swp_entry_t))
58711+#define SHMEM_SYMLINK_INLINE_LEN 64
58712
58713 struct shmem_inode_info {
58714 spinlock_t lock;
58715diff -urNp linux-3.0.4/include/linux/shm.h linux-3.0.4/include/linux/shm.h
58716--- linux-3.0.4/include/linux/shm.h 2011-07-21 22:17:23.000000000 -0400
58717+++ linux-3.0.4/include/linux/shm.h 2011-08-23 21:48:14.000000000 -0400
58718@@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
58719 pid_t shm_cprid;
58720 pid_t shm_lprid;
58721 struct user_struct *mlock_user;
58722+#ifdef CONFIG_GRKERNSEC
58723+ time_t shm_createtime;
58724+ pid_t shm_lapid;
58725+#endif
58726 };
58727
58728 /* shm_mode upper byte flags */
58729diff -urNp linux-3.0.4/include/linux/skbuff.h linux-3.0.4/include/linux/skbuff.h
58730--- linux-3.0.4/include/linux/skbuff.h 2011-07-21 22:17:23.000000000 -0400
58731+++ linux-3.0.4/include/linux/skbuff.h 2011-08-23 21:47:56.000000000 -0400
58732@@ -592,7 +592,7 @@ static inline struct skb_shared_hwtstamp
58733 */
58734 static inline int skb_queue_empty(const struct sk_buff_head *list)
58735 {
58736- return list->next == (struct sk_buff *)list;
58737+ return list->next == (const struct sk_buff *)list;
58738 }
58739
58740 /**
58741@@ -605,7 +605,7 @@ static inline int skb_queue_empty(const
58742 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
58743 const struct sk_buff *skb)
58744 {
58745- return skb->next == (struct sk_buff *)list;
58746+ return skb->next == (const struct sk_buff *)list;
58747 }
58748
58749 /**
58750@@ -618,7 +618,7 @@ static inline bool skb_queue_is_last(con
58751 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
58752 const struct sk_buff *skb)
58753 {
58754- return skb->prev == (struct sk_buff *)list;
58755+ return skb->prev == (const struct sk_buff *)list;
58756 }
58757
58758 /**
58759@@ -1440,7 +1440,7 @@ static inline int pskb_network_may_pull(
58760 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
58761 */
58762 #ifndef NET_SKB_PAD
58763-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
58764+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
58765 #endif
58766
58767 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
58768diff -urNp linux-3.0.4/include/linux/slab_def.h linux-3.0.4/include/linux/slab_def.h
58769--- linux-3.0.4/include/linux/slab_def.h 2011-07-21 22:17:23.000000000 -0400
58770+++ linux-3.0.4/include/linux/slab_def.h 2011-08-23 21:47:56.000000000 -0400
58771@@ -96,10 +96,10 @@ struct kmem_cache {
58772 unsigned long node_allocs;
58773 unsigned long node_frees;
58774 unsigned long node_overflow;
58775- atomic_t allochit;
58776- atomic_t allocmiss;
58777- atomic_t freehit;
58778- atomic_t freemiss;
58779+ atomic_unchecked_t allochit;
58780+ atomic_unchecked_t allocmiss;
58781+ atomic_unchecked_t freehit;
58782+ atomic_unchecked_t freemiss;
58783
58784 /*
58785 * If debugging is enabled, then the allocator can add additional
58786diff -urNp linux-3.0.4/include/linux/slab.h linux-3.0.4/include/linux/slab.h
58787--- linux-3.0.4/include/linux/slab.h 2011-07-21 22:17:23.000000000 -0400
58788+++ linux-3.0.4/include/linux/slab.h 2011-08-23 21:47:56.000000000 -0400
58789@@ -11,12 +11,20 @@
58790
58791 #include <linux/gfp.h>
58792 #include <linux/types.h>
58793+#include <linux/err.h>
58794
58795 /*
58796 * Flags to pass to kmem_cache_create().
58797 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
58798 */
58799 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
58800+
58801+#ifdef CONFIG_PAX_USERCOPY
58802+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
58803+#else
58804+#define SLAB_USERCOPY 0x00000000UL
58805+#endif
58806+
58807 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
58808 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
58809 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
58810@@ -87,10 +95,13 @@
58811 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
58812 * Both make kfree a no-op.
58813 */
58814-#define ZERO_SIZE_PTR ((void *)16)
58815+#define ZERO_SIZE_PTR \
58816+({ \
58817+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
58818+ (void *)(-MAX_ERRNO-1L); \
58819+})
58820
58821-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
58822- (unsigned long)ZERO_SIZE_PTR)
58823+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
58824
58825 /*
58826 * struct kmem_cache related prototypes
58827@@ -141,6 +152,7 @@ void * __must_check krealloc(const void
58828 void kfree(const void *);
58829 void kzfree(const void *);
58830 size_t ksize(const void *);
58831+void check_object_size(const void *ptr, unsigned long n, bool to);
58832
58833 /*
58834 * Allocator specific definitions. These are mainly used to establish optimized
58835@@ -333,4 +345,59 @@ static inline void *kzalloc_node(size_t
58836
58837 void __init kmem_cache_init_late(void);
58838
58839+#define kmalloc(x, y) \
58840+({ \
58841+ void *___retval; \
58842+ intoverflow_t ___x = (intoverflow_t)x; \
58843+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
58844+ ___retval = NULL; \
58845+ else \
58846+ ___retval = kmalloc((size_t)___x, (y)); \
58847+ ___retval; \
58848+})
58849+
58850+#define kmalloc_node(x, y, z) \
58851+({ \
58852+ void *___retval; \
58853+ intoverflow_t ___x = (intoverflow_t)x; \
58854+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
58855+ ___retval = NULL; \
58856+ else \
58857+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
58858+ ___retval; \
58859+})
58860+
58861+#define kzalloc(x, y) \
58862+({ \
58863+ void *___retval; \
58864+ intoverflow_t ___x = (intoverflow_t)x; \
58865+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
58866+ ___retval = NULL; \
58867+ else \
58868+ ___retval = kzalloc((size_t)___x, (y)); \
58869+ ___retval; \
58870+})
58871+
58872+#define __krealloc(x, y, z) \
58873+({ \
58874+ void *___retval; \
58875+ intoverflow_t ___y = (intoverflow_t)y; \
58876+ if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
58877+ ___retval = NULL; \
58878+ else \
58879+ ___retval = __krealloc((x), (size_t)___y, (z)); \
58880+ ___retval; \
58881+})
58882+
58883+#define krealloc(x, y, z) \
58884+({ \
58885+ void *___retval; \
58886+ intoverflow_t ___y = (intoverflow_t)y; \
58887+ if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
58888+ ___retval = NULL; \
58889+ else \
58890+ ___retval = krealloc((x), (size_t)___y, (z)); \
58891+ ___retval; \
58892+})
58893+
58894 #endif /* _LINUX_SLAB_H */
58895diff -urNp linux-3.0.4/include/linux/slub_def.h linux-3.0.4/include/linux/slub_def.h
58896--- linux-3.0.4/include/linux/slub_def.h 2011-07-21 22:17:23.000000000 -0400
58897+++ linux-3.0.4/include/linux/slub_def.h 2011-08-23 21:47:56.000000000 -0400
58898@@ -82,7 +82,7 @@ struct kmem_cache {
58899 struct kmem_cache_order_objects max;
58900 struct kmem_cache_order_objects min;
58901 gfp_t allocflags; /* gfp flags to use on each alloc */
58902- int refcount; /* Refcount for slab cache destroy */
58903+ atomic_t refcount; /* Refcount for slab cache destroy */
58904 void (*ctor)(void *);
58905 int inuse; /* Offset to metadata */
58906 int align; /* Alignment */
58907@@ -218,7 +218,7 @@ static __always_inline struct kmem_cache
58908 }
58909
58910 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
58911-void *__kmalloc(size_t size, gfp_t flags);
58912+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
58913
58914 static __always_inline void *
58915 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
58916diff -urNp linux-3.0.4/include/linux/sonet.h linux-3.0.4/include/linux/sonet.h
58917--- linux-3.0.4/include/linux/sonet.h 2011-07-21 22:17:23.000000000 -0400
58918+++ linux-3.0.4/include/linux/sonet.h 2011-08-23 21:47:56.000000000 -0400
58919@@ -61,7 +61,7 @@ struct sonet_stats {
58920 #include <asm/atomic.h>
58921
58922 struct k_sonet_stats {
58923-#define __HANDLE_ITEM(i) atomic_t i
58924+#define __HANDLE_ITEM(i) atomic_unchecked_t i
58925 __SONET_ITEMS
58926 #undef __HANDLE_ITEM
58927 };
58928diff -urNp linux-3.0.4/include/linux/sunrpc/clnt.h linux-3.0.4/include/linux/sunrpc/clnt.h
58929--- linux-3.0.4/include/linux/sunrpc/clnt.h 2011-07-21 22:17:23.000000000 -0400
58930+++ linux-3.0.4/include/linux/sunrpc/clnt.h 2011-08-23 21:47:56.000000000 -0400
58931@@ -169,9 +169,9 @@ static inline unsigned short rpc_get_por
58932 {
58933 switch (sap->sa_family) {
58934 case AF_INET:
58935- return ntohs(((struct sockaddr_in *)sap)->sin_port);
58936+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
58937 case AF_INET6:
58938- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
58939+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
58940 }
58941 return 0;
58942 }
58943@@ -204,7 +204,7 @@ static inline bool __rpc_cmp_addr4(const
58944 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
58945 const struct sockaddr *src)
58946 {
58947- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
58948+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
58949 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
58950
58951 dsin->sin_family = ssin->sin_family;
58952@@ -301,7 +301,7 @@ static inline u32 rpc_get_scope_id(const
58953 if (sa->sa_family != AF_INET6)
58954 return 0;
58955
58956- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
58957+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
58958 }
58959
58960 #endif /* __KERNEL__ */
58961diff -urNp linux-3.0.4/include/linux/sunrpc/svc_rdma.h linux-3.0.4/include/linux/sunrpc/svc_rdma.h
58962--- linux-3.0.4/include/linux/sunrpc/svc_rdma.h 2011-07-21 22:17:23.000000000 -0400
58963+++ linux-3.0.4/include/linux/sunrpc/svc_rdma.h 2011-08-23 21:47:56.000000000 -0400
58964@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
58965 extern unsigned int svcrdma_max_requests;
58966 extern unsigned int svcrdma_max_req_size;
58967
58968-extern atomic_t rdma_stat_recv;
58969-extern atomic_t rdma_stat_read;
58970-extern atomic_t rdma_stat_write;
58971-extern atomic_t rdma_stat_sq_starve;
58972-extern atomic_t rdma_stat_rq_starve;
58973-extern atomic_t rdma_stat_rq_poll;
58974-extern atomic_t rdma_stat_rq_prod;
58975-extern atomic_t rdma_stat_sq_poll;
58976-extern atomic_t rdma_stat_sq_prod;
58977+extern atomic_unchecked_t rdma_stat_recv;
58978+extern atomic_unchecked_t rdma_stat_read;
58979+extern atomic_unchecked_t rdma_stat_write;
58980+extern atomic_unchecked_t rdma_stat_sq_starve;
58981+extern atomic_unchecked_t rdma_stat_rq_starve;
58982+extern atomic_unchecked_t rdma_stat_rq_poll;
58983+extern atomic_unchecked_t rdma_stat_rq_prod;
58984+extern atomic_unchecked_t rdma_stat_sq_poll;
58985+extern atomic_unchecked_t rdma_stat_sq_prod;
58986
58987 #define RPCRDMA_VERSION 1
58988
58989diff -urNp linux-3.0.4/include/linux/sysctl.h linux-3.0.4/include/linux/sysctl.h
58990--- linux-3.0.4/include/linux/sysctl.h 2011-07-21 22:17:23.000000000 -0400
58991+++ linux-3.0.4/include/linux/sysctl.h 2011-08-23 21:48:14.000000000 -0400
58992@@ -155,7 +155,11 @@ enum
58993 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
58994 };
58995
58996-
58997+#ifdef CONFIG_PAX_SOFTMODE
58998+enum {
58999+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
59000+};
59001+#endif
59002
59003 /* CTL_VM names: */
59004 enum
59005@@ -967,6 +971,8 @@ typedef int proc_handler (struct ctl_tab
59006
59007 extern int proc_dostring(struct ctl_table *, int,
59008 void __user *, size_t *, loff_t *);
59009+extern int proc_dostring_modpriv(struct ctl_table *, int,
59010+ void __user *, size_t *, loff_t *);
59011 extern int proc_dointvec(struct ctl_table *, int,
59012 void __user *, size_t *, loff_t *);
59013 extern int proc_dointvec_minmax(struct ctl_table *, int,
59014diff -urNp linux-3.0.4/include/linux/tty_ldisc.h linux-3.0.4/include/linux/tty_ldisc.h
59015--- linux-3.0.4/include/linux/tty_ldisc.h 2011-07-21 22:17:23.000000000 -0400
59016+++ linux-3.0.4/include/linux/tty_ldisc.h 2011-08-23 21:47:56.000000000 -0400
59017@@ -148,7 +148,7 @@ struct tty_ldisc_ops {
59018
59019 struct module *owner;
59020
59021- int refcount;
59022+ atomic_t refcount;
59023 };
59024
59025 struct tty_ldisc {
59026diff -urNp linux-3.0.4/include/linux/types.h linux-3.0.4/include/linux/types.h
59027--- linux-3.0.4/include/linux/types.h 2011-07-21 22:17:23.000000000 -0400
59028+++ linux-3.0.4/include/linux/types.h 2011-08-23 21:47:56.000000000 -0400
59029@@ -213,10 +213,26 @@ typedef struct {
59030 int counter;
59031 } atomic_t;
59032
59033+#ifdef CONFIG_PAX_REFCOUNT
59034+typedef struct {
59035+ int counter;
59036+} atomic_unchecked_t;
59037+#else
59038+typedef atomic_t atomic_unchecked_t;
59039+#endif
59040+
59041 #ifdef CONFIG_64BIT
59042 typedef struct {
59043 long counter;
59044 } atomic64_t;
59045+
59046+#ifdef CONFIG_PAX_REFCOUNT
59047+typedef struct {
59048+ long counter;
59049+} atomic64_unchecked_t;
59050+#else
59051+typedef atomic64_t atomic64_unchecked_t;
59052+#endif
59053 #endif
59054
59055 struct list_head {
59056diff -urNp linux-3.0.4/include/linux/uaccess.h linux-3.0.4/include/linux/uaccess.h
59057--- linux-3.0.4/include/linux/uaccess.h 2011-07-21 22:17:23.000000000 -0400
59058+++ linux-3.0.4/include/linux/uaccess.h 2011-10-06 04:17:55.000000000 -0400
59059@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
59060 long ret; \
59061 mm_segment_t old_fs = get_fs(); \
59062 \
59063- set_fs(KERNEL_DS); \
59064 pagefault_disable(); \
59065- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
59066- pagefault_enable(); \
59067+ set_fs(KERNEL_DS); \
59068+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
59069 set_fs(old_fs); \
59070+ pagefault_enable(); \
59071 ret; \
59072 })
59073
59074diff -urNp linux-3.0.4/include/linux/unaligned/access_ok.h linux-3.0.4/include/linux/unaligned/access_ok.h
59075--- linux-3.0.4/include/linux/unaligned/access_ok.h 2011-07-21 22:17:23.000000000 -0400
59076+++ linux-3.0.4/include/linux/unaligned/access_ok.h 2011-08-23 21:47:56.000000000 -0400
59077@@ -6,32 +6,32 @@
59078
59079 static inline u16 get_unaligned_le16(const void *p)
59080 {
59081- return le16_to_cpup((__le16 *)p);
59082+ return le16_to_cpup((const __le16 *)p);
59083 }
59084
59085 static inline u32 get_unaligned_le32(const void *p)
59086 {
59087- return le32_to_cpup((__le32 *)p);
59088+ return le32_to_cpup((const __le32 *)p);
59089 }
59090
59091 static inline u64 get_unaligned_le64(const void *p)
59092 {
59093- return le64_to_cpup((__le64 *)p);
59094+ return le64_to_cpup((const __le64 *)p);
59095 }
59096
59097 static inline u16 get_unaligned_be16(const void *p)
59098 {
59099- return be16_to_cpup((__be16 *)p);
59100+ return be16_to_cpup((const __be16 *)p);
59101 }
59102
59103 static inline u32 get_unaligned_be32(const void *p)
59104 {
59105- return be32_to_cpup((__be32 *)p);
59106+ return be32_to_cpup((const __be32 *)p);
59107 }
59108
59109 static inline u64 get_unaligned_be64(const void *p)
59110 {
59111- return be64_to_cpup((__be64 *)p);
59112+ return be64_to_cpup((const __be64 *)p);
59113 }
59114
59115 static inline void put_unaligned_le16(u16 val, void *p)
59116diff -urNp linux-3.0.4/include/linux/vermagic.h linux-3.0.4/include/linux/vermagic.h
59117--- linux-3.0.4/include/linux/vermagic.h 2011-07-21 22:17:23.000000000 -0400
59118+++ linux-3.0.4/include/linux/vermagic.h 2011-10-07 19:25:35.000000000 -0400
59119@@ -26,9 +26,28 @@
59120 #define MODULE_ARCH_VERMAGIC ""
59121 #endif
59122
59123+#ifdef CONFIG_PAX_REFCOUNT
59124+#define MODULE_PAX_REFCOUNT "REFCOUNT "
59125+#else
59126+#define MODULE_PAX_REFCOUNT ""
59127+#endif
59128+
59129+#ifdef CONSTIFY_PLUGIN
59130+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
59131+#else
59132+#define MODULE_CONSTIFY_PLUGIN ""
59133+#endif
59134+
59135+#ifdef CONFIG_GRKERNSEC
59136+#define MODULE_GRSEC "GRSEC "
59137+#else
59138+#define MODULE_GRSEC ""
59139+#endif
59140+
59141 #define VERMAGIC_STRING \
59142 UTS_RELEASE " " \
59143 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
59144 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
59145- MODULE_ARCH_VERMAGIC
59146+ MODULE_ARCH_VERMAGIC \
59147+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_GRSEC
59148
59149diff -urNp linux-3.0.4/include/linux/vmalloc.h linux-3.0.4/include/linux/vmalloc.h
59150--- linux-3.0.4/include/linux/vmalloc.h 2011-07-21 22:17:23.000000000 -0400
59151+++ linux-3.0.4/include/linux/vmalloc.h 2011-08-23 21:47:56.000000000 -0400
59152@@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
59153 #define VM_MAP 0x00000004 /* vmap()ed pages */
59154 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
59155 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
59156+
59157+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
59158+#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
59159+#endif
59160+
59161 /* bits [20..32] reserved for arch specific ioremap internals */
59162
59163 /*
59164@@ -155,4 +160,103 @@ pcpu_free_vm_areas(struct vm_struct **vm
59165 # endif
59166 #endif
59167
59168+#define vmalloc(x) \
59169+({ \
59170+ void *___retval; \
59171+ intoverflow_t ___x = (intoverflow_t)x; \
59172+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
59173+ ___retval = NULL; \
59174+ else \
59175+ ___retval = vmalloc((unsigned long)___x); \
59176+ ___retval; \
59177+})
59178+
59179+#define vzalloc(x) \
59180+({ \
59181+ void *___retval; \
59182+ intoverflow_t ___x = (intoverflow_t)x; \
59183+ if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
59184+ ___retval = NULL; \
59185+ else \
59186+ ___retval = vzalloc((unsigned long)___x); \
59187+ ___retval; \
59188+})
59189+
59190+#define __vmalloc(x, y, z) \
59191+({ \
59192+ void *___retval; \
59193+ intoverflow_t ___x = (intoverflow_t)x; \
59194+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
59195+ ___retval = NULL; \
59196+ else \
59197+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
59198+ ___retval; \
59199+})
59200+
59201+#define vmalloc_user(x) \
59202+({ \
59203+ void *___retval; \
59204+ intoverflow_t ___x = (intoverflow_t)x; \
59205+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
59206+ ___retval = NULL; \
59207+ else \
59208+ ___retval = vmalloc_user((unsigned long)___x); \
59209+ ___retval; \
59210+})
59211+
59212+#define vmalloc_exec(x) \
59213+({ \
59214+ void *___retval; \
59215+ intoverflow_t ___x = (intoverflow_t)x; \
59216+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
59217+ ___retval = NULL; \
59218+ else \
59219+ ___retval = vmalloc_exec((unsigned long)___x); \
59220+ ___retval; \
59221+})
59222+
59223+#define vmalloc_node(x, y) \
59224+({ \
59225+ void *___retval; \
59226+ intoverflow_t ___x = (intoverflow_t)x; \
59227+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
59228+ ___retval = NULL; \
59229+ else \
59230+ ___retval = vmalloc_node((unsigned long)___x, (y));\
59231+ ___retval; \
59232+})
59233+
59234+#define vzalloc_node(x, y) \
59235+({ \
59236+ void *___retval; \
59237+ intoverflow_t ___x = (intoverflow_t)x; \
59238+ if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
59239+ ___retval = NULL; \
59240+ else \
59241+ ___retval = vzalloc_node((unsigned long)___x, (y));\
59242+ ___retval; \
59243+})
59244+
59245+#define vmalloc_32(x) \
59246+({ \
59247+ void *___retval; \
59248+ intoverflow_t ___x = (intoverflow_t)x; \
59249+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
59250+ ___retval = NULL; \
59251+ else \
59252+ ___retval = vmalloc_32((unsigned long)___x); \
59253+ ___retval; \
59254+})
59255+
59256+#define vmalloc_32_user(x) \
59257+({ \
59258+void *___retval; \
59259+ intoverflow_t ___x = (intoverflow_t)x; \
59260+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
59261+ ___retval = NULL; \
59262+ else \
59263+ ___retval = vmalloc_32_user((unsigned long)___x);\
59264+ ___retval; \
59265+})
59266+
59267 #endif /* _LINUX_VMALLOC_H */
59268diff -urNp linux-3.0.4/include/linux/vmstat.h linux-3.0.4/include/linux/vmstat.h
59269--- linux-3.0.4/include/linux/vmstat.h 2011-07-21 22:17:23.000000000 -0400
59270+++ linux-3.0.4/include/linux/vmstat.h 2011-08-23 21:47:56.000000000 -0400
59271@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(in
59272 /*
59273 * Zone based page accounting with per cpu differentials.
59274 */
59275-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59276+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59277
59278 static inline void zone_page_state_add(long x, struct zone *zone,
59279 enum zone_stat_item item)
59280 {
59281- atomic_long_add(x, &zone->vm_stat[item]);
59282- atomic_long_add(x, &vm_stat[item]);
59283+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
59284+ atomic_long_add_unchecked(x, &vm_stat[item]);
59285 }
59286
59287 static inline unsigned long global_page_state(enum zone_stat_item item)
59288 {
59289- long x = atomic_long_read(&vm_stat[item]);
59290+ long x = atomic_long_read_unchecked(&vm_stat[item]);
59291 #ifdef CONFIG_SMP
59292 if (x < 0)
59293 x = 0;
59294@@ -109,7 +109,7 @@ static inline unsigned long global_page_
59295 static inline unsigned long zone_page_state(struct zone *zone,
59296 enum zone_stat_item item)
59297 {
59298- long x = atomic_long_read(&zone->vm_stat[item]);
59299+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
59300 #ifdef CONFIG_SMP
59301 if (x < 0)
59302 x = 0;
59303@@ -126,7 +126,7 @@ static inline unsigned long zone_page_st
59304 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
59305 enum zone_stat_item item)
59306 {
59307- long x = atomic_long_read(&zone->vm_stat[item]);
59308+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
59309
59310 #ifdef CONFIG_SMP
59311 int cpu;
59312@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state
59313
59314 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
59315 {
59316- atomic_long_inc(&zone->vm_stat[item]);
59317- atomic_long_inc(&vm_stat[item]);
59318+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
59319+ atomic_long_inc_unchecked(&vm_stat[item]);
59320 }
59321
59322 static inline void __inc_zone_page_state(struct page *page,
59323@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state
59324
59325 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
59326 {
59327- atomic_long_dec(&zone->vm_stat[item]);
59328- atomic_long_dec(&vm_stat[item]);
59329+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
59330+ atomic_long_dec_unchecked(&vm_stat[item]);
59331 }
59332
59333 static inline void __dec_zone_page_state(struct page *page,
59334diff -urNp linux-3.0.4/include/media/saa7146_vv.h linux-3.0.4/include/media/saa7146_vv.h
59335--- linux-3.0.4/include/media/saa7146_vv.h 2011-07-21 22:17:23.000000000 -0400
59336+++ linux-3.0.4/include/media/saa7146_vv.h 2011-10-07 19:07:40.000000000 -0400
59337@@ -163,7 +163,7 @@ struct saa7146_ext_vv
59338 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
59339
59340 /* the extension can override this */
59341- struct v4l2_ioctl_ops ops;
59342+ v4l2_ioctl_ops_no_const ops;
59343 /* pointer to the saa7146 core ops */
59344 const struct v4l2_ioctl_ops *core_ops;
59345
59346diff -urNp linux-3.0.4/include/media/v4l2-dev.h linux-3.0.4/include/media/v4l2-dev.h
59347--- linux-3.0.4/include/media/v4l2-dev.h 2011-07-21 22:17:23.000000000 -0400
59348+++ linux-3.0.4/include/media/v4l2-dev.h 2011-10-07 19:07:40.000000000 -0400
59349@@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_sta
59350
59351
59352 struct v4l2_file_operations {
59353- struct module *owner;
59354+ struct module * const owner;
59355 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
59356 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
59357 unsigned int (*poll) (struct file *, struct poll_table_struct *);
59358@@ -68,6 +68,7 @@ struct v4l2_file_operations {
59359 int (*open) (struct file *);
59360 int (*release) (struct file *);
59361 };
59362+typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
59363
59364 /*
59365 * Newer version of video_device, handled by videodev2.c
59366diff -urNp linux-3.0.4/include/media/v4l2-ioctl.h linux-3.0.4/include/media/v4l2-ioctl.h
59367--- linux-3.0.4/include/media/v4l2-ioctl.h 2011-07-21 22:17:23.000000000 -0400
59368+++ linux-3.0.4/include/media/v4l2-ioctl.h 2011-08-24 18:25:45.000000000 -0400
59369@@ -272,6 +272,7 @@ struct v4l2_ioctl_ops {
59370 long (*vidioc_default) (struct file *file, void *fh,
59371 bool valid_prio, int cmd, void *arg);
59372 };
59373+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
59374
59375
59376 /* v4l debugging and diagnostics */
59377diff -urNp linux-3.0.4/include/net/caif/cfctrl.h linux-3.0.4/include/net/caif/cfctrl.h
59378--- linux-3.0.4/include/net/caif/cfctrl.h 2011-07-21 22:17:23.000000000 -0400
59379+++ linux-3.0.4/include/net/caif/cfctrl.h 2011-08-23 21:47:56.000000000 -0400
59380@@ -52,7 +52,7 @@ struct cfctrl_rsp {
59381 void (*radioset_rsp)(void);
59382 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
59383 struct cflayer *client_layer);
59384-};
59385+} __no_const;
59386
59387 /* Link Setup Parameters for CAIF-Links. */
59388 struct cfctrl_link_param {
59389@@ -101,8 +101,8 @@ struct cfctrl_request_info {
59390 struct cfctrl {
59391 struct cfsrvl serv;
59392 struct cfctrl_rsp res;
59393- atomic_t req_seq_no;
59394- atomic_t rsp_seq_no;
59395+ atomic_unchecked_t req_seq_no;
59396+ atomic_unchecked_t rsp_seq_no;
59397 struct list_head list;
59398 /* Protects from simultaneous access to first_req list */
59399 spinlock_t info_list_lock;
59400diff -urNp linux-3.0.4/include/net/flow.h linux-3.0.4/include/net/flow.h
59401--- linux-3.0.4/include/net/flow.h 2011-07-21 22:17:23.000000000 -0400
59402+++ linux-3.0.4/include/net/flow.h 2011-08-23 21:47:56.000000000 -0400
59403@@ -188,6 +188,6 @@ extern struct flow_cache_object *flow_ca
59404 u8 dir, flow_resolve_t resolver, void *ctx);
59405
59406 extern void flow_cache_flush(void);
59407-extern atomic_t flow_cache_genid;
59408+extern atomic_unchecked_t flow_cache_genid;
59409
59410 #endif
59411diff -urNp linux-3.0.4/include/net/inetpeer.h linux-3.0.4/include/net/inetpeer.h
59412--- linux-3.0.4/include/net/inetpeer.h 2011-07-21 22:17:23.000000000 -0400
59413+++ linux-3.0.4/include/net/inetpeer.h 2011-08-23 21:47:56.000000000 -0400
59414@@ -43,8 +43,8 @@ struct inet_peer {
59415 */
59416 union {
59417 struct {
59418- atomic_t rid; /* Frag reception counter */
59419- atomic_t ip_id_count; /* IP ID for the next packet */
59420+ atomic_unchecked_t rid; /* Frag reception counter */
59421+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
59422 __u32 tcp_ts;
59423 __u32 tcp_ts_stamp;
59424 u32 metrics[RTAX_MAX];
59425@@ -108,7 +108,7 @@ static inline __u16 inet_getid(struct in
59426 {
59427 more++;
59428 inet_peer_refcheck(p);
59429- return atomic_add_return(more, &p->ip_id_count) - more;
59430+ return atomic_add_return_unchecked(more, &p->ip_id_count) - more;
59431 }
59432
59433 #endif /* _NET_INETPEER_H */
59434diff -urNp linux-3.0.4/include/net/ip_fib.h linux-3.0.4/include/net/ip_fib.h
59435--- linux-3.0.4/include/net/ip_fib.h 2011-07-21 22:17:23.000000000 -0400
59436+++ linux-3.0.4/include/net/ip_fib.h 2011-08-23 21:47:56.000000000 -0400
59437@@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(s
59438
59439 #define FIB_RES_SADDR(net, res) \
59440 ((FIB_RES_NH(res).nh_saddr_genid == \
59441- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
59442+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
59443 FIB_RES_NH(res).nh_saddr : \
59444 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
59445 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
59446diff -urNp linux-3.0.4/include/net/ip_vs.h linux-3.0.4/include/net/ip_vs.h
59447--- linux-3.0.4/include/net/ip_vs.h 2011-07-21 22:17:23.000000000 -0400
59448+++ linux-3.0.4/include/net/ip_vs.h 2011-08-23 21:47:56.000000000 -0400
59449@@ -509,7 +509,7 @@ struct ip_vs_conn {
59450 struct ip_vs_conn *control; /* Master control connection */
59451 atomic_t n_control; /* Number of controlled ones */
59452 struct ip_vs_dest *dest; /* real server */
59453- atomic_t in_pkts; /* incoming packet counter */
59454+ atomic_unchecked_t in_pkts; /* incoming packet counter */
59455
59456 /* packet transmitter for different forwarding methods. If it
59457 mangles the packet, it must return NF_DROP or better NF_STOLEN,
59458@@ -647,7 +647,7 @@ struct ip_vs_dest {
59459 __be16 port; /* port number of the server */
59460 union nf_inet_addr addr; /* IP address of the server */
59461 volatile unsigned flags; /* dest status flags */
59462- atomic_t conn_flags; /* flags to copy to conn */
59463+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
59464 atomic_t weight; /* server weight */
59465
59466 atomic_t refcnt; /* reference counter */
59467diff -urNp linux-3.0.4/include/net/irda/ircomm_core.h linux-3.0.4/include/net/irda/ircomm_core.h
59468--- linux-3.0.4/include/net/irda/ircomm_core.h 2011-07-21 22:17:23.000000000 -0400
59469+++ linux-3.0.4/include/net/irda/ircomm_core.h 2011-08-23 21:47:56.000000000 -0400
59470@@ -51,7 +51,7 @@ typedef struct {
59471 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
59472 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
59473 struct ircomm_info *);
59474-} call_t;
59475+} __no_const call_t;
59476
59477 struct ircomm_cb {
59478 irda_queue_t queue;
59479diff -urNp linux-3.0.4/include/net/irda/ircomm_tty.h linux-3.0.4/include/net/irda/ircomm_tty.h
59480--- linux-3.0.4/include/net/irda/ircomm_tty.h 2011-07-21 22:17:23.000000000 -0400
59481+++ linux-3.0.4/include/net/irda/ircomm_tty.h 2011-08-23 21:47:56.000000000 -0400
59482@@ -35,6 +35,7 @@
59483 #include <linux/termios.h>
59484 #include <linux/timer.h>
59485 #include <linux/tty.h> /* struct tty_struct */
59486+#include <asm/local.h>
59487
59488 #include <net/irda/irias_object.h>
59489 #include <net/irda/ircomm_core.h>
59490@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
59491 unsigned short close_delay;
59492 unsigned short closing_wait; /* time to wait before closing */
59493
59494- int open_count;
59495- int blocked_open; /* # of blocked opens */
59496+ local_t open_count;
59497+ local_t blocked_open; /* # of blocked opens */
59498
59499 /* Protect concurent access to :
59500 * o self->open_count
59501diff -urNp linux-3.0.4/include/net/iucv/af_iucv.h linux-3.0.4/include/net/iucv/af_iucv.h
59502--- linux-3.0.4/include/net/iucv/af_iucv.h 2011-07-21 22:17:23.000000000 -0400
59503+++ linux-3.0.4/include/net/iucv/af_iucv.h 2011-08-23 21:47:56.000000000 -0400
59504@@ -87,7 +87,7 @@ struct iucv_sock {
59505 struct iucv_sock_list {
59506 struct hlist_head head;
59507 rwlock_t lock;
59508- atomic_t autobind_name;
59509+ atomic_unchecked_t autobind_name;
59510 };
59511
59512 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
59513diff -urNp linux-3.0.4/include/net/lapb.h linux-3.0.4/include/net/lapb.h
59514--- linux-3.0.4/include/net/lapb.h 2011-07-21 22:17:23.000000000 -0400
59515+++ linux-3.0.4/include/net/lapb.h 2011-08-23 21:47:56.000000000 -0400
59516@@ -95,7 +95,7 @@ struct lapb_cb {
59517 struct sk_buff_head write_queue;
59518 struct sk_buff_head ack_queue;
59519 unsigned char window;
59520- struct lapb_register_struct callbacks;
59521+ struct lapb_register_struct *callbacks;
59522
59523 /* FRMR control information */
59524 struct lapb_frame frmr_data;
59525diff -urNp linux-3.0.4/include/net/neighbour.h linux-3.0.4/include/net/neighbour.h
59526--- linux-3.0.4/include/net/neighbour.h 2011-07-21 22:17:23.000000000 -0400
59527+++ linux-3.0.4/include/net/neighbour.h 2011-08-31 18:39:25.000000000 -0400
59528@@ -124,7 +124,7 @@ struct neigh_ops {
59529 int (*connected_output)(struct sk_buff*);
59530 int (*hh_output)(struct sk_buff*);
59531 int (*queue_xmit)(struct sk_buff*);
59532-};
59533+} __do_const;
59534
59535 struct pneigh_entry {
59536 struct pneigh_entry *next;
59537diff -urNp linux-3.0.4/include/net/netlink.h linux-3.0.4/include/net/netlink.h
59538--- linux-3.0.4/include/net/netlink.h 2011-07-21 22:17:23.000000000 -0400
59539+++ linux-3.0.4/include/net/netlink.h 2011-08-23 21:47:56.000000000 -0400
59540@@ -562,7 +562,7 @@ static inline void *nlmsg_get_pos(struct
59541 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
59542 {
59543 if (mark)
59544- skb_trim(skb, (unsigned char *) mark - skb->data);
59545+ skb_trim(skb, (const unsigned char *) mark - skb->data);
59546 }
59547
59548 /**
59549diff -urNp linux-3.0.4/include/net/netns/ipv4.h linux-3.0.4/include/net/netns/ipv4.h
59550--- linux-3.0.4/include/net/netns/ipv4.h 2011-07-21 22:17:23.000000000 -0400
59551+++ linux-3.0.4/include/net/netns/ipv4.h 2011-08-23 21:47:56.000000000 -0400
59552@@ -56,8 +56,8 @@ struct netns_ipv4 {
59553
59554 unsigned int sysctl_ping_group_range[2];
59555
59556- atomic_t rt_genid;
59557- atomic_t dev_addr_genid;
59558+ atomic_unchecked_t rt_genid;
59559+ atomic_unchecked_t dev_addr_genid;
59560
59561 #ifdef CONFIG_IP_MROUTE
59562 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
59563diff -urNp linux-3.0.4/include/net/sctp/sctp.h linux-3.0.4/include/net/sctp/sctp.h
59564--- linux-3.0.4/include/net/sctp/sctp.h 2011-07-21 22:17:23.000000000 -0400
59565+++ linux-3.0.4/include/net/sctp/sctp.h 2011-08-23 21:47:56.000000000 -0400
59566@@ -315,9 +315,9 @@ do { \
59567
59568 #else /* SCTP_DEBUG */
59569
59570-#define SCTP_DEBUG_PRINTK(whatever...)
59571-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
59572-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
59573+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
59574+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
59575+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
59576 #define SCTP_ENABLE_DEBUG
59577 #define SCTP_DISABLE_DEBUG
59578 #define SCTP_ASSERT(expr, str, func)
59579diff -urNp linux-3.0.4/include/net/sock.h linux-3.0.4/include/net/sock.h
59580--- linux-3.0.4/include/net/sock.h 2011-07-21 22:17:23.000000000 -0400
59581+++ linux-3.0.4/include/net/sock.h 2011-08-23 21:47:56.000000000 -0400
59582@@ -277,7 +277,7 @@ struct sock {
59583 #ifdef CONFIG_RPS
59584 __u32 sk_rxhash;
59585 #endif
59586- atomic_t sk_drops;
59587+ atomic_unchecked_t sk_drops;
59588 int sk_rcvbuf;
59589
59590 struct sk_filter __rcu *sk_filter;
59591@@ -1390,7 +1390,7 @@ static inline void sk_nocaps_add(struct
59592 }
59593
59594 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
59595- char __user *from, char *to,
59596+ char __user *from, unsigned char *to,
59597 int copy, int offset)
59598 {
59599 if (skb->ip_summed == CHECKSUM_NONE) {
59600diff -urNp linux-3.0.4/include/net/tcp.h linux-3.0.4/include/net/tcp.h
59601--- linux-3.0.4/include/net/tcp.h 2011-07-21 22:17:23.000000000 -0400
59602+++ linux-3.0.4/include/net/tcp.h 2011-08-23 21:47:56.000000000 -0400
59603@@ -1374,8 +1374,8 @@ enum tcp_seq_states {
59604 struct tcp_seq_afinfo {
59605 char *name;
59606 sa_family_t family;
59607- struct file_operations seq_fops;
59608- struct seq_operations seq_ops;
59609+ file_operations_no_const seq_fops;
59610+ seq_operations_no_const seq_ops;
59611 };
59612
59613 struct tcp_iter_state {
59614diff -urNp linux-3.0.4/include/net/udp.h linux-3.0.4/include/net/udp.h
59615--- linux-3.0.4/include/net/udp.h 2011-07-21 22:17:23.000000000 -0400
59616+++ linux-3.0.4/include/net/udp.h 2011-08-23 21:47:56.000000000 -0400
59617@@ -234,8 +234,8 @@ struct udp_seq_afinfo {
59618 char *name;
59619 sa_family_t family;
59620 struct udp_table *udp_table;
59621- struct file_operations seq_fops;
59622- struct seq_operations seq_ops;
59623+ file_operations_no_const seq_fops;
59624+ seq_operations_no_const seq_ops;
59625 };
59626
59627 struct udp_iter_state {
59628diff -urNp linux-3.0.4/include/net/xfrm.h linux-3.0.4/include/net/xfrm.h
59629--- linux-3.0.4/include/net/xfrm.h 2011-07-21 22:17:23.000000000 -0400
59630+++ linux-3.0.4/include/net/xfrm.h 2011-08-23 21:47:56.000000000 -0400
59631@@ -505,7 +505,7 @@ struct xfrm_policy {
59632 struct timer_list timer;
59633
59634 struct flow_cache_object flo;
59635- atomic_t genid;
59636+ atomic_unchecked_t genid;
59637 u32 priority;
59638 u32 index;
59639 struct xfrm_mark mark;
59640diff -urNp linux-3.0.4/include/rdma/iw_cm.h linux-3.0.4/include/rdma/iw_cm.h
59641--- linux-3.0.4/include/rdma/iw_cm.h 2011-07-21 22:17:23.000000000 -0400
59642+++ linux-3.0.4/include/rdma/iw_cm.h 2011-08-23 21:47:56.000000000 -0400
59643@@ -120,7 +120,7 @@ struct iw_cm_verbs {
59644 int backlog);
59645
59646 int (*destroy_listen)(struct iw_cm_id *cm_id);
59647-};
59648+} __no_const;
59649
59650 /**
59651 * iw_create_cm_id - Create an IW CM identifier.
59652diff -urNp linux-3.0.4/include/scsi/libfc.h linux-3.0.4/include/scsi/libfc.h
59653--- linux-3.0.4/include/scsi/libfc.h 2011-07-21 22:17:23.000000000 -0400
59654+++ linux-3.0.4/include/scsi/libfc.h 2011-08-23 21:47:56.000000000 -0400
59655@@ -750,6 +750,7 @@ struct libfc_function_template {
59656 */
59657 void (*disc_stop_final) (struct fc_lport *);
59658 };
59659+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
59660
59661 /**
59662 * struct fc_disc - Discovery context
59663@@ -853,7 +854,7 @@ struct fc_lport {
59664 struct fc_vport *vport;
59665
59666 /* Operational Information */
59667- struct libfc_function_template tt;
59668+ libfc_function_template_no_const tt;
59669 u8 link_up;
59670 u8 qfull;
59671 enum fc_lport_state state;
59672diff -urNp linux-3.0.4/include/scsi/scsi_device.h linux-3.0.4/include/scsi/scsi_device.h
59673--- linux-3.0.4/include/scsi/scsi_device.h 2011-07-21 22:17:23.000000000 -0400
59674+++ linux-3.0.4/include/scsi/scsi_device.h 2011-08-23 21:47:56.000000000 -0400
59675@@ -161,9 +161,9 @@ struct scsi_device {
59676 unsigned int max_device_blocked; /* what device_blocked counts down from */
59677 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
59678
59679- atomic_t iorequest_cnt;
59680- atomic_t iodone_cnt;
59681- atomic_t ioerr_cnt;
59682+ atomic_unchecked_t iorequest_cnt;
59683+ atomic_unchecked_t iodone_cnt;
59684+ atomic_unchecked_t ioerr_cnt;
59685
59686 struct device sdev_gendev,
59687 sdev_dev;
59688diff -urNp linux-3.0.4/include/scsi/scsi_transport_fc.h linux-3.0.4/include/scsi/scsi_transport_fc.h
59689--- linux-3.0.4/include/scsi/scsi_transport_fc.h 2011-07-21 22:17:23.000000000 -0400
59690+++ linux-3.0.4/include/scsi/scsi_transport_fc.h 2011-08-26 19:49:56.000000000 -0400
59691@@ -711,7 +711,7 @@ struct fc_function_template {
59692 unsigned long show_host_system_hostname:1;
59693
59694 unsigned long disable_target_scan:1;
59695-};
59696+} __do_const;
59697
59698
59699 /**
59700diff -urNp linux-3.0.4/include/sound/ak4xxx-adda.h linux-3.0.4/include/sound/ak4xxx-adda.h
59701--- linux-3.0.4/include/sound/ak4xxx-adda.h 2011-07-21 22:17:23.000000000 -0400
59702+++ linux-3.0.4/include/sound/ak4xxx-adda.h 2011-08-23 21:47:56.000000000 -0400
59703@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
59704 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
59705 unsigned char val);
59706 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
59707-};
59708+} __no_const;
59709
59710 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
59711
59712diff -urNp linux-3.0.4/include/sound/hwdep.h linux-3.0.4/include/sound/hwdep.h
59713--- linux-3.0.4/include/sound/hwdep.h 2011-07-21 22:17:23.000000000 -0400
59714+++ linux-3.0.4/include/sound/hwdep.h 2011-08-23 21:47:56.000000000 -0400
59715@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
59716 struct snd_hwdep_dsp_status *status);
59717 int (*dsp_load)(struct snd_hwdep *hw,
59718 struct snd_hwdep_dsp_image *image);
59719-};
59720+} __no_const;
59721
59722 struct snd_hwdep {
59723 struct snd_card *card;
59724diff -urNp linux-3.0.4/include/sound/info.h linux-3.0.4/include/sound/info.h
59725--- linux-3.0.4/include/sound/info.h 2011-07-21 22:17:23.000000000 -0400
59726+++ linux-3.0.4/include/sound/info.h 2011-08-23 21:47:56.000000000 -0400
59727@@ -44,7 +44,7 @@ struct snd_info_entry_text {
59728 struct snd_info_buffer *buffer);
59729 void (*write)(struct snd_info_entry *entry,
59730 struct snd_info_buffer *buffer);
59731-};
59732+} __no_const;
59733
59734 struct snd_info_entry_ops {
59735 int (*open)(struct snd_info_entry *entry,
59736diff -urNp linux-3.0.4/include/sound/pcm.h linux-3.0.4/include/sound/pcm.h
59737--- linux-3.0.4/include/sound/pcm.h 2011-07-21 22:17:23.000000000 -0400
59738+++ linux-3.0.4/include/sound/pcm.h 2011-08-23 21:47:56.000000000 -0400
59739@@ -81,6 +81,7 @@ struct snd_pcm_ops {
59740 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
59741 int (*ack)(struct snd_pcm_substream *substream);
59742 };
59743+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
59744
59745 /*
59746 *
59747diff -urNp linux-3.0.4/include/sound/sb16_csp.h linux-3.0.4/include/sound/sb16_csp.h
59748--- linux-3.0.4/include/sound/sb16_csp.h 2011-07-21 22:17:23.000000000 -0400
59749+++ linux-3.0.4/include/sound/sb16_csp.h 2011-08-23 21:47:56.000000000 -0400
59750@@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
59751 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
59752 int (*csp_stop) (struct snd_sb_csp * p);
59753 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
59754-};
59755+} __no_const;
59756
59757 /*
59758 * CSP private data
59759diff -urNp linux-3.0.4/include/sound/soc.h linux-3.0.4/include/sound/soc.h
59760--- linux-3.0.4/include/sound/soc.h 2011-07-21 22:17:23.000000000 -0400
59761+++ linux-3.0.4/include/sound/soc.h 2011-08-26 19:49:56.000000000 -0400
59762@@ -636,7 +636,7 @@ struct snd_soc_platform_driver {
59763
59764 /* platform stream ops */
59765 struct snd_pcm_ops *ops;
59766-};
59767+} __do_const;
59768
59769 struct snd_soc_platform {
59770 const char *name;
59771diff -urNp linux-3.0.4/include/sound/ymfpci.h linux-3.0.4/include/sound/ymfpci.h
59772--- linux-3.0.4/include/sound/ymfpci.h 2011-07-21 22:17:23.000000000 -0400
59773+++ linux-3.0.4/include/sound/ymfpci.h 2011-08-23 21:47:56.000000000 -0400
59774@@ -358,7 +358,7 @@ struct snd_ymfpci {
59775 spinlock_t reg_lock;
59776 spinlock_t voice_lock;
59777 wait_queue_head_t interrupt_sleep;
59778- atomic_t interrupt_sleep_count;
59779+ atomic_unchecked_t interrupt_sleep_count;
59780 struct snd_info_entry *proc_entry;
59781 const struct firmware *dsp_microcode;
59782 const struct firmware *controller_microcode;
59783diff -urNp linux-3.0.4/include/target/target_core_base.h linux-3.0.4/include/target/target_core_base.h
59784--- linux-3.0.4/include/target/target_core_base.h 2011-07-21 22:17:23.000000000 -0400
59785+++ linux-3.0.4/include/target/target_core_base.h 2011-08-23 21:47:56.000000000 -0400
59786@@ -364,7 +364,7 @@ struct t10_reservation_ops {
59787 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
59788 int (*t10_pr_register)(struct se_cmd *);
59789 int (*t10_pr_clear)(struct se_cmd *);
59790-};
59791+} __no_const;
59792
59793 struct t10_reservation_template {
59794 /* Reservation effects all target ports */
59795@@ -432,8 +432,8 @@ struct se_transport_task {
59796 atomic_t t_task_cdbs_left;
59797 atomic_t t_task_cdbs_ex_left;
59798 atomic_t t_task_cdbs_timeout_left;
59799- atomic_t t_task_cdbs_sent;
59800- atomic_t t_transport_aborted;
59801+ atomic_unchecked_t t_task_cdbs_sent;
59802+ atomic_unchecked_t t_transport_aborted;
59803 atomic_t t_transport_active;
59804 atomic_t t_transport_complete;
59805 atomic_t t_transport_queue_active;
59806@@ -774,7 +774,7 @@ struct se_device {
59807 atomic_t active_cmds;
59808 atomic_t simple_cmds;
59809 atomic_t depth_left;
59810- atomic_t dev_ordered_id;
59811+ atomic_unchecked_t dev_ordered_id;
59812 atomic_t dev_tur_active;
59813 atomic_t execute_tasks;
59814 atomic_t dev_status_thr_count;
59815diff -urNp linux-3.0.4/include/trace/events/irq.h linux-3.0.4/include/trace/events/irq.h
59816--- linux-3.0.4/include/trace/events/irq.h 2011-07-21 22:17:23.000000000 -0400
59817+++ linux-3.0.4/include/trace/events/irq.h 2011-08-23 21:47:56.000000000 -0400
59818@@ -36,7 +36,7 @@ struct softirq_action;
59819 */
59820 TRACE_EVENT(irq_handler_entry,
59821
59822- TP_PROTO(int irq, struct irqaction *action),
59823+ TP_PROTO(int irq, const struct irqaction *action),
59824
59825 TP_ARGS(irq, action),
59826
59827@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
59828 */
59829 TRACE_EVENT(irq_handler_exit,
59830
59831- TP_PROTO(int irq, struct irqaction *action, int ret),
59832+ TP_PROTO(int irq, const struct irqaction *action, int ret),
59833
59834 TP_ARGS(irq, action, ret),
59835
59836diff -urNp linux-3.0.4/include/video/udlfb.h linux-3.0.4/include/video/udlfb.h
59837--- linux-3.0.4/include/video/udlfb.h 2011-07-21 22:17:23.000000000 -0400
59838+++ linux-3.0.4/include/video/udlfb.h 2011-08-23 21:47:56.000000000 -0400
59839@@ -51,10 +51,10 @@ struct dlfb_data {
59840 int base8;
59841 u32 pseudo_palette[256];
59842 /* blit-only rendering path metrics, exposed through sysfs */
59843- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
59844- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
59845- atomic_t bytes_sent; /* to usb, after compression including overhead */
59846- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
59847+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
59848+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
59849+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
59850+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
59851 };
59852
59853 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
59854diff -urNp linux-3.0.4/include/video/uvesafb.h linux-3.0.4/include/video/uvesafb.h
59855--- linux-3.0.4/include/video/uvesafb.h 2011-07-21 22:17:23.000000000 -0400
59856+++ linux-3.0.4/include/video/uvesafb.h 2011-08-23 21:47:56.000000000 -0400
59857@@ -177,6 +177,7 @@ struct uvesafb_par {
59858 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
59859 u8 pmi_setpal; /* PMI for palette changes */
59860 u16 *pmi_base; /* protected mode interface location */
59861+ u8 *pmi_code; /* protected mode code location */
59862 void *pmi_start;
59863 void *pmi_pal;
59864 u8 *vbe_state_orig; /*
59865diff -urNp linux-3.0.4/init/do_mounts.c linux-3.0.4/init/do_mounts.c
59866--- linux-3.0.4/init/do_mounts.c 2011-07-21 22:17:23.000000000 -0400
59867+++ linux-3.0.4/init/do_mounts.c 2011-10-06 04:17:55.000000000 -0400
59868@@ -287,11 +287,11 @@ static void __init get_fs_names(char *pa
59869
59870 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
59871 {
59872- int err = sys_mount(name, "/root", fs, flags, data);
59873+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
59874 if (err)
59875 return err;
59876
59877- sys_chdir((const char __user __force *)"/root");
59878+ sys_chdir((const char __force_user*)"/root");
59879 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
59880 printk(KERN_INFO
59881 "VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
59882@@ -383,18 +383,18 @@ void __init change_floppy(char *fmt, ...
59883 va_start(args, fmt);
59884 vsprintf(buf, fmt, args);
59885 va_end(args);
59886- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
59887+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
59888 if (fd >= 0) {
59889 sys_ioctl(fd, FDEJECT, 0);
59890 sys_close(fd);
59891 }
59892 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
59893- fd = sys_open("/dev/console", O_RDWR, 0);
59894+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
59895 if (fd >= 0) {
59896 sys_ioctl(fd, TCGETS, (long)&termios);
59897 termios.c_lflag &= ~ICANON;
59898 sys_ioctl(fd, TCSETSF, (long)&termios);
59899- sys_read(fd, &c, 1);
59900+ sys_read(fd, (char __user *)&c, 1);
59901 termios.c_lflag |= ICANON;
59902 sys_ioctl(fd, TCSETSF, (long)&termios);
59903 sys_close(fd);
59904@@ -488,6 +488,6 @@ void __init prepare_namespace(void)
59905 mount_root();
59906 out:
59907 devtmpfs_mount("dev");
59908- sys_mount(".", "/", NULL, MS_MOVE, NULL);
59909- sys_chroot((const char __user __force *)".");
59910+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
59911+ sys_chroot((const char __force_user *)".");
59912 }
59913diff -urNp linux-3.0.4/init/do_mounts.h linux-3.0.4/init/do_mounts.h
59914--- linux-3.0.4/init/do_mounts.h 2011-07-21 22:17:23.000000000 -0400
59915+++ linux-3.0.4/init/do_mounts.h 2011-10-06 04:17:55.000000000 -0400
59916@@ -15,15 +15,15 @@ extern int root_mountflags;
59917
59918 static inline int create_dev(char *name, dev_t dev)
59919 {
59920- sys_unlink(name);
59921- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
59922+ sys_unlink((char __force_user *)name);
59923+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
59924 }
59925
59926 #if BITS_PER_LONG == 32
59927 static inline u32 bstat(char *name)
59928 {
59929 struct stat64 stat;
59930- if (sys_stat64(name, &stat) != 0)
59931+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
59932 return 0;
59933 if (!S_ISBLK(stat.st_mode))
59934 return 0;
59935@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
59936 static inline u32 bstat(char *name)
59937 {
59938 struct stat stat;
59939- if (sys_newstat(name, &stat) != 0)
59940+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
59941 return 0;
59942 if (!S_ISBLK(stat.st_mode))
59943 return 0;
59944diff -urNp linux-3.0.4/init/do_mounts_initrd.c linux-3.0.4/init/do_mounts_initrd.c
59945--- linux-3.0.4/init/do_mounts_initrd.c 2011-07-21 22:17:23.000000000 -0400
59946+++ linux-3.0.4/init/do_mounts_initrd.c 2011-10-06 04:17:55.000000000 -0400
59947@@ -44,13 +44,13 @@ static void __init handle_initrd(void)
59948 create_dev("/dev/root.old", Root_RAM0);
59949 /* mount initrd on rootfs' /root */
59950 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
59951- sys_mkdir("/old", 0700);
59952- root_fd = sys_open("/", 0, 0);
59953- old_fd = sys_open("/old", 0, 0);
59954+ sys_mkdir((const char __force_user *)"/old", 0700);
59955+ root_fd = sys_open((const char __force_user *)"/", 0, 0);
59956+ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
59957 /* move initrd over / and chdir/chroot in initrd root */
59958- sys_chdir("/root");
59959- sys_mount(".", "/", NULL, MS_MOVE, NULL);
59960- sys_chroot(".");
59961+ sys_chdir((const char __force_user *)"/root");
59962+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
59963+ sys_chroot((const char __force_user *)".");
59964
59965 /*
59966 * In case that a resume from disk is carried out by linuxrc or one of
59967@@ -67,15 +67,15 @@ static void __init handle_initrd(void)
59968
59969 /* move initrd to rootfs' /old */
59970 sys_fchdir(old_fd);
59971- sys_mount("/", ".", NULL, MS_MOVE, NULL);
59972+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
59973 /* switch root and cwd back to / of rootfs */
59974 sys_fchdir(root_fd);
59975- sys_chroot(".");
59976+ sys_chroot((const char __force_user *)".");
59977 sys_close(old_fd);
59978 sys_close(root_fd);
59979
59980 if (new_decode_dev(real_root_dev) == Root_RAM0) {
59981- sys_chdir("/old");
59982+ sys_chdir((const char __force_user *)"/old");
59983 return;
59984 }
59985
59986@@ -83,17 +83,17 @@ static void __init handle_initrd(void)
59987 mount_root();
59988
59989 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
59990- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
59991+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
59992 if (!error)
59993 printk("okay\n");
59994 else {
59995- int fd = sys_open("/dev/root.old", O_RDWR, 0);
59996+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
59997 if (error == -ENOENT)
59998 printk("/initrd does not exist. Ignored.\n");
59999 else
60000 printk("failed\n");
60001 printk(KERN_NOTICE "Unmounting old root\n");
60002- sys_umount("/old", MNT_DETACH);
60003+ sys_umount((char __force_user *)"/old", MNT_DETACH);
60004 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
60005 if (fd < 0) {
60006 error = fd;
60007@@ -116,11 +116,11 @@ int __init initrd_load(void)
60008 * mounted in the normal path.
60009 */
60010 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
60011- sys_unlink("/initrd.image");
60012+ sys_unlink((const char __force_user *)"/initrd.image");
60013 handle_initrd();
60014 return 1;
60015 }
60016 }
60017- sys_unlink("/initrd.image");
60018+ sys_unlink((const char __force_user *)"/initrd.image");
60019 return 0;
60020 }
60021diff -urNp linux-3.0.4/init/do_mounts_md.c linux-3.0.4/init/do_mounts_md.c
60022--- linux-3.0.4/init/do_mounts_md.c 2011-07-21 22:17:23.000000000 -0400
60023+++ linux-3.0.4/init/do_mounts_md.c 2011-10-06 04:17:55.000000000 -0400
60024@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
60025 partitioned ? "_d" : "", minor,
60026 md_setup_args[ent].device_names);
60027
60028- fd = sys_open(name, 0, 0);
60029+ fd = sys_open((char __force_user *)name, 0, 0);
60030 if (fd < 0) {
60031 printk(KERN_ERR "md: open failed - cannot start "
60032 "array %s\n", name);
60033@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
60034 * array without it
60035 */
60036 sys_close(fd);
60037- fd = sys_open(name, 0, 0);
60038+ fd = sys_open((char __force_user *)name, 0, 0);
60039 sys_ioctl(fd, BLKRRPART, 0);
60040 }
60041 sys_close(fd);
60042@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
60043
60044 wait_for_device_probe();
60045
60046- fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
60047+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
60048 if (fd >= 0) {
60049 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
60050 sys_close(fd);
60051diff -urNp linux-3.0.4/init/initramfs.c linux-3.0.4/init/initramfs.c
60052--- linux-3.0.4/init/initramfs.c 2011-07-21 22:17:23.000000000 -0400
60053+++ linux-3.0.4/init/initramfs.c 2011-10-06 04:17:55.000000000 -0400
60054@@ -74,7 +74,7 @@ static void __init free_hash(void)
60055 }
60056 }
60057
60058-static long __init do_utime(char __user *filename, time_t mtime)
60059+static long __init do_utime(__force char __user *filename, time_t mtime)
60060 {
60061 struct timespec t[2];
60062
60063@@ -109,7 +109,7 @@ static void __init dir_utime(void)
60064 struct dir_entry *de, *tmp;
60065 list_for_each_entry_safe(de, tmp, &dir_list, list) {
60066 list_del(&de->list);
60067- do_utime(de->name, de->mtime);
60068+ do_utime((char __force_user *)de->name, de->mtime);
60069 kfree(de->name);
60070 kfree(de);
60071 }
60072@@ -271,7 +271,7 @@ static int __init maybe_link(void)
60073 if (nlink >= 2) {
60074 char *old = find_link(major, minor, ino, mode, collected);
60075 if (old)
60076- return (sys_link(old, collected) < 0) ? -1 : 1;
60077+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
60078 }
60079 return 0;
60080 }
60081@@ -280,11 +280,11 @@ static void __init clean_path(char *path
60082 {
60083 struct stat st;
60084
60085- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
60086+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
60087 if (S_ISDIR(st.st_mode))
60088- sys_rmdir(path);
60089+ sys_rmdir((char __force_user *)path);
60090 else
60091- sys_unlink(path);
60092+ sys_unlink((char __force_user *)path);
60093 }
60094 }
60095
60096@@ -305,7 +305,7 @@ static int __init do_name(void)
60097 int openflags = O_WRONLY|O_CREAT;
60098 if (ml != 1)
60099 openflags |= O_TRUNC;
60100- wfd = sys_open(collected, openflags, mode);
60101+ wfd = sys_open((char __force_user *)collected, openflags, mode);
60102
60103 if (wfd >= 0) {
60104 sys_fchown(wfd, uid, gid);
60105@@ -317,17 +317,17 @@ static int __init do_name(void)
60106 }
60107 }
60108 } else if (S_ISDIR(mode)) {
60109- sys_mkdir(collected, mode);
60110- sys_chown(collected, uid, gid);
60111- sys_chmod(collected, mode);
60112+ sys_mkdir((char __force_user *)collected, mode);
60113+ sys_chown((char __force_user *)collected, uid, gid);
60114+ sys_chmod((char __force_user *)collected, mode);
60115 dir_add(collected, mtime);
60116 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
60117 S_ISFIFO(mode) || S_ISSOCK(mode)) {
60118 if (maybe_link() == 0) {
60119- sys_mknod(collected, mode, rdev);
60120- sys_chown(collected, uid, gid);
60121- sys_chmod(collected, mode);
60122- do_utime(collected, mtime);
60123+ sys_mknod((char __force_user *)collected, mode, rdev);
60124+ sys_chown((char __force_user *)collected, uid, gid);
60125+ sys_chmod((char __force_user *)collected, mode);
60126+ do_utime((char __force_user *)collected, mtime);
60127 }
60128 }
60129 return 0;
60130@@ -336,15 +336,15 @@ static int __init do_name(void)
60131 static int __init do_copy(void)
60132 {
60133 if (count >= body_len) {
60134- sys_write(wfd, victim, body_len);
60135+ sys_write(wfd, (char __force_user *)victim, body_len);
60136 sys_close(wfd);
60137- do_utime(vcollected, mtime);
60138+ do_utime((char __force_user *)vcollected, mtime);
60139 kfree(vcollected);
60140 eat(body_len);
60141 state = SkipIt;
60142 return 0;
60143 } else {
60144- sys_write(wfd, victim, count);
60145+ sys_write(wfd, (char __force_user *)victim, count);
60146 body_len -= count;
60147 eat(count);
60148 return 1;
60149@@ -355,9 +355,9 @@ static int __init do_symlink(void)
60150 {
60151 collected[N_ALIGN(name_len) + body_len] = '\0';
60152 clean_path(collected, 0);
60153- sys_symlink(collected + N_ALIGN(name_len), collected);
60154- sys_lchown(collected, uid, gid);
60155- do_utime(collected, mtime);
60156+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
60157+ sys_lchown((char __force_user *)collected, uid, gid);
60158+ do_utime((char __force_user *)collected, mtime);
60159 state = SkipIt;
60160 next_state = Reset;
60161 return 0;
60162diff -urNp linux-3.0.4/init/Kconfig linux-3.0.4/init/Kconfig
60163--- linux-3.0.4/init/Kconfig 2011-07-21 22:17:23.000000000 -0400
60164+++ linux-3.0.4/init/Kconfig 2011-08-23 21:47:56.000000000 -0400
60165@@ -1195,7 +1195,7 @@ config SLUB_DEBUG
60166
60167 config COMPAT_BRK
60168 bool "Disable heap randomization"
60169- default y
60170+ default n
60171 help
60172 Randomizing heap placement makes heap exploits harder, but it
60173 also breaks ancient binaries (including anything libc5 based).
60174diff -urNp linux-3.0.4/init/main.c linux-3.0.4/init/main.c
60175--- linux-3.0.4/init/main.c 2011-07-21 22:17:23.000000000 -0400
60176+++ linux-3.0.4/init/main.c 2011-10-06 04:17:55.000000000 -0400
60177@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void)
60178 extern void tc_init(void);
60179 #endif
60180
60181+extern void grsecurity_init(void);
60182+
60183 /*
60184 * Debug helper: via this flag we know that we are in 'early bootup code'
60185 * where only the boot processor is running with IRQ disabled. This means
60186@@ -149,6 +151,49 @@ static int __init set_reset_devices(char
60187
60188 __setup("reset_devices", set_reset_devices);
60189
60190+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
60191+extern char pax_enter_kernel_user[];
60192+extern char pax_exit_kernel_user[];
60193+extern pgdval_t clone_pgd_mask;
60194+#endif
60195+
60196+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
60197+static int __init setup_pax_nouderef(char *str)
60198+{
60199+#ifdef CONFIG_X86_32
60200+ unsigned int cpu;
60201+ struct desc_struct *gdt;
60202+
60203+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
60204+ gdt = get_cpu_gdt_table(cpu);
60205+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
60206+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
60207+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
60208+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
60209+ }
60210+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
60211+#else
60212+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
60213+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
60214+ clone_pgd_mask = ~(pgdval_t)0UL;
60215+#endif
60216+
60217+ return 0;
60218+}
60219+early_param("pax_nouderef", setup_pax_nouderef);
60220+#endif
60221+
60222+#ifdef CONFIG_PAX_SOFTMODE
60223+int pax_softmode;
60224+
60225+static int __init setup_pax_softmode(char *str)
60226+{
60227+ get_option(&str, &pax_softmode);
60228+ return 1;
60229+}
60230+__setup("pax_softmode=", setup_pax_softmode);
60231+#endif
60232+
60233 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
60234 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
60235 static const char *panic_later, *panic_param;
60236@@ -667,6 +712,7 @@ int __init_or_module do_one_initcall(ini
60237 {
60238 int count = preempt_count();
60239 int ret;
60240+ const char *msg1 = "", *msg2 = "";
60241
60242 if (initcall_debug)
60243 ret = do_one_initcall_debug(fn);
60244@@ -679,15 +725,15 @@ int __init_or_module do_one_initcall(ini
60245 sprintf(msgbuf, "error code %d ", ret);
60246
60247 if (preempt_count() != count) {
60248- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
60249+ msg1 = " preemption imbalance";
60250 preempt_count() = count;
60251 }
60252 if (irqs_disabled()) {
60253- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
60254+ msg2 = " disabled interrupts";
60255 local_irq_enable();
60256 }
60257- if (msgbuf[0]) {
60258- printk("initcall %pF returned with %s\n", fn, msgbuf);
60259+ if (msgbuf[0] || *msg1 || *msg2) {
60260+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
60261 }
60262
60263 return ret;
60264@@ -805,7 +851,7 @@ static int __init kernel_init(void * unu
60265 do_basic_setup();
60266
60267 /* Open the /dev/console on the rootfs, this should never fail */
60268- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
60269+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
60270 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
60271
60272 (void) sys_dup(0);
60273@@ -818,11 +864,13 @@ static int __init kernel_init(void * unu
60274 if (!ramdisk_execute_command)
60275 ramdisk_execute_command = "/init";
60276
60277- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
60278+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
60279 ramdisk_execute_command = NULL;
60280 prepare_namespace();
60281 }
60282
60283+ grsecurity_init();
60284+
60285 /*
60286 * Ok, we have completed the initial bootup, and
60287 * we're essentially up and running. Get rid of the
60288diff -urNp linux-3.0.4/ipc/mqueue.c linux-3.0.4/ipc/mqueue.c
60289--- linux-3.0.4/ipc/mqueue.c 2011-07-21 22:17:23.000000000 -0400
60290+++ linux-3.0.4/ipc/mqueue.c 2011-08-23 21:48:14.000000000 -0400
60291@@ -154,6 +154,7 @@ static struct inode *mqueue_get_inode(st
60292 mq_bytes = (mq_msg_tblsz +
60293 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
60294
60295+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
60296 spin_lock(&mq_lock);
60297 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
60298 u->mq_bytes + mq_bytes >
60299diff -urNp linux-3.0.4/ipc/msg.c linux-3.0.4/ipc/msg.c
60300--- linux-3.0.4/ipc/msg.c 2011-07-21 22:17:23.000000000 -0400
60301+++ linux-3.0.4/ipc/msg.c 2011-08-23 21:47:56.000000000 -0400
60302@@ -309,18 +309,19 @@ static inline int msg_security(struct ke
60303 return security_msg_queue_associate(msq, msgflg);
60304 }
60305
60306+static struct ipc_ops msg_ops = {
60307+ .getnew = newque,
60308+ .associate = msg_security,
60309+ .more_checks = NULL
60310+};
60311+
60312 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
60313 {
60314 struct ipc_namespace *ns;
60315- struct ipc_ops msg_ops;
60316 struct ipc_params msg_params;
60317
60318 ns = current->nsproxy->ipc_ns;
60319
60320- msg_ops.getnew = newque;
60321- msg_ops.associate = msg_security;
60322- msg_ops.more_checks = NULL;
60323-
60324 msg_params.key = key;
60325 msg_params.flg = msgflg;
60326
60327diff -urNp linux-3.0.4/ipc/sem.c linux-3.0.4/ipc/sem.c
60328--- linux-3.0.4/ipc/sem.c 2011-09-02 18:11:21.000000000 -0400
60329+++ linux-3.0.4/ipc/sem.c 2011-08-23 21:48:14.000000000 -0400
60330@@ -318,10 +318,15 @@ static inline int sem_more_checks(struct
60331 return 0;
60332 }
60333
60334+static struct ipc_ops sem_ops = {
60335+ .getnew = newary,
60336+ .associate = sem_security,
60337+ .more_checks = sem_more_checks
60338+};
60339+
60340 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
60341 {
60342 struct ipc_namespace *ns;
60343- struct ipc_ops sem_ops;
60344 struct ipc_params sem_params;
60345
60346 ns = current->nsproxy->ipc_ns;
60347@@ -329,10 +334,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
60348 if (nsems < 0 || nsems > ns->sc_semmsl)
60349 return -EINVAL;
60350
60351- sem_ops.getnew = newary;
60352- sem_ops.associate = sem_security;
60353- sem_ops.more_checks = sem_more_checks;
60354-
60355 sem_params.key = key;
60356 sem_params.flg = semflg;
60357 sem_params.u.nsems = nsems;
60358@@ -854,6 +855,8 @@ static int semctl_main(struct ipc_namesp
60359 int nsems;
60360 struct list_head tasks;
60361
60362+ pax_track_stack();
60363+
60364 sma = sem_lock_check(ns, semid);
60365 if (IS_ERR(sma))
60366 return PTR_ERR(sma);
60367@@ -1301,6 +1304,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
60368 struct ipc_namespace *ns;
60369 struct list_head tasks;
60370
60371+ pax_track_stack();
60372+
60373 ns = current->nsproxy->ipc_ns;
60374
60375 if (nsops < 1 || semid < 0)
60376diff -urNp linux-3.0.4/ipc/shm.c linux-3.0.4/ipc/shm.c
60377--- linux-3.0.4/ipc/shm.c 2011-07-21 22:17:23.000000000 -0400
60378+++ linux-3.0.4/ipc/shm.c 2011-08-23 21:48:14.000000000 -0400
60379@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_name
60380 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
60381 #endif
60382
60383+#ifdef CONFIG_GRKERNSEC
60384+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
60385+ const time_t shm_createtime, const uid_t cuid,
60386+ const int shmid);
60387+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
60388+ const time_t shm_createtime);
60389+#endif
60390+
60391 void shm_init_ns(struct ipc_namespace *ns)
60392 {
60393 ns->shm_ctlmax = SHMMAX;
60394@@ -401,6 +409,14 @@ static int newseg(struct ipc_namespace *
60395 shp->shm_lprid = 0;
60396 shp->shm_atim = shp->shm_dtim = 0;
60397 shp->shm_ctim = get_seconds();
60398+#ifdef CONFIG_GRKERNSEC
60399+ {
60400+ struct timespec timeval;
60401+ do_posix_clock_monotonic_gettime(&timeval);
60402+
60403+ shp->shm_createtime = timeval.tv_sec;
60404+ }
60405+#endif
60406 shp->shm_segsz = size;
60407 shp->shm_nattch = 0;
60408 shp->shm_file = file;
60409@@ -451,18 +467,19 @@ static inline int shm_more_checks(struct
60410 return 0;
60411 }
60412
60413+static struct ipc_ops shm_ops = {
60414+ .getnew = newseg,
60415+ .associate = shm_security,
60416+ .more_checks = shm_more_checks
60417+};
60418+
60419 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
60420 {
60421 struct ipc_namespace *ns;
60422- struct ipc_ops shm_ops;
60423 struct ipc_params shm_params;
60424
60425 ns = current->nsproxy->ipc_ns;
60426
60427- shm_ops.getnew = newseg;
60428- shm_ops.associate = shm_security;
60429- shm_ops.more_checks = shm_more_checks;
60430-
60431 shm_params.key = key;
60432 shm_params.flg = shmflg;
60433 shm_params.u.size = size;
60434@@ -762,8 +779,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int,
60435 case SHM_LOCK:
60436 case SHM_UNLOCK:
60437 {
60438- struct file *uninitialized_var(shm_file);
60439-
60440 lru_add_drain_all(); /* drain pagevecs to lru lists */
60441
60442 shp = shm_lock_check(ns, shmid);
60443@@ -896,9 +911,21 @@ long do_shmat(int shmid, char __user *sh
60444 if (err)
60445 goto out_unlock;
60446
60447+#ifdef CONFIG_GRKERNSEC
60448+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
60449+ shp->shm_perm.cuid, shmid) ||
60450+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
60451+ err = -EACCES;
60452+ goto out_unlock;
60453+ }
60454+#endif
60455+
60456 path = shp->shm_file->f_path;
60457 path_get(&path);
60458 shp->shm_nattch++;
60459+#ifdef CONFIG_GRKERNSEC
60460+ shp->shm_lapid = current->pid;
60461+#endif
60462 size = i_size_read(path.dentry->d_inode);
60463 shm_unlock(shp);
60464
60465diff -urNp linux-3.0.4/kernel/acct.c linux-3.0.4/kernel/acct.c
60466--- linux-3.0.4/kernel/acct.c 2011-07-21 22:17:23.000000000 -0400
60467+++ linux-3.0.4/kernel/acct.c 2011-10-06 04:17:55.000000000 -0400
60468@@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_a
60469 */
60470 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
60471 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
60472- file->f_op->write(file, (char *)&ac,
60473+ file->f_op->write(file, (char __force_user *)&ac,
60474 sizeof(acct_t), &file->f_pos);
60475 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
60476 set_fs(fs);
60477diff -urNp linux-3.0.4/kernel/audit.c linux-3.0.4/kernel/audit.c
60478--- linux-3.0.4/kernel/audit.c 2011-07-21 22:17:23.000000000 -0400
60479+++ linux-3.0.4/kernel/audit.c 2011-08-23 21:47:56.000000000 -0400
60480@@ -112,7 +112,7 @@ u32 audit_sig_sid = 0;
60481 3) suppressed due to audit_rate_limit
60482 4) suppressed due to audit_backlog_limit
60483 */
60484-static atomic_t audit_lost = ATOMIC_INIT(0);
60485+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
60486
60487 /* The netlink socket. */
60488 static struct sock *audit_sock;
60489@@ -234,7 +234,7 @@ void audit_log_lost(const char *message)
60490 unsigned long now;
60491 int print;
60492
60493- atomic_inc(&audit_lost);
60494+ atomic_inc_unchecked(&audit_lost);
60495
60496 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
60497
60498@@ -253,7 +253,7 @@ void audit_log_lost(const char *message)
60499 printk(KERN_WARNING
60500 "audit: audit_lost=%d audit_rate_limit=%d "
60501 "audit_backlog_limit=%d\n",
60502- atomic_read(&audit_lost),
60503+ atomic_read_unchecked(&audit_lost),
60504 audit_rate_limit,
60505 audit_backlog_limit);
60506 audit_panic(message);
60507@@ -686,7 +686,7 @@ static int audit_receive_msg(struct sk_b
60508 status_set.pid = audit_pid;
60509 status_set.rate_limit = audit_rate_limit;
60510 status_set.backlog_limit = audit_backlog_limit;
60511- status_set.lost = atomic_read(&audit_lost);
60512+ status_set.lost = atomic_read_unchecked(&audit_lost);
60513 status_set.backlog = skb_queue_len(&audit_skb_queue);
60514 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
60515 &status_set, sizeof(status_set));
60516diff -urNp linux-3.0.4/kernel/auditsc.c linux-3.0.4/kernel/auditsc.c
60517--- linux-3.0.4/kernel/auditsc.c 2011-07-21 22:17:23.000000000 -0400
60518+++ linux-3.0.4/kernel/auditsc.c 2011-08-23 21:47:56.000000000 -0400
60519@@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_conte
60520 }
60521
60522 /* global counter which is incremented every time something logs in */
60523-static atomic_t session_id = ATOMIC_INIT(0);
60524+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
60525
60526 /**
60527 * audit_set_loginuid - set a task's audit_context loginuid
60528@@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT
60529 */
60530 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
60531 {
60532- unsigned int sessionid = atomic_inc_return(&session_id);
60533+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
60534 struct audit_context *context = task->audit_context;
60535
60536 if (context && context->in_syscall) {
60537diff -urNp linux-3.0.4/kernel/capability.c linux-3.0.4/kernel/capability.c
60538--- linux-3.0.4/kernel/capability.c 2011-07-21 22:17:23.000000000 -0400
60539+++ linux-3.0.4/kernel/capability.c 2011-08-23 21:48:14.000000000 -0400
60540@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_
60541 * before modification is attempted and the application
60542 * fails.
60543 */
60544+ if (tocopy > ARRAY_SIZE(kdata))
60545+ return -EFAULT;
60546+
60547 if (copy_to_user(dataptr, kdata, tocopy
60548 * sizeof(struct __user_cap_data_struct))) {
60549 return -EFAULT;
60550@@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *n
60551 BUG();
60552 }
60553
60554- if (security_capable(ns, current_cred(), cap) == 0) {
60555+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
60556 current->flags |= PF_SUPERPRIV;
60557 return true;
60558 }
60559@@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *n
60560 }
60561 EXPORT_SYMBOL(ns_capable);
60562
60563+bool ns_capable_nolog(struct user_namespace *ns, int cap)
60564+{
60565+ if (unlikely(!cap_valid(cap))) {
60566+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
60567+ BUG();
60568+ }
60569+
60570+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
60571+ current->flags |= PF_SUPERPRIV;
60572+ return true;
60573+ }
60574+ return false;
60575+}
60576+EXPORT_SYMBOL(ns_capable_nolog);
60577+
60578+bool capable_nolog(int cap)
60579+{
60580+ return ns_capable_nolog(&init_user_ns, cap);
60581+}
60582+EXPORT_SYMBOL(capable_nolog);
60583+
60584 /**
60585 * task_ns_capable - Determine whether current task has a superior
60586 * capability targeted at a specific task's user namespace.
60587@@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct
60588 }
60589 EXPORT_SYMBOL(task_ns_capable);
60590
60591+bool task_ns_capable_nolog(struct task_struct *t, int cap)
60592+{
60593+ return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
60594+}
60595+EXPORT_SYMBOL(task_ns_capable_nolog);
60596+
60597 /**
60598 * nsown_capable - Check superior capability to one's own user_ns
60599 * @cap: The capability in question
60600diff -urNp linux-3.0.4/kernel/cgroup.c linux-3.0.4/kernel/cgroup.c
60601--- linux-3.0.4/kernel/cgroup.c 2011-07-21 22:17:23.000000000 -0400
60602+++ linux-3.0.4/kernel/cgroup.c 2011-08-23 21:48:14.000000000 -0400
60603@@ -593,6 +593,8 @@ static struct css_set *find_css_set(
60604 struct hlist_head *hhead;
60605 struct cg_cgroup_link *link;
60606
60607+ pax_track_stack();
60608+
60609 /* First see if we already have a cgroup group that matches
60610 * the desired set */
60611 read_lock(&css_set_lock);
60612diff -urNp linux-3.0.4/kernel/compat.c linux-3.0.4/kernel/compat.c
60613--- linux-3.0.4/kernel/compat.c 2011-07-21 22:17:23.000000000 -0400
60614+++ linux-3.0.4/kernel/compat.c 2011-10-06 04:17:55.000000000 -0400
60615@@ -13,6 +13,7 @@
60616
60617 #include <linux/linkage.h>
60618 #include <linux/compat.h>
60619+#include <linux/module.h>
60620 #include <linux/errno.h>
60621 #include <linux/time.h>
60622 #include <linux/signal.h>
60623@@ -166,7 +167,7 @@ static long compat_nanosleep_restart(str
60624 mm_segment_t oldfs;
60625 long ret;
60626
60627- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
60628+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
60629 oldfs = get_fs();
60630 set_fs(KERNEL_DS);
60631 ret = hrtimer_nanosleep_restart(restart);
60632@@ -198,7 +199,7 @@ asmlinkage long compat_sys_nanosleep(str
60633 oldfs = get_fs();
60634 set_fs(KERNEL_DS);
60635 ret = hrtimer_nanosleep(&tu,
60636- rmtp ? (struct timespec __user *)&rmt : NULL,
60637+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
60638 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
60639 set_fs(oldfs);
60640
60641@@ -307,7 +308,7 @@ asmlinkage long compat_sys_sigpending(co
60642 mm_segment_t old_fs = get_fs();
60643
60644 set_fs(KERNEL_DS);
60645- ret = sys_sigpending((old_sigset_t __user *) &s);
60646+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
60647 set_fs(old_fs);
60648 if (ret == 0)
60649 ret = put_user(s, set);
60650@@ -330,8 +331,8 @@ asmlinkage long compat_sys_sigprocmask(i
60651 old_fs = get_fs();
60652 set_fs(KERNEL_DS);
60653 ret = sys_sigprocmask(how,
60654- set ? (old_sigset_t __user *) &s : NULL,
60655- oset ? (old_sigset_t __user *) &s : NULL);
60656+ set ? (old_sigset_t __force_user *) &s : NULL,
60657+ oset ? (old_sigset_t __force_user *) &s : NULL);
60658 set_fs(old_fs);
60659 if (ret == 0)
60660 if (oset)
60661@@ -368,7 +369,7 @@ asmlinkage long compat_sys_old_getrlimit
60662 mm_segment_t old_fs = get_fs();
60663
60664 set_fs(KERNEL_DS);
60665- ret = sys_old_getrlimit(resource, &r);
60666+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
60667 set_fs(old_fs);
60668
60669 if (!ret) {
60670@@ -440,7 +441,7 @@ asmlinkage long compat_sys_getrusage(int
60671 mm_segment_t old_fs = get_fs();
60672
60673 set_fs(KERNEL_DS);
60674- ret = sys_getrusage(who, (struct rusage __user *) &r);
60675+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
60676 set_fs(old_fs);
60677
60678 if (ret)
60679@@ -467,8 +468,8 @@ compat_sys_wait4(compat_pid_t pid, compa
60680 set_fs (KERNEL_DS);
60681 ret = sys_wait4(pid,
60682 (stat_addr ?
60683- (unsigned int __user *) &status : NULL),
60684- options, (struct rusage __user *) &r);
60685+ (unsigned int __force_user *) &status : NULL),
60686+ options, (struct rusage __force_user *) &r);
60687 set_fs (old_fs);
60688
60689 if (ret > 0) {
60690@@ -493,8 +494,8 @@ asmlinkage long compat_sys_waitid(int wh
60691 memset(&info, 0, sizeof(info));
60692
60693 set_fs(KERNEL_DS);
60694- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
60695- uru ? (struct rusage __user *)&ru : NULL);
60696+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
60697+ uru ? (struct rusage __force_user *)&ru : NULL);
60698 set_fs(old_fs);
60699
60700 if ((ret < 0) || (info.si_signo == 0))
60701@@ -624,8 +625,8 @@ long compat_sys_timer_settime(timer_t ti
60702 oldfs = get_fs();
60703 set_fs(KERNEL_DS);
60704 err = sys_timer_settime(timer_id, flags,
60705- (struct itimerspec __user *) &newts,
60706- (struct itimerspec __user *) &oldts);
60707+ (struct itimerspec __force_user *) &newts,
60708+ (struct itimerspec __force_user *) &oldts);
60709 set_fs(oldfs);
60710 if (!err && old && put_compat_itimerspec(old, &oldts))
60711 return -EFAULT;
60712@@ -642,7 +643,7 @@ long compat_sys_timer_gettime(timer_t ti
60713 oldfs = get_fs();
60714 set_fs(KERNEL_DS);
60715 err = sys_timer_gettime(timer_id,
60716- (struct itimerspec __user *) &ts);
60717+ (struct itimerspec __force_user *) &ts);
60718 set_fs(oldfs);
60719 if (!err && put_compat_itimerspec(setting, &ts))
60720 return -EFAULT;
60721@@ -661,7 +662,7 @@ long compat_sys_clock_settime(clockid_t
60722 oldfs = get_fs();
60723 set_fs(KERNEL_DS);
60724 err = sys_clock_settime(which_clock,
60725- (struct timespec __user *) &ts);
60726+ (struct timespec __force_user *) &ts);
60727 set_fs(oldfs);
60728 return err;
60729 }
60730@@ -676,7 +677,7 @@ long compat_sys_clock_gettime(clockid_t
60731 oldfs = get_fs();
60732 set_fs(KERNEL_DS);
60733 err = sys_clock_gettime(which_clock,
60734- (struct timespec __user *) &ts);
60735+ (struct timespec __force_user *) &ts);
60736 set_fs(oldfs);
60737 if (!err && put_compat_timespec(&ts, tp))
60738 return -EFAULT;
60739@@ -696,7 +697,7 @@ long compat_sys_clock_adjtime(clockid_t
60740
60741 oldfs = get_fs();
60742 set_fs(KERNEL_DS);
60743- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
60744+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
60745 set_fs(oldfs);
60746
60747 err = compat_put_timex(utp, &txc);
60748@@ -716,7 +717,7 @@ long compat_sys_clock_getres(clockid_t w
60749 oldfs = get_fs();
60750 set_fs(KERNEL_DS);
60751 err = sys_clock_getres(which_clock,
60752- (struct timespec __user *) &ts);
60753+ (struct timespec __force_user *) &ts);
60754 set_fs(oldfs);
60755 if (!err && tp && put_compat_timespec(&ts, tp))
60756 return -EFAULT;
60757@@ -728,9 +729,9 @@ static long compat_clock_nanosleep_resta
60758 long err;
60759 mm_segment_t oldfs;
60760 struct timespec tu;
60761- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
60762+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
60763
60764- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
60765+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
60766 oldfs = get_fs();
60767 set_fs(KERNEL_DS);
60768 err = clock_nanosleep_restart(restart);
60769@@ -762,8 +763,8 @@ long compat_sys_clock_nanosleep(clockid_
60770 oldfs = get_fs();
60771 set_fs(KERNEL_DS);
60772 err = sys_clock_nanosleep(which_clock, flags,
60773- (struct timespec __user *) &in,
60774- (struct timespec __user *) &out);
60775+ (struct timespec __force_user *) &in,
60776+ (struct timespec __force_user *) &out);
60777 set_fs(oldfs);
60778
60779 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
60780diff -urNp linux-3.0.4/kernel/configs.c linux-3.0.4/kernel/configs.c
60781--- linux-3.0.4/kernel/configs.c 2011-07-21 22:17:23.000000000 -0400
60782+++ linux-3.0.4/kernel/configs.c 2011-08-23 21:48:14.000000000 -0400
60783@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
60784 struct proc_dir_entry *entry;
60785
60786 /* create the current config file */
60787+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
60788+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
60789+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
60790+ &ikconfig_file_ops);
60791+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60792+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
60793+ &ikconfig_file_ops);
60794+#endif
60795+#else
60796 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
60797 &ikconfig_file_ops);
60798+#endif
60799+
60800 if (!entry)
60801 return -ENOMEM;
60802
60803diff -urNp linux-3.0.4/kernel/cred.c linux-3.0.4/kernel/cred.c
60804--- linux-3.0.4/kernel/cred.c 2011-07-21 22:17:23.000000000 -0400
60805+++ linux-3.0.4/kernel/cred.c 2011-08-25 17:23:03.000000000 -0400
60806@@ -158,6 +158,8 @@ static void put_cred_rcu(struct rcu_head
60807 */
60808 void __put_cred(struct cred *cred)
60809 {
60810+ pax_track_stack();
60811+
60812 kdebug("__put_cred(%p{%d,%d})", cred,
60813 atomic_read(&cred->usage),
60814 read_cred_subscribers(cred));
60815@@ -182,6 +184,8 @@ void exit_creds(struct task_struct *tsk)
60816 {
60817 struct cred *cred;
60818
60819+ pax_track_stack();
60820+
60821 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
60822 atomic_read(&tsk->cred->usage),
60823 read_cred_subscribers(tsk->cred));
60824@@ -220,6 +224,8 @@ const struct cred *get_task_cred(struct
60825 {
60826 const struct cred *cred;
60827
60828+ pax_track_stack();
60829+
60830 rcu_read_lock();
60831
60832 do {
60833@@ -239,6 +245,8 @@ struct cred *cred_alloc_blank(void)
60834 {
60835 struct cred *new;
60836
60837+ pax_track_stack();
60838+
60839 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
60840 if (!new)
60841 return NULL;
60842@@ -287,6 +295,8 @@ struct cred *prepare_creds(void)
60843 const struct cred *old;
60844 struct cred *new;
60845
60846+ pax_track_stack();
60847+
60848 validate_process_creds();
60849
60850 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
60851@@ -333,6 +343,8 @@ struct cred *prepare_exec_creds(void)
60852 struct thread_group_cred *tgcred = NULL;
60853 struct cred *new;
60854
60855+ pax_track_stack();
60856+
60857 #ifdef CONFIG_KEYS
60858 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
60859 if (!tgcred)
60860@@ -385,6 +397,8 @@ int copy_creds(struct task_struct *p, un
60861 struct cred *new;
60862 int ret;
60863
60864+ pax_track_stack();
60865+
60866 if (
60867 #ifdef CONFIG_KEYS
60868 !p->cred->thread_keyring &&
60869@@ -475,6 +489,8 @@ int commit_creds(struct cred *new)
60870 struct task_struct *task = current;
60871 const struct cred *old = task->real_cred;
60872
60873+ pax_track_stack();
60874+
60875 kdebug("commit_creds(%p{%d,%d})", new,
60876 atomic_read(&new->usage),
60877 read_cred_subscribers(new));
60878@@ -489,6 +505,8 @@ int commit_creds(struct cred *new)
60879
60880 get_cred(new); /* we will require a ref for the subj creds too */
60881
60882+ gr_set_role_label(task, new->uid, new->gid);
60883+
60884 /* dumpability changes */
60885 if (old->euid != new->euid ||
60886 old->egid != new->egid ||
60887@@ -508,10 +526,8 @@ int commit_creds(struct cred *new)
60888 key_fsgid_changed(task);
60889
60890 /* do it
60891- * - What if a process setreuid()'s and this brings the
60892- * new uid over his NPROC rlimit? We can check this now
60893- * cheaply with the new uid cache, so if it matters
60894- * we should be checking for it. -DaveM
60895+ * RLIMIT_NPROC limits on user->processes have already been checked
60896+ * in set_user().
60897 */
60898 alter_cred_subscribers(new, 2);
60899 if (new->user != old->user)
60900@@ -551,6 +567,8 @@ EXPORT_SYMBOL(commit_creds);
60901 */
60902 void abort_creds(struct cred *new)
60903 {
60904+ pax_track_stack();
60905+
60906 kdebug("abort_creds(%p{%d,%d})", new,
60907 atomic_read(&new->usage),
60908 read_cred_subscribers(new));
60909@@ -574,6 +592,8 @@ const struct cred *override_creds(const
60910 {
60911 const struct cred *old = current->cred;
60912
60913+ pax_track_stack();
60914+
60915 kdebug("override_creds(%p{%d,%d})", new,
60916 atomic_read(&new->usage),
60917 read_cred_subscribers(new));
60918@@ -603,6 +623,8 @@ void revert_creds(const struct cred *old
60919 {
60920 const struct cred *override = current->cred;
60921
60922+ pax_track_stack();
60923+
60924 kdebug("revert_creds(%p{%d,%d})", old,
60925 atomic_read(&old->usage),
60926 read_cred_subscribers(old));
60927@@ -649,6 +671,8 @@ struct cred *prepare_kernel_cred(struct
60928 const struct cred *old;
60929 struct cred *new;
60930
60931+ pax_track_stack();
60932+
60933 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
60934 if (!new)
60935 return NULL;
60936@@ -703,6 +727,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
60937 */
60938 int set_security_override(struct cred *new, u32 secid)
60939 {
60940+ pax_track_stack();
60941+
60942 return security_kernel_act_as(new, secid);
60943 }
60944 EXPORT_SYMBOL(set_security_override);
60945@@ -722,6 +748,8 @@ int set_security_override_from_ctx(struc
60946 u32 secid;
60947 int ret;
60948
60949+ pax_track_stack();
60950+
60951 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
60952 if (ret < 0)
60953 return ret;
60954diff -urNp linux-3.0.4/kernel/debug/debug_core.c linux-3.0.4/kernel/debug/debug_core.c
60955--- linux-3.0.4/kernel/debug/debug_core.c 2011-07-21 22:17:23.000000000 -0400
60956+++ linux-3.0.4/kernel/debug/debug_core.c 2011-08-23 21:47:56.000000000 -0400
60957@@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_loc
60958 */
60959 static atomic_t masters_in_kgdb;
60960 static atomic_t slaves_in_kgdb;
60961-static atomic_t kgdb_break_tasklet_var;
60962+static atomic_unchecked_t kgdb_break_tasklet_var;
60963 atomic_t kgdb_setting_breakpoint;
60964
60965 struct task_struct *kgdb_usethread;
60966@@ -129,7 +129,7 @@ int kgdb_single_step;
60967 static pid_t kgdb_sstep_pid;
60968
60969 /* to keep track of the CPU which is doing the single stepping*/
60970-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
60971+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
60972
60973 /*
60974 * If you are debugging a problem where roundup (the collection of
60975@@ -542,7 +542,7 @@ return_normal:
60976 * kernel will only try for the value of sstep_tries before
60977 * giving up and continuing on.
60978 */
60979- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
60980+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
60981 (kgdb_info[cpu].task &&
60982 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
60983 atomic_set(&kgdb_active, -1);
60984@@ -636,8 +636,8 @@ cpu_master_loop:
60985 }
60986
60987 kgdb_restore:
60988- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
60989- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
60990+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
60991+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
60992 if (kgdb_info[sstep_cpu].task)
60993 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
60994 else
60995@@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(vo
60996 static void kgdb_tasklet_bpt(unsigned long ing)
60997 {
60998 kgdb_breakpoint();
60999- atomic_set(&kgdb_break_tasklet_var, 0);
61000+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
61001 }
61002
61003 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
61004
61005 void kgdb_schedule_breakpoint(void)
61006 {
61007- if (atomic_read(&kgdb_break_tasklet_var) ||
61008+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
61009 atomic_read(&kgdb_active) != -1 ||
61010 atomic_read(&kgdb_setting_breakpoint))
61011 return;
61012- atomic_inc(&kgdb_break_tasklet_var);
61013+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
61014 tasklet_schedule(&kgdb_tasklet_breakpoint);
61015 }
61016 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
61017diff -urNp linux-3.0.4/kernel/debug/kdb/kdb_main.c linux-3.0.4/kernel/debug/kdb/kdb_main.c
61018--- linux-3.0.4/kernel/debug/kdb/kdb_main.c 2011-07-21 22:17:23.000000000 -0400
61019+++ linux-3.0.4/kernel/debug/kdb/kdb_main.c 2011-08-23 21:47:56.000000000 -0400
61020@@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const cha
61021 list_for_each_entry(mod, kdb_modules, list) {
61022
61023 kdb_printf("%-20s%8u 0x%p ", mod->name,
61024- mod->core_size, (void *)mod);
61025+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
61026 #ifdef CONFIG_MODULE_UNLOAD
61027 kdb_printf("%4d ", module_refcount(mod));
61028 #endif
61029@@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const cha
61030 kdb_printf(" (Loading)");
61031 else
61032 kdb_printf(" (Live)");
61033- kdb_printf(" 0x%p", mod->module_core);
61034+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
61035
61036 #ifdef CONFIG_MODULE_UNLOAD
61037 {
61038diff -urNp linux-3.0.4/kernel/events/core.c linux-3.0.4/kernel/events/core.c
61039--- linux-3.0.4/kernel/events/core.c 2011-09-02 18:11:21.000000000 -0400
61040+++ linux-3.0.4/kernel/events/core.c 2011-09-14 09:08:05.000000000 -0400
61041@@ -170,7 +170,7 @@ int perf_proc_update_handler(struct ctl_
61042 return 0;
61043 }
61044
61045-static atomic64_t perf_event_id;
61046+static atomic64_unchecked_t perf_event_id;
61047
61048 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
61049 enum event_type_t event_type);
61050@@ -2488,7 +2488,7 @@ static void __perf_event_read(void *info
61051
61052 static inline u64 perf_event_count(struct perf_event *event)
61053 {
61054- return local64_read(&event->count) + atomic64_read(&event->child_count);
61055+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
61056 }
61057
61058 static u64 perf_event_read(struct perf_event *event)
61059@@ -3023,9 +3023,9 @@ u64 perf_event_read_value(struct perf_ev
61060 mutex_lock(&event->child_mutex);
61061 total += perf_event_read(event);
61062 *enabled += event->total_time_enabled +
61063- atomic64_read(&event->child_total_time_enabled);
61064+ atomic64_read_unchecked(&event->child_total_time_enabled);
61065 *running += event->total_time_running +
61066- atomic64_read(&event->child_total_time_running);
61067+ atomic64_read_unchecked(&event->child_total_time_running);
61068
61069 list_for_each_entry(child, &event->child_list, child_list) {
61070 total += perf_event_read(child);
61071@@ -3388,10 +3388,10 @@ void perf_event_update_userpage(struct p
61072 userpg->offset -= local64_read(&event->hw.prev_count);
61073
61074 userpg->time_enabled = event->total_time_enabled +
61075- atomic64_read(&event->child_total_time_enabled);
61076+ atomic64_read_unchecked(&event->child_total_time_enabled);
61077
61078 userpg->time_running = event->total_time_running +
61079- atomic64_read(&event->child_total_time_running);
61080+ atomic64_read_unchecked(&event->child_total_time_running);
61081
61082 barrier();
61083 ++userpg->lock;
61084@@ -4188,11 +4188,11 @@ static void perf_output_read_one(struct
61085 values[n++] = perf_event_count(event);
61086 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
61087 values[n++] = enabled +
61088- atomic64_read(&event->child_total_time_enabled);
61089+ atomic64_read_unchecked(&event->child_total_time_enabled);
61090 }
61091 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
61092 values[n++] = running +
61093- atomic64_read(&event->child_total_time_running);
61094+ atomic64_read_unchecked(&event->child_total_time_running);
61095 }
61096 if (read_format & PERF_FORMAT_ID)
61097 values[n++] = primary_event_id(event);
61098@@ -4833,12 +4833,12 @@ static void perf_event_mmap_event(struct
61099 * need to add enough zero bytes after the string to handle
61100 * the 64bit alignment we do later.
61101 */
61102- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
61103+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
61104 if (!buf) {
61105 name = strncpy(tmp, "//enomem", sizeof(tmp));
61106 goto got_name;
61107 }
61108- name = d_path(&file->f_path, buf, PATH_MAX);
61109+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
61110 if (IS_ERR(name)) {
61111 name = strncpy(tmp, "//toolong", sizeof(tmp));
61112 goto got_name;
61113@@ -6190,7 +6190,7 @@ perf_event_alloc(struct perf_event_attr
61114 event->parent = parent_event;
61115
61116 event->ns = get_pid_ns(current->nsproxy->pid_ns);
61117- event->id = atomic64_inc_return(&perf_event_id);
61118+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
61119
61120 event->state = PERF_EVENT_STATE_INACTIVE;
61121
61122@@ -6713,10 +6713,10 @@ static void sync_child_event(struct perf
61123 /*
61124 * Add back the child's count to the parent's count:
61125 */
61126- atomic64_add(child_val, &parent_event->child_count);
61127- atomic64_add(child_event->total_time_enabled,
61128+ atomic64_add_unchecked(child_val, &parent_event->child_count);
61129+ atomic64_add_unchecked(child_event->total_time_enabled,
61130 &parent_event->child_total_time_enabled);
61131- atomic64_add(child_event->total_time_running,
61132+ atomic64_add_unchecked(child_event->total_time_running,
61133 &parent_event->child_total_time_running);
61134
61135 /*
61136diff -urNp linux-3.0.4/kernel/exit.c linux-3.0.4/kernel/exit.c
61137--- linux-3.0.4/kernel/exit.c 2011-07-21 22:17:23.000000000 -0400
61138+++ linux-3.0.4/kernel/exit.c 2011-08-23 21:48:14.000000000 -0400
61139@@ -57,6 +57,10 @@
61140 #include <asm/pgtable.h>
61141 #include <asm/mmu_context.h>
61142
61143+#ifdef CONFIG_GRKERNSEC
61144+extern rwlock_t grsec_exec_file_lock;
61145+#endif
61146+
61147 static void exit_mm(struct task_struct * tsk);
61148
61149 static void __unhash_process(struct task_struct *p, bool group_dead)
61150@@ -169,6 +173,10 @@ void release_task(struct task_struct * p
61151 struct task_struct *leader;
61152 int zap_leader;
61153 repeat:
61154+#ifdef CONFIG_NET
61155+ gr_del_task_from_ip_table(p);
61156+#endif
61157+
61158 tracehook_prepare_release_task(p);
61159 /* don't need to get the RCU readlock here - the process is dead and
61160 * can't be modifying its own credentials. But shut RCU-lockdep up */
61161@@ -338,11 +346,22 @@ static void reparent_to_kthreadd(void)
61162 {
61163 write_lock_irq(&tasklist_lock);
61164
61165+#ifdef CONFIG_GRKERNSEC
61166+ write_lock(&grsec_exec_file_lock);
61167+ if (current->exec_file) {
61168+ fput(current->exec_file);
61169+ current->exec_file = NULL;
61170+ }
61171+ write_unlock(&grsec_exec_file_lock);
61172+#endif
61173+
61174 ptrace_unlink(current);
61175 /* Reparent to init */
61176 current->real_parent = current->parent = kthreadd_task;
61177 list_move_tail(&current->sibling, &current->real_parent->children);
61178
61179+ gr_set_kernel_label(current);
61180+
61181 /* Set the exit signal to SIGCHLD so we signal init on exit */
61182 current->exit_signal = SIGCHLD;
61183
61184@@ -394,7 +413,7 @@ int allow_signal(int sig)
61185 * know it'll be handled, so that they don't get converted to
61186 * SIGKILL or just silently dropped.
61187 */
61188- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
61189+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
61190 recalc_sigpending();
61191 spin_unlock_irq(&current->sighand->siglock);
61192 return 0;
61193@@ -430,6 +449,17 @@ void daemonize(const char *name, ...)
61194 vsnprintf(current->comm, sizeof(current->comm), name, args);
61195 va_end(args);
61196
61197+#ifdef CONFIG_GRKERNSEC
61198+ write_lock(&grsec_exec_file_lock);
61199+ if (current->exec_file) {
61200+ fput(current->exec_file);
61201+ current->exec_file = NULL;
61202+ }
61203+ write_unlock(&grsec_exec_file_lock);
61204+#endif
61205+
61206+ gr_set_kernel_label(current);
61207+
61208 /*
61209 * If we were started as result of loading a module, close all of the
61210 * user space pages. We don't need them, and if we didn't close them
61211@@ -904,15 +934,8 @@ NORET_TYPE void do_exit(long code)
61212 struct task_struct *tsk = current;
61213 int group_dead;
61214
61215- profile_task_exit(tsk);
61216-
61217- WARN_ON(atomic_read(&tsk->fs_excl));
61218- WARN_ON(blk_needs_flush_plug(tsk));
61219-
61220 if (unlikely(in_interrupt()))
61221 panic("Aiee, killing interrupt handler!");
61222- if (unlikely(!tsk->pid))
61223- panic("Attempted to kill the idle task!");
61224
61225 /*
61226 * If do_exit is called because this processes oopsed, it's possible
61227@@ -923,6 +946,14 @@ NORET_TYPE void do_exit(long code)
61228 */
61229 set_fs(USER_DS);
61230
61231+ profile_task_exit(tsk);
61232+
61233+ WARN_ON(atomic_read(&tsk->fs_excl));
61234+ WARN_ON(blk_needs_flush_plug(tsk));
61235+
61236+ if (unlikely(!tsk->pid))
61237+ panic("Attempted to kill the idle task!");
61238+
61239 tracehook_report_exit(&code);
61240
61241 validate_creds_for_do_exit(tsk);
61242@@ -983,6 +1014,9 @@ NORET_TYPE void do_exit(long code)
61243 tsk->exit_code = code;
61244 taskstats_exit(tsk, group_dead);
61245
61246+ gr_acl_handle_psacct(tsk, code);
61247+ gr_acl_handle_exit();
61248+
61249 exit_mm(tsk);
61250
61251 if (group_dead)
61252diff -urNp linux-3.0.4/kernel/fork.c linux-3.0.4/kernel/fork.c
61253--- linux-3.0.4/kernel/fork.c 2011-07-21 22:17:23.000000000 -0400
61254+++ linux-3.0.4/kernel/fork.c 2011-08-25 17:23:36.000000000 -0400
61255@@ -286,7 +286,7 @@ static struct task_struct *dup_task_stru
61256 *stackend = STACK_END_MAGIC; /* for overflow detection */
61257
61258 #ifdef CONFIG_CC_STACKPROTECTOR
61259- tsk->stack_canary = get_random_int();
61260+ tsk->stack_canary = pax_get_random_long();
61261 #endif
61262
61263 /* One for us, one for whoever does the "release_task()" (usually parent) */
61264@@ -308,13 +308,77 @@ out:
61265 }
61266
61267 #ifdef CONFIG_MMU
61268+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
61269+{
61270+ struct vm_area_struct *tmp;
61271+ unsigned long charge;
61272+ struct mempolicy *pol;
61273+ struct file *file;
61274+
61275+ charge = 0;
61276+ if (mpnt->vm_flags & VM_ACCOUNT) {
61277+ unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
61278+ if (security_vm_enough_memory(len))
61279+ goto fail_nomem;
61280+ charge = len;
61281+ }
61282+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
61283+ if (!tmp)
61284+ goto fail_nomem;
61285+ *tmp = *mpnt;
61286+ tmp->vm_mm = mm;
61287+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
61288+ pol = mpol_dup(vma_policy(mpnt));
61289+ if (IS_ERR(pol))
61290+ goto fail_nomem_policy;
61291+ vma_set_policy(tmp, pol);
61292+ if (anon_vma_fork(tmp, mpnt))
61293+ goto fail_nomem_anon_vma_fork;
61294+ tmp->vm_flags &= ~VM_LOCKED;
61295+ tmp->vm_next = tmp->vm_prev = NULL;
61296+ tmp->vm_mirror = NULL;
61297+ file = tmp->vm_file;
61298+ if (file) {
61299+ struct inode *inode = file->f_path.dentry->d_inode;
61300+ struct address_space *mapping = file->f_mapping;
61301+
61302+ get_file(file);
61303+ if (tmp->vm_flags & VM_DENYWRITE)
61304+ atomic_dec(&inode->i_writecount);
61305+ mutex_lock(&mapping->i_mmap_mutex);
61306+ if (tmp->vm_flags & VM_SHARED)
61307+ mapping->i_mmap_writable++;
61308+ flush_dcache_mmap_lock(mapping);
61309+ /* insert tmp into the share list, just after mpnt */
61310+ vma_prio_tree_add(tmp, mpnt);
61311+ flush_dcache_mmap_unlock(mapping);
61312+ mutex_unlock(&mapping->i_mmap_mutex);
61313+ }
61314+
61315+ /*
61316+ * Clear hugetlb-related page reserves for children. This only
61317+ * affects MAP_PRIVATE mappings. Faults generated by the child
61318+ * are not guaranteed to succeed, even if read-only
61319+ */
61320+ if (is_vm_hugetlb_page(tmp))
61321+ reset_vma_resv_huge_pages(tmp);
61322+
61323+ return tmp;
61324+
61325+fail_nomem_anon_vma_fork:
61326+ mpol_put(pol);
61327+fail_nomem_policy:
61328+ kmem_cache_free(vm_area_cachep, tmp);
61329+fail_nomem:
61330+ vm_unacct_memory(charge);
61331+ return NULL;
61332+}
61333+
61334 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
61335 {
61336 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
61337 struct rb_node **rb_link, *rb_parent;
61338 int retval;
61339- unsigned long charge;
61340- struct mempolicy *pol;
61341
61342 down_write(&oldmm->mmap_sem);
61343 flush_cache_dup_mm(oldmm);
61344@@ -326,8 +390,8 @@ static int dup_mmap(struct mm_struct *mm
61345 mm->locked_vm = 0;
61346 mm->mmap = NULL;
61347 mm->mmap_cache = NULL;
61348- mm->free_area_cache = oldmm->mmap_base;
61349- mm->cached_hole_size = ~0UL;
61350+ mm->free_area_cache = oldmm->free_area_cache;
61351+ mm->cached_hole_size = oldmm->cached_hole_size;
61352 mm->map_count = 0;
61353 cpumask_clear(mm_cpumask(mm));
61354 mm->mm_rb = RB_ROOT;
61355@@ -343,8 +407,6 @@ static int dup_mmap(struct mm_struct *mm
61356
61357 prev = NULL;
61358 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
61359- struct file *file;
61360-
61361 if (mpnt->vm_flags & VM_DONTCOPY) {
61362 long pages = vma_pages(mpnt);
61363 mm->total_vm -= pages;
61364@@ -352,55 +414,13 @@ static int dup_mmap(struct mm_struct *mm
61365 -pages);
61366 continue;
61367 }
61368- charge = 0;
61369- if (mpnt->vm_flags & VM_ACCOUNT) {
61370- unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
61371- if (security_vm_enough_memory(len))
61372- goto fail_nomem;
61373- charge = len;
61374- }
61375- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
61376- if (!tmp)
61377- goto fail_nomem;
61378- *tmp = *mpnt;
61379- INIT_LIST_HEAD(&tmp->anon_vma_chain);
61380- pol = mpol_dup(vma_policy(mpnt));
61381- retval = PTR_ERR(pol);
61382- if (IS_ERR(pol))
61383- goto fail_nomem_policy;
61384- vma_set_policy(tmp, pol);
61385- tmp->vm_mm = mm;
61386- if (anon_vma_fork(tmp, mpnt))
61387- goto fail_nomem_anon_vma_fork;
61388- tmp->vm_flags &= ~VM_LOCKED;
61389- tmp->vm_next = tmp->vm_prev = NULL;
61390- file = tmp->vm_file;
61391- if (file) {
61392- struct inode *inode = file->f_path.dentry->d_inode;
61393- struct address_space *mapping = file->f_mapping;
61394-
61395- get_file(file);
61396- if (tmp->vm_flags & VM_DENYWRITE)
61397- atomic_dec(&inode->i_writecount);
61398- mutex_lock(&mapping->i_mmap_mutex);
61399- if (tmp->vm_flags & VM_SHARED)
61400- mapping->i_mmap_writable++;
61401- flush_dcache_mmap_lock(mapping);
61402- /* insert tmp into the share list, just after mpnt */
61403- vma_prio_tree_add(tmp, mpnt);
61404- flush_dcache_mmap_unlock(mapping);
61405- mutex_unlock(&mapping->i_mmap_mutex);
61406+ tmp = dup_vma(mm, mpnt);
61407+ if (!tmp) {
61408+ retval = -ENOMEM;
61409+ goto out;
61410 }
61411
61412 /*
61413- * Clear hugetlb-related page reserves for children. This only
61414- * affects MAP_PRIVATE mappings. Faults generated by the child
61415- * are not guaranteed to succeed, even if read-only
61416- */
61417- if (is_vm_hugetlb_page(tmp))
61418- reset_vma_resv_huge_pages(tmp);
61419-
61420- /*
61421 * Link in the new vma and copy the page table entries.
61422 */
61423 *pprev = tmp;
61424@@ -421,6 +441,31 @@ static int dup_mmap(struct mm_struct *mm
61425 if (retval)
61426 goto out;
61427 }
61428+
61429+#ifdef CONFIG_PAX_SEGMEXEC
61430+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
61431+ struct vm_area_struct *mpnt_m;
61432+
61433+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
61434+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
61435+
61436+ if (!mpnt->vm_mirror)
61437+ continue;
61438+
61439+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
61440+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
61441+ mpnt->vm_mirror = mpnt_m;
61442+ } else {
61443+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
61444+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
61445+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
61446+ mpnt->vm_mirror->vm_mirror = mpnt;
61447+ }
61448+ }
61449+ BUG_ON(mpnt_m);
61450+ }
61451+#endif
61452+
61453 /* a new mm has just been created */
61454 arch_dup_mmap(oldmm, mm);
61455 retval = 0;
61456@@ -429,14 +474,6 @@ out:
61457 flush_tlb_mm(oldmm);
61458 up_write(&oldmm->mmap_sem);
61459 return retval;
61460-fail_nomem_anon_vma_fork:
61461- mpol_put(pol);
61462-fail_nomem_policy:
61463- kmem_cache_free(vm_area_cachep, tmp);
61464-fail_nomem:
61465- retval = -ENOMEM;
61466- vm_unacct_memory(charge);
61467- goto out;
61468 }
61469
61470 static inline int mm_alloc_pgd(struct mm_struct * mm)
61471@@ -836,13 +873,14 @@ static int copy_fs(unsigned long clone_f
61472 spin_unlock(&fs->lock);
61473 return -EAGAIN;
61474 }
61475- fs->users++;
61476+ atomic_inc(&fs->users);
61477 spin_unlock(&fs->lock);
61478 return 0;
61479 }
61480 tsk->fs = copy_fs_struct(fs);
61481 if (!tsk->fs)
61482 return -ENOMEM;
61483+ gr_set_chroot_entries(tsk, &tsk->fs->root);
61484 return 0;
61485 }
61486
61487@@ -1104,12 +1142,16 @@ static struct task_struct *copy_process(
61488 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
61489 #endif
61490 retval = -EAGAIN;
61491+
61492+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
61493+
61494 if (atomic_read(&p->real_cred->user->processes) >=
61495 task_rlimit(p, RLIMIT_NPROC)) {
61496- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
61497- p->real_cred->user != INIT_USER)
61498+ if (p->real_cred->user != INIT_USER &&
61499+ !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
61500 goto bad_fork_free;
61501 }
61502+ current->flags &= ~PF_NPROC_EXCEEDED;
61503
61504 retval = copy_creds(p, clone_flags);
61505 if (retval < 0)
61506@@ -1250,6 +1292,8 @@ static struct task_struct *copy_process(
61507 if (clone_flags & CLONE_THREAD)
61508 p->tgid = current->tgid;
61509
61510+ gr_copy_label(p);
61511+
61512 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
61513 /*
61514 * Clear TID on mm_release()?
61515@@ -1414,6 +1458,8 @@ bad_fork_cleanup_count:
61516 bad_fork_free:
61517 free_task(p);
61518 fork_out:
61519+ gr_log_forkfail(retval);
61520+
61521 return ERR_PTR(retval);
61522 }
61523
61524@@ -1502,6 +1548,8 @@ long do_fork(unsigned long clone_flags,
61525 if (clone_flags & CLONE_PARENT_SETTID)
61526 put_user(nr, parent_tidptr);
61527
61528+ gr_handle_brute_check();
61529+
61530 if (clone_flags & CLONE_VFORK) {
61531 p->vfork_done = &vfork;
61532 init_completion(&vfork);
61533@@ -1610,7 +1658,7 @@ static int unshare_fs(unsigned long unsh
61534 return 0;
61535
61536 /* don't need lock here; in the worst case we'll do useless copy */
61537- if (fs->users == 1)
61538+ if (atomic_read(&fs->users) == 1)
61539 return 0;
61540
61541 *new_fsp = copy_fs_struct(fs);
61542@@ -1697,7 +1745,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
61543 fs = current->fs;
61544 spin_lock(&fs->lock);
61545 current->fs = new_fs;
61546- if (--fs->users)
61547+ gr_set_chroot_entries(current, &current->fs->root);
61548+ if (atomic_dec_return(&fs->users))
61549 new_fs = NULL;
61550 else
61551 new_fs = fs;
61552diff -urNp linux-3.0.4/kernel/futex.c linux-3.0.4/kernel/futex.c
61553--- linux-3.0.4/kernel/futex.c 2011-09-02 18:11:21.000000000 -0400
61554+++ linux-3.0.4/kernel/futex.c 2011-08-23 21:48:14.000000000 -0400
61555@@ -54,6 +54,7 @@
61556 #include <linux/mount.h>
61557 #include <linux/pagemap.h>
61558 #include <linux/syscalls.h>
61559+#include <linux/ptrace.h>
61560 #include <linux/signal.h>
61561 #include <linux/module.h>
61562 #include <linux/magic.h>
61563@@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fsh
61564 struct page *page, *page_head;
61565 int err, ro = 0;
61566
61567+#ifdef CONFIG_PAX_SEGMEXEC
61568+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
61569+ return -EFAULT;
61570+#endif
61571+
61572 /*
61573 * The futex address must be "naturally" aligned.
61574 */
61575@@ -1863,6 +1869,8 @@ static int futex_wait(u32 __user *uaddr,
61576 struct futex_q q = futex_q_init;
61577 int ret;
61578
61579+ pax_track_stack();
61580+
61581 if (!bitset)
61582 return -EINVAL;
61583 q.bitset = bitset;
61584@@ -2259,6 +2267,8 @@ static int futex_wait_requeue_pi(u32 __u
61585 struct futex_q q = futex_q_init;
61586 int res, ret;
61587
61588+ pax_track_stack();
61589+
61590 if (!bitset)
61591 return -EINVAL;
61592
61593@@ -2431,7 +2441,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
61594 {
61595 struct robust_list_head __user *head;
61596 unsigned long ret;
61597+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
61598 const struct cred *cred = current_cred(), *pcred;
61599+#endif
61600
61601 if (!futex_cmpxchg_enabled)
61602 return -ENOSYS;
61603@@ -2447,6 +2459,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
61604 if (!p)
61605 goto err_unlock;
61606 ret = -EPERM;
61607+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61608+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
61609+ goto err_unlock;
61610+#else
61611 pcred = __task_cred(p);
61612 /* If victim is in different user_ns, then uids are not
61613 comparable, so we must have CAP_SYS_PTRACE */
61614@@ -2461,6 +2477,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
61615 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
61616 goto err_unlock;
61617 ok:
61618+#endif
61619 head = p->robust_list;
61620 rcu_read_unlock();
61621 }
61622@@ -2712,6 +2729,7 @@ static int __init futex_init(void)
61623 {
61624 u32 curval;
61625 int i;
61626+ mm_segment_t oldfs;
61627
61628 /*
61629 * This will fail and we want it. Some arch implementations do
61630@@ -2723,8 +2741,11 @@ static int __init futex_init(void)
61631 * implementation, the non-functional ones will return
61632 * -ENOSYS.
61633 */
61634+ oldfs = get_fs();
61635+ set_fs(USER_DS);
61636 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
61637 futex_cmpxchg_enabled = 1;
61638+ set_fs(oldfs);
61639
61640 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
61641 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
61642diff -urNp linux-3.0.4/kernel/futex_compat.c linux-3.0.4/kernel/futex_compat.c
61643--- linux-3.0.4/kernel/futex_compat.c 2011-07-21 22:17:23.000000000 -0400
61644+++ linux-3.0.4/kernel/futex_compat.c 2011-08-23 21:48:14.000000000 -0400
61645@@ -10,6 +10,7 @@
61646 #include <linux/compat.h>
61647 #include <linux/nsproxy.h>
61648 #include <linux/futex.h>
61649+#include <linux/ptrace.h>
61650
61651 #include <asm/uaccess.h>
61652
61653@@ -136,7 +137,10 @@ compat_sys_get_robust_list(int pid, comp
61654 {
61655 struct compat_robust_list_head __user *head;
61656 unsigned long ret;
61657- const struct cred *cred = current_cred(), *pcred;
61658+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
61659+ const struct cred *cred = current_cred();
61660+ const struct cred *pcred;
61661+#endif
61662
61663 if (!futex_cmpxchg_enabled)
61664 return -ENOSYS;
61665@@ -152,6 +156,10 @@ compat_sys_get_robust_list(int pid, comp
61666 if (!p)
61667 goto err_unlock;
61668 ret = -EPERM;
61669+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61670+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
61671+ goto err_unlock;
61672+#else
61673 pcred = __task_cred(p);
61674 /* If victim is in different user_ns, then uids are not
61675 comparable, so we must have CAP_SYS_PTRACE */
61676@@ -166,6 +174,7 @@ compat_sys_get_robust_list(int pid, comp
61677 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
61678 goto err_unlock;
61679 ok:
61680+#endif
61681 head = p->compat_robust_list;
61682 rcu_read_unlock();
61683 }
61684diff -urNp linux-3.0.4/kernel/gcov/base.c linux-3.0.4/kernel/gcov/base.c
61685--- linux-3.0.4/kernel/gcov/base.c 2011-07-21 22:17:23.000000000 -0400
61686+++ linux-3.0.4/kernel/gcov/base.c 2011-08-23 21:47:56.000000000 -0400
61687@@ -102,11 +102,6 @@ void gcov_enable_events(void)
61688 }
61689
61690 #ifdef CONFIG_MODULES
61691-static inline int within(void *addr, void *start, unsigned long size)
61692-{
61693- return ((addr >= start) && (addr < start + size));
61694-}
61695-
61696 /* Update list and generate events when modules are unloaded. */
61697 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
61698 void *data)
61699@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
61700 prev = NULL;
61701 /* Remove entries located in module from linked list. */
61702 for (info = gcov_info_head; info; info = info->next) {
61703- if (within(info, mod->module_core, mod->core_size)) {
61704+ if (within_module_core_rw((unsigned long)info, mod)) {
61705 if (prev)
61706 prev->next = info->next;
61707 else
61708diff -urNp linux-3.0.4/kernel/hrtimer.c linux-3.0.4/kernel/hrtimer.c
61709--- linux-3.0.4/kernel/hrtimer.c 2011-07-21 22:17:23.000000000 -0400
61710+++ linux-3.0.4/kernel/hrtimer.c 2011-08-23 21:47:56.000000000 -0400
61711@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
61712 local_irq_restore(flags);
61713 }
61714
61715-static void run_hrtimer_softirq(struct softirq_action *h)
61716+static void run_hrtimer_softirq(void)
61717 {
61718 hrtimer_peek_ahead_timers();
61719 }
61720diff -urNp linux-3.0.4/kernel/jump_label.c linux-3.0.4/kernel/jump_label.c
61721--- linux-3.0.4/kernel/jump_label.c 2011-07-21 22:17:23.000000000 -0400
61722+++ linux-3.0.4/kernel/jump_label.c 2011-08-23 21:47:56.000000000 -0400
61723@@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entr
61724
61725 size = (((unsigned long)stop - (unsigned long)start)
61726 / sizeof(struct jump_entry));
61727+ pax_open_kernel();
61728 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
61729+ pax_close_kernel();
61730 }
61731
61732 static void jump_label_update(struct jump_label_key *key, int enable);
61733@@ -297,10 +299,12 @@ static void jump_label_invalidate_module
61734 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
61735 struct jump_entry *iter;
61736
61737+ pax_open_kernel();
61738 for (iter = iter_start; iter < iter_stop; iter++) {
61739 if (within_module_init(iter->code, mod))
61740 iter->code = 0;
61741 }
61742+ pax_close_kernel();
61743 }
61744
61745 static int
61746diff -urNp linux-3.0.4/kernel/kallsyms.c linux-3.0.4/kernel/kallsyms.c
61747--- linux-3.0.4/kernel/kallsyms.c 2011-07-21 22:17:23.000000000 -0400
61748+++ linux-3.0.4/kernel/kallsyms.c 2011-08-23 21:48:14.000000000 -0400
61749@@ -11,6 +11,9 @@
61750 * Changed the compression method from stem compression to "table lookup"
61751 * compression (see scripts/kallsyms.c for a more complete description)
61752 */
61753+#ifdef CONFIG_GRKERNSEC_HIDESYM
61754+#define __INCLUDED_BY_HIDESYM 1
61755+#endif
61756 #include <linux/kallsyms.h>
61757 #include <linux/module.h>
61758 #include <linux/init.h>
61759@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_mark
61760
61761 static inline int is_kernel_inittext(unsigned long addr)
61762 {
61763+ if (system_state != SYSTEM_BOOTING)
61764+ return 0;
61765+
61766 if (addr >= (unsigned long)_sinittext
61767 && addr <= (unsigned long)_einittext)
61768 return 1;
61769 return 0;
61770 }
61771
61772+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
61773+#ifdef CONFIG_MODULES
61774+static inline int is_module_text(unsigned long addr)
61775+{
61776+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
61777+ return 1;
61778+
61779+ addr = ktla_ktva(addr);
61780+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
61781+}
61782+#else
61783+static inline int is_module_text(unsigned long addr)
61784+{
61785+ return 0;
61786+}
61787+#endif
61788+#endif
61789+
61790 static inline int is_kernel_text(unsigned long addr)
61791 {
61792 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
61793@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigne
61794
61795 static inline int is_kernel(unsigned long addr)
61796 {
61797+
61798+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
61799+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
61800+ return 1;
61801+
61802+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
61803+#else
61804 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
61805+#endif
61806+
61807 return 1;
61808 return in_gate_area_no_mm(addr);
61809 }
61810
61811 static int is_ksym_addr(unsigned long addr)
61812 {
61813+
61814+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
61815+ if (is_module_text(addr))
61816+ return 0;
61817+#endif
61818+
61819 if (all_var)
61820 return is_kernel(addr);
61821
61822@@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(st
61823
61824 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
61825 {
61826- iter->name[0] = '\0';
61827 iter->nameoff = get_symbol_offset(new_pos);
61828 iter->pos = new_pos;
61829 }
61830@@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, vo
61831 {
61832 struct kallsym_iter *iter = m->private;
61833
61834+#ifdef CONFIG_GRKERNSEC_HIDESYM
61835+ if (current_uid())
61836+ return 0;
61837+#endif
61838+
61839 /* Some debugging symbols have no name. Ignore them. */
61840 if (!iter->name[0])
61841 return 0;
61842@@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *i
61843 struct kallsym_iter *iter;
61844 int ret;
61845
61846- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
61847+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
61848 if (!iter)
61849 return -ENOMEM;
61850 reset_iter(iter, 0);
61851diff -urNp linux-3.0.4/kernel/kexec.c linux-3.0.4/kernel/kexec.c
61852--- linux-3.0.4/kernel/kexec.c 2011-07-21 22:17:23.000000000 -0400
61853+++ linux-3.0.4/kernel/kexec.c 2011-10-06 04:17:55.000000000 -0400
61854@@ -1033,7 +1033,8 @@ asmlinkage long compat_sys_kexec_load(un
61855 unsigned long flags)
61856 {
61857 struct compat_kexec_segment in;
61858- struct kexec_segment out, __user *ksegments;
61859+ struct kexec_segment out;
61860+ struct kexec_segment __user *ksegments;
61861 unsigned long i, result;
61862
61863 /* Don't allow clients that don't understand the native
61864diff -urNp linux-3.0.4/kernel/kmod.c linux-3.0.4/kernel/kmod.c
61865--- linux-3.0.4/kernel/kmod.c 2011-07-21 22:17:23.000000000 -0400
61866+++ linux-3.0.4/kernel/kmod.c 2011-10-06 04:17:55.000000000 -0400
61867@@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
61868 * If module auto-loading support is disabled then this function
61869 * becomes a no-operation.
61870 */
61871-int __request_module(bool wait, const char *fmt, ...)
61872+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
61873 {
61874- va_list args;
61875 char module_name[MODULE_NAME_LEN];
61876 unsigned int max_modprobes;
61877 int ret;
61878- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
61879+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
61880 static char *envp[] = { "HOME=/",
61881 "TERM=linux",
61882 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
61883@@ -88,9 +87,7 @@ int __request_module(bool wait, const ch
61884 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
61885 static int kmod_loop_msg;
61886
61887- va_start(args, fmt);
61888- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
61889- va_end(args);
61890+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
61891 if (ret >= MODULE_NAME_LEN)
61892 return -ENAMETOOLONG;
61893
61894@@ -98,6 +95,20 @@ int __request_module(bool wait, const ch
61895 if (ret)
61896 return ret;
61897
61898+#ifdef CONFIG_GRKERNSEC_MODHARDEN
61899+ if (!current_uid()) {
61900+ /* hack to workaround consolekit/udisks stupidity */
61901+ read_lock(&tasklist_lock);
61902+ if (!strcmp(current->comm, "mount") &&
61903+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
61904+ read_unlock(&tasklist_lock);
61905+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
61906+ return -EPERM;
61907+ }
61908+ read_unlock(&tasklist_lock);
61909+ }
61910+#endif
61911+
61912 /* If modprobe needs a service that is in a module, we get a recursive
61913 * loop. Limit the number of running kmod threads to max_threads/2 or
61914 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
61915@@ -131,6 +142,47 @@ int __request_module(bool wait, const ch
61916 atomic_dec(&kmod_concurrent);
61917 return ret;
61918 }
61919+
61920+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
61921+{
61922+ va_list args;
61923+ int ret;
61924+
61925+ va_start(args, fmt);
61926+ ret = ____request_module(wait, module_param, fmt, args);
61927+ va_end(args);
61928+
61929+ return ret;
61930+}
61931+
61932+int __request_module(bool wait, const char *fmt, ...)
61933+{
61934+ va_list args;
61935+ int ret;
61936+
61937+#ifdef CONFIG_GRKERNSEC_MODHARDEN
61938+ if (current_uid()) {
61939+ char module_param[MODULE_NAME_LEN];
61940+
61941+ memset(module_param, 0, sizeof(module_param));
61942+
61943+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
61944+
61945+ va_start(args, fmt);
61946+ ret = ____request_module(wait, module_param, fmt, args);
61947+ va_end(args);
61948+
61949+ return ret;
61950+ }
61951+#endif
61952+
61953+ va_start(args, fmt);
61954+ ret = ____request_module(wait, NULL, fmt, args);
61955+ va_end(args);
61956+
61957+ return ret;
61958+}
61959+
61960 EXPORT_SYMBOL(__request_module);
61961 #endif /* CONFIG_MODULES */
61962
61963@@ -220,7 +272,7 @@ static int wait_for_helper(void *data)
61964 *
61965 * Thus the __user pointer cast is valid here.
61966 */
61967- sys_wait4(pid, (int __user *)&ret, 0, NULL);
61968+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
61969
61970 /*
61971 * If ret is 0, either ____call_usermodehelper failed and the
61972diff -urNp linux-3.0.4/kernel/kprobes.c linux-3.0.4/kernel/kprobes.c
61973--- linux-3.0.4/kernel/kprobes.c 2011-07-21 22:17:23.000000000 -0400
61974+++ linux-3.0.4/kernel/kprobes.c 2011-08-23 21:47:56.000000000 -0400
61975@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_
61976 * kernel image and loaded module images reside. This is required
61977 * so x86_64 can correctly handle the %rip-relative fixups.
61978 */
61979- kip->insns = module_alloc(PAGE_SIZE);
61980+ kip->insns = module_alloc_exec(PAGE_SIZE);
61981 if (!kip->insns) {
61982 kfree(kip);
61983 return NULL;
61984@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(st
61985 */
61986 if (!list_is_singular(&kip->list)) {
61987 list_del(&kip->list);
61988- module_free(NULL, kip->insns);
61989+ module_free_exec(NULL, kip->insns);
61990 kfree(kip);
61991 }
61992 return 1;
61993@@ -1936,7 +1936,7 @@ static int __init init_kprobes(void)
61994 {
61995 int i, err = 0;
61996 unsigned long offset = 0, size = 0;
61997- char *modname, namebuf[128];
61998+ char *modname, namebuf[KSYM_NAME_LEN];
61999 const char *symbol_name;
62000 void *addr;
62001 struct kprobe_blackpoint *kb;
62002@@ -2062,7 +2062,7 @@ static int __kprobes show_kprobe_addr(st
62003 const char *sym = NULL;
62004 unsigned int i = *(loff_t *) v;
62005 unsigned long offset = 0;
62006- char *modname, namebuf[128];
62007+ char *modname, namebuf[KSYM_NAME_LEN];
62008
62009 head = &kprobe_table[i];
62010 preempt_disable();
62011diff -urNp linux-3.0.4/kernel/lockdep.c linux-3.0.4/kernel/lockdep.c
62012--- linux-3.0.4/kernel/lockdep.c 2011-07-21 22:17:23.000000000 -0400
62013+++ linux-3.0.4/kernel/lockdep.c 2011-08-23 21:47:56.000000000 -0400
62014@@ -583,6 +583,10 @@ static int static_obj(void *obj)
62015 end = (unsigned long) &_end,
62016 addr = (unsigned long) obj;
62017
62018+#ifdef CONFIG_PAX_KERNEXEC
62019+ start = ktla_ktva(start);
62020+#endif
62021+
62022 /*
62023 * static variable?
62024 */
62025@@ -718,6 +722,7 @@ register_lock_class(struct lockdep_map *
62026 if (!static_obj(lock->key)) {
62027 debug_locks_off();
62028 printk("INFO: trying to register non-static key.\n");
62029+ printk("lock:%pS key:%pS.\n", lock, lock->key);
62030 printk("the code is fine but needs lockdep annotation.\n");
62031 printk("turning off the locking correctness validator.\n");
62032 dump_stack();
62033@@ -2936,7 +2941,7 @@ static int __lock_acquire(struct lockdep
62034 if (!class)
62035 return 0;
62036 }
62037- atomic_inc((atomic_t *)&class->ops);
62038+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
62039 if (very_verbose(class)) {
62040 printk("\nacquire class [%p] %s", class->key, class->name);
62041 if (class->name_version > 1)
62042diff -urNp linux-3.0.4/kernel/lockdep_proc.c linux-3.0.4/kernel/lockdep_proc.c
62043--- linux-3.0.4/kernel/lockdep_proc.c 2011-07-21 22:17:23.000000000 -0400
62044+++ linux-3.0.4/kernel/lockdep_proc.c 2011-08-23 21:47:56.000000000 -0400
62045@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
62046
62047 static void print_name(struct seq_file *m, struct lock_class *class)
62048 {
62049- char str[128];
62050+ char str[KSYM_NAME_LEN];
62051 const char *name = class->name;
62052
62053 if (!name) {
62054diff -urNp linux-3.0.4/kernel/module.c linux-3.0.4/kernel/module.c
62055--- linux-3.0.4/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
62056+++ linux-3.0.4/kernel/module.c 2011-08-23 21:48:14.000000000 -0400
62057@@ -58,6 +58,7 @@
62058 #include <linux/jump_label.h>
62059 #include <linux/pfn.h>
62060 #include <linux/bsearch.h>
62061+#include <linux/grsecurity.h>
62062
62063 #define CREATE_TRACE_POINTS
62064 #include <trace/events/module.h>
62065@@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_not
62066
62067 /* Bounds of module allocation, for speeding __module_address.
62068 * Protected by module_mutex. */
62069-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
62070+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
62071+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
62072
62073 int register_module_notifier(struct notifier_block * nb)
62074 {
62075@@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(cons
62076 return true;
62077
62078 list_for_each_entry_rcu(mod, &modules, list) {
62079- struct symsearch arr[] = {
62080+ struct symsearch modarr[] = {
62081 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
62082 NOT_GPL_ONLY, false },
62083 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
62084@@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(cons
62085 #endif
62086 };
62087
62088- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
62089+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
62090 return true;
62091 }
62092 return false;
62093@@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(
62094 static int percpu_modalloc(struct module *mod,
62095 unsigned long size, unsigned long align)
62096 {
62097- if (align > PAGE_SIZE) {
62098+ if (align-1 >= PAGE_SIZE) {
62099 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
62100 mod->name, align, PAGE_SIZE);
62101 align = PAGE_SIZE;
62102@@ -1166,7 +1168,7 @@ resolve_symbol_wait(struct module *mod,
62103 */
62104 #ifdef CONFIG_SYSFS
62105
62106-#ifdef CONFIG_KALLSYMS
62107+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
62108 static inline bool sect_empty(const Elf_Shdr *sect)
62109 {
62110 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
62111@@ -1632,21 +1634,21 @@ static void set_section_ro_nx(void *base
62112
62113 static void unset_module_core_ro_nx(struct module *mod)
62114 {
62115- set_page_attributes(mod->module_core + mod->core_text_size,
62116- mod->module_core + mod->core_size,
62117+ set_page_attributes(mod->module_core_rw,
62118+ mod->module_core_rw + mod->core_size_rw,
62119 set_memory_x);
62120- set_page_attributes(mod->module_core,
62121- mod->module_core + mod->core_ro_size,
62122+ set_page_attributes(mod->module_core_rx,
62123+ mod->module_core_rx + mod->core_size_rx,
62124 set_memory_rw);
62125 }
62126
62127 static void unset_module_init_ro_nx(struct module *mod)
62128 {
62129- set_page_attributes(mod->module_init + mod->init_text_size,
62130- mod->module_init + mod->init_size,
62131+ set_page_attributes(mod->module_init_rw,
62132+ mod->module_init_rw + mod->init_size_rw,
62133 set_memory_x);
62134- set_page_attributes(mod->module_init,
62135- mod->module_init + mod->init_ro_size,
62136+ set_page_attributes(mod->module_init_rx,
62137+ mod->module_init_rx + mod->init_size_rx,
62138 set_memory_rw);
62139 }
62140
62141@@ -1657,14 +1659,14 @@ void set_all_modules_text_rw(void)
62142
62143 mutex_lock(&module_mutex);
62144 list_for_each_entry_rcu(mod, &modules, list) {
62145- if ((mod->module_core) && (mod->core_text_size)) {
62146- set_page_attributes(mod->module_core,
62147- mod->module_core + mod->core_text_size,
62148+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
62149+ set_page_attributes(mod->module_core_rx,
62150+ mod->module_core_rx + mod->core_size_rx,
62151 set_memory_rw);
62152 }
62153- if ((mod->module_init) && (mod->init_text_size)) {
62154- set_page_attributes(mod->module_init,
62155- mod->module_init + mod->init_text_size,
62156+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
62157+ set_page_attributes(mod->module_init_rx,
62158+ mod->module_init_rx + mod->init_size_rx,
62159 set_memory_rw);
62160 }
62161 }
62162@@ -1678,14 +1680,14 @@ void set_all_modules_text_ro(void)
62163
62164 mutex_lock(&module_mutex);
62165 list_for_each_entry_rcu(mod, &modules, list) {
62166- if ((mod->module_core) && (mod->core_text_size)) {
62167- set_page_attributes(mod->module_core,
62168- mod->module_core + mod->core_text_size,
62169+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
62170+ set_page_attributes(mod->module_core_rx,
62171+ mod->module_core_rx + mod->core_size_rx,
62172 set_memory_ro);
62173 }
62174- if ((mod->module_init) && (mod->init_text_size)) {
62175- set_page_attributes(mod->module_init,
62176- mod->module_init + mod->init_text_size,
62177+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
62178+ set_page_attributes(mod->module_init_rx,
62179+ mod->module_init_rx + mod->init_size_rx,
62180 set_memory_ro);
62181 }
62182 }
62183@@ -1722,16 +1724,19 @@ static void free_module(struct module *m
62184
62185 /* This may be NULL, but that's OK */
62186 unset_module_init_ro_nx(mod);
62187- module_free(mod, mod->module_init);
62188+ module_free(mod, mod->module_init_rw);
62189+ module_free_exec(mod, mod->module_init_rx);
62190 kfree(mod->args);
62191 percpu_modfree(mod);
62192
62193 /* Free lock-classes: */
62194- lockdep_free_key_range(mod->module_core, mod->core_size);
62195+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
62196+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
62197
62198 /* Finally, free the core (containing the module structure) */
62199 unset_module_core_ro_nx(mod);
62200- module_free(mod, mod->module_core);
62201+ module_free_exec(mod, mod->module_core_rx);
62202+ module_free(mod, mod->module_core_rw);
62203
62204 #ifdef CONFIG_MPU
62205 update_protections(current->mm);
62206@@ -1800,10 +1805,31 @@ static int simplify_symbols(struct modul
62207 unsigned int i;
62208 int ret = 0;
62209 const struct kernel_symbol *ksym;
62210+#ifdef CONFIG_GRKERNSEC_MODHARDEN
62211+ int is_fs_load = 0;
62212+ int register_filesystem_found = 0;
62213+ char *p;
62214+
62215+ p = strstr(mod->args, "grsec_modharden_fs");
62216+ if (p) {
62217+ char *endptr = p + strlen("grsec_modharden_fs");
62218+ /* copy \0 as well */
62219+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
62220+ is_fs_load = 1;
62221+ }
62222+#endif
62223
62224 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
62225 const char *name = info->strtab + sym[i].st_name;
62226
62227+#ifdef CONFIG_GRKERNSEC_MODHARDEN
62228+ /* it's a real shame this will never get ripped and copied
62229+ upstream! ;(
62230+ */
62231+ if (is_fs_load && !strcmp(name, "register_filesystem"))
62232+ register_filesystem_found = 1;
62233+#endif
62234+
62235 switch (sym[i].st_shndx) {
62236 case SHN_COMMON:
62237 /* We compiled with -fno-common. These are not
62238@@ -1824,7 +1850,9 @@ static int simplify_symbols(struct modul
62239 ksym = resolve_symbol_wait(mod, info, name);
62240 /* Ok if resolved. */
62241 if (ksym && !IS_ERR(ksym)) {
62242+ pax_open_kernel();
62243 sym[i].st_value = ksym->value;
62244+ pax_close_kernel();
62245 break;
62246 }
62247
62248@@ -1843,11 +1871,20 @@ static int simplify_symbols(struct modul
62249 secbase = (unsigned long)mod_percpu(mod);
62250 else
62251 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
62252+ pax_open_kernel();
62253 sym[i].st_value += secbase;
62254+ pax_close_kernel();
62255 break;
62256 }
62257 }
62258
62259+#ifdef CONFIG_GRKERNSEC_MODHARDEN
62260+ if (is_fs_load && !register_filesystem_found) {
62261+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
62262+ ret = -EPERM;
62263+ }
62264+#endif
62265+
62266 return ret;
62267 }
62268
62269@@ -1931,22 +1968,12 @@ static void layout_sections(struct modul
62270 || s->sh_entsize != ~0UL
62271 || strstarts(sname, ".init"))
62272 continue;
62273- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
62274+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
62275+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
62276+ else
62277+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
62278 DEBUGP("\t%s\n", name);
62279 }
62280- switch (m) {
62281- case 0: /* executable */
62282- mod->core_size = debug_align(mod->core_size);
62283- mod->core_text_size = mod->core_size;
62284- break;
62285- case 1: /* RO: text and ro-data */
62286- mod->core_size = debug_align(mod->core_size);
62287- mod->core_ro_size = mod->core_size;
62288- break;
62289- case 3: /* whole core */
62290- mod->core_size = debug_align(mod->core_size);
62291- break;
62292- }
62293 }
62294
62295 DEBUGP("Init section allocation order:\n");
62296@@ -1960,23 +1987,13 @@ static void layout_sections(struct modul
62297 || s->sh_entsize != ~0UL
62298 || !strstarts(sname, ".init"))
62299 continue;
62300- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
62301- | INIT_OFFSET_MASK);
62302+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
62303+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
62304+ else
62305+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
62306+ s->sh_entsize |= INIT_OFFSET_MASK;
62307 DEBUGP("\t%s\n", sname);
62308 }
62309- switch (m) {
62310- case 0: /* executable */
62311- mod->init_size = debug_align(mod->init_size);
62312- mod->init_text_size = mod->init_size;
62313- break;
62314- case 1: /* RO: text and ro-data */
62315- mod->init_size = debug_align(mod->init_size);
62316- mod->init_ro_size = mod->init_size;
62317- break;
62318- case 3: /* whole init */
62319- mod->init_size = debug_align(mod->init_size);
62320- break;
62321- }
62322 }
62323 }
62324
62325@@ -2141,7 +2158,7 @@ static void layout_symtab(struct module
62326
62327 /* Put symbol section at end of init part of module. */
62328 symsect->sh_flags |= SHF_ALLOC;
62329- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
62330+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
62331 info->index.sym) | INIT_OFFSET_MASK;
62332 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
62333
62334@@ -2158,19 +2175,19 @@ static void layout_symtab(struct module
62335 }
62336
62337 /* Append room for core symbols at end of core part. */
62338- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
62339- mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
62340+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
62341+ mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
62342
62343 /* Put string table section at end of init part of module. */
62344 strsect->sh_flags |= SHF_ALLOC;
62345- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
62346+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
62347 info->index.str) | INIT_OFFSET_MASK;
62348 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
62349
62350 /* Append room for core symbols' strings at end of core part. */
62351- info->stroffs = mod->core_size;
62352+ info->stroffs = mod->core_size_rx;
62353 __set_bit(0, info->strmap);
62354- mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
62355+ mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
62356 }
62357
62358 static void add_kallsyms(struct module *mod, const struct load_info *info)
62359@@ -2186,11 +2203,13 @@ static void add_kallsyms(struct module *
62360 /* Make sure we get permanent strtab: don't use info->strtab. */
62361 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
62362
62363+ pax_open_kernel();
62364+
62365 /* Set types up while we still have access to sections. */
62366 for (i = 0; i < mod->num_symtab; i++)
62367 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
62368
62369- mod->core_symtab = dst = mod->module_core + info->symoffs;
62370+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
62371 src = mod->symtab;
62372 *dst = *src;
62373 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
62374@@ -2203,10 +2222,12 @@ static void add_kallsyms(struct module *
62375 }
62376 mod->core_num_syms = ndst;
62377
62378- mod->core_strtab = s = mod->module_core + info->stroffs;
62379+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
62380 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
62381 if (test_bit(i, info->strmap))
62382 *++s = mod->strtab[i];
62383+
62384+ pax_close_kernel();
62385 }
62386 #else
62387 static inline void layout_symtab(struct module *mod, struct load_info *info)
62388@@ -2235,17 +2256,33 @@ static void dynamic_debug_remove(struct
62389 ddebug_remove_module(debug->modname);
62390 }
62391
62392-static void *module_alloc_update_bounds(unsigned long size)
62393+static void *module_alloc_update_bounds_rw(unsigned long size)
62394 {
62395 void *ret = module_alloc(size);
62396
62397 if (ret) {
62398 mutex_lock(&module_mutex);
62399 /* Update module bounds. */
62400- if ((unsigned long)ret < module_addr_min)
62401- module_addr_min = (unsigned long)ret;
62402- if ((unsigned long)ret + size > module_addr_max)
62403- module_addr_max = (unsigned long)ret + size;
62404+ if ((unsigned long)ret < module_addr_min_rw)
62405+ module_addr_min_rw = (unsigned long)ret;
62406+ if ((unsigned long)ret + size > module_addr_max_rw)
62407+ module_addr_max_rw = (unsigned long)ret + size;
62408+ mutex_unlock(&module_mutex);
62409+ }
62410+ return ret;
62411+}
62412+
62413+static void *module_alloc_update_bounds_rx(unsigned long size)
62414+{
62415+ void *ret = module_alloc_exec(size);
62416+
62417+ if (ret) {
62418+ mutex_lock(&module_mutex);
62419+ /* Update module bounds. */
62420+ if ((unsigned long)ret < module_addr_min_rx)
62421+ module_addr_min_rx = (unsigned long)ret;
62422+ if ((unsigned long)ret + size > module_addr_max_rx)
62423+ module_addr_max_rx = (unsigned long)ret + size;
62424 mutex_unlock(&module_mutex);
62425 }
62426 return ret;
62427@@ -2538,7 +2575,7 @@ static int move_module(struct module *mo
62428 void *ptr;
62429
62430 /* Do the allocs. */
62431- ptr = module_alloc_update_bounds(mod->core_size);
62432+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
62433 /*
62434 * The pointer to this block is stored in the module structure
62435 * which is inside the block. Just mark it as not being a
62436@@ -2548,23 +2585,50 @@ static int move_module(struct module *mo
62437 if (!ptr)
62438 return -ENOMEM;
62439
62440- memset(ptr, 0, mod->core_size);
62441- mod->module_core = ptr;
62442+ memset(ptr, 0, mod->core_size_rw);
62443+ mod->module_core_rw = ptr;
62444
62445- ptr = module_alloc_update_bounds(mod->init_size);
62446+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
62447 /*
62448 * The pointer to this block is stored in the module structure
62449 * which is inside the block. This block doesn't need to be
62450 * scanned as it contains data and code that will be freed
62451 * after the module is initialized.
62452 */
62453- kmemleak_ignore(ptr);
62454- if (!ptr && mod->init_size) {
62455- module_free(mod, mod->module_core);
62456+ kmemleak_not_leak(ptr);
62457+ if (!ptr && mod->init_size_rw) {
62458+ module_free(mod, mod->module_core_rw);
62459 return -ENOMEM;
62460 }
62461- memset(ptr, 0, mod->init_size);
62462- mod->module_init = ptr;
62463+ memset(ptr, 0, mod->init_size_rw);
62464+ mod->module_init_rw = ptr;
62465+
62466+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
62467+ kmemleak_not_leak(ptr);
62468+ if (!ptr) {
62469+ module_free(mod, mod->module_init_rw);
62470+ module_free(mod, mod->module_core_rw);
62471+ return -ENOMEM;
62472+ }
62473+
62474+ pax_open_kernel();
62475+ memset(ptr, 0, mod->core_size_rx);
62476+ pax_close_kernel();
62477+ mod->module_core_rx = ptr;
62478+
62479+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
62480+ kmemleak_not_leak(ptr);
62481+ if (!ptr && mod->init_size_rx) {
62482+ module_free_exec(mod, mod->module_core_rx);
62483+ module_free(mod, mod->module_init_rw);
62484+ module_free(mod, mod->module_core_rw);
62485+ return -ENOMEM;
62486+ }
62487+
62488+ pax_open_kernel();
62489+ memset(ptr, 0, mod->init_size_rx);
62490+ pax_close_kernel();
62491+ mod->module_init_rx = ptr;
62492
62493 /* Transfer each section which specifies SHF_ALLOC */
62494 DEBUGP("final section addresses:\n");
62495@@ -2575,16 +2639,45 @@ static int move_module(struct module *mo
62496 if (!(shdr->sh_flags & SHF_ALLOC))
62497 continue;
62498
62499- if (shdr->sh_entsize & INIT_OFFSET_MASK)
62500- dest = mod->module_init
62501- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
62502- else
62503- dest = mod->module_core + shdr->sh_entsize;
62504+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
62505+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
62506+ dest = mod->module_init_rw
62507+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
62508+ else
62509+ dest = mod->module_init_rx
62510+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
62511+ } else {
62512+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
62513+ dest = mod->module_core_rw + shdr->sh_entsize;
62514+ else
62515+ dest = mod->module_core_rx + shdr->sh_entsize;
62516+ }
62517+
62518+ if (shdr->sh_type != SHT_NOBITS) {
62519+
62520+#ifdef CONFIG_PAX_KERNEXEC
62521+#ifdef CONFIG_X86_64
62522+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
62523+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
62524+#endif
62525+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
62526+ pax_open_kernel();
62527+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
62528+ pax_close_kernel();
62529+ } else
62530+#endif
62531
62532- if (shdr->sh_type != SHT_NOBITS)
62533 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
62534+ }
62535 /* Update sh_addr to point to copy in image. */
62536- shdr->sh_addr = (unsigned long)dest;
62537+
62538+#ifdef CONFIG_PAX_KERNEXEC
62539+ if (shdr->sh_flags & SHF_EXECINSTR)
62540+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
62541+ else
62542+#endif
62543+
62544+ shdr->sh_addr = (unsigned long)dest;
62545 DEBUGP("\t0x%lx %s\n",
62546 shdr->sh_addr, info->secstrings + shdr->sh_name);
62547 }
62548@@ -2635,12 +2728,12 @@ static void flush_module_icache(const st
62549 * Do it before processing of module parameters, so the module
62550 * can provide parameter accessor functions of its own.
62551 */
62552- if (mod->module_init)
62553- flush_icache_range((unsigned long)mod->module_init,
62554- (unsigned long)mod->module_init
62555- + mod->init_size);
62556- flush_icache_range((unsigned long)mod->module_core,
62557- (unsigned long)mod->module_core + mod->core_size);
62558+ if (mod->module_init_rx)
62559+ flush_icache_range((unsigned long)mod->module_init_rx,
62560+ (unsigned long)mod->module_init_rx
62561+ + mod->init_size_rx);
62562+ flush_icache_range((unsigned long)mod->module_core_rx,
62563+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
62564
62565 set_fs(old_fs);
62566 }
62567@@ -2712,8 +2805,10 @@ static void module_deallocate(struct mod
62568 {
62569 kfree(info->strmap);
62570 percpu_modfree(mod);
62571- module_free(mod, mod->module_init);
62572- module_free(mod, mod->module_core);
62573+ module_free_exec(mod, mod->module_init_rx);
62574+ module_free_exec(mod, mod->module_core_rx);
62575+ module_free(mod, mod->module_init_rw);
62576+ module_free(mod, mod->module_core_rw);
62577 }
62578
62579 static int post_relocation(struct module *mod, const struct load_info *info)
62580@@ -2770,9 +2865,38 @@ static struct module *load_module(void _
62581 if (err)
62582 goto free_unload;
62583
62584+ /* Now copy in args */
62585+ mod->args = strndup_user(uargs, ~0UL >> 1);
62586+ if (IS_ERR(mod->args)) {
62587+ err = PTR_ERR(mod->args);
62588+ goto free_unload;
62589+ }
62590+
62591 /* Set up MODINFO_ATTR fields */
62592 setup_modinfo(mod, &info);
62593
62594+#ifdef CONFIG_GRKERNSEC_MODHARDEN
62595+ {
62596+ char *p, *p2;
62597+
62598+ if (strstr(mod->args, "grsec_modharden_netdev")) {
62599+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
62600+ err = -EPERM;
62601+ goto free_modinfo;
62602+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
62603+ p += strlen("grsec_modharden_normal");
62604+ p2 = strstr(p, "_");
62605+ if (p2) {
62606+ *p2 = '\0';
62607+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
62608+ *p2 = '_';
62609+ }
62610+ err = -EPERM;
62611+ goto free_modinfo;
62612+ }
62613+ }
62614+#endif
62615+
62616 /* Fix up syms, so that st_value is a pointer to location. */
62617 err = simplify_symbols(mod, &info);
62618 if (err < 0)
62619@@ -2788,13 +2912,6 @@ static struct module *load_module(void _
62620
62621 flush_module_icache(mod);
62622
62623- /* Now copy in args */
62624- mod->args = strndup_user(uargs, ~0UL >> 1);
62625- if (IS_ERR(mod->args)) {
62626- err = PTR_ERR(mod->args);
62627- goto free_arch_cleanup;
62628- }
62629-
62630 /* Mark state as coming so strong_try_module_get() ignores us. */
62631 mod->state = MODULE_STATE_COMING;
62632
62633@@ -2854,11 +2971,10 @@ static struct module *load_module(void _
62634 unlock:
62635 mutex_unlock(&module_mutex);
62636 synchronize_sched();
62637- kfree(mod->args);
62638- free_arch_cleanup:
62639 module_arch_cleanup(mod);
62640 free_modinfo:
62641 free_modinfo(mod);
62642+ kfree(mod->args);
62643 free_unload:
62644 module_unload_free(mod);
62645 free_module:
62646@@ -2899,16 +3015,16 @@ SYSCALL_DEFINE3(init_module, void __user
62647 MODULE_STATE_COMING, mod);
62648
62649 /* Set RO and NX regions for core */
62650- set_section_ro_nx(mod->module_core,
62651- mod->core_text_size,
62652- mod->core_ro_size,
62653- mod->core_size);
62654+ set_section_ro_nx(mod->module_core_rx,
62655+ mod->core_size_rx,
62656+ mod->core_size_rx,
62657+ mod->core_size_rx);
62658
62659 /* Set RO and NX regions for init */
62660- set_section_ro_nx(mod->module_init,
62661- mod->init_text_size,
62662- mod->init_ro_size,
62663- mod->init_size);
62664+ set_section_ro_nx(mod->module_init_rx,
62665+ mod->init_size_rx,
62666+ mod->init_size_rx,
62667+ mod->init_size_rx);
62668
62669 do_mod_ctors(mod);
62670 /* Start the module */
62671@@ -2954,11 +3070,12 @@ SYSCALL_DEFINE3(init_module, void __user
62672 mod->strtab = mod->core_strtab;
62673 #endif
62674 unset_module_init_ro_nx(mod);
62675- module_free(mod, mod->module_init);
62676- mod->module_init = NULL;
62677- mod->init_size = 0;
62678- mod->init_ro_size = 0;
62679- mod->init_text_size = 0;
62680+ module_free(mod, mod->module_init_rw);
62681+ module_free_exec(mod, mod->module_init_rx);
62682+ mod->module_init_rw = NULL;
62683+ mod->module_init_rx = NULL;
62684+ mod->init_size_rw = 0;
62685+ mod->init_size_rx = 0;
62686 mutex_unlock(&module_mutex);
62687
62688 return 0;
62689@@ -2989,10 +3106,16 @@ static const char *get_ksymbol(struct mo
62690 unsigned long nextval;
62691
62692 /* At worse, next value is at end of module */
62693- if (within_module_init(addr, mod))
62694- nextval = (unsigned long)mod->module_init+mod->init_text_size;
62695+ if (within_module_init_rx(addr, mod))
62696+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
62697+ else if (within_module_init_rw(addr, mod))
62698+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
62699+ else if (within_module_core_rx(addr, mod))
62700+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
62701+ else if (within_module_core_rw(addr, mod))
62702+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
62703 else
62704- nextval = (unsigned long)mod->module_core+mod->core_text_size;
62705+ return NULL;
62706
62707 /* Scan for closest preceding symbol, and next symbol. (ELF
62708 starts real symbols at 1). */
62709@@ -3238,7 +3361,7 @@ static int m_show(struct seq_file *m, vo
62710 char buf[8];
62711
62712 seq_printf(m, "%s %u",
62713- mod->name, mod->init_size + mod->core_size);
62714+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
62715 print_unload_info(m, mod);
62716
62717 /* Informative for users. */
62718@@ -3247,7 +3370,7 @@ static int m_show(struct seq_file *m, vo
62719 mod->state == MODULE_STATE_COMING ? "Loading":
62720 "Live");
62721 /* Used by oprofile and other similar tools. */
62722- seq_printf(m, " 0x%pK", mod->module_core);
62723+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
62724
62725 /* Taints info */
62726 if (mod->taints)
62727@@ -3283,7 +3406,17 @@ static const struct file_operations proc
62728
62729 static int __init proc_modules_init(void)
62730 {
62731+#ifndef CONFIG_GRKERNSEC_HIDESYM
62732+#ifdef CONFIG_GRKERNSEC_PROC_USER
62733+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
62734+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62735+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
62736+#else
62737 proc_create("modules", 0, NULL, &proc_modules_operations);
62738+#endif
62739+#else
62740+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
62741+#endif
62742 return 0;
62743 }
62744 module_init(proc_modules_init);
62745@@ -3342,12 +3475,12 @@ struct module *__module_address(unsigned
62746 {
62747 struct module *mod;
62748
62749- if (addr < module_addr_min || addr > module_addr_max)
62750+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
62751+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
62752 return NULL;
62753
62754 list_for_each_entry_rcu(mod, &modules, list)
62755- if (within_module_core(addr, mod)
62756- || within_module_init(addr, mod))
62757+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
62758 return mod;
62759 return NULL;
62760 }
62761@@ -3381,11 +3514,20 @@ bool is_module_text_address(unsigned lon
62762 */
62763 struct module *__module_text_address(unsigned long addr)
62764 {
62765- struct module *mod = __module_address(addr);
62766+ struct module *mod;
62767+
62768+#ifdef CONFIG_X86_32
62769+ addr = ktla_ktva(addr);
62770+#endif
62771+
62772+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
62773+ return NULL;
62774+
62775+ mod = __module_address(addr);
62776+
62777 if (mod) {
62778 /* Make sure it's within the text section. */
62779- if (!within(addr, mod->module_init, mod->init_text_size)
62780- && !within(addr, mod->module_core, mod->core_text_size))
62781+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
62782 mod = NULL;
62783 }
62784 return mod;
62785diff -urNp linux-3.0.4/kernel/mutex.c linux-3.0.4/kernel/mutex.c
62786--- linux-3.0.4/kernel/mutex.c 2011-07-21 22:17:23.000000000 -0400
62787+++ linux-3.0.4/kernel/mutex.c 2011-08-23 21:47:56.000000000 -0400
62788@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock,
62789 spin_lock_mutex(&lock->wait_lock, flags);
62790
62791 debug_mutex_lock_common(lock, &waiter);
62792- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
62793+ debug_mutex_add_waiter(lock, &waiter, task);
62794
62795 /* add waiting tasks to the end of the waitqueue (FIFO): */
62796 list_add_tail(&waiter.list, &lock->wait_list);
62797@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock,
62798 * TASK_UNINTERRUPTIBLE case.)
62799 */
62800 if (unlikely(signal_pending_state(state, task))) {
62801- mutex_remove_waiter(lock, &waiter,
62802- task_thread_info(task));
62803+ mutex_remove_waiter(lock, &waiter, task);
62804 mutex_release(&lock->dep_map, 1, ip);
62805 spin_unlock_mutex(&lock->wait_lock, flags);
62806
62807@@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock,
62808 done:
62809 lock_acquired(&lock->dep_map, ip);
62810 /* got the lock - rejoice! */
62811- mutex_remove_waiter(lock, &waiter, current_thread_info());
62812+ mutex_remove_waiter(lock, &waiter, task);
62813 mutex_set_owner(lock);
62814
62815 /* set it to 0 if there are no waiters left: */
62816diff -urNp linux-3.0.4/kernel/mutex-debug.c linux-3.0.4/kernel/mutex-debug.c
62817--- linux-3.0.4/kernel/mutex-debug.c 2011-07-21 22:17:23.000000000 -0400
62818+++ linux-3.0.4/kernel/mutex-debug.c 2011-08-23 21:47:56.000000000 -0400
62819@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
62820 }
62821
62822 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
62823- struct thread_info *ti)
62824+ struct task_struct *task)
62825 {
62826 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
62827
62828 /* Mark the current thread as blocked on the lock: */
62829- ti->task->blocked_on = waiter;
62830+ task->blocked_on = waiter;
62831 }
62832
62833 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
62834- struct thread_info *ti)
62835+ struct task_struct *task)
62836 {
62837 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
62838- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
62839- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
62840- ti->task->blocked_on = NULL;
62841+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
62842+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
62843+ task->blocked_on = NULL;
62844
62845 list_del_init(&waiter->list);
62846 waiter->task = NULL;
62847diff -urNp linux-3.0.4/kernel/mutex-debug.h linux-3.0.4/kernel/mutex-debug.h
62848--- linux-3.0.4/kernel/mutex-debug.h 2011-07-21 22:17:23.000000000 -0400
62849+++ linux-3.0.4/kernel/mutex-debug.h 2011-08-23 21:47:56.000000000 -0400
62850@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(stru
62851 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
62852 extern void debug_mutex_add_waiter(struct mutex *lock,
62853 struct mutex_waiter *waiter,
62854- struct thread_info *ti);
62855+ struct task_struct *task);
62856 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
62857- struct thread_info *ti);
62858+ struct task_struct *task);
62859 extern void debug_mutex_unlock(struct mutex *lock);
62860 extern void debug_mutex_init(struct mutex *lock, const char *name,
62861 struct lock_class_key *key);
62862diff -urNp linux-3.0.4/kernel/padata.c linux-3.0.4/kernel/padata.c
62863--- linux-3.0.4/kernel/padata.c 2011-07-21 22:17:23.000000000 -0400
62864+++ linux-3.0.4/kernel/padata.c 2011-08-23 21:47:56.000000000 -0400
62865@@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_ins
62866 padata->pd = pd;
62867 padata->cb_cpu = cb_cpu;
62868
62869- if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
62870- atomic_set(&pd->seq_nr, -1);
62871+ if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
62872+ atomic_set_unchecked(&pd->seq_nr, -1);
62873
62874- padata->seq_nr = atomic_inc_return(&pd->seq_nr);
62875+ padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
62876
62877 target_cpu = padata_cpu_hash(padata);
62878 queue = per_cpu_ptr(pd->pqueue, target_cpu);
62879@@ -444,7 +444,7 @@ static struct parallel_data *padata_allo
62880 padata_init_pqueues(pd);
62881 padata_init_squeues(pd);
62882 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
62883- atomic_set(&pd->seq_nr, -1);
62884+ atomic_set_unchecked(&pd->seq_nr, -1);
62885 atomic_set(&pd->reorder_objects, 0);
62886 atomic_set(&pd->refcnt, 0);
62887 pd->pinst = pinst;
62888diff -urNp linux-3.0.4/kernel/panic.c linux-3.0.4/kernel/panic.c
62889--- linux-3.0.4/kernel/panic.c 2011-07-21 22:17:23.000000000 -0400
62890+++ linux-3.0.4/kernel/panic.c 2011-08-23 21:48:14.000000000 -0400
62891@@ -369,7 +369,7 @@ static void warn_slowpath_common(const c
62892 const char *board;
62893
62894 printk(KERN_WARNING "------------[ cut here ]------------\n");
62895- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
62896+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
62897 board = dmi_get_system_info(DMI_PRODUCT_NAME);
62898 if (board)
62899 printk(KERN_WARNING "Hardware name: %s\n", board);
62900@@ -424,7 +424,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
62901 */
62902 void __stack_chk_fail(void)
62903 {
62904- panic("stack-protector: Kernel stack is corrupted in: %p\n",
62905+ dump_stack();
62906+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
62907 __builtin_return_address(0));
62908 }
62909 EXPORT_SYMBOL(__stack_chk_fail);
62910diff -urNp linux-3.0.4/kernel/pid.c linux-3.0.4/kernel/pid.c
62911--- linux-3.0.4/kernel/pid.c 2011-07-21 22:17:23.000000000 -0400
62912+++ linux-3.0.4/kernel/pid.c 2011-08-23 21:48:14.000000000 -0400
62913@@ -33,6 +33,7 @@
62914 #include <linux/rculist.h>
62915 #include <linux/bootmem.h>
62916 #include <linux/hash.h>
62917+#include <linux/security.h>
62918 #include <linux/pid_namespace.h>
62919 #include <linux/init_task.h>
62920 #include <linux/syscalls.h>
62921@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
62922
62923 int pid_max = PID_MAX_DEFAULT;
62924
62925-#define RESERVED_PIDS 300
62926+#define RESERVED_PIDS 500
62927
62928 int pid_max_min = RESERVED_PIDS + 1;
62929 int pid_max_max = PID_MAX_LIMIT;
62930@@ -419,8 +420,15 @@ EXPORT_SYMBOL(pid_task);
62931 */
62932 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
62933 {
62934+ struct task_struct *task;
62935+
62936 rcu_lockdep_assert(rcu_read_lock_held());
62937- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
62938+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
62939+
62940+ if (gr_pid_is_chrooted(task))
62941+ return NULL;
62942+
62943+ return task;
62944 }
62945
62946 struct task_struct *find_task_by_vpid(pid_t vnr)
62947@@ -428,6 +436,12 @@ struct task_struct *find_task_by_vpid(pi
62948 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
62949 }
62950
62951+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
62952+{
62953+ rcu_lockdep_assert(rcu_read_lock_held());
62954+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
62955+}
62956+
62957 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
62958 {
62959 struct pid *pid;
62960diff -urNp linux-3.0.4/kernel/posix-cpu-timers.c linux-3.0.4/kernel/posix-cpu-timers.c
62961--- linux-3.0.4/kernel/posix-cpu-timers.c 2011-07-21 22:17:23.000000000 -0400
62962+++ linux-3.0.4/kernel/posix-cpu-timers.c 2011-08-23 21:48:14.000000000 -0400
62963@@ -6,6 +6,7 @@
62964 #include <linux/posix-timers.h>
62965 #include <linux/errno.h>
62966 #include <linux/math64.h>
62967+#include <linux/security.h>
62968 #include <asm/uaccess.h>
62969 #include <linux/kernel_stat.h>
62970 #include <trace/events/timer.h>
62971@@ -1604,14 +1605,14 @@ struct k_clock clock_posix_cpu = {
62972
62973 static __init int init_posix_cpu_timers(void)
62974 {
62975- struct k_clock process = {
62976+ static struct k_clock process = {
62977 .clock_getres = process_cpu_clock_getres,
62978 .clock_get = process_cpu_clock_get,
62979 .timer_create = process_cpu_timer_create,
62980 .nsleep = process_cpu_nsleep,
62981 .nsleep_restart = process_cpu_nsleep_restart,
62982 };
62983- struct k_clock thread = {
62984+ static struct k_clock thread = {
62985 .clock_getres = thread_cpu_clock_getres,
62986 .clock_get = thread_cpu_clock_get,
62987 .timer_create = thread_cpu_timer_create,
62988diff -urNp linux-3.0.4/kernel/posix-timers.c linux-3.0.4/kernel/posix-timers.c
62989--- linux-3.0.4/kernel/posix-timers.c 2011-07-21 22:17:23.000000000 -0400
62990+++ linux-3.0.4/kernel/posix-timers.c 2011-08-23 21:48:14.000000000 -0400
62991@@ -43,6 +43,7 @@
62992 #include <linux/idr.h>
62993 #include <linux/posix-clock.h>
62994 #include <linux/posix-timers.h>
62995+#include <linux/grsecurity.h>
62996 #include <linux/syscalls.h>
62997 #include <linux/wait.h>
62998 #include <linux/workqueue.h>
62999@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
63000 * which we beg off on and pass to do_sys_settimeofday().
63001 */
63002
63003-static struct k_clock posix_clocks[MAX_CLOCKS];
63004+static struct k_clock *posix_clocks[MAX_CLOCKS];
63005
63006 /*
63007 * These ones are defined below.
63008@@ -227,7 +228,7 @@ static int posix_get_boottime(const cloc
63009 */
63010 static __init int init_posix_timers(void)
63011 {
63012- struct k_clock clock_realtime = {
63013+ static struct k_clock clock_realtime = {
63014 .clock_getres = hrtimer_get_res,
63015 .clock_get = posix_clock_realtime_get,
63016 .clock_set = posix_clock_realtime_set,
63017@@ -239,7 +240,7 @@ static __init int init_posix_timers(void
63018 .timer_get = common_timer_get,
63019 .timer_del = common_timer_del,
63020 };
63021- struct k_clock clock_monotonic = {
63022+ static struct k_clock clock_monotonic = {
63023 .clock_getres = hrtimer_get_res,
63024 .clock_get = posix_ktime_get_ts,
63025 .nsleep = common_nsleep,
63026@@ -249,19 +250,19 @@ static __init int init_posix_timers(void
63027 .timer_get = common_timer_get,
63028 .timer_del = common_timer_del,
63029 };
63030- struct k_clock clock_monotonic_raw = {
63031+ static struct k_clock clock_monotonic_raw = {
63032 .clock_getres = hrtimer_get_res,
63033 .clock_get = posix_get_monotonic_raw,
63034 };
63035- struct k_clock clock_realtime_coarse = {
63036+ static struct k_clock clock_realtime_coarse = {
63037 .clock_getres = posix_get_coarse_res,
63038 .clock_get = posix_get_realtime_coarse,
63039 };
63040- struct k_clock clock_monotonic_coarse = {
63041+ static struct k_clock clock_monotonic_coarse = {
63042 .clock_getres = posix_get_coarse_res,
63043 .clock_get = posix_get_monotonic_coarse,
63044 };
63045- struct k_clock clock_boottime = {
63046+ static struct k_clock clock_boottime = {
63047 .clock_getres = hrtimer_get_res,
63048 .clock_get = posix_get_boottime,
63049 .nsleep = common_nsleep,
63050@@ -272,6 +273,8 @@ static __init int init_posix_timers(void
63051 .timer_del = common_timer_del,
63052 };
63053
63054+ pax_track_stack();
63055+
63056 posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
63057 posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
63058 posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
63059@@ -473,7 +476,7 @@ void posix_timers_register_clock(const c
63060 return;
63061 }
63062
63063- posix_clocks[clock_id] = *new_clock;
63064+ posix_clocks[clock_id] = new_clock;
63065 }
63066 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
63067
63068@@ -519,9 +522,9 @@ static struct k_clock *clockid_to_kclock
63069 return (id & CLOCKFD_MASK) == CLOCKFD ?
63070 &clock_posix_dynamic : &clock_posix_cpu;
63071
63072- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
63073+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
63074 return NULL;
63075- return &posix_clocks[id];
63076+ return posix_clocks[id];
63077 }
63078
63079 static int common_timer_create(struct k_itimer *new_timer)
63080@@ -959,6 +962,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
63081 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
63082 return -EFAULT;
63083
63084+ /* only the CLOCK_REALTIME clock can be set, all other clocks
63085+ have their clock_set fptr set to a nosettime dummy function
63086+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
63087+ call common_clock_set, which calls do_sys_settimeofday, which
63088+ we hook
63089+ */
63090+
63091 return kc->clock_set(which_clock, &new_tp);
63092 }
63093
63094diff -urNp linux-3.0.4/kernel/power/poweroff.c linux-3.0.4/kernel/power/poweroff.c
63095--- linux-3.0.4/kernel/power/poweroff.c 2011-07-21 22:17:23.000000000 -0400
63096+++ linux-3.0.4/kernel/power/poweroff.c 2011-08-23 21:47:56.000000000 -0400
63097@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
63098 .enable_mask = SYSRQ_ENABLE_BOOT,
63099 };
63100
63101-static int pm_sysrq_init(void)
63102+static int __init pm_sysrq_init(void)
63103 {
63104 register_sysrq_key('o', &sysrq_poweroff_op);
63105 return 0;
63106diff -urNp linux-3.0.4/kernel/power/process.c linux-3.0.4/kernel/power/process.c
63107--- linux-3.0.4/kernel/power/process.c 2011-07-21 22:17:23.000000000 -0400
63108+++ linux-3.0.4/kernel/power/process.c 2011-08-23 21:47:56.000000000 -0400
63109@@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_
63110 u64 elapsed_csecs64;
63111 unsigned int elapsed_csecs;
63112 bool wakeup = false;
63113+ bool timedout = false;
63114
63115 do_gettimeofday(&start);
63116
63117@@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_
63118
63119 while (true) {
63120 todo = 0;
63121+ if (time_after(jiffies, end_time))
63122+ timedout = true;
63123 read_lock(&tasklist_lock);
63124 do_each_thread(g, p) {
63125 if (frozen(p) || !freezable(p))
63126@@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_
63127 * try_to_stop() after schedule() in ptrace/signal
63128 * stop sees TIF_FREEZE.
63129 */
63130- if (!task_is_stopped_or_traced(p) &&
63131- !freezer_should_skip(p))
63132+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
63133 todo++;
63134+ if (timedout) {
63135+ printk(KERN_ERR "Task refusing to freeze:\n");
63136+ sched_show_task(p);
63137+ }
63138+ }
63139 } while_each_thread(g, p);
63140 read_unlock(&tasklist_lock);
63141
63142@@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_
63143 todo += wq_busy;
63144 }
63145
63146- if (!todo || time_after(jiffies, end_time))
63147+ if (!todo || timedout)
63148 break;
63149
63150 if (pm_wakeup_pending()) {
63151diff -urNp linux-3.0.4/kernel/printk.c linux-3.0.4/kernel/printk.c
63152--- linux-3.0.4/kernel/printk.c 2011-07-21 22:17:23.000000000 -0400
63153+++ linux-3.0.4/kernel/printk.c 2011-08-23 21:48:14.000000000 -0400
63154@@ -313,12 +313,17 @@ static int check_syslog_permissions(int
63155 if (from_file && type != SYSLOG_ACTION_OPEN)
63156 return 0;
63157
63158+#ifdef CONFIG_GRKERNSEC_DMESG
63159+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
63160+ return -EPERM;
63161+#endif
63162+
63163 if (syslog_action_restricted(type)) {
63164 if (capable(CAP_SYSLOG))
63165 return 0;
63166 /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
63167 if (capable(CAP_SYS_ADMIN)) {
63168- WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
63169+ printk_once(KERN_WARNING "Attempt to access syslog with CAP_SYS_ADMIN "
63170 "but no CAP_SYSLOG (deprecated).\n");
63171 return 0;
63172 }
63173diff -urNp linux-3.0.4/kernel/profile.c linux-3.0.4/kernel/profile.c
63174--- linux-3.0.4/kernel/profile.c 2011-07-21 22:17:23.000000000 -0400
63175+++ linux-3.0.4/kernel/profile.c 2011-08-23 21:47:56.000000000 -0400
63176@@ -39,7 +39,7 @@ struct profile_hit {
63177 /* Oprofile timer tick hook */
63178 static int (*timer_hook)(struct pt_regs *) __read_mostly;
63179
63180-static atomic_t *prof_buffer;
63181+static atomic_unchecked_t *prof_buffer;
63182 static unsigned long prof_len, prof_shift;
63183
63184 int prof_on __read_mostly;
63185@@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
63186 hits[i].pc = 0;
63187 continue;
63188 }
63189- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
63190+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
63191 hits[i].hits = hits[i].pc = 0;
63192 }
63193 }
63194@@ -342,9 +342,9 @@ static void do_profile_hits(int type, vo
63195 * Add the current hit(s) and flush the write-queue out
63196 * to the global buffer:
63197 */
63198- atomic_add(nr_hits, &prof_buffer[pc]);
63199+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
63200 for (i = 0; i < NR_PROFILE_HIT; ++i) {
63201- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
63202+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
63203 hits[i].pc = hits[i].hits = 0;
63204 }
63205 out:
63206@@ -419,7 +419,7 @@ static void do_profile_hits(int type, vo
63207 {
63208 unsigned long pc;
63209 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
63210- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
63211+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
63212 }
63213 #endif /* !CONFIG_SMP */
63214
63215@@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
63216 return -EFAULT;
63217 buf++; p++; count--; read++;
63218 }
63219- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
63220+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
63221 if (copy_to_user(buf, (void *)pnt, count))
63222 return -EFAULT;
63223 read += count;
63224@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
63225 }
63226 #endif
63227 profile_discard_flip_buffers();
63228- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
63229+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
63230 return count;
63231 }
63232
63233diff -urNp linux-3.0.4/kernel/ptrace.c linux-3.0.4/kernel/ptrace.c
63234--- linux-3.0.4/kernel/ptrace.c 2011-07-21 22:17:23.000000000 -0400
63235+++ linux-3.0.4/kernel/ptrace.c 2011-08-23 21:48:14.000000000 -0400
63236@@ -132,7 +132,8 @@ int ptrace_check_attach(struct task_stru
63237 return ret;
63238 }
63239
63240-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
63241+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
63242+ unsigned int log)
63243 {
63244 const struct cred *cred = current_cred(), *tcred;
63245
63246@@ -158,7 +159,8 @@ int __ptrace_may_access(struct task_stru
63247 cred->gid == tcred->sgid &&
63248 cred->gid == tcred->gid))
63249 goto ok;
63250- if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
63251+ if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
63252+ (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
63253 goto ok;
63254 rcu_read_unlock();
63255 return -EPERM;
63256@@ -167,7 +169,9 @@ ok:
63257 smp_rmb();
63258 if (task->mm)
63259 dumpable = get_dumpable(task->mm);
63260- if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
63261+ if (!dumpable &&
63262+ ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
63263+ (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
63264 return -EPERM;
63265
63266 return security_ptrace_access_check(task, mode);
63267@@ -177,7 +181,16 @@ bool ptrace_may_access(struct task_struc
63268 {
63269 int err;
63270 task_lock(task);
63271- err = __ptrace_may_access(task, mode);
63272+ err = __ptrace_may_access(task, mode, 0);
63273+ task_unlock(task);
63274+ return !err;
63275+}
63276+
63277+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
63278+{
63279+ int err;
63280+ task_lock(task);
63281+ err = __ptrace_may_access(task, mode, 1);
63282 task_unlock(task);
63283 return !err;
63284 }
63285@@ -205,7 +218,7 @@ static int ptrace_attach(struct task_str
63286 goto out;
63287
63288 task_lock(task);
63289- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
63290+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
63291 task_unlock(task);
63292 if (retval)
63293 goto unlock_creds;
63294@@ -218,7 +231,7 @@ static int ptrace_attach(struct task_str
63295 goto unlock_tasklist;
63296
63297 task->ptrace = PT_PTRACED;
63298- if (task_ns_capable(task, CAP_SYS_PTRACE))
63299+ if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
63300 task->ptrace |= PT_PTRACE_CAP;
63301
63302 __ptrace_link(task, current);
63303@@ -406,6 +419,8 @@ int ptrace_readdata(struct task_struct *
63304 {
63305 int copied = 0;
63306
63307+ pax_track_stack();
63308+
63309 while (len > 0) {
63310 char buf[128];
63311 int this_len, retval;
63312@@ -417,7 +432,7 @@ int ptrace_readdata(struct task_struct *
63313 break;
63314 return -EIO;
63315 }
63316- if (copy_to_user(dst, buf, retval))
63317+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
63318 return -EFAULT;
63319 copied += retval;
63320 src += retval;
63321@@ -431,6 +446,8 @@ int ptrace_writedata(struct task_struct
63322 {
63323 int copied = 0;
63324
63325+ pax_track_stack();
63326+
63327 while (len > 0) {
63328 char buf[128];
63329 int this_len, retval;
63330@@ -613,9 +630,11 @@ int ptrace_request(struct task_struct *c
63331 {
63332 int ret = -EIO;
63333 siginfo_t siginfo;
63334- void __user *datavp = (void __user *) data;
63335+ void __user *datavp = (__force void __user *) data;
63336 unsigned long __user *datalp = datavp;
63337
63338+ pax_track_stack();
63339+
63340 switch (request) {
63341 case PTRACE_PEEKTEXT:
63342 case PTRACE_PEEKDATA:
63343@@ -761,14 +780,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
63344 goto out;
63345 }
63346
63347+ if (gr_handle_ptrace(child, request)) {
63348+ ret = -EPERM;
63349+ goto out_put_task_struct;
63350+ }
63351+
63352 if (request == PTRACE_ATTACH) {
63353 ret = ptrace_attach(child);
63354 /*
63355 * Some architectures need to do book-keeping after
63356 * a ptrace attach.
63357 */
63358- if (!ret)
63359+ if (!ret) {
63360 arch_ptrace_attach(child);
63361+ gr_audit_ptrace(child);
63362+ }
63363 goto out_put_task_struct;
63364 }
63365
63366@@ -793,7 +819,7 @@ int generic_ptrace_peekdata(struct task_
63367 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
63368 if (copied != sizeof(tmp))
63369 return -EIO;
63370- return put_user(tmp, (unsigned long __user *)data);
63371+ return put_user(tmp, (__force unsigned long __user *)data);
63372 }
63373
63374 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
63375@@ -816,6 +842,8 @@ int compat_ptrace_request(struct task_st
63376 siginfo_t siginfo;
63377 int ret;
63378
63379+ pax_track_stack();
63380+
63381 switch (request) {
63382 case PTRACE_PEEKTEXT:
63383 case PTRACE_PEEKDATA:
63384@@ -903,14 +931,21 @@ asmlinkage long compat_sys_ptrace(compat
63385 goto out;
63386 }
63387
63388+ if (gr_handle_ptrace(child, request)) {
63389+ ret = -EPERM;
63390+ goto out_put_task_struct;
63391+ }
63392+
63393 if (request == PTRACE_ATTACH) {
63394 ret = ptrace_attach(child);
63395 /*
63396 * Some architectures need to do book-keeping after
63397 * a ptrace attach.
63398 */
63399- if (!ret)
63400+ if (!ret) {
63401 arch_ptrace_attach(child);
63402+ gr_audit_ptrace(child);
63403+ }
63404 goto out_put_task_struct;
63405 }
63406
63407diff -urNp linux-3.0.4/kernel/rcutorture.c linux-3.0.4/kernel/rcutorture.c
63408--- linux-3.0.4/kernel/rcutorture.c 2011-07-21 22:17:23.000000000 -0400
63409+++ linux-3.0.4/kernel/rcutorture.c 2011-08-23 21:47:56.000000000 -0400
63410@@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
63411 { 0 };
63412 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
63413 { 0 };
63414-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
63415-static atomic_t n_rcu_torture_alloc;
63416-static atomic_t n_rcu_torture_alloc_fail;
63417-static atomic_t n_rcu_torture_free;
63418-static atomic_t n_rcu_torture_mberror;
63419-static atomic_t n_rcu_torture_error;
63420+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
63421+static atomic_unchecked_t n_rcu_torture_alloc;
63422+static atomic_unchecked_t n_rcu_torture_alloc_fail;
63423+static atomic_unchecked_t n_rcu_torture_free;
63424+static atomic_unchecked_t n_rcu_torture_mberror;
63425+static atomic_unchecked_t n_rcu_torture_error;
63426 static long n_rcu_torture_boost_ktrerror;
63427 static long n_rcu_torture_boost_rterror;
63428 static long n_rcu_torture_boost_failure;
63429@@ -223,11 +223,11 @@ rcu_torture_alloc(void)
63430
63431 spin_lock_bh(&rcu_torture_lock);
63432 if (list_empty(&rcu_torture_freelist)) {
63433- atomic_inc(&n_rcu_torture_alloc_fail);
63434+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
63435 spin_unlock_bh(&rcu_torture_lock);
63436 return NULL;
63437 }
63438- atomic_inc(&n_rcu_torture_alloc);
63439+ atomic_inc_unchecked(&n_rcu_torture_alloc);
63440 p = rcu_torture_freelist.next;
63441 list_del_init(p);
63442 spin_unlock_bh(&rcu_torture_lock);
63443@@ -240,7 +240,7 @@ rcu_torture_alloc(void)
63444 static void
63445 rcu_torture_free(struct rcu_torture *p)
63446 {
63447- atomic_inc(&n_rcu_torture_free);
63448+ atomic_inc_unchecked(&n_rcu_torture_free);
63449 spin_lock_bh(&rcu_torture_lock);
63450 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
63451 spin_unlock_bh(&rcu_torture_lock);
63452@@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
63453 i = rp->rtort_pipe_count;
63454 if (i > RCU_TORTURE_PIPE_LEN)
63455 i = RCU_TORTURE_PIPE_LEN;
63456- atomic_inc(&rcu_torture_wcount[i]);
63457+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
63458 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
63459 rp->rtort_mbtest = 0;
63460 rcu_torture_free(rp);
63461@@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_fr
63462 i = rp->rtort_pipe_count;
63463 if (i > RCU_TORTURE_PIPE_LEN)
63464 i = RCU_TORTURE_PIPE_LEN;
63465- atomic_inc(&rcu_torture_wcount[i]);
63466+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
63467 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
63468 rp->rtort_mbtest = 0;
63469 list_del(&rp->rtort_free);
63470@@ -882,7 +882,7 @@ rcu_torture_writer(void *arg)
63471 i = old_rp->rtort_pipe_count;
63472 if (i > RCU_TORTURE_PIPE_LEN)
63473 i = RCU_TORTURE_PIPE_LEN;
63474- atomic_inc(&rcu_torture_wcount[i]);
63475+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
63476 old_rp->rtort_pipe_count++;
63477 cur_ops->deferred_free(old_rp);
63478 }
63479@@ -951,7 +951,7 @@ static void rcu_torture_timer(unsigned l
63480 return;
63481 }
63482 if (p->rtort_mbtest == 0)
63483- atomic_inc(&n_rcu_torture_mberror);
63484+ atomic_inc_unchecked(&n_rcu_torture_mberror);
63485 spin_lock(&rand_lock);
63486 cur_ops->read_delay(&rand);
63487 n_rcu_torture_timers++;
63488@@ -1013,7 +1013,7 @@ rcu_torture_reader(void *arg)
63489 continue;
63490 }
63491 if (p->rtort_mbtest == 0)
63492- atomic_inc(&n_rcu_torture_mberror);
63493+ atomic_inc_unchecked(&n_rcu_torture_mberror);
63494 cur_ops->read_delay(&rand);
63495 preempt_disable();
63496 pipe_count = p->rtort_pipe_count;
63497@@ -1072,16 +1072,16 @@ rcu_torture_printk(char *page)
63498 rcu_torture_current,
63499 rcu_torture_current_version,
63500 list_empty(&rcu_torture_freelist),
63501- atomic_read(&n_rcu_torture_alloc),
63502- atomic_read(&n_rcu_torture_alloc_fail),
63503- atomic_read(&n_rcu_torture_free),
63504- atomic_read(&n_rcu_torture_mberror),
63505+ atomic_read_unchecked(&n_rcu_torture_alloc),
63506+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
63507+ atomic_read_unchecked(&n_rcu_torture_free),
63508+ atomic_read_unchecked(&n_rcu_torture_mberror),
63509 n_rcu_torture_boost_ktrerror,
63510 n_rcu_torture_boost_rterror,
63511 n_rcu_torture_boost_failure,
63512 n_rcu_torture_boosts,
63513 n_rcu_torture_timers);
63514- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
63515+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
63516 n_rcu_torture_boost_ktrerror != 0 ||
63517 n_rcu_torture_boost_rterror != 0 ||
63518 n_rcu_torture_boost_failure != 0)
63519@@ -1089,7 +1089,7 @@ rcu_torture_printk(char *page)
63520 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
63521 if (i > 1) {
63522 cnt += sprintf(&page[cnt], "!!! ");
63523- atomic_inc(&n_rcu_torture_error);
63524+ atomic_inc_unchecked(&n_rcu_torture_error);
63525 WARN_ON_ONCE(1);
63526 }
63527 cnt += sprintf(&page[cnt], "Reader Pipe: ");
63528@@ -1103,7 +1103,7 @@ rcu_torture_printk(char *page)
63529 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
63530 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
63531 cnt += sprintf(&page[cnt], " %d",
63532- atomic_read(&rcu_torture_wcount[i]));
63533+ atomic_read_unchecked(&rcu_torture_wcount[i]));
63534 }
63535 cnt += sprintf(&page[cnt], "\n");
63536 if (cur_ops->stats)
63537@@ -1412,7 +1412,7 @@ rcu_torture_cleanup(void)
63538
63539 if (cur_ops->cleanup)
63540 cur_ops->cleanup();
63541- if (atomic_read(&n_rcu_torture_error))
63542+ if (atomic_read_unchecked(&n_rcu_torture_error))
63543 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
63544 else
63545 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
63546@@ -1476,17 +1476,17 @@ rcu_torture_init(void)
63547
63548 rcu_torture_current = NULL;
63549 rcu_torture_current_version = 0;
63550- atomic_set(&n_rcu_torture_alloc, 0);
63551- atomic_set(&n_rcu_torture_alloc_fail, 0);
63552- atomic_set(&n_rcu_torture_free, 0);
63553- atomic_set(&n_rcu_torture_mberror, 0);
63554- atomic_set(&n_rcu_torture_error, 0);
63555+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
63556+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
63557+ atomic_set_unchecked(&n_rcu_torture_free, 0);
63558+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
63559+ atomic_set_unchecked(&n_rcu_torture_error, 0);
63560 n_rcu_torture_boost_ktrerror = 0;
63561 n_rcu_torture_boost_rterror = 0;
63562 n_rcu_torture_boost_failure = 0;
63563 n_rcu_torture_boosts = 0;
63564 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
63565- atomic_set(&rcu_torture_wcount[i], 0);
63566+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
63567 for_each_possible_cpu(cpu) {
63568 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
63569 per_cpu(rcu_torture_count, cpu)[i] = 0;
63570diff -urNp linux-3.0.4/kernel/rcutree.c linux-3.0.4/kernel/rcutree.c
63571--- linux-3.0.4/kernel/rcutree.c 2011-07-21 22:17:23.000000000 -0400
63572+++ linux-3.0.4/kernel/rcutree.c 2011-09-14 09:08:05.000000000 -0400
63573@@ -356,9 +356,9 @@ void rcu_enter_nohz(void)
63574 }
63575 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
63576 smp_mb__before_atomic_inc(); /* See above. */
63577- atomic_inc(&rdtp->dynticks);
63578+ atomic_inc_unchecked(&rdtp->dynticks);
63579 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
63580- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
63581+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
63582 local_irq_restore(flags);
63583
63584 /* If the interrupt queued a callback, get out of dyntick mode. */
63585@@ -387,10 +387,10 @@ void rcu_exit_nohz(void)
63586 return;
63587 }
63588 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
63589- atomic_inc(&rdtp->dynticks);
63590+ atomic_inc_unchecked(&rdtp->dynticks);
63591 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
63592 smp_mb__after_atomic_inc(); /* See above. */
63593- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
63594+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
63595 local_irq_restore(flags);
63596 }
63597
63598@@ -406,14 +406,14 @@ void rcu_nmi_enter(void)
63599 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
63600
63601 if (rdtp->dynticks_nmi_nesting == 0 &&
63602- (atomic_read(&rdtp->dynticks) & 0x1))
63603+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
63604 return;
63605 rdtp->dynticks_nmi_nesting++;
63606 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
63607- atomic_inc(&rdtp->dynticks);
63608+ atomic_inc_unchecked(&rdtp->dynticks);
63609 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
63610 smp_mb__after_atomic_inc(); /* See above. */
63611- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
63612+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
63613 }
63614
63615 /**
63616@@ -432,9 +432,9 @@ void rcu_nmi_exit(void)
63617 return;
63618 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
63619 smp_mb__before_atomic_inc(); /* See above. */
63620- atomic_inc(&rdtp->dynticks);
63621+ atomic_inc_unchecked(&rdtp->dynticks);
63622 smp_mb__after_atomic_inc(); /* Force delay to next write. */
63623- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
63624+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
63625 }
63626
63627 /**
63628@@ -469,7 +469,7 @@ void rcu_irq_exit(void)
63629 */
63630 static int dyntick_save_progress_counter(struct rcu_data *rdp)
63631 {
63632- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
63633+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
63634 return 0;
63635 }
63636
63637@@ -484,7 +484,7 @@ static int rcu_implicit_dynticks_qs(stru
63638 unsigned long curr;
63639 unsigned long snap;
63640
63641- curr = (unsigned long)atomic_add_return(0, &rdp->dynticks->dynticks);
63642+ curr = (unsigned long)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
63643 snap = (unsigned long)rdp->dynticks_snap;
63644
63645 /*
63646@@ -1470,7 +1470,7 @@ __rcu_process_callbacks(struct rcu_state
63647 /*
63648 * Do softirq processing for the current CPU.
63649 */
63650-static void rcu_process_callbacks(struct softirq_action *unused)
63651+static void rcu_process_callbacks(void)
63652 {
63653 __rcu_process_callbacks(&rcu_sched_state,
63654 &__get_cpu_var(rcu_sched_data));
63655diff -urNp linux-3.0.4/kernel/rcutree.h linux-3.0.4/kernel/rcutree.h
63656--- linux-3.0.4/kernel/rcutree.h 2011-07-21 22:17:23.000000000 -0400
63657+++ linux-3.0.4/kernel/rcutree.h 2011-09-14 09:08:05.000000000 -0400
63658@@ -86,7 +86,7 @@
63659 struct rcu_dynticks {
63660 int dynticks_nesting; /* Track irq/process nesting level. */
63661 int dynticks_nmi_nesting; /* Track NMI nesting level. */
63662- atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
63663+ atomic_unchecked_t dynticks; /* Even value for dynticks-idle, else odd. */
63664 };
63665
63666 /* RCU's kthread states for tracing. */
63667diff -urNp linux-3.0.4/kernel/rcutree_plugin.h linux-3.0.4/kernel/rcutree_plugin.h
63668--- linux-3.0.4/kernel/rcutree_plugin.h 2011-07-21 22:17:23.000000000 -0400
63669+++ linux-3.0.4/kernel/rcutree_plugin.h 2011-08-23 21:47:56.000000000 -0400
63670@@ -822,7 +822,7 @@ void synchronize_rcu_expedited(void)
63671
63672 /* Clean up and exit. */
63673 smp_mb(); /* ensure expedited GP seen before counter increment. */
63674- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
63675+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
63676 unlock_mb_ret:
63677 mutex_unlock(&sync_rcu_preempt_exp_mutex);
63678 mb_ret:
63679@@ -1774,8 +1774,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expe
63680
63681 #else /* #ifndef CONFIG_SMP */
63682
63683-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
63684-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
63685+static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
63686+static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
63687
63688 static int synchronize_sched_expedited_cpu_stop(void *data)
63689 {
63690@@ -1830,7 +1830,7 @@ void synchronize_sched_expedited(void)
63691 int firstsnap, s, snap, trycount = 0;
63692
63693 /* Note that atomic_inc_return() implies full memory barrier. */
63694- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
63695+ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
63696 get_online_cpus();
63697
63698 /*
63699@@ -1851,7 +1851,7 @@ void synchronize_sched_expedited(void)
63700 }
63701
63702 /* Check to see if someone else did our work for us. */
63703- s = atomic_read(&sync_sched_expedited_done);
63704+ s = atomic_read_unchecked(&sync_sched_expedited_done);
63705 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
63706 smp_mb(); /* ensure test happens before caller kfree */
63707 return;
63708@@ -1866,7 +1866,7 @@ void synchronize_sched_expedited(void)
63709 * grace period works for us.
63710 */
63711 get_online_cpus();
63712- snap = atomic_read(&sync_sched_expedited_started) - 1;
63713+ snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
63714 smp_mb(); /* ensure read is before try_stop_cpus(). */
63715 }
63716
63717@@ -1877,12 +1877,12 @@ void synchronize_sched_expedited(void)
63718 * than we did beat us to the punch.
63719 */
63720 do {
63721- s = atomic_read(&sync_sched_expedited_done);
63722+ s = atomic_read_unchecked(&sync_sched_expedited_done);
63723 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
63724 smp_mb(); /* ensure test happens before caller kfree */
63725 break;
63726 }
63727- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
63728+ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
63729
63730 put_online_cpus();
63731 }
63732diff -urNp linux-3.0.4/kernel/relay.c linux-3.0.4/kernel/relay.c
63733--- linux-3.0.4/kernel/relay.c 2011-07-21 22:17:23.000000000 -0400
63734+++ linux-3.0.4/kernel/relay.c 2011-08-23 21:48:14.000000000 -0400
63735@@ -1236,6 +1236,8 @@ static ssize_t subbuf_splice_actor(struc
63736 };
63737 ssize_t ret;
63738
63739+ pax_track_stack();
63740+
63741 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
63742 return 0;
63743 if (splice_grow_spd(pipe, &spd))
63744diff -urNp linux-3.0.4/kernel/resource.c linux-3.0.4/kernel/resource.c
63745--- linux-3.0.4/kernel/resource.c 2011-07-21 22:17:23.000000000 -0400
63746+++ linux-3.0.4/kernel/resource.c 2011-08-23 21:48:14.000000000 -0400
63747@@ -141,8 +141,18 @@ static const struct file_operations proc
63748
63749 static int __init ioresources_init(void)
63750 {
63751+#ifdef CONFIG_GRKERNSEC_PROC_ADD
63752+#ifdef CONFIG_GRKERNSEC_PROC_USER
63753+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
63754+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
63755+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
63756+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
63757+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
63758+#endif
63759+#else
63760 proc_create("ioports", 0, NULL, &proc_ioports_operations);
63761 proc_create("iomem", 0, NULL, &proc_iomem_operations);
63762+#endif
63763 return 0;
63764 }
63765 __initcall(ioresources_init);
63766diff -urNp linux-3.0.4/kernel/rtmutex-tester.c linux-3.0.4/kernel/rtmutex-tester.c
63767--- linux-3.0.4/kernel/rtmutex-tester.c 2011-07-21 22:17:23.000000000 -0400
63768+++ linux-3.0.4/kernel/rtmutex-tester.c 2011-08-23 21:47:56.000000000 -0400
63769@@ -20,7 +20,7 @@
63770 #define MAX_RT_TEST_MUTEXES 8
63771
63772 static spinlock_t rttest_lock;
63773-static atomic_t rttest_event;
63774+static atomic_unchecked_t rttest_event;
63775
63776 struct test_thread_data {
63777 int opcode;
63778@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_
63779
63780 case RTTEST_LOCKCONT:
63781 td->mutexes[td->opdata] = 1;
63782- td->event = atomic_add_return(1, &rttest_event);
63783+ td->event = atomic_add_return_unchecked(1, &rttest_event);
63784 return 0;
63785
63786 case RTTEST_RESET:
63787@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_
63788 return 0;
63789
63790 case RTTEST_RESETEVENT:
63791- atomic_set(&rttest_event, 0);
63792+ atomic_set_unchecked(&rttest_event, 0);
63793 return 0;
63794
63795 default:
63796@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_
63797 return ret;
63798
63799 td->mutexes[id] = 1;
63800- td->event = atomic_add_return(1, &rttest_event);
63801+ td->event = atomic_add_return_unchecked(1, &rttest_event);
63802 rt_mutex_lock(&mutexes[id]);
63803- td->event = atomic_add_return(1, &rttest_event);
63804+ td->event = atomic_add_return_unchecked(1, &rttest_event);
63805 td->mutexes[id] = 4;
63806 return 0;
63807
63808@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_
63809 return ret;
63810
63811 td->mutexes[id] = 1;
63812- td->event = atomic_add_return(1, &rttest_event);
63813+ td->event = atomic_add_return_unchecked(1, &rttest_event);
63814 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
63815- td->event = atomic_add_return(1, &rttest_event);
63816+ td->event = atomic_add_return_unchecked(1, &rttest_event);
63817 td->mutexes[id] = ret ? 0 : 4;
63818 return ret ? -EINTR : 0;
63819
63820@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_
63821 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
63822 return ret;
63823
63824- td->event = atomic_add_return(1, &rttest_event);
63825+ td->event = atomic_add_return_unchecked(1, &rttest_event);
63826 rt_mutex_unlock(&mutexes[id]);
63827- td->event = atomic_add_return(1, &rttest_event);
63828+ td->event = atomic_add_return_unchecked(1, &rttest_event);
63829 td->mutexes[id] = 0;
63830 return 0;
63831
63832@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mu
63833 break;
63834
63835 td->mutexes[dat] = 2;
63836- td->event = atomic_add_return(1, &rttest_event);
63837+ td->event = atomic_add_return_unchecked(1, &rttest_event);
63838 break;
63839
63840 default:
63841@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mu
63842 return;
63843
63844 td->mutexes[dat] = 3;
63845- td->event = atomic_add_return(1, &rttest_event);
63846+ td->event = atomic_add_return_unchecked(1, &rttest_event);
63847 break;
63848
63849 case RTTEST_LOCKNOWAIT:
63850@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mu
63851 return;
63852
63853 td->mutexes[dat] = 1;
63854- td->event = atomic_add_return(1, &rttest_event);
63855+ td->event = atomic_add_return_unchecked(1, &rttest_event);
63856 return;
63857
63858 default:
63859diff -urNp linux-3.0.4/kernel/sched_autogroup.c linux-3.0.4/kernel/sched_autogroup.c
63860--- linux-3.0.4/kernel/sched_autogroup.c 2011-07-21 22:17:23.000000000 -0400
63861+++ linux-3.0.4/kernel/sched_autogroup.c 2011-08-23 21:47:56.000000000 -0400
63862@@ -7,7 +7,7 @@
63863
63864 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
63865 static struct autogroup autogroup_default;
63866-static atomic_t autogroup_seq_nr;
63867+static atomic_unchecked_t autogroup_seq_nr;
63868
63869 static void __init autogroup_init(struct task_struct *init_task)
63870 {
63871@@ -78,7 +78,7 @@ static inline struct autogroup *autogrou
63872
63873 kref_init(&ag->kref);
63874 init_rwsem(&ag->lock);
63875- ag->id = atomic_inc_return(&autogroup_seq_nr);
63876+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
63877 ag->tg = tg;
63878 #ifdef CONFIG_RT_GROUP_SCHED
63879 /*
63880diff -urNp linux-3.0.4/kernel/sched.c linux-3.0.4/kernel/sched.c
63881--- linux-3.0.4/kernel/sched.c 2011-07-21 22:17:23.000000000 -0400
63882+++ linux-3.0.4/kernel/sched.c 2011-08-23 21:48:14.000000000 -0400
63883@@ -4251,6 +4251,8 @@ asmlinkage void __sched schedule(void)
63884 struct rq *rq;
63885 int cpu;
63886
63887+ pax_track_stack();
63888+
63889 need_resched:
63890 preempt_disable();
63891 cpu = smp_processor_id();
63892@@ -4934,6 +4936,8 @@ int can_nice(const struct task_struct *p
63893 /* convert nice value [19,-20] to rlimit style value [1,40] */
63894 int nice_rlim = 20 - nice;
63895
63896+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
63897+
63898 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
63899 capable(CAP_SYS_NICE));
63900 }
63901@@ -4967,7 +4971,8 @@ SYSCALL_DEFINE1(nice, int, increment)
63902 if (nice > 19)
63903 nice = 19;
63904
63905- if (increment < 0 && !can_nice(current, nice))
63906+ if (increment < 0 && (!can_nice(current, nice) ||
63907+ gr_handle_chroot_nice()))
63908 return -EPERM;
63909
63910 retval = security_task_setnice(current, nice);
63911@@ -5111,6 +5116,7 @@ recheck:
63912 unsigned long rlim_rtprio =
63913 task_rlimit(p, RLIMIT_RTPRIO);
63914
63915+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
63916 /* can't set/change the rt policy */
63917 if (policy != p->policy && !rlim_rtprio)
63918 return -EPERM;
63919diff -urNp linux-3.0.4/kernel/sched_fair.c linux-3.0.4/kernel/sched_fair.c
63920--- linux-3.0.4/kernel/sched_fair.c 2011-07-21 22:17:23.000000000 -0400
63921+++ linux-3.0.4/kernel/sched_fair.c 2011-08-23 21:47:56.000000000 -0400
63922@@ -4050,7 +4050,7 @@ static void nohz_idle_balance(int this_c
63923 * run_rebalance_domains is triggered when needed from the scheduler tick.
63924 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
63925 */
63926-static void run_rebalance_domains(struct softirq_action *h)
63927+static void run_rebalance_domains(void)
63928 {
63929 int this_cpu = smp_processor_id();
63930 struct rq *this_rq = cpu_rq(this_cpu);
63931diff -urNp linux-3.0.4/kernel/signal.c linux-3.0.4/kernel/signal.c
63932--- linux-3.0.4/kernel/signal.c 2011-07-21 22:17:23.000000000 -0400
63933+++ linux-3.0.4/kernel/signal.c 2011-08-23 21:48:14.000000000 -0400
63934@@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cache
63935
63936 int print_fatal_signals __read_mostly;
63937
63938-static void __user *sig_handler(struct task_struct *t, int sig)
63939+static __sighandler_t sig_handler(struct task_struct *t, int sig)
63940 {
63941 return t->sighand->action[sig - 1].sa.sa_handler;
63942 }
63943
63944-static int sig_handler_ignored(void __user *handler, int sig)
63945+static int sig_handler_ignored(__sighandler_t handler, int sig)
63946 {
63947 /* Is it explicitly or implicitly ignored? */
63948 return handler == SIG_IGN ||
63949@@ -60,7 +60,7 @@ static int sig_handler_ignored(void __us
63950 static int sig_task_ignored(struct task_struct *t, int sig,
63951 int from_ancestor_ns)
63952 {
63953- void __user *handler;
63954+ __sighandler_t handler;
63955
63956 handler = sig_handler(t, sig);
63957
63958@@ -320,6 +320,9 @@ __sigqueue_alloc(int sig, struct task_st
63959 atomic_inc(&user->sigpending);
63960 rcu_read_unlock();
63961
63962+ if (!override_rlimit)
63963+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
63964+
63965 if (override_rlimit ||
63966 atomic_read(&user->sigpending) <=
63967 task_rlimit(t, RLIMIT_SIGPENDING)) {
63968@@ -444,7 +447,7 @@ flush_signal_handlers(struct task_struct
63969
63970 int unhandled_signal(struct task_struct *tsk, int sig)
63971 {
63972- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
63973+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
63974 if (is_global_init(tsk))
63975 return 1;
63976 if (handler != SIG_IGN && handler != SIG_DFL)
63977@@ -770,6 +773,13 @@ static int check_kill_permission(int sig
63978 }
63979 }
63980
63981+ /* allow glibc communication via tgkill to other threads in our
63982+ thread group */
63983+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
63984+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
63985+ && gr_handle_signal(t, sig))
63986+ return -EPERM;
63987+
63988 return security_task_kill(t, info, sig, 0);
63989 }
63990
63991@@ -1092,7 +1102,7 @@ __group_send_sig_info(int sig, struct si
63992 return send_signal(sig, info, p, 1);
63993 }
63994
63995-static int
63996+int
63997 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
63998 {
63999 return send_signal(sig, info, t, 0);
64000@@ -1129,6 +1139,7 @@ force_sig_info(int sig, struct siginfo *
64001 unsigned long int flags;
64002 int ret, blocked, ignored;
64003 struct k_sigaction *action;
64004+ int is_unhandled = 0;
64005
64006 spin_lock_irqsave(&t->sighand->siglock, flags);
64007 action = &t->sighand->action[sig-1];
64008@@ -1143,9 +1154,18 @@ force_sig_info(int sig, struct siginfo *
64009 }
64010 if (action->sa.sa_handler == SIG_DFL)
64011 t->signal->flags &= ~SIGNAL_UNKILLABLE;
64012+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
64013+ is_unhandled = 1;
64014 ret = specific_send_sig_info(sig, info, t);
64015 spin_unlock_irqrestore(&t->sighand->siglock, flags);
64016
64017+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
64018+ normal operation */
64019+ if (is_unhandled) {
64020+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
64021+ gr_handle_crash(t, sig);
64022+ }
64023+
64024 return ret;
64025 }
64026
64027@@ -1212,8 +1232,11 @@ int group_send_sig_info(int sig, struct
64028 ret = check_kill_permission(sig, info, p);
64029 rcu_read_unlock();
64030
64031- if (!ret && sig)
64032+ if (!ret && sig) {
64033 ret = do_send_sig_info(sig, info, p, true);
64034+ if (!ret)
64035+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
64036+ }
64037
64038 return ret;
64039 }
64040@@ -1839,6 +1862,8 @@ void ptrace_notify(int exit_code)
64041 {
64042 siginfo_t info;
64043
64044+ pax_track_stack();
64045+
64046 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
64047
64048 memset(&info, 0, sizeof info);
64049@@ -2639,7 +2664,15 @@ do_send_specific(pid_t tgid, pid_t pid,
64050 int error = -ESRCH;
64051
64052 rcu_read_lock();
64053- p = find_task_by_vpid(pid);
64054+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
64055+ /* allow glibc communication via tgkill to other threads in our
64056+ thread group */
64057+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
64058+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
64059+ p = find_task_by_vpid_unrestricted(pid);
64060+ else
64061+#endif
64062+ p = find_task_by_vpid(pid);
64063 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
64064 error = check_kill_permission(sig, info, p);
64065 /*
64066diff -urNp linux-3.0.4/kernel/smp.c linux-3.0.4/kernel/smp.c
64067--- linux-3.0.4/kernel/smp.c 2011-07-21 22:17:23.000000000 -0400
64068+++ linux-3.0.4/kernel/smp.c 2011-08-23 21:47:56.000000000 -0400
64069@@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t fu
64070 }
64071 EXPORT_SYMBOL(smp_call_function);
64072
64073-void ipi_call_lock(void)
64074+void ipi_call_lock(void) __acquires(call_function.lock)
64075 {
64076 raw_spin_lock(&call_function.lock);
64077 }
64078
64079-void ipi_call_unlock(void)
64080+void ipi_call_unlock(void) __releases(call_function.lock)
64081 {
64082 raw_spin_unlock(&call_function.lock);
64083 }
64084
64085-void ipi_call_lock_irq(void)
64086+void ipi_call_lock_irq(void) __acquires(call_function.lock)
64087 {
64088 raw_spin_lock_irq(&call_function.lock);
64089 }
64090
64091-void ipi_call_unlock_irq(void)
64092+void ipi_call_unlock_irq(void) __releases(call_function.lock)
64093 {
64094 raw_spin_unlock_irq(&call_function.lock);
64095 }
64096diff -urNp linux-3.0.4/kernel/softirq.c linux-3.0.4/kernel/softirq.c
64097--- linux-3.0.4/kernel/softirq.c 2011-07-21 22:17:23.000000000 -0400
64098+++ linux-3.0.4/kernel/softirq.c 2011-08-23 21:47:56.000000000 -0400
64099@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
64100
64101 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
64102
64103-char *softirq_to_name[NR_SOFTIRQS] = {
64104+const char * const softirq_to_name[NR_SOFTIRQS] = {
64105 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
64106 "TASKLET", "SCHED", "HRTIMER", "RCU"
64107 };
64108@@ -235,7 +235,7 @@ restart:
64109 kstat_incr_softirqs_this_cpu(vec_nr);
64110
64111 trace_softirq_entry(vec_nr);
64112- h->action(h);
64113+ h->action();
64114 trace_softirq_exit(vec_nr);
64115 if (unlikely(prev_count != preempt_count())) {
64116 printk(KERN_ERR "huh, entered softirq %u %s %p"
64117@@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
64118 local_irq_restore(flags);
64119 }
64120
64121-void open_softirq(int nr, void (*action)(struct softirq_action *))
64122+void open_softirq(int nr, void (*action)(void))
64123 {
64124- softirq_vec[nr].action = action;
64125+ pax_open_kernel();
64126+ *(void **)&softirq_vec[nr].action = action;
64127+ pax_close_kernel();
64128 }
64129
64130 /*
64131@@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct
64132
64133 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
64134
64135-static void tasklet_action(struct softirq_action *a)
64136+static void tasklet_action(void)
64137 {
64138 struct tasklet_struct *list;
64139
64140@@ -476,7 +478,7 @@ static void tasklet_action(struct softir
64141 }
64142 }
64143
64144-static void tasklet_hi_action(struct softirq_action *a)
64145+static void tasklet_hi_action(void)
64146 {
64147 struct tasklet_struct *list;
64148
64149diff -urNp linux-3.0.4/kernel/sys.c linux-3.0.4/kernel/sys.c
64150--- linux-3.0.4/kernel/sys.c 2011-09-02 18:11:26.000000000 -0400
64151+++ linux-3.0.4/kernel/sys.c 2011-10-06 04:17:55.000000000 -0400
64152@@ -158,6 +158,12 @@ static int set_one_prio(struct task_stru
64153 error = -EACCES;
64154 goto out;
64155 }
64156+
64157+ if (gr_handle_chroot_setpriority(p, niceval)) {
64158+ error = -EACCES;
64159+ goto out;
64160+ }
64161+
64162 no_nice = security_task_setnice(p, niceval);
64163 if (no_nice) {
64164 error = no_nice;
64165@@ -541,6 +547,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
64166 goto error;
64167 }
64168
64169+ if (gr_check_group_change(new->gid, new->egid, -1))
64170+ goto error;
64171+
64172 if (rgid != (gid_t) -1 ||
64173 (egid != (gid_t) -1 && egid != old->gid))
64174 new->sgid = new->egid;
64175@@ -570,6 +579,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
64176 old = current_cred();
64177
64178 retval = -EPERM;
64179+
64180+ if (gr_check_group_change(gid, gid, gid))
64181+ goto error;
64182+
64183 if (nsown_capable(CAP_SETGID))
64184 new->gid = new->egid = new->sgid = new->fsgid = gid;
64185 else if (gid == old->gid || gid == old->sgid)
64186@@ -595,11 +608,18 @@ static int set_user(struct cred *new)
64187 if (!new_user)
64188 return -EAGAIN;
64189
64190+ /*
64191+ * We don't fail in case of NPROC limit excess here because too many
64192+ * poorly written programs don't check set*uid() return code, assuming
64193+ * it never fails if called by root. We may still enforce NPROC limit
64194+ * for programs doing set*uid()+execve() by harmlessly deferring the
64195+ * failure to the execve() stage.
64196+ */
64197 if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
64198- new_user != INIT_USER) {
64199- free_uid(new_user);
64200- return -EAGAIN;
64201- }
64202+ new_user != INIT_USER)
64203+ current->flags |= PF_NPROC_EXCEEDED;
64204+ else
64205+ current->flags &= ~PF_NPROC_EXCEEDED;
64206
64207 free_uid(new->user);
64208 new->user = new_user;
64209@@ -650,6 +670,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
64210 goto error;
64211 }
64212
64213+ if (gr_check_user_change(new->uid, new->euid, -1))
64214+ goto error;
64215+
64216 if (new->uid != old->uid) {
64217 retval = set_user(new);
64218 if (retval < 0)
64219@@ -694,6 +717,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
64220 old = current_cred();
64221
64222 retval = -EPERM;
64223+
64224+ if (gr_check_crash_uid(uid))
64225+ goto error;
64226+ if (gr_check_user_change(uid, uid, uid))
64227+ goto error;
64228+
64229 if (nsown_capable(CAP_SETUID)) {
64230 new->suid = new->uid = uid;
64231 if (uid != old->uid) {
64232@@ -748,6 +777,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
64233 goto error;
64234 }
64235
64236+ if (gr_check_user_change(ruid, euid, -1))
64237+ goto error;
64238+
64239 if (ruid != (uid_t) -1) {
64240 new->uid = ruid;
64241 if (ruid != old->uid) {
64242@@ -812,6 +844,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
64243 goto error;
64244 }
64245
64246+ if (gr_check_group_change(rgid, egid, -1))
64247+ goto error;
64248+
64249 if (rgid != (gid_t) -1)
64250 new->gid = rgid;
64251 if (egid != (gid_t) -1)
64252@@ -858,6 +893,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
64253 old = current_cred();
64254 old_fsuid = old->fsuid;
64255
64256+ if (gr_check_user_change(-1, -1, uid))
64257+ goto error;
64258+
64259 if (uid == old->uid || uid == old->euid ||
64260 uid == old->suid || uid == old->fsuid ||
64261 nsown_capable(CAP_SETUID)) {
64262@@ -868,6 +906,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
64263 }
64264 }
64265
64266+error:
64267 abort_creds(new);
64268 return old_fsuid;
64269
64270@@ -894,12 +933,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
64271 if (gid == old->gid || gid == old->egid ||
64272 gid == old->sgid || gid == old->fsgid ||
64273 nsown_capable(CAP_SETGID)) {
64274+ if (gr_check_group_change(-1, -1, gid))
64275+ goto error;
64276+
64277 if (gid != old_fsgid) {
64278 new->fsgid = gid;
64279 goto change_okay;
64280 }
64281 }
64282
64283+error:
64284 abort_creds(new);
64285 return old_fsgid;
64286
64287@@ -1205,19 +1248,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_
64288 return -EFAULT;
64289
64290 down_read(&uts_sem);
64291- error = __copy_to_user(&name->sysname, &utsname()->sysname,
64292+ error = __copy_to_user(name->sysname, &utsname()->sysname,
64293 __OLD_UTS_LEN);
64294 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
64295- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
64296+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
64297 __OLD_UTS_LEN);
64298 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
64299- error |= __copy_to_user(&name->release, &utsname()->release,
64300+ error |= __copy_to_user(name->release, &utsname()->release,
64301 __OLD_UTS_LEN);
64302 error |= __put_user(0, name->release + __OLD_UTS_LEN);
64303- error |= __copy_to_user(&name->version, &utsname()->version,
64304+ error |= __copy_to_user(name->version, &utsname()->version,
64305 __OLD_UTS_LEN);
64306 error |= __put_user(0, name->version + __OLD_UTS_LEN);
64307- error |= __copy_to_user(&name->machine, &utsname()->machine,
64308+ error |= __copy_to_user(name->machine, &utsname()->machine,
64309 __OLD_UTS_LEN);
64310 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
64311 up_read(&uts_sem);
64312@@ -1680,7 +1723,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
64313 error = get_dumpable(me->mm);
64314 break;
64315 case PR_SET_DUMPABLE:
64316- if (arg2 < 0 || arg2 > 1) {
64317+ if (arg2 > 1) {
64318 error = -EINVAL;
64319 break;
64320 }
64321diff -urNp linux-3.0.4/kernel/sysctl_binary.c linux-3.0.4/kernel/sysctl_binary.c
64322--- linux-3.0.4/kernel/sysctl_binary.c 2011-07-21 22:17:23.000000000 -0400
64323+++ linux-3.0.4/kernel/sysctl_binary.c 2011-10-06 04:17:55.000000000 -0400
64324@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *f
64325 int i;
64326
64327 set_fs(KERNEL_DS);
64328- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
64329+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
64330 set_fs(old_fs);
64331 if (result < 0)
64332 goto out_kfree;
64333@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *f
64334 }
64335
64336 set_fs(KERNEL_DS);
64337- result = vfs_write(file, buffer, str - buffer, &pos);
64338+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
64339 set_fs(old_fs);
64340 if (result < 0)
64341 goto out_kfree;
64342@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file
64343 int i;
64344
64345 set_fs(KERNEL_DS);
64346- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
64347+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
64348 set_fs(old_fs);
64349 if (result < 0)
64350 goto out_kfree;
64351@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file
64352 }
64353
64354 set_fs(KERNEL_DS);
64355- result = vfs_write(file, buffer, str - buffer, &pos);
64356+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
64357 set_fs(old_fs);
64358 if (result < 0)
64359 goto out_kfree;
64360@@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *fil
64361 int i;
64362
64363 set_fs(KERNEL_DS);
64364- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
64365+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
64366 set_fs(old_fs);
64367 if (result < 0)
64368 goto out;
64369@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struc
64370 __le16 dnaddr;
64371
64372 set_fs(KERNEL_DS);
64373- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
64374+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
64375 set_fs(old_fs);
64376 if (result < 0)
64377 goto out;
64378@@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struc
64379 le16_to_cpu(dnaddr) & 0x3ff);
64380
64381 set_fs(KERNEL_DS);
64382- result = vfs_write(file, buf, len, &pos);
64383+ result = vfs_write(file, (const char __force_user *)buf, len, &pos);
64384 set_fs(old_fs);
64385 if (result < 0)
64386 goto out;
64387diff -urNp linux-3.0.4/kernel/sysctl.c linux-3.0.4/kernel/sysctl.c
64388--- linux-3.0.4/kernel/sysctl.c 2011-07-21 22:17:23.000000000 -0400
64389+++ linux-3.0.4/kernel/sysctl.c 2011-08-23 21:48:14.000000000 -0400
64390@@ -85,6 +85,13 @@
64391
64392
64393 #if defined(CONFIG_SYSCTL)
64394+#include <linux/grsecurity.h>
64395+#include <linux/grinternal.h>
64396+
64397+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
64398+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
64399+ const int op);
64400+extern int gr_handle_chroot_sysctl(const int op);
64401
64402 /* External variables not in a header file. */
64403 extern int sysctl_overcommit_memory;
64404@@ -197,6 +204,7 @@ static int sysrq_sysctl_handler(ctl_tabl
64405 }
64406
64407 #endif
64408+extern struct ctl_table grsecurity_table[];
64409
64410 static struct ctl_table root_table[];
64411 static struct ctl_table_root sysctl_table_root;
64412@@ -226,6 +234,20 @@ extern struct ctl_table epoll_table[];
64413 int sysctl_legacy_va_layout;
64414 #endif
64415
64416+#ifdef CONFIG_PAX_SOFTMODE
64417+static ctl_table pax_table[] = {
64418+ {
64419+ .procname = "softmode",
64420+ .data = &pax_softmode,
64421+ .maxlen = sizeof(unsigned int),
64422+ .mode = 0600,
64423+ .proc_handler = &proc_dointvec,
64424+ },
64425+
64426+ { }
64427+};
64428+#endif
64429+
64430 /* The default sysctl tables: */
64431
64432 static struct ctl_table root_table[] = {
64433@@ -272,6 +294,22 @@ static int max_extfrag_threshold = 1000;
64434 #endif
64435
64436 static struct ctl_table kern_table[] = {
64437+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
64438+ {
64439+ .procname = "grsecurity",
64440+ .mode = 0500,
64441+ .child = grsecurity_table,
64442+ },
64443+#endif
64444+
64445+#ifdef CONFIG_PAX_SOFTMODE
64446+ {
64447+ .procname = "pax",
64448+ .mode = 0500,
64449+ .child = pax_table,
64450+ },
64451+#endif
64452+
64453 {
64454 .procname = "sched_child_runs_first",
64455 .data = &sysctl_sched_child_runs_first,
64456@@ -546,7 +584,7 @@ static struct ctl_table kern_table[] = {
64457 .data = &modprobe_path,
64458 .maxlen = KMOD_PATH_LEN,
64459 .mode = 0644,
64460- .proc_handler = proc_dostring,
64461+ .proc_handler = proc_dostring_modpriv,
64462 },
64463 {
64464 .procname = "modules_disabled",
64465@@ -713,16 +751,20 @@ static struct ctl_table kern_table[] = {
64466 .extra1 = &zero,
64467 .extra2 = &one,
64468 },
64469+#endif
64470 {
64471 .procname = "kptr_restrict",
64472 .data = &kptr_restrict,
64473 .maxlen = sizeof(int),
64474 .mode = 0644,
64475 .proc_handler = proc_dmesg_restrict,
64476+#ifdef CONFIG_GRKERNSEC_HIDESYM
64477+ .extra1 = &two,
64478+#else
64479 .extra1 = &zero,
64480+#endif
64481 .extra2 = &two,
64482 },
64483-#endif
64484 {
64485 .procname = "ngroups_max",
64486 .data = &ngroups_max,
64487@@ -1205,6 +1247,13 @@ static struct ctl_table vm_table[] = {
64488 .proc_handler = proc_dointvec_minmax,
64489 .extra1 = &zero,
64490 },
64491+ {
64492+ .procname = "heap_stack_gap",
64493+ .data = &sysctl_heap_stack_gap,
64494+ .maxlen = sizeof(sysctl_heap_stack_gap),
64495+ .mode = 0644,
64496+ .proc_handler = proc_doulongvec_minmax,
64497+ },
64498 #else
64499 {
64500 .procname = "nr_trim_pages",
64501@@ -1714,6 +1763,17 @@ static int test_perm(int mode, int op)
64502 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
64503 {
64504 int mode;
64505+ int error;
64506+
64507+ if (table->parent != NULL && table->parent->procname != NULL &&
64508+ table->procname != NULL &&
64509+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
64510+ return -EACCES;
64511+ if (gr_handle_chroot_sysctl(op))
64512+ return -EACCES;
64513+ error = gr_handle_sysctl(table, op);
64514+ if (error)
64515+ return error;
64516
64517 if (root->permissions)
64518 mode = root->permissions(root, current->nsproxy, table);
64519@@ -2118,6 +2178,16 @@ int proc_dostring(struct ctl_table *tabl
64520 buffer, lenp, ppos);
64521 }
64522
64523+int proc_dostring_modpriv(struct ctl_table *table, int write,
64524+ void __user *buffer, size_t *lenp, loff_t *ppos)
64525+{
64526+ if (write && !capable(CAP_SYS_MODULE))
64527+ return -EPERM;
64528+
64529+ return _proc_do_string(table->data, table->maxlen, write,
64530+ buffer, lenp, ppos);
64531+}
64532+
64533 static size_t proc_skip_spaces(char **buf)
64534 {
64535 size_t ret;
64536@@ -2223,6 +2293,8 @@ static int proc_put_long(void __user **b
64537 len = strlen(tmp);
64538 if (len > *size)
64539 len = *size;
64540+ if (len > sizeof(tmp))
64541+ len = sizeof(tmp);
64542 if (copy_to_user(*buf, tmp, len))
64543 return -EFAULT;
64544 *size -= len;
64545@@ -2539,8 +2611,11 @@ static int __do_proc_doulongvec_minmax(v
64546 *i = val;
64547 } else {
64548 val = convdiv * (*i) / convmul;
64549- if (!first)
64550+ if (!first) {
64551 err = proc_put_char(&buffer, &left, '\t');
64552+ if (err)
64553+ break;
64554+ }
64555 err = proc_put_long(&buffer, &left, val, false);
64556 if (err)
64557 break;
64558@@ -2935,6 +3010,12 @@ int proc_dostring(struct ctl_table *tabl
64559 return -ENOSYS;
64560 }
64561
64562+int proc_dostring_modpriv(struct ctl_table *table, int write,
64563+ void __user *buffer, size_t *lenp, loff_t *ppos)
64564+{
64565+ return -ENOSYS;
64566+}
64567+
64568 int proc_dointvec(struct ctl_table *table, int write,
64569 void __user *buffer, size_t *lenp, loff_t *ppos)
64570 {
64571@@ -2991,6 +3072,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
64572 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
64573 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
64574 EXPORT_SYMBOL(proc_dostring);
64575+EXPORT_SYMBOL(proc_dostring_modpriv);
64576 EXPORT_SYMBOL(proc_doulongvec_minmax);
64577 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
64578 EXPORT_SYMBOL(register_sysctl_table);
64579diff -urNp linux-3.0.4/kernel/sysctl_check.c linux-3.0.4/kernel/sysctl_check.c
64580--- linux-3.0.4/kernel/sysctl_check.c 2011-07-21 22:17:23.000000000 -0400
64581+++ linux-3.0.4/kernel/sysctl_check.c 2011-08-23 21:48:14.000000000 -0400
64582@@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *n
64583 set_fail(&fail, table, "Directory with extra2");
64584 } else {
64585 if ((table->proc_handler == proc_dostring) ||
64586+ (table->proc_handler == proc_dostring_modpriv) ||
64587 (table->proc_handler == proc_dointvec) ||
64588 (table->proc_handler == proc_dointvec_minmax) ||
64589 (table->proc_handler == proc_dointvec_jiffies) ||
64590diff -urNp linux-3.0.4/kernel/taskstats.c linux-3.0.4/kernel/taskstats.c
64591--- linux-3.0.4/kernel/taskstats.c 2011-07-21 22:17:23.000000000 -0400
64592+++ linux-3.0.4/kernel/taskstats.c 2011-08-23 21:48:14.000000000 -0400
64593@@ -27,9 +27,12 @@
64594 #include <linux/cgroup.h>
64595 #include <linux/fs.h>
64596 #include <linux/file.h>
64597+#include <linux/grsecurity.h>
64598 #include <net/genetlink.h>
64599 #include <asm/atomic.h>
64600
64601+extern int gr_is_taskstats_denied(int pid);
64602+
64603 /*
64604 * Maximum length of a cpumask that can be specified in
64605 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
64606@@ -558,6 +561,9 @@ err:
64607
64608 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
64609 {
64610+ if (gr_is_taskstats_denied(current->pid))
64611+ return -EACCES;
64612+
64613 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
64614 return cmd_attr_register_cpumask(info);
64615 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
64616diff -urNp linux-3.0.4/kernel/time/alarmtimer.c linux-3.0.4/kernel/time/alarmtimer.c
64617--- linux-3.0.4/kernel/time/alarmtimer.c 2011-07-21 22:17:23.000000000 -0400
64618+++ linux-3.0.4/kernel/time/alarmtimer.c 2011-08-23 21:47:56.000000000 -0400
64619@@ -685,7 +685,7 @@ static int __init alarmtimer_init(void)
64620 {
64621 int error = 0;
64622 int i;
64623- struct k_clock alarm_clock = {
64624+ static struct k_clock alarm_clock = {
64625 .clock_getres = alarm_clock_getres,
64626 .clock_get = alarm_clock_get,
64627 .timer_create = alarm_timer_create,
64628diff -urNp linux-3.0.4/kernel/time/tick-broadcast.c linux-3.0.4/kernel/time/tick-broadcast.c
64629--- linux-3.0.4/kernel/time/tick-broadcast.c 2011-07-21 22:17:23.000000000 -0400
64630+++ linux-3.0.4/kernel/time/tick-broadcast.c 2011-08-23 21:47:56.000000000 -0400
64631@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct cl
64632 * then clear the broadcast bit.
64633 */
64634 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
64635- int cpu = smp_processor_id();
64636+ cpu = smp_processor_id();
64637
64638 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
64639 tick_broadcast_clear_oneshot(cpu);
64640diff -urNp linux-3.0.4/kernel/time/timekeeping.c linux-3.0.4/kernel/time/timekeeping.c
64641--- linux-3.0.4/kernel/time/timekeeping.c 2011-07-21 22:17:23.000000000 -0400
64642+++ linux-3.0.4/kernel/time/timekeeping.c 2011-08-23 21:48:14.000000000 -0400
64643@@ -14,6 +14,7 @@
64644 #include <linux/init.h>
64645 #include <linux/mm.h>
64646 #include <linux/sched.h>
64647+#include <linux/grsecurity.h>
64648 #include <linux/syscore_ops.h>
64649 #include <linux/clocksource.h>
64650 #include <linux/jiffies.h>
64651@@ -361,6 +362,8 @@ int do_settimeofday(const struct timespe
64652 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
64653 return -EINVAL;
64654
64655+ gr_log_timechange();
64656+
64657 write_seqlock_irqsave(&xtime_lock, flags);
64658
64659 timekeeping_forward_now();
64660diff -urNp linux-3.0.4/kernel/time/timer_list.c linux-3.0.4/kernel/time/timer_list.c
64661--- linux-3.0.4/kernel/time/timer_list.c 2011-07-21 22:17:23.000000000 -0400
64662+++ linux-3.0.4/kernel/time/timer_list.c 2011-08-23 21:48:14.000000000 -0400
64663@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
64664
64665 static void print_name_offset(struct seq_file *m, void *sym)
64666 {
64667+#ifdef CONFIG_GRKERNSEC_HIDESYM
64668+ SEQ_printf(m, "<%p>", NULL);
64669+#else
64670 char symname[KSYM_NAME_LEN];
64671
64672 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
64673 SEQ_printf(m, "<%pK>", sym);
64674 else
64675 SEQ_printf(m, "%s", symname);
64676+#endif
64677 }
64678
64679 static void
64680@@ -112,7 +116,11 @@ next_one:
64681 static void
64682 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
64683 {
64684+#ifdef CONFIG_GRKERNSEC_HIDESYM
64685+ SEQ_printf(m, " .base: %p\n", NULL);
64686+#else
64687 SEQ_printf(m, " .base: %pK\n", base);
64688+#endif
64689 SEQ_printf(m, " .index: %d\n",
64690 base->index);
64691 SEQ_printf(m, " .resolution: %Lu nsecs\n",
64692@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs
64693 {
64694 struct proc_dir_entry *pe;
64695
64696+#ifdef CONFIG_GRKERNSEC_PROC_ADD
64697+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
64698+#else
64699 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
64700+#endif
64701 if (!pe)
64702 return -ENOMEM;
64703 return 0;
64704diff -urNp linux-3.0.4/kernel/time/timer_stats.c linux-3.0.4/kernel/time/timer_stats.c
64705--- linux-3.0.4/kernel/time/timer_stats.c 2011-07-21 22:17:23.000000000 -0400
64706+++ linux-3.0.4/kernel/time/timer_stats.c 2011-08-23 21:48:14.000000000 -0400
64707@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
64708 static unsigned long nr_entries;
64709 static struct entry entries[MAX_ENTRIES];
64710
64711-static atomic_t overflow_count;
64712+static atomic_unchecked_t overflow_count;
64713
64714 /*
64715 * The entries are in a hash-table, for fast lookup:
64716@@ -140,7 +140,7 @@ static void reset_entries(void)
64717 nr_entries = 0;
64718 memset(entries, 0, sizeof(entries));
64719 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
64720- atomic_set(&overflow_count, 0);
64721+ atomic_set_unchecked(&overflow_count, 0);
64722 }
64723
64724 static struct entry *alloc_entry(void)
64725@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
64726 if (likely(entry))
64727 entry->count++;
64728 else
64729- atomic_inc(&overflow_count);
64730+ atomic_inc_unchecked(&overflow_count);
64731
64732 out_unlock:
64733 raw_spin_unlock_irqrestore(lock, flags);
64734@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
64735
64736 static void print_name_offset(struct seq_file *m, unsigned long addr)
64737 {
64738+#ifdef CONFIG_GRKERNSEC_HIDESYM
64739+ seq_printf(m, "<%p>", NULL);
64740+#else
64741 char symname[KSYM_NAME_LEN];
64742
64743 if (lookup_symbol_name(addr, symname) < 0)
64744 seq_printf(m, "<%p>", (void *)addr);
64745 else
64746 seq_printf(m, "%s", symname);
64747+#endif
64748 }
64749
64750 static int tstats_show(struct seq_file *m, void *v)
64751@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
64752
64753 seq_puts(m, "Timer Stats Version: v0.2\n");
64754 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
64755- if (atomic_read(&overflow_count))
64756+ if (atomic_read_unchecked(&overflow_count))
64757 seq_printf(m, "Overflow: %d entries\n",
64758- atomic_read(&overflow_count));
64759+ atomic_read_unchecked(&overflow_count));
64760
64761 for (i = 0; i < nr_entries; i++) {
64762 entry = entries + i;
64763@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(voi
64764 {
64765 struct proc_dir_entry *pe;
64766
64767+#ifdef CONFIG_GRKERNSEC_PROC_ADD
64768+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
64769+#else
64770 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
64771+#endif
64772 if (!pe)
64773 return -ENOMEM;
64774 return 0;
64775diff -urNp linux-3.0.4/kernel/time.c linux-3.0.4/kernel/time.c
64776--- linux-3.0.4/kernel/time.c 2011-07-21 22:17:23.000000000 -0400
64777+++ linux-3.0.4/kernel/time.c 2011-08-23 21:48:14.000000000 -0400
64778@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct tim
64779 return error;
64780
64781 if (tz) {
64782+ /* we log in do_settimeofday called below, so don't log twice
64783+ */
64784+ if (!tv)
64785+ gr_log_timechange();
64786+
64787 /* SMP safe, global irq locking makes it work. */
64788 sys_tz = *tz;
64789 update_vsyscall_tz();
64790diff -urNp linux-3.0.4/kernel/timer.c linux-3.0.4/kernel/timer.c
64791--- linux-3.0.4/kernel/timer.c 2011-07-21 22:17:23.000000000 -0400
64792+++ linux-3.0.4/kernel/timer.c 2011-08-23 21:47:56.000000000 -0400
64793@@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
64794 /*
64795 * This function runs timers and the timer-tq in bottom half context.
64796 */
64797-static void run_timer_softirq(struct softirq_action *h)
64798+static void run_timer_softirq(void)
64799 {
64800 struct tvec_base *base = __this_cpu_read(tvec_bases);
64801
64802diff -urNp linux-3.0.4/kernel/trace/blktrace.c linux-3.0.4/kernel/trace/blktrace.c
64803--- linux-3.0.4/kernel/trace/blktrace.c 2011-07-21 22:17:23.000000000 -0400
64804+++ linux-3.0.4/kernel/trace/blktrace.c 2011-08-23 21:47:56.000000000 -0400
64805@@ -321,7 +321,7 @@ static ssize_t blk_dropped_read(struct f
64806 struct blk_trace *bt = filp->private_data;
64807 char buf[16];
64808
64809- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
64810+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
64811
64812 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
64813 }
64814@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(str
64815 return 1;
64816
64817 bt = buf->chan->private_data;
64818- atomic_inc(&bt->dropped);
64819+ atomic_inc_unchecked(&bt->dropped);
64820 return 0;
64821 }
64822
64823@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_qu
64824
64825 bt->dir = dir;
64826 bt->dev = dev;
64827- atomic_set(&bt->dropped, 0);
64828+ atomic_set_unchecked(&bt->dropped, 0);
64829
64830 ret = -EIO;
64831 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
64832diff -urNp linux-3.0.4/kernel/trace/ftrace.c linux-3.0.4/kernel/trace/ftrace.c
64833--- linux-3.0.4/kernel/trace/ftrace.c 2011-07-21 22:17:23.000000000 -0400
64834+++ linux-3.0.4/kernel/trace/ftrace.c 2011-08-23 21:47:56.000000000 -0400
64835@@ -1566,12 +1566,17 @@ ftrace_code_disable(struct module *mod,
64836 if (unlikely(ftrace_disabled))
64837 return 0;
64838
64839+ ret = ftrace_arch_code_modify_prepare();
64840+ FTRACE_WARN_ON(ret);
64841+ if (ret)
64842+ return 0;
64843+
64844 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
64845+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
64846 if (ret) {
64847 ftrace_bug(ret, ip);
64848- return 0;
64849 }
64850- return 1;
64851+ return ret ? 0 : 1;
64852 }
64853
64854 /*
64855@@ -2550,7 +2555,7 @@ static void ftrace_free_entry_rcu(struct
64856
64857 int
64858 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
64859- void *data)
64860+ void *data)
64861 {
64862 struct ftrace_func_probe *entry;
64863 struct ftrace_page *pg;
64864diff -urNp linux-3.0.4/kernel/trace/trace.c linux-3.0.4/kernel/trace/trace.c
64865--- linux-3.0.4/kernel/trace/trace.c 2011-07-21 22:17:23.000000000 -0400
64866+++ linux-3.0.4/kernel/trace/trace.c 2011-08-23 21:48:14.000000000 -0400
64867@@ -3339,6 +3339,8 @@ static ssize_t tracing_splice_read_pipe(
64868 size_t rem;
64869 unsigned int i;
64870
64871+ pax_track_stack();
64872+
64873 if (splice_grow_spd(pipe, &spd))
64874 return -ENOMEM;
64875
64876@@ -3822,6 +3824,8 @@ tracing_buffers_splice_read(struct file
64877 int entries, size, i;
64878 size_t ret;
64879
64880+ pax_track_stack();
64881+
64882 if (splice_grow_spd(pipe, &spd))
64883 return -ENOMEM;
64884
64885@@ -3990,10 +3994,9 @@ static const struct file_operations trac
64886 };
64887 #endif
64888
64889-static struct dentry *d_tracer;
64890-
64891 struct dentry *tracing_init_dentry(void)
64892 {
64893+ static struct dentry *d_tracer;
64894 static int once;
64895
64896 if (d_tracer)
64897@@ -4013,10 +4016,9 @@ struct dentry *tracing_init_dentry(void)
64898 return d_tracer;
64899 }
64900
64901-static struct dentry *d_percpu;
64902-
64903 struct dentry *tracing_dentry_percpu(void)
64904 {
64905+ static struct dentry *d_percpu;
64906 static int once;
64907 struct dentry *d_tracer;
64908
64909diff -urNp linux-3.0.4/kernel/trace/trace_events.c linux-3.0.4/kernel/trace/trace_events.c
64910--- linux-3.0.4/kernel/trace/trace_events.c 2011-09-02 18:11:21.000000000 -0400
64911+++ linux-3.0.4/kernel/trace/trace_events.c 2011-08-23 21:47:56.000000000 -0400
64912@@ -1318,10 +1318,6 @@ static LIST_HEAD(ftrace_module_file_list
64913 struct ftrace_module_file_ops {
64914 struct list_head list;
64915 struct module *mod;
64916- struct file_operations id;
64917- struct file_operations enable;
64918- struct file_operations format;
64919- struct file_operations filter;
64920 };
64921
64922 static struct ftrace_module_file_ops *
64923@@ -1342,17 +1338,12 @@ trace_create_file_ops(struct module *mod
64924
64925 file_ops->mod = mod;
64926
64927- file_ops->id = ftrace_event_id_fops;
64928- file_ops->id.owner = mod;
64929-
64930- file_ops->enable = ftrace_enable_fops;
64931- file_ops->enable.owner = mod;
64932-
64933- file_ops->filter = ftrace_event_filter_fops;
64934- file_ops->filter.owner = mod;
64935-
64936- file_ops->format = ftrace_event_format_fops;
64937- file_ops->format.owner = mod;
64938+ pax_open_kernel();
64939+ *(void **)&mod->trace_id.owner = mod;
64940+ *(void **)&mod->trace_enable.owner = mod;
64941+ *(void **)&mod->trace_filter.owner = mod;
64942+ *(void **)&mod->trace_format.owner = mod;
64943+ pax_close_kernel();
64944
64945 list_add(&file_ops->list, &ftrace_module_file_list);
64946
64947@@ -1376,8 +1367,8 @@ static void trace_module_add_events(stru
64948
64949 for_each_event(call, start, end) {
64950 __trace_add_event_call(*call, mod,
64951- &file_ops->id, &file_ops->enable,
64952- &file_ops->filter, &file_ops->format);
64953+ &mod->trace_id, &mod->trace_enable,
64954+ &mod->trace_filter, &mod->trace_format);
64955 }
64956 }
64957
64958diff -urNp linux-3.0.4/kernel/trace/trace_kprobe.c linux-3.0.4/kernel/trace/trace_kprobe.c
64959--- linux-3.0.4/kernel/trace/trace_kprobe.c 2011-07-21 22:17:23.000000000 -0400
64960+++ linux-3.0.4/kernel/trace/trace_kprobe.c 2011-10-06 04:17:55.000000000 -0400
64961@@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(me
64962 long ret;
64963 int maxlen = get_rloc_len(*(u32 *)dest);
64964 u8 *dst = get_rloc_data(dest);
64965- u8 *src = addr;
64966+ const u8 __user *src = (const u8 __force_user *)addr;
64967 mm_segment_t old_fs = get_fs();
64968 if (!maxlen)
64969 return;
64970@@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(me
64971 pagefault_disable();
64972 do
64973 ret = __copy_from_user_inatomic(dst++, src++, 1);
64974- while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
64975+ while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
64976 dst[-1] = '\0';
64977 pagefault_enable();
64978 set_fs(old_fs);
64979@@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(me
64980 ((u8 *)get_rloc_data(dest))[0] = '\0';
64981 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
64982 } else
64983- *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
64984+ *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
64985 get_rloc_offs(*(u32 *)dest));
64986 }
64987 /* Return the length of string -- including null terminal byte */
64988@@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(me
64989 set_fs(KERNEL_DS);
64990 pagefault_disable();
64991 do {
64992- ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
64993+ ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
64994 len++;
64995 } while (c && ret == 0 && len < MAX_STRING_SIZE);
64996 pagefault_enable();
64997diff -urNp linux-3.0.4/kernel/trace/trace_mmiotrace.c linux-3.0.4/kernel/trace/trace_mmiotrace.c
64998--- linux-3.0.4/kernel/trace/trace_mmiotrace.c 2011-07-21 22:17:23.000000000 -0400
64999+++ linux-3.0.4/kernel/trace/trace_mmiotrace.c 2011-08-23 21:47:56.000000000 -0400
65000@@ -24,7 +24,7 @@ struct header_iter {
65001 static struct trace_array *mmio_trace_array;
65002 static bool overrun_detected;
65003 static unsigned long prev_overruns;
65004-static atomic_t dropped_count;
65005+static atomic_unchecked_t dropped_count;
65006
65007 static void mmio_reset_data(struct trace_array *tr)
65008 {
65009@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iter
65010
65011 static unsigned long count_overruns(struct trace_iterator *iter)
65012 {
65013- unsigned long cnt = atomic_xchg(&dropped_count, 0);
65014+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
65015 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
65016
65017 if (over > prev_overruns)
65018@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct
65019 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
65020 sizeof(*entry), 0, pc);
65021 if (!event) {
65022- atomic_inc(&dropped_count);
65023+ atomic_inc_unchecked(&dropped_count);
65024 return;
65025 }
65026 entry = ring_buffer_event_data(event);
65027@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct
65028 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
65029 sizeof(*entry), 0, pc);
65030 if (!event) {
65031- atomic_inc(&dropped_count);
65032+ atomic_inc_unchecked(&dropped_count);
65033 return;
65034 }
65035 entry = ring_buffer_event_data(event);
65036diff -urNp linux-3.0.4/kernel/trace/trace_output.c linux-3.0.4/kernel/trace/trace_output.c
65037--- linux-3.0.4/kernel/trace/trace_output.c 2011-07-21 22:17:23.000000000 -0400
65038+++ linux-3.0.4/kernel/trace/trace_output.c 2011-08-23 21:47:56.000000000 -0400
65039@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s,
65040
65041 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
65042 if (!IS_ERR(p)) {
65043- p = mangle_path(s->buffer + s->len, p, "\n");
65044+ p = mangle_path(s->buffer + s->len, p, "\n\\");
65045 if (p) {
65046 s->len = p - s->buffer;
65047 return 1;
65048diff -urNp linux-3.0.4/kernel/trace/trace_stack.c linux-3.0.4/kernel/trace/trace_stack.c
65049--- linux-3.0.4/kernel/trace/trace_stack.c 2011-07-21 22:17:23.000000000 -0400
65050+++ linux-3.0.4/kernel/trace/trace_stack.c 2011-08-23 21:47:56.000000000 -0400
65051@@ -50,7 +50,7 @@ static inline void check_stack(void)
65052 return;
65053
65054 /* we do not handle interrupt stacks yet */
65055- if (!object_is_on_stack(&this_size))
65056+ if (!object_starts_on_stack(&this_size))
65057 return;
65058
65059 local_irq_save(flags);
65060diff -urNp linux-3.0.4/kernel/trace/trace_workqueue.c linux-3.0.4/kernel/trace/trace_workqueue.c
65061--- linux-3.0.4/kernel/trace/trace_workqueue.c 2011-07-21 22:17:23.000000000 -0400
65062+++ linux-3.0.4/kernel/trace/trace_workqueue.c 2011-08-23 21:47:56.000000000 -0400
65063@@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
65064 int cpu;
65065 pid_t pid;
65066 /* Can be inserted from interrupt or user context, need to be atomic */
65067- atomic_t inserted;
65068+ atomic_unchecked_t inserted;
65069 /*
65070 * Don't need to be atomic, works are serialized in a single workqueue thread
65071 * on a single CPU.
65072@@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
65073 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
65074 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
65075 if (node->pid == wq_thread->pid) {
65076- atomic_inc(&node->inserted);
65077+ atomic_inc_unchecked(&node->inserted);
65078 goto found;
65079 }
65080 }
65081@@ -210,7 +210,7 @@ static int workqueue_stat_show(struct se
65082 tsk = get_pid_task(pid, PIDTYPE_PID);
65083 if (tsk) {
65084 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
65085- atomic_read(&cws->inserted), cws->executed,
65086+ atomic_read_unchecked(&cws->inserted), cws->executed,
65087 tsk->comm);
65088 put_task_struct(tsk);
65089 }
65090diff -urNp linux-3.0.4/lib/bitmap.c linux-3.0.4/lib/bitmap.c
65091--- linux-3.0.4/lib/bitmap.c 2011-07-21 22:17:23.000000000 -0400
65092+++ linux-3.0.4/lib/bitmap.c 2011-10-06 04:17:55.000000000 -0400
65093@@ -421,7 +421,7 @@ int __bitmap_parse(const char *buf, unsi
65094 {
65095 int c, old_c, totaldigits, ndigits, nchunks, nbits;
65096 u32 chunk;
65097- const char __user *ubuf = buf;
65098+ const char __user *ubuf = (const char __force_user *)buf;
65099
65100 bitmap_zero(maskp, nmaskbits);
65101
65102@@ -506,7 +506,7 @@ int bitmap_parse_user(const char __user
65103 {
65104 if (!access_ok(VERIFY_READ, ubuf, ulen))
65105 return -EFAULT;
65106- return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
65107+ return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1, maskp, nmaskbits);
65108 }
65109 EXPORT_SYMBOL(bitmap_parse_user);
65110
65111@@ -596,7 +596,7 @@ static int __bitmap_parselist(const char
65112 {
65113 unsigned a, b;
65114 int c, old_c, totaldigits;
65115- const char __user *ubuf = buf;
65116+ const char __user *ubuf = (const char __force_user *)buf;
65117 int exp_digit, in_range;
65118
65119 totaldigits = c = 0;
65120@@ -696,7 +696,7 @@ int bitmap_parselist_user(const char __u
65121 {
65122 if (!access_ok(VERIFY_READ, ubuf, ulen))
65123 return -EFAULT;
65124- return __bitmap_parselist((const char *)ubuf,
65125+ return __bitmap_parselist((const char __force_kernel *)ubuf,
65126 ulen, 1, maskp, nmaskbits);
65127 }
65128 EXPORT_SYMBOL(bitmap_parselist_user);
65129diff -urNp linux-3.0.4/lib/bug.c linux-3.0.4/lib/bug.c
65130--- linux-3.0.4/lib/bug.c 2011-07-21 22:17:23.000000000 -0400
65131+++ linux-3.0.4/lib/bug.c 2011-08-23 21:47:56.000000000 -0400
65132@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned l
65133 return BUG_TRAP_TYPE_NONE;
65134
65135 bug = find_bug(bugaddr);
65136+ if (!bug)
65137+ return BUG_TRAP_TYPE_NONE;
65138
65139 file = NULL;
65140 line = 0;
65141diff -urNp linux-3.0.4/lib/debugobjects.c linux-3.0.4/lib/debugobjects.c
65142--- linux-3.0.4/lib/debugobjects.c 2011-07-21 22:17:23.000000000 -0400
65143+++ linux-3.0.4/lib/debugobjects.c 2011-08-23 21:47:56.000000000 -0400
65144@@ -284,7 +284,7 @@ static void debug_object_is_on_stack(voi
65145 if (limit > 4)
65146 return;
65147
65148- is_on_stack = object_is_on_stack(addr);
65149+ is_on_stack = object_starts_on_stack(addr);
65150 if (is_on_stack == onstack)
65151 return;
65152
65153diff -urNp linux-3.0.4/lib/devres.c linux-3.0.4/lib/devres.c
65154--- linux-3.0.4/lib/devres.c 2011-07-21 22:17:23.000000000 -0400
65155+++ linux-3.0.4/lib/devres.c 2011-10-06 04:17:55.000000000 -0400
65156@@ -81,7 +81,7 @@ void devm_iounmap(struct device *dev, vo
65157 {
65158 iounmap(addr);
65159 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
65160- (void *)addr));
65161+ (void __force *)addr));
65162 }
65163 EXPORT_SYMBOL(devm_iounmap);
65164
65165@@ -141,7 +141,7 @@ void devm_ioport_unmap(struct device *de
65166 {
65167 ioport_unmap(addr);
65168 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
65169- devm_ioport_map_match, (void *)addr));
65170+ devm_ioport_map_match, (void __force *)addr));
65171 }
65172 EXPORT_SYMBOL(devm_ioport_unmap);
65173
65174diff -urNp linux-3.0.4/lib/dma-debug.c linux-3.0.4/lib/dma-debug.c
65175--- linux-3.0.4/lib/dma-debug.c 2011-07-21 22:17:23.000000000 -0400
65176+++ linux-3.0.4/lib/dma-debug.c 2011-08-23 21:47:56.000000000 -0400
65177@@ -870,7 +870,7 @@ out:
65178
65179 static void check_for_stack(struct device *dev, void *addr)
65180 {
65181- if (object_is_on_stack(addr))
65182+ if (object_starts_on_stack(addr))
65183 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
65184 "stack [addr=%p]\n", addr);
65185 }
65186diff -urNp linux-3.0.4/lib/extable.c linux-3.0.4/lib/extable.c
65187--- linux-3.0.4/lib/extable.c 2011-07-21 22:17:23.000000000 -0400
65188+++ linux-3.0.4/lib/extable.c 2011-08-23 21:47:56.000000000 -0400
65189@@ -13,6 +13,7 @@
65190 #include <linux/init.h>
65191 #include <linux/sort.h>
65192 #include <asm/uaccess.h>
65193+#include <asm/pgtable.h>
65194
65195 #ifndef ARCH_HAS_SORT_EXTABLE
65196 /*
65197@@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const v
65198 void sort_extable(struct exception_table_entry *start,
65199 struct exception_table_entry *finish)
65200 {
65201+ pax_open_kernel();
65202 sort(start, finish - start, sizeof(struct exception_table_entry),
65203 cmp_ex, NULL);
65204+ pax_close_kernel();
65205 }
65206
65207 #ifdef CONFIG_MODULES
65208diff -urNp linux-3.0.4/lib/inflate.c linux-3.0.4/lib/inflate.c
65209--- linux-3.0.4/lib/inflate.c 2011-07-21 22:17:23.000000000 -0400
65210+++ linux-3.0.4/lib/inflate.c 2011-08-23 21:47:56.000000000 -0400
65211@@ -269,7 +269,7 @@ static void free(void *where)
65212 malloc_ptr = free_mem_ptr;
65213 }
65214 #else
65215-#define malloc(a) kmalloc(a, GFP_KERNEL)
65216+#define malloc(a) kmalloc((a), GFP_KERNEL)
65217 #define free(a) kfree(a)
65218 #endif
65219
65220diff -urNp linux-3.0.4/lib/Kconfig.debug linux-3.0.4/lib/Kconfig.debug
65221--- linux-3.0.4/lib/Kconfig.debug 2011-07-21 22:17:23.000000000 -0400
65222+++ linux-3.0.4/lib/Kconfig.debug 2011-08-23 21:48:14.000000000 -0400
65223@@ -1088,6 +1088,7 @@ config LATENCYTOP
65224 depends on DEBUG_KERNEL
65225 depends on STACKTRACE_SUPPORT
65226 depends on PROC_FS
65227+ depends on !GRKERNSEC_HIDESYM
65228 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
65229 select KALLSYMS
65230 select KALLSYMS_ALL
65231diff -urNp linux-3.0.4/lib/kref.c linux-3.0.4/lib/kref.c
65232--- linux-3.0.4/lib/kref.c 2011-07-21 22:17:23.000000000 -0400
65233+++ linux-3.0.4/lib/kref.c 2011-08-23 21:47:56.000000000 -0400
65234@@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
65235 */
65236 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
65237 {
65238- WARN_ON(release == NULL);
65239+ BUG_ON(release == NULL);
65240 WARN_ON(release == (void (*)(struct kref *))kfree);
65241
65242 if (atomic_dec_and_test(&kref->refcount)) {
65243diff -urNp linux-3.0.4/lib/radix-tree.c linux-3.0.4/lib/radix-tree.c
65244--- linux-3.0.4/lib/radix-tree.c 2011-07-21 22:17:23.000000000 -0400
65245+++ linux-3.0.4/lib/radix-tree.c 2011-08-23 21:47:56.000000000 -0400
65246@@ -80,7 +80,7 @@ struct radix_tree_preload {
65247 int nr;
65248 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
65249 };
65250-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
65251+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
65252
65253 static inline void *ptr_to_indirect(void *ptr)
65254 {
65255diff -urNp linux-3.0.4/lib/vsprintf.c linux-3.0.4/lib/vsprintf.c
65256--- linux-3.0.4/lib/vsprintf.c 2011-07-21 22:17:23.000000000 -0400
65257+++ linux-3.0.4/lib/vsprintf.c 2011-08-23 21:48:14.000000000 -0400
65258@@ -16,6 +16,9 @@
65259 * - scnprintf and vscnprintf
65260 */
65261
65262+#ifdef CONFIG_GRKERNSEC_HIDESYM
65263+#define __INCLUDED_BY_HIDESYM 1
65264+#endif
65265 #include <stdarg.h>
65266 #include <linux/module.h>
65267 #include <linux/types.h>
65268@@ -435,7 +438,7 @@ char *symbol_string(char *buf, char *end
65269 char sym[KSYM_SYMBOL_LEN];
65270 if (ext == 'B')
65271 sprint_backtrace(sym, value);
65272- else if (ext != 'f' && ext != 's')
65273+ else if (ext != 'f' && ext != 's' && ext != 'a')
65274 sprint_symbol(sym, value);
65275 else
65276 kallsyms_lookup(value, NULL, NULL, NULL, sym);
65277@@ -799,7 +802,11 @@ char *uuid_string(char *buf, char *end,
65278 return string(buf, end, uuid, spec);
65279 }
65280
65281+#ifdef CONFIG_GRKERNSEC_HIDESYM
65282+int kptr_restrict __read_mostly = 2;
65283+#else
65284 int kptr_restrict __read_mostly;
65285+#endif
65286
65287 /*
65288 * Show a '%p' thing. A kernel extension is that the '%p' is followed
65289@@ -813,6 +820,8 @@ int kptr_restrict __read_mostly;
65290 * - 'S' For symbolic direct pointers with offset
65291 * - 's' For symbolic direct pointers without offset
65292 * - 'B' For backtraced symbolic direct pointers with offset
65293+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
65294+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
65295 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
65296 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
65297 * - 'M' For a 6-byte MAC address, it prints the address in the
65298@@ -857,12 +866,12 @@ char *pointer(const char *fmt, char *buf
65299 {
65300 if (!ptr && *fmt != 'K') {
65301 /*
65302- * Print (null) with the same width as a pointer so it makes
65303+ * Print (nil) with the same width as a pointer so it makes
65304 * tabular output look nice.
65305 */
65306 if (spec.field_width == -1)
65307 spec.field_width = 2 * sizeof(void *);
65308- return string(buf, end, "(null)", spec);
65309+ return string(buf, end, "(nil)", spec);
65310 }
65311
65312 switch (*fmt) {
65313@@ -872,6 +881,13 @@ char *pointer(const char *fmt, char *buf
65314 /* Fallthrough */
65315 case 'S':
65316 case 's':
65317+#ifdef CONFIG_GRKERNSEC_HIDESYM
65318+ break;
65319+#else
65320+ return symbol_string(buf, end, ptr, spec, *fmt);
65321+#endif
65322+ case 'A':
65323+ case 'a':
65324 case 'B':
65325 return symbol_string(buf, end, ptr, spec, *fmt);
65326 case 'R':
65327@@ -1631,11 +1647,11 @@ int bstr_printf(char *buf, size_t size,
65328 typeof(type) value; \
65329 if (sizeof(type) == 8) { \
65330 args = PTR_ALIGN(args, sizeof(u32)); \
65331- *(u32 *)&value = *(u32 *)args; \
65332- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
65333+ *(u32 *)&value = *(const u32 *)args; \
65334+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
65335 } else { \
65336 args = PTR_ALIGN(args, sizeof(type)); \
65337- value = *(typeof(type) *)args; \
65338+ value = *(const typeof(type) *)args; \
65339 } \
65340 args += sizeof(type); \
65341 value; \
65342@@ -1698,7 +1714,7 @@ int bstr_printf(char *buf, size_t size,
65343 case FORMAT_TYPE_STR: {
65344 const char *str_arg = args;
65345 args += strlen(str_arg) + 1;
65346- str = string(str, end, (char *)str_arg, spec);
65347+ str = string(str, end, str_arg, spec);
65348 break;
65349 }
65350
65351diff -urNp linux-3.0.4/localversion-grsec linux-3.0.4/localversion-grsec
65352--- linux-3.0.4/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
65353+++ linux-3.0.4/localversion-grsec 2011-08-23 21:48:14.000000000 -0400
65354@@ -0,0 +1 @@
65355+-grsec
65356diff -urNp linux-3.0.4/Makefile linux-3.0.4/Makefile
65357--- linux-3.0.4/Makefile 2011-09-02 18:11:26.000000000 -0400
65358+++ linux-3.0.4/Makefile 2011-10-07 19:29:57.000000000 -0400
65359@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
65360
65361 HOSTCC = gcc
65362 HOSTCXX = g++
65363-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
65364-HOSTCXXFLAGS = -O2
65365+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
65366+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
65367+HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
65368
65369 # Decide whether to build built-in, modular, or both.
65370 # Normally, just do built-in.
65371@@ -365,10 +366,12 @@ LINUXINCLUDE := -I$(srctree)/arch/$(h
65372 KBUILD_CPPFLAGS := -D__KERNEL__
65373
65374 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
65375+ -W -Wno-unused-parameter -Wno-missing-field-initializers \
65376 -fno-strict-aliasing -fno-common \
65377 -Werror-implicit-function-declaration \
65378 -Wno-format-security \
65379 -fno-delete-null-pointer-checks
65380+KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
65381 KBUILD_AFLAGS_KERNEL :=
65382 KBUILD_CFLAGS_KERNEL :=
65383 KBUILD_AFLAGS := -D__ASSEMBLY__
65384@@ -407,8 +410,8 @@ export RCS_TAR_IGNORE := --exclude SCCS
65385 # Rules shared between *config targets and build targets
65386
65387 # Basic helpers built in scripts/
65388-PHONY += scripts_basic
65389-scripts_basic:
65390+PHONY += scripts_basic gcc-plugins
65391+scripts_basic: gcc-plugins
65392 $(Q)$(MAKE) $(build)=scripts/basic
65393 $(Q)rm -f .tmp_quiet_recordmcount
65394
65395@@ -564,6 +567,36 @@ else
65396 KBUILD_CFLAGS += -O2
65397 endif
65398
65399+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
65400+CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
65401+ifdef CONFIG_PAX_MEMORY_STACKLEAK
65402+STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -fplugin-arg-stackleak_plugin-track-lowest-sp=100
65403+endif
65404+ifdef CONFIG_KALLOCSTAT_PLUGIN
65405+KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
65406+endif
65407+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
65408+KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
65409+endif
65410+ifdef CONFIG_CHECKER_PLUGIN
65411+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
65412+CHECKER_PLUGIN := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
65413+endif
65414+endif
65415+GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN) $(CHECKER_PLUGIN)
65416+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
65417+gcc-plugins:
65418+ $(Q)$(MAKE) $(build)=tools/gcc
65419+else
65420+gcc-plugins:
65421+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
65422+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev.))
65423+else
65424+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
65425+endif
65426+ $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
65427+endif
65428+
65429 include $(srctree)/arch/$(SRCARCH)/Makefile
65430
65431 ifneq ($(CONFIG_FRAME_WARN),0)
65432@@ -708,7 +741,7 @@ export mod_strip_cmd
65433
65434
65435 ifeq ($(KBUILD_EXTMOD),)
65436-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
65437+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
65438
65439 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
65440 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
65441@@ -932,6 +965,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-mai
65442
65443 # The actual objects are generated when descending,
65444 # make sure no implicit rule kicks in
65445+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS)
65446 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
65447
65448 # Handle descending into subdirectories listed in $(vmlinux-dirs)
65449@@ -941,7 +975,7 @@ $(sort $(vmlinux-init) $(vmlinux-main))
65450 # Error messages still appears in the original language
65451
65452 PHONY += $(vmlinux-dirs)
65453-$(vmlinux-dirs): prepare scripts
65454+$(vmlinux-dirs): gcc-plugins prepare scripts
65455 $(Q)$(MAKE) $(build)=$@
65456
65457 # Store (new) KERNELRELASE string in include/config/kernel.release
65458@@ -986,6 +1020,7 @@ prepare0: archprepare FORCE
65459 $(Q)$(MAKE) $(build)=. missing-syscalls
65460
65461 # All the preparing..
65462+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS))
65463 prepare: prepare0
65464
65465 # Generate some files
65466@@ -1087,6 +1122,7 @@ all: modules
65467 # using awk while concatenating to the final file.
65468
65469 PHONY += modules
65470+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
65471 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
65472 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
65473 @$(kecho) ' Building modules, stage 2.';
65474@@ -1102,7 +1138,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modu
65475
65476 # Target to prepare building external modules
65477 PHONY += modules_prepare
65478-modules_prepare: prepare scripts
65479+modules_prepare: gcc-plugins prepare scripts
65480
65481 # Target to install modules
65482 PHONY += modules_install
65483@@ -1198,7 +1234,7 @@ distclean: mrproper
65484 @find $(srctree) $(RCS_FIND_IGNORE) \
65485 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
65486 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
65487- -o -name '.*.rej' -o -size 0 \
65488+ -o -name '.*.rej' -o -name '*.so' -o -size 0 \
65489 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
65490 -type f -print | xargs rm -f
65491
65492@@ -1359,6 +1395,7 @@ PHONY += $(module-dirs) modules
65493 $(module-dirs): crmodverdir $(objtree)/Module.symvers
65494 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
65495
65496+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
65497 modules: $(module-dirs)
65498 @$(kecho) ' Building modules, stage 2.';
65499 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
65500@@ -1485,17 +1522,19 @@ else
65501 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
65502 endif
65503
65504-%.s: %.c prepare scripts FORCE
65505+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS)
65506+%.s: %.c gcc-plugins prepare scripts FORCE
65507 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
65508 %.i: %.c prepare scripts FORCE
65509 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
65510-%.o: %.c prepare scripts FORCE
65511+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS)
65512+%.o: %.c gcc-plugins prepare scripts FORCE
65513 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
65514 %.lst: %.c prepare scripts FORCE
65515 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
65516-%.s: %.S prepare scripts FORCE
65517+%.s: %.S gcc-plugins prepare scripts FORCE
65518 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
65519-%.o: %.S prepare scripts FORCE
65520+%.o: %.S gcc-plugins prepare scripts FORCE
65521 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
65522 %.symtypes: %.c prepare scripts FORCE
65523 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
65524@@ -1505,11 +1544,13 @@ endif
65525 $(cmd_crmodverdir)
65526 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
65527 $(build)=$(build-dir)
65528-%/: prepare scripts FORCE
65529+%/: KBUILD_CFLAGS += $(GCC_PLUGINS)
65530+%/: gcc-plugins prepare scripts FORCE
65531 $(cmd_crmodverdir)
65532 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
65533 $(build)=$(build-dir)
65534-%.ko: prepare scripts FORCE
65535+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS)
65536+%.ko: gcc-plugins prepare scripts FORCE
65537 $(cmd_crmodverdir)
65538 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
65539 $(build)=$(build-dir) $(@:.ko=.o)
65540diff -urNp linux-3.0.4/mm/filemap.c linux-3.0.4/mm/filemap.c
65541--- linux-3.0.4/mm/filemap.c 2011-07-21 22:17:23.000000000 -0400
65542+++ linux-3.0.4/mm/filemap.c 2011-08-23 21:48:14.000000000 -0400
65543@@ -1763,7 +1763,7 @@ int generic_file_mmap(struct file * file
65544 struct address_space *mapping = file->f_mapping;
65545
65546 if (!mapping->a_ops->readpage)
65547- return -ENOEXEC;
65548+ return -ENODEV;
65549 file_accessed(file);
65550 vma->vm_ops = &generic_file_vm_ops;
65551 vma->vm_flags |= VM_CAN_NONLINEAR;
65552@@ -2169,6 +2169,7 @@ inline int generic_write_checks(struct f
65553 *pos = i_size_read(inode);
65554
65555 if (limit != RLIM_INFINITY) {
65556+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
65557 if (*pos >= limit) {
65558 send_sig(SIGXFSZ, current, 0);
65559 return -EFBIG;
65560diff -urNp linux-3.0.4/mm/fremap.c linux-3.0.4/mm/fremap.c
65561--- linux-3.0.4/mm/fremap.c 2011-07-21 22:17:23.000000000 -0400
65562+++ linux-3.0.4/mm/fremap.c 2011-08-23 21:47:56.000000000 -0400
65563@@ -156,6 +156,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
65564 retry:
65565 vma = find_vma(mm, start);
65566
65567+#ifdef CONFIG_PAX_SEGMEXEC
65568+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
65569+ goto out;
65570+#endif
65571+
65572 /*
65573 * Make sure the vma is shared, that it supports prefaulting,
65574 * and that the remapped range is valid and fully within
65575diff -urNp linux-3.0.4/mm/highmem.c linux-3.0.4/mm/highmem.c
65576--- linux-3.0.4/mm/highmem.c 2011-07-21 22:17:23.000000000 -0400
65577+++ linux-3.0.4/mm/highmem.c 2011-08-23 21:47:56.000000000 -0400
65578@@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
65579 * So no dangers, even with speculative execution.
65580 */
65581 page = pte_page(pkmap_page_table[i]);
65582+ pax_open_kernel();
65583 pte_clear(&init_mm, (unsigned long)page_address(page),
65584 &pkmap_page_table[i]);
65585-
65586+ pax_close_kernel();
65587 set_page_address(page, NULL);
65588 need_flush = 1;
65589 }
65590@@ -186,9 +187,11 @@ start:
65591 }
65592 }
65593 vaddr = PKMAP_ADDR(last_pkmap_nr);
65594+
65595+ pax_open_kernel();
65596 set_pte_at(&init_mm, vaddr,
65597 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
65598-
65599+ pax_close_kernel();
65600 pkmap_count[last_pkmap_nr] = 1;
65601 set_page_address(page, (void *)vaddr);
65602
65603diff -urNp linux-3.0.4/mm/huge_memory.c linux-3.0.4/mm/huge_memory.c
65604--- linux-3.0.4/mm/huge_memory.c 2011-07-21 22:17:23.000000000 -0400
65605+++ linux-3.0.4/mm/huge_memory.c 2011-08-23 21:47:56.000000000 -0400
65606@@ -702,7 +702,7 @@ out:
65607 * run pte_offset_map on the pmd, if an huge pmd could
65608 * materialize from under us from a different thread.
65609 */
65610- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
65611+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
65612 return VM_FAULT_OOM;
65613 /* if an huge pmd materialized from under us just retry later */
65614 if (unlikely(pmd_trans_huge(*pmd)))
65615diff -urNp linux-3.0.4/mm/hugetlb.c linux-3.0.4/mm/hugetlb.c
65616--- linux-3.0.4/mm/hugetlb.c 2011-07-21 22:17:23.000000000 -0400
65617+++ linux-3.0.4/mm/hugetlb.c 2011-08-23 21:47:56.000000000 -0400
65618@@ -2339,6 +2339,27 @@ static int unmap_ref_private(struct mm_s
65619 return 1;
65620 }
65621
65622+#ifdef CONFIG_PAX_SEGMEXEC
65623+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
65624+{
65625+ struct mm_struct *mm = vma->vm_mm;
65626+ struct vm_area_struct *vma_m;
65627+ unsigned long address_m;
65628+ pte_t *ptep_m;
65629+
65630+ vma_m = pax_find_mirror_vma(vma);
65631+ if (!vma_m)
65632+ return;
65633+
65634+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
65635+ address_m = address + SEGMEXEC_TASK_SIZE;
65636+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
65637+ get_page(page_m);
65638+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
65639+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
65640+}
65641+#endif
65642+
65643 /*
65644 * Hugetlb_cow() should be called with page lock of the original hugepage held.
65645 */
65646@@ -2440,6 +2461,11 @@ retry_avoidcopy:
65647 make_huge_pte(vma, new_page, 1));
65648 page_remove_rmap(old_page);
65649 hugepage_add_new_anon_rmap(new_page, vma, address);
65650+
65651+#ifdef CONFIG_PAX_SEGMEXEC
65652+ pax_mirror_huge_pte(vma, address, new_page);
65653+#endif
65654+
65655 /* Make the old page be freed below */
65656 new_page = old_page;
65657 mmu_notifier_invalidate_range_end(mm,
65658@@ -2591,6 +2617,10 @@ retry:
65659 && (vma->vm_flags & VM_SHARED)));
65660 set_huge_pte_at(mm, address, ptep, new_pte);
65661
65662+#ifdef CONFIG_PAX_SEGMEXEC
65663+ pax_mirror_huge_pte(vma, address, page);
65664+#endif
65665+
65666 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
65667 /* Optimization, do the COW without a second fault */
65668 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
65669@@ -2620,6 +2650,10 @@ int hugetlb_fault(struct mm_struct *mm,
65670 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
65671 struct hstate *h = hstate_vma(vma);
65672
65673+#ifdef CONFIG_PAX_SEGMEXEC
65674+ struct vm_area_struct *vma_m;
65675+#endif
65676+
65677 ptep = huge_pte_offset(mm, address);
65678 if (ptep) {
65679 entry = huge_ptep_get(ptep);
65680@@ -2631,6 +2665,26 @@ int hugetlb_fault(struct mm_struct *mm,
65681 VM_FAULT_SET_HINDEX(h - hstates);
65682 }
65683
65684+#ifdef CONFIG_PAX_SEGMEXEC
65685+ vma_m = pax_find_mirror_vma(vma);
65686+ if (vma_m) {
65687+ unsigned long address_m;
65688+
65689+ if (vma->vm_start > vma_m->vm_start) {
65690+ address_m = address;
65691+ address -= SEGMEXEC_TASK_SIZE;
65692+ vma = vma_m;
65693+ h = hstate_vma(vma);
65694+ } else
65695+ address_m = address + SEGMEXEC_TASK_SIZE;
65696+
65697+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
65698+ return VM_FAULT_OOM;
65699+ address_m &= HPAGE_MASK;
65700+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
65701+ }
65702+#endif
65703+
65704 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
65705 if (!ptep)
65706 return VM_FAULT_OOM;
65707diff -urNp linux-3.0.4/mm/internal.h linux-3.0.4/mm/internal.h
65708--- linux-3.0.4/mm/internal.h 2011-07-21 22:17:23.000000000 -0400
65709+++ linux-3.0.4/mm/internal.h 2011-08-23 21:47:56.000000000 -0400
65710@@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
65711 * in mm/page_alloc.c
65712 */
65713 extern void __free_pages_bootmem(struct page *page, unsigned int order);
65714+extern void free_compound_page(struct page *page);
65715 extern void prep_compound_page(struct page *page, unsigned long order);
65716 #ifdef CONFIG_MEMORY_FAILURE
65717 extern bool is_free_buddy_page(struct page *page);
65718diff -urNp linux-3.0.4/mm/Kconfig linux-3.0.4/mm/Kconfig
65719--- linux-3.0.4/mm/Kconfig 2011-07-21 22:17:23.000000000 -0400
65720+++ linux-3.0.4/mm/Kconfig 2011-08-23 21:48:14.000000000 -0400
65721@@ -240,7 +240,7 @@ config KSM
65722 config DEFAULT_MMAP_MIN_ADDR
65723 int "Low address space to protect from user allocation"
65724 depends on MMU
65725- default 4096
65726+ default 65536
65727 help
65728 This is the portion of low virtual memory which should be protected
65729 from userspace allocation. Keeping a user from writing to low pages
65730diff -urNp linux-3.0.4/mm/kmemleak.c linux-3.0.4/mm/kmemleak.c
65731--- linux-3.0.4/mm/kmemleak.c 2011-07-21 22:17:23.000000000 -0400
65732+++ linux-3.0.4/mm/kmemleak.c 2011-08-23 21:48:14.000000000 -0400
65733@@ -357,7 +357,7 @@ static void print_unreferenced(struct se
65734
65735 for (i = 0; i < object->trace_len; i++) {
65736 void *ptr = (void *)object->trace[i];
65737- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
65738+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
65739 }
65740 }
65741
65742diff -urNp linux-3.0.4/mm/maccess.c linux-3.0.4/mm/maccess.c
65743--- linux-3.0.4/mm/maccess.c 2011-07-21 22:17:23.000000000 -0400
65744+++ linux-3.0.4/mm/maccess.c 2011-10-06 04:17:55.000000000 -0400
65745@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, cons
65746 set_fs(KERNEL_DS);
65747 pagefault_disable();
65748 ret = __copy_from_user_inatomic(dst,
65749- (__force const void __user *)src, size);
65750+ (const void __force_user *)src, size);
65751 pagefault_enable();
65752 set_fs(old_fs);
65753
65754@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, con
65755
65756 set_fs(KERNEL_DS);
65757 pagefault_disable();
65758- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
65759+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
65760 pagefault_enable();
65761 set_fs(old_fs);
65762
65763diff -urNp linux-3.0.4/mm/madvise.c linux-3.0.4/mm/madvise.c
65764--- linux-3.0.4/mm/madvise.c 2011-07-21 22:17:23.000000000 -0400
65765+++ linux-3.0.4/mm/madvise.c 2011-08-23 21:47:56.000000000 -0400
65766@@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_a
65767 pgoff_t pgoff;
65768 unsigned long new_flags = vma->vm_flags;
65769
65770+#ifdef CONFIG_PAX_SEGMEXEC
65771+ struct vm_area_struct *vma_m;
65772+#endif
65773+
65774 switch (behavior) {
65775 case MADV_NORMAL:
65776 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
65777@@ -110,6 +114,13 @@ success:
65778 /*
65779 * vm_flags is protected by the mmap_sem held in write mode.
65780 */
65781+
65782+#ifdef CONFIG_PAX_SEGMEXEC
65783+ vma_m = pax_find_mirror_vma(vma);
65784+ if (vma_m)
65785+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
65786+#endif
65787+
65788 vma->vm_flags = new_flags;
65789
65790 out:
65791@@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_a
65792 struct vm_area_struct ** prev,
65793 unsigned long start, unsigned long end)
65794 {
65795+
65796+#ifdef CONFIG_PAX_SEGMEXEC
65797+ struct vm_area_struct *vma_m;
65798+#endif
65799+
65800 *prev = vma;
65801 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
65802 return -EINVAL;
65803@@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_a
65804 zap_page_range(vma, start, end - start, &details);
65805 } else
65806 zap_page_range(vma, start, end - start, NULL);
65807+
65808+#ifdef CONFIG_PAX_SEGMEXEC
65809+ vma_m = pax_find_mirror_vma(vma);
65810+ if (vma_m) {
65811+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
65812+ struct zap_details details = {
65813+ .nonlinear_vma = vma_m,
65814+ .last_index = ULONG_MAX,
65815+ };
65816+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
65817+ } else
65818+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
65819+ }
65820+#endif
65821+
65822 return 0;
65823 }
65824
65825@@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
65826 if (end < start)
65827 goto out;
65828
65829+#ifdef CONFIG_PAX_SEGMEXEC
65830+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
65831+ if (end > SEGMEXEC_TASK_SIZE)
65832+ goto out;
65833+ } else
65834+#endif
65835+
65836+ if (end > TASK_SIZE)
65837+ goto out;
65838+
65839 error = 0;
65840 if (end == start)
65841 goto out;
65842diff -urNp linux-3.0.4/mm/memory.c linux-3.0.4/mm/memory.c
65843--- linux-3.0.4/mm/memory.c 2011-09-02 18:11:21.000000000 -0400
65844+++ linux-3.0.4/mm/memory.c 2011-08-23 21:47:56.000000000 -0400
65845@@ -457,8 +457,12 @@ static inline void free_pmd_range(struct
65846 return;
65847
65848 pmd = pmd_offset(pud, start);
65849+
65850+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
65851 pud_clear(pud);
65852 pmd_free_tlb(tlb, pmd, start);
65853+#endif
65854+
65855 }
65856
65857 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
65858@@ -489,9 +493,12 @@ static inline void free_pud_range(struct
65859 if (end - 1 > ceiling - 1)
65860 return;
65861
65862+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
65863 pud = pud_offset(pgd, start);
65864 pgd_clear(pgd);
65865 pud_free_tlb(tlb, pud, start);
65866+#endif
65867+
65868 }
65869
65870 /*
65871@@ -1577,12 +1584,6 @@ no_page_table:
65872 return page;
65873 }
65874
65875-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
65876-{
65877- return stack_guard_page_start(vma, addr) ||
65878- stack_guard_page_end(vma, addr+PAGE_SIZE);
65879-}
65880-
65881 /**
65882 * __get_user_pages() - pin user pages in memory
65883 * @tsk: task_struct of target task
65884@@ -1655,10 +1656,10 @@ int __get_user_pages(struct task_struct
65885 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
65886 i = 0;
65887
65888- do {
65889+ while (nr_pages) {
65890 struct vm_area_struct *vma;
65891
65892- vma = find_extend_vma(mm, start);
65893+ vma = find_vma(mm, start);
65894 if (!vma && in_gate_area(mm, start)) {
65895 unsigned long pg = start & PAGE_MASK;
65896 pgd_t *pgd;
65897@@ -1706,7 +1707,7 @@ int __get_user_pages(struct task_struct
65898 goto next_page;
65899 }
65900
65901- if (!vma ||
65902+ if (!vma || start < vma->vm_start ||
65903 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
65904 !(vm_flags & vma->vm_flags))
65905 return i ? : -EFAULT;
65906@@ -1733,11 +1734,6 @@ int __get_user_pages(struct task_struct
65907 int ret;
65908 unsigned int fault_flags = 0;
65909
65910- /* For mlock, just skip the stack guard page. */
65911- if (foll_flags & FOLL_MLOCK) {
65912- if (stack_guard_page(vma, start))
65913- goto next_page;
65914- }
65915 if (foll_flags & FOLL_WRITE)
65916 fault_flags |= FAULT_FLAG_WRITE;
65917 if (nonblocking)
65918@@ -1811,7 +1807,7 @@ next_page:
65919 start += PAGE_SIZE;
65920 nr_pages--;
65921 } while (nr_pages && start < vma->vm_end);
65922- } while (nr_pages);
65923+ }
65924 return i;
65925 }
65926 EXPORT_SYMBOL(__get_user_pages);
65927@@ -2018,6 +2014,10 @@ static int insert_page(struct vm_area_st
65928 page_add_file_rmap(page);
65929 set_pte_at(mm, addr, pte, mk_pte(page, prot));
65930
65931+#ifdef CONFIG_PAX_SEGMEXEC
65932+ pax_mirror_file_pte(vma, addr, page, ptl);
65933+#endif
65934+
65935 retval = 0;
65936 pte_unmap_unlock(pte, ptl);
65937 return retval;
65938@@ -2052,10 +2052,22 @@ out:
65939 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
65940 struct page *page)
65941 {
65942+
65943+#ifdef CONFIG_PAX_SEGMEXEC
65944+ struct vm_area_struct *vma_m;
65945+#endif
65946+
65947 if (addr < vma->vm_start || addr >= vma->vm_end)
65948 return -EFAULT;
65949 if (!page_count(page))
65950 return -EINVAL;
65951+
65952+#ifdef CONFIG_PAX_SEGMEXEC
65953+ vma_m = pax_find_mirror_vma(vma);
65954+ if (vma_m)
65955+ vma_m->vm_flags |= VM_INSERTPAGE;
65956+#endif
65957+
65958 vma->vm_flags |= VM_INSERTPAGE;
65959 return insert_page(vma, addr, page, vma->vm_page_prot);
65960 }
65961@@ -2141,6 +2153,7 @@ int vm_insert_mixed(struct vm_area_struc
65962 unsigned long pfn)
65963 {
65964 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
65965+ BUG_ON(vma->vm_mirror);
65966
65967 if (addr < vma->vm_start || addr >= vma->vm_end)
65968 return -EFAULT;
65969@@ -2456,6 +2469,186 @@ static inline void cow_user_page(struct
65970 copy_user_highpage(dst, src, va, vma);
65971 }
65972
65973+#ifdef CONFIG_PAX_SEGMEXEC
65974+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
65975+{
65976+ struct mm_struct *mm = vma->vm_mm;
65977+ spinlock_t *ptl;
65978+ pte_t *pte, entry;
65979+
65980+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
65981+ entry = *pte;
65982+ if (!pte_present(entry)) {
65983+ if (!pte_none(entry)) {
65984+ BUG_ON(pte_file(entry));
65985+ free_swap_and_cache(pte_to_swp_entry(entry));
65986+ pte_clear_not_present_full(mm, address, pte, 0);
65987+ }
65988+ } else {
65989+ struct page *page;
65990+
65991+ flush_cache_page(vma, address, pte_pfn(entry));
65992+ entry = ptep_clear_flush(vma, address, pte);
65993+ BUG_ON(pte_dirty(entry));
65994+ page = vm_normal_page(vma, address, entry);
65995+ if (page) {
65996+ update_hiwater_rss(mm);
65997+ if (PageAnon(page))
65998+ dec_mm_counter_fast(mm, MM_ANONPAGES);
65999+ else
66000+ dec_mm_counter_fast(mm, MM_FILEPAGES);
66001+ page_remove_rmap(page);
66002+ page_cache_release(page);
66003+ }
66004+ }
66005+ pte_unmap_unlock(pte, ptl);
66006+}
66007+
66008+/* PaX: if vma is mirrored, synchronize the mirror's PTE
66009+ *
66010+ * the ptl of the lower mapped page is held on entry and is not released on exit
66011+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
66012+ */
66013+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
66014+{
66015+ struct mm_struct *mm = vma->vm_mm;
66016+ unsigned long address_m;
66017+ spinlock_t *ptl_m;
66018+ struct vm_area_struct *vma_m;
66019+ pmd_t *pmd_m;
66020+ pte_t *pte_m, entry_m;
66021+
66022+ BUG_ON(!page_m || !PageAnon(page_m));
66023+
66024+ vma_m = pax_find_mirror_vma(vma);
66025+ if (!vma_m)
66026+ return;
66027+
66028+ BUG_ON(!PageLocked(page_m));
66029+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
66030+ address_m = address + SEGMEXEC_TASK_SIZE;
66031+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
66032+ pte_m = pte_offset_map(pmd_m, address_m);
66033+ ptl_m = pte_lockptr(mm, pmd_m);
66034+ if (ptl != ptl_m) {
66035+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
66036+ if (!pte_none(*pte_m))
66037+ goto out;
66038+ }
66039+
66040+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
66041+ page_cache_get(page_m);
66042+ page_add_anon_rmap(page_m, vma_m, address_m);
66043+ inc_mm_counter_fast(mm, MM_ANONPAGES);
66044+ set_pte_at(mm, address_m, pte_m, entry_m);
66045+ update_mmu_cache(vma_m, address_m, entry_m);
66046+out:
66047+ if (ptl != ptl_m)
66048+ spin_unlock(ptl_m);
66049+ pte_unmap(pte_m);
66050+ unlock_page(page_m);
66051+}
66052+
66053+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
66054+{
66055+ struct mm_struct *mm = vma->vm_mm;
66056+ unsigned long address_m;
66057+ spinlock_t *ptl_m;
66058+ struct vm_area_struct *vma_m;
66059+ pmd_t *pmd_m;
66060+ pte_t *pte_m, entry_m;
66061+
66062+ BUG_ON(!page_m || PageAnon(page_m));
66063+
66064+ vma_m = pax_find_mirror_vma(vma);
66065+ if (!vma_m)
66066+ return;
66067+
66068+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
66069+ address_m = address + SEGMEXEC_TASK_SIZE;
66070+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
66071+ pte_m = pte_offset_map(pmd_m, address_m);
66072+ ptl_m = pte_lockptr(mm, pmd_m);
66073+ if (ptl != ptl_m) {
66074+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
66075+ if (!pte_none(*pte_m))
66076+ goto out;
66077+ }
66078+
66079+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
66080+ page_cache_get(page_m);
66081+ page_add_file_rmap(page_m);
66082+ inc_mm_counter_fast(mm, MM_FILEPAGES);
66083+ set_pte_at(mm, address_m, pte_m, entry_m);
66084+ update_mmu_cache(vma_m, address_m, entry_m);
66085+out:
66086+ if (ptl != ptl_m)
66087+ spin_unlock(ptl_m);
66088+ pte_unmap(pte_m);
66089+}
66090+
66091+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
66092+{
66093+ struct mm_struct *mm = vma->vm_mm;
66094+ unsigned long address_m;
66095+ spinlock_t *ptl_m;
66096+ struct vm_area_struct *vma_m;
66097+ pmd_t *pmd_m;
66098+ pte_t *pte_m, entry_m;
66099+
66100+ vma_m = pax_find_mirror_vma(vma);
66101+ if (!vma_m)
66102+ return;
66103+
66104+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
66105+ address_m = address + SEGMEXEC_TASK_SIZE;
66106+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
66107+ pte_m = pte_offset_map(pmd_m, address_m);
66108+ ptl_m = pte_lockptr(mm, pmd_m);
66109+ if (ptl != ptl_m) {
66110+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
66111+ if (!pte_none(*pte_m))
66112+ goto out;
66113+ }
66114+
66115+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
66116+ set_pte_at(mm, address_m, pte_m, entry_m);
66117+out:
66118+ if (ptl != ptl_m)
66119+ spin_unlock(ptl_m);
66120+ pte_unmap(pte_m);
66121+}
66122+
66123+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
66124+{
66125+ struct page *page_m;
66126+ pte_t entry;
66127+
66128+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
66129+ goto out;
66130+
66131+ entry = *pte;
66132+ page_m = vm_normal_page(vma, address, entry);
66133+ if (!page_m)
66134+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
66135+ else if (PageAnon(page_m)) {
66136+ if (pax_find_mirror_vma(vma)) {
66137+ pte_unmap_unlock(pte, ptl);
66138+ lock_page(page_m);
66139+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
66140+ if (pte_same(entry, *pte))
66141+ pax_mirror_anon_pte(vma, address, page_m, ptl);
66142+ else
66143+ unlock_page(page_m);
66144+ }
66145+ } else
66146+ pax_mirror_file_pte(vma, address, page_m, ptl);
66147+
66148+out:
66149+ pte_unmap_unlock(pte, ptl);
66150+}
66151+#endif
66152+
66153 /*
66154 * This routine handles present pages, when users try to write
66155 * to a shared page. It is done by copying the page to a new address
66156@@ -2667,6 +2860,12 @@ gotten:
66157 */
66158 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
66159 if (likely(pte_same(*page_table, orig_pte))) {
66160+
66161+#ifdef CONFIG_PAX_SEGMEXEC
66162+ if (pax_find_mirror_vma(vma))
66163+ BUG_ON(!trylock_page(new_page));
66164+#endif
66165+
66166 if (old_page) {
66167 if (!PageAnon(old_page)) {
66168 dec_mm_counter_fast(mm, MM_FILEPAGES);
66169@@ -2718,6 +2917,10 @@ gotten:
66170 page_remove_rmap(old_page);
66171 }
66172
66173+#ifdef CONFIG_PAX_SEGMEXEC
66174+ pax_mirror_anon_pte(vma, address, new_page, ptl);
66175+#endif
66176+
66177 /* Free the old page.. */
66178 new_page = old_page;
66179 ret |= VM_FAULT_WRITE;
66180@@ -2997,6 +3200,11 @@ static int do_swap_page(struct mm_struct
66181 swap_free(entry);
66182 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
66183 try_to_free_swap(page);
66184+
66185+#ifdef CONFIG_PAX_SEGMEXEC
66186+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
66187+#endif
66188+
66189 unlock_page(page);
66190 if (swapcache) {
66191 /*
66192@@ -3020,6 +3228,11 @@ static int do_swap_page(struct mm_struct
66193
66194 /* No need to invalidate - it was non-present before */
66195 update_mmu_cache(vma, address, page_table);
66196+
66197+#ifdef CONFIG_PAX_SEGMEXEC
66198+ pax_mirror_anon_pte(vma, address, page, ptl);
66199+#endif
66200+
66201 unlock:
66202 pte_unmap_unlock(page_table, ptl);
66203 out:
66204@@ -3039,40 +3252,6 @@ out_release:
66205 }
66206
66207 /*
66208- * This is like a special single-page "expand_{down|up}wards()",
66209- * except we must first make sure that 'address{-|+}PAGE_SIZE'
66210- * doesn't hit another vma.
66211- */
66212-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
66213-{
66214- address &= PAGE_MASK;
66215- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
66216- struct vm_area_struct *prev = vma->vm_prev;
66217-
66218- /*
66219- * Is there a mapping abutting this one below?
66220- *
66221- * That's only ok if it's the same stack mapping
66222- * that has gotten split..
66223- */
66224- if (prev && prev->vm_end == address)
66225- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
66226-
66227- expand_downwards(vma, address - PAGE_SIZE);
66228- }
66229- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
66230- struct vm_area_struct *next = vma->vm_next;
66231-
66232- /* As VM_GROWSDOWN but s/below/above/ */
66233- if (next && next->vm_start == address + PAGE_SIZE)
66234- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
66235-
66236- expand_upwards(vma, address + PAGE_SIZE);
66237- }
66238- return 0;
66239-}
66240-
66241-/*
66242 * We enter with non-exclusive mmap_sem (to exclude vma changes,
66243 * but allow concurrent faults), and pte mapped but not yet locked.
66244 * We return with mmap_sem still held, but pte unmapped and unlocked.
66245@@ -3081,27 +3260,23 @@ static int do_anonymous_page(struct mm_s
66246 unsigned long address, pte_t *page_table, pmd_t *pmd,
66247 unsigned int flags)
66248 {
66249- struct page *page;
66250+ struct page *page = NULL;
66251 spinlock_t *ptl;
66252 pte_t entry;
66253
66254- pte_unmap(page_table);
66255-
66256- /* Check if we need to add a guard page to the stack */
66257- if (check_stack_guard_page(vma, address) < 0)
66258- return VM_FAULT_SIGBUS;
66259-
66260- /* Use the zero-page for reads */
66261 if (!(flags & FAULT_FLAG_WRITE)) {
66262 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
66263 vma->vm_page_prot));
66264- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
66265+ ptl = pte_lockptr(mm, pmd);
66266+ spin_lock(ptl);
66267 if (!pte_none(*page_table))
66268 goto unlock;
66269 goto setpte;
66270 }
66271
66272 /* Allocate our own private page. */
66273+ pte_unmap(page_table);
66274+
66275 if (unlikely(anon_vma_prepare(vma)))
66276 goto oom;
66277 page = alloc_zeroed_user_highpage_movable(vma, address);
66278@@ -3120,6 +3295,11 @@ static int do_anonymous_page(struct mm_s
66279 if (!pte_none(*page_table))
66280 goto release;
66281
66282+#ifdef CONFIG_PAX_SEGMEXEC
66283+ if (pax_find_mirror_vma(vma))
66284+ BUG_ON(!trylock_page(page));
66285+#endif
66286+
66287 inc_mm_counter_fast(mm, MM_ANONPAGES);
66288 page_add_new_anon_rmap(page, vma, address);
66289 setpte:
66290@@ -3127,6 +3307,12 @@ setpte:
66291
66292 /* No need to invalidate - it was non-present before */
66293 update_mmu_cache(vma, address, page_table);
66294+
66295+#ifdef CONFIG_PAX_SEGMEXEC
66296+ if (page)
66297+ pax_mirror_anon_pte(vma, address, page, ptl);
66298+#endif
66299+
66300 unlock:
66301 pte_unmap_unlock(page_table, ptl);
66302 return 0;
66303@@ -3264,6 +3450,12 @@ static int __do_fault(struct mm_struct *
66304 */
66305 /* Only go through if we didn't race with anybody else... */
66306 if (likely(pte_same(*page_table, orig_pte))) {
66307+
66308+#ifdef CONFIG_PAX_SEGMEXEC
66309+ if (anon && pax_find_mirror_vma(vma))
66310+ BUG_ON(!trylock_page(page));
66311+#endif
66312+
66313 flush_icache_page(vma, page);
66314 entry = mk_pte(page, vma->vm_page_prot);
66315 if (flags & FAULT_FLAG_WRITE)
66316@@ -3283,6 +3475,14 @@ static int __do_fault(struct mm_struct *
66317
66318 /* no need to invalidate: a not-present page won't be cached */
66319 update_mmu_cache(vma, address, page_table);
66320+
66321+#ifdef CONFIG_PAX_SEGMEXEC
66322+ if (anon)
66323+ pax_mirror_anon_pte(vma, address, page, ptl);
66324+ else
66325+ pax_mirror_file_pte(vma, address, page, ptl);
66326+#endif
66327+
66328 } else {
66329 if (charged)
66330 mem_cgroup_uncharge_page(page);
66331@@ -3430,6 +3630,12 @@ int handle_pte_fault(struct mm_struct *m
66332 if (flags & FAULT_FLAG_WRITE)
66333 flush_tlb_fix_spurious_fault(vma, address);
66334 }
66335+
66336+#ifdef CONFIG_PAX_SEGMEXEC
66337+ pax_mirror_pte(vma, address, pte, pmd, ptl);
66338+ return 0;
66339+#endif
66340+
66341 unlock:
66342 pte_unmap_unlock(pte, ptl);
66343 return 0;
66344@@ -3446,6 +3652,10 @@ int handle_mm_fault(struct mm_struct *mm
66345 pmd_t *pmd;
66346 pte_t *pte;
66347
66348+#ifdef CONFIG_PAX_SEGMEXEC
66349+ struct vm_area_struct *vma_m;
66350+#endif
66351+
66352 __set_current_state(TASK_RUNNING);
66353
66354 count_vm_event(PGFAULT);
66355@@ -3457,6 +3667,34 @@ int handle_mm_fault(struct mm_struct *mm
66356 if (unlikely(is_vm_hugetlb_page(vma)))
66357 return hugetlb_fault(mm, vma, address, flags);
66358
66359+#ifdef CONFIG_PAX_SEGMEXEC
66360+ vma_m = pax_find_mirror_vma(vma);
66361+ if (vma_m) {
66362+ unsigned long address_m;
66363+ pgd_t *pgd_m;
66364+ pud_t *pud_m;
66365+ pmd_t *pmd_m;
66366+
66367+ if (vma->vm_start > vma_m->vm_start) {
66368+ address_m = address;
66369+ address -= SEGMEXEC_TASK_SIZE;
66370+ vma = vma_m;
66371+ } else
66372+ address_m = address + SEGMEXEC_TASK_SIZE;
66373+
66374+ pgd_m = pgd_offset(mm, address_m);
66375+ pud_m = pud_alloc(mm, pgd_m, address_m);
66376+ if (!pud_m)
66377+ return VM_FAULT_OOM;
66378+ pmd_m = pmd_alloc(mm, pud_m, address_m);
66379+ if (!pmd_m)
66380+ return VM_FAULT_OOM;
66381+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
66382+ return VM_FAULT_OOM;
66383+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
66384+ }
66385+#endif
66386+
66387 pgd = pgd_offset(mm, address);
66388 pud = pud_alloc(mm, pgd, address);
66389 if (!pud)
66390@@ -3486,7 +3724,7 @@ int handle_mm_fault(struct mm_struct *mm
66391 * run pte_offset_map on the pmd, if an huge pmd could
66392 * materialize from under us from a different thread.
66393 */
66394- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
66395+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
66396 return VM_FAULT_OOM;
66397 /* if an huge pmd materialized from under us just retry later */
66398 if (unlikely(pmd_trans_huge(*pmd)))
66399@@ -3590,7 +3828,7 @@ static int __init gate_vma_init(void)
66400 gate_vma.vm_start = FIXADDR_USER_START;
66401 gate_vma.vm_end = FIXADDR_USER_END;
66402 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
66403- gate_vma.vm_page_prot = __P101;
66404+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
66405 /*
66406 * Make sure the vDSO gets into every core dump.
66407 * Dumping its contents makes post-mortem fully interpretable later
66408diff -urNp linux-3.0.4/mm/memory-failure.c linux-3.0.4/mm/memory-failure.c
66409--- linux-3.0.4/mm/memory-failure.c 2011-07-21 22:17:23.000000000 -0400
66410+++ linux-3.0.4/mm/memory-failure.c 2011-10-06 04:17:55.000000000 -0400
66411@@ -59,7 +59,7 @@ int sysctl_memory_failure_early_kill __r
66412
66413 int sysctl_memory_failure_recovery __read_mostly = 1;
66414
66415-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
66416+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
66417
66418 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
66419
66420@@ -200,7 +200,7 @@ static int kill_proc_ao(struct task_stru
66421 si.si_signo = SIGBUS;
66422 si.si_errno = 0;
66423 si.si_code = BUS_MCEERR_AO;
66424- si.si_addr = (void *)addr;
66425+ si.si_addr = (void __user *)addr;
66426 #ifdef __ARCH_SI_TRAPNO
66427 si.si_trapno = trapno;
66428 #endif
66429@@ -1008,7 +1008,7 @@ int __memory_failure(unsigned long pfn,
66430 }
66431
66432 nr_pages = 1 << compound_trans_order(hpage);
66433- atomic_long_add(nr_pages, &mce_bad_pages);
66434+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
66435
66436 /*
66437 * We need/can do nothing about count=0 pages.
66438@@ -1038,7 +1038,7 @@ int __memory_failure(unsigned long pfn,
66439 if (!PageHWPoison(hpage)
66440 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
66441 || (p != hpage && TestSetPageHWPoison(hpage))) {
66442- atomic_long_sub(nr_pages, &mce_bad_pages);
66443+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
66444 return 0;
66445 }
66446 set_page_hwpoison_huge_page(hpage);
66447@@ -1096,7 +1096,7 @@ int __memory_failure(unsigned long pfn,
66448 }
66449 if (hwpoison_filter(p)) {
66450 if (TestClearPageHWPoison(p))
66451- atomic_long_sub(nr_pages, &mce_bad_pages);
66452+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
66453 unlock_page(hpage);
66454 put_page(hpage);
66455 return 0;
66456@@ -1222,7 +1222,7 @@ int unpoison_memory(unsigned long pfn)
66457 return 0;
66458 }
66459 if (TestClearPageHWPoison(p))
66460- atomic_long_sub(nr_pages, &mce_bad_pages);
66461+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
66462 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
66463 return 0;
66464 }
66465@@ -1236,7 +1236,7 @@ int unpoison_memory(unsigned long pfn)
66466 */
66467 if (TestClearPageHWPoison(page)) {
66468 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
66469- atomic_long_sub(nr_pages, &mce_bad_pages);
66470+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
66471 freeit = 1;
66472 if (PageHuge(page))
66473 clear_page_hwpoison_huge_page(page);
66474@@ -1349,7 +1349,7 @@ static int soft_offline_huge_page(struct
66475 }
66476 done:
66477 if (!PageHWPoison(hpage))
66478- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
66479+ atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
66480 set_page_hwpoison_huge_page(hpage);
66481 dequeue_hwpoisoned_huge_page(hpage);
66482 /* keep elevated page count for bad page */
66483@@ -1480,7 +1480,7 @@ int soft_offline_page(struct page *page,
66484 return ret;
66485
66486 done:
66487- atomic_long_add(1, &mce_bad_pages);
66488+ atomic_long_add_unchecked(1, &mce_bad_pages);
66489 SetPageHWPoison(page);
66490 /* keep elevated page count for bad page */
66491 return ret;
66492diff -urNp linux-3.0.4/mm/mempolicy.c linux-3.0.4/mm/mempolicy.c
66493--- linux-3.0.4/mm/mempolicy.c 2011-07-21 22:17:23.000000000 -0400
66494+++ linux-3.0.4/mm/mempolicy.c 2011-08-23 21:48:14.000000000 -0400
66495@@ -639,6 +639,10 @@ static int mbind_range(struct mm_struct
66496 unsigned long vmstart;
66497 unsigned long vmend;
66498
66499+#ifdef CONFIG_PAX_SEGMEXEC
66500+ struct vm_area_struct *vma_m;
66501+#endif
66502+
66503 vma = find_vma_prev(mm, start, &prev);
66504 if (!vma || vma->vm_start > start)
66505 return -EFAULT;
66506@@ -669,6 +673,16 @@ static int mbind_range(struct mm_struct
66507 err = policy_vma(vma, new_pol);
66508 if (err)
66509 goto out;
66510+
66511+#ifdef CONFIG_PAX_SEGMEXEC
66512+ vma_m = pax_find_mirror_vma(vma);
66513+ if (vma_m) {
66514+ err = policy_vma(vma_m, new_pol);
66515+ if (err)
66516+ goto out;
66517+ }
66518+#endif
66519+
66520 }
66521
66522 out:
66523@@ -1102,6 +1116,17 @@ static long do_mbind(unsigned long start
66524
66525 if (end < start)
66526 return -EINVAL;
66527+
66528+#ifdef CONFIG_PAX_SEGMEXEC
66529+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
66530+ if (end > SEGMEXEC_TASK_SIZE)
66531+ return -EINVAL;
66532+ } else
66533+#endif
66534+
66535+ if (end > TASK_SIZE)
66536+ return -EINVAL;
66537+
66538 if (end == start)
66539 return 0;
66540
66541@@ -1320,6 +1345,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
66542 if (!mm)
66543 goto out;
66544
66545+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66546+ if (mm != current->mm &&
66547+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
66548+ err = -EPERM;
66549+ goto out;
66550+ }
66551+#endif
66552+
66553 /*
66554 * Check if this process has the right to modify the specified
66555 * process. The right exists if the process has administrative
66556@@ -1329,8 +1362,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
66557 rcu_read_lock();
66558 tcred = __task_cred(task);
66559 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
66560- cred->uid != tcred->suid && cred->uid != tcred->uid &&
66561- !capable(CAP_SYS_NICE)) {
66562+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
66563 rcu_read_unlock();
66564 err = -EPERM;
66565 goto out;
66566diff -urNp linux-3.0.4/mm/migrate.c linux-3.0.4/mm/migrate.c
66567--- linux-3.0.4/mm/migrate.c 2011-07-21 22:17:23.000000000 -0400
66568+++ linux-3.0.4/mm/migrate.c 2011-08-23 21:48:14.000000000 -0400
66569@@ -1124,6 +1124,8 @@ static int do_pages_move(struct mm_struc
66570 unsigned long chunk_start;
66571 int err;
66572
66573+ pax_track_stack();
66574+
66575 task_nodes = cpuset_mems_allowed(task);
66576
66577 err = -ENOMEM;
66578@@ -1308,6 +1310,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
66579 if (!mm)
66580 return -EINVAL;
66581
66582+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66583+ if (mm != current->mm &&
66584+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
66585+ err = -EPERM;
66586+ goto out;
66587+ }
66588+#endif
66589+
66590 /*
66591 * Check if this process has the right to modify the specified
66592 * process. The right exists if the process has administrative
66593@@ -1317,8 +1327,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
66594 rcu_read_lock();
66595 tcred = __task_cred(task);
66596 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
66597- cred->uid != tcred->suid && cred->uid != tcred->uid &&
66598- !capable(CAP_SYS_NICE)) {
66599+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
66600 rcu_read_unlock();
66601 err = -EPERM;
66602 goto out;
66603diff -urNp linux-3.0.4/mm/mlock.c linux-3.0.4/mm/mlock.c
66604--- linux-3.0.4/mm/mlock.c 2011-07-21 22:17:23.000000000 -0400
66605+++ linux-3.0.4/mm/mlock.c 2011-08-23 21:48:14.000000000 -0400
66606@@ -13,6 +13,7 @@
66607 #include <linux/pagemap.h>
66608 #include <linux/mempolicy.h>
66609 #include <linux/syscalls.h>
66610+#include <linux/security.h>
66611 #include <linux/sched.h>
66612 #include <linux/module.h>
66613 #include <linux/rmap.h>
66614@@ -377,6 +378,9 @@ static int do_mlock(unsigned long start,
66615 return -EINVAL;
66616 if (end == start)
66617 return 0;
66618+ if (end > TASK_SIZE)
66619+ return -EINVAL;
66620+
66621 vma = find_vma_prev(current->mm, start, &prev);
66622 if (!vma || vma->vm_start > start)
66623 return -ENOMEM;
66624@@ -387,6 +391,11 @@ static int do_mlock(unsigned long start,
66625 for (nstart = start ; ; ) {
66626 vm_flags_t newflags;
66627
66628+#ifdef CONFIG_PAX_SEGMEXEC
66629+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
66630+ break;
66631+#endif
66632+
66633 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
66634
66635 newflags = vma->vm_flags | VM_LOCKED;
66636@@ -492,6 +501,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
66637 lock_limit >>= PAGE_SHIFT;
66638
66639 /* check against resource limits */
66640+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
66641 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
66642 error = do_mlock(start, len, 1);
66643 up_write(&current->mm->mmap_sem);
66644@@ -515,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
66645 static int do_mlockall(int flags)
66646 {
66647 struct vm_area_struct * vma, * prev = NULL;
66648- unsigned int def_flags = 0;
66649
66650 if (flags & MCL_FUTURE)
66651- def_flags = VM_LOCKED;
66652- current->mm->def_flags = def_flags;
66653+ current->mm->def_flags |= VM_LOCKED;
66654+ else
66655+ current->mm->def_flags &= ~VM_LOCKED;
66656 if (flags == MCL_FUTURE)
66657 goto out;
66658
66659 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
66660 vm_flags_t newflags;
66661
66662+#ifdef CONFIG_PAX_SEGMEXEC
66663+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
66664+ break;
66665+#endif
66666+
66667+ BUG_ON(vma->vm_end > TASK_SIZE);
66668 newflags = vma->vm_flags | VM_LOCKED;
66669 if (!(flags & MCL_CURRENT))
66670 newflags &= ~VM_LOCKED;
66671@@ -557,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
66672 lock_limit >>= PAGE_SHIFT;
66673
66674 ret = -ENOMEM;
66675+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
66676 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
66677 capable(CAP_IPC_LOCK))
66678 ret = do_mlockall(flags);
66679diff -urNp linux-3.0.4/mm/mmap.c linux-3.0.4/mm/mmap.c
66680--- linux-3.0.4/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
66681+++ linux-3.0.4/mm/mmap.c 2011-08-23 21:48:14.000000000 -0400
66682@@ -46,6 +46,16 @@
66683 #define arch_rebalance_pgtables(addr, len) (addr)
66684 #endif
66685
66686+static inline void verify_mm_writelocked(struct mm_struct *mm)
66687+{
66688+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
66689+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
66690+ up_read(&mm->mmap_sem);
66691+ BUG();
66692+ }
66693+#endif
66694+}
66695+
66696 static void unmap_region(struct mm_struct *mm,
66697 struct vm_area_struct *vma, struct vm_area_struct *prev,
66698 unsigned long start, unsigned long end);
66699@@ -71,22 +81,32 @@ static void unmap_region(struct mm_struc
66700 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
66701 *
66702 */
66703-pgprot_t protection_map[16] = {
66704+pgprot_t protection_map[16] __read_only = {
66705 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
66706 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
66707 };
66708
66709-pgprot_t vm_get_page_prot(unsigned long vm_flags)
66710+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
66711 {
66712- return __pgprot(pgprot_val(protection_map[vm_flags &
66713+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
66714 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
66715 pgprot_val(arch_vm_get_page_prot(vm_flags)));
66716+
66717+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
66718+ if (!(__supported_pte_mask & _PAGE_NX) &&
66719+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
66720+ (vm_flags & (VM_READ | VM_WRITE)))
66721+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
66722+#endif
66723+
66724+ return prot;
66725 }
66726 EXPORT_SYMBOL(vm_get_page_prot);
66727
66728 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
66729 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
66730 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
66731+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
66732 /*
66733 * Make sure vm_committed_as in one cacheline and not cacheline shared with
66734 * other variables. It can be updated by several CPUs frequently.
66735@@ -236,6 +256,7 @@ static struct vm_area_struct *remove_vma
66736 struct vm_area_struct *next = vma->vm_next;
66737
66738 might_sleep();
66739+ BUG_ON(vma->vm_mirror);
66740 if (vma->vm_ops && vma->vm_ops->close)
66741 vma->vm_ops->close(vma);
66742 if (vma->vm_file) {
66743@@ -280,6 +301,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
66744 * not page aligned -Ram Gupta
66745 */
66746 rlim = rlimit(RLIMIT_DATA);
66747+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
66748 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
66749 (mm->end_data - mm->start_data) > rlim)
66750 goto out;
66751@@ -697,6 +719,12 @@ static int
66752 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
66753 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
66754 {
66755+
66756+#ifdef CONFIG_PAX_SEGMEXEC
66757+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
66758+ return 0;
66759+#endif
66760+
66761 if (is_mergeable_vma(vma, file, vm_flags) &&
66762 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
66763 if (vma->vm_pgoff == vm_pgoff)
66764@@ -716,6 +744,12 @@ static int
66765 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
66766 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
66767 {
66768+
66769+#ifdef CONFIG_PAX_SEGMEXEC
66770+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
66771+ return 0;
66772+#endif
66773+
66774 if (is_mergeable_vma(vma, file, vm_flags) &&
66775 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
66776 pgoff_t vm_pglen;
66777@@ -758,13 +792,20 @@ can_vma_merge_after(struct vm_area_struc
66778 struct vm_area_struct *vma_merge(struct mm_struct *mm,
66779 struct vm_area_struct *prev, unsigned long addr,
66780 unsigned long end, unsigned long vm_flags,
66781- struct anon_vma *anon_vma, struct file *file,
66782+ struct anon_vma *anon_vma, struct file *file,
66783 pgoff_t pgoff, struct mempolicy *policy)
66784 {
66785 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
66786 struct vm_area_struct *area, *next;
66787 int err;
66788
66789+#ifdef CONFIG_PAX_SEGMEXEC
66790+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
66791+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
66792+
66793+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
66794+#endif
66795+
66796 /*
66797 * We later require that vma->vm_flags == vm_flags,
66798 * so this tests vma->vm_flags & VM_SPECIAL, too.
66799@@ -780,6 +821,15 @@ struct vm_area_struct *vma_merge(struct
66800 if (next && next->vm_end == end) /* cases 6, 7, 8 */
66801 next = next->vm_next;
66802
66803+#ifdef CONFIG_PAX_SEGMEXEC
66804+ if (prev)
66805+ prev_m = pax_find_mirror_vma(prev);
66806+ if (area)
66807+ area_m = pax_find_mirror_vma(area);
66808+ if (next)
66809+ next_m = pax_find_mirror_vma(next);
66810+#endif
66811+
66812 /*
66813 * Can it merge with the predecessor?
66814 */
66815@@ -799,9 +849,24 @@ struct vm_area_struct *vma_merge(struct
66816 /* cases 1, 6 */
66817 err = vma_adjust(prev, prev->vm_start,
66818 next->vm_end, prev->vm_pgoff, NULL);
66819- } else /* cases 2, 5, 7 */
66820+
66821+#ifdef CONFIG_PAX_SEGMEXEC
66822+ if (!err && prev_m)
66823+ err = vma_adjust(prev_m, prev_m->vm_start,
66824+ next_m->vm_end, prev_m->vm_pgoff, NULL);
66825+#endif
66826+
66827+ } else { /* cases 2, 5, 7 */
66828 err = vma_adjust(prev, prev->vm_start,
66829 end, prev->vm_pgoff, NULL);
66830+
66831+#ifdef CONFIG_PAX_SEGMEXEC
66832+ if (!err && prev_m)
66833+ err = vma_adjust(prev_m, prev_m->vm_start,
66834+ end_m, prev_m->vm_pgoff, NULL);
66835+#endif
66836+
66837+ }
66838 if (err)
66839 return NULL;
66840 khugepaged_enter_vma_merge(prev);
66841@@ -815,12 +880,27 @@ struct vm_area_struct *vma_merge(struct
66842 mpol_equal(policy, vma_policy(next)) &&
66843 can_vma_merge_before(next, vm_flags,
66844 anon_vma, file, pgoff+pglen)) {
66845- if (prev && addr < prev->vm_end) /* case 4 */
66846+ if (prev && addr < prev->vm_end) { /* case 4 */
66847 err = vma_adjust(prev, prev->vm_start,
66848 addr, prev->vm_pgoff, NULL);
66849- else /* cases 3, 8 */
66850+
66851+#ifdef CONFIG_PAX_SEGMEXEC
66852+ if (!err && prev_m)
66853+ err = vma_adjust(prev_m, prev_m->vm_start,
66854+ addr_m, prev_m->vm_pgoff, NULL);
66855+#endif
66856+
66857+ } else { /* cases 3, 8 */
66858 err = vma_adjust(area, addr, next->vm_end,
66859 next->vm_pgoff - pglen, NULL);
66860+
66861+#ifdef CONFIG_PAX_SEGMEXEC
66862+ if (!err && area_m)
66863+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
66864+ next_m->vm_pgoff - pglen, NULL);
66865+#endif
66866+
66867+ }
66868 if (err)
66869 return NULL;
66870 khugepaged_enter_vma_merge(area);
66871@@ -929,14 +1009,11 @@ none:
66872 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
66873 struct file *file, long pages)
66874 {
66875- const unsigned long stack_flags
66876- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
66877-
66878 if (file) {
66879 mm->shared_vm += pages;
66880 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
66881 mm->exec_vm += pages;
66882- } else if (flags & stack_flags)
66883+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
66884 mm->stack_vm += pages;
66885 if (flags & (VM_RESERVED|VM_IO))
66886 mm->reserved_vm += pages;
66887@@ -963,7 +1040,7 @@ unsigned long do_mmap_pgoff(struct file
66888 * (the exception is when the underlying filesystem is noexec
66889 * mounted, in which case we dont add PROT_EXEC.)
66890 */
66891- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
66892+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
66893 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
66894 prot |= PROT_EXEC;
66895
66896@@ -989,7 +1066,7 @@ unsigned long do_mmap_pgoff(struct file
66897 /* Obtain the address to map to. we verify (or select) it and ensure
66898 * that it represents a valid section of the address space.
66899 */
66900- addr = get_unmapped_area(file, addr, len, pgoff, flags);
66901+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
66902 if (addr & ~PAGE_MASK)
66903 return addr;
66904
66905@@ -1000,6 +1077,36 @@ unsigned long do_mmap_pgoff(struct file
66906 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
66907 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
66908
66909+#ifdef CONFIG_PAX_MPROTECT
66910+ if (mm->pax_flags & MF_PAX_MPROTECT) {
66911+#ifndef CONFIG_PAX_MPROTECT_COMPAT
66912+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
66913+ gr_log_rwxmmap(file);
66914+
66915+#ifdef CONFIG_PAX_EMUPLT
66916+ vm_flags &= ~VM_EXEC;
66917+#else
66918+ return -EPERM;
66919+#endif
66920+
66921+ }
66922+
66923+ if (!(vm_flags & VM_EXEC))
66924+ vm_flags &= ~VM_MAYEXEC;
66925+#else
66926+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
66927+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
66928+#endif
66929+ else
66930+ vm_flags &= ~VM_MAYWRITE;
66931+ }
66932+#endif
66933+
66934+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
66935+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
66936+ vm_flags &= ~VM_PAGEEXEC;
66937+#endif
66938+
66939 if (flags & MAP_LOCKED)
66940 if (!can_do_mlock())
66941 return -EPERM;
66942@@ -1011,6 +1118,7 @@ unsigned long do_mmap_pgoff(struct file
66943 locked += mm->locked_vm;
66944 lock_limit = rlimit(RLIMIT_MEMLOCK);
66945 lock_limit >>= PAGE_SHIFT;
66946+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
66947 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
66948 return -EAGAIN;
66949 }
66950@@ -1081,6 +1189,9 @@ unsigned long do_mmap_pgoff(struct file
66951 if (error)
66952 return error;
66953
66954+ if (!gr_acl_handle_mmap(file, prot))
66955+ return -EACCES;
66956+
66957 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
66958 }
66959 EXPORT_SYMBOL(do_mmap_pgoff);
66960@@ -1161,7 +1272,7 @@ int vma_wants_writenotify(struct vm_area
66961 vm_flags_t vm_flags = vma->vm_flags;
66962
66963 /* If it was private or non-writable, the write bit is already clear */
66964- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
66965+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
66966 return 0;
66967
66968 /* The backer wishes to know when pages are first written to? */
66969@@ -1210,14 +1321,24 @@ unsigned long mmap_region(struct file *f
66970 unsigned long charged = 0;
66971 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
66972
66973+#ifdef CONFIG_PAX_SEGMEXEC
66974+ struct vm_area_struct *vma_m = NULL;
66975+#endif
66976+
66977+ /*
66978+ * mm->mmap_sem is required to protect against another thread
66979+ * changing the mappings in case we sleep.
66980+ */
66981+ verify_mm_writelocked(mm);
66982+
66983 /* Clear old maps */
66984 error = -ENOMEM;
66985-munmap_back:
66986 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
66987 if (vma && vma->vm_start < addr + len) {
66988 if (do_munmap(mm, addr, len))
66989 return -ENOMEM;
66990- goto munmap_back;
66991+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
66992+ BUG_ON(vma && vma->vm_start < addr + len);
66993 }
66994
66995 /* Check against address space limit. */
66996@@ -1266,6 +1387,16 @@ munmap_back:
66997 goto unacct_error;
66998 }
66999
67000+#ifdef CONFIG_PAX_SEGMEXEC
67001+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
67002+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
67003+ if (!vma_m) {
67004+ error = -ENOMEM;
67005+ goto free_vma;
67006+ }
67007+ }
67008+#endif
67009+
67010 vma->vm_mm = mm;
67011 vma->vm_start = addr;
67012 vma->vm_end = addr + len;
67013@@ -1289,6 +1420,19 @@ munmap_back:
67014 error = file->f_op->mmap(file, vma);
67015 if (error)
67016 goto unmap_and_free_vma;
67017+
67018+#ifdef CONFIG_PAX_SEGMEXEC
67019+ if (vma_m && (vm_flags & VM_EXECUTABLE))
67020+ added_exe_file_vma(mm);
67021+#endif
67022+
67023+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
67024+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
67025+ vma->vm_flags |= VM_PAGEEXEC;
67026+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
67027+ }
67028+#endif
67029+
67030 if (vm_flags & VM_EXECUTABLE)
67031 added_exe_file_vma(mm);
67032
67033@@ -1324,6 +1468,11 @@ munmap_back:
67034 vma_link(mm, vma, prev, rb_link, rb_parent);
67035 file = vma->vm_file;
67036
67037+#ifdef CONFIG_PAX_SEGMEXEC
67038+ if (vma_m)
67039+ BUG_ON(pax_mirror_vma(vma_m, vma));
67040+#endif
67041+
67042 /* Once vma denies write, undo our temporary denial count */
67043 if (correct_wcount)
67044 atomic_inc(&inode->i_writecount);
67045@@ -1332,6 +1481,7 @@ out:
67046
67047 mm->total_vm += len >> PAGE_SHIFT;
67048 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
67049+ track_exec_limit(mm, addr, addr + len, vm_flags);
67050 if (vm_flags & VM_LOCKED) {
67051 if (!mlock_vma_pages_range(vma, addr, addr + len))
67052 mm->locked_vm += (len >> PAGE_SHIFT);
67053@@ -1349,6 +1499,12 @@ unmap_and_free_vma:
67054 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
67055 charged = 0;
67056 free_vma:
67057+
67058+#ifdef CONFIG_PAX_SEGMEXEC
67059+ if (vma_m)
67060+ kmem_cache_free(vm_area_cachep, vma_m);
67061+#endif
67062+
67063 kmem_cache_free(vm_area_cachep, vma);
67064 unacct_error:
67065 if (charged)
67066@@ -1356,6 +1512,44 @@ unacct_error:
67067 return error;
67068 }
67069
67070+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
67071+{
67072+ if (!vma) {
67073+#ifdef CONFIG_STACK_GROWSUP
67074+ if (addr > sysctl_heap_stack_gap)
67075+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
67076+ else
67077+ vma = find_vma(current->mm, 0);
67078+ if (vma && (vma->vm_flags & VM_GROWSUP))
67079+ return false;
67080+#endif
67081+ return true;
67082+ }
67083+
67084+ if (addr + len > vma->vm_start)
67085+ return false;
67086+
67087+ if (vma->vm_flags & VM_GROWSDOWN)
67088+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
67089+#ifdef CONFIG_STACK_GROWSUP
67090+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
67091+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
67092+#endif
67093+
67094+ return true;
67095+}
67096+
67097+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
67098+{
67099+ if (vma->vm_start < len)
67100+ return -ENOMEM;
67101+ if (!(vma->vm_flags & VM_GROWSDOWN))
67102+ return vma->vm_start - len;
67103+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
67104+ return vma->vm_start - len - sysctl_heap_stack_gap;
67105+ return -ENOMEM;
67106+}
67107+
67108 /* Get an address range which is currently unmapped.
67109 * For shmat() with addr=0.
67110 *
67111@@ -1382,18 +1576,23 @@ arch_get_unmapped_area(struct file *filp
67112 if (flags & MAP_FIXED)
67113 return addr;
67114
67115+#ifdef CONFIG_PAX_RANDMMAP
67116+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
67117+#endif
67118+
67119 if (addr) {
67120 addr = PAGE_ALIGN(addr);
67121- vma = find_vma(mm, addr);
67122- if (TASK_SIZE - len >= addr &&
67123- (!vma || addr + len <= vma->vm_start))
67124- return addr;
67125+ if (TASK_SIZE - len >= addr) {
67126+ vma = find_vma(mm, addr);
67127+ if (check_heap_stack_gap(vma, addr, len))
67128+ return addr;
67129+ }
67130 }
67131 if (len > mm->cached_hole_size) {
67132- start_addr = addr = mm->free_area_cache;
67133+ start_addr = addr = mm->free_area_cache;
67134 } else {
67135- start_addr = addr = TASK_UNMAPPED_BASE;
67136- mm->cached_hole_size = 0;
67137+ start_addr = addr = mm->mmap_base;
67138+ mm->cached_hole_size = 0;
67139 }
67140
67141 full_search:
67142@@ -1404,34 +1603,40 @@ full_search:
67143 * Start a new search - just in case we missed
67144 * some holes.
67145 */
67146- if (start_addr != TASK_UNMAPPED_BASE) {
67147- addr = TASK_UNMAPPED_BASE;
67148- start_addr = addr;
67149+ if (start_addr != mm->mmap_base) {
67150+ start_addr = addr = mm->mmap_base;
67151 mm->cached_hole_size = 0;
67152 goto full_search;
67153 }
67154 return -ENOMEM;
67155 }
67156- if (!vma || addr + len <= vma->vm_start) {
67157- /*
67158- * Remember the place where we stopped the search:
67159- */
67160- mm->free_area_cache = addr + len;
67161- return addr;
67162- }
67163+ if (check_heap_stack_gap(vma, addr, len))
67164+ break;
67165 if (addr + mm->cached_hole_size < vma->vm_start)
67166 mm->cached_hole_size = vma->vm_start - addr;
67167 addr = vma->vm_end;
67168 }
67169+
67170+ /*
67171+ * Remember the place where we stopped the search:
67172+ */
67173+ mm->free_area_cache = addr + len;
67174+ return addr;
67175 }
67176 #endif
67177
67178 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
67179 {
67180+
67181+#ifdef CONFIG_PAX_SEGMEXEC
67182+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
67183+ return;
67184+#endif
67185+
67186 /*
67187 * Is this a new hole at the lowest possible address?
67188 */
67189- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
67190+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
67191 mm->free_area_cache = addr;
67192 mm->cached_hole_size = ~0UL;
67193 }
67194@@ -1449,7 +1654,7 @@ arch_get_unmapped_area_topdown(struct fi
67195 {
67196 struct vm_area_struct *vma;
67197 struct mm_struct *mm = current->mm;
67198- unsigned long addr = addr0;
67199+ unsigned long base = mm->mmap_base, addr = addr0;
67200
67201 /* requested length too big for entire address space */
67202 if (len > TASK_SIZE)
67203@@ -1458,13 +1663,18 @@ arch_get_unmapped_area_topdown(struct fi
67204 if (flags & MAP_FIXED)
67205 return addr;
67206
67207+#ifdef CONFIG_PAX_RANDMMAP
67208+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
67209+#endif
67210+
67211 /* requesting a specific address */
67212 if (addr) {
67213 addr = PAGE_ALIGN(addr);
67214- vma = find_vma(mm, addr);
67215- if (TASK_SIZE - len >= addr &&
67216- (!vma || addr + len <= vma->vm_start))
67217- return addr;
67218+ if (TASK_SIZE - len >= addr) {
67219+ vma = find_vma(mm, addr);
67220+ if (check_heap_stack_gap(vma, addr, len))
67221+ return addr;
67222+ }
67223 }
67224
67225 /* check if free_area_cache is useful for us */
67226@@ -1479,7 +1689,7 @@ arch_get_unmapped_area_topdown(struct fi
67227 /* make sure it can fit in the remaining address space */
67228 if (addr > len) {
67229 vma = find_vma(mm, addr-len);
67230- if (!vma || addr <= vma->vm_start)
67231+ if (check_heap_stack_gap(vma, addr - len, len))
67232 /* remember the address as a hint for next time */
67233 return (mm->free_area_cache = addr-len);
67234 }
67235@@ -1496,7 +1706,7 @@ arch_get_unmapped_area_topdown(struct fi
67236 * return with success:
67237 */
67238 vma = find_vma(mm, addr);
67239- if (!vma || addr+len <= vma->vm_start)
67240+ if (check_heap_stack_gap(vma, addr, len))
67241 /* remember the address as a hint for next time */
67242 return (mm->free_area_cache = addr);
67243
67244@@ -1505,8 +1715,8 @@ arch_get_unmapped_area_topdown(struct fi
67245 mm->cached_hole_size = vma->vm_start - addr;
67246
67247 /* try just below the current vma->vm_start */
67248- addr = vma->vm_start-len;
67249- } while (len < vma->vm_start);
67250+ addr = skip_heap_stack_gap(vma, len);
67251+ } while (!IS_ERR_VALUE(addr));
67252
67253 bottomup:
67254 /*
67255@@ -1515,13 +1725,21 @@ bottomup:
67256 * can happen with large stack limits and large mmap()
67257 * allocations.
67258 */
67259+ mm->mmap_base = TASK_UNMAPPED_BASE;
67260+
67261+#ifdef CONFIG_PAX_RANDMMAP
67262+ if (mm->pax_flags & MF_PAX_RANDMMAP)
67263+ mm->mmap_base += mm->delta_mmap;
67264+#endif
67265+
67266+ mm->free_area_cache = mm->mmap_base;
67267 mm->cached_hole_size = ~0UL;
67268- mm->free_area_cache = TASK_UNMAPPED_BASE;
67269 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
67270 /*
67271 * Restore the topdown base:
67272 */
67273- mm->free_area_cache = mm->mmap_base;
67274+ mm->mmap_base = base;
67275+ mm->free_area_cache = base;
67276 mm->cached_hole_size = ~0UL;
67277
67278 return addr;
67279@@ -1530,6 +1748,12 @@ bottomup:
67280
67281 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
67282 {
67283+
67284+#ifdef CONFIG_PAX_SEGMEXEC
67285+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
67286+ return;
67287+#endif
67288+
67289 /*
67290 * Is this a new hole at the highest possible address?
67291 */
67292@@ -1537,8 +1761,10 @@ void arch_unmap_area_topdown(struct mm_s
67293 mm->free_area_cache = addr;
67294
67295 /* dont allow allocations above current base */
67296- if (mm->free_area_cache > mm->mmap_base)
67297+ if (mm->free_area_cache > mm->mmap_base) {
67298 mm->free_area_cache = mm->mmap_base;
67299+ mm->cached_hole_size = ~0UL;
67300+ }
67301 }
67302
67303 unsigned long
67304@@ -1646,6 +1872,28 @@ out:
67305 return prev ? prev->vm_next : vma;
67306 }
67307
67308+#ifdef CONFIG_PAX_SEGMEXEC
67309+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
67310+{
67311+ struct vm_area_struct *vma_m;
67312+
67313+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
67314+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
67315+ BUG_ON(vma->vm_mirror);
67316+ return NULL;
67317+ }
67318+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
67319+ vma_m = vma->vm_mirror;
67320+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
67321+ BUG_ON(vma->vm_file != vma_m->vm_file);
67322+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
67323+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
67324+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
67325+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
67326+ return vma_m;
67327+}
67328+#endif
67329+
67330 /*
67331 * Verify that the stack growth is acceptable and
67332 * update accounting. This is shared with both the
67333@@ -1662,6 +1910,7 @@ static int acct_stack_growth(struct vm_a
67334 return -ENOMEM;
67335
67336 /* Stack limit test */
67337+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
67338 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
67339 return -ENOMEM;
67340
67341@@ -1672,6 +1921,7 @@ static int acct_stack_growth(struct vm_a
67342 locked = mm->locked_vm + grow;
67343 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
67344 limit >>= PAGE_SHIFT;
67345+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
67346 if (locked > limit && !capable(CAP_IPC_LOCK))
67347 return -ENOMEM;
67348 }
67349@@ -1702,37 +1952,48 @@ static int acct_stack_growth(struct vm_a
67350 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
67351 * vma is the last one with address > vma->vm_end. Have to extend vma.
67352 */
67353+#ifndef CONFIG_IA64
67354+static
67355+#endif
67356 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
67357 {
67358 int error;
67359+ bool locknext;
67360
67361 if (!(vma->vm_flags & VM_GROWSUP))
67362 return -EFAULT;
67363
67364+ /* Also guard against wrapping around to address 0. */
67365+ if (address < PAGE_ALIGN(address+1))
67366+ address = PAGE_ALIGN(address+1);
67367+ else
67368+ return -ENOMEM;
67369+
67370 /*
67371 * We must make sure the anon_vma is allocated
67372 * so that the anon_vma locking is not a noop.
67373 */
67374 if (unlikely(anon_vma_prepare(vma)))
67375 return -ENOMEM;
67376+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
67377+ if (locknext && anon_vma_prepare(vma->vm_next))
67378+ return -ENOMEM;
67379 vma_lock_anon_vma(vma);
67380+ if (locknext)
67381+ vma_lock_anon_vma(vma->vm_next);
67382
67383 /*
67384 * vma->vm_start/vm_end cannot change under us because the caller
67385 * is required to hold the mmap_sem in read mode. We need the
67386- * anon_vma lock to serialize against concurrent expand_stacks.
67387- * Also guard against wrapping around to address 0.
67388+ * anon_vma locks to serialize against concurrent expand_stacks
67389+ * and expand_upwards.
67390 */
67391- if (address < PAGE_ALIGN(address+4))
67392- address = PAGE_ALIGN(address+4);
67393- else {
67394- vma_unlock_anon_vma(vma);
67395- return -ENOMEM;
67396- }
67397 error = 0;
67398
67399 /* Somebody else might have raced and expanded it already */
67400- if (address > vma->vm_end) {
67401+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
67402+ error = -ENOMEM;
67403+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
67404 unsigned long size, grow;
67405
67406 size = address - vma->vm_start;
67407@@ -1747,6 +2008,8 @@ int expand_upwards(struct vm_area_struct
67408 }
67409 }
67410 }
67411+ if (locknext)
67412+ vma_unlock_anon_vma(vma->vm_next);
67413 vma_unlock_anon_vma(vma);
67414 khugepaged_enter_vma_merge(vma);
67415 return error;
67416@@ -1760,6 +2023,8 @@ int expand_downwards(struct vm_area_stru
67417 unsigned long address)
67418 {
67419 int error;
67420+ bool lockprev = false;
67421+ struct vm_area_struct *prev;
67422
67423 /*
67424 * We must make sure the anon_vma is allocated
67425@@ -1773,6 +2038,15 @@ int expand_downwards(struct vm_area_stru
67426 if (error)
67427 return error;
67428
67429+ prev = vma->vm_prev;
67430+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
67431+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
67432+#endif
67433+ if (lockprev && anon_vma_prepare(prev))
67434+ return -ENOMEM;
67435+ if (lockprev)
67436+ vma_lock_anon_vma(prev);
67437+
67438 vma_lock_anon_vma(vma);
67439
67440 /*
67441@@ -1782,9 +2056,17 @@ int expand_downwards(struct vm_area_stru
67442 */
67443
67444 /* Somebody else might have raced and expanded it already */
67445- if (address < vma->vm_start) {
67446+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
67447+ error = -ENOMEM;
67448+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
67449 unsigned long size, grow;
67450
67451+#ifdef CONFIG_PAX_SEGMEXEC
67452+ struct vm_area_struct *vma_m;
67453+
67454+ vma_m = pax_find_mirror_vma(vma);
67455+#endif
67456+
67457 size = vma->vm_end - address;
67458 grow = (vma->vm_start - address) >> PAGE_SHIFT;
67459
67460@@ -1794,11 +2076,22 @@ int expand_downwards(struct vm_area_stru
67461 if (!error) {
67462 vma->vm_start = address;
67463 vma->vm_pgoff -= grow;
67464+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
67465+
67466+#ifdef CONFIG_PAX_SEGMEXEC
67467+ if (vma_m) {
67468+ vma_m->vm_start -= grow << PAGE_SHIFT;
67469+ vma_m->vm_pgoff -= grow;
67470+ }
67471+#endif
67472+
67473 perf_event_mmap(vma);
67474 }
67475 }
67476 }
67477 vma_unlock_anon_vma(vma);
67478+ if (lockprev)
67479+ vma_unlock_anon_vma(prev);
67480 khugepaged_enter_vma_merge(vma);
67481 return error;
67482 }
67483@@ -1868,6 +2161,13 @@ static void remove_vma_list(struct mm_st
67484 do {
67485 long nrpages = vma_pages(vma);
67486
67487+#ifdef CONFIG_PAX_SEGMEXEC
67488+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
67489+ vma = remove_vma(vma);
67490+ continue;
67491+ }
67492+#endif
67493+
67494 mm->total_vm -= nrpages;
67495 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
67496 vma = remove_vma(vma);
67497@@ -1913,6 +2213,16 @@ detach_vmas_to_be_unmapped(struct mm_str
67498 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
67499 vma->vm_prev = NULL;
67500 do {
67501+
67502+#ifdef CONFIG_PAX_SEGMEXEC
67503+ if (vma->vm_mirror) {
67504+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
67505+ vma->vm_mirror->vm_mirror = NULL;
67506+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
67507+ vma->vm_mirror = NULL;
67508+ }
67509+#endif
67510+
67511 rb_erase(&vma->vm_rb, &mm->mm_rb);
67512 mm->map_count--;
67513 tail_vma = vma;
67514@@ -1941,14 +2251,33 @@ static int __split_vma(struct mm_struct
67515 struct vm_area_struct *new;
67516 int err = -ENOMEM;
67517
67518+#ifdef CONFIG_PAX_SEGMEXEC
67519+ struct vm_area_struct *vma_m, *new_m = NULL;
67520+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
67521+#endif
67522+
67523 if (is_vm_hugetlb_page(vma) && (addr &
67524 ~(huge_page_mask(hstate_vma(vma)))))
67525 return -EINVAL;
67526
67527+#ifdef CONFIG_PAX_SEGMEXEC
67528+ vma_m = pax_find_mirror_vma(vma);
67529+#endif
67530+
67531 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
67532 if (!new)
67533 goto out_err;
67534
67535+#ifdef CONFIG_PAX_SEGMEXEC
67536+ if (vma_m) {
67537+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
67538+ if (!new_m) {
67539+ kmem_cache_free(vm_area_cachep, new);
67540+ goto out_err;
67541+ }
67542+ }
67543+#endif
67544+
67545 /* most fields are the same, copy all, and then fixup */
67546 *new = *vma;
67547
67548@@ -1961,6 +2290,22 @@ static int __split_vma(struct mm_struct
67549 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
67550 }
67551
67552+#ifdef CONFIG_PAX_SEGMEXEC
67553+ if (vma_m) {
67554+ *new_m = *vma_m;
67555+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
67556+ new_m->vm_mirror = new;
67557+ new->vm_mirror = new_m;
67558+
67559+ if (new_below)
67560+ new_m->vm_end = addr_m;
67561+ else {
67562+ new_m->vm_start = addr_m;
67563+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
67564+ }
67565+ }
67566+#endif
67567+
67568 pol = mpol_dup(vma_policy(vma));
67569 if (IS_ERR(pol)) {
67570 err = PTR_ERR(pol);
67571@@ -1986,6 +2331,42 @@ static int __split_vma(struct mm_struct
67572 else
67573 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
67574
67575+#ifdef CONFIG_PAX_SEGMEXEC
67576+ if (!err && vma_m) {
67577+ if (anon_vma_clone(new_m, vma_m))
67578+ goto out_free_mpol;
67579+
67580+ mpol_get(pol);
67581+ vma_set_policy(new_m, pol);
67582+
67583+ if (new_m->vm_file) {
67584+ get_file(new_m->vm_file);
67585+ if (vma_m->vm_flags & VM_EXECUTABLE)
67586+ added_exe_file_vma(mm);
67587+ }
67588+
67589+ if (new_m->vm_ops && new_m->vm_ops->open)
67590+ new_m->vm_ops->open(new_m);
67591+
67592+ if (new_below)
67593+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
67594+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
67595+ else
67596+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
67597+
67598+ if (err) {
67599+ if (new_m->vm_ops && new_m->vm_ops->close)
67600+ new_m->vm_ops->close(new_m);
67601+ if (new_m->vm_file) {
67602+ if (vma_m->vm_flags & VM_EXECUTABLE)
67603+ removed_exe_file_vma(mm);
67604+ fput(new_m->vm_file);
67605+ }
67606+ mpol_put(pol);
67607+ }
67608+ }
67609+#endif
67610+
67611 /* Success. */
67612 if (!err)
67613 return 0;
67614@@ -1998,10 +2379,18 @@ static int __split_vma(struct mm_struct
67615 removed_exe_file_vma(mm);
67616 fput(new->vm_file);
67617 }
67618- unlink_anon_vmas(new);
67619 out_free_mpol:
67620 mpol_put(pol);
67621 out_free_vma:
67622+
67623+#ifdef CONFIG_PAX_SEGMEXEC
67624+ if (new_m) {
67625+ unlink_anon_vmas(new_m);
67626+ kmem_cache_free(vm_area_cachep, new_m);
67627+ }
67628+#endif
67629+
67630+ unlink_anon_vmas(new);
67631 kmem_cache_free(vm_area_cachep, new);
67632 out_err:
67633 return err;
67634@@ -2014,6 +2403,15 @@ static int __split_vma(struct mm_struct
67635 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
67636 unsigned long addr, int new_below)
67637 {
67638+
67639+#ifdef CONFIG_PAX_SEGMEXEC
67640+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
67641+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
67642+ if (mm->map_count >= sysctl_max_map_count-1)
67643+ return -ENOMEM;
67644+ } else
67645+#endif
67646+
67647 if (mm->map_count >= sysctl_max_map_count)
67648 return -ENOMEM;
67649
67650@@ -2025,11 +2423,30 @@ int split_vma(struct mm_struct *mm, stru
67651 * work. This now handles partial unmappings.
67652 * Jeremy Fitzhardinge <jeremy@goop.org>
67653 */
67654+#ifdef CONFIG_PAX_SEGMEXEC
67655 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
67656 {
67657+ int ret = __do_munmap(mm, start, len);
67658+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
67659+ return ret;
67660+
67661+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
67662+}
67663+
67664+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
67665+#else
67666+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
67667+#endif
67668+{
67669 unsigned long end;
67670 struct vm_area_struct *vma, *prev, *last;
67671
67672+ /*
67673+ * mm->mmap_sem is required to protect against another thread
67674+ * changing the mappings in case we sleep.
67675+ */
67676+ verify_mm_writelocked(mm);
67677+
67678 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
67679 return -EINVAL;
67680
67681@@ -2104,6 +2521,8 @@ int do_munmap(struct mm_struct *mm, unsi
67682 /* Fix up all other VM information */
67683 remove_vma_list(mm, vma);
67684
67685+ track_exec_limit(mm, start, end, 0UL);
67686+
67687 return 0;
67688 }
67689
67690@@ -2116,22 +2535,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
67691
67692 profile_munmap(addr);
67693
67694+#ifdef CONFIG_PAX_SEGMEXEC
67695+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
67696+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
67697+ return -EINVAL;
67698+#endif
67699+
67700 down_write(&mm->mmap_sem);
67701 ret = do_munmap(mm, addr, len);
67702 up_write(&mm->mmap_sem);
67703 return ret;
67704 }
67705
67706-static inline void verify_mm_writelocked(struct mm_struct *mm)
67707-{
67708-#ifdef CONFIG_DEBUG_VM
67709- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
67710- WARN_ON(1);
67711- up_read(&mm->mmap_sem);
67712- }
67713-#endif
67714-}
67715-
67716 /*
67717 * this is really a simplified "do_mmap". it only handles
67718 * anonymous maps. eventually we may be able to do some
67719@@ -2145,6 +2560,7 @@ unsigned long do_brk(unsigned long addr,
67720 struct rb_node ** rb_link, * rb_parent;
67721 pgoff_t pgoff = addr >> PAGE_SHIFT;
67722 int error;
67723+ unsigned long charged;
67724
67725 len = PAGE_ALIGN(len);
67726 if (!len)
67727@@ -2156,16 +2572,30 @@ unsigned long do_brk(unsigned long addr,
67728
67729 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
67730
67731+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
67732+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
67733+ flags &= ~VM_EXEC;
67734+
67735+#ifdef CONFIG_PAX_MPROTECT
67736+ if (mm->pax_flags & MF_PAX_MPROTECT)
67737+ flags &= ~VM_MAYEXEC;
67738+#endif
67739+
67740+ }
67741+#endif
67742+
67743 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
67744 if (error & ~PAGE_MASK)
67745 return error;
67746
67747+ charged = len >> PAGE_SHIFT;
67748+
67749 /*
67750 * mlock MCL_FUTURE?
67751 */
67752 if (mm->def_flags & VM_LOCKED) {
67753 unsigned long locked, lock_limit;
67754- locked = len >> PAGE_SHIFT;
67755+ locked = charged;
67756 locked += mm->locked_vm;
67757 lock_limit = rlimit(RLIMIT_MEMLOCK);
67758 lock_limit >>= PAGE_SHIFT;
67759@@ -2182,22 +2612,22 @@ unsigned long do_brk(unsigned long addr,
67760 /*
67761 * Clear old maps. this also does some error checking for us
67762 */
67763- munmap_back:
67764 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
67765 if (vma && vma->vm_start < addr + len) {
67766 if (do_munmap(mm, addr, len))
67767 return -ENOMEM;
67768- goto munmap_back;
67769+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
67770+ BUG_ON(vma && vma->vm_start < addr + len);
67771 }
67772
67773 /* Check against address space limits *after* clearing old maps... */
67774- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
67775+ if (!may_expand_vm(mm, charged))
67776 return -ENOMEM;
67777
67778 if (mm->map_count > sysctl_max_map_count)
67779 return -ENOMEM;
67780
67781- if (security_vm_enough_memory(len >> PAGE_SHIFT))
67782+ if (security_vm_enough_memory(charged))
67783 return -ENOMEM;
67784
67785 /* Can we just expand an old private anonymous mapping? */
67786@@ -2211,7 +2641,7 @@ unsigned long do_brk(unsigned long addr,
67787 */
67788 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
67789 if (!vma) {
67790- vm_unacct_memory(len >> PAGE_SHIFT);
67791+ vm_unacct_memory(charged);
67792 return -ENOMEM;
67793 }
67794
67795@@ -2225,11 +2655,12 @@ unsigned long do_brk(unsigned long addr,
67796 vma_link(mm, vma, prev, rb_link, rb_parent);
67797 out:
67798 perf_event_mmap(vma);
67799- mm->total_vm += len >> PAGE_SHIFT;
67800+ mm->total_vm += charged;
67801 if (flags & VM_LOCKED) {
67802 if (!mlock_vma_pages_range(vma, addr, addr + len))
67803- mm->locked_vm += (len >> PAGE_SHIFT);
67804+ mm->locked_vm += charged;
67805 }
67806+ track_exec_limit(mm, addr, addr + len, flags);
67807 return addr;
67808 }
67809
67810@@ -2276,8 +2707,10 @@ void exit_mmap(struct mm_struct *mm)
67811 * Walk the list again, actually closing and freeing it,
67812 * with preemption enabled, without holding any MM locks.
67813 */
67814- while (vma)
67815+ while (vma) {
67816+ vma->vm_mirror = NULL;
67817 vma = remove_vma(vma);
67818+ }
67819
67820 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
67821 }
67822@@ -2291,6 +2724,13 @@ int insert_vm_struct(struct mm_struct *
67823 struct vm_area_struct * __vma, * prev;
67824 struct rb_node ** rb_link, * rb_parent;
67825
67826+#ifdef CONFIG_PAX_SEGMEXEC
67827+ struct vm_area_struct *vma_m = NULL;
67828+#endif
67829+
67830+ if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
67831+ return -EPERM;
67832+
67833 /*
67834 * The vm_pgoff of a purely anonymous vma should be irrelevant
67835 * until its first write fault, when page's anon_vma and index
67836@@ -2313,7 +2753,22 @@ int insert_vm_struct(struct mm_struct *
67837 if ((vma->vm_flags & VM_ACCOUNT) &&
67838 security_vm_enough_memory_mm(mm, vma_pages(vma)))
67839 return -ENOMEM;
67840+
67841+#ifdef CONFIG_PAX_SEGMEXEC
67842+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
67843+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
67844+ if (!vma_m)
67845+ return -ENOMEM;
67846+ }
67847+#endif
67848+
67849 vma_link(mm, vma, prev, rb_link, rb_parent);
67850+
67851+#ifdef CONFIG_PAX_SEGMEXEC
67852+ if (vma_m)
67853+ BUG_ON(pax_mirror_vma(vma_m, vma));
67854+#endif
67855+
67856 return 0;
67857 }
67858
67859@@ -2331,6 +2786,8 @@ struct vm_area_struct *copy_vma(struct v
67860 struct rb_node **rb_link, *rb_parent;
67861 struct mempolicy *pol;
67862
67863+ BUG_ON(vma->vm_mirror);
67864+
67865 /*
67866 * If anonymous vma has not yet been faulted, update new pgoff
67867 * to match new location, to increase its chance of merging.
67868@@ -2381,6 +2838,39 @@ struct vm_area_struct *copy_vma(struct v
67869 return NULL;
67870 }
67871
67872+#ifdef CONFIG_PAX_SEGMEXEC
67873+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
67874+{
67875+ struct vm_area_struct *prev_m;
67876+ struct rb_node **rb_link_m, *rb_parent_m;
67877+ struct mempolicy *pol_m;
67878+
67879+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
67880+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
67881+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
67882+ *vma_m = *vma;
67883+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
67884+ if (anon_vma_clone(vma_m, vma))
67885+ return -ENOMEM;
67886+ pol_m = vma_policy(vma_m);
67887+ mpol_get(pol_m);
67888+ vma_set_policy(vma_m, pol_m);
67889+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
67890+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
67891+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
67892+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
67893+ if (vma_m->vm_file)
67894+ get_file(vma_m->vm_file);
67895+ if (vma_m->vm_ops && vma_m->vm_ops->open)
67896+ vma_m->vm_ops->open(vma_m);
67897+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
67898+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
67899+ vma_m->vm_mirror = vma;
67900+ vma->vm_mirror = vma_m;
67901+ return 0;
67902+}
67903+#endif
67904+
67905 /*
67906 * Return true if the calling process may expand its vm space by the passed
67907 * number of pages
67908@@ -2391,7 +2881,7 @@ int may_expand_vm(struct mm_struct *mm,
67909 unsigned long lim;
67910
67911 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
67912-
67913+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
67914 if (cur + npages > lim)
67915 return 0;
67916 return 1;
67917@@ -2462,6 +2952,22 @@ int install_special_mapping(struct mm_st
67918 vma->vm_start = addr;
67919 vma->vm_end = addr + len;
67920
67921+#ifdef CONFIG_PAX_MPROTECT
67922+ if (mm->pax_flags & MF_PAX_MPROTECT) {
67923+#ifndef CONFIG_PAX_MPROTECT_COMPAT
67924+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
67925+ return -EPERM;
67926+ if (!(vm_flags & VM_EXEC))
67927+ vm_flags &= ~VM_MAYEXEC;
67928+#else
67929+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
67930+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
67931+#endif
67932+ else
67933+ vm_flags &= ~VM_MAYWRITE;
67934+ }
67935+#endif
67936+
67937 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
67938 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
67939
67940diff -urNp linux-3.0.4/mm/mprotect.c linux-3.0.4/mm/mprotect.c
67941--- linux-3.0.4/mm/mprotect.c 2011-07-21 22:17:23.000000000 -0400
67942+++ linux-3.0.4/mm/mprotect.c 2011-08-23 21:48:14.000000000 -0400
67943@@ -23,10 +23,16 @@
67944 #include <linux/mmu_notifier.h>
67945 #include <linux/migrate.h>
67946 #include <linux/perf_event.h>
67947+
67948+#ifdef CONFIG_PAX_MPROTECT
67949+#include <linux/elf.h>
67950+#endif
67951+
67952 #include <asm/uaccess.h>
67953 #include <asm/pgtable.h>
67954 #include <asm/cacheflush.h>
67955 #include <asm/tlbflush.h>
67956+#include <asm/mmu_context.h>
67957
67958 #ifndef pgprot_modify
67959 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
67960@@ -141,6 +147,48 @@ static void change_protection(struct vm_
67961 flush_tlb_range(vma, start, end);
67962 }
67963
67964+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
67965+/* called while holding the mmap semaphor for writing except stack expansion */
67966+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
67967+{
67968+ unsigned long oldlimit, newlimit = 0UL;
67969+
67970+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
67971+ return;
67972+
67973+ spin_lock(&mm->page_table_lock);
67974+ oldlimit = mm->context.user_cs_limit;
67975+ if ((prot & VM_EXEC) && oldlimit < end)
67976+ /* USER_CS limit moved up */
67977+ newlimit = end;
67978+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
67979+ /* USER_CS limit moved down */
67980+ newlimit = start;
67981+
67982+ if (newlimit) {
67983+ mm->context.user_cs_limit = newlimit;
67984+
67985+#ifdef CONFIG_SMP
67986+ wmb();
67987+ cpus_clear(mm->context.cpu_user_cs_mask);
67988+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
67989+#endif
67990+
67991+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
67992+ }
67993+ spin_unlock(&mm->page_table_lock);
67994+ if (newlimit == end) {
67995+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
67996+
67997+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
67998+ if (is_vm_hugetlb_page(vma))
67999+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
68000+ else
68001+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
68002+ }
68003+}
68004+#endif
68005+
68006 int
68007 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
68008 unsigned long start, unsigned long end, unsigned long newflags)
68009@@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vm
68010 int error;
68011 int dirty_accountable = 0;
68012
68013+#ifdef CONFIG_PAX_SEGMEXEC
68014+ struct vm_area_struct *vma_m = NULL;
68015+ unsigned long start_m, end_m;
68016+
68017+ start_m = start + SEGMEXEC_TASK_SIZE;
68018+ end_m = end + SEGMEXEC_TASK_SIZE;
68019+#endif
68020+
68021 if (newflags == oldflags) {
68022 *pprev = vma;
68023 return 0;
68024 }
68025
68026+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
68027+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
68028+
68029+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
68030+ return -ENOMEM;
68031+
68032+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
68033+ return -ENOMEM;
68034+ }
68035+
68036 /*
68037 * If we make a private mapping writable we increase our commit;
68038 * but (without finer accounting) cannot reduce our commit if we
68039@@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vm
68040 }
68041 }
68042
68043+#ifdef CONFIG_PAX_SEGMEXEC
68044+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
68045+ if (start != vma->vm_start) {
68046+ error = split_vma(mm, vma, start, 1);
68047+ if (error)
68048+ goto fail;
68049+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
68050+ *pprev = (*pprev)->vm_next;
68051+ }
68052+
68053+ if (end != vma->vm_end) {
68054+ error = split_vma(mm, vma, end, 0);
68055+ if (error)
68056+ goto fail;
68057+ }
68058+
68059+ if (pax_find_mirror_vma(vma)) {
68060+ error = __do_munmap(mm, start_m, end_m - start_m);
68061+ if (error)
68062+ goto fail;
68063+ } else {
68064+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
68065+ if (!vma_m) {
68066+ error = -ENOMEM;
68067+ goto fail;
68068+ }
68069+ vma->vm_flags = newflags;
68070+ error = pax_mirror_vma(vma_m, vma);
68071+ if (error) {
68072+ vma->vm_flags = oldflags;
68073+ goto fail;
68074+ }
68075+ }
68076+ }
68077+#endif
68078+
68079 /*
68080 * First try to merge with previous and/or next vma.
68081 */
68082@@ -204,9 +306,21 @@ success:
68083 * vm_flags and vm_page_prot are protected by the mmap_sem
68084 * held in write mode.
68085 */
68086+
68087+#ifdef CONFIG_PAX_SEGMEXEC
68088+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
68089+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
68090+#endif
68091+
68092 vma->vm_flags = newflags;
68093+
68094+#ifdef CONFIG_PAX_MPROTECT
68095+ if (mm->binfmt && mm->binfmt->handle_mprotect)
68096+ mm->binfmt->handle_mprotect(vma, newflags);
68097+#endif
68098+
68099 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
68100- vm_get_page_prot(newflags));
68101+ vm_get_page_prot(vma->vm_flags));
68102
68103 if (vma_wants_writenotify(vma)) {
68104 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
68105@@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
68106 end = start + len;
68107 if (end <= start)
68108 return -ENOMEM;
68109+
68110+#ifdef CONFIG_PAX_SEGMEXEC
68111+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
68112+ if (end > SEGMEXEC_TASK_SIZE)
68113+ return -EINVAL;
68114+ } else
68115+#endif
68116+
68117+ if (end > TASK_SIZE)
68118+ return -EINVAL;
68119+
68120 if (!arch_validate_prot(prot))
68121 return -EINVAL;
68122
68123@@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
68124 /*
68125 * Does the application expect PROT_READ to imply PROT_EXEC:
68126 */
68127- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
68128+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
68129 prot |= PROT_EXEC;
68130
68131 vm_flags = calc_vm_prot_bits(prot);
68132@@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
68133 if (start > vma->vm_start)
68134 prev = vma;
68135
68136+#ifdef CONFIG_PAX_MPROTECT
68137+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
68138+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
68139+#endif
68140+
68141 for (nstart = start ; ; ) {
68142 unsigned long newflags;
68143
68144@@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
68145
68146 /* newflags >> 4 shift VM_MAY% in place of VM_% */
68147 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
68148+ if (prot & (PROT_WRITE | PROT_EXEC))
68149+ gr_log_rwxmprotect(vma->vm_file);
68150+
68151+ error = -EACCES;
68152+ goto out;
68153+ }
68154+
68155+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
68156 error = -EACCES;
68157 goto out;
68158 }
68159@@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
68160 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
68161 if (error)
68162 goto out;
68163+
68164+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
68165+
68166 nstart = tmp;
68167
68168 if (nstart < prev->vm_end)
68169diff -urNp linux-3.0.4/mm/mremap.c linux-3.0.4/mm/mremap.c
68170--- linux-3.0.4/mm/mremap.c 2011-07-21 22:17:23.000000000 -0400
68171+++ linux-3.0.4/mm/mremap.c 2011-08-23 21:47:56.000000000 -0400
68172@@ -113,6 +113,12 @@ static void move_ptes(struct vm_area_str
68173 continue;
68174 pte = ptep_clear_flush(vma, old_addr, old_pte);
68175 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
68176+
68177+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
68178+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
68179+ pte = pte_exprotect(pte);
68180+#endif
68181+
68182 set_pte_at(mm, new_addr, new_pte, pte);
68183 }
68184
68185@@ -272,6 +278,11 @@ static struct vm_area_struct *vma_to_res
68186 if (is_vm_hugetlb_page(vma))
68187 goto Einval;
68188
68189+#ifdef CONFIG_PAX_SEGMEXEC
68190+ if (pax_find_mirror_vma(vma))
68191+ goto Einval;
68192+#endif
68193+
68194 /* We can't remap across vm area boundaries */
68195 if (old_len > vma->vm_end - addr)
68196 goto Efault;
68197@@ -328,20 +339,25 @@ static unsigned long mremap_to(unsigned
68198 unsigned long ret = -EINVAL;
68199 unsigned long charged = 0;
68200 unsigned long map_flags;
68201+ unsigned long pax_task_size = TASK_SIZE;
68202
68203 if (new_addr & ~PAGE_MASK)
68204 goto out;
68205
68206- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
68207+#ifdef CONFIG_PAX_SEGMEXEC
68208+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
68209+ pax_task_size = SEGMEXEC_TASK_SIZE;
68210+#endif
68211+
68212+ pax_task_size -= PAGE_SIZE;
68213+
68214+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
68215 goto out;
68216
68217 /* Check if the location we're moving into overlaps the
68218 * old location at all, and fail if it does.
68219 */
68220- if ((new_addr <= addr) && (new_addr+new_len) > addr)
68221- goto out;
68222-
68223- if ((addr <= new_addr) && (addr+old_len) > new_addr)
68224+ if (addr + old_len > new_addr && new_addr + new_len > addr)
68225 goto out;
68226
68227 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
68228@@ -413,6 +429,7 @@ unsigned long do_mremap(unsigned long ad
68229 struct vm_area_struct *vma;
68230 unsigned long ret = -EINVAL;
68231 unsigned long charged = 0;
68232+ unsigned long pax_task_size = TASK_SIZE;
68233
68234 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
68235 goto out;
68236@@ -431,6 +448,17 @@ unsigned long do_mremap(unsigned long ad
68237 if (!new_len)
68238 goto out;
68239
68240+#ifdef CONFIG_PAX_SEGMEXEC
68241+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
68242+ pax_task_size = SEGMEXEC_TASK_SIZE;
68243+#endif
68244+
68245+ pax_task_size -= PAGE_SIZE;
68246+
68247+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
68248+ old_len > pax_task_size || addr > pax_task_size-old_len)
68249+ goto out;
68250+
68251 if (flags & MREMAP_FIXED) {
68252 if (flags & MREMAP_MAYMOVE)
68253 ret = mremap_to(addr, old_len, new_addr, new_len);
68254@@ -480,6 +508,7 @@ unsigned long do_mremap(unsigned long ad
68255 addr + new_len);
68256 }
68257 ret = addr;
68258+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
68259 goto out;
68260 }
68261 }
68262@@ -506,7 +535,13 @@ unsigned long do_mremap(unsigned long ad
68263 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
68264 if (ret)
68265 goto out;
68266+
68267+ map_flags = vma->vm_flags;
68268 ret = move_vma(vma, addr, old_len, new_len, new_addr);
68269+ if (!(ret & ~PAGE_MASK)) {
68270+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
68271+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
68272+ }
68273 }
68274 out:
68275 if (ret & ~PAGE_MASK)
68276diff -urNp linux-3.0.4/mm/nobootmem.c linux-3.0.4/mm/nobootmem.c
68277--- linux-3.0.4/mm/nobootmem.c 2011-07-21 22:17:23.000000000 -0400
68278+++ linux-3.0.4/mm/nobootmem.c 2011-08-23 21:47:56.000000000 -0400
68279@@ -110,19 +110,30 @@ static void __init __free_pages_memory(u
68280 unsigned long __init free_all_memory_core_early(int nodeid)
68281 {
68282 int i;
68283- u64 start, end;
68284+ u64 start, end, startrange, endrange;
68285 unsigned long count = 0;
68286- struct range *range = NULL;
68287+ struct range *range = NULL, rangerange = { 0, 0 };
68288 int nr_range;
68289
68290 nr_range = get_free_all_memory_range(&range, nodeid);
68291+ startrange = __pa(range) >> PAGE_SHIFT;
68292+ endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
68293
68294 for (i = 0; i < nr_range; i++) {
68295 start = range[i].start;
68296 end = range[i].end;
68297+ if (start <= endrange && startrange < end) {
68298+ BUG_ON(rangerange.start | rangerange.end);
68299+ rangerange = range[i];
68300+ continue;
68301+ }
68302 count += end - start;
68303 __free_pages_memory(start, end);
68304 }
68305+ start = rangerange.start;
68306+ end = rangerange.end;
68307+ count += end - start;
68308+ __free_pages_memory(start, end);
68309
68310 return count;
68311 }
68312diff -urNp linux-3.0.4/mm/nommu.c linux-3.0.4/mm/nommu.c
68313--- linux-3.0.4/mm/nommu.c 2011-07-21 22:17:23.000000000 -0400
68314+++ linux-3.0.4/mm/nommu.c 2011-08-23 21:47:56.000000000 -0400
68315@@ -63,7 +63,6 @@ int sysctl_overcommit_memory = OVERCOMMI
68316 int sysctl_overcommit_ratio = 50; /* default is 50% */
68317 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
68318 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
68319-int heap_stack_gap = 0;
68320
68321 atomic_long_t mmap_pages_allocated;
68322
68323@@ -826,15 +825,6 @@ struct vm_area_struct *find_vma(struct m
68324 EXPORT_SYMBOL(find_vma);
68325
68326 /*
68327- * find a VMA
68328- * - we don't extend stack VMAs under NOMMU conditions
68329- */
68330-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
68331-{
68332- return find_vma(mm, addr);
68333-}
68334-
68335-/*
68336 * expand a stack to a given address
68337 * - not supported under NOMMU conditions
68338 */
68339@@ -1554,6 +1544,7 @@ int split_vma(struct mm_struct *mm, stru
68340
68341 /* most fields are the same, copy all, and then fixup */
68342 *new = *vma;
68343+ INIT_LIST_HEAD(&new->anon_vma_chain);
68344 *region = *vma->vm_region;
68345 new->vm_region = region;
68346
68347diff -urNp linux-3.0.4/mm/page_alloc.c linux-3.0.4/mm/page_alloc.c
68348--- linux-3.0.4/mm/page_alloc.c 2011-07-21 22:17:23.000000000 -0400
68349+++ linux-3.0.4/mm/page_alloc.c 2011-08-23 21:48:14.000000000 -0400
68350@@ -340,7 +340,7 @@ out:
68351 * This usage means that zero-order pages may not be compound.
68352 */
68353
68354-static void free_compound_page(struct page *page)
68355+void free_compound_page(struct page *page)
68356 {
68357 __free_pages_ok(page, compound_order(page));
68358 }
68359@@ -653,6 +653,10 @@ static bool free_pages_prepare(struct pa
68360 int i;
68361 int bad = 0;
68362
68363+#ifdef CONFIG_PAX_MEMORY_SANITIZE
68364+ unsigned long index = 1UL << order;
68365+#endif
68366+
68367 trace_mm_page_free_direct(page, order);
68368 kmemcheck_free_shadow(page, order);
68369
68370@@ -668,6 +672,12 @@ static bool free_pages_prepare(struct pa
68371 debug_check_no_obj_freed(page_address(page),
68372 PAGE_SIZE << order);
68373 }
68374+
68375+#ifdef CONFIG_PAX_MEMORY_SANITIZE
68376+ for (; index; --index)
68377+ sanitize_highpage(page + index - 1);
68378+#endif
68379+
68380 arch_free_page(page, order);
68381 kernel_map_pages(page, 1 << order, 0);
68382
68383@@ -783,8 +793,10 @@ static int prep_new_page(struct page *pa
68384 arch_alloc_page(page, order);
68385 kernel_map_pages(page, 1 << order, 1);
68386
68387+#ifndef CONFIG_PAX_MEMORY_SANITIZE
68388 if (gfp_flags & __GFP_ZERO)
68389 prep_zero_page(page, order, gfp_flags);
68390+#endif
68391
68392 if (order && (gfp_flags & __GFP_COMP))
68393 prep_compound_page(page, order);
68394@@ -2525,6 +2537,8 @@ void show_free_areas(unsigned int filter
68395 int cpu;
68396 struct zone *zone;
68397
68398+ pax_track_stack();
68399+
68400 for_each_populated_zone(zone) {
68401 if (skip_free_areas_node(filter, zone_to_nid(zone)))
68402 continue;
68403diff -urNp linux-3.0.4/mm/percpu.c linux-3.0.4/mm/percpu.c
68404--- linux-3.0.4/mm/percpu.c 2011-07-21 22:17:23.000000000 -0400
68405+++ linux-3.0.4/mm/percpu.c 2011-08-23 21:47:56.000000000 -0400
68406@@ -121,7 +121,7 @@ static unsigned int pcpu_first_unit_cpu
68407 static unsigned int pcpu_last_unit_cpu __read_mostly;
68408
68409 /* the address of the first chunk which starts with the kernel static area */
68410-void *pcpu_base_addr __read_mostly;
68411+void *pcpu_base_addr __read_only;
68412 EXPORT_SYMBOL_GPL(pcpu_base_addr);
68413
68414 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
68415diff -urNp linux-3.0.4/mm/rmap.c linux-3.0.4/mm/rmap.c
68416--- linux-3.0.4/mm/rmap.c 2011-07-21 22:17:23.000000000 -0400
68417+++ linux-3.0.4/mm/rmap.c 2011-08-23 21:47:56.000000000 -0400
68418@@ -153,6 +153,10 @@ int anon_vma_prepare(struct vm_area_stru
68419 struct anon_vma *anon_vma = vma->anon_vma;
68420 struct anon_vma_chain *avc;
68421
68422+#ifdef CONFIG_PAX_SEGMEXEC
68423+ struct anon_vma_chain *avc_m = NULL;
68424+#endif
68425+
68426 might_sleep();
68427 if (unlikely(!anon_vma)) {
68428 struct mm_struct *mm = vma->vm_mm;
68429@@ -162,6 +166,12 @@ int anon_vma_prepare(struct vm_area_stru
68430 if (!avc)
68431 goto out_enomem;
68432
68433+#ifdef CONFIG_PAX_SEGMEXEC
68434+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
68435+ if (!avc_m)
68436+ goto out_enomem_free_avc;
68437+#endif
68438+
68439 anon_vma = find_mergeable_anon_vma(vma);
68440 allocated = NULL;
68441 if (!anon_vma) {
68442@@ -175,6 +185,21 @@ int anon_vma_prepare(struct vm_area_stru
68443 /* page_table_lock to protect against threads */
68444 spin_lock(&mm->page_table_lock);
68445 if (likely(!vma->anon_vma)) {
68446+
68447+#ifdef CONFIG_PAX_SEGMEXEC
68448+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
68449+
68450+ if (vma_m) {
68451+ BUG_ON(vma_m->anon_vma);
68452+ vma_m->anon_vma = anon_vma;
68453+ avc_m->anon_vma = anon_vma;
68454+ avc_m->vma = vma;
68455+ list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
68456+ list_add(&avc_m->same_anon_vma, &anon_vma->head);
68457+ avc_m = NULL;
68458+ }
68459+#endif
68460+
68461 vma->anon_vma = anon_vma;
68462 avc->anon_vma = anon_vma;
68463 avc->vma = vma;
68464@@ -188,12 +213,24 @@ int anon_vma_prepare(struct vm_area_stru
68465
68466 if (unlikely(allocated))
68467 put_anon_vma(allocated);
68468+
68469+#ifdef CONFIG_PAX_SEGMEXEC
68470+ if (unlikely(avc_m))
68471+ anon_vma_chain_free(avc_m);
68472+#endif
68473+
68474 if (unlikely(avc))
68475 anon_vma_chain_free(avc);
68476 }
68477 return 0;
68478
68479 out_enomem_free_avc:
68480+
68481+#ifdef CONFIG_PAX_SEGMEXEC
68482+ if (avc_m)
68483+ anon_vma_chain_free(avc_m);
68484+#endif
68485+
68486 anon_vma_chain_free(avc);
68487 out_enomem:
68488 return -ENOMEM;
68489@@ -244,7 +281,7 @@ static void anon_vma_chain_link(struct v
68490 * Attach the anon_vmas from src to dst.
68491 * Returns 0 on success, -ENOMEM on failure.
68492 */
68493-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
68494+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
68495 {
68496 struct anon_vma_chain *avc, *pavc;
68497 struct anon_vma *root = NULL;
68498@@ -277,7 +314,7 @@ int anon_vma_clone(struct vm_area_struct
68499 * the corresponding VMA in the parent process is attached to.
68500 * Returns 0 on success, non-zero on failure.
68501 */
68502-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
68503+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
68504 {
68505 struct anon_vma_chain *avc;
68506 struct anon_vma *anon_vma;
68507diff -urNp linux-3.0.4/mm/shmem.c linux-3.0.4/mm/shmem.c
68508--- linux-3.0.4/mm/shmem.c 2011-07-21 22:17:23.000000000 -0400
68509+++ linux-3.0.4/mm/shmem.c 2011-08-23 21:48:14.000000000 -0400
68510@@ -31,7 +31,7 @@
68511 #include <linux/percpu_counter.h>
68512 #include <linux/swap.h>
68513
68514-static struct vfsmount *shm_mnt;
68515+struct vfsmount *shm_mnt;
68516
68517 #ifdef CONFIG_SHMEM
68518 /*
68519@@ -1101,6 +1101,8 @@ static int shmem_writepage(struct page *
68520 goto unlock;
68521 }
68522 entry = shmem_swp_entry(info, index, NULL);
68523+ if (!entry)
68524+ goto unlock;
68525 if (entry->val) {
68526 /*
68527 * The more uptodate page coming down from a stacked
68528@@ -1172,6 +1174,8 @@ static struct page *shmem_swapin(swp_ent
68529 struct vm_area_struct pvma;
68530 struct page *page;
68531
68532+ pax_track_stack();
68533+
68534 spol = mpol_cond_copy(&mpol,
68535 mpol_shared_policy_lookup(&info->policy, idx));
68536
68537@@ -2568,8 +2572,7 @@ int shmem_fill_super(struct super_block
68538 int err = -ENOMEM;
68539
68540 /* Round up to L1_CACHE_BYTES to resist false sharing */
68541- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
68542- L1_CACHE_BYTES), GFP_KERNEL);
68543+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
68544 if (!sbinfo)
68545 return -ENOMEM;
68546
68547diff -urNp linux-3.0.4/mm/slab.c linux-3.0.4/mm/slab.c
68548--- linux-3.0.4/mm/slab.c 2011-07-21 22:17:23.000000000 -0400
68549+++ linux-3.0.4/mm/slab.c 2011-08-23 21:48:14.000000000 -0400
68550@@ -151,7 +151,7 @@
68551
68552 /* Legal flag mask for kmem_cache_create(). */
68553 #if DEBUG
68554-# define CREATE_MASK (SLAB_RED_ZONE | \
68555+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
68556 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
68557 SLAB_CACHE_DMA | \
68558 SLAB_STORE_USER | \
68559@@ -159,7 +159,7 @@
68560 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
68561 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
68562 #else
68563-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
68564+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
68565 SLAB_CACHE_DMA | \
68566 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
68567 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
68568@@ -288,7 +288,7 @@ struct kmem_list3 {
68569 * Need this for bootstrapping a per node allocator.
68570 */
68571 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
68572-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
68573+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
68574 #define CACHE_CACHE 0
68575 #define SIZE_AC MAX_NUMNODES
68576 #define SIZE_L3 (2 * MAX_NUMNODES)
68577@@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_
68578 if ((x)->max_freeable < i) \
68579 (x)->max_freeable = i; \
68580 } while (0)
68581-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
68582-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
68583-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
68584-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
68585+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
68586+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
68587+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
68588+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
68589 #else
68590 #define STATS_INC_ACTIVE(x) do { } while (0)
68591 #define STATS_DEC_ACTIVE(x) do { } while (0)
68592@@ -538,7 +538,7 @@ static inline void *index_to_obj(struct
68593 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
68594 */
68595 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
68596- const struct slab *slab, void *obj)
68597+ const struct slab *slab, const void *obj)
68598 {
68599 u32 offset = (obj - slab->s_mem);
68600 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
68601@@ -564,7 +564,7 @@ struct cache_names {
68602 static struct cache_names __initdata cache_names[] = {
68603 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
68604 #include <linux/kmalloc_sizes.h>
68605- {NULL,}
68606+ {NULL}
68607 #undef CACHE
68608 };
68609
68610@@ -1530,7 +1530,7 @@ void __init kmem_cache_init(void)
68611 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
68612 sizes[INDEX_AC].cs_size,
68613 ARCH_KMALLOC_MINALIGN,
68614- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
68615+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
68616 NULL);
68617
68618 if (INDEX_AC != INDEX_L3) {
68619@@ -1538,7 +1538,7 @@ void __init kmem_cache_init(void)
68620 kmem_cache_create(names[INDEX_L3].name,
68621 sizes[INDEX_L3].cs_size,
68622 ARCH_KMALLOC_MINALIGN,
68623- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
68624+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
68625 NULL);
68626 }
68627
68628@@ -1556,7 +1556,7 @@ void __init kmem_cache_init(void)
68629 sizes->cs_cachep = kmem_cache_create(names->name,
68630 sizes->cs_size,
68631 ARCH_KMALLOC_MINALIGN,
68632- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
68633+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
68634 NULL);
68635 }
68636 #ifdef CONFIG_ZONE_DMA
68637@@ -4272,10 +4272,10 @@ static int s_show(struct seq_file *m, vo
68638 }
68639 /* cpu stats */
68640 {
68641- unsigned long allochit = atomic_read(&cachep->allochit);
68642- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
68643- unsigned long freehit = atomic_read(&cachep->freehit);
68644- unsigned long freemiss = atomic_read(&cachep->freemiss);
68645+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
68646+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
68647+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
68648+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
68649
68650 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
68651 allochit, allocmiss, freehit, freemiss);
68652@@ -4532,15 +4532,66 @@ static const struct file_operations proc
68653
68654 static int __init slab_proc_init(void)
68655 {
68656- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
68657+ mode_t gr_mode = S_IRUGO;
68658+
68659+#ifdef CONFIG_GRKERNSEC_PROC_ADD
68660+ gr_mode = S_IRUSR;
68661+#endif
68662+
68663+ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
68664 #ifdef CONFIG_DEBUG_SLAB_LEAK
68665- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
68666+ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
68667 #endif
68668 return 0;
68669 }
68670 module_init(slab_proc_init);
68671 #endif
68672
68673+void check_object_size(const void *ptr, unsigned long n, bool to)
68674+{
68675+
68676+#ifdef CONFIG_PAX_USERCOPY
68677+ struct page *page;
68678+ struct kmem_cache *cachep = NULL;
68679+ struct slab *slabp;
68680+ unsigned int objnr;
68681+ unsigned long offset;
68682+
68683+ if (!n)
68684+ return;
68685+
68686+ if (ZERO_OR_NULL_PTR(ptr))
68687+ goto report;
68688+
68689+ if (!virt_addr_valid(ptr))
68690+ return;
68691+
68692+ page = virt_to_head_page(ptr);
68693+
68694+ if (!PageSlab(page)) {
68695+ if (object_is_on_stack(ptr, n) == -1)
68696+ goto report;
68697+ return;
68698+ }
68699+
68700+ cachep = page_get_cache(page);
68701+ if (!(cachep->flags & SLAB_USERCOPY))
68702+ goto report;
68703+
68704+ slabp = page_get_slab(page);
68705+ objnr = obj_to_index(cachep, slabp, ptr);
68706+ BUG_ON(objnr >= cachep->num);
68707+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
68708+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
68709+ return;
68710+
68711+report:
68712+ pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
68713+#endif
68714+
68715+}
68716+EXPORT_SYMBOL(check_object_size);
68717+
68718 /**
68719 * ksize - get the actual amount of memory allocated for a given object
68720 * @objp: Pointer to the object
68721diff -urNp linux-3.0.4/mm/slob.c linux-3.0.4/mm/slob.c
68722--- linux-3.0.4/mm/slob.c 2011-07-21 22:17:23.000000000 -0400
68723+++ linux-3.0.4/mm/slob.c 2011-08-23 21:47:56.000000000 -0400
68724@@ -29,7 +29,7 @@
68725 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
68726 * alloc_pages() directly, allocating compound pages so the page order
68727 * does not have to be separately tracked, and also stores the exact
68728- * allocation size in page->private so that it can be used to accurately
68729+ * allocation size in slob_page->size so that it can be used to accurately
68730 * provide ksize(). These objects are detected in kfree() because slob_page()
68731 * is false for them.
68732 *
68733@@ -58,6 +58,7 @@
68734 */
68735
68736 #include <linux/kernel.h>
68737+#include <linux/sched.h>
68738 #include <linux/slab.h>
68739 #include <linux/mm.h>
68740 #include <linux/swap.h> /* struct reclaim_state */
68741@@ -102,7 +103,8 @@ struct slob_page {
68742 unsigned long flags; /* mandatory */
68743 atomic_t _count; /* mandatory */
68744 slobidx_t units; /* free units left in page */
68745- unsigned long pad[2];
68746+ unsigned long pad[1];
68747+ unsigned long size; /* size when >=PAGE_SIZE */
68748 slob_t *free; /* first free slob_t in page */
68749 struct list_head list; /* linked list of free pages */
68750 };
68751@@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
68752 */
68753 static inline int is_slob_page(struct slob_page *sp)
68754 {
68755- return PageSlab((struct page *)sp);
68756+ return PageSlab((struct page *)sp) && !sp->size;
68757 }
68758
68759 static inline void set_slob_page(struct slob_page *sp)
68760@@ -150,7 +152,7 @@ static inline void clear_slob_page(struc
68761
68762 static inline struct slob_page *slob_page(const void *addr)
68763 {
68764- return (struct slob_page *)virt_to_page(addr);
68765+ return (struct slob_page *)virt_to_head_page(addr);
68766 }
68767
68768 /*
68769@@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_
68770 /*
68771 * Return the size of a slob block.
68772 */
68773-static slobidx_t slob_units(slob_t *s)
68774+static slobidx_t slob_units(const slob_t *s)
68775 {
68776 if (s->units > 0)
68777 return s->units;
68778@@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
68779 /*
68780 * Return the next free slob block pointer after this one.
68781 */
68782-static slob_t *slob_next(slob_t *s)
68783+static slob_t *slob_next(const slob_t *s)
68784 {
68785 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
68786 slobidx_t next;
68787@@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
68788 /*
68789 * Returns true if s is the last free block in its page.
68790 */
68791-static int slob_last(slob_t *s)
68792+static int slob_last(const slob_t *s)
68793 {
68794 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
68795 }
68796@@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, i
68797 if (!page)
68798 return NULL;
68799
68800+ set_slob_page(page);
68801 return page_address(page);
68802 }
68803
68804@@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp
68805 if (!b)
68806 return NULL;
68807 sp = slob_page(b);
68808- set_slob_page(sp);
68809
68810 spin_lock_irqsave(&slob_lock, flags);
68811 sp->units = SLOB_UNITS(PAGE_SIZE);
68812 sp->free = b;
68813+ sp->size = 0;
68814 INIT_LIST_HEAD(&sp->list);
68815 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
68816 set_slob_page_free(sp, slob_list);
68817@@ -476,10 +479,9 @@ out:
68818 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
68819 */
68820
68821-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
68822+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
68823 {
68824- unsigned int *m;
68825- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
68826+ slob_t *m;
68827 void *ret;
68828
68829 lockdep_trace_alloc(gfp);
68830@@ -492,7 +494,10 @@ void *__kmalloc_node(size_t size, gfp_t
68831
68832 if (!m)
68833 return NULL;
68834- *m = size;
68835+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
68836+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
68837+ m[0].units = size;
68838+ m[1].units = align;
68839 ret = (void *)m + align;
68840
68841 trace_kmalloc_node(_RET_IP_, ret,
68842@@ -504,16 +509,25 @@ void *__kmalloc_node(size_t size, gfp_t
68843 gfp |= __GFP_COMP;
68844 ret = slob_new_pages(gfp, order, node);
68845 if (ret) {
68846- struct page *page;
68847- page = virt_to_page(ret);
68848- page->private = size;
68849+ struct slob_page *sp;
68850+ sp = slob_page(ret);
68851+ sp->size = size;
68852 }
68853
68854 trace_kmalloc_node(_RET_IP_, ret,
68855 size, PAGE_SIZE << order, gfp, node);
68856 }
68857
68858- kmemleak_alloc(ret, size, 1, gfp);
68859+ return ret;
68860+}
68861+
68862+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
68863+{
68864+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
68865+ void *ret = __kmalloc_node_align(size, gfp, node, align);
68866+
68867+ if (!ZERO_OR_NULL_PTR(ret))
68868+ kmemleak_alloc(ret, size, 1, gfp);
68869 return ret;
68870 }
68871 EXPORT_SYMBOL(__kmalloc_node);
68872@@ -531,13 +545,88 @@ void kfree(const void *block)
68873 sp = slob_page(block);
68874 if (is_slob_page(sp)) {
68875 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
68876- unsigned int *m = (unsigned int *)(block - align);
68877- slob_free(m, *m + align);
68878- } else
68879+ slob_t *m = (slob_t *)(block - align);
68880+ slob_free(m, m[0].units + align);
68881+ } else {
68882+ clear_slob_page(sp);
68883+ free_slob_page(sp);
68884+ sp->size = 0;
68885 put_page(&sp->page);
68886+ }
68887 }
68888 EXPORT_SYMBOL(kfree);
68889
68890+void check_object_size(const void *ptr, unsigned long n, bool to)
68891+{
68892+
68893+#ifdef CONFIG_PAX_USERCOPY
68894+ struct slob_page *sp;
68895+ const slob_t *free;
68896+ const void *base;
68897+ unsigned long flags;
68898+
68899+ if (!n)
68900+ return;
68901+
68902+ if (ZERO_OR_NULL_PTR(ptr))
68903+ goto report;
68904+
68905+ if (!virt_addr_valid(ptr))
68906+ return;
68907+
68908+ sp = slob_page(ptr);
68909+ if (!PageSlab((struct page*)sp)) {
68910+ if (object_is_on_stack(ptr, n) == -1)
68911+ goto report;
68912+ return;
68913+ }
68914+
68915+ if (sp->size) {
68916+ base = page_address(&sp->page);
68917+ if (base <= ptr && n <= sp->size - (ptr - base))
68918+ return;
68919+ goto report;
68920+ }
68921+
68922+ /* some tricky double walking to find the chunk */
68923+ spin_lock_irqsave(&slob_lock, flags);
68924+ base = (void *)((unsigned long)ptr & PAGE_MASK);
68925+ free = sp->free;
68926+
68927+ while (!slob_last(free) && (void *)free <= ptr) {
68928+ base = free + slob_units(free);
68929+ free = slob_next(free);
68930+ }
68931+
68932+ while (base < (void *)free) {
68933+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
68934+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
68935+ int offset;
68936+
68937+ if (ptr < base + align)
68938+ break;
68939+
68940+ offset = ptr - base - align;
68941+ if (offset >= m) {
68942+ base += size;
68943+ continue;
68944+ }
68945+
68946+ if (n > m - offset)
68947+ break;
68948+
68949+ spin_unlock_irqrestore(&slob_lock, flags);
68950+ return;
68951+ }
68952+
68953+ spin_unlock_irqrestore(&slob_lock, flags);
68954+report:
68955+ pax_report_usercopy(ptr, n, to, NULL);
68956+#endif
68957+
68958+}
68959+EXPORT_SYMBOL(check_object_size);
68960+
68961 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
68962 size_t ksize(const void *block)
68963 {
68964@@ -550,10 +639,10 @@ size_t ksize(const void *block)
68965 sp = slob_page(block);
68966 if (is_slob_page(sp)) {
68967 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
68968- unsigned int *m = (unsigned int *)(block - align);
68969- return SLOB_UNITS(*m) * SLOB_UNIT;
68970+ slob_t *m = (slob_t *)(block - align);
68971+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
68972 } else
68973- return sp->page.private;
68974+ return sp->size;
68975 }
68976 EXPORT_SYMBOL(ksize);
68977
68978@@ -569,8 +658,13 @@ struct kmem_cache *kmem_cache_create(con
68979 {
68980 struct kmem_cache *c;
68981
68982+#ifdef CONFIG_PAX_USERCOPY
68983+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
68984+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
68985+#else
68986 c = slob_alloc(sizeof(struct kmem_cache),
68987 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
68988+#endif
68989
68990 if (c) {
68991 c->name = name;
68992@@ -608,17 +702,25 @@ void *kmem_cache_alloc_node(struct kmem_
68993 {
68994 void *b;
68995
68996+#ifdef CONFIG_PAX_USERCOPY
68997+ b = __kmalloc_node_align(c->size, flags, node, c->align);
68998+#else
68999 if (c->size < PAGE_SIZE) {
69000 b = slob_alloc(c->size, flags, c->align, node);
69001 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
69002 SLOB_UNITS(c->size) * SLOB_UNIT,
69003 flags, node);
69004 } else {
69005+ struct slob_page *sp;
69006+
69007 b = slob_new_pages(flags, get_order(c->size), node);
69008+ sp = slob_page(b);
69009+ sp->size = c->size;
69010 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
69011 PAGE_SIZE << get_order(c->size),
69012 flags, node);
69013 }
69014+#endif
69015
69016 if (c->ctor)
69017 c->ctor(b);
69018@@ -630,10 +732,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
69019
69020 static void __kmem_cache_free(void *b, int size)
69021 {
69022- if (size < PAGE_SIZE)
69023+ struct slob_page *sp = slob_page(b);
69024+
69025+ if (is_slob_page(sp))
69026 slob_free(b, size);
69027- else
69028+ else {
69029+ clear_slob_page(sp);
69030+ free_slob_page(sp);
69031+ sp->size = 0;
69032 slob_free_pages(b, get_order(size));
69033+ }
69034 }
69035
69036 static void kmem_rcu_free(struct rcu_head *head)
69037@@ -646,17 +754,31 @@ static void kmem_rcu_free(struct rcu_hea
69038
69039 void kmem_cache_free(struct kmem_cache *c, void *b)
69040 {
69041+ int size = c->size;
69042+
69043+#ifdef CONFIG_PAX_USERCOPY
69044+ if (size + c->align < PAGE_SIZE) {
69045+ size += c->align;
69046+ b -= c->align;
69047+ }
69048+#endif
69049+
69050 kmemleak_free_recursive(b, c->flags);
69051 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
69052 struct slob_rcu *slob_rcu;
69053- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
69054- slob_rcu->size = c->size;
69055+ slob_rcu = b + (size - sizeof(struct slob_rcu));
69056+ slob_rcu->size = size;
69057 call_rcu(&slob_rcu->head, kmem_rcu_free);
69058 } else {
69059- __kmem_cache_free(b, c->size);
69060+ __kmem_cache_free(b, size);
69061 }
69062
69063+#ifdef CONFIG_PAX_USERCOPY
69064+ trace_kfree(_RET_IP_, b);
69065+#else
69066 trace_kmem_cache_free(_RET_IP_, b);
69067+#endif
69068+
69069 }
69070 EXPORT_SYMBOL(kmem_cache_free);
69071
69072diff -urNp linux-3.0.4/mm/slub.c linux-3.0.4/mm/slub.c
69073--- linux-3.0.4/mm/slub.c 2011-07-21 22:17:23.000000000 -0400
69074+++ linux-3.0.4/mm/slub.c 2011-09-25 22:15:40.000000000 -0400
69075@@ -200,7 +200,7 @@ struct track {
69076
69077 enum track_item { TRACK_ALLOC, TRACK_FREE };
69078
69079-#ifdef CONFIG_SYSFS
69080+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
69081 static int sysfs_slab_add(struct kmem_cache *);
69082 static int sysfs_slab_alias(struct kmem_cache *, const char *);
69083 static void sysfs_slab_remove(struct kmem_cache *);
69084@@ -442,7 +442,7 @@ static void print_track(const char *s, s
69085 if (!t->addr)
69086 return;
69087
69088- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
69089+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
69090 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
69091 }
69092
69093@@ -2137,6 +2137,8 @@ void kmem_cache_free(struct kmem_cache *
69094
69095 page = virt_to_head_page(x);
69096
69097+ BUG_ON(!PageSlab(page));
69098+
69099 slab_free(s, page, x, _RET_IP_);
69100
69101 trace_kmem_cache_free(_RET_IP_, x);
69102@@ -2170,7 +2172,7 @@ static int slub_min_objects;
69103 * Merge control. If this is set then no merging of slab caches will occur.
69104 * (Could be removed. This was introduced to pacify the merge skeptics.)
69105 */
69106-static int slub_nomerge;
69107+static int slub_nomerge = 1;
69108
69109 /*
69110 * Calculate the order of allocation given an slab object size.
69111@@ -2594,7 +2596,7 @@ static int kmem_cache_open(struct kmem_c
69112 * list to avoid pounding the page allocator excessively.
69113 */
69114 set_min_partial(s, ilog2(s->size));
69115- s->refcount = 1;
69116+ atomic_set(&s->refcount, 1);
69117 #ifdef CONFIG_NUMA
69118 s->remote_node_defrag_ratio = 1000;
69119 #endif
69120@@ -2699,8 +2701,7 @@ static inline int kmem_cache_close(struc
69121 void kmem_cache_destroy(struct kmem_cache *s)
69122 {
69123 down_write(&slub_lock);
69124- s->refcount--;
69125- if (!s->refcount) {
69126+ if (atomic_dec_and_test(&s->refcount)) {
69127 list_del(&s->list);
69128 if (kmem_cache_close(s)) {
69129 printk(KERN_ERR "SLUB %s: %s called for cache that "
69130@@ -2910,6 +2911,46 @@ void *__kmalloc_node(size_t size, gfp_t
69131 EXPORT_SYMBOL(__kmalloc_node);
69132 #endif
69133
69134+void check_object_size(const void *ptr, unsigned long n, bool to)
69135+{
69136+
69137+#ifdef CONFIG_PAX_USERCOPY
69138+ struct page *page;
69139+ struct kmem_cache *s = NULL;
69140+ unsigned long offset;
69141+
69142+ if (!n)
69143+ return;
69144+
69145+ if (ZERO_OR_NULL_PTR(ptr))
69146+ goto report;
69147+
69148+ if (!virt_addr_valid(ptr))
69149+ return;
69150+
69151+ page = virt_to_head_page(ptr);
69152+
69153+ if (!PageSlab(page)) {
69154+ if (object_is_on_stack(ptr, n) == -1)
69155+ goto report;
69156+ return;
69157+ }
69158+
69159+ s = page->slab;
69160+ if (!(s->flags & SLAB_USERCOPY))
69161+ goto report;
69162+
69163+ offset = (ptr - page_address(page)) % s->size;
69164+ if (offset <= s->objsize && n <= s->objsize - offset)
69165+ return;
69166+
69167+report:
69168+ pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
69169+#endif
69170+
69171+}
69172+EXPORT_SYMBOL(check_object_size);
69173+
69174 size_t ksize(const void *object)
69175 {
69176 struct page *page;
69177@@ -3154,7 +3195,7 @@ static void __init kmem_cache_bootstrap_
69178 int node;
69179
69180 list_add(&s->list, &slab_caches);
69181- s->refcount = -1;
69182+ atomic_set(&s->refcount, -1);
69183
69184 for_each_node_state(node, N_NORMAL_MEMORY) {
69185 struct kmem_cache_node *n = get_node(s, node);
69186@@ -3271,17 +3312,17 @@ void __init kmem_cache_init(void)
69187
69188 /* Caches that are not of the two-to-the-power-of size */
69189 if (KMALLOC_MIN_SIZE <= 32) {
69190- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
69191+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
69192 caches++;
69193 }
69194
69195 if (KMALLOC_MIN_SIZE <= 64) {
69196- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
69197+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
69198 caches++;
69199 }
69200
69201 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
69202- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
69203+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
69204 caches++;
69205 }
69206
69207@@ -3349,7 +3390,7 @@ static int slab_unmergeable(struct kmem_
69208 /*
69209 * We may have set a slab to be unmergeable during bootstrap.
69210 */
69211- if (s->refcount < 0)
69212+ if (atomic_read(&s->refcount) < 0)
69213 return 1;
69214
69215 return 0;
69216@@ -3408,7 +3449,7 @@ struct kmem_cache *kmem_cache_create(con
69217 down_write(&slub_lock);
69218 s = find_mergeable(size, align, flags, name, ctor);
69219 if (s) {
69220- s->refcount++;
69221+ atomic_inc(&s->refcount);
69222 /*
69223 * Adjust the object sizes so that we clear
69224 * the complete object on kzalloc.
69225@@ -3417,7 +3458,7 @@ struct kmem_cache *kmem_cache_create(con
69226 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
69227
69228 if (sysfs_slab_alias(s, name)) {
69229- s->refcount--;
69230+ atomic_dec(&s->refcount);
69231 goto err;
69232 }
69233 up_write(&slub_lock);
69234@@ -3545,7 +3586,7 @@ void *__kmalloc_node_track_caller(size_t
69235 }
69236 #endif
69237
69238-#ifdef CONFIG_SYSFS
69239+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
69240 static int count_inuse(struct page *page)
69241 {
69242 return page->inuse;
69243@@ -3935,12 +3976,12 @@ static void resiliency_test(void)
69244 validate_slab_cache(kmalloc_caches[9]);
69245 }
69246 #else
69247-#ifdef CONFIG_SYSFS
69248+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
69249 static void resiliency_test(void) {};
69250 #endif
69251 #endif
69252
69253-#ifdef CONFIG_SYSFS
69254+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
69255 enum slab_stat_type {
69256 SL_ALL, /* All slabs */
69257 SL_PARTIAL, /* Only partially allocated slabs */
69258@@ -4150,7 +4191,7 @@ SLAB_ATTR_RO(ctor);
69259
69260 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
69261 {
69262- return sprintf(buf, "%d\n", s->refcount - 1);
69263+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
69264 }
69265 SLAB_ATTR_RO(aliases);
69266
69267@@ -4662,6 +4703,7 @@ static char *create_unique_id(struct kme
69268 return name;
69269 }
69270
69271+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
69272 static int sysfs_slab_add(struct kmem_cache *s)
69273 {
69274 int err;
69275@@ -4724,6 +4766,7 @@ static void sysfs_slab_remove(struct kme
69276 kobject_del(&s->kobj);
69277 kobject_put(&s->kobj);
69278 }
69279+#endif
69280
69281 /*
69282 * Need to buffer aliases during bootup until sysfs becomes
69283@@ -4737,6 +4780,7 @@ struct saved_alias {
69284
69285 static struct saved_alias *alias_list;
69286
69287+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
69288 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
69289 {
69290 struct saved_alias *al;
69291@@ -4759,6 +4803,7 @@ static int sysfs_slab_alias(struct kmem_
69292 alias_list = al;
69293 return 0;
69294 }
69295+#endif
69296
69297 static int __init slab_sysfs_init(void)
69298 {
69299@@ -4894,7 +4939,13 @@ static const struct file_operations proc
69300
69301 static int __init slab_proc_init(void)
69302 {
69303- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
69304+ mode_t gr_mode = S_IRUGO;
69305+
69306+#ifdef CONFIG_GRKERNSEC_PROC_ADD
69307+ gr_mode = S_IRUSR;
69308+#endif
69309+
69310+ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
69311 return 0;
69312 }
69313 module_init(slab_proc_init);
69314diff -urNp linux-3.0.4/mm/swap.c linux-3.0.4/mm/swap.c
69315--- linux-3.0.4/mm/swap.c 2011-07-21 22:17:23.000000000 -0400
69316+++ linux-3.0.4/mm/swap.c 2011-08-23 21:47:56.000000000 -0400
69317@@ -31,6 +31,7 @@
69318 #include <linux/backing-dev.h>
69319 #include <linux/memcontrol.h>
69320 #include <linux/gfp.h>
69321+#include <linux/hugetlb.h>
69322
69323 #include "internal.h"
69324
69325@@ -71,6 +72,8 @@ static void __put_compound_page(struct p
69326
69327 __page_cache_release(page);
69328 dtor = get_compound_page_dtor(page);
69329+ if (!PageHuge(page))
69330+ BUG_ON(dtor != free_compound_page);
69331 (*dtor)(page);
69332 }
69333
69334diff -urNp linux-3.0.4/mm/swapfile.c linux-3.0.4/mm/swapfile.c
69335--- linux-3.0.4/mm/swapfile.c 2011-07-21 22:17:23.000000000 -0400
69336+++ linux-3.0.4/mm/swapfile.c 2011-08-23 21:47:56.000000000 -0400
69337@@ -62,7 +62,7 @@ static DEFINE_MUTEX(swapon_mutex);
69338
69339 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
69340 /* Activity counter to indicate that a swapon or swapoff has occurred */
69341-static atomic_t proc_poll_event = ATOMIC_INIT(0);
69342+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
69343
69344 static inline unsigned char swap_count(unsigned char ent)
69345 {
69346@@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __us
69347 }
69348 filp_close(swap_file, NULL);
69349 err = 0;
69350- atomic_inc(&proc_poll_event);
69351+ atomic_inc_unchecked(&proc_poll_event);
69352 wake_up_interruptible(&proc_poll_wait);
69353
69354 out_dput:
69355@@ -1692,8 +1692,8 @@ static unsigned swaps_poll(struct file *
69356
69357 poll_wait(file, &proc_poll_wait, wait);
69358
69359- if (s->event != atomic_read(&proc_poll_event)) {
69360- s->event = atomic_read(&proc_poll_event);
69361+ if (s->event != atomic_read_unchecked(&proc_poll_event)) {
69362+ s->event = atomic_read_unchecked(&proc_poll_event);
69363 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
69364 }
69365
69366@@ -1799,7 +1799,7 @@ static int swaps_open(struct inode *inod
69367 }
69368
69369 s->seq.private = s;
69370- s->event = atomic_read(&proc_poll_event);
69371+ s->event = atomic_read_unchecked(&proc_poll_event);
69372 return ret;
69373 }
69374
69375@@ -2133,7 +2133,7 @@ SYSCALL_DEFINE2(swapon, const char __use
69376 (p->flags & SWP_DISCARDABLE) ? "D" : "");
69377
69378 mutex_unlock(&swapon_mutex);
69379- atomic_inc(&proc_poll_event);
69380+ atomic_inc_unchecked(&proc_poll_event);
69381 wake_up_interruptible(&proc_poll_wait);
69382
69383 if (S_ISREG(inode->i_mode))
69384diff -urNp linux-3.0.4/mm/util.c linux-3.0.4/mm/util.c
69385--- linux-3.0.4/mm/util.c 2011-07-21 22:17:23.000000000 -0400
69386+++ linux-3.0.4/mm/util.c 2011-08-23 21:47:56.000000000 -0400
69387@@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
69388 * allocated buffer. Use this if you don't want to free the buffer immediately
69389 * like, for example, with RCU.
69390 */
69391+#undef __krealloc
69392 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
69393 {
69394 void *ret;
69395@@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
69396 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
69397 * %NULL pointer, the object pointed to is freed.
69398 */
69399+#undef krealloc
69400 void *krealloc(const void *p, size_t new_size, gfp_t flags)
69401 {
69402 void *ret;
69403@@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *m
69404 void arch_pick_mmap_layout(struct mm_struct *mm)
69405 {
69406 mm->mmap_base = TASK_UNMAPPED_BASE;
69407+
69408+#ifdef CONFIG_PAX_RANDMMAP
69409+ if (mm->pax_flags & MF_PAX_RANDMMAP)
69410+ mm->mmap_base += mm->delta_mmap;
69411+#endif
69412+
69413 mm->get_unmapped_area = arch_get_unmapped_area;
69414 mm->unmap_area = arch_unmap_area;
69415 }
69416diff -urNp linux-3.0.4/mm/vmalloc.c linux-3.0.4/mm/vmalloc.c
69417--- linux-3.0.4/mm/vmalloc.c 2011-09-02 18:11:21.000000000 -0400
69418+++ linux-3.0.4/mm/vmalloc.c 2011-08-23 21:47:56.000000000 -0400
69419@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd,
69420
69421 pte = pte_offset_kernel(pmd, addr);
69422 do {
69423- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
69424- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
69425+
69426+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
69427+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
69428+ BUG_ON(!pte_exec(*pte));
69429+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
69430+ continue;
69431+ }
69432+#endif
69433+
69434+ {
69435+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
69436+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
69437+ }
69438 } while (pte++, addr += PAGE_SIZE, addr != end);
69439 }
69440
69441@@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, un
69442 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
69443 {
69444 pte_t *pte;
69445+ int ret = -ENOMEM;
69446
69447 /*
69448 * nr is a running index into the array which helps higher level
69449@@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, un
69450 pte = pte_alloc_kernel(pmd, addr);
69451 if (!pte)
69452 return -ENOMEM;
69453+
69454+ pax_open_kernel();
69455 do {
69456 struct page *page = pages[*nr];
69457
69458- if (WARN_ON(!pte_none(*pte)))
69459- return -EBUSY;
69460- if (WARN_ON(!page))
69461- return -ENOMEM;
69462+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
69463+ if (pgprot_val(prot) & _PAGE_NX)
69464+#endif
69465+
69466+ if (WARN_ON(!pte_none(*pte))) {
69467+ ret = -EBUSY;
69468+ goto out;
69469+ }
69470+ if (WARN_ON(!page)) {
69471+ ret = -ENOMEM;
69472+ goto out;
69473+ }
69474 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
69475 (*nr)++;
69476 } while (pte++, addr += PAGE_SIZE, addr != end);
69477- return 0;
69478+ ret = 0;
69479+out:
69480+ pax_close_kernel();
69481+ return ret;
69482 }
69483
69484 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
69485@@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void
69486 * and fall back on vmalloc() if that fails. Others
69487 * just put it in the vmalloc space.
69488 */
69489-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
69490+#ifdef CONFIG_MODULES
69491+#ifdef MODULES_VADDR
69492 unsigned long addr = (unsigned long)x;
69493 if (addr >= MODULES_VADDR && addr < MODULES_END)
69494 return 1;
69495 #endif
69496+
69497+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
69498+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
69499+ return 1;
69500+#endif
69501+
69502+#endif
69503+
69504 return is_vmalloc_addr(x);
69505 }
69506
69507@@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void
69508
69509 if (!pgd_none(*pgd)) {
69510 pud_t *pud = pud_offset(pgd, addr);
69511+#ifdef CONFIG_X86
69512+ if (!pud_large(*pud))
69513+#endif
69514 if (!pud_none(*pud)) {
69515 pmd_t *pmd = pmd_offset(pud, addr);
69516+#ifdef CONFIG_X86
69517+ if (!pmd_large(*pmd))
69518+#endif
69519 if (!pmd_none(*pmd)) {
69520 pte_t *ptep, pte;
69521
69522@@ -1297,6 +1337,16 @@ static struct vm_struct *__get_vm_area_n
69523 struct vm_struct *area;
69524
69525 BUG_ON(in_interrupt());
69526+
69527+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
69528+ if (flags & VM_KERNEXEC) {
69529+ if (start != VMALLOC_START || end != VMALLOC_END)
69530+ return NULL;
69531+ start = (unsigned long)MODULES_EXEC_VADDR;
69532+ end = (unsigned long)MODULES_EXEC_END;
69533+ }
69534+#endif
69535+
69536 if (flags & VM_IOREMAP) {
69537 int bit = fls(size);
69538
69539@@ -1515,6 +1565,11 @@ void *vmap(struct page **pages, unsigned
69540 if (count > totalram_pages)
69541 return NULL;
69542
69543+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
69544+ if (!(pgprot_val(prot) & _PAGE_NX))
69545+ flags |= VM_KERNEXEC;
69546+#endif
69547+
69548 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
69549 __builtin_return_address(0));
69550 if (!area)
69551@@ -1616,6 +1671,13 @@ void *__vmalloc_node_range(unsigned long
69552 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
69553 return NULL;
69554
69555+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
69556+ if (!(pgprot_val(prot) & _PAGE_NX))
69557+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
69558+ node, gfp_mask, caller);
69559+ else
69560+#endif
69561+
69562 area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
69563 gfp_mask, caller);
69564
69565@@ -1655,6 +1717,7 @@ static void *__vmalloc_node(unsigned lon
69566 gfp_mask, prot, node, caller);
69567 }
69568
69569+#undef __vmalloc
69570 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
69571 {
69572 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
69573@@ -1678,6 +1741,7 @@ static inline void *__vmalloc_node_flags
69574 * For tight control over page level allocator and protection flags
69575 * use __vmalloc() instead.
69576 */
69577+#undef vmalloc
69578 void *vmalloc(unsigned long size)
69579 {
69580 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
69581@@ -1694,6 +1758,7 @@ EXPORT_SYMBOL(vmalloc);
69582 * For tight control over page level allocator and protection flags
69583 * use __vmalloc() instead.
69584 */
69585+#undef vzalloc
69586 void *vzalloc(unsigned long size)
69587 {
69588 return __vmalloc_node_flags(size, -1,
69589@@ -1708,6 +1773,7 @@ EXPORT_SYMBOL(vzalloc);
69590 * The resulting memory area is zeroed so it can be mapped to userspace
69591 * without leaking data.
69592 */
69593+#undef vmalloc_user
69594 void *vmalloc_user(unsigned long size)
69595 {
69596 struct vm_struct *area;
69597@@ -1735,6 +1801,7 @@ EXPORT_SYMBOL(vmalloc_user);
69598 * For tight control over page level allocator and protection flags
69599 * use __vmalloc() instead.
69600 */
69601+#undef vmalloc_node
69602 void *vmalloc_node(unsigned long size, int node)
69603 {
69604 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
69605@@ -1754,6 +1821,7 @@ EXPORT_SYMBOL(vmalloc_node);
69606 * For tight control over page level allocator and protection flags
69607 * use __vmalloc_node() instead.
69608 */
69609+#undef vzalloc_node
69610 void *vzalloc_node(unsigned long size, int node)
69611 {
69612 return __vmalloc_node_flags(size, node,
69613@@ -1776,10 +1844,10 @@ EXPORT_SYMBOL(vzalloc_node);
69614 * For tight control over page level allocator and protection flags
69615 * use __vmalloc() instead.
69616 */
69617-
69618+#undef vmalloc_exec
69619 void *vmalloc_exec(unsigned long size)
69620 {
69621- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
69622+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
69623 -1, __builtin_return_address(0));
69624 }
69625
69626@@ -1798,6 +1866,7 @@ void *vmalloc_exec(unsigned long size)
69627 * Allocate enough 32bit PA addressable pages to cover @size from the
69628 * page level allocator and map them into contiguous kernel virtual space.
69629 */
69630+#undef vmalloc_32
69631 void *vmalloc_32(unsigned long size)
69632 {
69633 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
69634@@ -1812,6 +1881,7 @@ EXPORT_SYMBOL(vmalloc_32);
69635 * The resulting memory area is 32bit addressable and zeroed so it can be
69636 * mapped to userspace without leaking data.
69637 */
69638+#undef vmalloc_32_user
69639 void *vmalloc_32_user(unsigned long size)
69640 {
69641 struct vm_struct *area;
69642@@ -2074,6 +2144,8 @@ int remap_vmalloc_range(struct vm_area_s
69643 unsigned long uaddr = vma->vm_start;
69644 unsigned long usize = vma->vm_end - vma->vm_start;
69645
69646+ BUG_ON(vma->vm_mirror);
69647+
69648 if ((PAGE_SIZE-1) & (unsigned long)addr)
69649 return -EINVAL;
69650
69651diff -urNp linux-3.0.4/mm/vmstat.c linux-3.0.4/mm/vmstat.c
69652--- linux-3.0.4/mm/vmstat.c 2011-07-21 22:17:23.000000000 -0400
69653+++ linux-3.0.4/mm/vmstat.c 2011-08-23 21:48:14.000000000 -0400
69654@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
69655 *
69656 * vm_stat contains the global counters
69657 */
69658-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
69659+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
69660 EXPORT_SYMBOL(vm_stat);
69661
69662 #ifdef CONFIG_SMP
69663@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
69664 v = p->vm_stat_diff[i];
69665 p->vm_stat_diff[i] = 0;
69666 local_irq_restore(flags);
69667- atomic_long_add(v, &zone->vm_stat[i]);
69668+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
69669 global_diff[i] += v;
69670 #ifdef CONFIG_NUMA
69671 /* 3 seconds idle till flush */
69672@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
69673
69674 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
69675 if (global_diff[i])
69676- atomic_long_add(global_diff[i], &vm_stat[i]);
69677+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
69678 }
69679
69680 #endif
69681@@ -1207,10 +1207,20 @@ static int __init setup_vmstat(void)
69682 start_cpu_timer(cpu);
69683 #endif
69684 #ifdef CONFIG_PROC_FS
69685- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
69686- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
69687- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
69688- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
69689+ {
69690+ mode_t gr_mode = S_IRUGO;
69691+#ifdef CONFIG_GRKERNSEC_PROC_ADD
69692+ gr_mode = S_IRUSR;
69693+#endif
69694+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
69695+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
69696+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
69697+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
69698+#else
69699+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
69700+#endif
69701+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
69702+ }
69703 #endif
69704 return 0;
69705 }
69706diff -urNp linux-3.0.4/net/8021q/vlan.c linux-3.0.4/net/8021q/vlan.c
69707--- linux-3.0.4/net/8021q/vlan.c 2011-07-21 22:17:23.000000000 -0400
69708+++ linux-3.0.4/net/8021q/vlan.c 2011-08-23 21:47:56.000000000 -0400
69709@@ -591,8 +591,7 @@ static int vlan_ioctl_handler(struct net
69710 err = -EPERM;
69711 if (!capable(CAP_NET_ADMIN))
69712 break;
69713- if ((args.u.name_type >= 0) &&
69714- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
69715+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
69716 struct vlan_net *vn;
69717
69718 vn = net_generic(net, vlan_net_id);
69719diff -urNp linux-3.0.4/net/9p/trans_fd.c linux-3.0.4/net/9p/trans_fd.c
69720--- linux-3.0.4/net/9p/trans_fd.c 2011-07-21 22:17:23.000000000 -0400
69721+++ linux-3.0.4/net/9p/trans_fd.c 2011-10-06 04:17:55.000000000 -0400
69722@@ -423,7 +423,7 @@ static int p9_fd_write(struct p9_client
69723 oldfs = get_fs();
69724 set_fs(get_ds());
69725 /* The cast to a user pointer is valid due to the set_fs() */
69726- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
69727+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
69728 set_fs(oldfs);
69729
69730 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
69731diff -urNp linux-3.0.4/net/9p/trans_virtio.c linux-3.0.4/net/9p/trans_virtio.c
69732--- linux-3.0.4/net/9p/trans_virtio.c 2011-07-21 22:17:23.000000000 -0400
69733+++ linux-3.0.4/net/9p/trans_virtio.c 2011-10-06 04:17:55.000000000 -0400
69734@@ -328,7 +328,7 @@ req_retry_pinned:
69735 } else {
69736 char *pbuf;
69737 if (req->tc->pubuf)
69738- pbuf = (__force char *) req->tc->pubuf;
69739+ pbuf = (char __force_kernel *) req->tc->pubuf;
69740 else
69741 pbuf = req->tc->pkbuf;
69742 outp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, pbuf,
69743@@ -357,7 +357,7 @@ req_retry_pinned:
69744 } else {
69745 char *pbuf;
69746 if (req->tc->pubuf)
69747- pbuf = (__force char *) req->tc->pubuf;
69748+ pbuf = (char __force_kernel *) req->tc->pubuf;
69749 else
69750 pbuf = req->tc->pkbuf;
69751
69752diff -urNp linux-3.0.4/net/atm/atm_misc.c linux-3.0.4/net/atm/atm_misc.c
69753--- linux-3.0.4/net/atm/atm_misc.c 2011-07-21 22:17:23.000000000 -0400
69754+++ linux-3.0.4/net/atm/atm_misc.c 2011-08-23 21:47:56.000000000 -0400
69755@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int
69756 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
69757 return 1;
69758 atm_return(vcc, truesize);
69759- atomic_inc(&vcc->stats->rx_drop);
69760+ atomic_inc_unchecked(&vcc->stats->rx_drop);
69761 return 0;
69762 }
69763 EXPORT_SYMBOL(atm_charge);
69764@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct
69765 }
69766 }
69767 atm_return(vcc, guess);
69768- atomic_inc(&vcc->stats->rx_drop);
69769+ atomic_inc_unchecked(&vcc->stats->rx_drop);
69770 return NULL;
69771 }
69772 EXPORT_SYMBOL(atm_alloc_charge);
69773@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
69774
69775 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
69776 {
69777-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
69778+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
69779 __SONET_ITEMS
69780 #undef __HANDLE_ITEM
69781 }
69782@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
69783
69784 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
69785 {
69786-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
69787+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
69788 __SONET_ITEMS
69789 #undef __HANDLE_ITEM
69790 }
69791diff -urNp linux-3.0.4/net/atm/lec.h linux-3.0.4/net/atm/lec.h
69792--- linux-3.0.4/net/atm/lec.h 2011-07-21 22:17:23.000000000 -0400
69793+++ linux-3.0.4/net/atm/lec.h 2011-08-23 21:47:56.000000000 -0400
69794@@ -48,7 +48,7 @@ struct lane2_ops {
69795 const u8 *tlvs, u32 sizeoftlvs);
69796 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
69797 const u8 *tlvs, u32 sizeoftlvs);
69798-};
69799+} __no_const;
69800
69801 /*
69802 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
69803diff -urNp linux-3.0.4/net/atm/mpc.h linux-3.0.4/net/atm/mpc.h
69804--- linux-3.0.4/net/atm/mpc.h 2011-07-21 22:17:23.000000000 -0400
69805+++ linux-3.0.4/net/atm/mpc.h 2011-08-23 21:47:56.000000000 -0400
69806@@ -33,7 +33,7 @@ struct mpoa_client {
69807 struct mpc_parameters parameters; /* parameters for this client */
69808
69809 const struct net_device_ops *old_ops;
69810- struct net_device_ops new_ops;
69811+ net_device_ops_no_const new_ops;
69812 };
69813
69814
69815diff -urNp linux-3.0.4/net/atm/mpoa_caches.c linux-3.0.4/net/atm/mpoa_caches.c
69816--- linux-3.0.4/net/atm/mpoa_caches.c 2011-07-21 22:17:23.000000000 -0400
69817+++ linux-3.0.4/net/atm/mpoa_caches.c 2011-08-23 21:48:14.000000000 -0400
69818@@ -255,6 +255,8 @@ static void check_resolving_entries(stru
69819 struct timeval now;
69820 struct k_message msg;
69821
69822+ pax_track_stack();
69823+
69824 do_gettimeofday(&now);
69825
69826 read_lock_bh(&client->ingress_lock);
69827diff -urNp linux-3.0.4/net/atm/proc.c linux-3.0.4/net/atm/proc.c
69828--- linux-3.0.4/net/atm/proc.c 2011-07-21 22:17:23.000000000 -0400
69829+++ linux-3.0.4/net/atm/proc.c 2011-08-23 21:47:56.000000000 -0400
69830@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *s
69831 const struct k_atm_aal_stats *stats)
69832 {
69833 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
69834- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
69835- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
69836- atomic_read(&stats->rx_drop));
69837+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
69838+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
69839+ atomic_read_unchecked(&stats->rx_drop));
69840 }
69841
69842 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
69843diff -urNp linux-3.0.4/net/atm/resources.c linux-3.0.4/net/atm/resources.c
69844--- linux-3.0.4/net/atm/resources.c 2011-07-21 22:17:23.000000000 -0400
69845+++ linux-3.0.4/net/atm/resources.c 2011-08-23 21:47:56.000000000 -0400
69846@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
69847 static void copy_aal_stats(struct k_atm_aal_stats *from,
69848 struct atm_aal_stats *to)
69849 {
69850-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
69851+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
69852 __AAL_STAT_ITEMS
69853 #undef __HANDLE_ITEM
69854 }
69855@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_
69856 static void subtract_aal_stats(struct k_atm_aal_stats *from,
69857 struct atm_aal_stats *to)
69858 {
69859-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
69860+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
69861 __AAL_STAT_ITEMS
69862 #undef __HANDLE_ITEM
69863 }
69864diff -urNp linux-3.0.4/net/batman-adv/hard-interface.c linux-3.0.4/net/batman-adv/hard-interface.c
69865--- linux-3.0.4/net/batman-adv/hard-interface.c 2011-07-21 22:17:23.000000000 -0400
69866+++ linux-3.0.4/net/batman-adv/hard-interface.c 2011-08-23 21:47:56.000000000 -0400
69867@@ -351,8 +351,8 @@ int hardif_enable_interface(struct hard_
69868 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
69869 dev_add_pack(&hard_iface->batman_adv_ptype);
69870
69871- atomic_set(&hard_iface->seqno, 1);
69872- atomic_set(&hard_iface->frag_seqno, 1);
69873+ atomic_set_unchecked(&hard_iface->seqno, 1);
69874+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
69875 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
69876 hard_iface->net_dev->name);
69877
69878diff -urNp linux-3.0.4/net/batman-adv/routing.c linux-3.0.4/net/batman-adv/routing.c
69879--- linux-3.0.4/net/batman-adv/routing.c 2011-07-21 22:17:23.000000000 -0400
69880+++ linux-3.0.4/net/batman-adv/routing.c 2011-08-23 21:47:56.000000000 -0400
69881@@ -627,7 +627,7 @@ void receive_bat_packet(struct ethhdr *e
69882 return;
69883
69884 /* could be changed by schedule_own_packet() */
69885- if_incoming_seqno = atomic_read(&if_incoming->seqno);
69886+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
69887
69888 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
69889
69890diff -urNp linux-3.0.4/net/batman-adv/send.c linux-3.0.4/net/batman-adv/send.c
69891--- linux-3.0.4/net/batman-adv/send.c 2011-07-21 22:17:23.000000000 -0400
69892+++ linux-3.0.4/net/batman-adv/send.c 2011-08-23 21:47:56.000000000 -0400
69893@@ -279,7 +279,7 @@ void schedule_own_packet(struct hard_ifa
69894
69895 /* change sequence number to network order */
69896 batman_packet->seqno =
69897- htonl((uint32_t)atomic_read(&hard_iface->seqno));
69898+ htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
69899
69900 if (vis_server == VIS_TYPE_SERVER_SYNC)
69901 batman_packet->flags |= VIS_SERVER;
69902@@ -293,7 +293,7 @@ void schedule_own_packet(struct hard_ifa
69903 else
69904 batman_packet->gw_flags = 0;
69905
69906- atomic_inc(&hard_iface->seqno);
69907+ atomic_inc_unchecked(&hard_iface->seqno);
69908
69909 slide_own_bcast_window(hard_iface);
69910 send_time = own_send_time(bat_priv);
69911diff -urNp linux-3.0.4/net/batman-adv/soft-interface.c linux-3.0.4/net/batman-adv/soft-interface.c
69912--- linux-3.0.4/net/batman-adv/soft-interface.c 2011-07-21 22:17:23.000000000 -0400
69913+++ linux-3.0.4/net/batman-adv/soft-interface.c 2011-08-23 21:47:56.000000000 -0400
69914@@ -628,7 +628,7 @@ int interface_tx(struct sk_buff *skb, st
69915
69916 /* set broadcast sequence number */
69917 bcast_packet->seqno =
69918- htonl(atomic_inc_return(&bat_priv->bcast_seqno));
69919+ htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
69920
69921 add_bcast_packet_to_list(bat_priv, skb);
69922
69923@@ -830,7 +830,7 @@ struct net_device *softif_create(char *n
69924 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
69925
69926 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
69927- atomic_set(&bat_priv->bcast_seqno, 1);
69928+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
69929 atomic_set(&bat_priv->tt_local_changed, 0);
69930
69931 bat_priv->primary_if = NULL;
69932diff -urNp linux-3.0.4/net/batman-adv/types.h linux-3.0.4/net/batman-adv/types.h
69933--- linux-3.0.4/net/batman-adv/types.h 2011-07-21 22:17:23.000000000 -0400
69934+++ linux-3.0.4/net/batman-adv/types.h 2011-08-23 21:47:56.000000000 -0400
69935@@ -38,8 +38,8 @@ struct hard_iface {
69936 int16_t if_num;
69937 char if_status;
69938 struct net_device *net_dev;
69939- atomic_t seqno;
69940- atomic_t frag_seqno;
69941+ atomic_unchecked_t seqno;
69942+ atomic_unchecked_t frag_seqno;
69943 unsigned char *packet_buff;
69944 int packet_len;
69945 struct kobject *hardif_obj;
69946@@ -142,7 +142,7 @@ struct bat_priv {
69947 atomic_t orig_interval; /* uint */
69948 atomic_t hop_penalty; /* uint */
69949 atomic_t log_level; /* uint */
69950- atomic_t bcast_seqno;
69951+ atomic_unchecked_t bcast_seqno;
69952 atomic_t bcast_queue_left;
69953 atomic_t batman_queue_left;
69954 char num_ifaces;
69955diff -urNp linux-3.0.4/net/batman-adv/unicast.c linux-3.0.4/net/batman-adv/unicast.c
69956--- linux-3.0.4/net/batman-adv/unicast.c 2011-07-21 22:17:23.000000000 -0400
69957+++ linux-3.0.4/net/batman-adv/unicast.c 2011-08-23 21:47:56.000000000 -0400
69958@@ -265,7 +265,7 @@ int frag_send_skb(struct sk_buff *skb, s
69959 frag1->flags = UNI_FRAG_HEAD | large_tail;
69960 frag2->flags = large_tail;
69961
69962- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
69963+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
69964 frag1->seqno = htons(seqno - 1);
69965 frag2->seqno = htons(seqno);
69966
69967diff -urNp linux-3.0.4/net/bridge/br_multicast.c linux-3.0.4/net/bridge/br_multicast.c
69968--- linux-3.0.4/net/bridge/br_multicast.c 2011-07-21 22:17:23.000000000 -0400
69969+++ linux-3.0.4/net/bridge/br_multicast.c 2011-08-23 21:47:56.000000000 -0400
69970@@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct
69971 nexthdr = ip6h->nexthdr;
69972 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
69973
69974- if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
69975+ if (nexthdr != IPPROTO_ICMPV6)
69976 return 0;
69977
69978 /* Okay, we found ICMPv6 header */
69979diff -urNp linux-3.0.4/net/bridge/netfilter/ebtables.c linux-3.0.4/net/bridge/netfilter/ebtables.c
69980--- linux-3.0.4/net/bridge/netfilter/ebtables.c 2011-07-21 22:17:23.000000000 -0400
69981+++ linux-3.0.4/net/bridge/netfilter/ebtables.c 2011-08-23 21:48:14.000000000 -0400
69982@@ -1512,7 +1512,7 @@ static int do_ebt_get_ctl(struct sock *s
69983 tmp.valid_hooks = t->table->valid_hooks;
69984 }
69985 mutex_unlock(&ebt_mutex);
69986- if (copy_to_user(user, &tmp, *len) != 0){
69987+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
69988 BUGPRINT("c2u Didn't work\n");
69989 ret = -EFAULT;
69990 break;
69991@@ -1780,6 +1780,8 @@ static int compat_copy_everything_to_use
69992 int ret;
69993 void __user *pos;
69994
69995+ pax_track_stack();
69996+
69997 memset(&tinfo, 0, sizeof(tinfo));
69998
69999 if (cmd == EBT_SO_GET_ENTRIES) {
70000diff -urNp linux-3.0.4/net/caif/caif_socket.c linux-3.0.4/net/caif/caif_socket.c
70001--- linux-3.0.4/net/caif/caif_socket.c 2011-07-21 22:17:23.000000000 -0400
70002+++ linux-3.0.4/net/caif/caif_socket.c 2011-08-23 21:47:56.000000000 -0400
70003@@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
70004 #ifdef CONFIG_DEBUG_FS
70005 struct debug_fs_counter {
70006 atomic_t caif_nr_socks;
70007- atomic_t caif_sock_create;
70008- atomic_t num_connect_req;
70009- atomic_t num_connect_resp;
70010- atomic_t num_connect_fail_resp;
70011- atomic_t num_disconnect;
70012- atomic_t num_remote_shutdown_ind;
70013- atomic_t num_tx_flow_off_ind;
70014- atomic_t num_tx_flow_on_ind;
70015- atomic_t num_rx_flow_off;
70016- atomic_t num_rx_flow_on;
70017+ atomic_unchecked_t caif_sock_create;
70018+ atomic_unchecked_t num_connect_req;
70019+ atomic_unchecked_t num_connect_resp;
70020+ atomic_unchecked_t num_connect_fail_resp;
70021+ atomic_unchecked_t num_disconnect;
70022+ atomic_unchecked_t num_remote_shutdown_ind;
70023+ atomic_unchecked_t num_tx_flow_off_ind;
70024+ atomic_unchecked_t num_tx_flow_on_ind;
70025+ atomic_unchecked_t num_rx_flow_off;
70026+ atomic_unchecked_t num_rx_flow_on;
70027 };
70028 static struct debug_fs_counter cnt;
70029 #define dbfs_atomic_inc(v) atomic_inc_return(v)
70030+#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
70031 #define dbfs_atomic_dec(v) atomic_dec_return(v)
70032 #else
70033 #define dbfs_atomic_inc(v) 0
70034@@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct soc
70035 atomic_read(&cf_sk->sk.sk_rmem_alloc),
70036 sk_rcvbuf_lowwater(cf_sk));
70037 set_rx_flow_off(cf_sk);
70038- dbfs_atomic_inc(&cnt.num_rx_flow_off);
70039+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
70040 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
70041 }
70042
70043@@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct soc
70044 set_rx_flow_off(cf_sk);
70045 if (net_ratelimit())
70046 pr_debug("sending flow OFF due to rmem_schedule\n");
70047- dbfs_atomic_inc(&cnt.num_rx_flow_off);
70048+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
70049 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
70050 }
70051 skb->dev = NULL;
70052@@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer
70053 switch (flow) {
70054 case CAIF_CTRLCMD_FLOW_ON_IND:
70055 /* OK from modem to start sending again */
70056- dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
70057+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
70058 set_tx_flow_on(cf_sk);
70059 cf_sk->sk.sk_state_change(&cf_sk->sk);
70060 break;
70061
70062 case CAIF_CTRLCMD_FLOW_OFF_IND:
70063 /* Modem asks us to shut up */
70064- dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
70065+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
70066 set_tx_flow_off(cf_sk);
70067 cf_sk->sk.sk_state_change(&cf_sk->sk);
70068 break;
70069@@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer
70070 /* We're now connected */
70071 caif_client_register_refcnt(&cf_sk->layer,
70072 cfsk_hold, cfsk_put);
70073- dbfs_atomic_inc(&cnt.num_connect_resp);
70074+ dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
70075 cf_sk->sk.sk_state = CAIF_CONNECTED;
70076 set_tx_flow_on(cf_sk);
70077 cf_sk->sk.sk_state_change(&cf_sk->sk);
70078@@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer
70079
70080 case CAIF_CTRLCMD_INIT_FAIL_RSP:
70081 /* Connect request failed */
70082- dbfs_atomic_inc(&cnt.num_connect_fail_resp);
70083+ dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
70084 cf_sk->sk.sk_err = ECONNREFUSED;
70085 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
70086 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
70087@@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer
70088
70089 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
70090 /* Modem has closed this connection, or device is down. */
70091- dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
70092+ dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
70093 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
70094 cf_sk->sk.sk_err = ECONNRESET;
70095 set_rx_flow_on(cf_sk);
70096@@ -297,7 +298,7 @@ static void caif_check_flow_release(stru
70097 return;
70098
70099 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
70100- dbfs_atomic_inc(&cnt.num_rx_flow_on);
70101+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
70102 set_rx_flow_on(cf_sk);
70103 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
70104 }
70105@@ -854,7 +855,7 @@ static int caif_connect(struct socket *s
70106 /*ifindex = id of the interface.*/
70107 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
70108
70109- dbfs_atomic_inc(&cnt.num_connect_req);
70110+ dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
70111 cf_sk->layer.receive = caif_sktrecv_cb;
70112
70113 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
70114@@ -943,7 +944,7 @@ static int caif_release(struct socket *s
70115 spin_unlock_bh(&sk->sk_receive_queue.lock);
70116 sock->sk = NULL;
70117
70118- dbfs_atomic_inc(&cnt.num_disconnect);
70119+ dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
70120
70121 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
70122 if (cf_sk->debugfs_socket_dir != NULL)
70123@@ -1122,7 +1123,7 @@ static int caif_create(struct net *net,
70124 cf_sk->conn_req.protocol = protocol;
70125 /* Increase the number of sockets created. */
70126 dbfs_atomic_inc(&cnt.caif_nr_socks);
70127- num = dbfs_atomic_inc(&cnt.caif_sock_create);
70128+ num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
70129 #ifdef CONFIG_DEBUG_FS
70130 if (!IS_ERR(debugfsdir)) {
70131
70132diff -urNp linux-3.0.4/net/caif/cfctrl.c linux-3.0.4/net/caif/cfctrl.c
70133--- linux-3.0.4/net/caif/cfctrl.c 2011-07-21 22:17:23.000000000 -0400
70134+++ linux-3.0.4/net/caif/cfctrl.c 2011-08-23 21:48:14.000000000 -0400
70135@@ -9,6 +9,7 @@
70136 #include <linux/stddef.h>
70137 #include <linux/spinlock.h>
70138 #include <linux/slab.h>
70139+#include <linux/sched.h>
70140 #include <net/caif/caif_layer.h>
70141 #include <net/caif/cfpkt.h>
70142 #include <net/caif/cfctrl.h>
70143@@ -45,8 +46,8 @@ struct cflayer *cfctrl_create(void)
70144 dev_info.id = 0xff;
70145 memset(this, 0, sizeof(*this));
70146 cfsrvl_init(&this->serv, 0, &dev_info, false);
70147- atomic_set(&this->req_seq_no, 1);
70148- atomic_set(&this->rsp_seq_no, 1);
70149+ atomic_set_unchecked(&this->req_seq_no, 1);
70150+ atomic_set_unchecked(&this->rsp_seq_no, 1);
70151 this->serv.layer.receive = cfctrl_recv;
70152 sprintf(this->serv.layer.name, "ctrl");
70153 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
70154@@ -132,8 +133,8 @@ static void cfctrl_insert_req(struct cfc
70155 struct cfctrl_request_info *req)
70156 {
70157 spin_lock_bh(&ctrl->info_list_lock);
70158- atomic_inc(&ctrl->req_seq_no);
70159- req->sequence_no = atomic_read(&ctrl->req_seq_no);
70160+ atomic_inc_unchecked(&ctrl->req_seq_no);
70161+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
70162 list_add_tail(&req->list, &ctrl->list);
70163 spin_unlock_bh(&ctrl->info_list_lock);
70164 }
70165@@ -151,7 +152,7 @@ static struct cfctrl_request_info *cfctr
70166 if (p != first)
70167 pr_warn("Requests are not received in order\n");
70168
70169- atomic_set(&ctrl->rsp_seq_no,
70170+ atomic_set_unchecked(&ctrl->rsp_seq_no,
70171 p->sequence_no);
70172 list_del(&p->list);
70173 goto out;
70174@@ -364,6 +365,7 @@ static int cfctrl_recv(struct cflayer *l
70175 struct cfctrl *cfctrl = container_obj(layer);
70176 struct cfctrl_request_info rsp, *req;
70177
70178+ pax_track_stack();
70179
70180 cfpkt_extr_head(pkt, &cmdrsp, 1);
70181 cmd = cmdrsp & CFCTRL_CMD_MASK;
70182diff -urNp linux-3.0.4/net/compat.c linux-3.0.4/net/compat.c
70183--- linux-3.0.4/net/compat.c 2011-07-21 22:17:23.000000000 -0400
70184+++ linux-3.0.4/net/compat.c 2011-10-06 04:17:55.000000000 -0400
70185@@ -70,9 +70,9 @@ int get_compat_msghdr(struct msghdr *kms
70186 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
70187 __get_user(kmsg->msg_flags, &umsg->msg_flags))
70188 return -EFAULT;
70189- kmsg->msg_name = compat_ptr(tmp1);
70190- kmsg->msg_iov = compat_ptr(tmp2);
70191- kmsg->msg_control = compat_ptr(tmp3);
70192+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
70193+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
70194+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
70195 return 0;
70196 }
70197
70198@@ -84,7 +84,7 @@ int verify_compat_iovec(struct msghdr *k
70199
70200 if (kern_msg->msg_namelen) {
70201 if (mode == VERIFY_READ) {
70202- int err = move_addr_to_kernel(kern_msg->msg_name,
70203+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
70204 kern_msg->msg_namelen,
70205 kern_address);
70206 if (err < 0)
70207@@ -95,7 +95,7 @@ int verify_compat_iovec(struct msghdr *k
70208 kern_msg->msg_name = NULL;
70209
70210 tot_len = iov_from_user_compat_to_kern(kern_iov,
70211- (struct compat_iovec __user *)kern_msg->msg_iov,
70212+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
70213 kern_msg->msg_iovlen);
70214 if (tot_len >= 0)
70215 kern_msg->msg_iov = kern_iov;
70216@@ -115,20 +115,20 @@ int verify_compat_iovec(struct msghdr *k
70217
70218 #define CMSG_COMPAT_FIRSTHDR(msg) \
70219 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
70220- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
70221+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
70222 (struct compat_cmsghdr __user *)NULL)
70223
70224 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
70225 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
70226 (ucmlen) <= (unsigned long) \
70227 ((mhdr)->msg_controllen - \
70228- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
70229+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
70230
70231 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
70232 struct compat_cmsghdr __user *cmsg, int cmsg_len)
70233 {
70234 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
70235- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
70236+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
70237 msg->msg_controllen)
70238 return NULL;
70239 return (struct compat_cmsghdr __user *)ptr;
70240@@ -220,7 +220,7 @@ int put_cmsg_compat(struct msghdr *kmsg,
70241 {
70242 struct compat_timeval ctv;
70243 struct compat_timespec cts[3];
70244- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
70245+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
70246 struct compat_cmsghdr cmhdr;
70247 int cmlen;
70248
70249@@ -272,7 +272,7 @@ int put_cmsg_compat(struct msghdr *kmsg,
70250
70251 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
70252 {
70253- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
70254+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
70255 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
70256 int fdnum = scm->fp->count;
70257 struct file **fp = scm->fp->fp;
70258@@ -369,7 +369,7 @@ static int do_set_sock_timeout(struct so
70259 return -EFAULT;
70260 old_fs = get_fs();
70261 set_fs(KERNEL_DS);
70262- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
70263+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
70264 set_fs(old_fs);
70265
70266 return err;
70267@@ -430,7 +430,7 @@ static int do_get_sock_timeout(struct so
70268 len = sizeof(ktime);
70269 old_fs = get_fs();
70270 set_fs(KERNEL_DS);
70271- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
70272+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
70273 set_fs(old_fs);
70274
70275 if (!err) {
70276@@ -565,7 +565,7 @@ int compat_mc_setsockopt(struct sock *so
70277 case MCAST_JOIN_GROUP:
70278 case MCAST_LEAVE_GROUP:
70279 {
70280- struct compat_group_req __user *gr32 = (void *)optval;
70281+ struct compat_group_req __user *gr32 = (void __user *)optval;
70282 struct group_req __user *kgr =
70283 compat_alloc_user_space(sizeof(struct group_req));
70284 u32 interface;
70285@@ -586,7 +586,7 @@ int compat_mc_setsockopt(struct sock *so
70286 case MCAST_BLOCK_SOURCE:
70287 case MCAST_UNBLOCK_SOURCE:
70288 {
70289- struct compat_group_source_req __user *gsr32 = (void *)optval;
70290+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
70291 struct group_source_req __user *kgsr = compat_alloc_user_space(
70292 sizeof(struct group_source_req));
70293 u32 interface;
70294@@ -607,7 +607,7 @@ int compat_mc_setsockopt(struct sock *so
70295 }
70296 case MCAST_MSFILTER:
70297 {
70298- struct compat_group_filter __user *gf32 = (void *)optval;
70299+ struct compat_group_filter __user *gf32 = (void __user *)optval;
70300 struct group_filter __user *kgf;
70301 u32 interface, fmode, numsrc;
70302
70303@@ -645,7 +645,7 @@ int compat_mc_getsockopt(struct sock *so
70304 char __user *optval, int __user *optlen,
70305 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
70306 {
70307- struct compat_group_filter __user *gf32 = (void *)optval;
70308+ struct compat_group_filter __user *gf32 = (void __user *)optval;
70309 struct group_filter __user *kgf;
70310 int __user *koptlen;
70311 u32 interface, fmode, numsrc;
70312diff -urNp linux-3.0.4/net/core/datagram.c linux-3.0.4/net/core/datagram.c
70313--- linux-3.0.4/net/core/datagram.c 2011-07-21 22:17:23.000000000 -0400
70314+++ linux-3.0.4/net/core/datagram.c 2011-08-23 21:47:56.000000000 -0400
70315@@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, s
70316 }
70317
70318 kfree_skb(skb);
70319- atomic_inc(&sk->sk_drops);
70320+ atomic_inc_unchecked(&sk->sk_drops);
70321 sk_mem_reclaim_partial(sk);
70322
70323 return err;
70324diff -urNp linux-3.0.4/net/core/dev.c linux-3.0.4/net/core/dev.c
70325--- linux-3.0.4/net/core/dev.c 2011-07-21 22:17:23.000000000 -0400
70326+++ linux-3.0.4/net/core/dev.c 2011-08-23 21:48:14.000000000 -0400
70327@@ -1125,10 +1125,14 @@ void dev_load(struct net *net, const cha
70328 if (no_module && capable(CAP_NET_ADMIN))
70329 no_module = request_module("netdev-%s", name);
70330 if (no_module && capable(CAP_SYS_MODULE)) {
70331+#ifdef CONFIG_GRKERNSEC_MODHARDEN
70332+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
70333+#else
70334 if (!request_module("%s", name))
70335 pr_err("Loading kernel module for a network device "
70336 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
70337 "instead\n", name);
70338+#endif
70339 }
70340 }
70341 EXPORT_SYMBOL(dev_load);
70342@@ -1959,7 +1963,7 @@ static int illegal_highdma(struct net_de
70343
70344 struct dev_gso_cb {
70345 void (*destructor)(struct sk_buff *skb);
70346-};
70347+} __no_const;
70348
70349 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
70350
70351@@ -2912,7 +2916,7 @@ int netif_rx_ni(struct sk_buff *skb)
70352 }
70353 EXPORT_SYMBOL(netif_rx_ni);
70354
70355-static void net_tx_action(struct softirq_action *h)
70356+static void net_tx_action(void)
70357 {
70358 struct softnet_data *sd = &__get_cpu_var(softnet_data);
70359
70360@@ -3761,7 +3765,7 @@ void netif_napi_del(struct napi_struct *
70361 }
70362 EXPORT_SYMBOL(netif_napi_del);
70363
70364-static void net_rx_action(struct softirq_action *h)
70365+static void net_rx_action(void)
70366 {
70367 struct softnet_data *sd = &__get_cpu_var(softnet_data);
70368 unsigned long time_limit = jiffies + 2;
70369diff -urNp linux-3.0.4/net/core/flow.c linux-3.0.4/net/core/flow.c
70370--- linux-3.0.4/net/core/flow.c 2011-07-21 22:17:23.000000000 -0400
70371+++ linux-3.0.4/net/core/flow.c 2011-08-23 21:47:56.000000000 -0400
70372@@ -60,7 +60,7 @@ struct flow_cache {
70373 struct timer_list rnd_timer;
70374 };
70375
70376-atomic_t flow_cache_genid = ATOMIC_INIT(0);
70377+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
70378 EXPORT_SYMBOL(flow_cache_genid);
70379 static struct flow_cache flow_cache_global;
70380 static struct kmem_cache *flow_cachep __read_mostly;
70381@@ -85,7 +85,7 @@ static void flow_cache_new_hashrnd(unsig
70382
70383 static int flow_entry_valid(struct flow_cache_entry *fle)
70384 {
70385- if (atomic_read(&flow_cache_genid) != fle->genid)
70386+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
70387 return 0;
70388 if (fle->object && !fle->object->ops->check(fle->object))
70389 return 0;
70390@@ -253,7 +253,7 @@ flow_cache_lookup(struct net *net, const
70391 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
70392 fcp->hash_count++;
70393 }
70394- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
70395+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
70396 flo = fle->object;
70397 if (!flo)
70398 goto ret_object;
70399@@ -274,7 +274,7 @@ nocache:
70400 }
70401 flo = resolver(net, key, family, dir, flo, ctx);
70402 if (fle) {
70403- fle->genid = atomic_read(&flow_cache_genid);
70404+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
70405 if (!IS_ERR(flo))
70406 fle->object = flo;
70407 else
70408diff -urNp linux-3.0.4/net/core/iovec.c linux-3.0.4/net/core/iovec.c
70409--- linux-3.0.4/net/core/iovec.c 2011-07-21 22:17:23.000000000 -0400
70410+++ linux-3.0.4/net/core/iovec.c 2011-10-06 04:17:55.000000000 -0400
70411@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struc
70412 if (m->msg_namelen) {
70413 if (mode == VERIFY_READ) {
70414 void __user *namep;
70415- namep = (void __user __force *) m->msg_name;
70416+ namep = (void __force_user *) m->msg_name;
70417 err = move_addr_to_kernel(namep, m->msg_namelen,
70418 address);
70419 if (err < 0)
70420@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struc
70421 }
70422
70423 size = m->msg_iovlen * sizeof(struct iovec);
70424- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
70425+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
70426 return -EFAULT;
70427
70428 m->msg_iov = iov;
70429diff -urNp linux-3.0.4/net/core/rtnetlink.c linux-3.0.4/net/core/rtnetlink.c
70430--- linux-3.0.4/net/core/rtnetlink.c 2011-07-21 22:17:23.000000000 -0400
70431+++ linux-3.0.4/net/core/rtnetlink.c 2011-08-23 21:47:56.000000000 -0400
70432@@ -56,7 +56,7 @@
70433 struct rtnl_link {
70434 rtnl_doit_func doit;
70435 rtnl_dumpit_func dumpit;
70436-};
70437+} __no_const;
70438
70439 static DEFINE_MUTEX(rtnl_mutex);
70440
70441diff -urNp linux-3.0.4/net/core/scm.c linux-3.0.4/net/core/scm.c
70442--- linux-3.0.4/net/core/scm.c 2011-07-21 22:17:23.000000000 -0400
70443+++ linux-3.0.4/net/core/scm.c 2011-10-06 04:17:55.000000000 -0400
70444@@ -218,7 +218,7 @@ EXPORT_SYMBOL(__scm_send);
70445 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
70446 {
70447 struct cmsghdr __user *cm
70448- = (__force struct cmsghdr __user *)msg->msg_control;
70449+ = (struct cmsghdr __force_user *)msg->msg_control;
70450 struct cmsghdr cmhdr;
70451 int cmlen = CMSG_LEN(len);
70452 int err;
70453@@ -241,7 +241,7 @@ int put_cmsg(struct msghdr * msg, int le
70454 err = -EFAULT;
70455 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
70456 goto out;
70457- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
70458+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
70459 goto out;
70460 cmlen = CMSG_SPACE(len);
70461 if (msg->msg_controllen < cmlen)
70462@@ -257,7 +257,7 @@ EXPORT_SYMBOL(put_cmsg);
70463 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
70464 {
70465 struct cmsghdr __user *cm
70466- = (__force struct cmsghdr __user*)msg->msg_control;
70467+ = (struct cmsghdr __force_user *)msg->msg_control;
70468
70469 int fdmax = 0;
70470 int fdnum = scm->fp->count;
70471@@ -277,7 +277,7 @@ void scm_detach_fds(struct msghdr *msg,
70472 if (fdnum < fdmax)
70473 fdmax = fdnum;
70474
70475- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
70476+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
70477 i++, cmfptr++)
70478 {
70479 int new_fd;
70480diff -urNp linux-3.0.4/net/core/skbuff.c linux-3.0.4/net/core/skbuff.c
70481--- linux-3.0.4/net/core/skbuff.c 2011-07-21 22:17:23.000000000 -0400
70482+++ linux-3.0.4/net/core/skbuff.c 2011-08-23 21:48:14.000000000 -0400
70483@@ -1543,6 +1543,8 @@ int skb_splice_bits(struct sk_buff *skb,
70484 struct sock *sk = skb->sk;
70485 int ret = 0;
70486
70487+ pax_track_stack();
70488+
70489 if (splice_grow_spd(pipe, &spd))
70490 return -ENOMEM;
70491
70492diff -urNp linux-3.0.4/net/core/sock.c linux-3.0.4/net/core/sock.c
70493--- linux-3.0.4/net/core/sock.c 2011-07-21 22:17:23.000000000 -0400
70494+++ linux-3.0.4/net/core/sock.c 2011-08-23 21:48:14.000000000 -0400
70495@@ -291,7 +291,7 @@ int sock_queue_rcv_skb(struct sock *sk,
70496 */
70497 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
70498 (unsigned)sk->sk_rcvbuf) {
70499- atomic_inc(&sk->sk_drops);
70500+ atomic_inc_unchecked(&sk->sk_drops);
70501 return -ENOMEM;
70502 }
70503
70504@@ -300,7 +300,7 @@ int sock_queue_rcv_skb(struct sock *sk,
70505 return err;
70506
70507 if (!sk_rmem_schedule(sk, skb->truesize)) {
70508- atomic_inc(&sk->sk_drops);
70509+ atomic_inc_unchecked(&sk->sk_drops);
70510 return -ENOBUFS;
70511 }
70512
70513@@ -320,7 +320,7 @@ int sock_queue_rcv_skb(struct sock *sk,
70514 skb_dst_force(skb);
70515
70516 spin_lock_irqsave(&list->lock, flags);
70517- skb->dropcount = atomic_read(&sk->sk_drops);
70518+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
70519 __skb_queue_tail(list, skb);
70520 spin_unlock_irqrestore(&list->lock, flags);
70521
70522@@ -340,7 +340,7 @@ int sk_receive_skb(struct sock *sk, stru
70523 skb->dev = NULL;
70524
70525 if (sk_rcvqueues_full(sk, skb)) {
70526- atomic_inc(&sk->sk_drops);
70527+ atomic_inc_unchecked(&sk->sk_drops);
70528 goto discard_and_relse;
70529 }
70530 if (nested)
70531@@ -358,7 +358,7 @@ int sk_receive_skb(struct sock *sk, stru
70532 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
70533 } else if (sk_add_backlog(sk, skb)) {
70534 bh_unlock_sock(sk);
70535- atomic_inc(&sk->sk_drops);
70536+ atomic_inc_unchecked(&sk->sk_drops);
70537 goto discard_and_relse;
70538 }
70539
70540@@ -921,7 +921,7 @@ int sock_getsockopt(struct socket *sock,
70541 if (len > sizeof(peercred))
70542 len = sizeof(peercred);
70543 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
70544- if (copy_to_user(optval, &peercred, len))
70545+ if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
70546 return -EFAULT;
70547 goto lenout;
70548 }
70549@@ -934,7 +934,7 @@ int sock_getsockopt(struct socket *sock,
70550 return -ENOTCONN;
70551 if (lv < len)
70552 return -EINVAL;
70553- if (copy_to_user(optval, address, len))
70554+ if (len > sizeof(address) || copy_to_user(optval, address, len))
70555 return -EFAULT;
70556 goto lenout;
70557 }
70558@@ -967,7 +967,7 @@ int sock_getsockopt(struct socket *sock,
70559
70560 if (len > lv)
70561 len = lv;
70562- if (copy_to_user(optval, &v, len))
70563+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
70564 return -EFAULT;
70565 lenout:
70566 if (put_user(len, optlen))
70567@@ -2023,7 +2023,7 @@ void sock_init_data(struct socket *sock,
70568 */
70569 smp_wmb();
70570 atomic_set(&sk->sk_refcnt, 1);
70571- atomic_set(&sk->sk_drops, 0);
70572+ atomic_set_unchecked(&sk->sk_drops, 0);
70573 }
70574 EXPORT_SYMBOL(sock_init_data);
70575
70576diff -urNp linux-3.0.4/net/decnet/sysctl_net_decnet.c linux-3.0.4/net/decnet/sysctl_net_decnet.c
70577--- linux-3.0.4/net/decnet/sysctl_net_decnet.c 2011-07-21 22:17:23.000000000 -0400
70578+++ linux-3.0.4/net/decnet/sysctl_net_decnet.c 2011-08-23 21:47:56.000000000 -0400
70579@@ -173,7 +173,7 @@ static int dn_node_address_handler(ctl_t
70580
70581 if (len > *lenp) len = *lenp;
70582
70583- if (copy_to_user(buffer, addr, len))
70584+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
70585 return -EFAULT;
70586
70587 *lenp = len;
70588@@ -236,7 +236,7 @@ static int dn_def_dev_handler(ctl_table
70589
70590 if (len > *lenp) len = *lenp;
70591
70592- if (copy_to_user(buffer, devname, len))
70593+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
70594 return -EFAULT;
70595
70596 *lenp = len;
70597diff -urNp linux-3.0.4/net/econet/Kconfig linux-3.0.4/net/econet/Kconfig
70598--- linux-3.0.4/net/econet/Kconfig 2011-07-21 22:17:23.000000000 -0400
70599+++ linux-3.0.4/net/econet/Kconfig 2011-08-23 21:48:14.000000000 -0400
70600@@ -4,7 +4,7 @@
70601
70602 config ECONET
70603 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
70604- depends on EXPERIMENTAL && INET
70605+ depends on EXPERIMENTAL && INET && BROKEN
70606 ---help---
70607 Econet is a fairly old and slow networking protocol mainly used by
70608 Acorn computers to access file and print servers. It uses native
70609diff -urNp linux-3.0.4/net/ipv4/fib_frontend.c linux-3.0.4/net/ipv4/fib_frontend.c
70610--- linux-3.0.4/net/ipv4/fib_frontend.c 2011-07-21 22:17:23.000000000 -0400
70611+++ linux-3.0.4/net/ipv4/fib_frontend.c 2011-08-23 21:47:56.000000000 -0400
70612@@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct not
70613 #ifdef CONFIG_IP_ROUTE_MULTIPATH
70614 fib_sync_up(dev);
70615 #endif
70616- atomic_inc(&net->ipv4.dev_addr_genid);
70617+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
70618 rt_cache_flush(dev_net(dev), -1);
70619 break;
70620 case NETDEV_DOWN:
70621 fib_del_ifaddr(ifa, NULL);
70622- atomic_inc(&net->ipv4.dev_addr_genid);
70623+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
70624 if (ifa->ifa_dev->ifa_list == NULL) {
70625 /* Last address was deleted from this interface.
70626 * Disable IP.
70627@@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notif
70628 #ifdef CONFIG_IP_ROUTE_MULTIPATH
70629 fib_sync_up(dev);
70630 #endif
70631- atomic_inc(&net->ipv4.dev_addr_genid);
70632+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
70633 rt_cache_flush(dev_net(dev), -1);
70634 break;
70635 case NETDEV_DOWN:
70636diff -urNp linux-3.0.4/net/ipv4/fib_semantics.c linux-3.0.4/net/ipv4/fib_semantics.c
70637--- linux-3.0.4/net/ipv4/fib_semantics.c 2011-07-21 22:17:23.000000000 -0400
70638+++ linux-3.0.4/net/ipv4/fib_semantics.c 2011-08-23 21:47:56.000000000 -0400
70639@@ -691,7 +691,7 @@ __be32 fib_info_update_nh_saddr(struct n
70640 nh->nh_saddr = inet_select_addr(nh->nh_dev,
70641 nh->nh_gw,
70642 nh->nh_parent->fib_scope);
70643- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
70644+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
70645
70646 return nh->nh_saddr;
70647 }
70648diff -urNp linux-3.0.4/net/ipv4/inet_diag.c linux-3.0.4/net/ipv4/inet_diag.c
70649--- linux-3.0.4/net/ipv4/inet_diag.c 2011-07-21 22:17:23.000000000 -0400
70650+++ linux-3.0.4/net/ipv4/inet_diag.c 2011-08-23 21:48:14.000000000 -0400
70651@@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct soc
70652 r->idiag_retrans = 0;
70653
70654 r->id.idiag_if = sk->sk_bound_dev_if;
70655+
70656+#ifdef CONFIG_GRKERNSEC_HIDESYM
70657+ r->id.idiag_cookie[0] = 0;
70658+ r->id.idiag_cookie[1] = 0;
70659+#else
70660 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
70661 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
70662+#endif
70663
70664 r->id.idiag_sport = inet->inet_sport;
70665 r->id.idiag_dport = inet->inet_dport;
70666@@ -201,8 +207,15 @@ static int inet_twsk_diag_fill(struct in
70667 r->idiag_family = tw->tw_family;
70668 r->idiag_retrans = 0;
70669 r->id.idiag_if = tw->tw_bound_dev_if;
70670+
70671+#ifdef CONFIG_GRKERNSEC_HIDESYM
70672+ r->id.idiag_cookie[0] = 0;
70673+ r->id.idiag_cookie[1] = 0;
70674+#else
70675 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
70676 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
70677+#endif
70678+
70679 r->id.idiag_sport = tw->tw_sport;
70680 r->id.idiag_dport = tw->tw_dport;
70681 r->id.idiag_src[0] = tw->tw_rcv_saddr;
70682@@ -285,12 +298,14 @@ static int inet_diag_get_exact(struct sk
70683 if (sk == NULL)
70684 goto unlock;
70685
70686+#ifndef CONFIG_GRKERNSEC_HIDESYM
70687 err = -ESTALE;
70688 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
70689 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
70690 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
70691 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
70692 goto out;
70693+#endif
70694
70695 err = -ENOMEM;
70696 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
70697@@ -580,8 +595,14 @@ static int inet_diag_fill_req(struct sk_
70698 r->idiag_retrans = req->retrans;
70699
70700 r->id.idiag_if = sk->sk_bound_dev_if;
70701+
70702+#ifdef CONFIG_GRKERNSEC_HIDESYM
70703+ r->id.idiag_cookie[0] = 0;
70704+ r->id.idiag_cookie[1] = 0;
70705+#else
70706 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
70707 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
70708+#endif
70709
70710 tmo = req->expires - jiffies;
70711 if (tmo < 0)
70712diff -urNp linux-3.0.4/net/ipv4/inet_hashtables.c linux-3.0.4/net/ipv4/inet_hashtables.c
70713--- linux-3.0.4/net/ipv4/inet_hashtables.c 2011-09-02 18:11:21.000000000 -0400
70714+++ linux-3.0.4/net/ipv4/inet_hashtables.c 2011-08-23 21:55:24.000000000 -0400
70715@@ -18,12 +18,15 @@
70716 #include <linux/sched.h>
70717 #include <linux/slab.h>
70718 #include <linux/wait.h>
70719+#include <linux/security.h>
70720
70721 #include <net/inet_connection_sock.h>
70722 #include <net/inet_hashtables.h>
70723 #include <net/secure_seq.h>
70724 #include <net/ip.h>
70725
70726+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
70727+
70728 /*
70729 * Allocate and initialize a new local port bind bucket.
70730 * The bindhash mutex for snum's hash chain must be held here.
70731@@ -530,6 +533,8 @@ ok:
70732 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
70733 spin_unlock(&head->lock);
70734
70735+ gr_update_task_in_ip_table(current, inet_sk(sk));
70736+
70737 if (tw) {
70738 inet_twsk_deschedule(tw, death_row);
70739 while (twrefcnt) {
70740diff -urNp linux-3.0.4/net/ipv4/inetpeer.c linux-3.0.4/net/ipv4/inetpeer.c
70741--- linux-3.0.4/net/ipv4/inetpeer.c 2011-09-02 18:11:21.000000000 -0400
70742+++ linux-3.0.4/net/ipv4/inetpeer.c 2011-08-23 21:48:14.000000000 -0400
70743@@ -481,6 +481,8 @@ struct inet_peer *inet_getpeer(struct in
70744 unsigned int sequence;
70745 int invalidated, newrefcnt = 0;
70746
70747+ pax_track_stack();
70748+
70749 /* Look up for the address quickly, lockless.
70750 * Because of a concurrent writer, we might not find an existing entry.
70751 */
70752@@ -517,8 +519,8 @@ found: /* The existing node has been fo
70753 if (p) {
70754 p->daddr = *daddr;
70755 atomic_set(&p->refcnt, 1);
70756- atomic_set(&p->rid, 0);
70757- atomic_set(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
70758+ atomic_set_unchecked(&p->rid, 0);
70759+ atomic_set_unchecked(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
70760 p->tcp_ts_stamp = 0;
70761 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
70762 p->rate_tokens = 0;
70763diff -urNp linux-3.0.4/net/ipv4/ipconfig.c linux-3.0.4/net/ipv4/ipconfig.c
70764--- linux-3.0.4/net/ipv4/ipconfig.c 2011-07-21 22:17:23.000000000 -0400
70765+++ linux-3.0.4/net/ipv4/ipconfig.c 2011-10-06 04:17:55.000000000 -0400
70766@@ -313,7 +313,7 @@ static int __init ic_devinet_ioctl(unsig
70767
70768 mm_segment_t oldfs = get_fs();
70769 set_fs(get_ds());
70770- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
70771+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
70772 set_fs(oldfs);
70773 return res;
70774 }
70775@@ -324,7 +324,7 @@ static int __init ic_dev_ioctl(unsigned
70776
70777 mm_segment_t oldfs = get_fs();
70778 set_fs(get_ds());
70779- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
70780+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
70781 set_fs(oldfs);
70782 return res;
70783 }
70784@@ -335,7 +335,7 @@ static int __init ic_route_ioctl(unsigne
70785
70786 mm_segment_t oldfs = get_fs();
70787 set_fs(get_ds());
70788- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
70789+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
70790 set_fs(oldfs);
70791 return res;
70792 }
70793diff -urNp linux-3.0.4/net/ipv4/ip_fragment.c linux-3.0.4/net/ipv4/ip_fragment.c
70794--- linux-3.0.4/net/ipv4/ip_fragment.c 2011-07-21 22:17:23.000000000 -0400
70795+++ linux-3.0.4/net/ipv4/ip_fragment.c 2011-08-23 21:47:56.000000000 -0400
70796@@ -315,7 +315,7 @@ static inline int ip_frag_too_far(struct
70797 return 0;
70798
70799 start = qp->rid;
70800- end = atomic_inc_return(&peer->rid);
70801+ end = atomic_inc_return_unchecked(&peer->rid);
70802 qp->rid = end;
70803
70804 rc = qp->q.fragments && (end - start) > max;
70805diff -urNp linux-3.0.4/net/ipv4/ip_sockglue.c linux-3.0.4/net/ipv4/ip_sockglue.c
70806--- linux-3.0.4/net/ipv4/ip_sockglue.c 2011-07-21 22:17:23.000000000 -0400
70807+++ linux-3.0.4/net/ipv4/ip_sockglue.c 2011-10-06 04:17:55.000000000 -0400
70808@@ -1073,6 +1073,8 @@ static int do_ip_getsockopt(struct sock
70809 int val;
70810 int len;
70811
70812+ pax_track_stack();
70813+
70814 if (level != SOL_IP)
70815 return -EOPNOTSUPP;
70816
70817@@ -1110,7 +1112,8 @@ static int do_ip_getsockopt(struct sock
70818 len = min_t(unsigned int, len, opt->optlen);
70819 if (put_user(len, optlen))
70820 return -EFAULT;
70821- if (copy_to_user(optval, opt->__data, len))
70822+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
70823+ copy_to_user(optval, opt->__data, len))
70824 return -EFAULT;
70825 return 0;
70826 }
70827@@ -1238,7 +1241,7 @@ static int do_ip_getsockopt(struct sock
70828 if (sk->sk_type != SOCK_STREAM)
70829 return -ENOPROTOOPT;
70830
70831- msg.msg_control = optval;
70832+ msg.msg_control = (void __force_kernel *)optval;
70833 msg.msg_controllen = len;
70834 msg.msg_flags = 0;
70835
70836diff -urNp linux-3.0.4/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-3.0.4/net/ipv4/netfilter/nf_nat_snmp_basic.c
70837--- linux-3.0.4/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-07-21 22:17:23.000000000 -0400
70838+++ linux-3.0.4/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-08-23 21:47:56.000000000 -0400
70839@@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(
70840
70841 *len = 0;
70842
70843- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
70844+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
70845 if (*octets == NULL) {
70846 if (net_ratelimit())
70847 pr_notice("OOM in bsalg (%d)\n", __LINE__);
70848diff -urNp linux-3.0.4/net/ipv4/ping.c linux-3.0.4/net/ipv4/ping.c
70849--- linux-3.0.4/net/ipv4/ping.c 2011-07-21 22:17:23.000000000 -0400
70850+++ linux-3.0.4/net/ipv4/ping.c 2011-08-23 21:47:56.000000000 -0400
70851@@ -837,7 +837,7 @@ static void ping_format_sock(struct sock
70852 sk_rmem_alloc_get(sp),
70853 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
70854 atomic_read(&sp->sk_refcnt), sp,
70855- atomic_read(&sp->sk_drops), len);
70856+ atomic_read_unchecked(&sp->sk_drops), len);
70857 }
70858
70859 static int ping_seq_show(struct seq_file *seq, void *v)
70860diff -urNp linux-3.0.4/net/ipv4/raw.c linux-3.0.4/net/ipv4/raw.c
70861--- linux-3.0.4/net/ipv4/raw.c 2011-07-21 22:17:23.000000000 -0400
70862+++ linux-3.0.4/net/ipv4/raw.c 2011-08-23 21:48:14.000000000 -0400
70863@@ -302,7 +302,7 @@ static int raw_rcv_skb(struct sock * sk,
70864 int raw_rcv(struct sock *sk, struct sk_buff *skb)
70865 {
70866 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
70867- atomic_inc(&sk->sk_drops);
70868+ atomic_inc_unchecked(&sk->sk_drops);
70869 kfree_skb(skb);
70870 return NET_RX_DROP;
70871 }
70872@@ -736,16 +736,20 @@ static int raw_init(struct sock *sk)
70873
70874 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
70875 {
70876+ struct icmp_filter filter;
70877+
70878 if (optlen > sizeof(struct icmp_filter))
70879 optlen = sizeof(struct icmp_filter);
70880- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
70881+ if (copy_from_user(&filter, optval, optlen))
70882 return -EFAULT;
70883+ raw_sk(sk)->filter = filter;
70884 return 0;
70885 }
70886
70887 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
70888 {
70889 int len, ret = -EFAULT;
70890+ struct icmp_filter filter;
70891
70892 if (get_user(len, optlen))
70893 goto out;
70894@@ -755,8 +759,9 @@ static int raw_geticmpfilter(struct sock
70895 if (len > sizeof(struct icmp_filter))
70896 len = sizeof(struct icmp_filter);
70897 ret = -EFAULT;
70898- if (put_user(len, optlen) ||
70899- copy_to_user(optval, &raw_sk(sk)->filter, len))
70900+ filter = raw_sk(sk)->filter;
70901+ if (put_user(len, optlen) || len > sizeof filter ||
70902+ copy_to_user(optval, &filter, len))
70903 goto out;
70904 ret = 0;
70905 out: return ret;
70906@@ -984,7 +989,13 @@ static void raw_sock_seq_show(struct seq
70907 sk_wmem_alloc_get(sp),
70908 sk_rmem_alloc_get(sp),
70909 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
70910- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
70911+ atomic_read(&sp->sk_refcnt),
70912+#ifdef CONFIG_GRKERNSEC_HIDESYM
70913+ NULL,
70914+#else
70915+ sp,
70916+#endif
70917+ atomic_read_unchecked(&sp->sk_drops));
70918 }
70919
70920 static int raw_seq_show(struct seq_file *seq, void *v)
70921diff -urNp linux-3.0.4/net/ipv4/route.c linux-3.0.4/net/ipv4/route.c
70922--- linux-3.0.4/net/ipv4/route.c 2011-09-02 18:11:21.000000000 -0400
70923+++ linux-3.0.4/net/ipv4/route.c 2011-08-23 21:47:56.000000000 -0400
70924@@ -304,7 +304,7 @@ static inline unsigned int rt_hash(__be3
70925
70926 static inline int rt_genid(struct net *net)
70927 {
70928- return atomic_read(&net->ipv4.rt_genid);
70929+ return atomic_read_unchecked(&net->ipv4.rt_genid);
70930 }
70931
70932 #ifdef CONFIG_PROC_FS
70933@@ -833,7 +833,7 @@ static void rt_cache_invalidate(struct n
70934 unsigned char shuffle;
70935
70936 get_random_bytes(&shuffle, sizeof(shuffle));
70937- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
70938+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
70939 }
70940
70941 /*
70942@@ -2834,7 +2834,7 @@ static int rt_fill_info(struct net *net,
70943 error = rt->dst.error;
70944 if (peer) {
70945 inet_peer_refcheck(rt->peer);
70946- id = atomic_read(&peer->ip_id_count) & 0xffff;
70947+ id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
70948 if (peer->tcp_ts_stamp) {
70949 ts = peer->tcp_ts;
70950 tsage = get_seconds() - peer->tcp_ts_stamp;
70951diff -urNp linux-3.0.4/net/ipv4/tcp.c linux-3.0.4/net/ipv4/tcp.c
70952--- linux-3.0.4/net/ipv4/tcp.c 2011-07-21 22:17:23.000000000 -0400
70953+++ linux-3.0.4/net/ipv4/tcp.c 2011-08-23 21:48:14.000000000 -0400
70954@@ -2122,6 +2122,8 @@ static int do_tcp_setsockopt(struct sock
70955 int val;
70956 int err = 0;
70957
70958+ pax_track_stack();
70959+
70960 /* These are data/string values, all the others are ints */
70961 switch (optname) {
70962 case TCP_CONGESTION: {
70963@@ -2501,6 +2503,8 @@ static int do_tcp_getsockopt(struct sock
70964 struct tcp_sock *tp = tcp_sk(sk);
70965 int val, len;
70966
70967+ pax_track_stack();
70968+
70969 if (get_user(len, optlen))
70970 return -EFAULT;
70971
70972diff -urNp linux-3.0.4/net/ipv4/tcp_ipv4.c linux-3.0.4/net/ipv4/tcp_ipv4.c
70973--- linux-3.0.4/net/ipv4/tcp_ipv4.c 2011-09-02 18:11:21.000000000 -0400
70974+++ linux-3.0.4/net/ipv4/tcp_ipv4.c 2011-08-23 21:48:14.000000000 -0400
70975@@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
70976 int sysctl_tcp_low_latency __read_mostly;
70977 EXPORT_SYMBOL(sysctl_tcp_low_latency);
70978
70979+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
70980+extern int grsec_enable_blackhole;
70981+#endif
70982
70983 #ifdef CONFIG_TCP_MD5SIG
70984 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
70985@@ -1607,6 +1610,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
70986 return 0;
70987
70988 reset:
70989+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
70990+ if (!grsec_enable_blackhole)
70991+#endif
70992 tcp_v4_send_reset(rsk, skb);
70993 discard:
70994 kfree_skb(skb);
70995@@ -1669,12 +1675,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
70996 TCP_SKB_CB(skb)->sacked = 0;
70997
70998 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
70999- if (!sk)
71000+ if (!sk) {
71001+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71002+ ret = 1;
71003+#endif
71004 goto no_tcp_socket;
71005-
71006+ }
71007 process:
71008- if (sk->sk_state == TCP_TIME_WAIT)
71009+ if (sk->sk_state == TCP_TIME_WAIT) {
71010+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71011+ ret = 2;
71012+#endif
71013 goto do_time_wait;
71014+ }
71015
71016 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
71017 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
71018@@ -1724,6 +1737,10 @@ no_tcp_socket:
71019 bad_packet:
71020 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
71021 } else {
71022+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71023+ if (!grsec_enable_blackhole || (ret == 1 &&
71024+ (skb->dev->flags & IFF_LOOPBACK)))
71025+#endif
71026 tcp_v4_send_reset(NULL, skb);
71027 }
71028
71029@@ -2388,7 +2405,11 @@ static void get_openreq4(struct sock *sk
71030 0, /* non standard timer */
71031 0, /* open_requests have no inode */
71032 atomic_read(&sk->sk_refcnt),
71033+#ifdef CONFIG_GRKERNSEC_HIDESYM
71034+ NULL,
71035+#else
71036 req,
71037+#endif
71038 len);
71039 }
71040
71041@@ -2438,7 +2459,12 @@ static void get_tcp4_sock(struct sock *s
71042 sock_i_uid(sk),
71043 icsk->icsk_probes_out,
71044 sock_i_ino(sk),
71045- atomic_read(&sk->sk_refcnt), sk,
71046+ atomic_read(&sk->sk_refcnt),
71047+#ifdef CONFIG_GRKERNSEC_HIDESYM
71048+ NULL,
71049+#else
71050+ sk,
71051+#endif
71052 jiffies_to_clock_t(icsk->icsk_rto),
71053 jiffies_to_clock_t(icsk->icsk_ack.ato),
71054 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
71055@@ -2466,7 +2492,13 @@ static void get_timewait4_sock(struct in
71056 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
71057 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
71058 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
71059- atomic_read(&tw->tw_refcnt), tw, len);
71060+ atomic_read(&tw->tw_refcnt),
71061+#ifdef CONFIG_GRKERNSEC_HIDESYM
71062+ NULL,
71063+#else
71064+ tw,
71065+#endif
71066+ len);
71067 }
71068
71069 #define TMPSZ 150
71070diff -urNp linux-3.0.4/net/ipv4/tcp_minisocks.c linux-3.0.4/net/ipv4/tcp_minisocks.c
71071--- linux-3.0.4/net/ipv4/tcp_minisocks.c 2011-07-21 22:17:23.000000000 -0400
71072+++ linux-3.0.4/net/ipv4/tcp_minisocks.c 2011-08-23 21:48:14.000000000 -0400
71073@@ -27,6 +27,10 @@
71074 #include <net/inet_common.h>
71075 #include <net/xfrm.h>
71076
71077+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71078+extern int grsec_enable_blackhole;
71079+#endif
71080+
71081 int sysctl_tcp_syncookies __read_mostly = 1;
71082 EXPORT_SYMBOL(sysctl_tcp_syncookies);
71083
71084@@ -745,6 +749,10 @@ listen_overflow:
71085
71086 embryonic_reset:
71087 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
71088+
71089+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71090+ if (!grsec_enable_blackhole)
71091+#endif
71092 if (!(flg & TCP_FLAG_RST))
71093 req->rsk_ops->send_reset(sk, skb);
71094
71095diff -urNp linux-3.0.4/net/ipv4/tcp_output.c linux-3.0.4/net/ipv4/tcp_output.c
71096--- linux-3.0.4/net/ipv4/tcp_output.c 2011-07-21 22:17:23.000000000 -0400
71097+++ linux-3.0.4/net/ipv4/tcp_output.c 2011-08-23 21:48:14.000000000 -0400
71098@@ -2421,6 +2421,8 @@ struct sk_buff *tcp_make_synack(struct s
71099 int mss;
71100 int s_data_desired = 0;
71101
71102+ pax_track_stack();
71103+
71104 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
71105 s_data_desired = cvp->s_data_desired;
71106 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
71107diff -urNp linux-3.0.4/net/ipv4/tcp_probe.c linux-3.0.4/net/ipv4/tcp_probe.c
71108--- linux-3.0.4/net/ipv4/tcp_probe.c 2011-07-21 22:17:23.000000000 -0400
71109+++ linux-3.0.4/net/ipv4/tcp_probe.c 2011-08-23 21:47:56.000000000 -0400
71110@@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file
71111 if (cnt + width >= len)
71112 break;
71113
71114- if (copy_to_user(buf + cnt, tbuf, width))
71115+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
71116 return -EFAULT;
71117 cnt += width;
71118 }
71119diff -urNp linux-3.0.4/net/ipv4/tcp_timer.c linux-3.0.4/net/ipv4/tcp_timer.c
71120--- linux-3.0.4/net/ipv4/tcp_timer.c 2011-07-21 22:17:23.000000000 -0400
71121+++ linux-3.0.4/net/ipv4/tcp_timer.c 2011-08-23 21:48:14.000000000 -0400
71122@@ -22,6 +22,10 @@
71123 #include <linux/gfp.h>
71124 #include <net/tcp.h>
71125
71126+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71127+extern int grsec_lastack_retries;
71128+#endif
71129+
71130 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
71131 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
71132 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
71133@@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock
71134 }
71135 }
71136
71137+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71138+ if ((sk->sk_state == TCP_LAST_ACK) &&
71139+ (grsec_lastack_retries > 0) &&
71140+ (grsec_lastack_retries < retry_until))
71141+ retry_until = grsec_lastack_retries;
71142+#endif
71143+
71144 if (retransmits_timed_out(sk, retry_until,
71145 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
71146 /* Has it gone just too far? */
71147diff -urNp linux-3.0.4/net/ipv4/udp.c linux-3.0.4/net/ipv4/udp.c
71148--- linux-3.0.4/net/ipv4/udp.c 2011-07-21 22:17:23.000000000 -0400
71149+++ linux-3.0.4/net/ipv4/udp.c 2011-08-23 21:48:14.000000000 -0400
71150@@ -86,6 +86,7 @@
71151 #include <linux/types.h>
71152 #include <linux/fcntl.h>
71153 #include <linux/module.h>
71154+#include <linux/security.h>
71155 #include <linux/socket.h>
71156 #include <linux/sockios.h>
71157 #include <linux/igmp.h>
71158@@ -107,6 +108,10 @@
71159 #include <net/xfrm.h>
71160 #include "udp_impl.h"
71161
71162+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71163+extern int grsec_enable_blackhole;
71164+#endif
71165+
71166 struct udp_table udp_table __read_mostly;
71167 EXPORT_SYMBOL(udp_table);
71168
71169@@ -564,6 +569,9 @@ found:
71170 return s;
71171 }
71172
71173+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
71174+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
71175+
71176 /*
71177 * This routine is called by the ICMP module when it gets some
71178 * sort of error condition. If err < 0 then the socket should
71179@@ -855,9 +863,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
71180 dport = usin->sin_port;
71181 if (dport == 0)
71182 return -EINVAL;
71183+
71184+ err = gr_search_udp_sendmsg(sk, usin);
71185+ if (err)
71186+ return err;
71187 } else {
71188 if (sk->sk_state != TCP_ESTABLISHED)
71189 return -EDESTADDRREQ;
71190+
71191+ err = gr_search_udp_sendmsg(sk, NULL);
71192+ if (err)
71193+ return err;
71194+
71195 daddr = inet->inet_daddr;
71196 dport = inet->inet_dport;
71197 /* Open fast path for connected socket.
71198@@ -1098,7 +1115,7 @@ static unsigned int first_packet_length(
71199 udp_lib_checksum_complete(skb)) {
71200 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
71201 IS_UDPLITE(sk));
71202- atomic_inc(&sk->sk_drops);
71203+ atomic_inc_unchecked(&sk->sk_drops);
71204 __skb_unlink(skb, rcvq);
71205 __skb_queue_tail(&list_kill, skb);
71206 }
71207@@ -1184,6 +1201,10 @@ try_again:
71208 if (!skb)
71209 goto out;
71210
71211+ err = gr_search_udp_recvmsg(sk, skb);
71212+ if (err)
71213+ goto out_free;
71214+
71215 ulen = skb->len - sizeof(struct udphdr);
71216 if (len > ulen)
71217 len = ulen;
71218@@ -1483,7 +1504,7 @@ int udp_queue_rcv_skb(struct sock *sk, s
71219
71220 drop:
71221 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
71222- atomic_inc(&sk->sk_drops);
71223+ atomic_inc_unchecked(&sk->sk_drops);
71224 kfree_skb(skb);
71225 return -1;
71226 }
71227@@ -1502,7 +1523,7 @@ static void flush_stack(struct sock **st
71228 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
71229
71230 if (!skb1) {
71231- atomic_inc(&sk->sk_drops);
71232+ atomic_inc_unchecked(&sk->sk_drops);
71233 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
71234 IS_UDPLITE(sk));
71235 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
71236@@ -1671,6 +1692,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
71237 goto csum_error;
71238
71239 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
71240+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71241+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
71242+#endif
71243 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
71244
71245 /*
71246@@ -2098,8 +2122,13 @@ static void udp4_format_sock(struct sock
71247 sk_wmem_alloc_get(sp),
71248 sk_rmem_alloc_get(sp),
71249 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
71250- atomic_read(&sp->sk_refcnt), sp,
71251- atomic_read(&sp->sk_drops), len);
71252+ atomic_read(&sp->sk_refcnt),
71253+#ifdef CONFIG_GRKERNSEC_HIDESYM
71254+ NULL,
71255+#else
71256+ sp,
71257+#endif
71258+ atomic_read_unchecked(&sp->sk_drops), len);
71259 }
71260
71261 int udp4_seq_show(struct seq_file *seq, void *v)
71262diff -urNp linux-3.0.4/net/ipv6/addrconf.c linux-3.0.4/net/ipv6/addrconf.c
71263--- linux-3.0.4/net/ipv6/addrconf.c 2011-07-21 22:17:23.000000000 -0400
71264+++ linux-3.0.4/net/ipv6/addrconf.c 2011-10-06 04:17:55.000000000 -0400
71265@@ -2072,7 +2072,7 @@ int addrconf_set_dstaddr(struct net *net
71266 p.iph.ihl = 5;
71267 p.iph.protocol = IPPROTO_IPV6;
71268 p.iph.ttl = 64;
71269- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
71270+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
71271
71272 if (ops->ndo_do_ioctl) {
71273 mm_segment_t oldfs = get_fs();
71274diff -urNp linux-3.0.4/net/ipv6/inet6_connection_sock.c linux-3.0.4/net/ipv6/inet6_connection_sock.c
71275--- linux-3.0.4/net/ipv6/inet6_connection_sock.c 2011-07-21 22:17:23.000000000 -0400
71276+++ linux-3.0.4/net/ipv6/inet6_connection_sock.c 2011-08-23 21:47:56.000000000 -0400
71277@@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *
71278 #ifdef CONFIG_XFRM
71279 {
71280 struct rt6_info *rt = (struct rt6_info *)dst;
71281- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
71282+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
71283 }
71284 #endif
71285 }
71286@@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(
71287 #ifdef CONFIG_XFRM
71288 if (dst) {
71289 struct rt6_info *rt = (struct rt6_info *)dst;
71290- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
71291+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
71292 __sk_dst_reset(sk);
71293 dst = NULL;
71294 }
71295diff -urNp linux-3.0.4/net/ipv6/ipv6_sockglue.c linux-3.0.4/net/ipv6/ipv6_sockglue.c
71296--- linux-3.0.4/net/ipv6/ipv6_sockglue.c 2011-07-21 22:17:23.000000000 -0400
71297+++ linux-3.0.4/net/ipv6/ipv6_sockglue.c 2011-10-06 04:17:55.000000000 -0400
71298@@ -129,6 +129,8 @@ static int do_ipv6_setsockopt(struct soc
71299 int val, valbool;
71300 int retv = -ENOPROTOOPT;
71301
71302+ pax_track_stack();
71303+
71304 if (optval == NULL)
71305 val=0;
71306 else {
71307@@ -919,6 +921,8 @@ static int do_ipv6_getsockopt(struct soc
71308 int len;
71309 int val;
71310
71311+ pax_track_stack();
71312+
71313 if (ip6_mroute_opt(optname))
71314 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
71315
71316@@ -960,7 +964,7 @@ static int do_ipv6_getsockopt(struct soc
71317 if (sk->sk_type != SOCK_STREAM)
71318 return -ENOPROTOOPT;
71319
71320- msg.msg_control = optval;
71321+ msg.msg_control = (void __force_kernel *)optval;
71322 msg.msg_controllen = len;
71323 msg.msg_flags = 0;
71324
71325diff -urNp linux-3.0.4/net/ipv6/raw.c linux-3.0.4/net/ipv6/raw.c
71326--- linux-3.0.4/net/ipv6/raw.c 2011-07-21 22:17:23.000000000 -0400
71327+++ linux-3.0.4/net/ipv6/raw.c 2011-08-23 21:48:14.000000000 -0400
71328@@ -376,7 +376,7 @@ static inline int rawv6_rcv_skb(struct s
71329 {
71330 if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
71331 skb_checksum_complete(skb)) {
71332- atomic_inc(&sk->sk_drops);
71333+ atomic_inc_unchecked(&sk->sk_drops);
71334 kfree_skb(skb);
71335 return NET_RX_DROP;
71336 }
71337@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
71338 struct raw6_sock *rp = raw6_sk(sk);
71339
71340 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
71341- atomic_inc(&sk->sk_drops);
71342+ atomic_inc_unchecked(&sk->sk_drops);
71343 kfree_skb(skb);
71344 return NET_RX_DROP;
71345 }
71346@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
71347
71348 if (inet->hdrincl) {
71349 if (skb_checksum_complete(skb)) {
71350- atomic_inc(&sk->sk_drops);
71351+ atomic_inc_unchecked(&sk->sk_drops);
71352 kfree_skb(skb);
71353 return NET_RX_DROP;
71354 }
71355@@ -601,7 +601,7 @@ out:
71356 return err;
71357 }
71358
71359-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
71360+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
71361 struct flowi6 *fl6, struct dst_entry **dstp,
71362 unsigned int flags)
71363 {
71364@@ -742,6 +742,8 @@ static int rawv6_sendmsg(struct kiocb *i
71365 u16 proto;
71366 int err;
71367
71368+ pax_track_stack();
71369+
71370 /* Rough check on arithmetic overflow,
71371 better check is made in ip6_append_data().
71372 */
71373@@ -909,12 +911,15 @@ do_confirm:
71374 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
71375 char __user *optval, int optlen)
71376 {
71377+ struct icmp6_filter filter;
71378+
71379 switch (optname) {
71380 case ICMPV6_FILTER:
71381 if (optlen > sizeof(struct icmp6_filter))
71382 optlen = sizeof(struct icmp6_filter);
71383- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
71384+ if (copy_from_user(&filter, optval, optlen))
71385 return -EFAULT;
71386+ raw6_sk(sk)->filter = filter;
71387 return 0;
71388 default:
71389 return -ENOPROTOOPT;
71390@@ -927,6 +932,7 @@ static int rawv6_geticmpfilter(struct so
71391 char __user *optval, int __user *optlen)
71392 {
71393 int len;
71394+ struct icmp6_filter filter;
71395
71396 switch (optname) {
71397 case ICMPV6_FILTER:
71398@@ -938,7 +944,8 @@ static int rawv6_geticmpfilter(struct so
71399 len = sizeof(struct icmp6_filter);
71400 if (put_user(len, optlen))
71401 return -EFAULT;
71402- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
71403+ filter = raw6_sk(sk)->filter;
71404+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
71405 return -EFAULT;
71406 return 0;
71407 default:
71408@@ -1252,7 +1259,13 @@ static void raw6_sock_seq_show(struct se
71409 0, 0L, 0,
71410 sock_i_uid(sp), 0,
71411 sock_i_ino(sp),
71412- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
71413+ atomic_read(&sp->sk_refcnt),
71414+#ifdef CONFIG_GRKERNSEC_HIDESYM
71415+ NULL,
71416+#else
71417+ sp,
71418+#endif
71419+ atomic_read_unchecked(&sp->sk_drops));
71420 }
71421
71422 static int raw6_seq_show(struct seq_file *seq, void *v)
71423diff -urNp linux-3.0.4/net/ipv6/tcp_ipv6.c linux-3.0.4/net/ipv6/tcp_ipv6.c
71424--- linux-3.0.4/net/ipv6/tcp_ipv6.c 2011-09-02 18:11:21.000000000 -0400
71425+++ linux-3.0.4/net/ipv6/tcp_ipv6.c 2011-08-23 21:48:14.000000000 -0400
71426@@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
71427 }
71428 #endif
71429
71430+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71431+extern int grsec_enable_blackhole;
71432+#endif
71433+
71434 static void tcp_v6_hash(struct sock *sk)
71435 {
71436 if (sk->sk_state != TCP_CLOSE) {
71437@@ -1662,6 +1666,9 @@ static int tcp_v6_do_rcv(struct sock *sk
71438 return 0;
71439
71440 reset:
71441+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71442+ if (!grsec_enable_blackhole)
71443+#endif
71444 tcp_v6_send_reset(sk, skb);
71445 discard:
71446 if (opt_skb)
71447@@ -1741,12 +1748,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
71448 TCP_SKB_CB(skb)->sacked = 0;
71449
71450 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
71451- if (!sk)
71452+ if (!sk) {
71453+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71454+ ret = 1;
71455+#endif
71456 goto no_tcp_socket;
71457+ }
71458
71459 process:
71460- if (sk->sk_state == TCP_TIME_WAIT)
71461+ if (sk->sk_state == TCP_TIME_WAIT) {
71462+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71463+ ret = 2;
71464+#endif
71465 goto do_time_wait;
71466+ }
71467
71468 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
71469 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
71470@@ -1794,6 +1809,10 @@ no_tcp_socket:
71471 bad_packet:
71472 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
71473 } else {
71474+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71475+ if (!grsec_enable_blackhole || (ret == 1 &&
71476+ (skb->dev->flags & IFF_LOOPBACK)))
71477+#endif
71478 tcp_v6_send_reset(NULL, skb);
71479 }
71480
71481@@ -2054,7 +2073,13 @@ static void get_openreq6(struct seq_file
71482 uid,
71483 0, /* non standard timer */
71484 0, /* open_requests have no inode */
71485- 0, req);
71486+ 0,
71487+#ifdef CONFIG_GRKERNSEC_HIDESYM
71488+ NULL
71489+#else
71490+ req
71491+#endif
71492+ );
71493 }
71494
71495 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
71496@@ -2104,7 +2129,12 @@ static void get_tcp6_sock(struct seq_fil
71497 sock_i_uid(sp),
71498 icsk->icsk_probes_out,
71499 sock_i_ino(sp),
71500- atomic_read(&sp->sk_refcnt), sp,
71501+ atomic_read(&sp->sk_refcnt),
71502+#ifdef CONFIG_GRKERNSEC_HIDESYM
71503+ NULL,
71504+#else
71505+ sp,
71506+#endif
71507 jiffies_to_clock_t(icsk->icsk_rto),
71508 jiffies_to_clock_t(icsk->icsk_ack.ato),
71509 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
71510@@ -2139,7 +2169,13 @@ static void get_timewait6_sock(struct se
71511 dest->s6_addr32[2], dest->s6_addr32[3], destp,
71512 tw->tw_substate, 0, 0,
71513 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
71514- atomic_read(&tw->tw_refcnt), tw);
71515+ atomic_read(&tw->tw_refcnt),
71516+#ifdef CONFIG_GRKERNSEC_HIDESYM
71517+ NULL
71518+#else
71519+ tw
71520+#endif
71521+ );
71522 }
71523
71524 static int tcp6_seq_show(struct seq_file *seq, void *v)
71525diff -urNp linux-3.0.4/net/ipv6/udp.c linux-3.0.4/net/ipv6/udp.c
71526--- linux-3.0.4/net/ipv6/udp.c 2011-09-02 18:11:21.000000000 -0400
71527+++ linux-3.0.4/net/ipv6/udp.c 2011-08-23 21:48:14.000000000 -0400
71528@@ -50,6 +50,10 @@
71529 #include <linux/seq_file.h>
71530 #include "udp_impl.h"
71531
71532+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71533+extern int grsec_enable_blackhole;
71534+#endif
71535+
71536 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
71537 {
71538 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
71539@@ -548,7 +552,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
71540
71541 return 0;
71542 drop:
71543- atomic_inc(&sk->sk_drops);
71544+ atomic_inc_unchecked(&sk->sk_drops);
71545 drop_no_sk_drops_inc:
71546 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
71547 kfree_skb(skb);
71548@@ -624,7 +628,7 @@ static void flush_stack(struct sock **st
71549 continue;
71550 }
71551 drop:
71552- atomic_inc(&sk->sk_drops);
71553+ atomic_inc_unchecked(&sk->sk_drops);
71554 UDP6_INC_STATS_BH(sock_net(sk),
71555 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
71556 UDP6_INC_STATS_BH(sock_net(sk),
71557@@ -779,6 +783,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
71558 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
71559 proto == IPPROTO_UDPLITE);
71560
71561+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71562+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
71563+#endif
71564 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
71565
71566 kfree_skb(skb);
71567@@ -795,7 +802,7 @@ int __udp6_lib_rcv(struct sk_buff *skb,
71568 if (!sock_owned_by_user(sk))
71569 udpv6_queue_rcv_skb(sk, skb);
71570 else if (sk_add_backlog(sk, skb)) {
71571- atomic_inc(&sk->sk_drops);
71572+ atomic_inc_unchecked(&sk->sk_drops);
71573 bh_unlock_sock(sk);
71574 sock_put(sk);
71575 goto discard;
71576@@ -1406,8 +1413,13 @@ static void udp6_sock_seq_show(struct se
71577 0, 0L, 0,
71578 sock_i_uid(sp), 0,
71579 sock_i_ino(sp),
71580- atomic_read(&sp->sk_refcnt), sp,
71581- atomic_read(&sp->sk_drops));
71582+ atomic_read(&sp->sk_refcnt),
71583+#ifdef CONFIG_GRKERNSEC_HIDESYM
71584+ NULL,
71585+#else
71586+ sp,
71587+#endif
71588+ atomic_read_unchecked(&sp->sk_drops));
71589 }
71590
71591 int udp6_seq_show(struct seq_file *seq, void *v)
71592diff -urNp linux-3.0.4/net/irda/ircomm/ircomm_tty.c linux-3.0.4/net/irda/ircomm/ircomm_tty.c
71593--- linux-3.0.4/net/irda/ircomm/ircomm_tty.c 2011-07-21 22:17:23.000000000 -0400
71594+++ linux-3.0.4/net/irda/ircomm/ircomm_tty.c 2011-08-23 21:47:56.000000000 -0400
71595@@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(st
71596 add_wait_queue(&self->open_wait, &wait);
71597
71598 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
71599- __FILE__,__LINE__, tty->driver->name, self->open_count );
71600+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
71601
71602 /* As far as I can see, we protect open_count - Jean II */
71603 spin_lock_irqsave(&self->spinlock, flags);
71604 if (!tty_hung_up_p(filp)) {
71605 extra_count = 1;
71606- self->open_count--;
71607+ local_dec(&self->open_count);
71608 }
71609 spin_unlock_irqrestore(&self->spinlock, flags);
71610- self->blocked_open++;
71611+ local_inc(&self->blocked_open);
71612
71613 while (1) {
71614 if (tty->termios->c_cflag & CBAUD) {
71615@@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(st
71616 }
71617
71618 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
71619- __FILE__,__LINE__, tty->driver->name, self->open_count );
71620+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
71621
71622 schedule();
71623 }
71624@@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(st
71625 if (extra_count) {
71626 /* ++ is not atomic, so this should be protected - Jean II */
71627 spin_lock_irqsave(&self->spinlock, flags);
71628- self->open_count++;
71629+ local_inc(&self->open_count);
71630 spin_unlock_irqrestore(&self->spinlock, flags);
71631 }
71632- self->blocked_open--;
71633+ local_dec(&self->blocked_open);
71634
71635 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
71636- __FILE__,__LINE__, tty->driver->name, self->open_count);
71637+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
71638
71639 if (!retval)
71640 self->flags |= ASYNC_NORMAL_ACTIVE;
71641@@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_st
71642 }
71643 /* ++ is not atomic, so this should be protected - Jean II */
71644 spin_lock_irqsave(&self->spinlock, flags);
71645- self->open_count++;
71646+ local_inc(&self->open_count);
71647
71648 tty->driver_data = self;
71649 self->tty = tty;
71650 spin_unlock_irqrestore(&self->spinlock, flags);
71651
71652 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
71653- self->line, self->open_count);
71654+ self->line, local_read(&self->open_count));
71655
71656 /* Not really used by us, but lets do it anyway */
71657 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
71658@@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_
71659 return;
71660 }
71661
71662- if ((tty->count == 1) && (self->open_count != 1)) {
71663+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
71664 /*
71665 * Uh, oh. tty->count is 1, which means that the tty
71666 * structure will be freed. state->count should always
71667@@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_
71668 */
71669 IRDA_DEBUG(0, "%s(), bad serial port count; "
71670 "tty->count is 1, state->count is %d\n", __func__ ,
71671- self->open_count);
71672- self->open_count = 1;
71673+ local_read(&self->open_count));
71674+ local_set(&self->open_count, 1);
71675 }
71676
71677- if (--self->open_count < 0) {
71678+ if (local_dec_return(&self->open_count) < 0) {
71679 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
71680- __func__, self->line, self->open_count);
71681- self->open_count = 0;
71682+ __func__, self->line, local_read(&self->open_count));
71683+ local_set(&self->open_count, 0);
71684 }
71685- if (self->open_count) {
71686+ if (local_read(&self->open_count)) {
71687 spin_unlock_irqrestore(&self->spinlock, flags);
71688
71689 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
71690@@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_
71691 tty->closing = 0;
71692 self->tty = NULL;
71693
71694- if (self->blocked_open) {
71695+ if (local_read(&self->blocked_open)) {
71696 if (self->close_delay)
71697 schedule_timeout_interruptible(self->close_delay);
71698 wake_up_interruptible(&self->open_wait);
71699@@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty
71700 spin_lock_irqsave(&self->spinlock, flags);
71701 self->flags &= ~ASYNC_NORMAL_ACTIVE;
71702 self->tty = NULL;
71703- self->open_count = 0;
71704+ local_set(&self->open_count, 0);
71705 spin_unlock_irqrestore(&self->spinlock, flags);
71706
71707 wake_up_interruptible(&self->open_wait);
71708@@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct
71709 seq_putc(m, '\n');
71710
71711 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
71712- seq_printf(m, "Open count: %d\n", self->open_count);
71713+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
71714 seq_printf(m, "Max data size: %d\n", self->max_data_size);
71715 seq_printf(m, "Max header size: %d\n", self->max_header_size);
71716
71717diff -urNp linux-3.0.4/net/iucv/af_iucv.c linux-3.0.4/net/iucv/af_iucv.c
71718--- linux-3.0.4/net/iucv/af_iucv.c 2011-07-21 22:17:23.000000000 -0400
71719+++ linux-3.0.4/net/iucv/af_iucv.c 2011-08-23 21:47:56.000000000 -0400
71720@@ -648,10 +648,10 @@ static int iucv_sock_autobind(struct soc
71721
71722 write_lock_bh(&iucv_sk_list.lock);
71723
71724- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
71725+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
71726 while (__iucv_get_sock_by_name(name)) {
71727 sprintf(name, "%08x",
71728- atomic_inc_return(&iucv_sk_list.autobind_name));
71729+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
71730 }
71731
71732 write_unlock_bh(&iucv_sk_list.lock);
71733diff -urNp linux-3.0.4/net/key/af_key.c linux-3.0.4/net/key/af_key.c
71734--- linux-3.0.4/net/key/af_key.c 2011-07-21 22:17:23.000000000 -0400
71735+++ linux-3.0.4/net/key/af_key.c 2011-08-23 21:48:14.000000000 -0400
71736@@ -2481,6 +2481,8 @@ static int pfkey_migrate(struct sock *sk
71737 struct xfrm_migrate m[XFRM_MAX_DEPTH];
71738 struct xfrm_kmaddress k;
71739
71740+ pax_track_stack();
71741+
71742 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
71743 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
71744 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
71745@@ -3016,10 +3018,10 @@ static int pfkey_send_policy_notify(stru
71746 static u32 get_acqseq(void)
71747 {
71748 u32 res;
71749- static atomic_t acqseq;
71750+ static atomic_unchecked_t acqseq;
71751
71752 do {
71753- res = atomic_inc_return(&acqseq);
71754+ res = atomic_inc_return_unchecked(&acqseq);
71755 } while (!res);
71756 return res;
71757 }
71758diff -urNp linux-3.0.4/net/lapb/lapb_iface.c linux-3.0.4/net/lapb/lapb_iface.c
71759--- linux-3.0.4/net/lapb/lapb_iface.c 2011-07-21 22:17:23.000000000 -0400
71760+++ linux-3.0.4/net/lapb/lapb_iface.c 2011-08-23 21:47:56.000000000 -0400
71761@@ -158,7 +158,7 @@ int lapb_register(struct net_device *dev
71762 goto out;
71763
71764 lapb->dev = dev;
71765- lapb->callbacks = *callbacks;
71766+ lapb->callbacks = callbacks;
71767
71768 __lapb_insert_cb(lapb);
71769
71770@@ -380,32 +380,32 @@ int lapb_data_received(struct net_device
71771
71772 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
71773 {
71774- if (lapb->callbacks.connect_confirmation)
71775- lapb->callbacks.connect_confirmation(lapb->dev, reason);
71776+ if (lapb->callbacks->connect_confirmation)
71777+ lapb->callbacks->connect_confirmation(lapb->dev, reason);
71778 }
71779
71780 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
71781 {
71782- if (lapb->callbacks.connect_indication)
71783- lapb->callbacks.connect_indication(lapb->dev, reason);
71784+ if (lapb->callbacks->connect_indication)
71785+ lapb->callbacks->connect_indication(lapb->dev, reason);
71786 }
71787
71788 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
71789 {
71790- if (lapb->callbacks.disconnect_confirmation)
71791- lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
71792+ if (lapb->callbacks->disconnect_confirmation)
71793+ lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
71794 }
71795
71796 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
71797 {
71798- if (lapb->callbacks.disconnect_indication)
71799- lapb->callbacks.disconnect_indication(lapb->dev, reason);
71800+ if (lapb->callbacks->disconnect_indication)
71801+ lapb->callbacks->disconnect_indication(lapb->dev, reason);
71802 }
71803
71804 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
71805 {
71806- if (lapb->callbacks.data_indication)
71807- return lapb->callbacks.data_indication(lapb->dev, skb);
71808+ if (lapb->callbacks->data_indication)
71809+ return lapb->callbacks->data_indication(lapb->dev, skb);
71810
71811 kfree_skb(skb);
71812 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
71813@@ -415,8 +415,8 @@ int lapb_data_transmit(struct lapb_cb *l
71814 {
71815 int used = 0;
71816
71817- if (lapb->callbacks.data_transmit) {
71818- lapb->callbacks.data_transmit(lapb->dev, skb);
71819+ if (lapb->callbacks->data_transmit) {
71820+ lapb->callbacks->data_transmit(lapb->dev, skb);
71821 used = 1;
71822 }
71823
71824diff -urNp linux-3.0.4/net/mac80211/debugfs_sta.c linux-3.0.4/net/mac80211/debugfs_sta.c
71825--- linux-3.0.4/net/mac80211/debugfs_sta.c 2011-07-21 22:17:23.000000000 -0400
71826+++ linux-3.0.4/net/mac80211/debugfs_sta.c 2011-08-23 21:48:14.000000000 -0400
71827@@ -140,6 +140,8 @@ static ssize_t sta_agg_status_read(struc
71828 struct tid_ampdu_rx *tid_rx;
71829 struct tid_ampdu_tx *tid_tx;
71830
71831+ pax_track_stack();
71832+
71833 rcu_read_lock();
71834
71835 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
71836@@ -240,6 +242,8 @@ static ssize_t sta_ht_capa_read(struct f
71837 struct sta_info *sta = file->private_data;
71838 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
71839
71840+ pax_track_stack();
71841+
71842 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
71843 htc->ht_supported ? "" : "not ");
71844 if (htc->ht_supported) {
71845diff -urNp linux-3.0.4/net/mac80211/ieee80211_i.h linux-3.0.4/net/mac80211/ieee80211_i.h
71846--- linux-3.0.4/net/mac80211/ieee80211_i.h 2011-07-21 22:17:23.000000000 -0400
71847+++ linux-3.0.4/net/mac80211/ieee80211_i.h 2011-08-23 21:47:56.000000000 -0400
71848@@ -27,6 +27,7 @@
71849 #include <net/ieee80211_radiotap.h>
71850 #include <net/cfg80211.h>
71851 #include <net/mac80211.h>
71852+#include <asm/local.h>
71853 #include "key.h"
71854 #include "sta_info.h"
71855
71856@@ -721,7 +722,7 @@ struct ieee80211_local {
71857 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
71858 spinlock_t queue_stop_reason_lock;
71859
71860- int open_count;
71861+ local_t open_count;
71862 int monitors, cooked_mntrs;
71863 /* number of interfaces with corresponding FIF_ flags */
71864 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
71865diff -urNp linux-3.0.4/net/mac80211/iface.c linux-3.0.4/net/mac80211/iface.c
71866--- linux-3.0.4/net/mac80211/iface.c 2011-09-02 18:11:21.000000000 -0400
71867+++ linux-3.0.4/net/mac80211/iface.c 2011-08-23 21:47:56.000000000 -0400
71868@@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_
71869 break;
71870 }
71871
71872- if (local->open_count == 0) {
71873+ if (local_read(&local->open_count) == 0) {
71874 res = drv_start(local);
71875 if (res)
71876 goto err_del_bss;
71877@@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_
71878 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
71879
71880 if (!is_valid_ether_addr(dev->dev_addr)) {
71881- if (!local->open_count)
71882+ if (!local_read(&local->open_count))
71883 drv_stop(local);
71884 return -EADDRNOTAVAIL;
71885 }
71886@@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_
71887 mutex_unlock(&local->mtx);
71888
71889 if (coming_up)
71890- local->open_count++;
71891+ local_inc(&local->open_count);
71892
71893 if (hw_reconf_flags) {
71894 ieee80211_hw_config(local, hw_reconf_flags);
71895@@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_
71896 err_del_interface:
71897 drv_remove_interface(local, &sdata->vif);
71898 err_stop:
71899- if (!local->open_count)
71900+ if (!local_read(&local->open_count))
71901 drv_stop(local);
71902 err_del_bss:
71903 sdata->bss = NULL;
71904@@ -475,7 +475,7 @@ static void ieee80211_do_stop(struct iee
71905 }
71906
71907 if (going_down)
71908- local->open_count--;
71909+ local_dec(&local->open_count);
71910
71911 switch (sdata->vif.type) {
71912 case NL80211_IFTYPE_AP_VLAN:
71913@@ -534,7 +534,7 @@ static void ieee80211_do_stop(struct iee
71914
71915 ieee80211_recalc_ps(local, -1);
71916
71917- if (local->open_count == 0) {
71918+ if (local_read(&local->open_count) == 0) {
71919 if (local->ops->napi_poll)
71920 napi_disable(&local->napi);
71921 ieee80211_clear_tx_pending(local);
71922diff -urNp linux-3.0.4/net/mac80211/main.c linux-3.0.4/net/mac80211/main.c
71923--- linux-3.0.4/net/mac80211/main.c 2011-07-21 22:17:23.000000000 -0400
71924+++ linux-3.0.4/net/mac80211/main.c 2011-08-23 21:47:56.000000000 -0400
71925@@ -209,7 +209,7 @@ int ieee80211_hw_config(struct ieee80211
71926 local->hw.conf.power_level = power;
71927 }
71928
71929- if (changed && local->open_count) {
71930+ if (changed && local_read(&local->open_count)) {
71931 ret = drv_config(local, changed);
71932 /*
71933 * Goal:
71934diff -urNp linux-3.0.4/net/mac80211/mlme.c linux-3.0.4/net/mac80211/mlme.c
71935--- linux-3.0.4/net/mac80211/mlme.c 2011-09-02 18:11:21.000000000 -0400
71936+++ linux-3.0.4/net/mac80211/mlme.c 2011-08-23 21:48:14.000000000 -0400
71937@@ -1444,6 +1444,8 @@ static bool ieee80211_assoc_success(stru
71938 bool have_higher_than_11mbit = false;
71939 u16 ap_ht_cap_flags;
71940
71941+ pax_track_stack();
71942+
71943 /* AssocResp and ReassocResp have identical structure */
71944
71945 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
71946diff -urNp linux-3.0.4/net/mac80211/pm.c linux-3.0.4/net/mac80211/pm.c
71947--- linux-3.0.4/net/mac80211/pm.c 2011-07-21 22:17:23.000000000 -0400
71948+++ linux-3.0.4/net/mac80211/pm.c 2011-08-23 21:47:56.000000000 -0400
71949@@ -47,7 +47,7 @@ int __ieee80211_suspend(struct ieee80211
71950 cancel_work_sync(&local->dynamic_ps_enable_work);
71951 del_timer_sync(&local->dynamic_ps_timer);
71952
71953- local->wowlan = wowlan && local->open_count;
71954+ local->wowlan = wowlan && local_read(&local->open_count);
71955 if (local->wowlan) {
71956 int err = drv_suspend(local, wowlan);
71957 if (err) {
71958@@ -111,7 +111,7 @@ int __ieee80211_suspend(struct ieee80211
71959 }
71960
71961 /* stop hardware - this must stop RX */
71962- if (local->open_count)
71963+ if (local_read(&local->open_count))
71964 ieee80211_stop_device(local);
71965
71966 suspend:
71967diff -urNp linux-3.0.4/net/mac80211/rate.c linux-3.0.4/net/mac80211/rate.c
71968--- linux-3.0.4/net/mac80211/rate.c 2011-07-21 22:17:23.000000000 -0400
71969+++ linux-3.0.4/net/mac80211/rate.c 2011-08-23 21:47:56.000000000 -0400
71970@@ -371,7 +371,7 @@ int ieee80211_init_rate_ctrl_alg(struct
71971
71972 ASSERT_RTNL();
71973
71974- if (local->open_count)
71975+ if (local_read(&local->open_count))
71976 return -EBUSY;
71977
71978 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
71979diff -urNp linux-3.0.4/net/mac80211/rc80211_pid_debugfs.c linux-3.0.4/net/mac80211/rc80211_pid_debugfs.c
71980--- linux-3.0.4/net/mac80211/rc80211_pid_debugfs.c 2011-07-21 22:17:23.000000000 -0400
71981+++ linux-3.0.4/net/mac80211/rc80211_pid_debugfs.c 2011-08-23 21:47:56.000000000 -0400
71982@@ -192,7 +192,7 @@ static ssize_t rate_control_pid_events_r
71983
71984 spin_unlock_irqrestore(&events->lock, status);
71985
71986- if (copy_to_user(buf, pb, p))
71987+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
71988 return -EFAULT;
71989
71990 return p;
71991diff -urNp linux-3.0.4/net/mac80211/util.c linux-3.0.4/net/mac80211/util.c
71992--- linux-3.0.4/net/mac80211/util.c 2011-07-21 22:17:23.000000000 -0400
71993+++ linux-3.0.4/net/mac80211/util.c 2011-08-23 21:47:56.000000000 -0400
71994@@ -1147,7 +1147,7 @@ int ieee80211_reconfig(struct ieee80211_
71995 #endif
71996
71997 /* restart hardware */
71998- if (local->open_count) {
71999+ if (local_read(&local->open_count)) {
72000 /*
72001 * Upon resume hardware can sometimes be goofy due to
72002 * various platform / driver / bus issues, so restarting
72003diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_conn.c linux-3.0.4/net/netfilter/ipvs/ip_vs_conn.c
72004--- linux-3.0.4/net/netfilter/ipvs/ip_vs_conn.c 2011-07-21 22:17:23.000000000 -0400
72005+++ linux-3.0.4/net/netfilter/ipvs/ip_vs_conn.c 2011-08-23 21:47:56.000000000 -0400
72006@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
72007 /* Increase the refcnt counter of the dest */
72008 atomic_inc(&dest->refcnt);
72009
72010- conn_flags = atomic_read(&dest->conn_flags);
72011+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
72012 if (cp->protocol != IPPROTO_UDP)
72013 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
72014 /* Bind with the destination and its corresponding transmitter */
72015@@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_p
72016 atomic_set(&cp->refcnt, 1);
72017
72018 atomic_set(&cp->n_control, 0);
72019- atomic_set(&cp->in_pkts, 0);
72020+ atomic_set_unchecked(&cp->in_pkts, 0);
72021
72022 atomic_inc(&ipvs->conn_count);
72023 if (flags & IP_VS_CONN_F_NO_CPORT)
72024@@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip
72025
72026 /* Don't drop the entry if its number of incoming packets is not
72027 located in [0, 8] */
72028- i = atomic_read(&cp->in_pkts);
72029+ i = atomic_read_unchecked(&cp->in_pkts);
72030 if (i > 8 || i < 0) return 0;
72031
72032 if (!todrop_rate[i]) return 0;
72033diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_core.c linux-3.0.4/net/netfilter/ipvs/ip_vs_core.c
72034--- linux-3.0.4/net/netfilter/ipvs/ip_vs_core.c 2011-07-21 22:17:23.000000000 -0400
72035+++ linux-3.0.4/net/netfilter/ipvs/ip_vs_core.c 2011-08-23 21:47:56.000000000 -0400
72036@@ -563,7 +563,7 @@ int ip_vs_leave(struct ip_vs_service *sv
72037 ret = cp->packet_xmit(skb, cp, pd->pp);
72038 /* do not touch skb anymore */
72039
72040- atomic_inc(&cp->in_pkts);
72041+ atomic_inc_unchecked(&cp->in_pkts);
72042 ip_vs_conn_put(cp);
72043 return ret;
72044 }
72045@@ -1613,7 +1613,7 @@ ip_vs_in(unsigned int hooknum, struct sk
72046 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
72047 pkts = sysctl_sync_threshold(ipvs);
72048 else
72049- pkts = atomic_add_return(1, &cp->in_pkts);
72050+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
72051
72052 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
72053 cp->protocol == IPPROTO_SCTP) {
72054diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_ctl.c linux-3.0.4/net/netfilter/ipvs/ip_vs_ctl.c
72055--- linux-3.0.4/net/netfilter/ipvs/ip_vs_ctl.c 2011-09-02 18:11:21.000000000 -0400
72056+++ linux-3.0.4/net/netfilter/ipvs/ip_vs_ctl.c 2011-08-23 21:48:14.000000000 -0400
72057@@ -782,7 +782,7 @@ __ip_vs_update_dest(struct ip_vs_service
72058 ip_vs_rs_hash(ipvs, dest);
72059 write_unlock_bh(&ipvs->rs_lock);
72060 }
72061- atomic_set(&dest->conn_flags, conn_flags);
72062+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
72063
72064 /* bind the service */
72065 if (!dest->svc) {
72066@@ -2027,7 +2027,7 @@ static int ip_vs_info_seq_show(struct se
72067 " %-7s %-6d %-10d %-10d\n",
72068 &dest->addr.in6,
72069 ntohs(dest->port),
72070- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
72071+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
72072 atomic_read(&dest->weight),
72073 atomic_read(&dest->activeconns),
72074 atomic_read(&dest->inactconns));
72075@@ -2038,7 +2038,7 @@ static int ip_vs_info_seq_show(struct se
72076 "%-7s %-6d %-10d %-10d\n",
72077 ntohl(dest->addr.ip),
72078 ntohs(dest->port),
72079- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
72080+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
72081 atomic_read(&dest->weight),
72082 atomic_read(&dest->activeconns),
72083 atomic_read(&dest->inactconns));
72084@@ -2284,6 +2284,8 @@ do_ip_vs_set_ctl(struct sock *sk, int cm
72085 struct ip_vs_dest_user *udest_compat;
72086 struct ip_vs_dest_user_kern udest;
72087
72088+ pax_track_stack();
72089+
72090 if (!capable(CAP_NET_ADMIN))
72091 return -EPERM;
72092
72093@@ -2498,7 +2500,7 @@ __ip_vs_get_dest_entries(struct net *net
72094
72095 entry.addr = dest->addr.ip;
72096 entry.port = dest->port;
72097- entry.conn_flags = atomic_read(&dest->conn_flags);
72098+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
72099 entry.weight = atomic_read(&dest->weight);
72100 entry.u_threshold = dest->u_threshold;
72101 entry.l_threshold = dest->l_threshold;
72102@@ -3026,7 +3028,7 @@ static int ip_vs_genl_fill_dest(struct s
72103 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
72104
72105 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
72106- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
72107+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
72108 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
72109 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
72110 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
72111diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_sync.c linux-3.0.4/net/netfilter/ipvs/ip_vs_sync.c
72112--- linux-3.0.4/net/netfilter/ipvs/ip_vs_sync.c 2011-07-21 22:17:23.000000000 -0400
72113+++ linux-3.0.4/net/netfilter/ipvs/ip_vs_sync.c 2011-08-23 21:47:56.000000000 -0400
72114@@ -648,7 +648,7 @@ control:
72115 * i.e only increment in_pkts for Templates.
72116 */
72117 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
72118- int pkts = atomic_add_return(1, &cp->in_pkts);
72119+ int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
72120
72121 if (pkts % sysctl_sync_period(ipvs) != 1)
72122 return;
72123@@ -794,7 +794,7 @@ static void ip_vs_proc_conn(struct net *
72124
72125 if (opt)
72126 memcpy(&cp->in_seq, opt, sizeof(*opt));
72127- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
72128+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
72129 cp->state = state;
72130 cp->old_state = cp->state;
72131 /*
72132diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_xmit.c linux-3.0.4/net/netfilter/ipvs/ip_vs_xmit.c
72133--- linux-3.0.4/net/netfilter/ipvs/ip_vs_xmit.c 2011-07-21 22:17:23.000000000 -0400
72134+++ linux-3.0.4/net/netfilter/ipvs/ip_vs_xmit.c 2011-08-23 21:47:56.000000000 -0400
72135@@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
72136 else
72137 rc = NF_ACCEPT;
72138 /* do not touch skb anymore */
72139- atomic_inc(&cp->in_pkts);
72140+ atomic_inc_unchecked(&cp->in_pkts);
72141 goto out;
72142 }
72143
72144@@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
72145 else
72146 rc = NF_ACCEPT;
72147 /* do not touch skb anymore */
72148- atomic_inc(&cp->in_pkts);
72149+ atomic_inc_unchecked(&cp->in_pkts);
72150 goto out;
72151 }
72152
72153diff -urNp linux-3.0.4/net/netfilter/Kconfig linux-3.0.4/net/netfilter/Kconfig
72154--- linux-3.0.4/net/netfilter/Kconfig 2011-07-21 22:17:23.000000000 -0400
72155+++ linux-3.0.4/net/netfilter/Kconfig 2011-08-23 21:48:14.000000000 -0400
72156@@ -781,6 +781,16 @@ config NETFILTER_XT_MATCH_ESP
72157
72158 To compile it as a module, choose M here. If unsure, say N.
72159
72160+config NETFILTER_XT_MATCH_GRADM
72161+ tristate '"gradm" match support'
72162+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
72163+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
72164+ ---help---
72165+ The gradm match allows to match on grsecurity RBAC being enabled.
72166+ It is useful when iptables rules are applied early on bootup to
72167+ prevent connections to the machine (except from a trusted host)
72168+ while the RBAC system is disabled.
72169+
72170 config NETFILTER_XT_MATCH_HASHLIMIT
72171 tristate '"hashlimit" match support'
72172 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
72173diff -urNp linux-3.0.4/net/netfilter/Makefile linux-3.0.4/net/netfilter/Makefile
72174--- linux-3.0.4/net/netfilter/Makefile 2011-07-21 22:17:23.000000000 -0400
72175+++ linux-3.0.4/net/netfilter/Makefile 2011-08-23 21:48:14.000000000 -0400
72176@@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) +=
72177 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
72178 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
72179 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
72180+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
72181 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
72182 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
72183 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
72184diff -urNp linux-3.0.4/net/netfilter/nfnetlink_log.c linux-3.0.4/net/netfilter/nfnetlink_log.c
72185--- linux-3.0.4/net/netfilter/nfnetlink_log.c 2011-07-21 22:17:23.000000000 -0400
72186+++ linux-3.0.4/net/netfilter/nfnetlink_log.c 2011-08-23 21:47:56.000000000 -0400
72187@@ -70,7 +70,7 @@ struct nfulnl_instance {
72188 };
72189
72190 static DEFINE_SPINLOCK(instances_lock);
72191-static atomic_t global_seq;
72192+static atomic_unchecked_t global_seq;
72193
72194 #define INSTANCE_BUCKETS 16
72195 static struct hlist_head instance_table[INSTANCE_BUCKETS];
72196@@ -505,7 +505,7 @@ __build_packet_message(struct nfulnl_ins
72197 /* global sequence number */
72198 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
72199 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
72200- htonl(atomic_inc_return(&global_seq)));
72201+ htonl(atomic_inc_return_unchecked(&global_seq)));
72202
72203 if (data_len) {
72204 struct nlattr *nla;
72205diff -urNp linux-3.0.4/net/netfilter/nfnetlink_queue.c linux-3.0.4/net/netfilter/nfnetlink_queue.c
72206--- linux-3.0.4/net/netfilter/nfnetlink_queue.c 2011-07-21 22:17:23.000000000 -0400
72207+++ linux-3.0.4/net/netfilter/nfnetlink_queue.c 2011-08-23 21:47:56.000000000 -0400
72208@@ -58,7 +58,7 @@ struct nfqnl_instance {
72209 */
72210 spinlock_t lock;
72211 unsigned int queue_total;
72212- atomic_t id_sequence; /* 'sequence' of pkt ids */
72213+ atomic_unchecked_t id_sequence; /* 'sequence' of pkt ids */
72214 struct list_head queue_list; /* packets in queue */
72215 };
72216
72217@@ -272,7 +272,7 @@ nfqnl_build_packet_message(struct nfqnl_
72218 nfmsg->version = NFNETLINK_V0;
72219 nfmsg->res_id = htons(queue->queue_num);
72220
72221- entry->id = atomic_inc_return(&queue->id_sequence);
72222+ entry->id = atomic_inc_return_unchecked(&queue->id_sequence);
72223 pmsg.packet_id = htonl(entry->id);
72224 pmsg.hw_protocol = entskb->protocol;
72225 pmsg.hook = entry->hook;
72226@@ -870,7 +870,7 @@ static int seq_show(struct seq_file *s,
72227 inst->peer_pid, inst->queue_total,
72228 inst->copy_mode, inst->copy_range,
72229 inst->queue_dropped, inst->queue_user_dropped,
72230- atomic_read(&inst->id_sequence), 1);
72231+ atomic_read_unchecked(&inst->id_sequence), 1);
72232 }
72233
72234 static const struct seq_operations nfqnl_seq_ops = {
72235diff -urNp linux-3.0.4/net/netfilter/xt_gradm.c linux-3.0.4/net/netfilter/xt_gradm.c
72236--- linux-3.0.4/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
72237+++ linux-3.0.4/net/netfilter/xt_gradm.c 2011-08-23 21:48:14.000000000 -0400
72238@@ -0,0 +1,51 @@
72239+/*
72240+ * gradm match for netfilter
72241