]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.2.2-3.0.4-201110060421.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-3.0.4-201110060421.patch
CommitLineData
635d4896
PK
1diff -urNp linux-3.0.4/arch/alpha/include/asm/elf.h linux-3.0.4/arch/alpha/include/asm/elf.h
2--- linux-3.0.4/arch/alpha/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
3+++ linux-3.0.4/arch/alpha/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
4@@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8+#ifdef CONFIG_PAX_ASLR
9+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10+
11+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13+#endif
14+
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18diff -urNp linux-3.0.4/arch/alpha/include/asm/pgtable.h linux-3.0.4/arch/alpha/include/asm/pgtable.h
19--- linux-3.0.4/arch/alpha/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
20+++ linux-3.0.4/arch/alpha/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
21@@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25+
26+#ifdef CONFIG_PAX_PAGEEXEC
27+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30+#else
31+# define PAGE_SHARED_NOEXEC PAGE_SHARED
32+# define PAGE_COPY_NOEXEC PAGE_COPY
33+# define PAGE_READONLY_NOEXEC PAGE_READONLY
34+#endif
35+
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39diff -urNp linux-3.0.4/arch/alpha/kernel/module.c linux-3.0.4/arch/alpha/kernel/module.c
40--- linux-3.0.4/arch/alpha/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
41+++ linux-3.0.4/arch/alpha/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
42@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46- gp = (u64)me->module_core + me->core_size - 0x8000;
47+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51diff -urNp linux-3.0.4/arch/alpha/kernel/osf_sys.c linux-3.0.4/arch/alpha/kernel/osf_sys.c
52--- linux-3.0.4/arch/alpha/kernel/osf_sys.c 2011-07-21 22:17:23.000000000 -0400
53+++ linux-3.0.4/arch/alpha/kernel/osf_sys.c 2011-08-23 21:47:55.000000000 -0400
54@@ -1145,7 +1145,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58- if (!vma || addr + len <= vma->vm_start)
59+ if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63@@ -1181,6 +1181,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67+#ifdef CONFIG_PAX_RANDMMAP
68+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69+#endif
70+
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74@@ -1188,8 +1192,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79- len, limit);
80+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81+
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
85diff -urNp linux-3.0.4/arch/alpha/mm/fault.c linux-3.0.4/arch/alpha/mm/fault.c
86--- linux-3.0.4/arch/alpha/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
87+++ linux-3.0.4/arch/alpha/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
88@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92+#ifdef CONFIG_PAX_PAGEEXEC
93+/*
94+ * PaX: decide what to do with offenders (regs->pc = fault address)
95+ *
96+ * returns 1 when task should be killed
97+ * 2 when patched PLT trampoline was detected
98+ * 3 when unpatched PLT trampoline was detected
99+ */
100+static int pax_handle_fetch_fault(struct pt_regs *regs)
101+{
102+
103+#ifdef CONFIG_PAX_EMUPLT
104+ int err;
105+
106+ do { /* PaX: patched PLT emulation #1 */
107+ unsigned int ldah, ldq, jmp;
108+
109+ err = get_user(ldah, (unsigned int *)regs->pc);
110+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112+
113+ if (err)
114+ break;
115+
116+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118+ jmp == 0x6BFB0000U)
119+ {
120+ unsigned long r27, addr;
121+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123+
124+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125+ err = get_user(r27, (unsigned long *)addr);
126+ if (err)
127+ break;
128+
129+ regs->r27 = r27;
130+ regs->pc = r27;
131+ return 2;
132+ }
133+ } while (0);
134+
135+ do { /* PaX: patched PLT emulation #2 */
136+ unsigned int ldah, lda, br;
137+
138+ err = get_user(ldah, (unsigned int *)regs->pc);
139+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
140+ err |= get_user(br, (unsigned int *)(regs->pc+8));
141+
142+ if (err)
143+ break;
144+
145+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
147+ (br & 0xFFE00000U) == 0xC3E00000U)
148+ {
149+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152+
153+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155+ return 2;
156+ }
157+ } while (0);
158+
159+ do { /* PaX: unpatched PLT emulation */
160+ unsigned int br;
161+
162+ err = get_user(br, (unsigned int *)regs->pc);
163+
164+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165+ unsigned int br2, ldq, nop, jmp;
166+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167+
168+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169+ err = get_user(br2, (unsigned int *)addr);
170+ err |= get_user(ldq, (unsigned int *)(addr+4));
171+ err |= get_user(nop, (unsigned int *)(addr+8));
172+ err |= get_user(jmp, (unsigned int *)(addr+12));
173+ err |= get_user(resolver, (unsigned long *)(addr+16));
174+
175+ if (err)
176+ break;
177+
178+ if (br2 == 0xC3600000U &&
179+ ldq == 0xA77B000CU &&
180+ nop == 0x47FF041FU &&
181+ jmp == 0x6B7B0000U)
182+ {
183+ regs->r28 = regs->pc+4;
184+ regs->r27 = addr+16;
185+ regs->pc = resolver;
186+ return 3;
187+ }
188+ }
189+ } while (0);
190+#endif
191+
192+ return 1;
193+}
194+
195+void pax_report_insns(void *pc, void *sp)
196+{
197+ unsigned long i;
198+
199+ printk(KERN_ERR "PAX: bytes at PC: ");
200+ for (i = 0; i < 5; i++) {
201+ unsigned int c;
202+ if (get_user(c, (unsigned int *)pc+i))
203+ printk(KERN_CONT "???????? ");
204+ else
205+ printk(KERN_CONT "%08x ", c);
206+ }
207+ printk("\n");
208+}
209+#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217- if (!(vma->vm_flags & VM_EXEC))
218+ if (!(vma->vm_flags & VM_EXEC)) {
219+
220+#ifdef CONFIG_PAX_PAGEEXEC
221+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222+ goto bad_area;
223+
224+ up_read(&mm->mmap_sem);
225+ switch (pax_handle_fetch_fault(regs)) {
226+
227+#ifdef CONFIG_PAX_EMUPLT
228+ case 2:
229+ case 3:
230+ return;
231+#endif
232+
233+ }
234+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235+ do_group_exit(SIGKILL);
236+#else
237 goto bad_area;
238+#endif
239+
240+ }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
244diff -urNp linux-3.0.4/arch/arm/include/asm/elf.h linux-3.0.4/arch/arm/include/asm/elf.h
245--- linux-3.0.4/arch/arm/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
246+++ linux-3.0.4/arch/arm/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
247@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253+
254+#ifdef CONFIG_PAX_ASLR
255+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256+
257+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259+#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263@@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t
264 extern void elf_set_personality(const struct elf32_hdr *);
265 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
266
267-struct mm_struct;
268-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
269-#define arch_randomize_brk arch_randomize_brk
270-
271 extern int vectors_user_mapping(void);
272 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
273 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
274diff -urNp linux-3.0.4/arch/arm/include/asm/kmap_types.h linux-3.0.4/arch/arm/include/asm/kmap_types.h
275--- linux-3.0.4/arch/arm/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
276+++ linux-3.0.4/arch/arm/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
277@@ -21,6 +21,7 @@ enum km_type {
278 KM_L1_CACHE,
279 KM_L2_CACHE,
280 KM_KDB,
281+ KM_CLEARPAGE,
282 KM_TYPE_NR
283 };
284
285diff -urNp linux-3.0.4/arch/arm/include/asm/uaccess.h linux-3.0.4/arch/arm/include/asm/uaccess.h
286--- linux-3.0.4/arch/arm/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
287+++ linux-3.0.4/arch/arm/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
288@@ -22,6 +22,8 @@
289 #define VERIFY_READ 0
290 #define VERIFY_WRITE 1
291
292+extern void check_object_size(const void *ptr, unsigned long n, bool to);
293+
294 /*
295 * The exception table consists of pairs of addresses: the first is the
296 * address of an instruction that is allowed to fault, and the second is
297@@ -387,8 +389,23 @@ do { \
298
299
300 #ifdef CONFIG_MMU
301-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
302-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
303+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
304+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
305+
306+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
307+{
308+ if (!__builtin_constant_p(n))
309+ check_object_size(to, n, false);
310+ return ___copy_from_user(to, from, n);
311+}
312+
313+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
314+{
315+ if (!__builtin_constant_p(n))
316+ check_object_size(from, n, true);
317+ return ___copy_to_user(to, from, n);
318+}
319+
320 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
321 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
322 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
323@@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
324
325 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
326 {
327+ if ((long)n < 0)
328+ return n;
329+
330 if (access_ok(VERIFY_READ, from, n))
331 n = __copy_from_user(to, from, n);
332 else /* security hole - plug it */
333@@ -412,6 +432,9 @@ static inline unsigned long __must_check
334
335 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
336 {
337+ if ((long)n < 0)
338+ return n;
339+
340 if (access_ok(VERIFY_WRITE, to, n))
341 n = __copy_to_user(to, from, n);
342 return n;
343diff -urNp linux-3.0.4/arch/arm/kernel/armksyms.c linux-3.0.4/arch/arm/kernel/armksyms.c
344--- linux-3.0.4/arch/arm/kernel/armksyms.c 2011-07-21 22:17:23.000000000 -0400
345+++ linux-3.0.4/arch/arm/kernel/armksyms.c 2011-08-23 21:47:55.000000000 -0400
346@@ -98,8 +98,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
347 #ifdef CONFIG_MMU
348 EXPORT_SYMBOL(copy_page);
349
350-EXPORT_SYMBOL(__copy_from_user);
351-EXPORT_SYMBOL(__copy_to_user);
352+EXPORT_SYMBOL(___copy_from_user);
353+EXPORT_SYMBOL(___copy_to_user);
354 EXPORT_SYMBOL(__clear_user);
355
356 EXPORT_SYMBOL(__get_user_1);
357diff -urNp linux-3.0.4/arch/arm/kernel/process.c linux-3.0.4/arch/arm/kernel/process.c
358--- linux-3.0.4/arch/arm/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
359+++ linux-3.0.4/arch/arm/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
360@@ -28,7 +28,6 @@
361 #include <linux/tick.h>
362 #include <linux/utsname.h>
363 #include <linux/uaccess.h>
364-#include <linux/random.h>
365 #include <linux/hw_breakpoint.h>
366
367 #include <asm/cacheflush.h>
368@@ -479,12 +478,6 @@ unsigned long get_wchan(struct task_stru
369 return 0;
370 }
371
372-unsigned long arch_randomize_brk(struct mm_struct *mm)
373-{
374- unsigned long range_end = mm->brk + 0x02000000;
375- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
376-}
377-
378 #ifdef CONFIG_MMU
379 /*
380 * The vectors page is always readable from user space for the
381diff -urNp linux-3.0.4/arch/arm/kernel/traps.c linux-3.0.4/arch/arm/kernel/traps.c
382--- linux-3.0.4/arch/arm/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
383+++ linux-3.0.4/arch/arm/kernel/traps.c 2011-08-23 21:48:14.000000000 -0400
384@@ -257,6 +257,8 @@ static int __die(const char *str, int er
385
386 static DEFINE_SPINLOCK(die_lock);
387
388+extern void gr_handle_kernel_exploit(void);
389+
390 /*
391 * This function is protected against re-entrancy.
392 */
393@@ -284,6 +286,9 @@ void die(const char *str, struct pt_regs
394 panic("Fatal exception in interrupt");
395 if (panic_on_oops)
396 panic("Fatal exception");
397+
398+ gr_handle_kernel_exploit();
399+
400 if (ret != NOTIFY_STOP)
401 do_exit(SIGSEGV);
402 }
403diff -urNp linux-3.0.4/arch/arm/lib/copy_from_user.S linux-3.0.4/arch/arm/lib/copy_from_user.S
404--- linux-3.0.4/arch/arm/lib/copy_from_user.S 2011-07-21 22:17:23.000000000 -0400
405+++ linux-3.0.4/arch/arm/lib/copy_from_user.S 2011-08-23 21:47:55.000000000 -0400
406@@ -16,7 +16,7 @@
407 /*
408 * Prototype:
409 *
410- * size_t __copy_from_user(void *to, const void *from, size_t n)
411+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
412 *
413 * Purpose:
414 *
415@@ -84,11 +84,11 @@
416
417 .text
418
419-ENTRY(__copy_from_user)
420+ENTRY(___copy_from_user)
421
422 #include "copy_template.S"
423
424-ENDPROC(__copy_from_user)
425+ENDPROC(___copy_from_user)
426
427 .pushsection .fixup,"ax"
428 .align 0
429diff -urNp linux-3.0.4/arch/arm/lib/copy_to_user.S linux-3.0.4/arch/arm/lib/copy_to_user.S
430--- linux-3.0.4/arch/arm/lib/copy_to_user.S 2011-07-21 22:17:23.000000000 -0400
431+++ linux-3.0.4/arch/arm/lib/copy_to_user.S 2011-08-23 21:47:55.000000000 -0400
432@@ -16,7 +16,7 @@
433 /*
434 * Prototype:
435 *
436- * size_t __copy_to_user(void *to, const void *from, size_t n)
437+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
438 *
439 * Purpose:
440 *
441@@ -88,11 +88,11 @@
442 .text
443
444 ENTRY(__copy_to_user_std)
445-WEAK(__copy_to_user)
446+WEAK(___copy_to_user)
447
448 #include "copy_template.S"
449
450-ENDPROC(__copy_to_user)
451+ENDPROC(___copy_to_user)
452 ENDPROC(__copy_to_user_std)
453
454 .pushsection .fixup,"ax"
455diff -urNp linux-3.0.4/arch/arm/lib/uaccess.S linux-3.0.4/arch/arm/lib/uaccess.S
456--- linux-3.0.4/arch/arm/lib/uaccess.S 2011-07-21 22:17:23.000000000 -0400
457+++ linux-3.0.4/arch/arm/lib/uaccess.S 2011-08-23 21:47:55.000000000 -0400
458@@ -20,7 +20,7 @@
459
460 #define PAGE_SHIFT 12
461
462-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
463+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
464 * Purpose : copy a block to user memory from kernel memory
465 * Params : to - user memory
466 * : from - kernel memory
467@@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May f
468 sub r2, r2, ip
469 b .Lc2u_dest_aligned
470
471-ENTRY(__copy_to_user)
472+ENTRY(___copy_to_user)
473 stmfd sp!, {r2, r4 - r7, lr}
474 cmp r2, #4
475 blt .Lc2u_not_enough
476@@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May f
477 ldrgtb r3, [r1], #0
478 USER( T(strgtb) r3, [r0], #1) @ May fault
479 b .Lc2u_finished
480-ENDPROC(__copy_to_user)
481+ENDPROC(___copy_to_user)
482
483 .pushsection .fixup,"ax"
484 .align 0
485 9001: ldmfd sp!, {r0, r4 - r7, pc}
486 .popsection
487
488-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
489+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
490 * Purpose : copy a block from user memory to kernel memory
491 * Params : to - kernel memory
492 * : from - user memory
493@@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May f
494 sub r2, r2, ip
495 b .Lcfu_dest_aligned
496
497-ENTRY(__copy_from_user)
498+ENTRY(___copy_from_user)
499 stmfd sp!, {r0, r2, r4 - r7, lr}
500 cmp r2, #4
501 blt .Lcfu_not_enough
502@@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May f
503 USER( T(ldrgtb) r3, [r1], #1) @ May fault
504 strgtb r3, [r0], #1
505 b .Lcfu_finished
506-ENDPROC(__copy_from_user)
507+ENDPROC(___copy_from_user)
508
509 .pushsection .fixup,"ax"
510 .align 0
511diff -urNp linux-3.0.4/arch/arm/lib/uaccess_with_memcpy.c linux-3.0.4/arch/arm/lib/uaccess_with_memcpy.c
512--- linux-3.0.4/arch/arm/lib/uaccess_with_memcpy.c 2011-07-21 22:17:23.000000000 -0400
513+++ linux-3.0.4/arch/arm/lib/uaccess_with_memcpy.c 2011-08-23 21:47:55.000000000 -0400
514@@ -103,7 +103,7 @@ out:
515 }
516
517 unsigned long
518-__copy_to_user(void __user *to, const void *from, unsigned long n)
519+___copy_to_user(void __user *to, const void *from, unsigned long n)
520 {
521 /*
522 * This test is stubbed out of the main function above to keep
523diff -urNp linux-3.0.4/arch/arm/mach-ux500/mbox-db5500.c linux-3.0.4/arch/arm/mach-ux500/mbox-db5500.c
524--- linux-3.0.4/arch/arm/mach-ux500/mbox-db5500.c 2011-07-21 22:17:23.000000000 -0400
525+++ linux-3.0.4/arch/arm/mach-ux500/mbox-db5500.c 2011-08-23 21:48:14.000000000 -0400
526@@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct dev
527 return sprintf(buf, "0x%X\n", mbox_value);
528 }
529
530-static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
531+static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
532
533 static int mbox_show(struct seq_file *s, void *data)
534 {
535diff -urNp linux-3.0.4/arch/arm/mm/fault.c linux-3.0.4/arch/arm/mm/fault.c
536--- linux-3.0.4/arch/arm/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
537+++ linux-3.0.4/arch/arm/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
538@@ -182,6 +182,13 @@ __do_user_fault(struct task_struct *tsk,
539 }
540 #endif
541
542+#ifdef CONFIG_PAX_PAGEEXEC
543+ if (fsr & FSR_LNX_PF) {
544+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
545+ do_group_exit(SIGKILL);
546+ }
547+#endif
548+
549 tsk->thread.address = addr;
550 tsk->thread.error_code = fsr;
551 tsk->thread.trap_no = 14;
552@@ -379,6 +386,33 @@ do_page_fault(unsigned long addr, unsign
553 }
554 #endif /* CONFIG_MMU */
555
556+#ifdef CONFIG_PAX_PAGEEXEC
557+void pax_report_insns(void *pc, void *sp)
558+{
559+ long i;
560+
561+ printk(KERN_ERR "PAX: bytes at PC: ");
562+ for (i = 0; i < 20; i++) {
563+ unsigned char c;
564+ if (get_user(c, (__force unsigned char __user *)pc+i))
565+ printk(KERN_CONT "?? ");
566+ else
567+ printk(KERN_CONT "%02x ", c);
568+ }
569+ printk("\n");
570+
571+ printk(KERN_ERR "PAX: bytes at SP-4: ");
572+ for (i = -1; i < 20; i++) {
573+ unsigned long c;
574+ if (get_user(c, (__force unsigned long __user *)sp+i))
575+ printk(KERN_CONT "???????? ");
576+ else
577+ printk(KERN_CONT "%08lx ", c);
578+ }
579+ printk("\n");
580+}
581+#endif
582+
583 /*
584 * First Level Translation Fault Handler
585 *
586diff -urNp linux-3.0.4/arch/arm/mm/mmap.c linux-3.0.4/arch/arm/mm/mmap.c
587--- linux-3.0.4/arch/arm/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
588+++ linux-3.0.4/arch/arm/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
589@@ -65,6 +65,10 @@ arch_get_unmapped_area(struct file *filp
590 if (len > TASK_SIZE)
591 return -ENOMEM;
592
593+#ifdef CONFIG_PAX_RANDMMAP
594+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
595+#endif
596+
597 if (addr) {
598 if (do_align)
599 addr = COLOUR_ALIGN(addr, pgoff);
600@@ -72,15 +76,14 @@ arch_get_unmapped_area(struct file *filp
601 addr = PAGE_ALIGN(addr);
602
603 vma = find_vma(mm, addr);
604- if (TASK_SIZE - len >= addr &&
605- (!vma || addr + len <= vma->vm_start))
606+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
607 return addr;
608 }
609 if (len > mm->cached_hole_size) {
610- start_addr = addr = mm->free_area_cache;
611+ start_addr = addr = mm->free_area_cache;
612 } else {
613- start_addr = addr = TASK_UNMAPPED_BASE;
614- mm->cached_hole_size = 0;
615+ start_addr = addr = mm->mmap_base;
616+ mm->cached_hole_size = 0;
617 }
618 /* 8 bits of randomness in 20 address space bits */
619 if ((current->flags & PF_RANDOMIZE) &&
620@@ -100,14 +103,14 @@ full_search:
621 * Start a new search - just in case we missed
622 * some holes.
623 */
624- if (start_addr != TASK_UNMAPPED_BASE) {
625- start_addr = addr = TASK_UNMAPPED_BASE;
626+ if (start_addr != mm->mmap_base) {
627+ start_addr = addr = mm->mmap_base;
628 mm->cached_hole_size = 0;
629 goto full_search;
630 }
631 return -ENOMEM;
632 }
633- if (!vma || addr + len <= vma->vm_start) {
634+ if (check_heap_stack_gap(vma, addr, len)) {
635 /*
636 * Remember the place where we stopped the search:
637 */
638diff -urNp linux-3.0.4/arch/avr32/include/asm/elf.h linux-3.0.4/arch/avr32/include/asm/elf.h
639--- linux-3.0.4/arch/avr32/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
640+++ linux-3.0.4/arch/avr32/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
641@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpreg
642 the loader. We need to make sure that it is out of the way of the program
643 that it will "exec", and that there is sufficient room for the brk. */
644
645-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
646+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
647
648+#ifdef CONFIG_PAX_ASLR
649+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
650+
651+#define PAX_DELTA_MMAP_LEN 15
652+#define PAX_DELTA_STACK_LEN 15
653+#endif
654
655 /* This yields a mask that user programs can use to figure out what
656 instruction set this CPU supports. This could be done in user space,
657diff -urNp linux-3.0.4/arch/avr32/include/asm/kmap_types.h linux-3.0.4/arch/avr32/include/asm/kmap_types.h
658--- linux-3.0.4/arch/avr32/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
659+++ linux-3.0.4/arch/avr32/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
660@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
661 D(11) KM_IRQ1,
662 D(12) KM_SOFTIRQ0,
663 D(13) KM_SOFTIRQ1,
664-D(14) KM_TYPE_NR
665+D(14) KM_CLEARPAGE,
666+D(15) KM_TYPE_NR
667 };
668
669 #undef D
670diff -urNp linux-3.0.4/arch/avr32/mm/fault.c linux-3.0.4/arch/avr32/mm/fault.c
671--- linux-3.0.4/arch/avr32/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
672+++ linux-3.0.4/arch/avr32/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
673@@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
674
675 int exception_trace = 1;
676
677+#ifdef CONFIG_PAX_PAGEEXEC
678+void pax_report_insns(void *pc, void *sp)
679+{
680+ unsigned long i;
681+
682+ printk(KERN_ERR "PAX: bytes at PC: ");
683+ for (i = 0; i < 20; i++) {
684+ unsigned char c;
685+ if (get_user(c, (unsigned char *)pc+i))
686+ printk(KERN_CONT "???????? ");
687+ else
688+ printk(KERN_CONT "%02x ", c);
689+ }
690+ printk("\n");
691+}
692+#endif
693+
694 /*
695 * This routine handles page faults. It determines the address and the
696 * problem, and then passes it off to one of the appropriate routines.
697@@ -156,6 +173,16 @@ bad_area:
698 up_read(&mm->mmap_sem);
699
700 if (user_mode(regs)) {
701+
702+#ifdef CONFIG_PAX_PAGEEXEC
703+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
704+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
705+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
706+ do_group_exit(SIGKILL);
707+ }
708+ }
709+#endif
710+
711 if (exception_trace && printk_ratelimit())
712 printk("%s%s[%d]: segfault at %08lx pc %08lx "
713 "sp %08lx ecr %lu\n",
714diff -urNp linux-3.0.4/arch/frv/include/asm/kmap_types.h linux-3.0.4/arch/frv/include/asm/kmap_types.h
715--- linux-3.0.4/arch/frv/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
716+++ linux-3.0.4/arch/frv/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
717@@ -23,6 +23,7 @@ enum km_type {
718 KM_IRQ1,
719 KM_SOFTIRQ0,
720 KM_SOFTIRQ1,
721+ KM_CLEARPAGE,
722 KM_TYPE_NR
723 };
724
725diff -urNp linux-3.0.4/arch/frv/mm/elf-fdpic.c linux-3.0.4/arch/frv/mm/elf-fdpic.c
726--- linux-3.0.4/arch/frv/mm/elf-fdpic.c 2011-07-21 22:17:23.000000000 -0400
727+++ linux-3.0.4/arch/frv/mm/elf-fdpic.c 2011-08-23 21:47:55.000000000 -0400
728@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
729 if (addr) {
730 addr = PAGE_ALIGN(addr);
731 vma = find_vma(current->mm, addr);
732- if (TASK_SIZE - len >= addr &&
733- (!vma || addr + len <= vma->vm_start))
734+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
735 goto success;
736 }
737
738@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
739 for (; vma; vma = vma->vm_next) {
740 if (addr > limit)
741 break;
742- if (addr + len <= vma->vm_start)
743+ if (check_heap_stack_gap(vma, addr, len))
744 goto success;
745 addr = vma->vm_end;
746 }
747@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
748 for (; vma; vma = vma->vm_next) {
749 if (addr > limit)
750 break;
751- if (addr + len <= vma->vm_start)
752+ if (check_heap_stack_gap(vma, addr, len))
753 goto success;
754 addr = vma->vm_end;
755 }
756diff -urNp linux-3.0.4/arch/ia64/include/asm/elf.h linux-3.0.4/arch/ia64/include/asm/elf.h
757--- linux-3.0.4/arch/ia64/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
758+++ linux-3.0.4/arch/ia64/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
759@@ -42,6 +42,13 @@
760 */
761 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
762
763+#ifdef CONFIG_PAX_ASLR
764+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
765+
766+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
767+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
768+#endif
769+
770 #define PT_IA_64_UNWIND 0x70000001
771
772 /* IA-64 relocations: */
773diff -urNp linux-3.0.4/arch/ia64/include/asm/pgtable.h linux-3.0.4/arch/ia64/include/asm/pgtable.h
774--- linux-3.0.4/arch/ia64/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
775+++ linux-3.0.4/arch/ia64/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
776@@ -12,7 +12,7 @@
777 * David Mosberger-Tang <davidm@hpl.hp.com>
778 */
779
780-
781+#include <linux/const.h>
782 #include <asm/mman.h>
783 #include <asm/page.h>
784 #include <asm/processor.h>
785@@ -143,6 +143,17 @@
786 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
787 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
788 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
789+
790+#ifdef CONFIG_PAX_PAGEEXEC
791+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
792+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
793+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
794+#else
795+# define PAGE_SHARED_NOEXEC PAGE_SHARED
796+# define PAGE_READONLY_NOEXEC PAGE_READONLY
797+# define PAGE_COPY_NOEXEC PAGE_COPY
798+#endif
799+
800 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
801 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
802 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
803diff -urNp linux-3.0.4/arch/ia64/include/asm/spinlock.h linux-3.0.4/arch/ia64/include/asm/spinlock.h
804--- linux-3.0.4/arch/ia64/include/asm/spinlock.h 2011-07-21 22:17:23.000000000 -0400
805+++ linux-3.0.4/arch/ia64/include/asm/spinlock.h 2011-08-23 21:47:55.000000000 -0400
806@@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
807 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
808
809 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
810- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
811+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
812 }
813
814 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
815diff -urNp linux-3.0.4/arch/ia64/include/asm/uaccess.h linux-3.0.4/arch/ia64/include/asm/uaccess.h
816--- linux-3.0.4/arch/ia64/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
817+++ linux-3.0.4/arch/ia64/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
818@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
819 const void *__cu_from = (from); \
820 long __cu_len = (n); \
821 \
822- if (__access_ok(__cu_to, __cu_len, get_fs())) \
823+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
824 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
825 __cu_len; \
826 })
827@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
828 long __cu_len = (n); \
829 \
830 __chk_user_ptr(__cu_from); \
831- if (__access_ok(__cu_from, __cu_len, get_fs())) \
832+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
833 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
834 __cu_len; \
835 })
836diff -urNp linux-3.0.4/arch/ia64/kernel/module.c linux-3.0.4/arch/ia64/kernel/module.c
837--- linux-3.0.4/arch/ia64/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
838+++ linux-3.0.4/arch/ia64/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
839@@ -315,8 +315,7 @@ module_alloc (unsigned long size)
840 void
841 module_free (struct module *mod, void *module_region)
842 {
843- if (mod && mod->arch.init_unw_table &&
844- module_region == mod->module_init) {
845+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
846 unw_remove_unwind_table(mod->arch.init_unw_table);
847 mod->arch.init_unw_table = NULL;
848 }
849@@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
850 }
851
852 static inline int
853+in_init_rx (const struct module *mod, uint64_t addr)
854+{
855+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
856+}
857+
858+static inline int
859+in_init_rw (const struct module *mod, uint64_t addr)
860+{
861+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
862+}
863+
864+static inline int
865 in_init (const struct module *mod, uint64_t addr)
866 {
867- return addr - (uint64_t) mod->module_init < mod->init_size;
868+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
869+}
870+
871+static inline int
872+in_core_rx (const struct module *mod, uint64_t addr)
873+{
874+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
875+}
876+
877+static inline int
878+in_core_rw (const struct module *mod, uint64_t addr)
879+{
880+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
881 }
882
883 static inline int
884 in_core (const struct module *mod, uint64_t addr)
885 {
886- return addr - (uint64_t) mod->module_core < mod->core_size;
887+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
888 }
889
890 static inline int
891@@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
892 break;
893
894 case RV_BDREL:
895- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
896+ if (in_init_rx(mod, val))
897+ val -= (uint64_t) mod->module_init_rx;
898+ else if (in_init_rw(mod, val))
899+ val -= (uint64_t) mod->module_init_rw;
900+ else if (in_core_rx(mod, val))
901+ val -= (uint64_t) mod->module_core_rx;
902+ else if (in_core_rw(mod, val))
903+ val -= (uint64_t) mod->module_core_rw;
904 break;
905
906 case RV_LTV:
907@@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
908 * addresses have been selected...
909 */
910 uint64_t gp;
911- if (mod->core_size > MAX_LTOFF)
912+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
913 /*
914 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
915 * at the end of the module.
916 */
917- gp = mod->core_size - MAX_LTOFF / 2;
918+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
919 else
920- gp = mod->core_size / 2;
921- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
922+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
923+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
924 mod->arch.gp = gp;
925 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
926 }
927diff -urNp linux-3.0.4/arch/ia64/kernel/sys_ia64.c linux-3.0.4/arch/ia64/kernel/sys_ia64.c
928--- linux-3.0.4/arch/ia64/kernel/sys_ia64.c 2011-07-21 22:17:23.000000000 -0400
929+++ linux-3.0.4/arch/ia64/kernel/sys_ia64.c 2011-08-23 21:47:55.000000000 -0400
930@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
931 if (REGION_NUMBER(addr) == RGN_HPAGE)
932 addr = 0;
933 #endif
934+
935+#ifdef CONFIG_PAX_RANDMMAP
936+ if (mm->pax_flags & MF_PAX_RANDMMAP)
937+ addr = mm->free_area_cache;
938+ else
939+#endif
940+
941 if (!addr)
942 addr = mm->free_area_cache;
943
944@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
945 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
946 /* At this point: (!vma || addr < vma->vm_end). */
947 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
948- if (start_addr != TASK_UNMAPPED_BASE) {
949+ if (start_addr != mm->mmap_base) {
950 /* Start a new search --- just in case we missed some holes. */
951- addr = TASK_UNMAPPED_BASE;
952+ addr = mm->mmap_base;
953 goto full_search;
954 }
955 return -ENOMEM;
956 }
957- if (!vma || addr + len <= vma->vm_start) {
958+ if (check_heap_stack_gap(vma, addr, len)) {
959 /* Remember the address where we stopped this search: */
960 mm->free_area_cache = addr + len;
961 return addr;
962diff -urNp linux-3.0.4/arch/ia64/kernel/vmlinux.lds.S linux-3.0.4/arch/ia64/kernel/vmlinux.lds.S
963--- linux-3.0.4/arch/ia64/kernel/vmlinux.lds.S 2011-07-21 22:17:23.000000000 -0400
964+++ linux-3.0.4/arch/ia64/kernel/vmlinux.lds.S 2011-08-23 21:47:55.000000000 -0400
965@@ -199,7 +199,7 @@ SECTIONS {
966 /* Per-cpu data: */
967 . = ALIGN(PERCPU_PAGE_SIZE);
968 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
969- __phys_per_cpu_start = __per_cpu_load;
970+ __phys_per_cpu_start = per_cpu_load;
971 /*
972 * ensure percpu data fits
973 * into percpu page size
974diff -urNp linux-3.0.4/arch/ia64/mm/fault.c linux-3.0.4/arch/ia64/mm/fault.c
975--- linux-3.0.4/arch/ia64/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
976+++ linux-3.0.4/arch/ia64/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
977@@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned
978 return pte_present(pte);
979 }
980
981+#ifdef CONFIG_PAX_PAGEEXEC
982+void pax_report_insns(void *pc, void *sp)
983+{
984+ unsigned long i;
985+
986+ printk(KERN_ERR "PAX: bytes at PC: ");
987+ for (i = 0; i < 8; i++) {
988+ unsigned int c;
989+ if (get_user(c, (unsigned int *)pc+i))
990+ printk(KERN_CONT "???????? ");
991+ else
992+ printk(KERN_CONT "%08x ", c);
993+ }
994+ printk("\n");
995+}
996+#endif
997+
998 void __kprobes
999 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1000 {
1001@@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long addres
1002 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1003 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1004
1005- if ((vma->vm_flags & mask) != mask)
1006+ if ((vma->vm_flags & mask) != mask) {
1007+
1008+#ifdef CONFIG_PAX_PAGEEXEC
1009+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1010+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1011+ goto bad_area;
1012+
1013+ up_read(&mm->mmap_sem);
1014+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1015+ do_group_exit(SIGKILL);
1016+ }
1017+#endif
1018+
1019 goto bad_area;
1020
1021+ }
1022+
1023 /*
1024 * If for any reason at all we couldn't handle the fault, make
1025 * sure we exit gracefully rather than endlessly redo the
1026diff -urNp linux-3.0.4/arch/ia64/mm/hugetlbpage.c linux-3.0.4/arch/ia64/mm/hugetlbpage.c
1027--- linux-3.0.4/arch/ia64/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
1028+++ linux-3.0.4/arch/ia64/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
1029@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(
1030 /* At this point: (!vmm || addr < vmm->vm_end). */
1031 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1032 return -ENOMEM;
1033- if (!vmm || (addr + len) <= vmm->vm_start)
1034+ if (check_heap_stack_gap(vmm, addr, len))
1035 return addr;
1036 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1037 }
1038diff -urNp linux-3.0.4/arch/ia64/mm/init.c linux-3.0.4/arch/ia64/mm/init.c
1039--- linux-3.0.4/arch/ia64/mm/init.c 2011-07-21 22:17:23.000000000 -0400
1040+++ linux-3.0.4/arch/ia64/mm/init.c 2011-08-23 21:47:55.000000000 -0400
1041@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1042 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1043 vma->vm_end = vma->vm_start + PAGE_SIZE;
1044 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1045+
1046+#ifdef CONFIG_PAX_PAGEEXEC
1047+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1048+ vma->vm_flags &= ~VM_EXEC;
1049+
1050+#ifdef CONFIG_PAX_MPROTECT
1051+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1052+ vma->vm_flags &= ~VM_MAYEXEC;
1053+#endif
1054+
1055+ }
1056+#endif
1057+
1058 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1059 down_write(&current->mm->mmap_sem);
1060 if (insert_vm_struct(current->mm, vma)) {
1061diff -urNp linux-3.0.4/arch/m32r/lib/usercopy.c linux-3.0.4/arch/m32r/lib/usercopy.c
1062--- linux-3.0.4/arch/m32r/lib/usercopy.c 2011-07-21 22:17:23.000000000 -0400
1063+++ linux-3.0.4/arch/m32r/lib/usercopy.c 2011-08-23 21:47:55.000000000 -0400
1064@@ -14,6 +14,9 @@
1065 unsigned long
1066 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1067 {
1068+ if ((long)n < 0)
1069+ return n;
1070+
1071 prefetch(from);
1072 if (access_ok(VERIFY_WRITE, to, n))
1073 __copy_user(to,from,n);
1074@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1075 unsigned long
1076 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1077 {
1078+ if ((long)n < 0)
1079+ return n;
1080+
1081 prefetchw(to);
1082 if (access_ok(VERIFY_READ, from, n))
1083 __copy_user_zeroing(to,from,n);
1084diff -urNp linux-3.0.4/arch/mips/include/asm/elf.h linux-3.0.4/arch/mips/include/asm/elf.h
1085--- linux-3.0.4/arch/mips/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1086+++ linux-3.0.4/arch/mips/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1087@@ -372,13 +372,16 @@ extern const char *__elf_platform;
1088 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1089 #endif
1090
1091+#ifdef CONFIG_PAX_ASLR
1092+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1093+
1094+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1095+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1096+#endif
1097+
1098 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1099 struct linux_binprm;
1100 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1101 int uses_interp);
1102
1103-struct mm_struct;
1104-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1105-#define arch_randomize_brk arch_randomize_brk
1106-
1107 #endif /* _ASM_ELF_H */
1108diff -urNp linux-3.0.4/arch/mips/include/asm/page.h linux-3.0.4/arch/mips/include/asm/page.h
1109--- linux-3.0.4/arch/mips/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
1110+++ linux-3.0.4/arch/mips/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
1111@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1112 #ifdef CONFIG_CPU_MIPS32
1113 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1114 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1115- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1116+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1117 #else
1118 typedef struct { unsigned long long pte; } pte_t;
1119 #define pte_val(x) ((x).pte)
1120diff -urNp linux-3.0.4/arch/mips/include/asm/system.h linux-3.0.4/arch/mips/include/asm/system.h
1121--- linux-3.0.4/arch/mips/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
1122+++ linux-3.0.4/arch/mips/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
1123@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1124 */
1125 #define __ARCH_WANT_UNLOCKED_CTXSW
1126
1127-extern unsigned long arch_align_stack(unsigned long sp);
1128+#define arch_align_stack(x) ((x) & ~0xfUL)
1129
1130 #endif /* _ASM_SYSTEM_H */
1131diff -urNp linux-3.0.4/arch/mips/kernel/binfmt_elfn32.c linux-3.0.4/arch/mips/kernel/binfmt_elfn32.c
1132--- linux-3.0.4/arch/mips/kernel/binfmt_elfn32.c 2011-07-21 22:17:23.000000000 -0400
1133+++ linux-3.0.4/arch/mips/kernel/binfmt_elfn32.c 2011-08-23 21:47:55.000000000 -0400
1134@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1135 #undef ELF_ET_DYN_BASE
1136 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1137
1138+#ifdef CONFIG_PAX_ASLR
1139+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1140+
1141+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1142+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1143+#endif
1144+
1145 #include <asm/processor.h>
1146 #include <linux/module.h>
1147 #include <linux/elfcore.h>
1148diff -urNp linux-3.0.4/arch/mips/kernel/binfmt_elfo32.c linux-3.0.4/arch/mips/kernel/binfmt_elfo32.c
1149--- linux-3.0.4/arch/mips/kernel/binfmt_elfo32.c 2011-07-21 22:17:23.000000000 -0400
1150+++ linux-3.0.4/arch/mips/kernel/binfmt_elfo32.c 2011-08-23 21:47:55.000000000 -0400
1151@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1152 #undef ELF_ET_DYN_BASE
1153 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1154
1155+#ifdef CONFIG_PAX_ASLR
1156+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1157+
1158+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1159+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1160+#endif
1161+
1162 #include <asm/processor.h>
1163
1164 /*
1165diff -urNp linux-3.0.4/arch/mips/kernel/process.c linux-3.0.4/arch/mips/kernel/process.c
1166--- linux-3.0.4/arch/mips/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
1167+++ linux-3.0.4/arch/mips/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
1168@@ -473,15 +473,3 @@ unsigned long get_wchan(struct task_stru
1169 out:
1170 return pc;
1171 }
1172-
1173-/*
1174- * Don't forget that the stack pointer must be aligned on a 8 bytes
1175- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1176- */
1177-unsigned long arch_align_stack(unsigned long sp)
1178-{
1179- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1180- sp -= get_random_int() & ~PAGE_MASK;
1181-
1182- return sp & ALMASK;
1183-}
1184diff -urNp linux-3.0.4/arch/mips/mm/fault.c linux-3.0.4/arch/mips/mm/fault.c
1185--- linux-3.0.4/arch/mips/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
1186+++ linux-3.0.4/arch/mips/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
1187@@ -28,6 +28,23 @@
1188 #include <asm/highmem.h> /* For VMALLOC_END */
1189 #include <linux/kdebug.h>
1190
1191+#ifdef CONFIG_PAX_PAGEEXEC
1192+void pax_report_insns(void *pc, void *sp)
1193+{
1194+ unsigned long i;
1195+
1196+ printk(KERN_ERR "PAX: bytes at PC: ");
1197+ for (i = 0; i < 5; i++) {
1198+ unsigned int c;
1199+ if (get_user(c, (unsigned int *)pc+i))
1200+ printk(KERN_CONT "???????? ");
1201+ else
1202+ printk(KERN_CONT "%08x ", c);
1203+ }
1204+ printk("\n");
1205+}
1206+#endif
1207+
1208 /*
1209 * This routine handles page faults. It determines the address,
1210 * and the problem, and then passes it off to one of the appropriate
1211diff -urNp linux-3.0.4/arch/mips/mm/mmap.c linux-3.0.4/arch/mips/mm/mmap.c
1212--- linux-3.0.4/arch/mips/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
1213+++ linux-3.0.4/arch/mips/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
1214@@ -48,14 +48,18 @@ unsigned long arch_get_unmapped_area(str
1215 do_color_align = 0;
1216 if (filp || (flags & MAP_SHARED))
1217 do_color_align = 1;
1218+
1219+#ifdef CONFIG_PAX_RANDMMAP
1220+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1221+#endif
1222+
1223 if (addr) {
1224 if (do_color_align)
1225 addr = COLOUR_ALIGN(addr, pgoff);
1226 else
1227 addr = PAGE_ALIGN(addr);
1228 vmm = find_vma(current->mm, addr);
1229- if (TASK_SIZE - len >= addr &&
1230- (!vmm || addr + len <= vmm->vm_start))
1231+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
1232 return addr;
1233 }
1234 addr = current->mm->mmap_base;
1235@@ -68,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
1236 /* At this point: (!vmm || addr < vmm->vm_end). */
1237 if (TASK_SIZE - len < addr)
1238 return -ENOMEM;
1239- if (!vmm || addr + len <= vmm->vm_start)
1240+ if (check_heap_stack_gap(vmm, addr, len))
1241 return addr;
1242 addr = vmm->vm_end;
1243 if (do_color_align)
1244@@ -93,30 +97,3 @@ void arch_pick_mmap_layout(struct mm_str
1245 mm->get_unmapped_area = arch_get_unmapped_area;
1246 mm->unmap_area = arch_unmap_area;
1247 }
1248-
1249-static inline unsigned long brk_rnd(void)
1250-{
1251- unsigned long rnd = get_random_int();
1252-
1253- rnd = rnd << PAGE_SHIFT;
1254- /* 8MB for 32bit, 256MB for 64bit */
1255- if (TASK_IS_32BIT_ADDR)
1256- rnd = rnd & 0x7ffffful;
1257- else
1258- rnd = rnd & 0xffffffful;
1259-
1260- return rnd;
1261-}
1262-
1263-unsigned long arch_randomize_brk(struct mm_struct *mm)
1264-{
1265- unsigned long base = mm->brk;
1266- unsigned long ret;
1267-
1268- ret = PAGE_ALIGN(base + brk_rnd());
1269-
1270- if (ret < mm->brk)
1271- return mm->brk;
1272-
1273- return ret;
1274-}
1275diff -urNp linux-3.0.4/arch/parisc/include/asm/elf.h linux-3.0.4/arch/parisc/include/asm/elf.h
1276--- linux-3.0.4/arch/parisc/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1277+++ linux-3.0.4/arch/parisc/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1278@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration..
1279
1280 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1281
1282+#ifdef CONFIG_PAX_ASLR
1283+#define PAX_ELF_ET_DYN_BASE 0x10000UL
1284+
1285+#define PAX_DELTA_MMAP_LEN 16
1286+#define PAX_DELTA_STACK_LEN 16
1287+#endif
1288+
1289 /* This yields a mask that user programs can use to figure out what
1290 instruction set this CPU supports. This could be done in user space,
1291 but it's not easy, and we've already done it here. */
1292diff -urNp linux-3.0.4/arch/parisc/include/asm/pgtable.h linux-3.0.4/arch/parisc/include/asm/pgtable.h
1293--- linux-3.0.4/arch/parisc/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
1294+++ linux-3.0.4/arch/parisc/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
1295@@ -210,6 +210,17 @@ struct vm_area_struct;
1296 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1297 #define PAGE_COPY PAGE_EXECREAD
1298 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1299+
1300+#ifdef CONFIG_PAX_PAGEEXEC
1301+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1302+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1303+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1304+#else
1305+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1306+# define PAGE_COPY_NOEXEC PAGE_COPY
1307+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1308+#endif
1309+
1310 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1311 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
1312 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
1313diff -urNp linux-3.0.4/arch/parisc/kernel/module.c linux-3.0.4/arch/parisc/kernel/module.c
1314--- linux-3.0.4/arch/parisc/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
1315+++ linux-3.0.4/arch/parisc/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
1316@@ -98,16 +98,38 @@
1317
1318 /* three functions to determine where in the module core
1319 * or init pieces the location is */
1320+static inline int in_init_rx(struct module *me, void *loc)
1321+{
1322+ return (loc >= me->module_init_rx &&
1323+ loc < (me->module_init_rx + me->init_size_rx));
1324+}
1325+
1326+static inline int in_init_rw(struct module *me, void *loc)
1327+{
1328+ return (loc >= me->module_init_rw &&
1329+ loc < (me->module_init_rw + me->init_size_rw));
1330+}
1331+
1332 static inline int in_init(struct module *me, void *loc)
1333 {
1334- return (loc >= me->module_init &&
1335- loc <= (me->module_init + me->init_size));
1336+ return in_init_rx(me, loc) || in_init_rw(me, loc);
1337+}
1338+
1339+static inline int in_core_rx(struct module *me, void *loc)
1340+{
1341+ return (loc >= me->module_core_rx &&
1342+ loc < (me->module_core_rx + me->core_size_rx));
1343+}
1344+
1345+static inline int in_core_rw(struct module *me, void *loc)
1346+{
1347+ return (loc >= me->module_core_rw &&
1348+ loc < (me->module_core_rw + me->core_size_rw));
1349 }
1350
1351 static inline int in_core(struct module *me, void *loc)
1352 {
1353- return (loc >= me->module_core &&
1354- loc <= (me->module_core + me->core_size));
1355+ return in_core_rx(me, loc) || in_core_rw(me, loc);
1356 }
1357
1358 static inline int in_local(struct module *me, void *loc)
1359@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_
1360 }
1361
1362 /* align things a bit */
1363- me->core_size = ALIGN(me->core_size, 16);
1364- me->arch.got_offset = me->core_size;
1365- me->core_size += gots * sizeof(struct got_entry);
1366-
1367- me->core_size = ALIGN(me->core_size, 16);
1368- me->arch.fdesc_offset = me->core_size;
1369- me->core_size += fdescs * sizeof(Elf_Fdesc);
1370+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1371+ me->arch.got_offset = me->core_size_rw;
1372+ me->core_size_rw += gots * sizeof(struct got_entry);
1373+
1374+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1375+ me->arch.fdesc_offset = me->core_size_rw;
1376+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1377
1378 me->arch.got_max = gots;
1379 me->arch.fdesc_max = fdescs;
1380@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module
1381
1382 BUG_ON(value == 0);
1383
1384- got = me->module_core + me->arch.got_offset;
1385+ got = me->module_core_rw + me->arch.got_offset;
1386 for (i = 0; got[i].addr; i++)
1387 if (got[i].addr == value)
1388 goto out;
1389@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module
1390 #ifdef CONFIG_64BIT
1391 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1392 {
1393- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1394+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1395
1396 if (!value) {
1397 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1398@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module
1399
1400 /* Create new one */
1401 fdesc->addr = value;
1402- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1403+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1404 return (Elf_Addr)fdesc;
1405 }
1406 #endif /* CONFIG_64BIT */
1407@@ -857,7 +879,7 @@ register_unwind_table(struct module *me,
1408
1409 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1410 end = table + sechdrs[me->arch.unwind_section].sh_size;
1411- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1412+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1413
1414 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1415 me->arch.unwind_section, table, end, gp);
1416diff -urNp linux-3.0.4/arch/parisc/kernel/sys_parisc.c linux-3.0.4/arch/parisc/kernel/sys_parisc.c
1417--- linux-3.0.4/arch/parisc/kernel/sys_parisc.c 2011-07-21 22:17:23.000000000 -0400
1418+++ linux-3.0.4/arch/parisc/kernel/sys_parisc.c 2011-08-23 21:47:55.000000000 -0400
1419@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1420 /* At this point: (!vma || addr < vma->vm_end). */
1421 if (TASK_SIZE - len < addr)
1422 return -ENOMEM;
1423- if (!vma || addr + len <= vma->vm_start)
1424+ if (check_heap_stack_gap(vma, addr, len))
1425 return addr;
1426 addr = vma->vm_end;
1427 }
1428@@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1429 /* At this point: (!vma || addr < vma->vm_end). */
1430 if (TASK_SIZE - len < addr)
1431 return -ENOMEM;
1432- if (!vma || addr + len <= vma->vm_start)
1433+ if (check_heap_stack_gap(vma, addr, len))
1434 return addr;
1435 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1436 if (addr < vma->vm_end) /* handle wraparound */
1437@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1438 if (flags & MAP_FIXED)
1439 return addr;
1440 if (!addr)
1441- addr = TASK_UNMAPPED_BASE;
1442+ addr = current->mm->mmap_base;
1443
1444 if (filp) {
1445 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1446diff -urNp linux-3.0.4/arch/parisc/kernel/traps.c linux-3.0.4/arch/parisc/kernel/traps.c
1447--- linux-3.0.4/arch/parisc/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
1448+++ linux-3.0.4/arch/parisc/kernel/traps.c 2011-08-23 21:47:55.000000000 -0400
1449@@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1450
1451 down_read(&current->mm->mmap_sem);
1452 vma = find_vma(current->mm,regs->iaoq[0]);
1453- if (vma && (regs->iaoq[0] >= vma->vm_start)
1454- && (vma->vm_flags & VM_EXEC)) {
1455-
1456+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1457 fault_address = regs->iaoq[0];
1458 fault_space = regs->iasq[0];
1459
1460diff -urNp linux-3.0.4/arch/parisc/mm/fault.c linux-3.0.4/arch/parisc/mm/fault.c
1461--- linux-3.0.4/arch/parisc/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
1462+++ linux-3.0.4/arch/parisc/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
1463@@ -15,6 +15,7 @@
1464 #include <linux/sched.h>
1465 #include <linux/interrupt.h>
1466 #include <linux/module.h>
1467+#include <linux/unistd.h>
1468
1469 #include <asm/uaccess.h>
1470 #include <asm/traps.h>
1471@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1472 static unsigned long
1473 parisc_acctyp(unsigned long code, unsigned int inst)
1474 {
1475- if (code == 6 || code == 16)
1476+ if (code == 6 || code == 7 || code == 16)
1477 return VM_EXEC;
1478
1479 switch (inst & 0xf0000000) {
1480@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1481 }
1482 #endif
1483
1484+#ifdef CONFIG_PAX_PAGEEXEC
1485+/*
1486+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1487+ *
1488+ * returns 1 when task should be killed
1489+ * 2 when rt_sigreturn trampoline was detected
1490+ * 3 when unpatched PLT trampoline was detected
1491+ */
1492+static int pax_handle_fetch_fault(struct pt_regs *regs)
1493+{
1494+
1495+#ifdef CONFIG_PAX_EMUPLT
1496+ int err;
1497+
1498+ do { /* PaX: unpatched PLT emulation */
1499+ unsigned int bl, depwi;
1500+
1501+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1502+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1503+
1504+ if (err)
1505+ break;
1506+
1507+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1508+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1509+
1510+ err = get_user(ldw, (unsigned int *)addr);
1511+ err |= get_user(bv, (unsigned int *)(addr+4));
1512+ err |= get_user(ldw2, (unsigned int *)(addr+8));
1513+
1514+ if (err)
1515+ break;
1516+
1517+ if (ldw == 0x0E801096U &&
1518+ bv == 0xEAC0C000U &&
1519+ ldw2 == 0x0E881095U)
1520+ {
1521+ unsigned int resolver, map;
1522+
1523+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1524+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1525+ if (err)
1526+ break;
1527+
1528+ regs->gr[20] = instruction_pointer(regs)+8;
1529+ regs->gr[21] = map;
1530+ regs->gr[22] = resolver;
1531+ regs->iaoq[0] = resolver | 3UL;
1532+ regs->iaoq[1] = regs->iaoq[0] + 4;
1533+ return 3;
1534+ }
1535+ }
1536+ } while (0);
1537+#endif
1538+
1539+#ifdef CONFIG_PAX_EMUTRAMP
1540+
1541+#ifndef CONFIG_PAX_EMUSIGRT
1542+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1543+ return 1;
1544+#endif
1545+
1546+ do { /* PaX: rt_sigreturn emulation */
1547+ unsigned int ldi1, ldi2, bel, nop;
1548+
1549+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1550+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1551+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1552+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1553+
1554+ if (err)
1555+ break;
1556+
1557+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1558+ ldi2 == 0x3414015AU &&
1559+ bel == 0xE4008200U &&
1560+ nop == 0x08000240U)
1561+ {
1562+ regs->gr[25] = (ldi1 & 2) >> 1;
1563+ regs->gr[20] = __NR_rt_sigreturn;
1564+ regs->gr[31] = regs->iaoq[1] + 16;
1565+ regs->sr[0] = regs->iasq[1];
1566+ regs->iaoq[0] = 0x100UL;
1567+ regs->iaoq[1] = regs->iaoq[0] + 4;
1568+ regs->iasq[0] = regs->sr[2];
1569+ regs->iasq[1] = regs->sr[2];
1570+ return 2;
1571+ }
1572+ } while (0);
1573+#endif
1574+
1575+ return 1;
1576+}
1577+
1578+void pax_report_insns(void *pc, void *sp)
1579+{
1580+ unsigned long i;
1581+
1582+ printk(KERN_ERR "PAX: bytes at PC: ");
1583+ for (i = 0; i < 5; i++) {
1584+ unsigned int c;
1585+ if (get_user(c, (unsigned int *)pc+i))
1586+ printk(KERN_CONT "???????? ");
1587+ else
1588+ printk(KERN_CONT "%08x ", c);
1589+ }
1590+ printk("\n");
1591+}
1592+#endif
1593+
1594 int fixup_exception(struct pt_regs *regs)
1595 {
1596 const struct exception_table_entry *fix;
1597@@ -192,8 +303,33 @@ good_area:
1598
1599 acc_type = parisc_acctyp(code,regs->iir);
1600
1601- if ((vma->vm_flags & acc_type) != acc_type)
1602+ if ((vma->vm_flags & acc_type) != acc_type) {
1603+
1604+#ifdef CONFIG_PAX_PAGEEXEC
1605+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1606+ (address & ~3UL) == instruction_pointer(regs))
1607+ {
1608+ up_read(&mm->mmap_sem);
1609+ switch (pax_handle_fetch_fault(regs)) {
1610+
1611+#ifdef CONFIG_PAX_EMUPLT
1612+ case 3:
1613+ return;
1614+#endif
1615+
1616+#ifdef CONFIG_PAX_EMUTRAMP
1617+ case 2:
1618+ return;
1619+#endif
1620+
1621+ }
1622+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1623+ do_group_exit(SIGKILL);
1624+ }
1625+#endif
1626+
1627 goto bad_area;
1628+ }
1629
1630 /*
1631 * If for any reason at all we couldn't handle the fault, make
1632diff -urNp linux-3.0.4/arch/powerpc/include/asm/elf.h linux-3.0.4/arch/powerpc/include/asm/elf.h
1633--- linux-3.0.4/arch/powerpc/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1634+++ linux-3.0.4/arch/powerpc/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1635@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
1636 the loader. We need to make sure that it is out of the way of the program
1637 that it will "exec", and that there is sufficient room for the brk. */
1638
1639-extern unsigned long randomize_et_dyn(unsigned long base);
1640-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
1641+#define ELF_ET_DYN_BASE (0x20000000)
1642+
1643+#ifdef CONFIG_PAX_ASLR
1644+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
1645+
1646+#ifdef __powerpc64__
1647+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
1648+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
1649+#else
1650+#define PAX_DELTA_MMAP_LEN 15
1651+#define PAX_DELTA_STACK_LEN 15
1652+#endif
1653+#endif
1654
1655 /*
1656 * Our registers are always unsigned longs, whether we're a 32 bit
1657@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(s
1658 (0x7ff >> (PAGE_SHIFT - 12)) : \
1659 (0x3ffff >> (PAGE_SHIFT - 12)))
1660
1661-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1662-#define arch_randomize_brk arch_randomize_brk
1663-
1664 #endif /* __KERNEL__ */
1665
1666 /*
1667diff -urNp linux-3.0.4/arch/powerpc/include/asm/kmap_types.h linux-3.0.4/arch/powerpc/include/asm/kmap_types.h
1668--- linux-3.0.4/arch/powerpc/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
1669+++ linux-3.0.4/arch/powerpc/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
1670@@ -27,6 +27,7 @@ enum km_type {
1671 KM_PPC_SYNC_PAGE,
1672 KM_PPC_SYNC_ICACHE,
1673 KM_KDB,
1674+ KM_CLEARPAGE,
1675 KM_TYPE_NR
1676 };
1677
1678diff -urNp linux-3.0.4/arch/powerpc/include/asm/mman.h linux-3.0.4/arch/powerpc/include/asm/mman.h
1679--- linux-3.0.4/arch/powerpc/include/asm/mman.h 2011-07-21 22:17:23.000000000 -0400
1680+++ linux-3.0.4/arch/powerpc/include/asm/mman.h 2011-08-23 21:47:55.000000000 -0400
1681@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm
1682 }
1683 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
1684
1685-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
1686+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
1687 {
1688 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
1689 }
1690diff -urNp linux-3.0.4/arch/powerpc/include/asm/page_64.h linux-3.0.4/arch/powerpc/include/asm/page_64.h
1691--- linux-3.0.4/arch/powerpc/include/asm/page_64.h 2011-07-21 22:17:23.000000000 -0400
1692+++ linux-3.0.4/arch/powerpc/include/asm/page_64.h 2011-08-23 21:47:55.000000000 -0400
1693@@ -155,15 +155,18 @@ do { \
1694 * stack by default, so in the absence of a PT_GNU_STACK program header
1695 * we turn execute permission off.
1696 */
1697-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1698- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1699+#define VM_STACK_DEFAULT_FLAGS32 \
1700+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1701+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1702
1703 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1704 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1705
1706+#ifndef CONFIG_PAX_PAGEEXEC
1707 #define VM_STACK_DEFAULT_FLAGS \
1708 (is_32bit_task() ? \
1709 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
1710+#endif
1711
1712 #include <asm-generic/getorder.h>
1713
1714diff -urNp linux-3.0.4/arch/powerpc/include/asm/page.h linux-3.0.4/arch/powerpc/include/asm/page.h
1715--- linux-3.0.4/arch/powerpc/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
1716+++ linux-3.0.4/arch/powerpc/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
1717@@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr;
1718 * and needs to be executable. This means the whole heap ends
1719 * up being executable.
1720 */
1721-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1722- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1723+#define VM_DATA_DEFAULT_FLAGS32 \
1724+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1725+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1726
1727 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1728 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1729@@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr;
1730 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
1731 #endif
1732
1733+#define ktla_ktva(addr) (addr)
1734+#define ktva_ktla(addr) (addr)
1735+
1736 #ifndef __ASSEMBLY__
1737
1738 #undef STRICT_MM_TYPECHECKS
1739diff -urNp linux-3.0.4/arch/powerpc/include/asm/pgtable.h linux-3.0.4/arch/powerpc/include/asm/pgtable.h
1740--- linux-3.0.4/arch/powerpc/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
1741+++ linux-3.0.4/arch/powerpc/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
1742@@ -2,6 +2,7 @@
1743 #define _ASM_POWERPC_PGTABLE_H
1744 #ifdef __KERNEL__
1745
1746+#include <linux/const.h>
1747 #ifndef __ASSEMBLY__
1748 #include <asm/processor.h> /* For TASK_SIZE */
1749 #include <asm/mmu.h>
1750diff -urNp linux-3.0.4/arch/powerpc/include/asm/pte-hash32.h linux-3.0.4/arch/powerpc/include/asm/pte-hash32.h
1751--- linux-3.0.4/arch/powerpc/include/asm/pte-hash32.h 2011-07-21 22:17:23.000000000 -0400
1752+++ linux-3.0.4/arch/powerpc/include/asm/pte-hash32.h 2011-08-23 21:47:55.000000000 -0400
1753@@ -21,6 +21,7 @@
1754 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
1755 #define _PAGE_USER 0x004 /* usermode access allowed */
1756 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
1757+#define _PAGE_EXEC _PAGE_GUARDED
1758 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
1759 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
1760 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
1761diff -urNp linux-3.0.4/arch/powerpc/include/asm/reg.h linux-3.0.4/arch/powerpc/include/asm/reg.h
1762--- linux-3.0.4/arch/powerpc/include/asm/reg.h 2011-07-21 22:17:23.000000000 -0400
1763+++ linux-3.0.4/arch/powerpc/include/asm/reg.h 2011-08-23 21:47:55.000000000 -0400
1764@@ -209,6 +209,7 @@
1765 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
1766 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
1767 #define DSISR_NOHPTE 0x40000000 /* no translation found */
1768+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
1769 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
1770 #define DSISR_ISSTORE 0x02000000 /* access was a store */
1771 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
1772diff -urNp linux-3.0.4/arch/powerpc/include/asm/system.h linux-3.0.4/arch/powerpc/include/asm/system.h
1773--- linux-3.0.4/arch/powerpc/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
1774+++ linux-3.0.4/arch/powerpc/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
1775@@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
1776 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
1777 #endif
1778
1779-extern unsigned long arch_align_stack(unsigned long sp);
1780+#define arch_align_stack(x) ((x) & ~0xfUL)
1781
1782 /* Used in very early kernel initialization. */
1783 extern unsigned long reloc_offset(void);
1784diff -urNp linux-3.0.4/arch/powerpc/include/asm/uaccess.h linux-3.0.4/arch/powerpc/include/asm/uaccess.h
1785--- linux-3.0.4/arch/powerpc/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
1786+++ linux-3.0.4/arch/powerpc/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
1787@@ -13,6 +13,8 @@
1788 #define VERIFY_READ 0
1789 #define VERIFY_WRITE 1
1790
1791+extern void check_object_size(const void *ptr, unsigned long n, bool to);
1792+
1793 /*
1794 * The fs value determines whether argument validity checking should be
1795 * performed or not. If get_fs() == USER_DS, checking is performed, with
1796@@ -327,52 +329,6 @@ do { \
1797 extern unsigned long __copy_tofrom_user(void __user *to,
1798 const void __user *from, unsigned long size);
1799
1800-#ifndef __powerpc64__
1801-
1802-static inline unsigned long copy_from_user(void *to,
1803- const void __user *from, unsigned long n)
1804-{
1805- unsigned long over;
1806-
1807- if (access_ok(VERIFY_READ, from, n))
1808- return __copy_tofrom_user((__force void __user *)to, from, n);
1809- if ((unsigned long)from < TASK_SIZE) {
1810- over = (unsigned long)from + n - TASK_SIZE;
1811- return __copy_tofrom_user((__force void __user *)to, from,
1812- n - over) + over;
1813- }
1814- return n;
1815-}
1816-
1817-static inline unsigned long copy_to_user(void __user *to,
1818- const void *from, unsigned long n)
1819-{
1820- unsigned long over;
1821-
1822- if (access_ok(VERIFY_WRITE, to, n))
1823- return __copy_tofrom_user(to, (__force void __user *)from, n);
1824- if ((unsigned long)to < TASK_SIZE) {
1825- over = (unsigned long)to + n - TASK_SIZE;
1826- return __copy_tofrom_user(to, (__force void __user *)from,
1827- n - over) + over;
1828- }
1829- return n;
1830-}
1831-
1832-#else /* __powerpc64__ */
1833-
1834-#define __copy_in_user(to, from, size) \
1835- __copy_tofrom_user((to), (from), (size))
1836-
1837-extern unsigned long copy_from_user(void *to, const void __user *from,
1838- unsigned long n);
1839-extern unsigned long copy_to_user(void __user *to, const void *from,
1840- unsigned long n);
1841-extern unsigned long copy_in_user(void __user *to, const void __user *from,
1842- unsigned long n);
1843-
1844-#endif /* __powerpc64__ */
1845-
1846 static inline unsigned long __copy_from_user_inatomic(void *to,
1847 const void __user *from, unsigned long n)
1848 {
1849@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
1850 if (ret == 0)
1851 return 0;
1852 }
1853+
1854+ if (!__builtin_constant_p(n))
1855+ check_object_size(to, n, false);
1856+
1857 return __copy_tofrom_user((__force void __user *)to, from, n);
1858 }
1859
1860@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
1861 if (ret == 0)
1862 return 0;
1863 }
1864+
1865+ if (!__builtin_constant_p(n))
1866+ check_object_size(from, n, true);
1867+
1868 return __copy_tofrom_user(to, (__force const void __user *)from, n);
1869 }
1870
1871@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
1872 return __copy_to_user_inatomic(to, from, size);
1873 }
1874
1875+#ifndef __powerpc64__
1876+
1877+static inline unsigned long __must_check copy_from_user(void *to,
1878+ const void __user *from, unsigned long n)
1879+{
1880+ unsigned long over;
1881+
1882+ if ((long)n < 0)
1883+ return n;
1884+
1885+ if (access_ok(VERIFY_READ, from, n)) {
1886+ if (!__builtin_constant_p(n))
1887+ check_object_size(to, n, false);
1888+ return __copy_tofrom_user((__force void __user *)to, from, n);
1889+ }
1890+ if ((unsigned long)from < TASK_SIZE) {
1891+ over = (unsigned long)from + n - TASK_SIZE;
1892+ if (!__builtin_constant_p(n - over))
1893+ check_object_size(to, n - over, false);
1894+ return __copy_tofrom_user((__force void __user *)to, from,
1895+ n - over) + over;
1896+ }
1897+ return n;
1898+}
1899+
1900+static inline unsigned long __must_check copy_to_user(void __user *to,
1901+ const void *from, unsigned long n)
1902+{
1903+ unsigned long over;
1904+
1905+ if ((long)n < 0)
1906+ return n;
1907+
1908+ if (access_ok(VERIFY_WRITE, to, n)) {
1909+ if (!__builtin_constant_p(n))
1910+ check_object_size(from, n, true);
1911+ return __copy_tofrom_user(to, (__force void __user *)from, n);
1912+ }
1913+ if ((unsigned long)to < TASK_SIZE) {
1914+ over = (unsigned long)to + n - TASK_SIZE;
1915+ if (!__builtin_constant_p(n))
1916+ check_object_size(from, n - over, true);
1917+ return __copy_tofrom_user(to, (__force void __user *)from,
1918+ n - over) + over;
1919+ }
1920+ return n;
1921+}
1922+
1923+#else /* __powerpc64__ */
1924+
1925+#define __copy_in_user(to, from, size) \
1926+ __copy_tofrom_user((to), (from), (size))
1927+
1928+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1929+{
1930+ if ((long)n < 0 || n > INT_MAX)
1931+ return n;
1932+
1933+ if (!__builtin_constant_p(n))
1934+ check_object_size(to, n, false);
1935+
1936+ if (likely(access_ok(VERIFY_READ, from, n)))
1937+ n = __copy_from_user(to, from, n);
1938+ else
1939+ memset(to, 0, n);
1940+ return n;
1941+}
1942+
1943+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1944+{
1945+ if ((long)n < 0 || n > INT_MAX)
1946+ return n;
1947+
1948+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
1949+ if (!__builtin_constant_p(n))
1950+ check_object_size(from, n, true);
1951+ n = __copy_to_user(to, from, n);
1952+ }
1953+ return n;
1954+}
1955+
1956+extern unsigned long copy_in_user(void __user *to, const void __user *from,
1957+ unsigned long n);
1958+
1959+#endif /* __powerpc64__ */
1960+
1961 extern unsigned long __clear_user(void __user *addr, unsigned long size);
1962
1963 static inline unsigned long clear_user(void __user *addr, unsigned long size)
1964diff -urNp linux-3.0.4/arch/powerpc/kernel/exceptions-64e.S linux-3.0.4/arch/powerpc/kernel/exceptions-64e.S
1965--- linux-3.0.4/arch/powerpc/kernel/exceptions-64e.S 2011-07-21 22:17:23.000000000 -0400
1966+++ linux-3.0.4/arch/powerpc/kernel/exceptions-64e.S 2011-08-23 21:47:55.000000000 -0400
1967@@ -567,6 +567,7 @@ storage_fault_common:
1968 std r14,_DAR(r1)
1969 std r15,_DSISR(r1)
1970 addi r3,r1,STACK_FRAME_OVERHEAD
1971+ bl .save_nvgprs
1972 mr r4,r14
1973 mr r5,r15
1974 ld r14,PACA_EXGEN+EX_R14(r13)
1975@@ -576,8 +577,7 @@ storage_fault_common:
1976 cmpdi r3,0
1977 bne- 1f
1978 b .ret_from_except_lite
1979-1: bl .save_nvgprs
1980- mr r5,r3
1981+1: mr r5,r3
1982 addi r3,r1,STACK_FRAME_OVERHEAD
1983 ld r4,_DAR(r1)
1984 bl .bad_page_fault
1985diff -urNp linux-3.0.4/arch/powerpc/kernel/exceptions-64s.S linux-3.0.4/arch/powerpc/kernel/exceptions-64s.S
1986--- linux-3.0.4/arch/powerpc/kernel/exceptions-64s.S 2011-07-21 22:17:23.000000000 -0400
1987+++ linux-3.0.4/arch/powerpc/kernel/exceptions-64s.S 2011-08-23 21:47:55.000000000 -0400
1988@@ -956,10 +956,10 @@ handle_page_fault:
1989 11: ld r4,_DAR(r1)
1990 ld r5,_DSISR(r1)
1991 addi r3,r1,STACK_FRAME_OVERHEAD
1992+ bl .save_nvgprs
1993 bl .do_page_fault
1994 cmpdi r3,0
1995 beq+ 13f
1996- bl .save_nvgprs
1997 mr r5,r3
1998 addi r3,r1,STACK_FRAME_OVERHEAD
1999 lwz r4,_DAR(r1)
2000diff -urNp linux-3.0.4/arch/powerpc/kernel/module_32.c linux-3.0.4/arch/powerpc/kernel/module_32.c
2001--- linux-3.0.4/arch/powerpc/kernel/module_32.c 2011-07-21 22:17:23.000000000 -0400
2002+++ linux-3.0.4/arch/powerpc/kernel/module_32.c 2011-08-23 21:47:55.000000000 -0400
2003@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2004 me->arch.core_plt_section = i;
2005 }
2006 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2007- printk("Module doesn't contain .plt or .init.plt sections.\n");
2008+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2009 return -ENOEXEC;
2010 }
2011
2012@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2013
2014 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2015 /* Init, or core PLT? */
2016- if (location >= mod->module_core
2017- && location < mod->module_core + mod->core_size)
2018+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2019+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2020 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2021- else
2022+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2023+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2024 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2025+ else {
2026+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2027+ return ~0UL;
2028+ }
2029
2030 /* Find this entry, or if that fails, the next avail. entry */
2031 while (entry->jump[0]) {
2032diff -urNp linux-3.0.4/arch/powerpc/kernel/module.c linux-3.0.4/arch/powerpc/kernel/module.c
2033--- linux-3.0.4/arch/powerpc/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
2034+++ linux-3.0.4/arch/powerpc/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
2035@@ -31,11 +31,24 @@
2036
2037 LIST_HEAD(module_bug_list);
2038
2039+#ifdef CONFIG_PAX_KERNEXEC
2040 void *module_alloc(unsigned long size)
2041 {
2042 if (size == 0)
2043 return NULL;
2044
2045+ return vmalloc(size);
2046+}
2047+
2048+void *module_alloc_exec(unsigned long size)
2049+#else
2050+void *module_alloc(unsigned long size)
2051+#endif
2052+
2053+{
2054+ if (size == 0)
2055+ return NULL;
2056+
2057 return vmalloc_exec(size);
2058 }
2059
2060@@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2061 vfree(module_region);
2062 }
2063
2064+#ifdef CONFIG_PAX_KERNEXEC
2065+void module_free_exec(struct module *mod, void *module_region)
2066+{
2067+ module_free(mod, module_region);
2068+}
2069+#endif
2070+
2071 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2072 const Elf_Shdr *sechdrs,
2073 const char *name)
2074diff -urNp linux-3.0.4/arch/powerpc/kernel/process.c linux-3.0.4/arch/powerpc/kernel/process.c
2075--- linux-3.0.4/arch/powerpc/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2076+++ linux-3.0.4/arch/powerpc/kernel/process.c 2011-08-23 21:48:14.000000000 -0400
2077@@ -676,8 +676,8 @@ void show_regs(struct pt_regs * regs)
2078 * Lookup NIP late so we have the best change of getting the
2079 * above info out without failing
2080 */
2081- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2082- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2083+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2084+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2085 #endif
2086 show_stack(current, (unsigned long *) regs->gpr[1]);
2087 if (!user_mode(regs))
2088@@ -1183,10 +1183,10 @@ void show_stack(struct task_struct *tsk,
2089 newsp = stack[0];
2090 ip = stack[STACK_FRAME_LR_SAVE];
2091 if (!firstframe || ip != lr) {
2092- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2093+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2094 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2095 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2096- printk(" (%pS)",
2097+ printk(" (%pA)",
2098 (void *)current->ret_stack[curr_frame].ret);
2099 curr_frame--;
2100 }
2101@@ -1206,7 +1206,7 @@ void show_stack(struct task_struct *tsk,
2102 struct pt_regs *regs = (struct pt_regs *)
2103 (sp + STACK_FRAME_OVERHEAD);
2104 lr = regs->link;
2105- printk("--- Exception: %lx at %pS\n LR = %pS\n",
2106+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
2107 regs->trap, (void *)regs->nip, (void *)lr);
2108 firstframe = 1;
2109 }
2110@@ -1281,58 +1281,3 @@ void thread_info_cache_init(void)
2111 }
2112
2113 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2114-
2115-unsigned long arch_align_stack(unsigned long sp)
2116-{
2117- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2118- sp -= get_random_int() & ~PAGE_MASK;
2119- return sp & ~0xf;
2120-}
2121-
2122-static inline unsigned long brk_rnd(void)
2123-{
2124- unsigned long rnd = 0;
2125-
2126- /* 8MB for 32bit, 1GB for 64bit */
2127- if (is_32bit_task())
2128- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2129- else
2130- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2131-
2132- return rnd << PAGE_SHIFT;
2133-}
2134-
2135-unsigned long arch_randomize_brk(struct mm_struct *mm)
2136-{
2137- unsigned long base = mm->brk;
2138- unsigned long ret;
2139-
2140-#ifdef CONFIG_PPC_STD_MMU_64
2141- /*
2142- * If we are using 1TB segments and we are allowed to randomise
2143- * the heap, we can put it above 1TB so it is backed by a 1TB
2144- * segment. Otherwise the heap will be in the bottom 1TB
2145- * which always uses 256MB segments and this may result in a
2146- * performance penalty.
2147- */
2148- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2149- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2150-#endif
2151-
2152- ret = PAGE_ALIGN(base + brk_rnd());
2153-
2154- if (ret < mm->brk)
2155- return mm->brk;
2156-
2157- return ret;
2158-}
2159-
2160-unsigned long randomize_et_dyn(unsigned long base)
2161-{
2162- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2163-
2164- if (ret < base)
2165- return base;
2166-
2167- return ret;
2168-}
2169diff -urNp linux-3.0.4/arch/powerpc/kernel/signal_32.c linux-3.0.4/arch/powerpc/kernel/signal_32.c
2170--- linux-3.0.4/arch/powerpc/kernel/signal_32.c 2011-07-21 22:17:23.000000000 -0400
2171+++ linux-3.0.4/arch/powerpc/kernel/signal_32.c 2011-08-23 21:47:55.000000000 -0400
2172@@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig
2173 /* Save user registers on the stack */
2174 frame = &rt_sf->uc.uc_mcontext;
2175 addr = frame;
2176- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2177+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2178 if (save_user_regs(regs, frame, 0, 1))
2179 goto badframe;
2180 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2181diff -urNp linux-3.0.4/arch/powerpc/kernel/signal_64.c linux-3.0.4/arch/powerpc/kernel/signal_64.c
2182--- linux-3.0.4/arch/powerpc/kernel/signal_64.c 2011-07-21 22:17:23.000000000 -0400
2183+++ linux-3.0.4/arch/powerpc/kernel/signal_64.c 2011-08-23 21:47:55.000000000 -0400
2184@@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct
2185 current->thread.fpscr.val = 0;
2186
2187 /* Set up to return from userspace. */
2188- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2189+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2190 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2191 } else {
2192 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2193diff -urNp linux-3.0.4/arch/powerpc/kernel/traps.c linux-3.0.4/arch/powerpc/kernel/traps.c
2194--- linux-3.0.4/arch/powerpc/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
2195+++ linux-3.0.4/arch/powerpc/kernel/traps.c 2011-08-23 21:48:14.000000000 -0400
2196@@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2197 static inline void pmac_backlight_unblank(void) { }
2198 #endif
2199
2200+extern void gr_handle_kernel_exploit(void);
2201+
2202 int die(const char *str, struct pt_regs *regs, long err)
2203 {
2204 static struct {
2205@@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs
2206 if (panic_on_oops)
2207 panic("Fatal exception");
2208
2209+ gr_handle_kernel_exploit();
2210+
2211 oops_exit();
2212 do_exit(err);
2213
2214diff -urNp linux-3.0.4/arch/powerpc/kernel/vdso.c linux-3.0.4/arch/powerpc/kernel/vdso.c
2215--- linux-3.0.4/arch/powerpc/kernel/vdso.c 2011-07-21 22:17:23.000000000 -0400
2216+++ linux-3.0.4/arch/powerpc/kernel/vdso.c 2011-08-23 21:47:55.000000000 -0400
2217@@ -36,6 +36,7 @@
2218 #include <asm/firmware.h>
2219 #include <asm/vdso.h>
2220 #include <asm/vdso_datapage.h>
2221+#include <asm/mman.h>
2222
2223 #include "setup.h"
2224
2225@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2226 vdso_base = VDSO32_MBASE;
2227 #endif
2228
2229- current->mm->context.vdso_base = 0;
2230+ current->mm->context.vdso_base = ~0UL;
2231
2232 /* vDSO has a problem and was disabled, just don't "enable" it for the
2233 * process
2234@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2235 vdso_base = get_unmapped_area(NULL, vdso_base,
2236 (vdso_pages << PAGE_SHIFT) +
2237 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2238- 0, 0);
2239+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
2240 if (IS_ERR_VALUE(vdso_base)) {
2241 rc = vdso_base;
2242 goto fail_mmapsem;
2243diff -urNp linux-3.0.4/arch/powerpc/lib/usercopy_64.c linux-3.0.4/arch/powerpc/lib/usercopy_64.c
2244--- linux-3.0.4/arch/powerpc/lib/usercopy_64.c 2011-07-21 22:17:23.000000000 -0400
2245+++ linux-3.0.4/arch/powerpc/lib/usercopy_64.c 2011-08-23 21:47:55.000000000 -0400
2246@@ -9,22 +9,6 @@
2247 #include <linux/module.h>
2248 #include <asm/uaccess.h>
2249
2250-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2251-{
2252- if (likely(access_ok(VERIFY_READ, from, n)))
2253- n = __copy_from_user(to, from, n);
2254- else
2255- memset(to, 0, n);
2256- return n;
2257-}
2258-
2259-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2260-{
2261- if (likely(access_ok(VERIFY_WRITE, to, n)))
2262- n = __copy_to_user(to, from, n);
2263- return n;
2264-}
2265-
2266 unsigned long copy_in_user(void __user *to, const void __user *from,
2267 unsigned long n)
2268 {
2269@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2270 return n;
2271 }
2272
2273-EXPORT_SYMBOL(copy_from_user);
2274-EXPORT_SYMBOL(copy_to_user);
2275 EXPORT_SYMBOL(copy_in_user);
2276
2277diff -urNp linux-3.0.4/arch/powerpc/mm/fault.c linux-3.0.4/arch/powerpc/mm/fault.c
2278--- linux-3.0.4/arch/powerpc/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
2279+++ linux-3.0.4/arch/powerpc/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
2280@@ -32,6 +32,10 @@
2281 #include <linux/perf_event.h>
2282 #include <linux/magic.h>
2283 #include <linux/ratelimit.h>
2284+#include <linux/slab.h>
2285+#include <linux/pagemap.h>
2286+#include <linux/compiler.h>
2287+#include <linux/unistd.h>
2288
2289 #include <asm/firmware.h>
2290 #include <asm/page.h>
2291@@ -43,6 +47,7 @@
2292 #include <asm/tlbflush.h>
2293 #include <asm/siginfo.h>
2294 #include <mm/mmu_decl.h>
2295+#include <asm/ptrace.h>
2296
2297 #ifdef CONFIG_KPROBES
2298 static inline int notify_page_fault(struct pt_regs *regs)
2299@@ -66,6 +71,33 @@ static inline int notify_page_fault(stru
2300 }
2301 #endif
2302
2303+#ifdef CONFIG_PAX_PAGEEXEC
2304+/*
2305+ * PaX: decide what to do with offenders (regs->nip = fault address)
2306+ *
2307+ * returns 1 when task should be killed
2308+ */
2309+static int pax_handle_fetch_fault(struct pt_regs *regs)
2310+{
2311+ return 1;
2312+}
2313+
2314+void pax_report_insns(void *pc, void *sp)
2315+{
2316+ unsigned long i;
2317+
2318+ printk(KERN_ERR "PAX: bytes at PC: ");
2319+ for (i = 0; i < 5; i++) {
2320+ unsigned int c;
2321+ if (get_user(c, (unsigned int __user *)pc+i))
2322+ printk(KERN_CONT "???????? ");
2323+ else
2324+ printk(KERN_CONT "%08x ", c);
2325+ }
2326+ printk("\n");
2327+}
2328+#endif
2329+
2330 /*
2331 * Check whether the instruction at regs->nip is a store using
2332 * an update addressing form which will update r1.
2333@@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_re
2334 * indicate errors in DSISR but can validly be set in SRR1.
2335 */
2336 if (trap == 0x400)
2337- error_code &= 0x48200000;
2338+ error_code &= 0x58200000;
2339 else
2340 is_write = error_code & DSISR_ISSTORE;
2341 #else
2342@@ -259,7 +291,7 @@ good_area:
2343 * "undefined". Of those that can be set, this is the only
2344 * one which seems bad.
2345 */
2346- if (error_code & 0x10000000)
2347+ if (error_code & DSISR_GUARDED)
2348 /* Guarded storage error. */
2349 goto bad_area;
2350 #endif /* CONFIG_8xx */
2351@@ -274,7 +306,7 @@ good_area:
2352 * processors use the same I/D cache coherency mechanism
2353 * as embedded.
2354 */
2355- if (error_code & DSISR_PROTFAULT)
2356+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2357 goto bad_area;
2358 #endif /* CONFIG_PPC_STD_MMU */
2359
2360@@ -343,6 +375,23 @@ bad_area:
2361 bad_area_nosemaphore:
2362 /* User mode accesses cause a SIGSEGV */
2363 if (user_mode(regs)) {
2364+
2365+#ifdef CONFIG_PAX_PAGEEXEC
2366+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2367+#ifdef CONFIG_PPC_STD_MMU
2368+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2369+#else
2370+ if (is_exec && regs->nip == address) {
2371+#endif
2372+ switch (pax_handle_fetch_fault(regs)) {
2373+ }
2374+
2375+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2376+ do_group_exit(SIGKILL);
2377+ }
2378+ }
2379+#endif
2380+
2381 _exception(SIGSEGV, regs, code, address);
2382 return 0;
2383 }
2384diff -urNp linux-3.0.4/arch/powerpc/mm/mmap_64.c linux-3.0.4/arch/powerpc/mm/mmap_64.c
2385--- linux-3.0.4/arch/powerpc/mm/mmap_64.c 2011-07-21 22:17:23.000000000 -0400
2386+++ linux-3.0.4/arch/powerpc/mm/mmap_64.c 2011-08-23 21:47:55.000000000 -0400
2387@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2388 */
2389 if (mmap_is_legacy()) {
2390 mm->mmap_base = TASK_UNMAPPED_BASE;
2391+
2392+#ifdef CONFIG_PAX_RANDMMAP
2393+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2394+ mm->mmap_base += mm->delta_mmap;
2395+#endif
2396+
2397 mm->get_unmapped_area = arch_get_unmapped_area;
2398 mm->unmap_area = arch_unmap_area;
2399 } else {
2400 mm->mmap_base = mmap_base();
2401+
2402+#ifdef CONFIG_PAX_RANDMMAP
2403+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2404+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2405+#endif
2406+
2407 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2408 mm->unmap_area = arch_unmap_area_topdown;
2409 }
2410diff -urNp linux-3.0.4/arch/powerpc/mm/slice.c linux-3.0.4/arch/powerpc/mm/slice.c
2411--- linux-3.0.4/arch/powerpc/mm/slice.c 2011-07-21 22:17:23.000000000 -0400
2412+++ linux-3.0.4/arch/powerpc/mm/slice.c 2011-08-23 21:47:55.000000000 -0400
2413@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
2414 if ((mm->task_size - len) < addr)
2415 return 0;
2416 vma = find_vma(mm, addr);
2417- return (!vma || (addr + len) <= vma->vm_start);
2418+ return check_heap_stack_gap(vma, addr, len);
2419 }
2420
2421 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2422@@ -256,7 +256,7 @@ full_search:
2423 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2424 continue;
2425 }
2426- if (!vma || addr + len <= vma->vm_start) {
2427+ if (check_heap_stack_gap(vma, addr, len)) {
2428 /*
2429 * Remember the place where we stopped the search:
2430 */
2431@@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
2432 }
2433 }
2434
2435- addr = mm->mmap_base;
2436- while (addr > len) {
2437+ if (mm->mmap_base < len)
2438+ addr = -ENOMEM;
2439+ else
2440+ addr = mm->mmap_base - len;
2441+
2442+ while (!IS_ERR_VALUE(addr)) {
2443 /* Go down by chunk size */
2444- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2445+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
2446
2447 /* Check for hit with different page size */
2448 mask = slice_range_to_mask(addr, len);
2449@@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
2450 * return with success:
2451 */
2452 vma = find_vma(mm, addr);
2453- if (!vma || (addr + len) <= vma->vm_start) {
2454+ if (check_heap_stack_gap(vma, addr, len)) {
2455 /* remember the address as a hint for next time */
2456 if (use_cache)
2457 mm->free_area_cache = addr;
2458@@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
2459 mm->cached_hole_size = vma->vm_start - addr;
2460
2461 /* try just below the current vma->vm_start */
2462- addr = vma->vm_start;
2463+ addr = skip_heap_stack_gap(vma, len);
2464 }
2465
2466 /*
2467@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
2468 if (fixed && addr > (mm->task_size - len))
2469 return -EINVAL;
2470
2471+#ifdef CONFIG_PAX_RANDMMAP
2472+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2473+ addr = 0;
2474+#endif
2475+
2476 /* If hint, make sure it matches our alignment restrictions */
2477 if (!fixed && addr) {
2478 addr = _ALIGN_UP(addr, 1ul << pshift);
2479diff -urNp linux-3.0.4/arch/s390/include/asm/elf.h linux-3.0.4/arch/s390/include/asm/elf.h
2480--- linux-3.0.4/arch/s390/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
2481+++ linux-3.0.4/arch/s390/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
2482@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2483 the loader. We need to make sure that it is out of the way of the program
2484 that it will "exec", and that there is sufficient room for the brk. */
2485
2486-extern unsigned long randomize_et_dyn(unsigned long base);
2487-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2488+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2489+
2490+#ifdef CONFIG_PAX_ASLR
2491+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2492+
2493+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2494+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2495+#endif
2496
2497 /* This yields a mask that user programs can use to figure out what
2498 instruction set this CPU supports. */
2499@@ -210,7 +216,4 @@ struct linux_binprm;
2500 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2501 int arch_setup_additional_pages(struct linux_binprm *, int);
2502
2503-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2504-#define arch_randomize_brk arch_randomize_brk
2505-
2506 #endif
2507diff -urNp linux-3.0.4/arch/s390/include/asm/system.h linux-3.0.4/arch/s390/include/asm/system.h
2508--- linux-3.0.4/arch/s390/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
2509+++ linux-3.0.4/arch/s390/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
2510@@ -255,7 +255,7 @@ extern void (*_machine_restart)(char *co
2511 extern void (*_machine_halt)(void);
2512 extern void (*_machine_power_off)(void);
2513
2514-extern unsigned long arch_align_stack(unsigned long sp);
2515+#define arch_align_stack(x) ((x) & ~0xfUL)
2516
2517 static inline int tprot(unsigned long addr)
2518 {
2519diff -urNp linux-3.0.4/arch/s390/include/asm/uaccess.h linux-3.0.4/arch/s390/include/asm/uaccess.h
2520--- linux-3.0.4/arch/s390/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
2521+++ linux-3.0.4/arch/s390/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
2522@@ -235,6 +235,10 @@ static inline unsigned long __must_check
2523 copy_to_user(void __user *to, const void *from, unsigned long n)
2524 {
2525 might_fault();
2526+
2527+ if ((long)n < 0)
2528+ return n;
2529+
2530 if (access_ok(VERIFY_WRITE, to, n))
2531 n = __copy_to_user(to, from, n);
2532 return n;
2533@@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void
2534 static inline unsigned long __must_check
2535 __copy_from_user(void *to, const void __user *from, unsigned long n)
2536 {
2537+ if ((long)n < 0)
2538+ return n;
2539+
2540 if (__builtin_constant_p(n) && (n <= 256))
2541 return uaccess.copy_from_user_small(n, from, to);
2542 else
2543@@ -294,6 +301,10 @@ copy_from_user(void *to, const void __us
2544 unsigned int sz = __compiletime_object_size(to);
2545
2546 might_fault();
2547+
2548+ if ((long)n < 0)
2549+ return n;
2550+
2551 if (unlikely(sz != -1 && sz < n)) {
2552 copy_from_user_overflow();
2553 return n;
2554diff -urNp linux-3.0.4/arch/s390/kernel/module.c linux-3.0.4/arch/s390/kernel/module.c
2555--- linux-3.0.4/arch/s390/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
2556+++ linux-3.0.4/arch/s390/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
2557@@ -168,11 +168,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
2558
2559 /* Increase core size by size of got & plt and set start
2560 offsets for got and plt. */
2561- me->core_size = ALIGN(me->core_size, 4);
2562- me->arch.got_offset = me->core_size;
2563- me->core_size += me->arch.got_size;
2564- me->arch.plt_offset = me->core_size;
2565- me->core_size += me->arch.plt_size;
2566+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
2567+ me->arch.got_offset = me->core_size_rw;
2568+ me->core_size_rw += me->arch.got_size;
2569+ me->arch.plt_offset = me->core_size_rx;
2570+ me->core_size_rx += me->arch.plt_size;
2571 return 0;
2572 }
2573
2574@@ -258,7 +258,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2575 if (info->got_initialized == 0) {
2576 Elf_Addr *gotent;
2577
2578- gotent = me->module_core + me->arch.got_offset +
2579+ gotent = me->module_core_rw + me->arch.got_offset +
2580 info->got_offset;
2581 *gotent = val;
2582 info->got_initialized = 1;
2583@@ -282,7 +282,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2584 else if (r_type == R_390_GOTENT ||
2585 r_type == R_390_GOTPLTENT)
2586 *(unsigned int *) loc =
2587- (val + (Elf_Addr) me->module_core - loc) >> 1;
2588+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
2589 else if (r_type == R_390_GOT64 ||
2590 r_type == R_390_GOTPLT64)
2591 *(unsigned long *) loc = val;
2592@@ -296,7 +296,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2593 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
2594 if (info->plt_initialized == 0) {
2595 unsigned int *ip;
2596- ip = me->module_core + me->arch.plt_offset +
2597+ ip = me->module_core_rx + me->arch.plt_offset +
2598 info->plt_offset;
2599 #ifndef CONFIG_64BIT
2600 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
2601@@ -321,7 +321,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2602 val - loc + 0xffffUL < 0x1ffffeUL) ||
2603 (r_type == R_390_PLT32DBL &&
2604 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
2605- val = (Elf_Addr) me->module_core +
2606+ val = (Elf_Addr) me->module_core_rx +
2607 me->arch.plt_offset +
2608 info->plt_offset;
2609 val += rela->r_addend - loc;
2610@@ -343,7 +343,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2611 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
2612 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
2613 val = val + rela->r_addend -
2614- ((Elf_Addr) me->module_core + me->arch.got_offset);
2615+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
2616 if (r_type == R_390_GOTOFF16)
2617 *(unsigned short *) loc = val;
2618 else if (r_type == R_390_GOTOFF32)
2619@@ -353,7 +353,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2620 break;
2621 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
2622 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
2623- val = (Elf_Addr) me->module_core + me->arch.got_offset +
2624+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
2625 rela->r_addend - loc;
2626 if (r_type == R_390_GOTPC)
2627 *(unsigned int *) loc = val;
2628diff -urNp linux-3.0.4/arch/s390/kernel/process.c linux-3.0.4/arch/s390/kernel/process.c
2629--- linux-3.0.4/arch/s390/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2630+++ linux-3.0.4/arch/s390/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
2631@@ -319,39 +319,3 @@ unsigned long get_wchan(struct task_stru
2632 }
2633 return 0;
2634 }
2635-
2636-unsigned long arch_align_stack(unsigned long sp)
2637-{
2638- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2639- sp -= get_random_int() & ~PAGE_MASK;
2640- return sp & ~0xf;
2641-}
2642-
2643-static inline unsigned long brk_rnd(void)
2644-{
2645- /* 8MB for 32bit, 1GB for 64bit */
2646- if (is_32bit_task())
2647- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
2648- else
2649- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
2650-}
2651-
2652-unsigned long arch_randomize_brk(struct mm_struct *mm)
2653-{
2654- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
2655-
2656- if (ret < mm->brk)
2657- return mm->brk;
2658- return ret;
2659-}
2660-
2661-unsigned long randomize_et_dyn(unsigned long base)
2662-{
2663- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2664-
2665- if (!(current->flags & PF_RANDOMIZE))
2666- return base;
2667- if (ret < base)
2668- return base;
2669- return ret;
2670-}
2671diff -urNp linux-3.0.4/arch/s390/kernel/setup.c linux-3.0.4/arch/s390/kernel/setup.c
2672--- linux-3.0.4/arch/s390/kernel/setup.c 2011-07-21 22:17:23.000000000 -0400
2673+++ linux-3.0.4/arch/s390/kernel/setup.c 2011-08-23 21:47:55.000000000 -0400
2674@@ -271,7 +271,7 @@ static int __init early_parse_mem(char *
2675 }
2676 early_param("mem", early_parse_mem);
2677
2678-unsigned int user_mode = HOME_SPACE_MODE;
2679+unsigned int user_mode = SECONDARY_SPACE_MODE;
2680 EXPORT_SYMBOL_GPL(user_mode);
2681
2682 static int set_amode_and_uaccess(unsigned long user_amode,
2683diff -urNp linux-3.0.4/arch/s390/mm/mmap.c linux-3.0.4/arch/s390/mm/mmap.c
2684--- linux-3.0.4/arch/s390/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
2685+++ linux-3.0.4/arch/s390/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
2686@@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_str
2687 */
2688 if (mmap_is_legacy()) {
2689 mm->mmap_base = TASK_UNMAPPED_BASE;
2690+
2691+#ifdef CONFIG_PAX_RANDMMAP
2692+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2693+ mm->mmap_base += mm->delta_mmap;
2694+#endif
2695+
2696 mm->get_unmapped_area = arch_get_unmapped_area;
2697 mm->unmap_area = arch_unmap_area;
2698 } else {
2699 mm->mmap_base = mmap_base();
2700+
2701+#ifdef CONFIG_PAX_RANDMMAP
2702+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2703+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2704+#endif
2705+
2706 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2707 mm->unmap_area = arch_unmap_area_topdown;
2708 }
2709@@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_str
2710 */
2711 if (mmap_is_legacy()) {
2712 mm->mmap_base = TASK_UNMAPPED_BASE;
2713+
2714+#ifdef CONFIG_PAX_RANDMMAP
2715+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2716+ mm->mmap_base += mm->delta_mmap;
2717+#endif
2718+
2719 mm->get_unmapped_area = s390_get_unmapped_area;
2720 mm->unmap_area = arch_unmap_area;
2721 } else {
2722 mm->mmap_base = mmap_base();
2723+
2724+#ifdef CONFIG_PAX_RANDMMAP
2725+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2726+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2727+#endif
2728+
2729 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
2730 mm->unmap_area = arch_unmap_area_topdown;
2731 }
2732diff -urNp linux-3.0.4/arch/score/include/asm/system.h linux-3.0.4/arch/score/include/asm/system.h
2733--- linux-3.0.4/arch/score/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
2734+++ linux-3.0.4/arch/score/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
2735@@ -17,7 +17,7 @@ do { \
2736 #define finish_arch_switch(prev) do {} while (0)
2737
2738 typedef void (*vi_handler_t)(void);
2739-extern unsigned long arch_align_stack(unsigned long sp);
2740+#define arch_align_stack(x) (x)
2741
2742 #define mb() barrier()
2743 #define rmb() barrier()
2744diff -urNp linux-3.0.4/arch/score/kernel/process.c linux-3.0.4/arch/score/kernel/process.c
2745--- linux-3.0.4/arch/score/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2746+++ linux-3.0.4/arch/score/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
2747@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
2748
2749 return task_pt_regs(task)->cp0_epc;
2750 }
2751-
2752-unsigned long arch_align_stack(unsigned long sp)
2753-{
2754- return sp;
2755-}
2756diff -urNp linux-3.0.4/arch/sh/mm/mmap.c linux-3.0.4/arch/sh/mm/mmap.c
2757--- linux-3.0.4/arch/sh/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
2758+++ linux-3.0.4/arch/sh/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
2759@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
2760 addr = PAGE_ALIGN(addr);
2761
2762 vma = find_vma(mm, addr);
2763- if (TASK_SIZE - len >= addr &&
2764- (!vma || addr + len <= vma->vm_start))
2765+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2766 return addr;
2767 }
2768
2769@@ -106,7 +105,7 @@ full_search:
2770 }
2771 return -ENOMEM;
2772 }
2773- if (likely(!vma || addr + len <= vma->vm_start)) {
2774+ if (likely(check_heap_stack_gap(vma, addr, len))) {
2775 /*
2776 * Remember the place where we stopped the search:
2777 */
2778@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
2779 addr = PAGE_ALIGN(addr);
2780
2781 vma = find_vma(mm, addr);
2782- if (TASK_SIZE - len >= addr &&
2783- (!vma || addr + len <= vma->vm_start))
2784+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2785 return addr;
2786 }
2787
2788@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
2789 /* make sure it can fit in the remaining address space */
2790 if (likely(addr > len)) {
2791 vma = find_vma(mm, addr-len);
2792- if (!vma || addr <= vma->vm_start) {
2793+ if (check_heap_stack_gap(vma, addr - len, len)) {
2794 /* remember the address as a hint for next time */
2795 return (mm->free_area_cache = addr-len);
2796 }
2797@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
2798 if (unlikely(mm->mmap_base < len))
2799 goto bottomup;
2800
2801- addr = mm->mmap_base-len;
2802- if (do_colour_align)
2803- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2804+ addr = mm->mmap_base - len;
2805
2806 do {
2807+ if (do_colour_align)
2808+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2809 /*
2810 * Lookup failure means no vma is above this address,
2811 * else if new region fits below vma->vm_start,
2812 * return with success:
2813 */
2814 vma = find_vma(mm, addr);
2815- if (likely(!vma || addr+len <= vma->vm_start)) {
2816+ if (likely(check_heap_stack_gap(vma, addr, len))) {
2817 /* remember the address as a hint for next time */
2818 return (mm->free_area_cache = addr);
2819 }
2820@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
2821 mm->cached_hole_size = vma->vm_start - addr;
2822
2823 /* try just below the current vma->vm_start */
2824- addr = vma->vm_start-len;
2825- if (do_colour_align)
2826- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2827- } while (likely(len < vma->vm_start));
2828+ addr = skip_heap_stack_gap(vma, len);
2829+ } while (!IS_ERR_VALUE(addr));
2830
2831 bottomup:
2832 /*
2833diff -urNp linux-3.0.4/arch/sparc/include/asm/atomic_64.h linux-3.0.4/arch/sparc/include/asm/atomic_64.h
2834--- linux-3.0.4/arch/sparc/include/asm/atomic_64.h 2011-07-21 22:17:23.000000000 -0400
2835+++ linux-3.0.4/arch/sparc/include/asm/atomic_64.h 2011-08-23 21:48:14.000000000 -0400
2836@@ -14,18 +14,40 @@
2837 #define ATOMIC64_INIT(i) { (i) }
2838
2839 #define atomic_read(v) (*(volatile int *)&(v)->counter)
2840+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
2841+{
2842+ return v->counter;
2843+}
2844 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
2845+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
2846+{
2847+ return v->counter;
2848+}
2849
2850 #define atomic_set(v, i) (((v)->counter) = i)
2851+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
2852+{
2853+ v->counter = i;
2854+}
2855 #define atomic64_set(v, i) (((v)->counter) = i)
2856+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
2857+{
2858+ v->counter = i;
2859+}
2860
2861 extern void atomic_add(int, atomic_t *);
2862+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
2863 extern void atomic64_add(long, atomic64_t *);
2864+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
2865 extern void atomic_sub(int, atomic_t *);
2866+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
2867 extern void atomic64_sub(long, atomic64_t *);
2868+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
2869
2870 extern int atomic_add_ret(int, atomic_t *);
2871+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
2872 extern long atomic64_add_ret(long, atomic64_t *);
2873+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
2874 extern int atomic_sub_ret(int, atomic_t *);
2875 extern long atomic64_sub_ret(long, atomic64_t *);
2876
2877@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
2878 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
2879
2880 #define atomic_inc_return(v) atomic_add_ret(1, v)
2881+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
2882+{
2883+ return atomic_add_ret_unchecked(1, v);
2884+}
2885 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
2886+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
2887+{
2888+ return atomic64_add_ret_unchecked(1, v);
2889+}
2890
2891 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
2892 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
2893
2894 #define atomic_add_return(i, v) atomic_add_ret(i, v)
2895+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
2896+{
2897+ return atomic_add_ret_unchecked(i, v);
2898+}
2899 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
2900+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
2901+{
2902+ return atomic64_add_ret_unchecked(i, v);
2903+}
2904
2905 /*
2906 * atomic_inc_and_test - increment and test
2907@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomi
2908 * other cases.
2909 */
2910 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
2911+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
2912+{
2913+ return atomic_inc_return_unchecked(v) == 0;
2914+}
2915 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
2916
2917 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
2918@@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomi
2919 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
2920
2921 #define atomic_inc(v) atomic_add(1, v)
2922+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
2923+{
2924+ atomic_add_unchecked(1, v);
2925+}
2926 #define atomic64_inc(v) atomic64_add(1, v)
2927+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
2928+{
2929+ atomic64_add_unchecked(1, v);
2930+}
2931
2932 #define atomic_dec(v) atomic_sub(1, v)
2933+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
2934+{
2935+ atomic_sub_unchecked(1, v);
2936+}
2937 #define atomic64_dec(v) atomic64_sub(1, v)
2938+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
2939+{
2940+ atomic64_sub_unchecked(1, v);
2941+}
2942
2943 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
2944 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
2945
2946 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
2947+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
2948+{
2949+ return cmpxchg(&v->counter, old, new);
2950+}
2951 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
2952+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
2953+{
2954+ return xchg(&v->counter, new);
2955+}
2956
2957 static inline int atomic_add_unless(atomic_t *v, int a, int u)
2958 {
2959- int c, old;
2960+ int c, old, new;
2961 c = atomic_read(v);
2962 for (;;) {
2963- if (unlikely(c == (u)))
2964+ if (unlikely(c == u))
2965 break;
2966- old = atomic_cmpxchg((v), c, c + (a));
2967+
2968+ asm volatile("addcc %2, %0, %0\n"
2969+
2970+#ifdef CONFIG_PAX_REFCOUNT
2971+ "tvs %%icc, 6\n"
2972+#endif
2973+
2974+ : "=r" (new)
2975+ : "0" (c), "ir" (a)
2976+ : "cc");
2977+
2978+ old = atomic_cmpxchg(v, c, new);
2979 if (likely(old == c))
2980 break;
2981 c = old;
2982 }
2983- return c != (u);
2984+ return c != u;
2985 }
2986
2987 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
2988@@ -90,20 +167,35 @@ static inline int atomic_add_unless(atom
2989 #define atomic64_cmpxchg(v, o, n) \
2990 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
2991 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
2992+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
2993+{
2994+ return xchg(&v->counter, new);
2995+}
2996
2997 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
2998 {
2999- long c, old;
3000+ long c, old, new;
3001 c = atomic64_read(v);
3002 for (;;) {
3003- if (unlikely(c == (u)))
3004+ if (unlikely(c == u))
3005 break;
3006- old = atomic64_cmpxchg((v), c, c + (a));
3007+
3008+ asm volatile("addcc %2, %0, %0\n"
3009+
3010+#ifdef CONFIG_PAX_REFCOUNT
3011+ "tvs %%xcc, 6\n"
3012+#endif
3013+
3014+ : "=r" (new)
3015+ : "0" (c), "ir" (a)
3016+ : "cc");
3017+
3018+ old = atomic64_cmpxchg(v, c, new);
3019 if (likely(old == c))
3020 break;
3021 c = old;
3022 }
3023- return c != (u);
3024+ return c != u;
3025 }
3026
3027 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3028diff -urNp linux-3.0.4/arch/sparc/include/asm/cache.h linux-3.0.4/arch/sparc/include/asm/cache.h
3029--- linux-3.0.4/arch/sparc/include/asm/cache.h 2011-07-21 22:17:23.000000000 -0400
3030+++ linux-3.0.4/arch/sparc/include/asm/cache.h 2011-08-23 21:47:55.000000000 -0400
3031@@ -10,7 +10,7 @@
3032 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3033
3034 #define L1_CACHE_SHIFT 5
3035-#define L1_CACHE_BYTES 32
3036+#define L1_CACHE_BYTES 32UL
3037
3038 #ifdef CONFIG_SPARC32
3039 #define SMP_CACHE_BYTES_SHIFT 5
3040diff -urNp linux-3.0.4/arch/sparc/include/asm/elf_32.h linux-3.0.4/arch/sparc/include/asm/elf_32.h
3041--- linux-3.0.4/arch/sparc/include/asm/elf_32.h 2011-07-21 22:17:23.000000000 -0400
3042+++ linux-3.0.4/arch/sparc/include/asm/elf_32.h 2011-08-23 21:47:55.000000000 -0400
3043@@ -114,6 +114,13 @@ typedef struct {
3044
3045 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3046
3047+#ifdef CONFIG_PAX_ASLR
3048+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3049+
3050+#define PAX_DELTA_MMAP_LEN 16
3051+#define PAX_DELTA_STACK_LEN 16
3052+#endif
3053+
3054 /* This yields a mask that user programs can use to figure out what
3055 instruction set this cpu supports. This can NOT be done in userspace
3056 on Sparc. */
3057diff -urNp linux-3.0.4/arch/sparc/include/asm/elf_64.h linux-3.0.4/arch/sparc/include/asm/elf_64.h
3058--- linux-3.0.4/arch/sparc/include/asm/elf_64.h 2011-09-02 18:11:21.000000000 -0400
3059+++ linux-3.0.4/arch/sparc/include/asm/elf_64.h 2011-08-23 21:47:55.000000000 -0400
3060@@ -180,6 +180,13 @@ typedef struct {
3061 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3062 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3063
3064+#ifdef CONFIG_PAX_ASLR
3065+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3066+
3067+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3068+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3069+#endif
3070+
3071 extern unsigned long sparc64_elf_hwcap;
3072 #define ELF_HWCAP sparc64_elf_hwcap
3073
3074diff -urNp linux-3.0.4/arch/sparc/include/asm/pgtable_32.h linux-3.0.4/arch/sparc/include/asm/pgtable_32.h
3075--- linux-3.0.4/arch/sparc/include/asm/pgtable_32.h 2011-07-21 22:17:23.000000000 -0400
3076+++ linux-3.0.4/arch/sparc/include/asm/pgtable_32.h 2011-08-23 21:47:55.000000000 -0400
3077@@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3078 BTFIXUPDEF_INT(page_none)
3079 BTFIXUPDEF_INT(page_copy)
3080 BTFIXUPDEF_INT(page_readonly)
3081+
3082+#ifdef CONFIG_PAX_PAGEEXEC
3083+BTFIXUPDEF_INT(page_shared_noexec)
3084+BTFIXUPDEF_INT(page_copy_noexec)
3085+BTFIXUPDEF_INT(page_readonly_noexec)
3086+#endif
3087+
3088 BTFIXUPDEF_INT(page_kernel)
3089
3090 #define PMD_SHIFT SUN4C_PMD_SHIFT
3091@@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3092 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3093 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3094
3095+#ifdef CONFIG_PAX_PAGEEXEC
3096+extern pgprot_t PAGE_SHARED_NOEXEC;
3097+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3098+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3099+#else
3100+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3101+# define PAGE_COPY_NOEXEC PAGE_COPY
3102+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3103+#endif
3104+
3105 extern unsigned long page_kernel;
3106
3107 #ifdef MODULE
3108diff -urNp linux-3.0.4/arch/sparc/include/asm/pgtsrmmu.h linux-3.0.4/arch/sparc/include/asm/pgtsrmmu.h
3109--- linux-3.0.4/arch/sparc/include/asm/pgtsrmmu.h 2011-07-21 22:17:23.000000000 -0400
3110+++ linux-3.0.4/arch/sparc/include/asm/pgtsrmmu.h 2011-08-23 21:47:55.000000000 -0400
3111@@ -115,6 +115,13 @@
3112 SRMMU_EXEC | SRMMU_REF)
3113 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3114 SRMMU_EXEC | SRMMU_REF)
3115+
3116+#ifdef CONFIG_PAX_PAGEEXEC
3117+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3118+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3119+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3120+#endif
3121+
3122 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3123 SRMMU_DIRTY | SRMMU_REF)
3124
3125diff -urNp linux-3.0.4/arch/sparc/include/asm/spinlock_64.h linux-3.0.4/arch/sparc/include/asm/spinlock_64.h
3126--- linux-3.0.4/arch/sparc/include/asm/spinlock_64.h 2011-07-21 22:17:23.000000000 -0400
3127+++ linux-3.0.4/arch/sparc/include/asm/spinlock_64.h 2011-08-23 21:47:55.000000000 -0400
3128@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(
3129
3130 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3131
3132-static void inline arch_read_lock(arch_rwlock_t *lock)
3133+static inline void arch_read_lock(arch_rwlock_t *lock)
3134 {
3135 unsigned long tmp1, tmp2;
3136
3137 __asm__ __volatile__ (
3138 "1: ldsw [%2], %0\n"
3139 " brlz,pn %0, 2f\n"
3140-"4: add %0, 1, %1\n"
3141+"4: addcc %0, 1, %1\n"
3142+
3143+#ifdef CONFIG_PAX_REFCOUNT
3144+" tvs %%icc, 6\n"
3145+#endif
3146+
3147 " cas [%2], %0, %1\n"
3148 " cmp %0, %1\n"
3149 " bne,pn %%icc, 1b\n"
3150@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_r
3151 " .previous"
3152 : "=&r" (tmp1), "=&r" (tmp2)
3153 : "r" (lock)
3154- : "memory");
3155+ : "memory", "cc");
3156 }
3157
3158-static int inline arch_read_trylock(arch_rwlock_t *lock)
3159+static inline int arch_read_trylock(arch_rwlock_t *lock)
3160 {
3161 int tmp1, tmp2;
3162
3163@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch
3164 "1: ldsw [%2], %0\n"
3165 " brlz,a,pn %0, 2f\n"
3166 " mov 0, %0\n"
3167-" add %0, 1, %1\n"
3168+" addcc %0, 1, %1\n"
3169+
3170+#ifdef CONFIG_PAX_REFCOUNT
3171+" tvs %%icc, 6\n"
3172+#endif
3173+
3174 " cas [%2], %0, %1\n"
3175 " cmp %0, %1\n"
3176 " bne,pn %%icc, 1b\n"
3177@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch
3178 return tmp1;
3179 }
3180
3181-static void inline arch_read_unlock(arch_rwlock_t *lock)
3182+static inline void arch_read_unlock(arch_rwlock_t *lock)
3183 {
3184 unsigned long tmp1, tmp2;
3185
3186 __asm__ __volatile__(
3187 "1: lduw [%2], %0\n"
3188-" sub %0, 1, %1\n"
3189+" subcc %0, 1, %1\n"
3190+
3191+#ifdef CONFIG_PAX_REFCOUNT
3192+" tvs %%icc, 6\n"
3193+#endif
3194+
3195 " cas [%2], %0, %1\n"
3196 " cmp %0, %1\n"
3197 " bne,pn %%xcc, 1b\n"
3198@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch
3199 : "memory");
3200 }
3201
3202-static void inline arch_write_lock(arch_rwlock_t *lock)
3203+static inline void arch_write_lock(arch_rwlock_t *lock)
3204 {
3205 unsigned long mask, tmp1, tmp2;
3206
3207@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_
3208 : "memory");
3209 }
3210
3211-static void inline arch_write_unlock(arch_rwlock_t *lock)
3212+static inline void arch_write_unlock(arch_rwlock_t *lock)
3213 {
3214 __asm__ __volatile__(
3215 " stw %%g0, [%0]"
3216@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arc
3217 : "memory");
3218 }
3219
3220-static int inline arch_write_trylock(arch_rwlock_t *lock)
3221+static inline int arch_write_trylock(arch_rwlock_t *lock)
3222 {
3223 unsigned long mask, tmp1, tmp2, result;
3224
3225diff -urNp linux-3.0.4/arch/sparc/include/asm/thread_info_32.h linux-3.0.4/arch/sparc/include/asm/thread_info_32.h
3226--- linux-3.0.4/arch/sparc/include/asm/thread_info_32.h 2011-07-21 22:17:23.000000000 -0400
3227+++ linux-3.0.4/arch/sparc/include/asm/thread_info_32.h 2011-08-23 21:47:55.000000000 -0400
3228@@ -50,6 +50,8 @@ struct thread_info {
3229 unsigned long w_saved;
3230
3231 struct restart_block restart_block;
3232+
3233+ unsigned long lowest_stack;
3234 };
3235
3236 /*
3237diff -urNp linux-3.0.4/arch/sparc/include/asm/thread_info_64.h linux-3.0.4/arch/sparc/include/asm/thread_info_64.h
3238--- linux-3.0.4/arch/sparc/include/asm/thread_info_64.h 2011-07-21 22:17:23.000000000 -0400
3239+++ linux-3.0.4/arch/sparc/include/asm/thread_info_64.h 2011-08-23 21:47:55.000000000 -0400
3240@@ -63,6 +63,8 @@ struct thread_info {
3241 struct pt_regs *kern_una_regs;
3242 unsigned int kern_una_insn;
3243
3244+ unsigned long lowest_stack;
3245+
3246 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3247 };
3248
3249diff -urNp linux-3.0.4/arch/sparc/include/asm/uaccess_32.h linux-3.0.4/arch/sparc/include/asm/uaccess_32.h
3250--- linux-3.0.4/arch/sparc/include/asm/uaccess_32.h 2011-07-21 22:17:23.000000000 -0400
3251+++ linux-3.0.4/arch/sparc/include/asm/uaccess_32.h 2011-08-23 21:47:55.000000000 -0400
3252@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
3253
3254 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3255 {
3256- if (n && __access_ok((unsigned long) to, n))
3257+ if ((long)n < 0)
3258+ return n;
3259+
3260+ if (n && __access_ok((unsigned long) to, n)) {
3261+ if (!__builtin_constant_p(n))
3262+ check_object_size(from, n, true);
3263 return __copy_user(to, (__force void __user *) from, n);
3264- else
3265+ } else
3266 return n;
3267 }
3268
3269 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3270 {
3271+ if ((long)n < 0)
3272+ return n;
3273+
3274+ if (!__builtin_constant_p(n))
3275+ check_object_size(from, n, true);
3276+
3277 return __copy_user(to, (__force void __user *) from, n);
3278 }
3279
3280 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3281 {
3282- if (n && __access_ok((unsigned long) from, n))
3283+ if ((long)n < 0)
3284+ return n;
3285+
3286+ if (n && __access_ok((unsigned long) from, n)) {
3287+ if (!__builtin_constant_p(n))
3288+ check_object_size(to, n, false);
3289 return __copy_user((__force void __user *) to, from, n);
3290- else
3291+ } else
3292 return n;
3293 }
3294
3295 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3296 {
3297+ if ((long)n < 0)
3298+ return n;
3299+
3300 return __copy_user((__force void __user *) to, from, n);
3301 }
3302
3303diff -urNp linux-3.0.4/arch/sparc/include/asm/uaccess_64.h linux-3.0.4/arch/sparc/include/asm/uaccess_64.h
3304--- linux-3.0.4/arch/sparc/include/asm/uaccess_64.h 2011-07-21 22:17:23.000000000 -0400
3305+++ linux-3.0.4/arch/sparc/include/asm/uaccess_64.h 2011-08-23 21:47:55.000000000 -0400
3306@@ -10,6 +10,7 @@
3307 #include <linux/compiler.h>
3308 #include <linux/string.h>
3309 #include <linux/thread_info.h>
3310+#include <linux/kernel.h>
3311 #include <asm/asi.h>
3312 #include <asm/system.h>
3313 #include <asm/spitfire.h>
3314@@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixu
3315 static inline unsigned long __must_check
3316 copy_from_user(void *to, const void __user *from, unsigned long size)
3317 {
3318- unsigned long ret = ___copy_from_user(to, from, size);
3319+ unsigned long ret;
3320
3321+ if ((long)size < 0 || size > INT_MAX)
3322+ return size;
3323+
3324+ if (!__builtin_constant_p(size))
3325+ check_object_size(to, size, false);
3326+
3327+ ret = ___copy_from_user(to, from, size);
3328 if (unlikely(ret))
3329 ret = copy_from_user_fixup(to, from, size);
3330
3331@@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(
3332 static inline unsigned long __must_check
3333 copy_to_user(void __user *to, const void *from, unsigned long size)
3334 {
3335- unsigned long ret = ___copy_to_user(to, from, size);
3336+ unsigned long ret;
3337+
3338+ if ((long)size < 0 || size > INT_MAX)
3339+ return size;
3340+
3341+ if (!__builtin_constant_p(size))
3342+ check_object_size(from, size, true);
3343
3344+ ret = ___copy_to_user(to, from, size);
3345 if (unlikely(ret))
3346 ret = copy_to_user_fixup(to, from, size);
3347 return ret;
3348diff -urNp linux-3.0.4/arch/sparc/include/asm/uaccess.h linux-3.0.4/arch/sparc/include/asm/uaccess.h
3349--- linux-3.0.4/arch/sparc/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
3350+++ linux-3.0.4/arch/sparc/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
3351@@ -1,5 +1,13 @@
3352 #ifndef ___ASM_SPARC_UACCESS_H
3353 #define ___ASM_SPARC_UACCESS_H
3354+
3355+#ifdef __KERNEL__
3356+#ifndef __ASSEMBLY__
3357+#include <linux/types.h>
3358+extern void check_object_size(const void *ptr, unsigned long n, bool to);
3359+#endif
3360+#endif
3361+
3362 #if defined(__sparc__) && defined(__arch64__)
3363 #include <asm/uaccess_64.h>
3364 #else
3365diff -urNp linux-3.0.4/arch/sparc/kernel/Makefile linux-3.0.4/arch/sparc/kernel/Makefile
3366--- linux-3.0.4/arch/sparc/kernel/Makefile 2011-07-21 22:17:23.000000000 -0400
3367+++ linux-3.0.4/arch/sparc/kernel/Makefile 2011-08-23 21:47:55.000000000 -0400
3368@@ -3,7 +3,7 @@
3369 #
3370
3371 asflags-y := -ansi
3372-ccflags-y := -Werror
3373+#ccflags-y := -Werror
3374
3375 extra-y := head_$(BITS).o
3376 extra-y += init_task.o
3377diff -urNp linux-3.0.4/arch/sparc/kernel/process_32.c linux-3.0.4/arch/sparc/kernel/process_32.c
3378--- linux-3.0.4/arch/sparc/kernel/process_32.c 2011-07-21 22:17:23.000000000 -0400
3379+++ linux-3.0.4/arch/sparc/kernel/process_32.c 2011-08-23 21:48:14.000000000 -0400
3380@@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
3381 rw->ins[4], rw->ins[5],
3382 rw->ins[6],
3383 rw->ins[7]);
3384- printk("%pS\n", (void *) rw->ins[7]);
3385+ printk("%pA\n", (void *) rw->ins[7]);
3386 rw = (struct reg_window32 *) rw->ins[6];
3387 }
3388 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3389@@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
3390
3391 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3392 r->psr, r->pc, r->npc, r->y, print_tainted());
3393- printk("PC: <%pS>\n", (void *) r->pc);
3394+ printk("PC: <%pA>\n", (void *) r->pc);
3395 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3396 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3397 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3398 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3399 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3400 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3401- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3402+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3403
3404 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3405 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3406@@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk,
3407 rw = (struct reg_window32 *) fp;
3408 pc = rw->ins[7];
3409 printk("[%08lx : ", pc);
3410- printk("%pS ] ", (void *) pc);
3411+ printk("%pA ] ", (void *) pc);
3412 fp = rw->ins[6];
3413 } while (++count < 16);
3414 printk("\n");
3415diff -urNp linux-3.0.4/arch/sparc/kernel/process_64.c linux-3.0.4/arch/sparc/kernel/process_64.c
3416--- linux-3.0.4/arch/sparc/kernel/process_64.c 2011-07-21 22:17:23.000000000 -0400
3417+++ linux-3.0.4/arch/sparc/kernel/process_64.c 2011-08-23 21:48:14.000000000 -0400
3418@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
3419 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3420 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3421 if (regs->tstate & TSTATE_PRIV)
3422- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3423+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3424 }
3425
3426 void show_regs(struct pt_regs *regs)
3427 {
3428 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3429 regs->tpc, regs->tnpc, regs->y, print_tainted());
3430- printk("TPC: <%pS>\n", (void *) regs->tpc);
3431+ printk("TPC: <%pA>\n", (void *) regs->tpc);
3432 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3433 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3434 regs->u_regs[3]);
3435@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3436 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3437 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3438 regs->u_regs[15]);
3439- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3440+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3441 show_regwindow(regs);
3442 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3443 }
3444@@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void
3445 ((tp && tp->task) ? tp->task->pid : -1));
3446
3447 if (gp->tstate & TSTATE_PRIV) {
3448- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3449+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3450 (void *) gp->tpc,
3451 (void *) gp->o7,
3452 (void *) gp->i7,
3453diff -urNp linux-3.0.4/arch/sparc/kernel/sys_sparc_32.c linux-3.0.4/arch/sparc/kernel/sys_sparc_32.c
3454--- linux-3.0.4/arch/sparc/kernel/sys_sparc_32.c 2011-07-21 22:17:23.000000000 -0400
3455+++ linux-3.0.4/arch/sparc/kernel/sys_sparc_32.c 2011-08-23 21:47:55.000000000 -0400
3456@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(str
3457 if (ARCH_SUN4C && len > 0x20000000)
3458 return -ENOMEM;
3459 if (!addr)
3460- addr = TASK_UNMAPPED_BASE;
3461+ addr = current->mm->mmap_base;
3462
3463 if (flags & MAP_SHARED)
3464 addr = COLOUR_ALIGN(addr);
3465@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(str
3466 }
3467 if (TASK_SIZE - PAGE_SIZE - len < addr)
3468 return -ENOMEM;
3469- if (!vmm || addr + len <= vmm->vm_start)
3470+ if (check_heap_stack_gap(vmm, addr, len))
3471 return addr;
3472 addr = vmm->vm_end;
3473 if (flags & MAP_SHARED)
3474diff -urNp linux-3.0.4/arch/sparc/kernel/sys_sparc_64.c linux-3.0.4/arch/sparc/kernel/sys_sparc_64.c
3475--- linux-3.0.4/arch/sparc/kernel/sys_sparc_64.c 2011-07-21 22:17:23.000000000 -0400
3476+++ linux-3.0.4/arch/sparc/kernel/sys_sparc_64.c 2011-08-23 21:47:55.000000000 -0400
3477@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str
3478 /* We do not accept a shared mapping if it would violate
3479 * cache aliasing constraints.
3480 */
3481- if ((flags & MAP_SHARED) &&
3482+ if ((filp || (flags & MAP_SHARED)) &&
3483 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3484 return -EINVAL;
3485 return addr;
3486@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(str
3487 if (filp || (flags & MAP_SHARED))
3488 do_color_align = 1;
3489
3490+#ifdef CONFIG_PAX_RANDMMAP
3491+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3492+#endif
3493+
3494 if (addr) {
3495 if (do_color_align)
3496 addr = COLOUR_ALIGN(addr, pgoff);
3497@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(str
3498 addr = PAGE_ALIGN(addr);
3499
3500 vma = find_vma(mm, addr);
3501- if (task_size - len >= addr &&
3502- (!vma || addr + len <= vma->vm_start))
3503+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3504 return addr;
3505 }
3506
3507 if (len > mm->cached_hole_size) {
3508- start_addr = addr = mm->free_area_cache;
3509+ start_addr = addr = mm->free_area_cache;
3510 } else {
3511- start_addr = addr = TASK_UNMAPPED_BASE;
3512+ start_addr = addr = mm->mmap_base;
3513 mm->cached_hole_size = 0;
3514 }
3515
3516@@ -174,14 +177,14 @@ full_search:
3517 vma = find_vma(mm, VA_EXCLUDE_END);
3518 }
3519 if (unlikely(task_size < addr)) {
3520- if (start_addr != TASK_UNMAPPED_BASE) {
3521- start_addr = addr = TASK_UNMAPPED_BASE;
3522+ if (start_addr != mm->mmap_base) {
3523+ start_addr = addr = mm->mmap_base;
3524 mm->cached_hole_size = 0;
3525 goto full_search;
3526 }
3527 return -ENOMEM;
3528 }
3529- if (likely(!vma || addr + len <= vma->vm_start)) {
3530+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3531 /*
3532 * Remember the place where we stopped the search:
3533 */
3534@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct fi
3535 /* We do not accept a shared mapping if it would violate
3536 * cache aliasing constraints.
3537 */
3538- if ((flags & MAP_SHARED) &&
3539+ if ((filp || (flags & MAP_SHARED)) &&
3540 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3541 return -EINVAL;
3542 return addr;
3543@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct fi
3544 addr = PAGE_ALIGN(addr);
3545
3546 vma = find_vma(mm, addr);
3547- if (task_size - len >= addr &&
3548- (!vma || addr + len <= vma->vm_start))
3549+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3550 return addr;
3551 }
3552
3553@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct fi
3554 /* make sure it can fit in the remaining address space */
3555 if (likely(addr > len)) {
3556 vma = find_vma(mm, addr-len);
3557- if (!vma || addr <= vma->vm_start) {
3558+ if (check_heap_stack_gap(vma, addr - len, len)) {
3559 /* remember the address as a hint for next time */
3560 return (mm->free_area_cache = addr-len);
3561 }
3562@@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct fi
3563 if (unlikely(mm->mmap_base < len))
3564 goto bottomup;
3565
3566- addr = mm->mmap_base-len;
3567- if (do_color_align)
3568- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3569+ addr = mm->mmap_base - len;
3570
3571 do {
3572+ if (do_color_align)
3573+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3574 /*
3575 * Lookup failure means no vma is above this address,
3576 * else if new region fits below vma->vm_start,
3577 * return with success:
3578 */
3579 vma = find_vma(mm, addr);
3580- if (likely(!vma || addr+len <= vma->vm_start)) {
3581+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3582 /* remember the address as a hint for next time */
3583 return (mm->free_area_cache = addr);
3584 }
3585@@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct fi
3586 mm->cached_hole_size = vma->vm_start - addr;
3587
3588 /* try just below the current vma->vm_start */
3589- addr = vma->vm_start-len;
3590- if (do_color_align)
3591- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3592- } while (likely(len < vma->vm_start));
3593+ addr = skip_heap_stack_gap(vma, len);
3594+ } while (!IS_ERR_VALUE(addr));
3595
3596 bottomup:
3597 /*
3598@@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_str
3599 gap == RLIM_INFINITY ||
3600 sysctl_legacy_va_layout) {
3601 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
3602+
3603+#ifdef CONFIG_PAX_RANDMMAP
3604+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3605+ mm->mmap_base += mm->delta_mmap;
3606+#endif
3607+
3608 mm->get_unmapped_area = arch_get_unmapped_area;
3609 mm->unmap_area = arch_unmap_area;
3610 } else {
3611@@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_str
3612 gap = (task_size / 6 * 5);
3613
3614 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
3615+
3616+#ifdef CONFIG_PAX_RANDMMAP
3617+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3618+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3619+#endif
3620+
3621 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3622 mm->unmap_area = arch_unmap_area_topdown;
3623 }
3624diff -urNp linux-3.0.4/arch/sparc/kernel/traps_32.c linux-3.0.4/arch/sparc/kernel/traps_32.c
3625--- linux-3.0.4/arch/sparc/kernel/traps_32.c 2011-07-21 22:17:23.000000000 -0400
3626+++ linux-3.0.4/arch/sparc/kernel/traps_32.c 2011-08-23 21:48:14.000000000 -0400
3627@@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
3628 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
3629 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
3630
3631+extern void gr_handle_kernel_exploit(void);
3632+
3633 void die_if_kernel(char *str, struct pt_regs *regs)
3634 {
3635 static int die_counter;
3636@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
3637 count++ < 30 &&
3638 (((unsigned long) rw) >= PAGE_OFFSET) &&
3639 !(((unsigned long) rw) & 0x7)) {
3640- printk("Caller[%08lx]: %pS\n", rw->ins[7],
3641+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
3642 (void *) rw->ins[7]);
3643 rw = (struct reg_window32 *)rw->ins[6];
3644 }
3645 }
3646 printk("Instruction DUMP:");
3647 instruction_dump ((unsigned long *) regs->pc);
3648- if(regs->psr & PSR_PS)
3649+ if(regs->psr & PSR_PS) {
3650+ gr_handle_kernel_exploit();
3651 do_exit(SIGKILL);
3652+ }
3653 do_exit(SIGSEGV);
3654 }
3655
3656diff -urNp linux-3.0.4/arch/sparc/kernel/traps_64.c linux-3.0.4/arch/sparc/kernel/traps_64.c
3657--- linux-3.0.4/arch/sparc/kernel/traps_64.c 2011-07-21 22:17:23.000000000 -0400
3658+++ linux-3.0.4/arch/sparc/kernel/traps_64.c 2011-08-23 21:48:14.000000000 -0400
3659@@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_
3660 i + 1,
3661 p->trapstack[i].tstate, p->trapstack[i].tpc,
3662 p->trapstack[i].tnpc, p->trapstack[i].tt);
3663- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
3664+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
3665 }
3666 }
3667
3668@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long
3669
3670 lvl -= 0x100;
3671 if (regs->tstate & TSTATE_PRIV) {
3672+
3673+#ifdef CONFIG_PAX_REFCOUNT
3674+ if (lvl == 6)
3675+ pax_report_refcount_overflow(regs);
3676+#endif
3677+
3678 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
3679 die_if_kernel(buffer, regs);
3680 }
3681@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long
3682 void bad_trap_tl1(struct pt_regs *regs, long lvl)
3683 {
3684 char buffer[32];
3685-
3686+
3687 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
3688 0, lvl, SIGTRAP) == NOTIFY_STOP)
3689 return;
3690
3691+#ifdef CONFIG_PAX_REFCOUNT
3692+ if (lvl == 6)
3693+ pax_report_refcount_overflow(regs);
3694+#endif
3695+
3696 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
3697
3698 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
3699@@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt
3700 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
3701 printk("%s" "ERROR(%d): ",
3702 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
3703- printk("TPC<%pS>\n", (void *) regs->tpc);
3704+ printk("TPC<%pA>\n", (void *) regs->tpc);
3705 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
3706 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
3707 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
3708@@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type,
3709 smp_processor_id(),
3710 (type & 0x1) ? 'I' : 'D',
3711 regs->tpc);
3712- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
3713+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
3714 panic("Irrecoverable Cheetah+ parity error.");
3715 }
3716
3717@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type,
3718 smp_processor_id(),
3719 (type & 0x1) ? 'I' : 'D',
3720 regs->tpc);
3721- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
3722+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
3723 }
3724
3725 struct sun4v_error_entry {
3726@@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_r
3727
3728 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
3729 regs->tpc, tl);
3730- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
3731+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
3732 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3733- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
3734+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
3735 (void *) regs->u_regs[UREG_I7]);
3736 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
3737 "pte[%lx] error[%lx]\n",
3738@@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_r
3739
3740 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
3741 regs->tpc, tl);
3742- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
3743+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
3744 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3745- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
3746+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
3747 (void *) regs->u_regs[UREG_I7]);
3748 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
3749 "pte[%lx] error[%lx]\n",
3750@@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk,
3751 fp = (unsigned long)sf->fp + STACK_BIAS;
3752 }
3753
3754- printk(" [%016lx] %pS\n", pc, (void *) pc);
3755+ printk(" [%016lx] %pA\n", pc, (void *) pc);
3756 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3757 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
3758 int index = tsk->curr_ret_stack;
3759 if (tsk->ret_stack && index >= graph) {
3760 pc = tsk->ret_stack[index - graph].ret;
3761- printk(" [%016lx] %pS\n", pc, (void *) pc);
3762+ printk(" [%016lx] %pA\n", pc, (void *) pc);
3763 graph++;
3764 }
3765 }
3766@@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_
3767 return (struct reg_window *) (fp + STACK_BIAS);
3768 }
3769
3770+extern void gr_handle_kernel_exploit(void);
3771+
3772 void die_if_kernel(char *str, struct pt_regs *regs)
3773 {
3774 static int die_counter;
3775@@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_
3776 while (rw &&
3777 count++ < 30 &&
3778 kstack_valid(tp, (unsigned long) rw)) {
3779- printk("Caller[%016lx]: %pS\n", rw->ins[7],
3780+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
3781 (void *) rw->ins[7]);
3782
3783 rw = kernel_stack_up(rw);
3784@@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_
3785 }
3786 user_instruction_dump ((unsigned int __user *) regs->tpc);
3787 }
3788- if (regs->tstate & TSTATE_PRIV)
3789+ if (regs->tstate & TSTATE_PRIV) {
3790+ gr_handle_kernel_exploit();
3791 do_exit(SIGKILL);
3792+ }
3793 do_exit(SIGSEGV);
3794 }
3795 EXPORT_SYMBOL(die_if_kernel);
3796diff -urNp linux-3.0.4/arch/sparc/kernel/unaligned_64.c linux-3.0.4/arch/sparc/kernel/unaligned_64.c
3797--- linux-3.0.4/arch/sparc/kernel/unaligned_64.c 2011-09-02 18:11:21.000000000 -0400
3798+++ linux-3.0.4/arch/sparc/kernel/unaligned_64.c 2011-08-23 21:48:14.000000000 -0400
3799@@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs
3800 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
3801
3802 if (__ratelimit(&ratelimit)) {
3803- printk("Kernel unaligned access at TPC[%lx] %pS\n",
3804+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
3805 regs->tpc, (void *) regs->tpc);
3806 }
3807 }
3808diff -urNp linux-3.0.4/arch/sparc/lib/atomic_64.S linux-3.0.4/arch/sparc/lib/atomic_64.S
3809--- linux-3.0.4/arch/sparc/lib/atomic_64.S 2011-07-21 22:17:23.000000000 -0400
3810+++ linux-3.0.4/arch/sparc/lib/atomic_64.S 2011-08-23 21:47:55.000000000 -0400
3811@@ -18,7 +18,12 @@
3812 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
3813 BACKOFF_SETUP(%o2)
3814 1: lduw [%o1], %g1
3815- add %g1, %o0, %g7
3816+ addcc %g1, %o0, %g7
3817+
3818+#ifdef CONFIG_PAX_REFCOUNT
3819+ tvs %icc, 6
3820+#endif
3821+
3822 cas [%o1], %g1, %g7
3823 cmp %g1, %g7
3824 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3825@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
3826 2: BACKOFF_SPIN(%o2, %o3, 1b)
3827 .size atomic_add, .-atomic_add
3828
3829+ .globl atomic_add_unchecked
3830+ .type atomic_add_unchecked,#function
3831+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3832+ BACKOFF_SETUP(%o2)
3833+1: lduw [%o1], %g1
3834+ add %g1, %o0, %g7
3835+ cas [%o1], %g1, %g7
3836+ cmp %g1, %g7
3837+ bne,pn %icc, 2f
3838+ nop
3839+ retl
3840+ nop
3841+2: BACKOFF_SPIN(%o2, %o3, 1b)
3842+ .size atomic_add_unchecked, .-atomic_add_unchecked
3843+
3844 .globl atomic_sub
3845 .type atomic_sub,#function
3846 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3847 BACKOFF_SETUP(%o2)
3848 1: lduw [%o1], %g1
3849- sub %g1, %o0, %g7
3850+ subcc %g1, %o0, %g7
3851+
3852+#ifdef CONFIG_PAX_REFCOUNT
3853+ tvs %icc, 6
3854+#endif
3855+
3856 cas [%o1], %g1, %g7
3857 cmp %g1, %g7
3858 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3859@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
3860 2: BACKOFF_SPIN(%o2, %o3, 1b)
3861 .size atomic_sub, .-atomic_sub
3862
3863+ .globl atomic_sub_unchecked
3864+ .type atomic_sub_unchecked,#function
3865+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3866+ BACKOFF_SETUP(%o2)
3867+1: lduw [%o1], %g1
3868+ sub %g1, %o0, %g7
3869+ cas [%o1], %g1, %g7
3870+ cmp %g1, %g7
3871+ bne,pn %icc, 2f
3872+ nop
3873+ retl
3874+ nop
3875+2: BACKOFF_SPIN(%o2, %o3, 1b)
3876+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
3877+
3878 .globl atomic_add_ret
3879 .type atomic_add_ret,#function
3880 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3881 BACKOFF_SETUP(%o2)
3882 1: lduw [%o1], %g1
3883- add %g1, %o0, %g7
3884+ addcc %g1, %o0, %g7
3885+
3886+#ifdef CONFIG_PAX_REFCOUNT
3887+ tvs %icc, 6
3888+#endif
3889+
3890 cas [%o1], %g1, %g7
3891 cmp %g1, %g7
3892 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3893@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1
3894 2: BACKOFF_SPIN(%o2, %o3, 1b)
3895 .size atomic_add_ret, .-atomic_add_ret
3896
3897+ .globl atomic_add_ret_unchecked
3898+ .type atomic_add_ret_unchecked,#function
3899+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3900+ BACKOFF_SETUP(%o2)
3901+1: lduw [%o1], %g1
3902+ addcc %g1, %o0, %g7
3903+ cas [%o1], %g1, %g7
3904+ cmp %g1, %g7
3905+ bne,pn %icc, 2f
3906+ add %g7, %o0, %g7
3907+ sra %g7, 0, %o0
3908+ retl
3909+ nop
3910+2: BACKOFF_SPIN(%o2, %o3, 1b)
3911+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
3912+
3913 .globl atomic_sub_ret
3914 .type atomic_sub_ret,#function
3915 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
3916 BACKOFF_SETUP(%o2)
3917 1: lduw [%o1], %g1
3918- sub %g1, %o0, %g7
3919+ subcc %g1, %o0, %g7
3920+
3921+#ifdef CONFIG_PAX_REFCOUNT
3922+ tvs %icc, 6
3923+#endif
3924+
3925 cas [%o1], %g1, %g7
3926 cmp %g1, %g7
3927 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3928@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
3929 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
3930 BACKOFF_SETUP(%o2)
3931 1: ldx [%o1], %g1
3932- add %g1, %o0, %g7
3933+ addcc %g1, %o0, %g7
3934+
3935+#ifdef CONFIG_PAX_REFCOUNT
3936+ tvs %xcc, 6
3937+#endif
3938+
3939 casx [%o1], %g1, %g7
3940 cmp %g1, %g7
3941 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3942@@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 =
3943 2: BACKOFF_SPIN(%o2, %o3, 1b)
3944 .size atomic64_add, .-atomic64_add
3945
3946+ .globl atomic64_add_unchecked
3947+ .type atomic64_add_unchecked,#function
3948+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3949+ BACKOFF_SETUP(%o2)
3950+1: ldx [%o1], %g1
3951+ addcc %g1, %o0, %g7
3952+ casx [%o1], %g1, %g7
3953+ cmp %g1, %g7
3954+ bne,pn %xcc, 2f
3955+ nop
3956+ retl
3957+ nop
3958+2: BACKOFF_SPIN(%o2, %o3, 1b)
3959+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
3960+
3961 .globl atomic64_sub
3962 .type atomic64_sub,#function
3963 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3964 BACKOFF_SETUP(%o2)
3965 1: ldx [%o1], %g1
3966- sub %g1, %o0, %g7
3967+ subcc %g1, %o0, %g7
3968+
3969+#ifdef CONFIG_PAX_REFCOUNT
3970+ tvs %xcc, 6
3971+#endif
3972+
3973 casx [%o1], %g1, %g7
3974 cmp %g1, %g7
3975 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3976@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
3977 2: BACKOFF_SPIN(%o2, %o3, 1b)
3978 .size atomic64_sub, .-atomic64_sub
3979
3980+ .globl atomic64_sub_unchecked
3981+ .type atomic64_sub_unchecked,#function
3982+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3983+ BACKOFF_SETUP(%o2)
3984+1: ldx [%o1], %g1
3985+ subcc %g1, %o0, %g7
3986+ casx [%o1], %g1, %g7
3987+ cmp %g1, %g7
3988+ bne,pn %xcc, 2f
3989+ nop
3990+ retl
3991+ nop
3992+2: BACKOFF_SPIN(%o2, %o3, 1b)
3993+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
3994+
3995 .globl atomic64_add_ret
3996 .type atomic64_add_ret,#function
3997 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3998 BACKOFF_SETUP(%o2)
3999 1: ldx [%o1], %g1
4000- add %g1, %o0, %g7
4001+ addcc %g1, %o0, %g7
4002+
4003+#ifdef CONFIG_PAX_REFCOUNT
4004+ tvs %xcc, 6
4005+#endif
4006+
4007 casx [%o1], %g1, %g7
4008 cmp %g1, %g7
4009 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4010@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4011 2: BACKOFF_SPIN(%o2, %o3, 1b)
4012 .size atomic64_add_ret, .-atomic64_add_ret
4013
4014+ .globl atomic64_add_ret_unchecked
4015+ .type atomic64_add_ret_unchecked,#function
4016+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4017+ BACKOFF_SETUP(%o2)
4018+1: ldx [%o1], %g1
4019+ addcc %g1, %o0, %g7
4020+ casx [%o1], %g1, %g7
4021+ cmp %g1, %g7
4022+ bne,pn %xcc, 2f
4023+ add %g7, %o0, %g7
4024+ mov %g7, %o0
4025+ retl
4026+ nop
4027+2: BACKOFF_SPIN(%o2, %o3, 1b)
4028+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4029+
4030 .globl atomic64_sub_ret
4031 .type atomic64_sub_ret,#function
4032 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4033 BACKOFF_SETUP(%o2)
4034 1: ldx [%o1], %g1
4035- sub %g1, %o0, %g7
4036+ subcc %g1, %o0, %g7
4037+
4038+#ifdef CONFIG_PAX_REFCOUNT
4039+ tvs %xcc, 6
4040+#endif
4041+
4042 casx [%o1], %g1, %g7
4043 cmp %g1, %g7
4044 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4045diff -urNp linux-3.0.4/arch/sparc/lib/ksyms.c linux-3.0.4/arch/sparc/lib/ksyms.c
4046--- linux-3.0.4/arch/sparc/lib/ksyms.c 2011-07-21 22:17:23.000000000 -0400
4047+++ linux-3.0.4/arch/sparc/lib/ksyms.c 2011-08-23 21:48:14.000000000 -0400
4048@@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4049
4050 /* Atomic counter implementation. */
4051 EXPORT_SYMBOL(atomic_add);
4052+EXPORT_SYMBOL(atomic_add_unchecked);
4053 EXPORT_SYMBOL(atomic_add_ret);
4054+EXPORT_SYMBOL(atomic_add_ret_unchecked);
4055 EXPORT_SYMBOL(atomic_sub);
4056+EXPORT_SYMBOL(atomic_sub_unchecked);
4057 EXPORT_SYMBOL(atomic_sub_ret);
4058 EXPORT_SYMBOL(atomic64_add);
4059+EXPORT_SYMBOL(atomic64_add_unchecked);
4060 EXPORT_SYMBOL(atomic64_add_ret);
4061+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4062 EXPORT_SYMBOL(atomic64_sub);
4063+EXPORT_SYMBOL(atomic64_sub_unchecked);
4064 EXPORT_SYMBOL(atomic64_sub_ret);
4065
4066 /* Atomic bit operations. */
4067diff -urNp linux-3.0.4/arch/sparc/lib/Makefile linux-3.0.4/arch/sparc/lib/Makefile
4068--- linux-3.0.4/arch/sparc/lib/Makefile 2011-09-02 18:11:21.000000000 -0400
4069+++ linux-3.0.4/arch/sparc/lib/Makefile 2011-08-23 21:47:55.000000000 -0400
4070@@ -2,7 +2,7 @@
4071 #
4072
4073 asflags-y := -ansi -DST_DIV0=0x02
4074-ccflags-y := -Werror
4075+#ccflags-y := -Werror
4076
4077 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4078 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4079diff -urNp linux-3.0.4/arch/sparc/Makefile linux-3.0.4/arch/sparc/Makefile
4080--- linux-3.0.4/arch/sparc/Makefile 2011-07-21 22:17:23.000000000 -0400
4081+++ linux-3.0.4/arch/sparc/Makefile 2011-08-23 21:48:14.000000000 -0400
4082@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
4083 # Export what is needed by arch/sparc/boot/Makefile
4084 export VMLINUX_INIT VMLINUX_MAIN
4085 VMLINUX_INIT := $(head-y) $(init-y)
4086-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4087+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4088 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4089 VMLINUX_MAIN += $(drivers-y) $(net-y)
4090
4091diff -urNp linux-3.0.4/arch/sparc/mm/fault_32.c linux-3.0.4/arch/sparc/mm/fault_32.c
4092--- linux-3.0.4/arch/sparc/mm/fault_32.c 2011-07-21 22:17:23.000000000 -0400
4093+++ linux-3.0.4/arch/sparc/mm/fault_32.c 2011-08-23 21:47:55.000000000 -0400
4094@@ -22,6 +22,9 @@
4095 #include <linux/interrupt.h>
4096 #include <linux/module.h>
4097 #include <linux/kdebug.h>
4098+#include <linux/slab.h>
4099+#include <linux/pagemap.h>
4100+#include <linux/compiler.h>
4101
4102 #include <asm/system.h>
4103 #include <asm/page.h>
4104@@ -209,6 +212,268 @@ static unsigned long compute_si_addr(str
4105 return safe_compute_effective_address(regs, insn);
4106 }
4107
4108+#ifdef CONFIG_PAX_PAGEEXEC
4109+#ifdef CONFIG_PAX_DLRESOLVE
4110+static void pax_emuplt_close(struct vm_area_struct *vma)
4111+{
4112+ vma->vm_mm->call_dl_resolve = 0UL;
4113+}
4114+
4115+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4116+{
4117+ unsigned int *kaddr;
4118+
4119+ vmf->page = alloc_page(GFP_HIGHUSER);
4120+ if (!vmf->page)
4121+ return VM_FAULT_OOM;
4122+
4123+ kaddr = kmap(vmf->page);
4124+ memset(kaddr, 0, PAGE_SIZE);
4125+ kaddr[0] = 0x9DE3BFA8U; /* save */
4126+ flush_dcache_page(vmf->page);
4127+ kunmap(vmf->page);
4128+ return VM_FAULT_MAJOR;
4129+}
4130+
4131+static const struct vm_operations_struct pax_vm_ops = {
4132+ .close = pax_emuplt_close,
4133+ .fault = pax_emuplt_fault
4134+};
4135+
4136+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4137+{
4138+ int ret;
4139+
4140+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4141+ vma->vm_mm = current->mm;
4142+ vma->vm_start = addr;
4143+ vma->vm_end = addr + PAGE_SIZE;
4144+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4145+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4146+ vma->vm_ops = &pax_vm_ops;
4147+
4148+ ret = insert_vm_struct(current->mm, vma);
4149+ if (ret)
4150+ return ret;
4151+
4152+ ++current->mm->total_vm;
4153+ return 0;
4154+}
4155+#endif
4156+
4157+/*
4158+ * PaX: decide what to do with offenders (regs->pc = fault address)
4159+ *
4160+ * returns 1 when task should be killed
4161+ * 2 when patched PLT trampoline was detected
4162+ * 3 when unpatched PLT trampoline was detected
4163+ */
4164+static int pax_handle_fetch_fault(struct pt_regs *regs)
4165+{
4166+
4167+#ifdef CONFIG_PAX_EMUPLT
4168+ int err;
4169+
4170+ do { /* PaX: patched PLT emulation #1 */
4171+ unsigned int sethi1, sethi2, jmpl;
4172+
4173+ err = get_user(sethi1, (unsigned int *)regs->pc);
4174+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4175+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4176+
4177+ if (err)
4178+ break;
4179+
4180+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4181+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4182+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4183+ {
4184+ unsigned int addr;
4185+
4186+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4187+ addr = regs->u_regs[UREG_G1];
4188+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4189+ regs->pc = addr;
4190+ regs->npc = addr+4;
4191+ return 2;
4192+ }
4193+ } while (0);
4194+
4195+ { /* PaX: patched PLT emulation #2 */
4196+ unsigned int ba;
4197+
4198+ err = get_user(ba, (unsigned int *)regs->pc);
4199+
4200+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4201+ unsigned int addr;
4202+
4203+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4204+ regs->pc = addr;
4205+ regs->npc = addr+4;
4206+ return 2;
4207+ }
4208+ }
4209+
4210+ do { /* PaX: patched PLT emulation #3 */
4211+ unsigned int sethi, jmpl, nop;
4212+
4213+ err = get_user(sethi, (unsigned int *)regs->pc);
4214+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4215+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4216+
4217+ if (err)
4218+ break;
4219+
4220+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4221+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4222+ nop == 0x01000000U)
4223+ {
4224+ unsigned int addr;
4225+
4226+ addr = (sethi & 0x003FFFFFU) << 10;
4227+ regs->u_regs[UREG_G1] = addr;
4228+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4229+ regs->pc = addr;
4230+ regs->npc = addr+4;
4231+ return 2;
4232+ }
4233+ } while (0);
4234+
4235+ do { /* PaX: unpatched PLT emulation step 1 */
4236+ unsigned int sethi, ba, nop;
4237+
4238+ err = get_user(sethi, (unsigned int *)regs->pc);
4239+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
4240+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4241+
4242+ if (err)
4243+ break;
4244+
4245+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4246+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4247+ nop == 0x01000000U)
4248+ {
4249+ unsigned int addr, save, call;
4250+
4251+ if ((ba & 0xFFC00000U) == 0x30800000U)
4252+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4253+ else
4254+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4255+
4256+ err = get_user(save, (unsigned int *)addr);
4257+ err |= get_user(call, (unsigned int *)(addr+4));
4258+ err |= get_user(nop, (unsigned int *)(addr+8));
4259+ if (err)
4260+ break;
4261+
4262+#ifdef CONFIG_PAX_DLRESOLVE
4263+ if (save == 0x9DE3BFA8U &&
4264+ (call & 0xC0000000U) == 0x40000000U &&
4265+ nop == 0x01000000U)
4266+ {
4267+ struct vm_area_struct *vma;
4268+ unsigned long call_dl_resolve;
4269+
4270+ down_read(&current->mm->mmap_sem);
4271+ call_dl_resolve = current->mm->call_dl_resolve;
4272+ up_read(&current->mm->mmap_sem);
4273+ if (likely(call_dl_resolve))
4274+ goto emulate;
4275+
4276+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4277+
4278+ down_write(&current->mm->mmap_sem);
4279+ if (current->mm->call_dl_resolve) {
4280+ call_dl_resolve = current->mm->call_dl_resolve;
4281+ up_write(&current->mm->mmap_sem);
4282+ if (vma)
4283+ kmem_cache_free(vm_area_cachep, vma);
4284+ goto emulate;
4285+ }
4286+
4287+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4288+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4289+ up_write(&current->mm->mmap_sem);
4290+ if (vma)
4291+ kmem_cache_free(vm_area_cachep, vma);
4292+ return 1;
4293+ }
4294+
4295+ if (pax_insert_vma(vma, call_dl_resolve)) {
4296+ up_write(&current->mm->mmap_sem);
4297+ kmem_cache_free(vm_area_cachep, vma);
4298+ return 1;
4299+ }
4300+
4301+ current->mm->call_dl_resolve = call_dl_resolve;
4302+ up_write(&current->mm->mmap_sem);
4303+
4304+emulate:
4305+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4306+ regs->pc = call_dl_resolve;
4307+ regs->npc = addr+4;
4308+ return 3;
4309+ }
4310+#endif
4311+
4312+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4313+ if ((save & 0xFFC00000U) == 0x05000000U &&
4314+ (call & 0xFFFFE000U) == 0x85C0A000U &&
4315+ nop == 0x01000000U)
4316+ {
4317+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4318+ regs->u_regs[UREG_G2] = addr + 4;
4319+ addr = (save & 0x003FFFFFU) << 10;
4320+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4321+ regs->pc = addr;
4322+ regs->npc = addr+4;
4323+ return 3;
4324+ }
4325+ }
4326+ } while (0);
4327+
4328+ do { /* PaX: unpatched PLT emulation step 2 */
4329+ unsigned int save, call, nop;
4330+
4331+ err = get_user(save, (unsigned int *)(regs->pc-4));
4332+ err |= get_user(call, (unsigned int *)regs->pc);
4333+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
4334+ if (err)
4335+ break;
4336+
4337+ if (save == 0x9DE3BFA8U &&
4338+ (call & 0xC0000000U) == 0x40000000U &&
4339+ nop == 0x01000000U)
4340+ {
4341+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4342+
4343+ regs->u_regs[UREG_RETPC] = regs->pc;
4344+ regs->pc = dl_resolve;
4345+ regs->npc = dl_resolve+4;
4346+ return 3;
4347+ }
4348+ } while (0);
4349+#endif
4350+
4351+ return 1;
4352+}
4353+
4354+void pax_report_insns(void *pc, void *sp)
4355+{
4356+ unsigned long i;
4357+
4358+ printk(KERN_ERR "PAX: bytes at PC: ");
4359+ for (i = 0; i < 8; i++) {
4360+ unsigned int c;
4361+ if (get_user(c, (unsigned int *)pc+i))
4362+ printk(KERN_CONT "???????? ");
4363+ else
4364+ printk(KERN_CONT "%08x ", c);
4365+ }
4366+ printk("\n");
4367+}
4368+#endif
4369+
4370 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4371 int text_fault)
4372 {
4373@@ -281,6 +546,24 @@ good_area:
4374 if(!(vma->vm_flags & VM_WRITE))
4375 goto bad_area;
4376 } else {
4377+
4378+#ifdef CONFIG_PAX_PAGEEXEC
4379+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4380+ up_read(&mm->mmap_sem);
4381+ switch (pax_handle_fetch_fault(regs)) {
4382+
4383+#ifdef CONFIG_PAX_EMUPLT
4384+ case 2:
4385+ case 3:
4386+ return;
4387+#endif
4388+
4389+ }
4390+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4391+ do_group_exit(SIGKILL);
4392+ }
4393+#endif
4394+
4395 /* Allow reads even for write-only mappings */
4396 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4397 goto bad_area;
4398diff -urNp linux-3.0.4/arch/sparc/mm/fault_64.c linux-3.0.4/arch/sparc/mm/fault_64.c
4399--- linux-3.0.4/arch/sparc/mm/fault_64.c 2011-07-21 22:17:23.000000000 -0400
4400+++ linux-3.0.4/arch/sparc/mm/fault_64.c 2011-08-23 21:48:14.000000000 -0400
4401@@ -21,6 +21,9 @@
4402 #include <linux/kprobes.h>
4403 #include <linux/kdebug.h>
4404 #include <linux/percpu.h>
4405+#include <linux/slab.h>
4406+#include <linux/pagemap.h>
4407+#include <linux/compiler.h>
4408
4409 #include <asm/page.h>
4410 #include <asm/pgtable.h>
4411@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(stru
4412 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4413 regs->tpc);
4414 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4415- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4416+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4417 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4418 dump_stack();
4419 unhandled_fault(regs->tpc, current, regs);
4420@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32b
4421 show_regs(regs);
4422 }
4423
4424+#ifdef CONFIG_PAX_PAGEEXEC
4425+#ifdef CONFIG_PAX_DLRESOLVE
4426+static void pax_emuplt_close(struct vm_area_struct *vma)
4427+{
4428+ vma->vm_mm->call_dl_resolve = 0UL;
4429+}
4430+
4431+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4432+{
4433+ unsigned int *kaddr;
4434+
4435+ vmf->page = alloc_page(GFP_HIGHUSER);
4436+ if (!vmf->page)
4437+ return VM_FAULT_OOM;
4438+
4439+ kaddr = kmap(vmf->page);
4440+ memset(kaddr, 0, PAGE_SIZE);
4441+ kaddr[0] = 0x9DE3BFA8U; /* save */
4442+ flush_dcache_page(vmf->page);
4443+ kunmap(vmf->page);
4444+ return VM_FAULT_MAJOR;
4445+}
4446+
4447+static const struct vm_operations_struct pax_vm_ops = {
4448+ .close = pax_emuplt_close,
4449+ .fault = pax_emuplt_fault
4450+};
4451+
4452+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4453+{
4454+ int ret;
4455+
4456+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4457+ vma->vm_mm = current->mm;
4458+ vma->vm_start = addr;
4459+ vma->vm_end = addr + PAGE_SIZE;
4460+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4461+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4462+ vma->vm_ops = &pax_vm_ops;
4463+
4464+ ret = insert_vm_struct(current->mm, vma);
4465+ if (ret)
4466+ return ret;
4467+
4468+ ++current->mm->total_vm;
4469+ return 0;
4470+}
4471+#endif
4472+
4473+/*
4474+ * PaX: decide what to do with offenders (regs->tpc = fault address)
4475+ *
4476+ * returns 1 when task should be killed
4477+ * 2 when patched PLT trampoline was detected
4478+ * 3 when unpatched PLT trampoline was detected
4479+ */
4480+static int pax_handle_fetch_fault(struct pt_regs *regs)
4481+{
4482+
4483+#ifdef CONFIG_PAX_EMUPLT
4484+ int err;
4485+
4486+ do { /* PaX: patched PLT emulation #1 */
4487+ unsigned int sethi1, sethi2, jmpl;
4488+
4489+ err = get_user(sethi1, (unsigned int *)regs->tpc);
4490+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4491+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4492+
4493+ if (err)
4494+ break;
4495+
4496+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4497+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4498+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4499+ {
4500+ unsigned long addr;
4501+
4502+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4503+ addr = regs->u_regs[UREG_G1];
4504+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4505+
4506+ if (test_thread_flag(TIF_32BIT))
4507+ addr &= 0xFFFFFFFFUL;
4508+
4509+ regs->tpc = addr;
4510+ regs->tnpc = addr+4;
4511+ return 2;
4512+ }
4513+ } while (0);
4514+
4515+ { /* PaX: patched PLT emulation #2 */
4516+ unsigned int ba;
4517+
4518+ err = get_user(ba, (unsigned int *)regs->tpc);
4519+
4520+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4521+ unsigned long addr;
4522+
4523+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4524+
4525+ if (test_thread_flag(TIF_32BIT))
4526+ addr &= 0xFFFFFFFFUL;
4527+
4528+ regs->tpc = addr;
4529+ regs->tnpc = addr+4;
4530+ return 2;
4531+ }
4532+ }
4533+
4534+ do { /* PaX: patched PLT emulation #3 */
4535+ unsigned int sethi, jmpl, nop;
4536+
4537+ err = get_user(sethi, (unsigned int *)regs->tpc);
4538+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
4539+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4540+
4541+ if (err)
4542+ break;
4543+
4544+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4545+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4546+ nop == 0x01000000U)
4547+ {
4548+ unsigned long addr;
4549+
4550+ addr = (sethi & 0x003FFFFFU) << 10;
4551+ regs->u_regs[UREG_G1] = addr;
4552+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4553+
4554+ if (test_thread_flag(TIF_32BIT))
4555+ addr &= 0xFFFFFFFFUL;
4556+
4557+ regs->tpc = addr;
4558+ regs->tnpc = addr+4;
4559+ return 2;
4560+ }
4561+ } while (0);
4562+
4563+ do { /* PaX: patched PLT emulation #4 */
4564+ unsigned int sethi, mov1, call, mov2;
4565+
4566+ err = get_user(sethi, (unsigned int *)regs->tpc);
4567+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
4568+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
4569+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
4570+
4571+ if (err)
4572+ break;
4573+
4574+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4575+ mov1 == 0x8210000FU &&
4576+ (call & 0xC0000000U) == 0x40000000U &&
4577+ mov2 == 0x9E100001U)
4578+ {
4579+ unsigned long addr;
4580+
4581+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
4582+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4583+
4584+ if (test_thread_flag(TIF_32BIT))
4585+ addr &= 0xFFFFFFFFUL;
4586+
4587+ regs->tpc = addr;
4588+ regs->tnpc = addr+4;
4589+ return 2;
4590+ }
4591+ } while (0);
4592+
4593+ do { /* PaX: patched PLT emulation #5 */
4594+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
4595+
4596+ err = get_user(sethi, (unsigned int *)regs->tpc);
4597+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4598+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4599+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
4600+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
4601+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
4602+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
4603+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
4604+
4605+ if (err)
4606+ break;
4607+
4608+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4609+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
4610+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4611+ (or1 & 0xFFFFE000U) == 0x82106000U &&
4612+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
4613+ sllx == 0x83287020U &&
4614+ jmpl == 0x81C04005U &&
4615+ nop == 0x01000000U)
4616+ {
4617+ unsigned long addr;
4618+
4619+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4620+ regs->u_regs[UREG_G1] <<= 32;
4621+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4622+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4623+ regs->tpc = addr;
4624+ regs->tnpc = addr+4;
4625+ return 2;
4626+ }
4627+ } while (0);
4628+
4629+ do { /* PaX: patched PLT emulation #6 */
4630+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
4631+
4632+ err = get_user(sethi, (unsigned int *)regs->tpc);
4633+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4634+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4635+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
4636+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
4637+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
4638+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
4639+
4640+ if (err)
4641+ break;
4642+
4643+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4644+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
4645+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4646+ sllx == 0x83287020U &&
4647+ (or & 0xFFFFE000U) == 0x8A116000U &&
4648+ jmpl == 0x81C04005U &&
4649+ nop == 0x01000000U)
4650+ {
4651+ unsigned long addr;
4652+
4653+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
4654+ regs->u_regs[UREG_G1] <<= 32;
4655+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
4656+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4657+ regs->tpc = addr;
4658+ regs->tnpc = addr+4;
4659+ return 2;
4660+ }
4661+ } while (0);
4662+
4663+ do { /* PaX: unpatched PLT emulation step 1 */
4664+ unsigned int sethi, ba, nop;
4665+
4666+ err = get_user(sethi, (unsigned int *)regs->tpc);
4667+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4668+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4669+
4670+ if (err)
4671+ break;
4672+
4673+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4674+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4675+ nop == 0x01000000U)
4676+ {
4677+ unsigned long addr;
4678+ unsigned int save, call;
4679+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
4680+
4681+ if ((ba & 0xFFC00000U) == 0x30800000U)
4682+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4683+ else
4684+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4685+
4686+ if (test_thread_flag(TIF_32BIT))
4687+ addr &= 0xFFFFFFFFUL;
4688+
4689+ err = get_user(save, (unsigned int *)addr);
4690+ err |= get_user(call, (unsigned int *)(addr+4));
4691+ err |= get_user(nop, (unsigned int *)(addr+8));
4692+ if (err)
4693+ break;
4694+
4695+#ifdef CONFIG_PAX_DLRESOLVE
4696+ if (save == 0x9DE3BFA8U &&
4697+ (call & 0xC0000000U) == 0x40000000U &&
4698+ nop == 0x01000000U)
4699+ {
4700+ struct vm_area_struct *vma;
4701+ unsigned long call_dl_resolve;
4702+
4703+ down_read(&current->mm->mmap_sem);
4704+ call_dl_resolve = current->mm->call_dl_resolve;
4705+ up_read(&current->mm->mmap_sem);
4706+ if (likely(call_dl_resolve))
4707+ goto emulate;
4708+
4709+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4710+
4711+ down_write(&current->mm->mmap_sem);
4712+ if (current->mm->call_dl_resolve) {
4713+ call_dl_resolve = current->mm->call_dl_resolve;
4714+ up_write(&current->mm->mmap_sem);
4715+ if (vma)
4716+ kmem_cache_free(vm_area_cachep, vma);
4717+ goto emulate;
4718+ }
4719+
4720+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4721+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4722+ up_write(&current->mm->mmap_sem);
4723+ if (vma)
4724+ kmem_cache_free(vm_area_cachep, vma);
4725+ return 1;
4726+ }
4727+
4728+ if (pax_insert_vma(vma, call_dl_resolve)) {
4729+ up_write(&current->mm->mmap_sem);
4730+ kmem_cache_free(vm_area_cachep, vma);
4731+ return 1;
4732+ }
4733+
4734+ current->mm->call_dl_resolve = call_dl_resolve;
4735+ up_write(&current->mm->mmap_sem);
4736+
4737+emulate:
4738+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4739+ regs->tpc = call_dl_resolve;
4740+ regs->tnpc = addr+4;
4741+ return 3;
4742+ }
4743+#endif
4744+
4745+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4746+ if ((save & 0xFFC00000U) == 0x05000000U &&
4747+ (call & 0xFFFFE000U) == 0x85C0A000U &&
4748+ nop == 0x01000000U)
4749+ {
4750+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4751+ regs->u_regs[UREG_G2] = addr + 4;
4752+ addr = (save & 0x003FFFFFU) << 10;
4753+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4754+
4755+ if (test_thread_flag(TIF_32BIT))
4756+ addr &= 0xFFFFFFFFUL;
4757+
4758+ regs->tpc = addr;
4759+ regs->tnpc = addr+4;
4760+ return 3;
4761+ }
4762+
4763+ /* PaX: 64-bit PLT stub */
4764+ err = get_user(sethi1, (unsigned int *)addr);
4765+ err |= get_user(sethi2, (unsigned int *)(addr+4));
4766+ err |= get_user(or1, (unsigned int *)(addr+8));
4767+ err |= get_user(or2, (unsigned int *)(addr+12));
4768+ err |= get_user(sllx, (unsigned int *)(addr+16));
4769+ err |= get_user(add, (unsigned int *)(addr+20));
4770+ err |= get_user(jmpl, (unsigned int *)(addr+24));
4771+ err |= get_user(nop, (unsigned int *)(addr+28));
4772+ if (err)
4773+ break;
4774+
4775+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
4776+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4777+ (or1 & 0xFFFFE000U) == 0x88112000U &&
4778+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
4779+ sllx == 0x89293020U &&
4780+ add == 0x8A010005U &&
4781+ jmpl == 0x89C14000U &&
4782+ nop == 0x01000000U)
4783+ {
4784+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4785+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4786+ regs->u_regs[UREG_G4] <<= 32;
4787+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4788+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
4789+ regs->u_regs[UREG_G4] = addr + 24;
4790+ addr = regs->u_regs[UREG_G5];
4791+ regs->tpc = addr;
4792+ regs->tnpc = addr+4;
4793+ return 3;
4794+ }
4795+ }
4796+ } while (0);
4797+
4798+#ifdef CONFIG_PAX_DLRESOLVE
4799+ do { /* PaX: unpatched PLT emulation step 2 */
4800+ unsigned int save, call, nop;
4801+
4802+ err = get_user(save, (unsigned int *)(regs->tpc-4));
4803+ err |= get_user(call, (unsigned int *)regs->tpc);
4804+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
4805+ if (err)
4806+ break;
4807+
4808+ if (save == 0x9DE3BFA8U &&
4809+ (call & 0xC0000000U) == 0x40000000U &&
4810+ nop == 0x01000000U)
4811+ {
4812+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4813+
4814+ if (test_thread_flag(TIF_32BIT))
4815+ dl_resolve &= 0xFFFFFFFFUL;
4816+
4817+ regs->u_regs[UREG_RETPC] = regs->tpc;
4818+ regs->tpc = dl_resolve;
4819+ regs->tnpc = dl_resolve+4;
4820+ return 3;
4821+ }
4822+ } while (0);
4823+#endif
4824+
4825+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
4826+ unsigned int sethi, ba, nop;
4827+
4828+ err = get_user(sethi, (unsigned int *)regs->tpc);
4829+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4830+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4831+
4832+ if (err)
4833+ break;
4834+
4835+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4836+ (ba & 0xFFF00000U) == 0x30600000U &&
4837+ nop == 0x01000000U)
4838+ {
4839+ unsigned long addr;
4840+
4841+ addr = (sethi & 0x003FFFFFU) << 10;
4842+ regs->u_regs[UREG_G1] = addr;
4843+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4844+
4845+ if (test_thread_flag(TIF_32BIT))
4846+ addr &= 0xFFFFFFFFUL;
4847+
4848+ regs->tpc = addr;
4849+ regs->tnpc = addr+4;
4850+ return 2;
4851+ }
4852+ } while (0);
4853+
4854+#endif
4855+
4856+ return 1;
4857+}
4858+
4859+void pax_report_insns(void *pc, void *sp)
4860+{
4861+ unsigned long i;
4862+
4863+ printk(KERN_ERR "PAX: bytes at PC: ");
4864+ for (i = 0; i < 8; i++) {
4865+ unsigned int c;
4866+ if (get_user(c, (unsigned int *)pc+i))
4867+ printk(KERN_CONT "???????? ");
4868+ else
4869+ printk(KERN_CONT "%08x ", c);
4870+ }
4871+ printk("\n");
4872+}
4873+#endif
4874+
4875 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
4876 {
4877 struct mm_struct *mm = current->mm;
4878@@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fau
4879 if (!vma)
4880 goto bad_area;
4881
4882+#ifdef CONFIG_PAX_PAGEEXEC
4883+ /* PaX: detect ITLB misses on non-exec pages */
4884+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
4885+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
4886+ {
4887+ if (address != regs->tpc)
4888+ goto good_area;
4889+
4890+ up_read(&mm->mmap_sem);
4891+ switch (pax_handle_fetch_fault(regs)) {
4892+
4893+#ifdef CONFIG_PAX_EMUPLT
4894+ case 2:
4895+ case 3:
4896+ return;
4897+#endif
4898+
4899+ }
4900+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
4901+ do_group_exit(SIGKILL);
4902+ }
4903+#endif
4904+
4905 /* Pure DTLB misses do not tell us whether the fault causing
4906 * load/store/atomic was a write or not, it only says that there
4907 * was no match. So in such a case we (carefully) read the
4908diff -urNp linux-3.0.4/arch/sparc/mm/hugetlbpage.c linux-3.0.4/arch/sparc/mm/hugetlbpage.c
4909--- linux-3.0.4/arch/sparc/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
4910+++ linux-3.0.4/arch/sparc/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
4911@@ -68,7 +68,7 @@ full_search:
4912 }
4913 return -ENOMEM;
4914 }
4915- if (likely(!vma || addr + len <= vma->vm_start)) {
4916+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4917 /*
4918 * Remember the place where we stopped the search:
4919 */
4920@@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct
4921 /* make sure it can fit in the remaining address space */
4922 if (likely(addr > len)) {
4923 vma = find_vma(mm, addr-len);
4924- if (!vma || addr <= vma->vm_start) {
4925+ if (check_heap_stack_gap(vma, addr - len, len)) {
4926 /* remember the address as a hint for next time */
4927 return (mm->free_area_cache = addr-len);
4928 }
4929@@ -116,16 +116,17 @@ hugetlb_get_unmapped_area_topdown(struct
4930 if (unlikely(mm->mmap_base < len))
4931 goto bottomup;
4932
4933- addr = (mm->mmap_base-len) & HPAGE_MASK;
4934+ addr = mm->mmap_base - len;
4935
4936 do {
4937+ addr &= HPAGE_MASK;
4938 /*
4939 * Lookup failure means no vma is above this address,
4940 * else if new region fits below vma->vm_start,
4941 * return with success:
4942 */
4943 vma = find_vma(mm, addr);
4944- if (likely(!vma || addr+len <= vma->vm_start)) {
4945+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4946 /* remember the address as a hint for next time */
4947 return (mm->free_area_cache = addr);
4948 }
4949@@ -135,8 +136,8 @@ hugetlb_get_unmapped_area_topdown(struct
4950 mm->cached_hole_size = vma->vm_start - addr;
4951
4952 /* try just below the current vma->vm_start */
4953- addr = (vma->vm_start-len) & HPAGE_MASK;
4954- } while (likely(len < vma->vm_start));
4955+ addr = skip_heap_stack_gap(vma, len);
4956+ } while (!IS_ERR_VALUE(addr));
4957
4958 bottomup:
4959 /*
4960@@ -182,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *f
4961 if (addr) {
4962 addr = ALIGN(addr, HPAGE_SIZE);
4963 vma = find_vma(mm, addr);
4964- if (task_size - len >= addr &&
4965- (!vma || addr + len <= vma->vm_start))
4966+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4967 return addr;
4968 }
4969 if (mm->get_unmapped_area == arch_get_unmapped_area)
4970diff -urNp linux-3.0.4/arch/sparc/mm/init_32.c linux-3.0.4/arch/sparc/mm/init_32.c
4971--- linux-3.0.4/arch/sparc/mm/init_32.c 2011-07-21 22:17:23.000000000 -0400
4972+++ linux-3.0.4/arch/sparc/mm/init_32.c 2011-08-23 21:47:55.000000000 -0400
4973@@ -316,6 +316,9 @@ extern void device_scan(void);
4974 pgprot_t PAGE_SHARED __read_mostly;
4975 EXPORT_SYMBOL(PAGE_SHARED);
4976
4977+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
4978+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
4979+
4980 void __init paging_init(void)
4981 {
4982 switch(sparc_cpu_model) {
4983@@ -344,17 +347,17 @@ void __init paging_init(void)
4984
4985 /* Initialize the protection map with non-constant, MMU dependent values. */
4986 protection_map[0] = PAGE_NONE;
4987- protection_map[1] = PAGE_READONLY;
4988- protection_map[2] = PAGE_COPY;
4989- protection_map[3] = PAGE_COPY;
4990+ protection_map[1] = PAGE_READONLY_NOEXEC;
4991+ protection_map[2] = PAGE_COPY_NOEXEC;
4992+ protection_map[3] = PAGE_COPY_NOEXEC;
4993 protection_map[4] = PAGE_READONLY;
4994 protection_map[5] = PAGE_READONLY;
4995 protection_map[6] = PAGE_COPY;
4996 protection_map[7] = PAGE_COPY;
4997 protection_map[8] = PAGE_NONE;
4998- protection_map[9] = PAGE_READONLY;
4999- protection_map[10] = PAGE_SHARED;
5000- protection_map[11] = PAGE_SHARED;
5001+ protection_map[9] = PAGE_READONLY_NOEXEC;
5002+ protection_map[10] = PAGE_SHARED_NOEXEC;
5003+ protection_map[11] = PAGE_SHARED_NOEXEC;
5004 protection_map[12] = PAGE_READONLY;
5005 protection_map[13] = PAGE_READONLY;
5006 protection_map[14] = PAGE_SHARED;
5007diff -urNp linux-3.0.4/arch/sparc/mm/Makefile linux-3.0.4/arch/sparc/mm/Makefile
5008--- linux-3.0.4/arch/sparc/mm/Makefile 2011-07-21 22:17:23.000000000 -0400
5009+++ linux-3.0.4/arch/sparc/mm/Makefile 2011-08-23 21:47:55.000000000 -0400
5010@@ -2,7 +2,7 @@
5011 #
5012
5013 asflags-y := -ansi
5014-ccflags-y := -Werror
5015+#ccflags-y := -Werror
5016
5017 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5018 obj-y += fault_$(BITS).o
5019diff -urNp linux-3.0.4/arch/sparc/mm/srmmu.c linux-3.0.4/arch/sparc/mm/srmmu.c
5020--- linux-3.0.4/arch/sparc/mm/srmmu.c 2011-07-21 22:17:23.000000000 -0400
5021+++ linux-3.0.4/arch/sparc/mm/srmmu.c 2011-08-23 21:47:55.000000000 -0400
5022@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5023 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5024 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5025 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5026+
5027+#ifdef CONFIG_PAX_PAGEEXEC
5028+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5029+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5030+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5031+#endif
5032+
5033 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5034 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5035
5036diff -urNp linux-3.0.4/arch/um/include/asm/kmap_types.h linux-3.0.4/arch/um/include/asm/kmap_types.h
5037--- linux-3.0.4/arch/um/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
5038+++ linux-3.0.4/arch/um/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
5039@@ -23,6 +23,7 @@ enum km_type {
5040 KM_IRQ1,
5041 KM_SOFTIRQ0,
5042 KM_SOFTIRQ1,
5043+ KM_CLEARPAGE,
5044 KM_TYPE_NR
5045 };
5046
5047diff -urNp linux-3.0.4/arch/um/include/asm/page.h linux-3.0.4/arch/um/include/asm/page.h
5048--- linux-3.0.4/arch/um/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
5049+++ linux-3.0.4/arch/um/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
5050@@ -14,6 +14,9 @@
5051 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5052 #define PAGE_MASK (~(PAGE_SIZE-1))
5053
5054+#define ktla_ktva(addr) (addr)
5055+#define ktva_ktla(addr) (addr)
5056+
5057 #ifndef __ASSEMBLY__
5058
5059 struct page;
5060diff -urNp linux-3.0.4/arch/um/kernel/process.c linux-3.0.4/arch/um/kernel/process.c
5061--- linux-3.0.4/arch/um/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
5062+++ linux-3.0.4/arch/um/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
5063@@ -404,22 +404,6 @@ int singlestepping(void * t)
5064 return 2;
5065 }
5066
5067-/*
5068- * Only x86 and x86_64 have an arch_align_stack().
5069- * All other arches have "#define arch_align_stack(x) (x)"
5070- * in their asm/system.h
5071- * As this is included in UML from asm-um/system-generic.h,
5072- * we can use it to behave as the subarch does.
5073- */
5074-#ifndef arch_align_stack
5075-unsigned long arch_align_stack(unsigned long sp)
5076-{
5077- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5078- sp -= get_random_int() % 8192;
5079- return sp & ~0xf;
5080-}
5081-#endif
5082-
5083 unsigned long get_wchan(struct task_struct *p)
5084 {
5085 unsigned long stack_page, sp, ip;
5086diff -urNp linux-3.0.4/arch/um/sys-i386/syscalls.c linux-3.0.4/arch/um/sys-i386/syscalls.c
5087--- linux-3.0.4/arch/um/sys-i386/syscalls.c 2011-07-21 22:17:23.000000000 -0400
5088+++ linux-3.0.4/arch/um/sys-i386/syscalls.c 2011-08-23 21:47:55.000000000 -0400
5089@@ -11,6 +11,21 @@
5090 #include "asm/uaccess.h"
5091 #include "asm/unistd.h"
5092
5093+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5094+{
5095+ unsigned long pax_task_size = TASK_SIZE;
5096+
5097+#ifdef CONFIG_PAX_SEGMEXEC
5098+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5099+ pax_task_size = SEGMEXEC_TASK_SIZE;
5100+#endif
5101+
5102+ if (len > pax_task_size || addr > pax_task_size - len)
5103+ return -EINVAL;
5104+
5105+ return 0;
5106+}
5107+
5108 /*
5109 * The prototype on i386 is:
5110 *
5111diff -urNp linux-3.0.4/arch/x86/boot/bitops.h linux-3.0.4/arch/x86/boot/bitops.h
5112--- linux-3.0.4/arch/x86/boot/bitops.h 2011-07-21 22:17:23.000000000 -0400
5113+++ linux-3.0.4/arch/x86/boot/bitops.h 2011-08-23 21:47:55.000000000 -0400
5114@@ -26,7 +26,7 @@ static inline int variable_test_bit(int
5115 u8 v;
5116 const u32 *p = (const u32 *)addr;
5117
5118- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5119+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5120 return v;
5121 }
5122
5123@@ -37,7 +37,7 @@ static inline int variable_test_bit(int
5124
5125 static inline void set_bit(int nr, void *addr)
5126 {
5127- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5128+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5129 }
5130
5131 #endif /* BOOT_BITOPS_H */
5132diff -urNp linux-3.0.4/arch/x86/boot/boot.h linux-3.0.4/arch/x86/boot/boot.h
5133--- linux-3.0.4/arch/x86/boot/boot.h 2011-07-21 22:17:23.000000000 -0400
5134+++ linux-3.0.4/arch/x86/boot/boot.h 2011-08-23 21:47:55.000000000 -0400
5135@@ -85,7 +85,7 @@ static inline void io_delay(void)
5136 static inline u16 ds(void)
5137 {
5138 u16 seg;
5139- asm("movw %%ds,%0" : "=rm" (seg));
5140+ asm volatile("movw %%ds,%0" : "=rm" (seg));
5141 return seg;
5142 }
5143
5144@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t
5145 static inline int memcmp(const void *s1, const void *s2, size_t len)
5146 {
5147 u8 diff;
5148- asm("repe; cmpsb; setnz %0"
5149+ asm volatile("repe; cmpsb; setnz %0"
5150 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5151 return diff;
5152 }
5153diff -urNp linux-3.0.4/arch/x86/boot/compressed/head_32.S linux-3.0.4/arch/x86/boot/compressed/head_32.S
5154--- linux-3.0.4/arch/x86/boot/compressed/head_32.S 2011-07-21 22:17:23.000000000 -0400
5155+++ linux-3.0.4/arch/x86/boot/compressed/head_32.S 2011-08-23 21:47:55.000000000 -0400
5156@@ -76,7 +76,7 @@ ENTRY(startup_32)
5157 notl %eax
5158 andl %eax, %ebx
5159 #else
5160- movl $LOAD_PHYSICAL_ADDR, %ebx
5161+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5162 #endif
5163
5164 /* Target address to relocate to for decompression */
5165@@ -162,7 +162,7 @@ relocated:
5166 * and where it was actually loaded.
5167 */
5168 movl %ebp, %ebx
5169- subl $LOAD_PHYSICAL_ADDR, %ebx
5170+ subl $____LOAD_PHYSICAL_ADDR, %ebx
5171 jz 2f /* Nothing to be done if loaded at compiled addr. */
5172 /*
5173 * Process relocations.
5174@@ -170,8 +170,7 @@ relocated:
5175
5176 1: subl $4, %edi
5177 movl (%edi), %ecx
5178- testl %ecx, %ecx
5179- jz 2f
5180+ jecxz 2f
5181 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5182 jmp 1b
5183 2:
5184diff -urNp linux-3.0.4/arch/x86/boot/compressed/head_64.S linux-3.0.4/arch/x86/boot/compressed/head_64.S
5185--- linux-3.0.4/arch/x86/boot/compressed/head_64.S 2011-07-21 22:17:23.000000000 -0400
5186+++ linux-3.0.4/arch/x86/boot/compressed/head_64.S 2011-08-23 21:47:55.000000000 -0400
5187@@ -91,7 +91,7 @@ ENTRY(startup_32)
5188 notl %eax
5189 andl %eax, %ebx
5190 #else
5191- movl $LOAD_PHYSICAL_ADDR, %ebx
5192+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5193 #endif
5194
5195 /* Target address to relocate to for decompression */
5196@@ -233,7 +233,7 @@ ENTRY(startup_64)
5197 notq %rax
5198 andq %rax, %rbp
5199 #else
5200- movq $LOAD_PHYSICAL_ADDR, %rbp
5201+ movq $____LOAD_PHYSICAL_ADDR, %rbp
5202 #endif
5203
5204 /* Target address to relocate to for decompression */
5205diff -urNp linux-3.0.4/arch/x86/boot/compressed/Makefile linux-3.0.4/arch/x86/boot/compressed/Makefile
5206--- linux-3.0.4/arch/x86/boot/compressed/Makefile 2011-07-21 22:17:23.000000000 -0400
5207+++ linux-3.0.4/arch/x86/boot/compressed/Makefile 2011-08-23 21:47:55.000000000 -0400
5208@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
5209 KBUILD_CFLAGS += $(cflags-y)
5210 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5211 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5212+ifdef CONSTIFY_PLUGIN
5213+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5214+endif
5215
5216 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5217 GCOV_PROFILE := n
5218diff -urNp linux-3.0.4/arch/x86/boot/compressed/misc.c linux-3.0.4/arch/x86/boot/compressed/misc.c
5219--- linux-3.0.4/arch/x86/boot/compressed/misc.c 2011-07-21 22:17:23.000000000 -0400
5220+++ linux-3.0.4/arch/x86/boot/compressed/misc.c 2011-08-23 21:47:55.000000000 -0400
5221@@ -310,7 +310,7 @@ static void parse_elf(void *output)
5222 case PT_LOAD:
5223 #ifdef CONFIG_RELOCATABLE
5224 dest = output;
5225- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5226+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5227 #else
5228 dest = (void *)(phdr->p_paddr);
5229 #endif
5230@@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *
5231 error("Destination address too large");
5232 #endif
5233 #ifndef CONFIG_RELOCATABLE
5234- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5235+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5236 error("Wrong destination address");
5237 #endif
5238
5239diff -urNp linux-3.0.4/arch/x86/boot/compressed/relocs.c linux-3.0.4/arch/x86/boot/compressed/relocs.c
5240--- linux-3.0.4/arch/x86/boot/compressed/relocs.c 2011-07-21 22:17:23.000000000 -0400
5241+++ linux-3.0.4/arch/x86/boot/compressed/relocs.c 2011-08-23 21:47:55.000000000 -0400
5242@@ -13,8 +13,11 @@
5243
5244 static void die(char *fmt, ...);
5245
5246+#include "../../../../include/generated/autoconf.h"
5247+
5248 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5249 static Elf32_Ehdr ehdr;
5250+static Elf32_Phdr *phdr;
5251 static unsigned long reloc_count, reloc_idx;
5252 static unsigned long *relocs;
5253
5254@@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5255 }
5256 }
5257
5258+static void read_phdrs(FILE *fp)
5259+{
5260+ unsigned int i;
5261+
5262+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5263+ if (!phdr) {
5264+ die("Unable to allocate %d program headers\n",
5265+ ehdr.e_phnum);
5266+ }
5267+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5268+ die("Seek to %d failed: %s\n",
5269+ ehdr.e_phoff, strerror(errno));
5270+ }
5271+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5272+ die("Cannot read ELF program headers: %s\n",
5273+ strerror(errno));
5274+ }
5275+ for(i = 0; i < ehdr.e_phnum; i++) {
5276+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5277+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5278+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5279+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5280+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5281+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5282+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5283+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5284+ }
5285+
5286+}
5287+
5288 static void read_shdrs(FILE *fp)
5289 {
5290- int i;
5291+ unsigned int i;
5292 Elf32_Shdr shdr;
5293
5294 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5295@@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5296
5297 static void read_strtabs(FILE *fp)
5298 {
5299- int i;
5300+ unsigned int i;
5301 for (i = 0; i < ehdr.e_shnum; i++) {
5302 struct section *sec = &secs[i];
5303 if (sec->shdr.sh_type != SHT_STRTAB) {
5304@@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5305
5306 static void read_symtabs(FILE *fp)
5307 {
5308- int i,j;
5309+ unsigned int i,j;
5310 for (i = 0; i < ehdr.e_shnum; i++) {
5311 struct section *sec = &secs[i];
5312 if (sec->shdr.sh_type != SHT_SYMTAB) {
5313@@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5314
5315 static void read_relocs(FILE *fp)
5316 {
5317- int i,j;
5318+ unsigned int i,j;
5319+ uint32_t base;
5320+
5321 for (i = 0; i < ehdr.e_shnum; i++) {
5322 struct section *sec = &secs[i];
5323 if (sec->shdr.sh_type != SHT_REL) {
5324@@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5325 die("Cannot read symbol table: %s\n",
5326 strerror(errno));
5327 }
5328+ base = 0;
5329+ for (j = 0; j < ehdr.e_phnum; j++) {
5330+ if (phdr[j].p_type != PT_LOAD )
5331+ continue;
5332+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5333+ continue;
5334+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
5335+ break;
5336+ }
5337 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
5338 Elf32_Rel *rel = &sec->reltab[j];
5339- rel->r_offset = elf32_to_cpu(rel->r_offset);
5340+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
5341 rel->r_info = elf32_to_cpu(rel->r_info);
5342 }
5343 }
5344@@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
5345
5346 static void print_absolute_symbols(void)
5347 {
5348- int i;
5349+ unsigned int i;
5350 printf("Absolute symbols\n");
5351 printf(" Num: Value Size Type Bind Visibility Name\n");
5352 for (i = 0; i < ehdr.e_shnum; i++) {
5353 struct section *sec = &secs[i];
5354 char *sym_strtab;
5355 Elf32_Sym *sh_symtab;
5356- int j;
5357+ unsigned int j;
5358
5359 if (sec->shdr.sh_type != SHT_SYMTAB) {
5360 continue;
5361@@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
5362
5363 static void print_absolute_relocs(void)
5364 {
5365- int i, printed = 0;
5366+ unsigned int i, printed = 0;
5367
5368 for (i = 0; i < ehdr.e_shnum; i++) {
5369 struct section *sec = &secs[i];
5370 struct section *sec_applies, *sec_symtab;
5371 char *sym_strtab;
5372 Elf32_Sym *sh_symtab;
5373- int j;
5374+ unsigned int j;
5375 if (sec->shdr.sh_type != SHT_REL) {
5376 continue;
5377 }
5378@@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
5379
5380 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
5381 {
5382- int i;
5383+ unsigned int i;
5384 /* Walk through the relocations */
5385 for (i = 0; i < ehdr.e_shnum; i++) {
5386 char *sym_strtab;
5387 Elf32_Sym *sh_symtab;
5388 struct section *sec_applies, *sec_symtab;
5389- int j;
5390+ unsigned int j;
5391 struct section *sec = &secs[i];
5392
5393 if (sec->shdr.sh_type != SHT_REL) {
5394@@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(El
5395 !is_rel_reloc(sym_name(sym_strtab, sym))) {
5396 continue;
5397 }
5398+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
5399+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
5400+ continue;
5401+
5402+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
5403+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
5404+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
5405+ continue;
5406+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
5407+ continue;
5408+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
5409+ continue;
5410+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
5411+ continue;
5412+#endif
5413+
5414 switch (r_type) {
5415 case R_386_NONE:
5416 case R_386_PC32:
5417@@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, co
5418
5419 static void emit_relocs(int as_text)
5420 {
5421- int i;
5422+ unsigned int i;
5423 /* Count how many relocations I have and allocate space for them. */
5424 reloc_count = 0;
5425 walk_relocs(count_reloc);
5426@@ -665,6 +725,7 @@ int main(int argc, char **argv)
5427 fname, strerror(errno));
5428 }
5429 read_ehdr(fp);
5430+ read_phdrs(fp);
5431 read_shdrs(fp);
5432 read_strtabs(fp);
5433 read_symtabs(fp);
5434diff -urNp linux-3.0.4/arch/x86/boot/cpucheck.c linux-3.0.4/arch/x86/boot/cpucheck.c
5435--- linux-3.0.4/arch/x86/boot/cpucheck.c 2011-07-21 22:17:23.000000000 -0400
5436+++ linux-3.0.4/arch/x86/boot/cpucheck.c 2011-08-23 21:47:55.000000000 -0400
5437@@ -74,7 +74,7 @@ static int has_fpu(void)
5438 u16 fcw = -1, fsw = -1;
5439 u32 cr0;
5440
5441- asm("movl %%cr0,%0" : "=r" (cr0));
5442+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
5443 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
5444 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
5445 asm volatile("movl %0,%%cr0" : : "r" (cr0));
5446@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
5447 {
5448 u32 f0, f1;
5449
5450- asm("pushfl ; "
5451+ asm volatile("pushfl ; "
5452 "pushfl ; "
5453 "popl %0 ; "
5454 "movl %0,%1 ; "
5455@@ -115,7 +115,7 @@ static void get_flags(void)
5456 set_bit(X86_FEATURE_FPU, cpu.flags);
5457
5458 if (has_eflag(X86_EFLAGS_ID)) {
5459- asm("cpuid"
5460+ asm volatile("cpuid"
5461 : "=a" (max_intel_level),
5462 "=b" (cpu_vendor[0]),
5463 "=d" (cpu_vendor[1]),
5464@@ -124,7 +124,7 @@ static void get_flags(void)
5465
5466 if (max_intel_level >= 0x00000001 &&
5467 max_intel_level <= 0x0000ffff) {
5468- asm("cpuid"
5469+ asm volatile("cpuid"
5470 : "=a" (tfms),
5471 "=c" (cpu.flags[4]),
5472 "=d" (cpu.flags[0])
5473@@ -136,7 +136,7 @@ static void get_flags(void)
5474 cpu.model += ((tfms >> 16) & 0xf) << 4;
5475 }
5476
5477- asm("cpuid"
5478+ asm volatile("cpuid"
5479 : "=a" (max_amd_level)
5480 : "a" (0x80000000)
5481 : "ebx", "ecx", "edx");
5482@@ -144,7 +144,7 @@ static void get_flags(void)
5483 if (max_amd_level >= 0x80000001 &&
5484 max_amd_level <= 0x8000ffff) {
5485 u32 eax = 0x80000001;
5486- asm("cpuid"
5487+ asm volatile("cpuid"
5488 : "+a" (eax),
5489 "=c" (cpu.flags[6]),
5490 "=d" (cpu.flags[1])
5491@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5492 u32 ecx = MSR_K7_HWCR;
5493 u32 eax, edx;
5494
5495- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5496+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5497 eax &= ~(1 << 15);
5498- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5499+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5500
5501 get_flags(); /* Make sure it really did something */
5502 err = check_flags();
5503@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5504 u32 ecx = MSR_VIA_FCR;
5505 u32 eax, edx;
5506
5507- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5508+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5509 eax |= (1<<1)|(1<<7);
5510- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5511+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5512
5513 set_bit(X86_FEATURE_CX8, cpu.flags);
5514 err = check_flags();
5515@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
5516 u32 eax, edx;
5517 u32 level = 1;
5518
5519- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5520- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5521- asm("cpuid"
5522+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5523+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5524+ asm volatile("cpuid"
5525 : "+a" (level), "=d" (cpu.flags[0])
5526 : : "ecx", "ebx");
5527- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5528+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5529
5530 err = check_flags();
5531 }
5532diff -urNp linux-3.0.4/arch/x86/boot/header.S linux-3.0.4/arch/x86/boot/header.S
5533--- linux-3.0.4/arch/x86/boot/header.S 2011-07-21 22:17:23.000000000 -0400
5534+++ linux-3.0.4/arch/x86/boot/header.S 2011-08-23 21:47:55.000000000 -0400
5535@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
5536 # single linked list of
5537 # struct setup_data
5538
5539-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
5540+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
5541
5542 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
5543 #define VO_INIT_SIZE (VO__end - VO__text)
5544diff -urNp linux-3.0.4/arch/x86/boot/Makefile linux-3.0.4/arch/x86/boot/Makefile
5545--- linux-3.0.4/arch/x86/boot/Makefile 2011-07-21 22:17:23.000000000 -0400
5546+++ linux-3.0.4/arch/x86/boot/Makefile 2011-08-23 21:47:55.000000000 -0400
5547@@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
5548 $(call cc-option, -fno-stack-protector) \
5549 $(call cc-option, -mpreferred-stack-boundary=2)
5550 KBUILD_CFLAGS += $(call cc-option, -m32)
5551+ifdef CONSTIFY_PLUGIN
5552+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5553+endif
5554 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5555 GCOV_PROFILE := n
5556
5557diff -urNp linux-3.0.4/arch/x86/boot/memory.c linux-3.0.4/arch/x86/boot/memory.c
5558--- linux-3.0.4/arch/x86/boot/memory.c 2011-07-21 22:17:23.000000000 -0400
5559+++ linux-3.0.4/arch/x86/boot/memory.c 2011-08-23 21:47:55.000000000 -0400
5560@@ -19,7 +19,7 @@
5561
5562 static int detect_memory_e820(void)
5563 {
5564- int count = 0;
5565+ unsigned int count = 0;
5566 struct biosregs ireg, oreg;
5567 struct e820entry *desc = boot_params.e820_map;
5568 static struct e820entry buf; /* static so it is zeroed */
5569diff -urNp linux-3.0.4/arch/x86/boot/video.c linux-3.0.4/arch/x86/boot/video.c
5570--- linux-3.0.4/arch/x86/boot/video.c 2011-07-21 22:17:23.000000000 -0400
5571+++ linux-3.0.4/arch/x86/boot/video.c 2011-08-23 21:47:55.000000000 -0400
5572@@ -96,7 +96,7 @@ static void store_mode_params(void)
5573 static unsigned int get_entry(void)
5574 {
5575 char entry_buf[4];
5576- int i, len = 0;
5577+ unsigned int i, len = 0;
5578 int key;
5579 unsigned int v;
5580
5581diff -urNp linux-3.0.4/arch/x86/boot/video-vesa.c linux-3.0.4/arch/x86/boot/video-vesa.c
5582--- linux-3.0.4/arch/x86/boot/video-vesa.c 2011-07-21 22:17:23.000000000 -0400
5583+++ linux-3.0.4/arch/x86/boot/video-vesa.c 2011-08-23 21:47:55.000000000 -0400
5584@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
5585
5586 boot_params.screen_info.vesapm_seg = oreg.es;
5587 boot_params.screen_info.vesapm_off = oreg.di;
5588+ boot_params.screen_info.vesapm_size = oreg.cx;
5589 }
5590
5591 /*
5592diff -urNp linux-3.0.4/arch/x86/crypto/aes-x86_64-asm_64.S linux-3.0.4/arch/x86/crypto/aes-x86_64-asm_64.S
5593--- linux-3.0.4/arch/x86/crypto/aes-x86_64-asm_64.S 2011-07-21 22:17:23.000000000 -0400
5594+++ linux-3.0.4/arch/x86/crypto/aes-x86_64-asm_64.S 2011-10-06 04:17:55.000000000 -0400
5595@@ -8,6 +8,8 @@
5596 * including this sentence is retained in full.
5597 */
5598
5599+#include <asm/alternative-asm.h>
5600+
5601 .extern crypto_ft_tab
5602 .extern crypto_it_tab
5603 .extern crypto_fl_tab
5604@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
5605 je B192; \
5606 leaq 32(r9),r9;
5607
5608+#define ret pax_force_retaddr; ret
5609+
5610 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
5611 movq r1,r2; \
5612 movq r3,r4; \
5613diff -urNp linux-3.0.4/arch/x86/crypto/salsa20-x86_64-asm_64.S linux-3.0.4/arch/x86/crypto/salsa20-x86_64-asm_64.S
5614--- linux-3.0.4/arch/x86/crypto/salsa20-x86_64-asm_64.S 2011-07-21 22:17:23.000000000 -0400
5615+++ linux-3.0.4/arch/x86/crypto/salsa20-x86_64-asm_64.S 2011-10-06 04:17:55.000000000 -0400
5616@@ -1,3 +1,5 @@
5617+#include <asm/alternative-asm.h>
5618+
5619 # enter ECRYPT_encrypt_bytes
5620 .text
5621 .p2align 5
5622@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
5623 add %r11,%rsp
5624 mov %rdi,%rax
5625 mov %rsi,%rdx
5626+ pax_force_retaddr
5627 ret
5628 # bytesatleast65:
5629 ._bytesatleast65:
5630@@ -891,6 +894,7 @@ ECRYPT_keysetup:
5631 add %r11,%rsp
5632 mov %rdi,%rax
5633 mov %rsi,%rdx
5634+ pax_force_retaddr
5635 ret
5636 # enter ECRYPT_ivsetup
5637 .text
5638@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
5639 add %r11,%rsp
5640 mov %rdi,%rax
5641 mov %rsi,%rdx
5642+ pax_force_retaddr
5643 ret
5644diff -urNp linux-3.0.4/arch/x86/crypto/twofish-x86_64-asm_64.S linux-3.0.4/arch/x86/crypto/twofish-x86_64-asm_64.S
5645--- linux-3.0.4/arch/x86/crypto/twofish-x86_64-asm_64.S 2011-07-21 22:17:23.000000000 -0400
5646+++ linux-3.0.4/arch/x86/crypto/twofish-x86_64-asm_64.S 2011-10-06 04:17:55.000000000 -0400
5647@@ -21,6 +21,7 @@
5648 .text
5649
5650 #include <asm/asm-offsets.h>
5651+#include <asm/alternative-asm.h>
5652
5653 #define a_offset 0
5654 #define b_offset 4
5655@@ -269,6 +270,7 @@ twofish_enc_blk:
5656
5657 popq R1
5658 movq $1,%rax
5659+ pax_force_retaddr
5660 ret
5661
5662 twofish_dec_blk:
5663@@ -321,4 +323,5 @@ twofish_dec_blk:
5664
5665 popq R1
5666 movq $1,%rax
5667+ pax_force_retaddr
5668 ret
5669diff -urNp linux-3.0.4/arch/x86/ia32/ia32_aout.c linux-3.0.4/arch/x86/ia32/ia32_aout.c
5670--- linux-3.0.4/arch/x86/ia32/ia32_aout.c 2011-07-21 22:17:23.000000000 -0400
5671+++ linux-3.0.4/arch/x86/ia32/ia32_aout.c 2011-08-23 21:48:14.000000000 -0400
5672@@ -162,6 +162,8 @@ static int aout_core_dump(long signr, st
5673 unsigned long dump_start, dump_size;
5674 struct user32 dump;
5675
5676+ memset(&dump, 0, sizeof(dump));
5677+
5678 fs = get_fs();
5679 set_fs(KERNEL_DS);
5680 has_dumped = 1;
5681diff -urNp linux-3.0.4/arch/x86/ia32/ia32entry.S linux-3.0.4/arch/x86/ia32/ia32entry.S
5682--- linux-3.0.4/arch/x86/ia32/ia32entry.S 2011-07-21 22:17:23.000000000 -0400
5683+++ linux-3.0.4/arch/x86/ia32/ia32entry.S 2011-08-25 17:36:37.000000000 -0400
5684@@ -13,6 +13,7 @@
5685 #include <asm/thread_info.h>
5686 #include <asm/segment.h>
5687 #include <asm/irqflags.h>
5688+#include <asm/pgtable.h>
5689 #include <linux/linkage.h>
5690
5691 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
5692@@ -95,6 +96,29 @@ ENTRY(native_irq_enable_sysexit)
5693 ENDPROC(native_irq_enable_sysexit)
5694 #endif
5695
5696+ .macro pax_enter_kernel_user
5697+#ifdef CONFIG_PAX_MEMORY_UDEREF
5698+ call pax_enter_kernel_user
5699+#endif
5700+ .endm
5701+
5702+ .macro pax_exit_kernel_user
5703+#ifdef CONFIG_PAX_MEMORY_UDEREF
5704+ call pax_exit_kernel_user
5705+#endif
5706+#ifdef CONFIG_PAX_RANDKSTACK
5707+ pushq %rax
5708+ call pax_randomize_kstack
5709+ popq %rax
5710+#endif
5711+ .endm
5712+
5713+ .macro pax_erase_kstack
5714+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
5715+ call pax_erase_kstack
5716+#endif
5717+ .endm
5718+
5719 /*
5720 * 32bit SYSENTER instruction entry.
5721 *
5722@@ -121,7 +145,7 @@ ENTRY(ia32_sysenter_target)
5723 CFI_REGISTER rsp,rbp
5724 SWAPGS_UNSAFE_STACK
5725 movq PER_CPU_VAR(kernel_stack), %rsp
5726- addq $(KERNEL_STACK_OFFSET),%rsp
5727+ pax_enter_kernel_user
5728 /*
5729 * No need to follow this irqs on/off section: the syscall
5730 * disabled irqs, here we enable it straight after entry:
5731@@ -134,7 +158,8 @@ ENTRY(ia32_sysenter_target)
5732 CFI_REL_OFFSET rsp,0
5733 pushfq_cfi
5734 /*CFI_REL_OFFSET rflags,0*/
5735- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
5736+ GET_THREAD_INFO(%r10)
5737+ movl TI_sysenter_return(%r10), %r10d
5738 CFI_REGISTER rip,r10
5739 pushq_cfi $__USER32_CS
5740 /*CFI_REL_OFFSET cs,0*/
5741@@ -146,6 +171,12 @@ ENTRY(ia32_sysenter_target)
5742 SAVE_ARGS 0,0,1
5743 /* no need to do an access_ok check here because rbp has been
5744 32bit zero extended */
5745+
5746+#ifdef CONFIG_PAX_MEMORY_UDEREF
5747+ mov $PAX_USER_SHADOW_BASE,%r10
5748+ add %r10,%rbp
5749+#endif
5750+
5751 1: movl (%rbp),%ebp
5752 .section __ex_table,"a"
5753 .quad 1b,ia32_badarg
5754@@ -168,6 +199,8 @@ sysenter_dispatch:
5755 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5756 jnz sysexit_audit
5757 sysexit_from_sys_call:
5758+ pax_exit_kernel_user
5759+ pax_erase_kstack
5760 andl $~TS_COMPAT,TI_status(%r10)
5761 /* clear IF, that popfq doesn't enable interrupts early */
5762 andl $~0x200,EFLAGS-R11(%rsp)
5763@@ -194,6 +227,9 @@ sysexit_from_sys_call:
5764 movl %eax,%esi /* 2nd arg: syscall number */
5765 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
5766 call audit_syscall_entry
5767+
5768+ pax_erase_kstack
5769+
5770 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
5771 cmpq $(IA32_NR_syscalls-1),%rax
5772 ja ia32_badsys
5773@@ -246,6 +282,9 @@ sysenter_tracesys:
5774 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
5775 movq %rsp,%rdi /* &pt_regs -> arg1 */
5776 call syscall_trace_enter
5777+
5778+ pax_erase_kstack
5779+
5780 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5781 RESTORE_REST
5782 cmpq $(IA32_NR_syscalls-1),%rax
5783@@ -277,19 +316,24 @@ ENDPROC(ia32_sysenter_target)
5784 ENTRY(ia32_cstar_target)
5785 CFI_STARTPROC32 simple
5786 CFI_SIGNAL_FRAME
5787- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
5788+ CFI_DEF_CFA rsp,0
5789 CFI_REGISTER rip,rcx
5790 /*CFI_REGISTER rflags,r11*/
5791 SWAPGS_UNSAFE_STACK
5792 movl %esp,%r8d
5793 CFI_REGISTER rsp,r8
5794 movq PER_CPU_VAR(kernel_stack),%rsp
5795+
5796+#ifdef CONFIG_PAX_MEMORY_UDEREF
5797+ pax_enter_kernel_user
5798+#endif
5799+
5800 /*
5801 * No need to follow this irqs on/off section: the syscall
5802 * disabled irqs and here we enable it straight after entry:
5803 */
5804 ENABLE_INTERRUPTS(CLBR_NONE)
5805- SAVE_ARGS 8,1,1
5806+ SAVE_ARGS 8*6,1,1
5807 movl %eax,%eax /* zero extension */
5808 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
5809 movq %rcx,RIP-ARGOFFSET(%rsp)
5810@@ -305,6 +349,12 @@ ENTRY(ia32_cstar_target)
5811 /* no need to do an access_ok check here because r8 has been
5812 32bit zero extended */
5813 /* hardware stack frame is complete now */
5814+
5815+#ifdef CONFIG_PAX_MEMORY_UDEREF
5816+ mov $PAX_USER_SHADOW_BASE,%r10
5817+ add %r10,%r8
5818+#endif
5819+
5820 1: movl (%r8),%r9d
5821 .section __ex_table,"a"
5822 .quad 1b,ia32_badarg
5823@@ -327,6 +377,8 @@ cstar_dispatch:
5824 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5825 jnz sysretl_audit
5826 sysretl_from_sys_call:
5827+ pax_exit_kernel_user
5828+ pax_erase_kstack
5829 andl $~TS_COMPAT,TI_status(%r10)
5830 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
5831 movl RIP-ARGOFFSET(%rsp),%ecx
5832@@ -364,6 +416,9 @@ cstar_tracesys:
5833 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5834 movq %rsp,%rdi /* &pt_regs -> arg1 */
5835 call syscall_trace_enter
5836+
5837+ pax_erase_kstack
5838+
5839 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
5840 RESTORE_REST
5841 xchgl %ebp,%r9d
5842@@ -409,6 +464,7 @@ ENTRY(ia32_syscall)
5843 CFI_REL_OFFSET rip,RIP-RIP
5844 PARAVIRT_ADJUST_EXCEPTION_FRAME
5845 SWAPGS
5846+ pax_enter_kernel_user
5847 /*
5848 * No need to follow this irqs on/off section: the syscall
5849 * disabled irqs and here we enable it straight after entry:
5850@@ -441,6 +497,9 @@ ia32_tracesys:
5851 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5852 movq %rsp,%rdi /* &pt_regs -> arg1 */
5853 call syscall_trace_enter
5854+
5855+ pax_erase_kstack
5856+
5857 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5858 RESTORE_REST
5859 cmpq $(IA32_NR_syscalls-1),%rax
5860diff -urNp linux-3.0.4/arch/x86/ia32/ia32_signal.c linux-3.0.4/arch/x86/ia32/ia32_signal.c
5861--- linux-3.0.4/arch/x86/ia32/ia32_signal.c 2011-07-21 22:17:23.000000000 -0400
5862+++ linux-3.0.4/arch/x86/ia32/ia32_signal.c 2011-10-06 04:17:55.000000000 -0400
5863@@ -167,7 +167,7 @@ asmlinkage long sys32_sigaltstack(const
5864 }
5865 seg = get_fs();
5866 set_fs(KERNEL_DS);
5867- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
5868+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
5869 set_fs(seg);
5870 if (ret >= 0 && uoss_ptr) {
5871 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
5872@@ -374,7 +374,7 @@ static int ia32_setup_sigcontext(struct
5873 */
5874 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
5875 size_t frame_size,
5876- void **fpstate)
5877+ void __user **fpstate)
5878 {
5879 unsigned long sp;
5880
5881@@ -395,7 +395,7 @@ static void __user *get_sigframe(struct
5882
5883 if (used_math()) {
5884 sp = sp - sig_xstate_ia32_size;
5885- *fpstate = (struct _fpstate_ia32 *) sp;
5886+ *fpstate = (struct _fpstate_ia32 __user *) sp;
5887 if (save_i387_xstate_ia32(*fpstate) < 0)
5888 return (void __user *) -1L;
5889 }
5890@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
5891 sp -= frame_size;
5892 /* Align the stack pointer according to the i386 ABI,
5893 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
5894- sp = ((sp + 4) & -16ul) - 4;
5895+ sp = ((sp - 12) & -16ul) - 4;
5896 return (void __user *) sp;
5897 }
5898
5899@@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
5900 * These are actually not used anymore, but left because some
5901 * gdb versions depend on them as a marker.
5902 */
5903- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5904+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
5905 } put_user_catch(err);
5906
5907 if (err)
5908@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
5909 0xb8,
5910 __NR_ia32_rt_sigreturn,
5911 0x80cd,
5912- 0,
5913+ 0
5914 };
5915
5916 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
5917@@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
5918
5919 if (ka->sa.sa_flags & SA_RESTORER)
5920 restorer = ka->sa.sa_restorer;
5921+ else if (current->mm->context.vdso)
5922+ /* Return stub is in 32bit vsyscall page */
5923+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
5924 else
5925- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
5926- rt_sigreturn);
5927+ restorer = &frame->retcode;
5928 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
5929
5930 /*
5931 * Not actually used anymore, but left because some gdb
5932 * versions need it.
5933 */
5934- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5935+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
5936 } put_user_catch(err);
5937
5938 if (err)
5939diff -urNp linux-3.0.4/arch/x86/ia32/sys_ia32.c linux-3.0.4/arch/x86/ia32/sys_ia32.c
5940--- linux-3.0.4/arch/x86/ia32/sys_ia32.c 2011-07-21 22:17:23.000000000 -0400
5941+++ linux-3.0.4/arch/x86/ia32/sys_ia32.c 2011-10-06 04:17:55.000000000 -0400
5942@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsign
5943 */
5944 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
5945 {
5946- typeof(ubuf->st_uid) uid = 0;
5947- typeof(ubuf->st_gid) gid = 0;
5948+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
5949+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
5950 SET_UID(uid, stat->uid);
5951 SET_GID(gid, stat->gid);
5952 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
5953@@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int
5954 }
5955 set_fs(KERNEL_DS);
5956 ret = sys_rt_sigprocmask(how,
5957- set ? (sigset_t __user *)&s : NULL,
5958- oset ? (sigset_t __user *)&s : NULL,
5959+ set ? (sigset_t __force_user *)&s : NULL,
5960+ oset ? (sigset_t __force_user *)&s : NULL,
5961 sigsetsize);
5962 set_fs(old_fs);
5963 if (ret)
5964@@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int
5965 return alarm_setitimer(seconds);
5966 }
5967
5968-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
5969+asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
5970 int options)
5971 {
5972 return compat_sys_wait4(pid, stat_addr, options, NULL);
5973@@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_inter
5974 mm_segment_t old_fs = get_fs();
5975
5976 set_fs(KERNEL_DS);
5977- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
5978+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
5979 set_fs(old_fs);
5980 if (put_compat_timespec(&t, interval))
5981 return -EFAULT;
5982@@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(comp
5983 mm_segment_t old_fs = get_fs();
5984
5985 set_fs(KERNEL_DS);
5986- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
5987+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
5988 set_fs(old_fs);
5989 if (!ret) {
5990 switch (_NSIG_WORDS) {
5991@@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(in
5992 if (copy_siginfo_from_user32(&info, uinfo))
5993 return -EFAULT;
5994 set_fs(KERNEL_DS);
5995- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
5996+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
5997 set_fs(old_fs);
5998 return ret;
5999 }
6000@@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_f
6001 return -EFAULT;
6002
6003 set_fs(KERNEL_DS);
6004- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
6005+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
6006 count);
6007 set_fs(old_fs);
6008
6009diff -urNp linux-3.0.4/arch/x86/include/asm/alternative-asm.h linux-3.0.4/arch/x86/include/asm/alternative-asm.h
6010--- linux-3.0.4/arch/x86/include/asm/alternative-asm.h 2011-07-21 22:17:23.000000000 -0400
6011+++ linux-3.0.4/arch/x86/include/asm/alternative-asm.h 2011-10-06 04:17:55.000000000 -0400
6012@@ -15,6 +15,15 @@
6013 .endm
6014 #endif
6015
6016+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
6017+ .macro pax_force_retaddr rip=0
6018+ btsq $63,\rip(%rsp)
6019+ .endm
6020+#else
6021+ .macro pax_force_retaddr rip=0
6022+ .endm
6023+#endif
6024+
6025 .macro altinstruction_entry orig alt feature orig_len alt_len
6026 .align 8
6027 .quad \orig
6028diff -urNp linux-3.0.4/arch/x86/include/asm/alternative.h linux-3.0.4/arch/x86/include/asm/alternative.h
6029--- linux-3.0.4/arch/x86/include/asm/alternative.h 2011-07-21 22:17:23.000000000 -0400
6030+++ linux-3.0.4/arch/x86/include/asm/alternative.h 2011-08-23 21:47:55.000000000 -0400
6031@@ -93,7 +93,7 @@ static inline int alternatives_text_rese
6032 ".section .discard,\"aw\",@progbits\n" \
6033 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
6034 ".previous\n" \
6035- ".section .altinstr_replacement, \"ax\"\n" \
6036+ ".section .altinstr_replacement, \"a\"\n" \
6037 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
6038 ".previous"
6039
6040diff -urNp linux-3.0.4/arch/x86/include/asm/apic.h linux-3.0.4/arch/x86/include/asm/apic.h
6041--- linux-3.0.4/arch/x86/include/asm/apic.h 2011-07-21 22:17:23.000000000 -0400
6042+++ linux-3.0.4/arch/x86/include/asm/apic.h 2011-08-23 21:48:14.000000000 -0400
6043@@ -45,7 +45,7 @@ static inline void generic_apic_probe(vo
6044
6045 #ifdef CONFIG_X86_LOCAL_APIC
6046
6047-extern unsigned int apic_verbosity;
6048+extern int apic_verbosity;
6049 extern int local_apic_timer_c2_ok;
6050
6051 extern int disable_apic;
6052diff -urNp linux-3.0.4/arch/x86/include/asm/apm.h linux-3.0.4/arch/x86/include/asm/apm.h
6053--- linux-3.0.4/arch/x86/include/asm/apm.h 2011-07-21 22:17:23.000000000 -0400
6054+++ linux-3.0.4/arch/x86/include/asm/apm.h 2011-08-23 21:47:55.000000000 -0400
6055@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
6056 __asm__ __volatile__(APM_DO_ZERO_SEGS
6057 "pushl %%edi\n\t"
6058 "pushl %%ebp\n\t"
6059- "lcall *%%cs:apm_bios_entry\n\t"
6060+ "lcall *%%ss:apm_bios_entry\n\t"
6061 "setc %%al\n\t"
6062 "popl %%ebp\n\t"
6063 "popl %%edi\n\t"
6064@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
6065 __asm__ __volatile__(APM_DO_ZERO_SEGS
6066 "pushl %%edi\n\t"
6067 "pushl %%ebp\n\t"
6068- "lcall *%%cs:apm_bios_entry\n\t"
6069+ "lcall *%%ss:apm_bios_entry\n\t"
6070 "setc %%bl\n\t"
6071 "popl %%ebp\n\t"
6072 "popl %%edi\n\t"
6073diff -urNp linux-3.0.4/arch/x86/include/asm/atomic64_32.h linux-3.0.4/arch/x86/include/asm/atomic64_32.h
6074--- linux-3.0.4/arch/x86/include/asm/atomic64_32.h 2011-07-21 22:17:23.000000000 -0400
6075+++ linux-3.0.4/arch/x86/include/asm/atomic64_32.h 2011-08-23 21:47:55.000000000 -0400
6076@@ -12,6 +12,14 @@ typedef struct {
6077 u64 __aligned(8) counter;
6078 } atomic64_t;
6079
6080+#ifdef CONFIG_PAX_REFCOUNT
6081+typedef struct {
6082+ u64 __aligned(8) counter;
6083+} atomic64_unchecked_t;
6084+#else
6085+typedef atomic64_t atomic64_unchecked_t;
6086+#endif
6087+
6088 #define ATOMIC64_INIT(val) { (val) }
6089
6090 #ifdef CONFIG_X86_CMPXCHG64
6091@@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg
6092 }
6093
6094 /**
6095+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
6096+ * @p: pointer to type atomic64_unchecked_t
6097+ * @o: expected value
6098+ * @n: new value
6099+ *
6100+ * Atomically sets @v to @n if it was equal to @o and returns
6101+ * the old value.
6102+ */
6103+
6104+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
6105+{
6106+ return cmpxchg64(&v->counter, o, n);
6107+}
6108+
6109+/**
6110 * atomic64_xchg - xchg atomic64 variable
6111 * @v: pointer to type atomic64_t
6112 * @n: value to assign
6113@@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64
6114 }
6115
6116 /**
6117+ * atomic64_set_unchecked - set atomic64 variable
6118+ * @v: pointer to type atomic64_unchecked_t
6119+ * @n: value to assign
6120+ *
6121+ * Atomically sets the value of @v to @n.
6122+ */
6123+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
6124+{
6125+ unsigned high = (unsigned)(i >> 32);
6126+ unsigned low = (unsigned)i;
6127+ asm volatile(ATOMIC64_ALTERNATIVE(set)
6128+ : "+b" (low), "+c" (high)
6129+ : "S" (v)
6130+ : "eax", "edx", "memory"
6131+ );
6132+}
6133+
6134+/**
6135 * atomic64_read - read atomic64 variable
6136 * @v: pointer to type atomic64_t
6137 *
6138@@ -93,6 +134,22 @@ static inline long long atomic64_read(at
6139 }
6140
6141 /**
6142+ * atomic64_read_unchecked - read atomic64 variable
6143+ * @v: pointer to type atomic64_unchecked_t
6144+ *
6145+ * Atomically reads the value of @v and returns it.
6146+ */
6147+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
6148+{
6149+ long long r;
6150+ asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
6151+ : "=A" (r), "+c" (v)
6152+ : : "memory"
6153+ );
6154+ return r;
6155+ }
6156+
6157+/**
6158 * atomic64_add_return - add and return
6159 * @i: integer value to add
6160 * @v: pointer to type atomic64_t
6161@@ -108,6 +165,22 @@ static inline long long atomic64_add_ret
6162 return i;
6163 }
6164
6165+/**
6166+ * atomic64_add_return_unchecked - add and return
6167+ * @i: integer value to add
6168+ * @v: pointer to type atomic64_unchecked_t
6169+ *
6170+ * Atomically adds @i to @v and returns @i + *@v
6171+ */
6172+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
6173+{
6174+ asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
6175+ : "+A" (i), "+c" (v)
6176+ : : "memory"
6177+ );
6178+ return i;
6179+}
6180+
6181 /*
6182 * Other variants with different arithmetic operators:
6183 */
6184@@ -131,6 +204,17 @@ static inline long long atomic64_inc_ret
6185 return a;
6186 }
6187
6188+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6189+{
6190+ long long a;
6191+ asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
6192+ : "=A" (a)
6193+ : "S" (v)
6194+ : "memory", "ecx"
6195+ );
6196+ return a;
6197+}
6198+
6199 static inline long long atomic64_dec_return(atomic64_t *v)
6200 {
6201 long long a;
6202@@ -159,6 +243,22 @@ static inline long long atomic64_add(lon
6203 }
6204
6205 /**
6206+ * atomic64_add_unchecked - add integer to atomic64 variable
6207+ * @i: integer value to add
6208+ * @v: pointer to type atomic64_unchecked_t
6209+ *
6210+ * Atomically adds @i to @v.
6211+ */
6212+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
6213+{
6214+ asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
6215+ : "+A" (i), "+c" (v)
6216+ : : "memory"
6217+ );
6218+ return i;
6219+}
6220+
6221+/**
6222 * atomic64_sub - subtract the atomic64 variable
6223 * @i: integer value to subtract
6224 * @v: pointer to type atomic64_t
6225diff -urNp linux-3.0.4/arch/x86/include/asm/atomic64_64.h linux-3.0.4/arch/x86/include/asm/atomic64_64.h
6226--- linux-3.0.4/arch/x86/include/asm/atomic64_64.h 2011-07-21 22:17:23.000000000 -0400
6227+++ linux-3.0.4/arch/x86/include/asm/atomic64_64.h 2011-08-23 21:47:55.000000000 -0400
6228@@ -18,7 +18,19 @@
6229 */
6230 static inline long atomic64_read(const atomic64_t *v)
6231 {
6232- return (*(volatile long *)&(v)->counter);
6233+ return (*(volatile const long *)&(v)->counter);
6234+}
6235+
6236+/**
6237+ * atomic64_read_unchecked - read atomic64 variable
6238+ * @v: pointer of type atomic64_unchecked_t
6239+ *
6240+ * Atomically reads the value of @v.
6241+ * Doesn't imply a read memory barrier.
6242+ */
6243+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6244+{
6245+ return (*(volatile const long *)&(v)->counter);
6246 }
6247
6248 /**
6249@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64
6250 }
6251
6252 /**
6253+ * atomic64_set_unchecked - set atomic64 variable
6254+ * @v: pointer to type atomic64_unchecked_t
6255+ * @i: required value
6256+ *
6257+ * Atomically sets the value of @v to @i.
6258+ */
6259+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6260+{
6261+ v->counter = i;
6262+}
6263+
6264+/**
6265 * atomic64_add - add integer to atomic64 variable
6266 * @i: integer value to add
6267 * @v: pointer to type atomic64_t
6268@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64
6269 */
6270 static inline void atomic64_add(long i, atomic64_t *v)
6271 {
6272+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
6273+
6274+#ifdef CONFIG_PAX_REFCOUNT
6275+ "jno 0f\n"
6276+ LOCK_PREFIX "subq %1,%0\n"
6277+ "int $4\n0:\n"
6278+ _ASM_EXTABLE(0b, 0b)
6279+#endif
6280+
6281+ : "=m" (v->counter)
6282+ : "er" (i), "m" (v->counter));
6283+}
6284+
6285+/**
6286+ * atomic64_add_unchecked - add integer to atomic64 variable
6287+ * @i: integer value to add
6288+ * @v: pointer to type atomic64_unchecked_t
6289+ *
6290+ * Atomically adds @i to @v.
6291+ */
6292+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6293+{
6294 asm volatile(LOCK_PREFIX "addq %1,%0"
6295 : "=m" (v->counter)
6296 : "er" (i), "m" (v->counter));
6297@@ -56,7 +102,29 @@ static inline void atomic64_add(long i,
6298 */
6299 static inline void atomic64_sub(long i, atomic64_t *v)
6300 {
6301- asm volatile(LOCK_PREFIX "subq %1,%0"
6302+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
6303+
6304+#ifdef CONFIG_PAX_REFCOUNT
6305+ "jno 0f\n"
6306+ LOCK_PREFIX "addq %1,%0\n"
6307+ "int $4\n0:\n"
6308+ _ASM_EXTABLE(0b, 0b)
6309+#endif
6310+
6311+ : "=m" (v->counter)
6312+ : "er" (i), "m" (v->counter));
6313+}
6314+
6315+/**
6316+ * atomic64_sub_unchecked - subtract the atomic64 variable
6317+ * @i: integer value to subtract
6318+ * @v: pointer to type atomic64_unchecked_t
6319+ *
6320+ * Atomically subtracts @i from @v.
6321+ */
6322+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6323+{
6324+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
6325 : "=m" (v->counter)
6326 : "er" (i), "m" (v->counter));
6327 }
6328@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(
6329 {
6330 unsigned char c;
6331
6332- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
6333+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
6334+
6335+#ifdef CONFIG_PAX_REFCOUNT
6336+ "jno 0f\n"
6337+ LOCK_PREFIX "addq %2,%0\n"
6338+ "int $4\n0:\n"
6339+ _ASM_EXTABLE(0b, 0b)
6340+#endif
6341+
6342+ "sete %1\n"
6343 : "=m" (v->counter), "=qm" (c)
6344 : "er" (i), "m" (v->counter) : "memory");
6345 return c;
6346@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(
6347 */
6348 static inline void atomic64_inc(atomic64_t *v)
6349 {
6350+ asm volatile(LOCK_PREFIX "incq %0\n"
6351+
6352+#ifdef CONFIG_PAX_REFCOUNT
6353+ "jno 0f\n"
6354+ LOCK_PREFIX "decq %0\n"
6355+ "int $4\n0:\n"
6356+ _ASM_EXTABLE(0b, 0b)
6357+#endif
6358+
6359+ : "=m" (v->counter)
6360+ : "m" (v->counter));
6361+}
6362+
6363+/**
6364+ * atomic64_inc_unchecked - increment atomic64 variable
6365+ * @v: pointer to type atomic64_unchecked_t
6366+ *
6367+ * Atomically increments @v by 1.
6368+ */
6369+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
6370+{
6371 asm volatile(LOCK_PREFIX "incq %0"
6372 : "=m" (v->counter)
6373 : "m" (v->counter));
6374@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64
6375 */
6376 static inline void atomic64_dec(atomic64_t *v)
6377 {
6378- asm volatile(LOCK_PREFIX "decq %0"
6379+ asm volatile(LOCK_PREFIX "decq %0\n"
6380+
6381+#ifdef CONFIG_PAX_REFCOUNT
6382+ "jno 0f\n"
6383+ LOCK_PREFIX "incq %0\n"
6384+ "int $4\n0:\n"
6385+ _ASM_EXTABLE(0b, 0b)
6386+#endif
6387+
6388+ : "=m" (v->counter)
6389+ : "m" (v->counter));
6390+}
6391+
6392+/**
6393+ * atomic64_dec_unchecked - decrement atomic64 variable
6394+ * @v: pointer to type atomic64_t
6395+ *
6396+ * Atomically decrements @v by 1.
6397+ */
6398+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
6399+{
6400+ asm volatile(LOCK_PREFIX "decq %0\n"
6401 : "=m" (v->counter)
6402 : "m" (v->counter));
6403 }
6404@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(
6405 {
6406 unsigned char c;
6407
6408- asm volatile(LOCK_PREFIX "decq %0; sete %1"
6409+ asm volatile(LOCK_PREFIX "decq %0\n"
6410+
6411+#ifdef CONFIG_PAX_REFCOUNT
6412+ "jno 0f\n"
6413+ LOCK_PREFIX "incq %0\n"
6414+ "int $4\n0:\n"
6415+ _ASM_EXTABLE(0b, 0b)
6416+#endif
6417+
6418+ "sete %1\n"
6419 : "=m" (v->counter), "=qm" (c)
6420 : "m" (v->counter) : "memory");
6421 return c != 0;
6422@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(
6423 {
6424 unsigned char c;
6425
6426- asm volatile(LOCK_PREFIX "incq %0; sete %1"
6427+ asm volatile(LOCK_PREFIX "incq %0\n"
6428+
6429+#ifdef CONFIG_PAX_REFCOUNT
6430+ "jno 0f\n"
6431+ LOCK_PREFIX "decq %0\n"
6432+ "int $4\n0:\n"
6433+ _ASM_EXTABLE(0b, 0b)
6434+#endif
6435+
6436+ "sete %1\n"
6437 : "=m" (v->counter), "=qm" (c)
6438 : "m" (v->counter) : "memory");
6439 return c != 0;
6440@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(
6441 {
6442 unsigned char c;
6443
6444- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
6445+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
6446+
6447+#ifdef CONFIG_PAX_REFCOUNT
6448+ "jno 0f\n"
6449+ LOCK_PREFIX "subq %2,%0\n"
6450+ "int $4\n0:\n"
6451+ _ASM_EXTABLE(0b, 0b)
6452+#endif
6453+
6454+ "sets %1\n"
6455 : "=m" (v->counter), "=qm" (c)
6456 : "er" (i), "m" (v->counter) : "memory");
6457 return c;
6458@@ -171,7 +317,31 @@ static inline int atomic64_add_negative(
6459 static inline long atomic64_add_return(long i, atomic64_t *v)
6460 {
6461 long __i = i;
6462- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
6463+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
6464+
6465+#ifdef CONFIG_PAX_REFCOUNT
6466+ "jno 0f\n"
6467+ "movq %0, %1\n"
6468+ "int $4\n0:\n"
6469+ _ASM_EXTABLE(0b, 0b)
6470+#endif
6471+
6472+ : "+r" (i), "+m" (v->counter)
6473+ : : "memory");
6474+ return i + __i;
6475+}
6476+
6477+/**
6478+ * atomic64_add_return_unchecked - add and return
6479+ * @i: integer value to add
6480+ * @v: pointer to type atomic64_unchecked_t
6481+ *
6482+ * Atomically adds @i to @v and returns @i + @v
6483+ */
6484+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6485+{
6486+ long __i = i;
6487+ asm volatile(LOCK_PREFIX "xaddq %0, %1"
6488 : "+r" (i), "+m" (v->counter)
6489 : : "memory");
6490 return i + __i;
6491@@ -183,6 +353,10 @@ static inline long atomic64_sub_return(l
6492 }
6493
6494 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
6495+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6496+{
6497+ return atomic64_add_return_unchecked(1, v);
6498+}
6499 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
6500
6501 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6502@@ -190,6 +364,11 @@ static inline long atomic64_cmpxchg(atom
6503 return cmpxchg(&v->counter, old, new);
6504 }
6505
6506+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
6507+{
6508+ return cmpxchg(&v->counter, old, new);
6509+}
6510+
6511 static inline long atomic64_xchg(atomic64_t *v, long new)
6512 {
6513 return xchg(&v->counter, new);
6514@@ -206,17 +385,30 @@ static inline long atomic64_xchg(atomic6
6515 */
6516 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
6517 {
6518- long c, old;
6519+ long c, old, new;
6520 c = atomic64_read(v);
6521 for (;;) {
6522- if (unlikely(c == (u)))
6523+ if (unlikely(c == u))
6524 break;
6525- old = atomic64_cmpxchg((v), c, c + (a));
6526+
6527+ asm volatile("add %2,%0\n"
6528+
6529+#ifdef CONFIG_PAX_REFCOUNT
6530+ "jno 0f\n"
6531+ "sub %2,%0\n"
6532+ "int $4\n0:\n"
6533+ _ASM_EXTABLE(0b, 0b)
6534+#endif
6535+
6536+ : "=r" (new)
6537+ : "0" (c), "ir" (a));
6538+
6539+ old = atomic64_cmpxchg(v, c, new);
6540 if (likely(old == c))
6541 break;
6542 c = old;
6543 }
6544- return c != (u);
6545+ return c != u;
6546 }
6547
6548 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
6549diff -urNp linux-3.0.4/arch/x86/include/asm/atomic.h linux-3.0.4/arch/x86/include/asm/atomic.h
6550--- linux-3.0.4/arch/x86/include/asm/atomic.h 2011-07-21 22:17:23.000000000 -0400
6551+++ linux-3.0.4/arch/x86/include/asm/atomic.h 2011-08-23 21:47:55.000000000 -0400
6552@@ -22,7 +22,18 @@
6553 */
6554 static inline int atomic_read(const atomic_t *v)
6555 {
6556- return (*(volatile int *)&(v)->counter);
6557+ return (*(volatile const int *)&(v)->counter);
6558+}
6559+
6560+/**
6561+ * atomic_read_unchecked - read atomic variable
6562+ * @v: pointer of type atomic_unchecked_t
6563+ *
6564+ * Atomically reads the value of @v.
6565+ */
6566+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6567+{
6568+ return (*(volatile const int *)&(v)->counter);
6569 }
6570
6571 /**
6572@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *
6573 }
6574
6575 /**
6576+ * atomic_set_unchecked - set atomic variable
6577+ * @v: pointer of type atomic_unchecked_t
6578+ * @i: required value
6579+ *
6580+ * Atomically sets the value of @v to @i.
6581+ */
6582+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6583+{
6584+ v->counter = i;
6585+}
6586+
6587+/**
6588 * atomic_add - add integer to atomic variable
6589 * @i: integer value to add
6590 * @v: pointer of type atomic_t
6591@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *
6592 */
6593 static inline void atomic_add(int i, atomic_t *v)
6594 {
6595- asm volatile(LOCK_PREFIX "addl %1,%0"
6596+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6597+
6598+#ifdef CONFIG_PAX_REFCOUNT
6599+ "jno 0f\n"
6600+ LOCK_PREFIX "subl %1,%0\n"
6601+ "int $4\n0:\n"
6602+ _ASM_EXTABLE(0b, 0b)
6603+#endif
6604+
6605+ : "+m" (v->counter)
6606+ : "ir" (i));
6607+}
6608+
6609+/**
6610+ * atomic_add_unchecked - add integer to atomic variable
6611+ * @i: integer value to add
6612+ * @v: pointer of type atomic_unchecked_t
6613+ *
6614+ * Atomically adds @i to @v.
6615+ */
6616+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6617+{
6618+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6619 : "+m" (v->counter)
6620 : "ir" (i));
6621 }
6622@@ -60,7 +105,29 @@ static inline void atomic_add(int i, ato
6623 */
6624 static inline void atomic_sub(int i, atomic_t *v)
6625 {
6626- asm volatile(LOCK_PREFIX "subl %1,%0"
6627+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
6628+
6629+#ifdef CONFIG_PAX_REFCOUNT
6630+ "jno 0f\n"
6631+ LOCK_PREFIX "addl %1,%0\n"
6632+ "int $4\n0:\n"
6633+ _ASM_EXTABLE(0b, 0b)
6634+#endif
6635+
6636+ : "+m" (v->counter)
6637+ : "ir" (i));
6638+}
6639+
6640+/**
6641+ * atomic_sub_unchecked - subtract integer from atomic variable
6642+ * @i: integer value to subtract
6643+ * @v: pointer of type atomic_unchecked_t
6644+ *
6645+ * Atomically subtracts @i from @v.
6646+ */
6647+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6648+{
6649+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
6650 : "+m" (v->counter)
6651 : "ir" (i));
6652 }
6653@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(in
6654 {
6655 unsigned char c;
6656
6657- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6658+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
6659+
6660+#ifdef CONFIG_PAX_REFCOUNT
6661+ "jno 0f\n"
6662+ LOCK_PREFIX "addl %2,%0\n"
6663+ "int $4\n0:\n"
6664+ _ASM_EXTABLE(0b, 0b)
6665+#endif
6666+
6667+ "sete %1\n"
6668 : "+m" (v->counter), "=qm" (c)
6669 : "ir" (i) : "memory");
6670 return c;
6671@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(in
6672 */
6673 static inline void atomic_inc(atomic_t *v)
6674 {
6675- asm volatile(LOCK_PREFIX "incl %0"
6676+ asm volatile(LOCK_PREFIX "incl %0\n"
6677+
6678+#ifdef CONFIG_PAX_REFCOUNT
6679+ "jno 0f\n"
6680+ LOCK_PREFIX "decl %0\n"
6681+ "int $4\n0:\n"
6682+ _ASM_EXTABLE(0b, 0b)
6683+#endif
6684+
6685+ : "+m" (v->counter));
6686+}
6687+
6688+/**
6689+ * atomic_inc_unchecked - increment atomic variable
6690+ * @v: pointer of type atomic_unchecked_t
6691+ *
6692+ * Atomically increments @v by 1.
6693+ */
6694+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6695+{
6696+ asm volatile(LOCK_PREFIX "incl %0\n"
6697 : "+m" (v->counter));
6698 }
6699
6700@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *
6701 */
6702 static inline void atomic_dec(atomic_t *v)
6703 {
6704- asm volatile(LOCK_PREFIX "decl %0"
6705+ asm volatile(LOCK_PREFIX "decl %0\n"
6706+
6707+#ifdef CONFIG_PAX_REFCOUNT
6708+ "jno 0f\n"
6709+ LOCK_PREFIX "incl %0\n"
6710+ "int $4\n0:\n"
6711+ _ASM_EXTABLE(0b, 0b)
6712+#endif
6713+
6714+ : "+m" (v->counter));
6715+}
6716+
6717+/**
6718+ * atomic_dec_unchecked - decrement atomic variable
6719+ * @v: pointer of type atomic_unchecked_t
6720+ *
6721+ * Atomically decrements @v by 1.
6722+ */
6723+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
6724+{
6725+ asm volatile(LOCK_PREFIX "decl %0\n"
6726 : "+m" (v->counter));
6727 }
6728
6729@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(at
6730 {
6731 unsigned char c;
6732
6733- asm volatile(LOCK_PREFIX "decl %0; sete %1"
6734+ asm volatile(LOCK_PREFIX "decl %0\n"
6735+
6736+#ifdef CONFIG_PAX_REFCOUNT
6737+ "jno 0f\n"
6738+ LOCK_PREFIX "incl %0\n"
6739+ "int $4\n0:\n"
6740+ _ASM_EXTABLE(0b, 0b)
6741+#endif
6742+
6743+ "sete %1\n"
6744 : "+m" (v->counter), "=qm" (c)
6745 : : "memory");
6746 return c != 0;
6747@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(at
6748 {
6749 unsigned char c;
6750
6751- asm volatile(LOCK_PREFIX "incl %0; sete %1"
6752+ asm volatile(LOCK_PREFIX "incl %0\n"
6753+
6754+#ifdef CONFIG_PAX_REFCOUNT
6755+ "jno 0f\n"
6756+ LOCK_PREFIX "decl %0\n"
6757+ "int $4\n0:\n"
6758+ _ASM_EXTABLE(0b, 0b)
6759+#endif
6760+
6761+ "sete %1\n"
6762+ : "+m" (v->counter), "=qm" (c)
6763+ : : "memory");
6764+ return c != 0;
6765+}
6766+
6767+/**
6768+ * atomic_inc_and_test_unchecked - increment and test
6769+ * @v: pointer of type atomic_unchecked_t
6770+ *
6771+ * Atomically increments @v by 1
6772+ * and returns true if the result is zero, or false for all
6773+ * other cases.
6774+ */
6775+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6776+{
6777+ unsigned char c;
6778+
6779+ asm volatile(LOCK_PREFIX "incl %0\n"
6780+ "sete %1\n"
6781 : "+m" (v->counter), "=qm" (c)
6782 : : "memory");
6783 return c != 0;
6784@@ -157,7 +310,16 @@ static inline int atomic_add_negative(in
6785 {
6786 unsigned char c;
6787
6788- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
6789+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
6790+
6791+#ifdef CONFIG_PAX_REFCOUNT
6792+ "jno 0f\n"
6793+ LOCK_PREFIX "subl %2,%0\n"
6794+ "int $4\n0:\n"
6795+ _ASM_EXTABLE(0b, 0b)
6796+#endif
6797+
6798+ "sets %1\n"
6799 : "+m" (v->counter), "=qm" (c)
6800 : "ir" (i) : "memory");
6801 return c;
6802@@ -180,6 +342,46 @@ static inline int atomic_add_return(int
6803 #endif
6804 /* Modern 486+ processor */
6805 __i = i;
6806+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
6807+
6808+#ifdef CONFIG_PAX_REFCOUNT
6809+ "jno 0f\n"
6810+ "movl %0, %1\n"
6811+ "int $4\n0:\n"
6812+ _ASM_EXTABLE(0b, 0b)
6813+#endif
6814+
6815+ : "+r" (i), "+m" (v->counter)
6816+ : : "memory");
6817+ return i + __i;
6818+
6819+#ifdef CONFIG_M386
6820+no_xadd: /* Legacy 386 processor */
6821+ local_irq_save(flags);
6822+ __i = atomic_read(v);
6823+ atomic_set(v, i + __i);
6824+ local_irq_restore(flags);
6825+ return i + __i;
6826+#endif
6827+}
6828+
6829+/**
6830+ * atomic_add_return_unchecked - add integer and return
6831+ * @v: pointer of type atomic_unchecked_t
6832+ * @i: integer value to add
6833+ *
6834+ * Atomically adds @i to @v and returns @i + @v
6835+ */
6836+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6837+{
6838+ int __i;
6839+#ifdef CONFIG_M386
6840+ unsigned long flags;
6841+ if (unlikely(boot_cpu_data.x86 <= 3))
6842+ goto no_xadd;
6843+#endif
6844+ /* Modern 486+ processor */
6845+ __i = i;
6846 asm volatile(LOCK_PREFIX "xaddl %0, %1"
6847 : "+r" (i), "+m" (v->counter)
6848 : : "memory");
6849@@ -208,6 +410,10 @@ static inline int atomic_sub_return(int
6850 }
6851
6852 #define atomic_inc_return(v) (atomic_add_return(1, v))
6853+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6854+{
6855+ return atomic_add_return_unchecked(1, v);
6856+}
6857 #define atomic_dec_return(v) (atomic_sub_return(1, v))
6858
6859 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6860@@ -215,11 +421,21 @@ static inline int atomic_cmpxchg(atomic_
6861 return cmpxchg(&v->counter, old, new);
6862 }
6863
6864+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
6865+{
6866+ return cmpxchg(&v->counter, old, new);
6867+}
6868+
6869 static inline int atomic_xchg(atomic_t *v, int new)
6870 {
6871 return xchg(&v->counter, new);
6872 }
6873
6874+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6875+{
6876+ return xchg(&v->counter, new);
6877+}
6878+
6879 /**
6880 * atomic_add_unless - add unless the number is already a given value
6881 * @v: pointer of type atomic_t
6882@@ -231,21 +447,77 @@ static inline int atomic_xchg(atomic_t *
6883 */
6884 static inline int atomic_add_unless(atomic_t *v, int a, int u)
6885 {
6886- int c, old;
6887+ int c, old, new;
6888 c = atomic_read(v);
6889 for (;;) {
6890- if (unlikely(c == (u)))
6891+ if (unlikely(c == u))
6892 break;
6893- old = atomic_cmpxchg((v), c, c + (a));
6894+
6895+ asm volatile("addl %2,%0\n"
6896+
6897+#ifdef CONFIG_PAX_REFCOUNT
6898+ "jno 0f\n"
6899+ "subl %2,%0\n"
6900+ "int $4\n0:\n"
6901+ _ASM_EXTABLE(0b, 0b)
6902+#endif
6903+
6904+ : "=r" (new)
6905+ : "0" (c), "ir" (a));
6906+
6907+ old = atomic_cmpxchg(v, c, new);
6908 if (likely(old == c))
6909 break;
6910 c = old;
6911 }
6912- return c != (u);
6913+ return c != u;
6914 }
6915
6916 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
6917
6918+/**
6919+ * atomic_inc_not_zero_hint - increment if not null
6920+ * @v: pointer of type atomic_t
6921+ * @hint: probable value of the atomic before the increment
6922+ *
6923+ * This version of atomic_inc_not_zero() gives a hint of probable
6924+ * value of the atomic. This helps processor to not read the memory
6925+ * before doing the atomic read/modify/write cycle, lowering
6926+ * number of bus transactions on some arches.
6927+ *
6928+ * Returns: 0 if increment was not done, 1 otherwise.
6929+ */
6930+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
6931+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
6932+{
6933+ int val, c = hint, new;
6934+
6935+ /* sanity test, should be removed by compiler if hint is a constant */
6936+ if (!hint)
6937+ return atomic_inc_not_zero(v);
6938+
6939+ do {
6940+ asm volatile("incl %0\n"
6941+
6942+#ifdef CONFIG_PAX_REFCOUNT
6943+ "jno 0f\n"
6944+ "decl %0\n"
6945+ "int $4\n0:\n"
6946+ _ASM_EXTABLE(0b, 0b)
6947+#endif
6948+
6949+ : "=r" (new)
6950+ : "0" (c));
6951+
6952+ val = atomic_cmpxchg(v, c, new);
6953+ if (val == c)
6954+ return 1;
6955+ c = val;
6956+ } while (c);
6957+
6958+ return 0;
6959+}
6960+
6961 /*
6962 * atomic_dec_if_positive - decrement by 1 if old value positive
6963 * @v: pointer of type atomic_t
6964diff -urNp linux-3.0.4/arch/x86/include/asm/bitops.h linux-3.0.4/arch/x86/include/asm/bitops.h
6965--- linux-3.0.4/arch/x86/include/asm/bitops.h 2011-07-21 22:17:23.000000000 -0400
6966+++ linux-3.0.4/arch/x86/include/asm/bitops.h 2011-08-23 21:47:55.000000000 -0400
6967@@ -38,7 +38,7 @@
6968 * a mask operation on a byte.
6969 */
6970 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
6971-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
6972+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
6973 #define CONST_MASK(nr) (1 << ((nr) & 7))
6974
6975 /**
6976diff -urNp linux-3.0.4/arch/x86/include/asm/boot.h linux-3.0.4/arch/x86/include/asm/boot.h
6977--- linux-3.0.4/arch/x86/include/asm/boot.h 2011-07-21 22:17:23.000000000 -0400
6978+++ linux-3.0.4/arch/x86/include/asm/boot.h 2011-08-23 21:47:55.000000000 -0400
6979@@ -11,10 +11,15 @@
6980 #include <asm/pgtable_types.h>
6981
6982 /* Physical address where kernel should be loaded. */
6983-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6984+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6985 + (CONFIG_PHYSICAL_ALIGN - 1)) \
6986 & ~(CONFIG_PHYSICAL_ALIGN - 1))
6987
6988+#ifndef __ASSEMBLY__
6989+extern unsigned char __LOAD_PHYSICAL_ADDR[];
6990+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
6991+#endif
6992+
6993 /* Minimum kernel alignment, as a power of two */
6994 #ifdef CONFIG_X86_64
6995 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
6996diff -urNp linux-3.0.4/arch/x86/include/asm/cacheflush.h linux-3.0.4/arch/x86/include/asm/cacheflush.h
6997--- linux-3.0.4/arch/x86/include/asm/cacheflush.h 2011-07-21 22:17:23.000000000 -0400
6998+++ linux-3.0.4/arch/x86/include/asm/cacheflush.h 2011-08-23 21:47:55.000000000 -0400
6999@@ -26,7 +26,7 @@ static inline unsigned long get_page_mem
7000 unsigned long pg_flags = pg->flags & _PGMT_MASK;
7001
7002 if (pg_flags == _PGMT_DEFAULT)
7003- return -1;
7004+ return ~0UL;
7005 else if (pg_flags == _PGMT_WC)
7006 return _PAGE_CACHE_WC;
7007 else if (pg_flags == _PGMT_UC_MINUS)
7008diff -urNp linux-3.0.4/arch/x86/include/asm/cache.h linux-3.0.4/arch/x86/include/asm/cache.h
7009--- linux-3.0.4/arch/x86/include/asm/cache.h 2011-07-21 22:17:23.000000000 -0400
7010+++ linux-3.0.4/arch/x86/include/asm/cache.h 2011-08-23 21:47:55.000000000 -0400
7011@@ -5,12 +5,13 @@
7012
7013 /* L1 cache line size */
7014 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
7015-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7016+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7017
7018 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
7019+#define __read_only __attribute__((__section__(".data..read_only")))
7020
7021 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
7022-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
7023+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
7024
7025 #ifdef CONFIG_X86_VSMP
7026 #ifdef CONFIG_SMP
7027diff -urNp linux-3.0.4/arch/x86/include/asm/checksum_32.h linux-3.0.4/arch/x86/include/asm/checksum_32.h
7028--- linux-3.0.4/arch/x86/include/asm/checksum_32.h 2011-07-21 22:17:23.000000000 -0400
7029+++ linux-3.0.4/arch/x86/include/asm/checksum_32.h 2011-08-23 21:47:55.000000000 -0400
7030@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
7031 int len, __wsum sum,
7032 int *src_err_ptr, int *dst_err_ptr);
7033
7034+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
7035+ int len, __wsum sum,
7036+ int *src_err_ptr, int *dst_err_ptr);
7037+
7038+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
7039+ int len, __wsum sum,
7040+ int *src_err_ptr, int *dst_err_ptr);
7041+
7042 /*
7043 * Note: when you get a NULL pointer exception here this means someone
7044 * passed in an incorrect kernel address to one of these functions.
7045@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
7046 int *err_ptr)
7047 {
7048 might_sleep();
7049- return csum_partial_copy_generic((__force void *)src, dst,
7050+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
7051 len, sum, err_ptr, NULL);
7052 }
7053
7054@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
7055 {
7056 might_sleep();
7057 if (access_ok(VERIFY_WRITE, dst, len))
7058- return csum_partial_copy_generic(src, (__force void *)dst,
7059+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
7060 len, sum, NULL, err_ptr);
7061
7062 if (len)
7063diff -urNp linux-3.0.4/arch/x86/include/asm/cpufeature.h linux-3.0.4/arch/x86/include/asm/cpufeature.h
7064--- linux-3.0.4/arch/x86/include/asm/cpufeature.h 2011-07-21 22:17:23.000000000 -0400
7065+++ linux-3.0.4/arch/x86/include/asm/cpufeature.h 2011-08-23 21:47:55.000000000 -0400
7066@@ -358,7 +358,7 @@ static __always_inline __pure bool __sta
7067 ".section .discard,\"aw\",@progbits\n"
7068 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
7069 ".previous\n"
7070- ".section .altinstr_replacement,\"ax\"\n"
7071+ ".section .altinstr_replacement,\"a\"\n"
7072 "3: movb $1,%0\n"
7073 "4:\n"
7074 ".previous\n"
7075diff -urNp linux-3.0.4/arch/x86/include/asm/desc_defs.h linux-3.0.4/arch/x86/include/asm/desc_defs.h
7076--- linux-3.0.4/arch/x86/include/asm/desc_defs.h 2011-07-21 22:17:23.000000000 -0400
7077+++ linux-3.0.4/arch/x86/include/asm/desc_defs.h 2011-08-23 21:47:55.000000000 -0400
7078@@ -31,6 +31,12 @@ struct desc_struct {
7079 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
7080 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
7081 };
7082+ struct {
7083+ u16 offset_low;
7084+ u16 seg;
7085+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
7086+ unsigned offset_high: 16;
7087+ } gate;
7088 };
7089 } __attribute__((packed));
7090
7091diff -urNp linux-3.0.4/arch/x86/include/asm/desc.h linux-3.0.4/arch/x86/include/asm/desc.h
7092--- linux-3.0.4/arch/x86/include/asm/desc.h 2011-07-21 22:17:23.000000000 -0400
7093+++ linux-3.0.4/arch/x86/include/asm/desc.h 2011-08-23 21:47:55.000000000 -0400
7094@@ -4,6 +4,7 @@
7095 #include <asm/desc_defs.h>
7096 #include <asm/ldt.h>
7097 #include <asm/mmu.h>
7098+#include <asm/pgtable.h>
7099
7100 #include <linux/smp.h>
7101
7102@@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_
7103
7104 desc->type = (info->read_exec_only ^ 1) << 1;
7105 desc->type |= info->contents << 2;
7106+ desc->type |= info->seg_not_present ^ 1;
7107
7108 desc->s = 1;
7109 desc->dpl = 0x3;
7110@@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_
7111 }
7112
7113 extern struct desc_ptr idt_descr;
7114-extern gate_desc idt_table[];
7115-
7116-struct gdt_page {
7117- struct desc_struct gdt[GDT_ENTRIES];
7118-} __attribute__((aligned(PAGE_SIZE)));
7119-
7120-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
7121+extern gate_desc idt_table[256];
7122
7123+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
7124 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
7125 {
7126- return per_cpu(gdt_page, cpu).gdt;
7127+ return cpu_gdt_table[cpu];
7128 }
7129
7130 #ifdef CONFIG_X86_64
7131@@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *
7132 unsigned long base, unsigned dpl, unsigned flags,
7133 unsigned short seg)
7134 {
7135- gate->a = (seg << 16) | (base & 0xffff);
7136- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
7137+ gate->gate.offset_low = base;
7138+ gate->gate.seg = seg;
7139+ gate->gate.reserved = 0;
7140+ gate->gate.type = type;
7141+ gate->gate.s = 0;
7142+ gate->gate.dpl = dpl;
7143+ gate->gate.p = 1;
7144+ gate->gate.offset_high = base >> 16;
7145 }
7146
7147 #endif
7148@@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(str
7149
7150 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
7151 {
7152+ pax_open_kernel();
7153 memcpy(&idt[entry], gate, sizeof(*gate));
7154+ pax_close_kernel();
7155 }
7156
7157 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
7158 {
7159+ pax_open_kernel();
7160 memcpy(&ldt[entry], desc, 8);
7161+ pax_close_kernel();
7162 }
7163
7164 static inline void
7165@@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struc
7166 default: size = sizeof(*gdt); break;
7167 }
7168
7169+ pax_open_kernel();
7170 memcpy(&gdt[entry], desc, size);
7171+ pax_close_kernel();
7172 }
7173
7174 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
7175@@ -207,7 +216,9 @@ static inline void native_set_ldt(const
7176
7177 static inline void native_load_tr_desc(void)
7178 {
7179+ pax_open_kernel();
7180 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
7181+ pax_close_kernel();
7182 }
7183
7184 static inline void native_load_gdt(const struct desc_ptr *dtr)
7185@@ -244,8 +255,10 @@ static inline void native_load_tls(struc
7186 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
7187 unsigned int i;
7188
7189+ pax_open_kernel();
7190 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
7191 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
7192+ pax_close_kernel();
7193 }
7194
7195 #define _LDT_empty(info) \
7196@@ -307,7 +320,7 @@ static inline void set_desc_limit(struct
7197 desc->limit = (limit >> 16) & 0xf;
7198 }
7199
7200-static inline void _set_gate(int gate, unsigned type, void *addr,
7201+static inline void _set_gate(int gate, unsigned type, const void *addr,
7202 unsigned dpl, unsigned ist, unsigned seg)
7203 {
7204 gate_desc s;
7205@@ -326,7 +339,7 @@ static inline void _set_gate(int gate, u
7206 * Pentium F0 0F bugfix can have resulted in the mapped
7207 * IDT being write-protected.
7208 */
7209-static inline void set_intr_gate(unsigned int n, void *addr)
7210+static inline void set_intr_gate(unsigned int n, const void *addr)
7211 {
7212 BUG_ON((unsigned)n > 0xFF);
7213 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
7214@@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsig
7215 /*
7216 * This routine sets up an interrupt gate at directory privilege level 3.
7217 */
7218-static inline void set_system_intr_gate(unsigned int n, void *addr)
7219+static inline void set_system_intr_gate(unsigned int n, const void *addr)
7220 {
7221 BUG_ON((unsigned)n > 0xFF);
7222 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
7223 }
7224
7225-static inline void set_system_trap_gate(unsigned int n, void *addr)
7226+static inline void set_system_trap_gate(unsigned int n, const void *addr)
7227 {
7228 BUG_ON((unsigned)n > 0xFF);
7229 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
7230 }
7231
7232-static inline void set_trap_gate(unsigned int n, void *addr)
7233+static inline void set_trap_gate(unsigned int n, const void *addr)
7234 {
7235 BUG_ON((unsigned)n > 0xFF);
7236 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
7237@@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigne
7238 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
7239 {
7240 BUG_ON((unsigned)n > 0xFF);
7241- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
7242+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
7243 }
7244
7245-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
7246+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
7247 {
7248 BUG_ON((unsigned)n > 0xFF);
7249 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
7250 }
7251
7252-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
7253+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
7254 {
7255 BUG_ON((unsigned)n > 0xFF);
7256 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
7257 }
7258
7259+#ifdef CONFIG_X86_32
7260+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
7261+{
7262+ struct desc_struct d;
7263+
7264+ if (likely(limit))
7265+ limit = (limit - 1UL) >> PAGE_SHIFT;
7266+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
7267+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
7268+}
7269+#endif
7270+
7271 #endif /* _ASM_X86_DESC_H */
7272diff -urNp linux-3.0.4/arch/x86/include/asm/e820.h linux-3.0.4/arch/x86/include/asm/e820.h
7273--- linux-3.0.4/arch/x86/include/asm/e820.h 2011-07-21 22:17:23.000000000 -0400
7274+++ linux-3.0.4/arch/x86/include/asm/e820.h 2011-08-23 21:47:55.000000000 -0400
7275@@ -69,7 +69,7 @@ struct e820map {
7276 #define ISA_START_ADDRESS 0xa0000
7277 #define ISA_END_ADDRESS 0x100000
7278
7279-#define BIOS_BEGIN 0x000a0000
7280+#define BIOS_BEGIN 0x000c0000
7281 #define BIOS_END 0x00100000
7282
7283 #define BIOS_ROM_BASE 0xffe00000
7284diff -urNp linux-3.0.4/arch/x86/include/asm/elf.h linux-3.0.4/arch/x86/include/asm/elf.h
7285--- linux-3.0.4/arch/x86/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
7286+++ linux-3.0.4/arch/x86/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
7287@@ -237,7 +237,25 @@ extern int force_personality32;
7288 the loader. We need to make sure that it is out of the way of the program
7289 that it will "exec", and that there is sufficient room for the brk. */
7290
7291+#ifdef CONFIG_PAX_SEGMEXEC
7292+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
7293+#else
7294 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
7295+#endif
7296+
7297+#ifdef CONFIG_PAX_ASLR
7298+#ifdef CONFIG_X86_32
7299+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
7300+
7301+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7302+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7303+#else
7304+#define PAX_ELF_ET_DYN_BASE 0x400000UL
7305+
7306+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7307+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7308+#endif
7309+#endif
7310
7311 /* This yields a mask that user programs can use to figure out what
7312 instruction set this CPU supports. This could be done in user space,
7313@@ -290,9 +308,7 @@ do { \
7314
7315 #define ARCH_DLINFO \
7316 do { \
7317- if (vdso_enabled) \
7318- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
7319- (unsigned long)current->mm->context.vdso); \
7320+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
7321 } while (0)
7322
7323 #define AT_SYSINFO 32
7324@@ -303,7 +319,7 @@ do { \
7325
7326 #endif /* !CONFIG_X86_32 */
7327
7328-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
7329+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
7330
7331 #define VDSO_ENTRY \
7332 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
7333@@ -317,7 +333,4 @@ extern int arch_setup_additional_pages(s
7334 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
7335 #define compat_arch_setup_additional_pages syscall32_setup_pages
7336
7337-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7338-#define arch_randomize_brk arch_randomize_brk
7339-
7340 #endif /* _ASM_X86_ELF_H */
7341diff -urNp linux-3.0.4/arch/x86/include/asm/emergency-restart.h linux-3.0.4/arch/x86/include/asm/emergency-restart.h
7342--- linux-3.0.4/arch/x86/include/asm/emergency-restart.h 2011-07-21 22:17:23.000000000 -0400
7343+++ linux-3.0.4/arch/x86/include/asm/emergency-restart.h 2011-08-23 21:47:55.000000000 -0400
7344@@ -15,6 +15,6 @@ enum reboot_type {
7345
7346 extern enum reboot_type reboot_type;
7347
7348-extern void machine_emergency_restart(void);
7349+extern void machine_emergency_restart(void) __noreturn;
7350
7351 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
7352diff -urNp linux-3.0.4/arch/x86/include/asm/futex.h linux-3.0.4/arch/x86/include/asm/futex.h
7353--- linux-3.0.4/arch/x86/include/asm/futex.h 2011-07-21 22:17:23.000000000 -0400
7354+++ linux-3.0.4/arch/x86/include/asm/futex.h 2011-10-06 04:17:55.000000000 -0400
7355@@ -12,16 +12,18 @@
7356 #include <asm/system.h>
7357
7358 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
7359+ typecheck(u32 __user *, uaddr); \
7360 asm volatile("1:\t" insn "\n" \
7361 "2:\t.section .fixup,\"ax\"\n" \
7362 "3:\tmov\t%3, %1\n" \
7363 "\tjmp\t2b\n" \
7364 "\t.previous\n" \
7365 _ASM_EXTABLE(1b, 3b) \
7366- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
7367+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
7368 : "i" (-EFAULT), "0" (oparg), "1" (0))
7369
7370 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
7371+ typecheck(u32 __user *, uaddr); \
7372 asm volatile("1:\tmovl %2, %0\n" \
7373 "\tmovl\t%0, %3\n" \
7374 "\t" insn "\n" \
7375@@ -34,7 +36,7 @@
7376 _ASM_EXTABLE(1b, 4b) \
7377 _ASM_EXTABLE(2b, 4b) \
7378 : "=&a" (oldval), "=&r" (ret), \
7379- "+m" (*uaddr), "=&r" (tem) \
7380+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
7381 : "r" (oparg), "i" (-EFAULT), "1" (0))
7382
7383 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
7384@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
7385
7386 switch (op) {
7387 case FUTEX_OP_SET:
7388- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
7389+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
7390 break;
7391 case FUTEX_OP_ADD:
7392- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
7393+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
7394 uaddr, oparg);
7395 break;
7396 case FUTEX_OP_OR:
7397@@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_i
7398 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
7399 return -EFAULT;
7400
7401- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
7402+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
7403 "2:\t.section .fixup, \"ax\"\n"
7404 "3:\tmov %3, %0\n"
7405 "\tjmp 2b\n"
7406 "\t.previous\n"
7407 _ASM_EXTABLE(1b, 3b)
7408- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
7409+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
7410 : "i" (-EFAULT), "r" (newval), "1" (oldval)
7411 : "memory"
7412 );
7413diff -urNp linux-3.0.4/arch/x86/include/asm/hw_irq.h linux-3.0.4/arch/x86/include/asm/hw_irq.h
7414--- linux-3.0.4/arch/x86/include/asm/hw_irq.h 2011-07-21 22:17:23.000000000 -0400
7415+++ linux-3.0.4/arch/x86/include/asm/hw_irq.h 2011-08-23 21:47:55.000000000 -0400
7416@@ -137,8 +137,8 @@ extern void setup_ioapic_dest(void);
7417 extern void enable_IO_APIC(void);
7418
7419 /* Statistics */
7420-extern atomic_t irq_err_count;
7421-extern atomic_t irq_mis_count;
7422+extern atomic_unchecked_t irq_err_count;
7423+extern atomic_unchecked_t irq_mis_count;
7424
7425 /* EISA */
7426 extern void eisa_set_level_irq(unsigned int irq);
7427diff -urNp linux-3.0.4/arch/x86/include/asm/i387.h linux-3.0.4/arch/x86/include/asm/i387.h
7428--- linux-3.0.4/arch/x86/include/asm/i387.h 2011-07-21 22:17:23.000000000 -0400
7429+++ linux-3.0.4/arch/x86/include/asm/i387.h 2011-10-06 04:17:55.000000000 -0400
7430@@ -92,6 +92,11 @@ static inline int fxrstor_checking(struc
7431 {
7432 int err;
7433
7434+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7435+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7436+ fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
7437+#endif
7438+
7439 /* See comment in fxsave() below. */
7440 #ifdef CONFIG_AS_FXSAVEQ
7441 asm volatile("1: fxrstorq %[fx]\n\t"
7442@@ -121,6 +126,11 @@ static inline int fxsave_user(struct i38
7443 {
7444 int err;
7445
7446+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7447+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7448+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
7449+#endif
7450+
7451 /*
7452 * Clear the bytes not touched by the fxsave and reserved
7453 * for the SW usage.
7454@@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu
7455 #endif /* CONFIG_X86_64 */
7456
7457 /* We need a safe address that is cheap to find and that is already
7458- in L1 during context switch. The best choices are unfortunately
7459- different for UP and SMP */
7460-#ifdef CONFIG_SMP
7461-#define safe_address (__per_cpu_offset[0])
7462-#else
7463-#define safe_address (kstat_cpu(0).cpustat.user)
7464-#endif
7465+ in L1 during context switch. */
7466+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
7467
7468 /*
7469 * These must be called with preempt disabled
7470@@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void
7471 struct thread_info *me = current_thread_info();
7472 preempt_disable();
7473 if (me->status & TS_USEDFPU)
7474- __save_init_fpu(me->task);
7475+ __save_init_fpu(current);
7476 else
7477 clts();
7478 }
7479diff -urNp linux-3.0.4/arch/x86/include/asm/io.h linux-3.0.4/arch/x86/include/asm/io.h
7480--- linux-3.0.4/arch/x86/include/asm/io.h 2011-07-21 22:17:23.000000000 -0400
7481+++ linux-3.0.4/arch/x86/include/asm/io.h 2011-08-23 21:47:55.000000000 -0400
7482@@ -196,6 +196,17 @@ extern void set_iounmap_nonlazy(void);
7483
7484 #include <linux/vmalloc.h>
7485
7486+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
7487+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
7488+{
7489+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7490+}
7491+
7492+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
7493+{
7494+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7495+}
7496+
7497 /*
7498 * Convert a virtual cached pointer to an uncached pointer
7499 */
7500diff -urNp linux-3.0.4/arch/x86/include/asm/irqflags.h linux-3.0.4/arch/x86/include/asm/irqflags.h
7501--- linux-3.0.4/arch/x86/include/asm/irqflags.h 2011-07-21 22:17:23.000000000 -0400
7502+++ linux-3.0.4/arch/x86/include/asm/irqflags.h 2011-08-23 21:47:55.000000000 -0400
7503@@ -140,6 +140,11 @@ static inline unsigned long arch_local_i
7504 sti; \
7505 sysexit
7506
7507+#define GET_CR0_INTO_RDI mov %cr0, %rdi
7508+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
7509+#define GET_CR3_INTO_RDI mov %cr3, %rdi
7510+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
7511+
7512 #else
7513 #define INTERRUPT_RETURN iret
7514 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
7515diff -urNp linux-3.0.4/arch/x86/include/asm/kprobes.h linux-3.0.4/arch/x86/include/asm/kprobes.h
7516--- linux-3.0.4/arch/x86/include/asm/kprobes.h 2011-07-21 22:17:23.000000000 -0400
7517+++ linux-3.0.4/arch/x86/include/asm/kprobes.h 2011-08-23 21:47:55.000000000 -0400
7518@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
7519 #define RELATIVEJUMP_SIZE 5
7520 #define RELATIVECALL_OPCODE 0xe8
7521 #define RELATIVE_ADDR_SIZE 4
7522-#define MAX_STACK_SIZE 64
7523-#define MIN_STACK_SIZE(ADDR) \
7524- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
7525- THREAD_SIZE - (unsigned long)(ADDR))) \
7526- ? (MAX_STACK_SIZE) \
7527- : (((unsigned long)current_thread_info()) + \
7528- THREAD_SIZE - (unsigned long)(ADDR)))
7529+#define MAX_STACK_SIZE 64UL
7530+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
7531
7532 #define flush_insn_slot(p) do { } while (0)
7533
7534diff -urNp linux-3.0.4/arch/x86/include/asm/kvm_host.h linux-3.0.4/arch/x86/include/asm/kvm_host.h
7535--- linux-3.0.4/arch/x86/include/asm/kvm_host.h 2011-07-21 22:17:23.000000000 -0400
7536+++ linux-3.0.4/arch/x86/include/asm/kvm_host.h 2011-08-26 19:49:56.000000000 -0400
7537@@ -441,7 +441,7 @@ struct kvm_arch {
7538 unsigned int n_used_mmu_pages;
7539 unsigned int n_requested_mmu_pages;
7540 unsigned int n_max_mmu_pages;
7541- atomic_t invlpg_counter;
7542+ atomic_unchecked_t invlpg_counter;
7543 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
7544 /*
7545 * Hash table of struct kvm_mmu_page.
7546@@ -619,7 +619,7 @@ struct kvm_x86_ops {
7547 enum x86_intercept_stage stage);
7548
7549 const struct trace_print_flags *exit_reasons_str;
7550-};
7551+} __do_const;
7552
7553 struct kvm_arch_async_pf {
7554 u32 token;
7555diff -urNp linux-3.0.4/arch/x86/include/asm/local.h linux-3.0.4/arch/x86/include/asm/local.h
7556--- linux-3.0.4/arch/x86/include/asm/local.h 2011-07-21 22:17:23.000000000 -0400
7557+++ linux-3.0.4/arch/x86/include/asm/local.h 2011-08-23 21:47:55.000000000 -0400
7558@@ -18,26 +18,58 @@ typedef struct {
7559
7560 static inline void local_inc(local_t *l)
7561 {
7562- asm volatile(_ASM_INC "%0"
7563+ asm volatile(_ASM_INC "%0\n"
7564+
7565+#ifdef CONFIG_PAX_REFCOUNT
7566+ "jno 0f\n"
7567+ _ASM_DEC "%0\n"
7568+ "int $4\n0:\n"
7569+ _ASM_EXTABLE(0b, 0b)
7570+#endif
7571+
7572 : "+m" (l->a.counter));
7573 }
7574
7575 static inline void local_dec(local_t *l)
7576 {
7577- asm volatile(_ASM_DEC "%0"
7578+ asm volatile(_ASM_DEC "%0\n"
7579+
7580+#ifdef CONFIG_PAX_REFCOUNT
7581+ "jno 0f\n"
7582+ _ASM_INC "%0\n"
7583+ "int $4\n0:\n"
7584+ _ASM_EXTABLE(0b, 0b)
7585+#endif
7586+
7587 : "+m" (l->a.counter));
7588 }
7589
7590 static inline void local_add(long i, local_t *l)
7591 {
7592- asm volatile(_ASM_ADD "%1,%0"
7593+ asm volatile(_ASM_ADD "%1,%0\n"
7594+
7595+#ifdef CONFIG_PAX_REFCOUNT
7596+ "jno 0f\n"
7597+ _ASM_SUB "%1,%0\n"
7598+ "int $4\n0:\n"
7599+ _ASM_EXTABLE(0b, 0b)
7600+#endif
7601+
7602 : "+m" (l->a.counter)
7603 : "ir" (i));
7604 }
7605
7606 static inline void local_sub(long i, local_t *l)
7607 {
7608- asm volatile(_ASM_SUB "%1,%0"
7609+ asm volatile(_ASM_SUB "%1,%0\n"
7610+
7611+#ifdef CONFIG_PAX_REFCOUNT
7612+ "jno 0f\n"
7613+ _ASM_ADD "%1,%0\n"
7614+ "int $4\n0:\n"
7615+ _ASM_EXTABLE(0b, 0b)
7616+#endif
7617+
7618 : "+m" (l->a.counter)
7619 : "ir" (i));
7620 }
7621@@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
7622 {
7623 unsigned char c;
7624
7625- asm volatile(_ASM_SUB "%2,%0; sete %1"
7626+ asm volatile(_ASM_SUB "%2,%0\n"
7627+
7628+#ifdef CONFIG_PAX_REFCOUNT
7629+ "jno 0f\n"
7630+ _ASM_ADD "%2,%0\n"
7631+ "int $4\n0:\n"
7632+ _ASM_EXTABLE(0b, 0b)
7633+#endif
7634+
7635+ "sete %1\n"
7636 : "+m" (l->a.counter), "=qm" (c)
7637 : "ir" (i) : "memory");
7638 return c;
7639@@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
7640 {
7641 unsigned char c;
7642
7643- asm volatile(_ASM_DEC "%0; sete %1"
7644+ asm volatile(_ASM_DEC "%0\n"
7645+
7646+#ifdef CONFIG_PAX_REFCOUNT
7647+ "jno 0f\n"
7648+ _ASM_INC "%0\n"
7649+ "int $4\n0:\n"
7650+ _ASM_EXTABLE(0b, 0b)
7651+#endif
7652+
7653+ "sete %1\n"
7654 : "+m" (l->a.counter), "=qm" (c)
7655 : : "memory");
7656 return c != 0;
7657@@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
7658 {
7659 unsigned char c;
7660
7661- asm volatile(_ASM_INC "%0; sete %1"
7662+ asm volatile(_ASM_INC "%0\n"
7663+
7664+#ifdef CONFIG_PAX_REFCOUNT
7665+ "jno 0f\n"
7666+ _ASM_DEC "%0\n"
7667+ "int $4\n0:\n"
7668+ _ASM_EXTABLE(0b, 0b)
7669+#endif
7670+
7671+ "sete %1\n"
7672 : "+m" (l->a.counter), "=qm" (c)
7673 : : "memory");
7674 return c != 0;
7675@@ -110,7 +169,16 @@ static inline int local_add_negative(lon
7676 {
7677 unsigned char c;
7678
7679- asm volatile(_ASM_ADD "%2,%0; sets %1"
7680+ asm volatile(_ASM_ADD "%2,%0\n"
7681+
7682+#ifdef CONFIG_PAX_REFCOUNT
7683+ "jno 0f\n"
7684+ _ASM_SUB "%2,%0\n"
7685+ "int $4\n0:\n"
7686+ _ASM_EXTABLE(0b, 0b)
7687+#endif
7688+
7689+ "sets %1\n"
7690 : "+m" (l->a.counter), "=qm" (c)
7691 : "ir" (i) : "memory");
7692 return c;
7693@@ -133,7 +201,15 @@ static inline long local_add_return(long
7694 #endif
7695 /* Modern 486+ processor */
7696 __i = i;
7697- asm volatile(_ASM_XADD "%0, %1;"
7698+ asm volatile(_ASM_XADD "%0, %1\n"
7699+
7700+#ifdef CONFIG_PAX_REFCOUNT
7701+ "jno 0f\n"
7702+ _ASM_MOV "%0,%1\n"
7703+ "int $4\n0:\n"
7704+ _ASM_EXTABLE(0b, 0b)
7705+#endif
7706+
7707 : "+r" (i), "+m" (l->a.counter)
7708 : : "memory");
7709 return i + __i;
7710diff -urNp linux-3.0.4/arch/x86/include/asm/mman.h linux-3.0.4/arch/x86/include/asm/mman.h
7711--- linux-3.0.4/arch/x86/include/asm/mman.h 2011-07-21 22:17:23.000000000 -0400
7712+++ linux-3.0.4/arch/x86/include/asm/mman.h 2011-08-23 21:47:55.000000000 -0400
7713@@ -5,4 +5,14 @@
7714
7715 #include <asm-generic/mman.h>
7716
7717+#ifdef __KERNEL__
7718+#ifndef __ASSEMBLY__
7719+#ifdef CONFIG_X86_32
7720+#define arch_mmap_check i386_mmap_check
7721+int i386_mmap_check(unsigned long addr, unsigned long len,
7722+ unsigned long flags);
7723+#endif
7724+#endif
7725+#endif
7726+
7727 #endif /* _ASM_X86_MMAN_H */
7728diff -urNp linux-3.0.4/arch/x86/include/asm/mmu_context.h linux-3.0.4/arch/x86/include/asm/mmu_context.h
7729--- linux-3.0.4/arch/x86/include/asm/mmu_context.h 2011-07-21 22:17:23.000000000 -0400
7730+++ linux-3.0.4/arch/x86/include/asm/mmu_context.h 2011-08-23 21:48:14.000000000 -0400
7731@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *m
7732
7733 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
7734 {
7735+
7736+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7737+ unsigned int i;
7738+ pgd_t *pgd;
7739+
7740+ pax_open_kernel();
7741+ pgd = get_cpu_pgd(smp_processor_id());
7742+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
7743+ set_pgd_batched(pgd+i, native_make_pgd(0));
7744+ pax_close_kernel();
7745+#endif
7746+
7747 #ifdef CONFIG_SMP
7748 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
7749 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
7750@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_s
7751 struct task_struct *tsk)
7752 {
7753 unsigned cpu = smp_processor_id();
7754+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7755+ int tlbstate = TLBSTATE_OK;
7756+#endif
7757
7758 if (likely(prev != next)) {
7759 #ifdef CONFIG_SMP
7760+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7761+ tlbstate = percpu_read(cpu_tlbstate.state);
7762+#endif
7763 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7764 percpu_write(cpu_tlbstate.active_mm, next);
7765 #endif
7766 cpumask_set_cpu(cpu, mm_cpumask(next));
7767
7768 /* Re-load page tables */
7769+#ifdef CONFIG_PAX_PER_CPU_PGD
7770+ pax_open_kernel();
7771+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7772+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7773+ pax_close_kernel();
7774+ load_cr3(get_cpu_pgd(cpu));
7775+#else
7776 load_cr3(next->pgd);
7777+#endif
7778
7779 /* stop flush ipis for the previous mm */
7780 cpumask_clear_cpu(cpu, mm_cpumask(prev));
7781@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_s
7782 */
7783 if (unlikely(prev->context.ldt != next->context.ldt))
7784 load_LDT_nolock(&next->context);
7785- }
7786+
7787+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7788+ if (!(__supported_pte_mask & _PAGE_NX)) {
7789+ smp_mb__before_clear_bit();
7790+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
7791+ smp_mb__after_clear_bit();
7792+ cpu_set(cpu, next->context.cpu_user_cs_mask);
7793+ }
7794+#endif
7795+
7796+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7797+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
7798+ prev->context.user_cs_limit != next->context.user_cs_limit))
7799+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7800 #ifdef CONFIG_SMP
7801+ else if (unlikely(tlbstate != TLBSTATE_OK))
7802+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7803+#endif
7804+#endif
7805+
7806+ }
7807 else {
7808+
7809+#ifdef CONFIG_PAX_PER_CPU_PGD
7810+ pax_open_kernel();
7811+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7812+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7813+ pax_close_kernel();
7814+ load_cr3(get_cpu_pgd(cpu));
7815+#endif
7816+
7817+#ifdef CONFIG_SMP
7818 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7819 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
7820
7821@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_s
7822 * tlb flush IPI delivery. We must reload CR3
7823 * to make sure to use no freed page tables.
7824 */
7825+
7826+#ifndef CONFIG_PAX_PER_CPU_PGD
7827 load_cr3(next->pgd);
7828+#endif
7829+
7830 load_LDT_nolock(&next->context);
7831+
7832+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
7833+ if (!(__supported_pte_mask & _PAGE_NX))
7834+ cpu_set(cpu, next->context.cpu_user_cs_mask);
7835+#endif
7836+
7837+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7838+#ifdef CONFIG_PAX_PAGEEXEC
7839+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
7840+#endif
7841+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7842+#endif
7843+
7844 }
7845- }
7846 #endif
7847+ }
7848 }
7849
7850 #define activate_mm(prev, next) \
7851diff -urNp linux-3.0.4/arch/x86/include/asm/mmu.h linux-3.0.4/arch/x86/include/asm/mmu.h
7852--- linux-3.0.4/arch/x86/include/asm/mmu.h 2011-07-21 22:17:23.000000000 -0400
7853+++ linux-3.0.4/arch/x86/include/asm/mmu.h 2011-08-23 21:47:55.000000000 -0400
7854@@ -9,7 +9,7 @@
7855 * we put the segment information here.
7856 */
7857 typedef struct {
7858- void *ldt;
7859+ struct desc_struct *ldt;
7860 int size;
7861
7862 #ifdef CONFIG_X86_64
7863@@ -18,7 +18,19 @@ typedef struct {
7864 #endif
7865
7866 struct mutex lock;
7867- void *vdso;
7868+ unsigned long vdso;
7869+
7870+#ifdef CONFIG_X86_32
7871+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
7872+ unsigned long user_cs_base;
7873+ unsigned long user_cs_limit;
7874+
7875+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7876+ cpumask_t cpu_user_cs_mask;
7877+#endif
7878+
7879+#endif
7880+#endif
7881 } mm_context_t;
7882
7883 #ifdef CONFIG_SMP
7884diff -urNp linux-3.0.4/arch/x86/include/asm/module.h linux-3.0.4/arch/x86/include/asm/module.h
7885--- linux-3.0.4/arch/x86/include/asm/module.h 2011-07-21 22:17:23.000000000 -0400
7886+++ linux-3.0.4/arch/x86/include/asm/module.h 2011-10-06 04:21:18.000000000 -0400
7887@@ -5,6 +5,7 @@
7888
7889 #ifdef CONFIG_X86_64
7890 /* X86_64 does not define MODULE_PROC_FAMILY */
7891+#define MODULE_PROC_FAMILY ""
7892 #elif defined CONFIG_M386
7893 #define MODULE_PROC_FAMILY "386 "
7894 #elif defined CONFIG_M486
7895@@ -59,8 +60,36 @@
7896 #error unknown processor family
7897 #endif
7898
7899-#ifdef CONFIG_X86_32
7900-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
7901+#ifdef CONFIG_PAX_MEMORY_UDEREF
7902+#define MODULE_PAX_UDEREF "UDEREF "
7903+#else
7904+#define MODULE_PAX_UDEREF ""
7905+#endif
7906+
7907+#ifdef CONFIG_PAX_KERNEXEC
7908+#define MODULE_PAX_KERNEXEC "KERNEXEC "
7909+#else
7910+#define MODULE_PAX_KERNEXEC ""
7911+#endif
7912+
7913+#ifdef CONFIG_PAX_REFCOUNT
7914+#define MODULE_PAX_REFCOUNT "REFCOUNT "
7915+#else
7916+#define MODULE_PAX_REFCOUNT ""
7917 #endif
7918
7919+#ifdef CONSTIFY_PLUGIN
7920+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
7921+#else
7922+#define MODULE_CONSTIFY_PLUGIN ""
7923+#endif
7924+
7925+#ifdef CONFIG_GRKERNSEC
7926+#define MODULE_GRSEC "GRSECURITY "
7927+#else
7928+#define MODULE_GRSEC ""
7929+#endif
7930+
7931+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN
7932+
7933 #endif /* _ASM_X86_MODULE_H */
7934diff -urNp linux-3.0.4/arch/x86/include/asm/page_64_types.h linux-3.0.4/arch/x86/include/asm/page_64_types.h
7935--- linux-3.0.4/arch/x86/include/asm/page_64_types.h 2011-07-21 22:17:23.000000000 -0400
7936+++ linux-3.0.4/arch/x86/include/asm/page_64_types.h 2011-08-23 21:47:55.000000000 -0400
7937@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
7938
7939 /* duplicated to the one in bootmem.h */
7940 extern unsigned long max_pfn;
7941-extern unsigned long phys_base;
7942+extern const unsigned long phys_base;
7943
7944 extern unsigned long __phys_addr(unsigned long);
7945 #define __phys_reloc_hide(x) (x)
7946diff -urNp linux-3.0.4/arch/x86/include/asm/paravirt.h linux-3.0.4/arch/x86/include/asm/paravirt.h
7947--- linux-3.0.4/arch/x86/include/asm/paravirt.h 2011-07-21 22:17:23.000000000 -0400
7948+++ linux-3.0.4/arch/x86/include/asm/paravirt.h 2011-08-23 21:47:55.000000000 -0400
7949@@ -658,6 +658,18 @@ static inline void set_pgd(pgd_t *pgdp,
7950 val);
7951 }
7952
7953+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
7954+{
7955+ pgdval_t val = native_pgd_val(pgd);
7956+
7957+ if (sizeof(pgdval_t) > sizeof(long))
7958+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
7959+ val, (u64)val >> 32);
7960+ else
7961+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
7962+ val);
7963+}
7964+
7965 static inline void pgd_clear(pgd_t *pgdp)
7966 {
7967 set_pgd(pgdp, __pgd(0));
7968@@ -739,6 +751,21 @@ static inline void __set_fixmap(unsigned
7969 pv_mmu_ops.set_fixmap(idx, phys, flags);
7970 }
7971
7972+#ifdef CONFIG_PAX_KERNEXEC
7973+static inline unsigned long pax_open_kernel(void)
7974+{
7975+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
7976+}
7977+
7978+static inline unsigned long pax_close_kernel(void)
7979+{
7980+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
7981+}
7982+#else
7983+static inline unsigned long pax_open_kernel(void) { return 0; }
7984+static inline unsigned long pax_close_kernel(void) { return 0; }
7985+#endif
7986+
7987 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
7988
7989 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
7990@@ -955,7 +982,7 @@ extern void default_banner(void);
7991
7992 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
7993 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
7994-#define PARA_INDIRECT(addr) *%cs:addr
7995+#define PARA_INDIRECT(addr) *%ss:addr
7996 #endif
7997
7998 #define INTERRUPT_RETURN \
7999@@ -1032,6 +1059,21 @@ extern void default_banner(void);
8000 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
8001 CLBR_NONE, \
8002 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
8003+
8004+#define GET_CR0_INTO_RDI \
8005+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
8006+ mov %rax,%rdi
8007+
8008+#define SET_RDI_INTO_CR0 \
8009+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
8010+
8011+#define GET_CR3_INTO_RDI \
8012+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
8013+ mov %rax,%rdi
8014+
8015+#define SET_RDI_INTO_CR3 \
8016+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
8017+
8018 #endif /* CONFIG_X86_32 */
8019
8020 #endif /* __ASSEMBLY__ */
8021diff -urNp linux-3.0.4/arch/x86/include/asm/paravirt_types.h linux-3.0.4/arch/x86/include/asm/paravirt_types.h
8022--- linux-3.0.4/arch/x86/include/asm/paravirt_types.h 2011-07-21 22:17:23.000000000 -0400
8023+++ linux-3.0.4/arch/x86/include/asm/paravirt_types.h 2011-08-23 21:47:55.000000000 -0400
8024@@ -78,19 +78,19 @@ struct pv_init_ops {
8025 */
8026 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
8027 unsigned long addr, unsigned len);
8028-};
8029+} __no_const;
8030
8031
8032 struct pv_lazy_ops {
8033 /* Set deferred update mode, used for batching operations. */
8034 void (*enter)(void);
8035 void (*leave)(void);
8036-};
8037+} __no_const;
8038
8039 struct pv_time_ops {
8040 unsigned long long (*sched_clock)(void);
8041 unsigned long (*get_tsc_khz)(void);
8042-};
8043+} __no_const;
8044
8045 struct pv_cpu_ops {
8046 /* hooks for various privileged instructions */
8047@@ -186,7 +186,7 @@ struct pv_cpu_ops {
8048
8049 void (*start_context_switch)(struct task_struct *prev);
8050 void (*end_context_switch)(struct task_struct *next);
8051-};
8052+} __no_const;
8053
8054 struct pv_irq_ops {
8055 /*
8056@@ -217,7 +217,7 @@ struct pv_apic_ops {
8057 unsigned long start_eip,
8058 unsigned long start_esp);
8059 #endif
8060-};
8061+} __no_const;
8062
8063 struct pv_mmu_ops {
8064 unsigned long (*read_cr2)(void);
8065@@ -306,6 +306,7 @@ struct pv_mmu_ops {
8066 struct paravirt_callee_save make_pud;
8067
8068 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
8069+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
8070 #endif /* PAGETABLE_LEVELS == 4 */
8071 #endif /* PAGETABLE_LEVELS >= 3 */
8072
8073@@ -317,6 +318,12 @@ struct pv_mmu_ops {
8074 an mfn. We can tell which is which from the index. */
8075 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
8076 phys_addr_t phys, pgprot_t flags);
8077+
8078+#ifdef CONFIG_PAX_KERNEXEC
8079+ unsigned long (*pax_open_kernel)(void);
8080+ unsigned long (*pax_close_kernel)(void);
8081+#endif
8082+
8083 };
8084
8085 struct arch_spinlock;
8086@@ -327,7 +334,7 @@ struct pv_lock_ops {
8087 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
8088 int (*spin_trylock)(struct arch_spinlock *lock);
8089 void (*spin_unlock)(struct arch_spinlock *lock);
8090-};
8091+} __no_const;
8092
8093 /* This contains all the paravirt structures: we get a convenient
8094 * number for each function using the offset which we use to indicate
8095diff -urNp linux-3.0.4/arch/x86/include/asm/pgalloc.h linux-3.0.4/arch/x86/include/asm/pgalloc.h
8096--- linux-3.0.4/arch/x86/include/asm/pgalloc.h 2011-07-21 22:17:23.000000000 -0400
8097+++ linux-3.0.4/arch/x86/include/asm/pgalloc.h 2011-08-23 21:47:55.000000000 -0400
8098@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
8099 pmd_t *pmd, pte_t *pte)
8100 {
8101 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
8102+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
8103+}
8104+
8105+static inline void pmd_populate_user(struct mm_struct *mm,
8106+ pmd_t *pmd, pte_t *pte)
8107+{
8108+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
8109 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
8110 }
8111
8112diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable-2level.h linux-3.0.4/arch/x86/include/asm/pgtable-2level.h
8113--- linux-3.0.4/arch/x86/include/asm/pgtable-2level.h 2011-07-21 22:17:23.000000000 -0400
8114+++ linux-3.0.4/arch/x86/include/asm/pgtable-2level.h 2011-08-23 21:47:55.000000000 -0400
8115@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
8116
8117 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8118 {
8119+ pax_open_kernel();
8120 *pmdp = pmd;
8121+ pax_close_kernel();
8122 }
8123
8124 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
8125diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_32.h linux-3.0.4/arch/x86/include/asm/pgtable_32.h
8126--- linux-3.0.4/arch/x86/include/asm/pgtable_32.h 2011-07-21 22:17:23.000000000 -0400
8127+++ linux-3.0.4/arch/x86/include/asm/pgtable_32.h 2011-08-23 21:47:55.000000000 -0400
8128@@ -25,9 +25,6 @@
8129 struct mm_struct;
8130 struct vm_area_struct;
8131
8132-extern pgd_t swapper_pg_dir[1024];
8133-extern pgd_t initial_page_table[1024];
8134-
8135 static inline void pgtable_cache_init(void) { }
8136 static inline void check_pgt_cache(void) { }
8137 void paging_init(void);
8138@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, u
8139 # include <asm/pgtable-2level.h>
8140 #endif
8141
8142+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
8143+extern pgd_t initial_page_table[PTRS_PER_PGD];
8144+#ifdef CONFIG_X86_PAE
8145+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
8146+#endif
8147+
8148 #if defined(CONFIG_HIGHPTE)
8149 #define pte_offset_map(dir, address) \
8150 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
8151@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, u
8152 /* Clear a kernel PTE and flush it from the TLB */
8153 #define kpte_clear_flush(ptep, vaddr) \
8154 do { \
8155+ pax_open_kernel(); \
8156 pte_clear(&init_mm, (vaddr), (ptep)); \
8157+ pax_close_kernel(); \
8158 __flush_tlb_one((vaddr)); \
8159 } while (0)
8160
8161@@ -74,6 +79,9 @@ do { \
8162
8163 #endif /* !__ASSEMBLY__ */
8164
8165+#define HAVE_ARCH_UNMAPPED_AREA
8166+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
8167+
8168 /*
8169 * kern_addr_valid() is (1) for FLATMEM and (0) for
8170 * SPARSEMEM and DISCONTIGMEM
8171diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_32_types.h linux-3.0.4/arch/x86/include/asm/pgtable_32_types.h
8172--- linux-3.0.4/arch/x86/include/asm/pgtable_32_types.h 2011-07-21 22:17:23.000000000 -0400
8173+++ linux-3.0.4/arch/x86/include/asm/pgtable_32_types.h 2011-08-23 21:47:55.000000000 -0400
8174@@ -8,7 +8,7 @@
8175 */
8176 #ifdef CONFIG_X86_PAE
8177 # include <asm/pgtable-3level_types.h>
8178-# define PMD_SIZE (1UL << PMD_SHIFT)
8179+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
8180 # define PMD_MASK (~(PMD_SIZE - 1))
8181 #else
8182 # include <asm/pgtable-2level_types.h>
8183@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
8184 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
8185 #endif
8186
8187+#ifdef CONFIG_PAX_KERNEXEC
8188+#ifndef __ASSEMBLY__
8189+extern unsigned char MODULES_EXEC_VADDR[];
8190+extern unsigned char MODULES_EXEC_END[];
8191+#endif
8192+#include <asm/boot.h>
8193+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
8194+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
8195+#else
8196+#define ktla_ktva(addr) (addr)
8197+#define ktva_ktla(addr) (addr)
8198+#endif
8199+
8200 #define MODULES_VADDR VMALLOC_START
8201 #define MODULES_END VMALLOC_END
8202 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
8203diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable-3level.h linux-3.0.4/arch/x86/include/asm/pgtable-3level.h
8204--- linux-3.0.4/arch/x86/include/asm/pgtable-3level.h 2011-07-21 22:17:23.000000000 -0400
8205+++ linux-3.0.4/arch/x86/include/asm/pgtable-3level.h 2011-08-23 21:47:55.000000000 -0400
8206@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
8207
8208 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8209 {
8210+ pax_open_kernel();
8211 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
8212+ pax_close_kernel();
8213 }
8214
8215 static inline void native_set_pud(pud_t *pudp, pud_t pud)
8216 {
8217+ pax_open_kernel();
8218 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
8219+ pax_close_kernel();
8220 }
8221
8222 /*
8223diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_64.h linux-3.0.4/arch/x86/include/asm/pgtable_64.h
8224--- linux-3.0.4/arch/x86/include/asm/pgtable_64.h 2011-07-21 22:17:23.000000000 -0400
8225+++ linux-3.0.4/arch/x86/include/asm/pgtable_64.h 2011-08-23 21:47:55.000000000 -0400
8226@@ -16,10 +16,13 @@
8227
8228 extern pud_t level3_kernel_pgt[512];
8229 extern pud_t level3_ident_pgt[512];
8230+extern pud_t level3_vmalloc_pgt[512];
8231+extern pud_t level3_vmemmap_pgt[512];
8232+extern pud_t level2_vmemmap_pgt[512];
8233 extern pmd_t level2_kernel_pgt[512];
8234 extern pmd_t level2_fixmap_pgt[512];
8235-extern pmd_t level2_ident_pgt[512];
8236-extern pgd_t init_level4_pgt[];
8237+extern pmd_t level2_ident_pgt[512*2];
8238+extern pgd_t init_level4_pgt[512];
8239
8240 #define swapper_pg_dir init_level4_pgt
8241
8242@@ -61,7 +64,9 @@ static inline void native_set_pte_atomic
8243
8244 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8245 {
8246+ pax_open_kernel();
8247 *pmdp = pmd;
8248+ pax_close_kernel();
8249 }
8250
8251 static inline void native_pmd_clear(pmd_t *pmd)
8252@@ -107,6 +112,13 @@ static inline void native_pud_clear(pud_
8253
8254 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
8255 {
8256+ pax_open_kernel();
8257+ *pgdp = pgd;
8258+ pax_close_kernel();
8259+}
8260+
8261+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
8262+{
8263 *pgdp = pgd;
8264 }
8265
8266diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_64_types.h linux-3.0.4/arch/x86/include/asm/pgtable_64_types.h
8267--- linux-3.0.4/arch/x86/include/asm/pgtable_64_types.h 2011-07-21 22:17:23.000000000 -0400
8268+++ linux-3.0.4/arch/x86/include/asm/pgtable_64_types.h 2011-08-23 21:47:55.000000000 -0400
8269@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
8270 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
8271 #define MODULES_END _AC(0xffffffffff000000, UL)
8272 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
8273+#define MODULES_EXEC_VADDR MODULES_VADDR
8274+#define MODULES_EXEC_END MODULES_END
8275+
8276+#define ktla_ktva(addr) (addr)
8277+#define ktva_ktla(addr) (addr)
8278
8279 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
8280diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable.h linux-3.0.4/arch/x86/include/asm/pgtable.h
8281--- linux-3.0.4/arch/x86/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
8282+++ linux-3.0.4/arch/x86/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
8283@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm
8284
8285 #ifndef __PAGETABLE_PUD_FOLDED
8286 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
8287+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
8288 #define pgd_clear(pgd) native_pgd_clear(pgd)
8289 #endif
8290
8291@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm
8292
8293 #define arch_end_context_switch(prev) do {} while(0)
8294
8295+#define pax_open_kernel() native_pax_open_kernel()
8296+#define pax_close_kernel() native_pax_close_kernel()
8297 #endif /* CONFIG_PARAVIRT */
8298
8299+#define __HAVE_ARCH_PAX_OPEN_KERNEL
8300+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
8301+
8302+#ifdef CONFIG_PAX_KERNEXEC
8303+static inline unsigned long native_pax_open_kernel(void)
8304+{
8305+ unsigned long cr0;
8306+
8307+ preempt_disable();
8308+ barrier();
8309+ cr0 = read_cr0() ^ X86_CR0_WP;
8310+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
8311+ write_cr0(cr0);
8312+ return cr0 ^ X86_CR0_WP;
8313+}
8314+
8315+static inline unsigned long native_pax_close_kernel(void)
8316+{
8317+ unsigned long cr0;
8318+
8319+ cr0 = read_cr0() ^ X86_CR0_WP;
8320+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
8321+ write_cr0(cr0);
8322+ barrier();
8323+ preempt_enable_no_resched();
8324+ return cr0 ^ X86_CR0_WP;
8325+}
8326+#else
8327+static inline unsigned long native_pax_open_kernel(void) { return 0; }
8328+static inline unsigned long native_pax_close_kernel(void) { return 0; }
8329+#endif
8330+
8331 /*
8332 * The following only work if pte_present() is true.
8333 * Undefined behaviour if not..
8334 */
8335+static inline int pte_user(pte_t pte)
8336+{
8337+ return pte_val(pte) & _PAGE_USER;
8338+}
8339+
8340 static inline int pte_dirty(pte_t pte)
8341 {
8342 return pte_flags(pte) & _PAGE_DIRTY;
8343@@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t
8344 return pte_clear_flags(pte, _PAGE_RW);
8345 }
8346
8347+static inline pte_t pte_mkread(pte_t pte)
8348+{
8349+ return __pte(pte_val(pte) | _PAGE_USER);
8350+}
8351+
8352 static inline pte_t pte_mkexec(pte_t pte)
8353 {
8354- return pte_clear_flags(pte, _PAGE_NX);
8355+#ifdef CONFIG_X86_PAE
8356+ if (__supported_pte_mask & _PAGE_NX)
8357+ return pte_clear_flags(pte, _PAGE_NX);
8358+ else
8359+#endif
8360+ return pte_set_flags(pte, _PAGE_USER);
8361+}
8362+
8363+static inline pte_t pte_exprotect(pte_t pte)
8364+{
8365+#ifdef CONFIG_X86_PAE
8366+ if (__supported_pte_mask & _PAGE_NX)
8367+ return pte_set_flags(pte, _PAGE_NX);
8368+ else
8369+#endif
8370+ return pte_clear_flags(pte, _PAGE_USER);
8371 }
8372
8373 static inline pte_t pte_mkdirty(pte_t pte)
8374@@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long
8375 #endif
8376
8377 #ifndef __ASSEMBLY__
8378+
8379+#ifdef CONFIG_PAX_PER_CPU_PGD
8380+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
8381+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
8382+{
8383+ return cpu_pgd[cpu];
8384+}
8385+#endif
8386+
8387 #include <linux/mm_types.h>
8388
8389 static inline int pte_none(pte_t pte)
8390@@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *p
8391
8392 static inline int pgd_bad(pgd_t pgd)
8393 {
8394- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
8395+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
8396 }
8397
8398 static inline int pgd_none(pgd_t pgd)
8399@@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
8400 * pgd_offset() returns a (pgd_t *)
8401 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
8402 */
8403-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
8404+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
8405+
8406+#ifdef CONFIG_PAX_PER_CPU_PGD
8407+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
8408+#endif
8409+
8410 /*
8411 * a shortcut which implies the use of the kernel's pgd, instead
8412 * of a process's
8413@@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
8414 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
8415 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
8416
8417+#ifdef CONFIG_X86_32
8418+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
8419+#else
8420+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
8421+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
8422+
8423+#ifdef CONFIG_PAX_MEMORY_UDEREF
8424+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
8425+#else
8426+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
8427+#endif
8428+
8429+#endif
8430+
8431 #ifndef __ASSEMBLY__
8432
8433 extern int direct_gbpages;
8434@@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(st
8435 * dst and src can be on the same page, but the range must not overlap,
8436 * and must not cross a page boundary.
8437 */
8438-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
8439+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
8440 {
8441- memcpy(dst, src, count * sizeof(pgd_t));
8442+ pax_open_kernel();
8443+ while (count--)
8444+ *dst++ = *src++;
8445+ pax_close_kernel();
8446 }
8447
8448+#ifdef CONFIG_PAX_PER_CPU_PGD
8449+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8450+#endif
8451+
8452+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8453+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8454+#else
8455+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
8456+#endif
8457
8458 #include <asm-generic/pgtable.h>
8459 #endif /* __ASSEMBLY__ */
8460diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_types.h linux-3.0.4/arch/x86/include/asm/pgtable_types.h
8461--- linux-3.0.4/arch/x86/include/asm/pgtable_types.h 2011-07-21 22:17:23.000000000 -0400
8462+++ linux-3.0.4/arch/x86/include/asm/pgtable_types.h 2011-08-23 21:47:55.000000000 -0400
8463@@ -16,13 +16,12 @@
8464 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
8465 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
8466 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
8467-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
8468+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
8469 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
8470 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
8471 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
8472-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
8473-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
8474-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
8475+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
8476+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
8477 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
8478
8479 /* If _PAGE_BIT_PRESENT is clear, we use these: */
8480@@ -40,7 +39,6 @@
8481 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
8482 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
8483 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
8484-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
8485 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
8486 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
8487 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
8488@@ -57,8 +55,10 @@
8489
8490 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
8491 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
8492-#else
8493+#elif defined(CONFIG_KMEMCHECK)
8494 #define _PAGE_NX (_AT(pteval_t, 0))
8495+#else
8496+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
8497 #endif
8498
8499 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
8500@@ -96,6 +96,9 @@
8501 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
8502 _PAGE_ACCESSED)
8503
8504+#define PAGE_READONLY_NOEXEC PAGE_READONLY
8505+#define PAGE_SHARED_NOEXEC PAGE_SHARED
8506+
8507 #define __PAGE_KERNEL_EXEC \
8508 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
8509 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
8510@@ -106,8 +109,8 @@
8511 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
8512 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
8513 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
8514-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
8515-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
8516+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
8517+#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
8518 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
8519 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
8520 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
8521@@ -166,8 +169,8 @@
8522 * bits are combined, this will alow user to access the high address mapped
8523 * VDSO in the presence of CONFIG_COMPAT_VDSO
8524 */
8525-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
8526-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
8527+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8528+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8529 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
8530 #endif
8531
8532@@ -205,7 +208,17 @@ static inline pgdval_t pgd_flags(pgd_t p
8533 {
8534 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
8535 }
8536+#endif
8537
8538+#if PAGETABLE_LEVELS == 3
8539+#include <asm-generic/pgtable-nopud.h>
8540+#endif
8541+
8542+#if PAGETABLE_LEVELS == 2
8543+#include <asm-generic/pgtable-nopmd.h>
8544+#endif
8545+
8546+#ifndef __ASSEMBLY__
8547 #if PAGETABLE_LEVELS > 3
8548 typedef struct { pudval_t pud; } pud_t;
8549
8550@@ -219,8 +232,6 @@ static inline pudval_t native_pud_val(pu
8551 return pud.pud;
8552 }
8553 #else
8554-#include <asm-generic/pgtable-nopud.h>
8555-
8556 static inline pudval_t native_pud_val(pud_t pud)
8557 {
8558 return native_pgd_val(pud.pgd);
8559@@ -240,8 +251,6 @@ static inline pmdval_t native_pmd_val(pm
8560 return pmd.pmd;
8561 }
8562 #else
8563-#include <asm-generic/pgtable-nopmd.h>
8564-
8565 static inline pmdval_t native_pmd_val(pmd_t pmd)
8566 {
8567 return native_pgd_val(pmd.pud.pgd);
8568@@ -281,7 +290,6 @@ typedef struct page *pgtable_t;
8569
8570 extern pteval_t __supported_pte_mask;
8571 extern void set_nx(void);
8572-extern int nx_enabled;
8573
8574 #define pgprot_writecombine pgprot_writecombine
8575 extern pgprot_t pgprot_writecombine(pgprot_t prot);
8576diff -urNp linux-3.0.4/arch/x86/include/asm/processor.h linux-3.0.4/arch/x86/include/asm/processor.h
8577--- linux-3.0.4/arch/x86/include/asm/processor.h 2011-07-21 22:17:23.000000000 -0400
8578+++ linux-3.0.4/arch/x86/include/asm/processor.h 2011-08-23 21:47:55.000000000 -0400
8579@@ -266,7 +266,7 @@ struct tss_struct {
8580
8581 } ____cacheline_aligned;
8582
8583-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
8584+extern struct tss_struct init_tss[NR_CPUS];
8585
8586 /*
8587 * Save the original ist values for checking stack pointers during debugging
8588@@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(co
8589 */
8590 #define TASK_SIZE PAGE_OFFSET
8591 #define TASK_SIZE_MAX TASK_SIZE
8592+
8593+#ifdef CONFIG_PAX_SEGMEXEC
8594+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
8595+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
8596+#else
8597 #define STACK_TOP TASK_SIZE
8598-#define STACK_TOP_MAX STACK_TOP
8599+#endif
8600+
8601+#define STACK_TOP_MAX TASK_SIZE
8602
8603 #define INIT_THREAD { \
8604- .sp0 = sizeof(init_stack) + (long)&init_stack, \
8605+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8606 .vm86_info = NULL, \
8607 .sysenter_cs = __KERNEL_CS, \
8608 .io_bitmap_ptr = NULL, \
8609@@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(co
8610 */
8611 #define INIT_TSS { \
8612 .x86_tss = { \
8613- .sp0 = sizeof(init_stack) + (long)&init_stack, \
8614+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8615 .ss0 = __KERNEL_DS, \
8616 .ss1 = __KERNEL_CS, \
8617 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
8618@@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(co
8619 extern unsigned long thread_saved_pc(struct task_struct *tsk);
8620
8621 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
8622-#define KSTK_TOP(info) \
8623-({ \
8624- unsigned long *__ptr = (unsigned long *)(info); \
8625- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
8626-})
8627+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
8628
8629 /*
8630 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
8631@@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(str
8632 #define task_pt_regs(task) \
8633 ({ \
8634 struct pt_regs *__regs__; \
8635- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
8636+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
8637 __regs__ - 1; \
8638 })
8639
8640@@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(str
8641 /*
8642 * User space process size. 47bits minus one guard page.
8643 */
8644-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
8645+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
8646
8647 /* This decides where the kernel will search for a free chunk of vm
8648 * space during mmap's.
8649 */
8650 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
8651- 0xc0000000 : 0xFFFFe000)
8652+ 0xc0000000 : 0xFFFFf000)
8653
8654 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
8655 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
8656@@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(str
8657 #define STACK_TOP_MAX TASK_SIZE_MAX
8658
8659 #define INIT_THREAD { \
8660- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8661+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8662 }
8663
8664 #define INIT_TSS { \
8665- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8666+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8667 }
8668
8669 /*
8670@@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs
8671 */
8672 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
8673
8674+#ifdef CONFIG_PAX_SEGMEXEC
8675+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
8676+#endif
8677+
8678 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
8679
8680 /* Get/set a process' ability to use the timestamp counter instruction */
8681diff -urNp linux-3.0.4/arch/x86/include/asm/ptrace.h linux-3.0.4/arch/x86/include/asm/ptrace.h
8682--- linux-3.0.4/arch/x86/include/asm/ptrace.h 2011-07-21 22:17:23.000000000 -0400
8683+++ linux-3.0.4/arch/x86/include/asm/ptrace.h 2011-08-23 21:47:55.000000000 -0400
8684@@ -153,28 +153,29 @@ static inline unsigned long regs_return_
8685 }
8686
8687 /*
8688- * user_mode_vm(regs) determines whether a register set came from user mode.
8689+ * user_mode(regs) determines whether a register set came from user mode.
8690 * This is true if V8086 mode was enabled OR if the register set was from
8691 * protected mode with RPL-3 CS value. This tricky test checks that with
8692 * one comparison. Many places in the kernel can bypass this full check
8693- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
8694+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
8695+ * be used.
8696 */
8697-static inline int user_mode(struct pt_regs *regs)
8698+static inline int user_mode_novm(struct pt_regs *regs)
8699 {
8700 #ifdef CONFIG_X86_32
8701 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
8702 #else
8703- return !!(regs->cs & 3);
8704+ return !!(regs->cs & SEGMENT_RPL_MASK);
8705 #endif
8706 }
8707
8708-static inline int user_mode_vm(struct pt_regs *regs)
8709+static inline int user_mode(struct pt_regs *regs)
8710 {
8711 #ifdef CONFIG_X86_32
8712 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
8713 USER_RPL;
8714 #else
8715- return user_mode(regs);
8716+ return user_mode_novm(regs);
8717 #endif
8718 }
8719
8720diff -urNp linux-3.0.4/arch/x86/include/asm/reboot.h linux-3.0.4/arch/x86/include/asm/reboot.h
8721--- linux-3.0.4/arch/x86/include/asm/reboot.h 2011-07-21 22:17:23.000000000 -0400
8722+++ linux-3.0.4/arch/x86/include/asm/reboot.h 2011-08-23 21:47:55.000000000 -0400
8723@@ -6,19 +6,19 @@
8724 struct pt_regs;
8725
8726 struct machine_ops {
8727- void (*restart)(char *cmd);
8728- void (*halt)(void);
8729- void (*power_off)(void);
8730+ void (* __noreturn restart)(char *cmd);
8731+ void (* __noreturn halt)(void);
8732+ void (* __noreturn power_off)(void);
8733 void (*shutdown)(void);
8734 void (*crash_shutdown)(struct pt_regs *);
8735- void (*emergency_restart)(void);
8736-};
8737+ void (* __noreturn emergency_restart)(void);
8738+} __no_const;
8739
8740 extern struct machine_ops machine_ops;
8741
8742 void native_machine_crash_shutdown(struct pt_regs *regs);
8743 void native_machine_shutdown(void);
8744-void machine_real_restart(unsigned int type);
8745+void machine_real_restart(unsigned int type) __noreturn;
8746 /* These must match dispatch_table in reboot_32.S */
8747 #define MRR_BIOS 0
8748 #define MRR_APM 1
8749diff -urNp linux-3.0.4/arch/x86/include/asm/rwsem.h linux-3.0.4/arch/x86/include/asm/rwsem.h
8750--- linux-3.0.4/arch/x86/include/asm/rwsem.h 2011-07-21 22:17:23.000000000 -0400
8751+++ linux-3.0.4/arch/x86/include/asm/rwsem.h 2011-08-23 21:47:55.000000000 -0400
8752@@ -64,6 +64,14 @@ static inline void __down_read(struct rw
8753 {
8754 asm volatile("# beginning down_read\n\t"
8755 LOCK_PREFIX _ASM_INC "(%1)\n\t"
8756+
8757+#ifdef CONFIG_PAX_REFCOUNT
8758+ "jno 0f\n"
8759+ LOCK_PREFIX _ASM_DEC "(%1)\n"
8760+ "int $4\n0:\n"
8761+ _ASM_EXTABLE(0b, 0b)
8762+#endif
8763+
8764 /* adds 0x00000001 */
8765 " jns 1f\n"
8766 " call call_rwsem_down_read_failed\n"
8767@@ -85,6 +93,14 @@ static inline int __down_read_trylock(st
8768 "1:\n\t"
8769 " mov %1,%2\n\t"
8770 " add %3,%2\n\t"
8771+
8772+#ifdef CONFIG_PAX_REFCOUNT
8773+ "jno 0f\n"
8774+ "sub %3,%2\n"
8775+ "int $4\n0:\n"
8776+ _ASM_EXTABLE(0b, 0b)
8777+#endif
8778+
8779 " jle 2f\n\t"
8780 LOCK_PREFIX " cmpxchg %2,%0\n\t"
8781 " jnz 1b\n\t"
8782@@ -104,6 +120,14 @@ static inline void __down_write_nested(s
8783 long tmp;
8784 asm volatile("# beginning down_write\n\t"
8785 LOCK_PREFIX " xadd %1,(%2)\n\t"
8786+
8787+#ifdef CONFIG_PAX_REFCOUNT
8788+ "jno 0f\n"
8789+ "mov %1,(%2)\n"
8790+ "int $4\n0:\n"
8791+ _ASM_EXTABLE(0b, 0b)
8792+#endif
8793+
8794 /* adds 0xffff0001, returns the old value */
8795 " test %1,%1\n\t"
8796 /* was the count 0 before? */
8797@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_s
8798 long tmp;
8799 asm volatile("# beginning __up_read\n\t"
8800 LOCK_PREFIX " xadd %1,(%2)\n\t"
8801+
8802+#ifdef CONFIG_PAX_REFCOUNT
8803+ "jno 0f\n"
8804+ "mov %1,(%2)\n"
8805+ "int $4\n0:\n"
8806+ _ASM_EXTABLE(0b, 0b)
8807+#endif
8808+
8809 /* subtracts 1, returns the old value */
8810 " jns 1f\n\t"
8811 " call call_rwsem_wake\n" /* expects old value in %edx */
8812@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_
8813 long tmp;
8814 asm volatile("# beginning __up_write\n\t"
8815 LOCK_PREFIX " xadd %1,(%2)\n\t"
8816+
8817+#ifdef CONFIG_PAX_REFCOUNT
8818+ "jno 0f\n"
8819+ "mov %1,(%2)\n"
8820+ "int $4\n0:\n"
8821+ _ASM_EXTABLE(0b, 0b)
8822+#endif
8823+
8824 /* subtracts 0xffff0001, returns the old value */
8825 " jns 1f\n\t"
8826 " call call_rwsem_wake\n" /* expects old value in %edx */
8827@@ -176,6 +216,14 @@ static inline void __downgrade_write(str
8828 {
8829 asm volatile("# beginning __downgrade_write\n\t"
8830 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
8831+
8832+#ifdef CONFIG_PAX_REFCOUNT
8833+ "jno 0f\n"
8834+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
8835+ "int $4\n0:\n"
8836+ _ASM_EXTABLE(0b, 0b)
8837+#endif
8838+
8839 /*
8840 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
8841 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
8842@@ -194,7 +242,15 @@ static inline void __downgrade_write(str
8843 */
8844 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
8845 {
8846- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
8847+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
8848+
8849+#ifdef CONFIG_PAX_REFCOUNT
8850+ "jno 0f\n"
8851+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
8852+ "int $4\n0:\n"
8853+ _ASM_EXTABLE(0b, 0b)
8854+#endif
8855+
8856 : "+m" (sem->count)
8857 : "er" (delta));
8858 }
8859@@ -206,7 +262,15 @@ static inline long rwsem_atomic_update(l
8860 {
8861 long tmp = delta;
8862
8863- asm volatile(LOCK_PREFIX "xadd %0,%1"
8864+ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
8865+
8866+#ifdef CONFIG_PAX_REFCOUNT
8867+ "jno 0f\n"
8868+ "mov %0,%1\n"
8869+ "int $4\n0:\n"
8870+ _ASM_EXTABLE(0b, 0b)
8871+#endif
8872+
8873 : "+r" (tmp), "+m" (sem->count)
8874 : : "memory");
8875
8876diff -urNp linux-3.0.4/arch/x86/include/asm/segment.h linux-3.0.4/arch/x86/include/asm/segment.h
8877--- linux-3.0.4/arch/x86/include/asm/segment.h 2011-07-21 22:17:23.000000000 -0400
8878+++ linux-3.0.4/arch/x86/include/asm/segment.h 2011-09-17 00:53:42.000000000 -0400
8879@@ -64,10 +64,15 @@
8880 * 26 - ESPFIX small SS
8881 * 27 - per-cpu [ offset to per-cpu data area ]
8882 * 28 - stack_canary-20 [ for stack protector ]
8883- * 29 - unused
8884- * 30 - unused
8885+ * 29 - PCI BIOS CS
8886+ * 30 - PCI BIOS DS
8887 * 31 - TSS for double fault handler
8888 */
8889+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
8890+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
8891+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
8892+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
8893+
8894 #define GDT_ENTRY_TLS_MIN 6
8895 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
8896
8897@@ -79,6 +84,8 @@
8898
8899 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
8900
8901+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
8902+
8903 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
8904
8905 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
8906@@ -104,6 +111,12 @@
8907 #define __KERNEL_STACK_CANARY 0
8908 #endif
8909
8910+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
8911+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
8912+
8913+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
8914+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
8915+
8916 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
8917
8918 /*
8919@@ -141,7 +154,7 @@
8920 */
8921
8922 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
8923-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
8924+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
8925
8926
8927 #else
8928@@ -165,6 +178,8 @@
8929 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
8930 #define __USER32_DS __USER_DS
8931
8932+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
8933+
8934 #define GDT_ENTRY_TSS 8 /* needs two entries */
8935 #define GDT_ENTRY_LDT 10 /* needs two entries */
8936 #define GDT_ENTRY_TLS_MIN 12
8937@@ -185,6 +200,7 @@
8938 #endif
8939
8940 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
8941+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
8942 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
8943 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
8944 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
8945diff -urNp linux-3.0.4/arch/x86/include/asm/smp.h linux-3.0.4/arch/x86/include/asm/smp.h
8946--- linux-3.0.4/arch/x86/include/asm/smp.h 2011-07-21 22:17:23.000000000 -0400
8947+++ linux-3.0.4/arch/x86/include/asm/smp.h 2011-08-23 21:47:55.000000000 -0400
8948@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_
8949 /* cpus sharing the last level cache: */
8950 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
8951 DECLARE_PER_CPU(u16, cpu_llc_id);
8952-DECLARE_PER_CPU(int, cpu_number);
8953+DECLARE_PER_CPU(unsigned int, cpu_number);
8954
8955 static inline struct cpumask *cpu_sibling_mask(int cpu)
8956 {
8957@@ -77,7 +77,7 @@ struct smp_ops {
8958
8959 void (*send_call_func_ipi)(const struct cpumask *mask);
8960 void (*send_call_func_single_ipi)(int cpu);
8961-};
8962+} __no_const;
8963
8964 /* Globals due to paravirt */
8965 extern void set_cpu_sibling_map(int cpu);
8966@@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitd
8967 extern int safe_smp_processor_id(void);
8968
8969 #elif defined(CONFIG_X86_64_SMP)
8970-#define raw_smp_processor_id() (percpu_read(cpu_number))
8971-
8972-#define stack_smp_processor_id() \
8973-({ \
8974- struct thread_info *ti; \
8975- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
8976- ti->cpu; \
8977-})
8978+#define raw_smp_processor_id() (percpu_read(cpu_number))
8979+#define stack_smp_processor_id() raw_smp_processor_id()
8980 #define safe_smp_processor_id() smp_processor_id()
8981
8982 #endif
8983diff -urNp linux-3.0.4/arch/x86/include/asm/spinlock.h linux-3.0.4/arch/x86/include/asm/spinlock.h
8984--- linux-3.0.4/arch/x86/include/asm/spinlock.h 2011-07-21 22:17:23.000000000 -0400
8985+++ linux-3.0.4/arch/x86/include/asm/spinlock.h 2011-08-23 21:47:55.000000000 -0400
8986@@ -249,6 +249,14 @@ static inline int arch_write_can_lock(ar
8987 static inline void arch_read_lock(arch_rwlock_t *rw)
8988 {
8989 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
8990+
8991+#ifdef CONFIG_PAX_REFCOUNT
8992+ "jno 0f\n"
8993+ LOCK_PREFIX " addl $1,(%0)\n"
8994+ "int $4\n0:\n"
8995+ _ASM_EXTABLE(0b, 0b)
8996+#endif
8997+
8998 "jns 1f\n"
8999 "call __read_lock_failed\n\t"
9000 "1:\n"
9001@@ -258,6 +266,14 @@ static inline void arch_read_lock(arch_r
9002 static inline void arch_write_lock(arch_rwlock_t *rw)
9003 {
9004 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
9005+
9006+#ifdef CONFIG_PAX_REFCOUNT
9007+ "jno 0f\n"
9008+ LOCK_PREFIX " addl %1,(%0)\n"
9009+ "int $4\n0:\n"
9010+ _ASM_EXTABLE(0b, 0b)
9011+#endif
9012+
9013 "jz 1f\n"
9014 "call __write_lock_failed\n\t"
9015 "1:\n"
9016@@ -286,12 +302,29 @@ static inline int arch_write_trylock(arc
9017
9018 static inline void arch_read_unlock(arch_rwlock_t *rw)
9019 {
9020- asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
9021+ asm volatile(LOCK_PREFIX "incl %0\n"
9022+
9023+#ifdef CONFIG_PAX_REFCOUNT
9024+ "jno 0f\n"
9025+ LOCK_PREFIX "decl %0\n"
9026+ "int $4\n0:\n"
9027+ _ASM_EXTABLE(0b, 0b)
9028+#endif
9029+
9030+ :"+m" (rw->lock) : : "memory");
9031 }
9032
9033 static inline void arch_write_unlock(arch_rwlock_t *rw)
9034 {
9035- asm volatile(LOCK_PREFIX "addl %1, %0"
9036+ asm volatile(LOCK_PREFIX "addl %1, %0\n"
9037+
9038+#ifdef CONFIG_PAX_REFCOUNT
9039+ "jno 0f\n"
9040+ LOCK_PREFIX "subl %1, %0\n"
9041+ "int $4\n0:\n"
9042+ _ASM_EXTABLE(0b, 0b)
9043+#endif
9044+
9045 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
9046 }
9047
9048diff -urNp linux-3.0.4/arch/x86/include/asm/stackprotector.h linux-3.0.4/arch/x86/include/asm/stackprotector.h
9049--- linux-3.0.4/arch/x86/include/asm/stackprotector.h 2011-07-21 22:17:23.000000000 -0400
9050+++ linux-3.0.4/arch/x86/include/asm/stackprotector.h 2011-08-23 21:47:55.000000000 -0400
9051@@ -48,7 +48,7 @@
9052 * head_32 for boot CPU and setup_per_cpu_areas() for others.
9053 */
9054 #define GDT_STACK_CANARY_INIT \
9055- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
9056+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
9057
9058 /*
9059 * Initialize the stackprotector canary value.
9060@@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
9061
9062 static inline void load_stack_canary_segment(void)
9063 {
9064-#ifdef CONFIG_X86_32
9065+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
9066 asm volatile ("mov %0, %%gs" : : "r" (0));
9067 #endif
9068 }
9069diff -urNp linux-3.0.4/arch/x86/include/asm/stacktrace.h linux-3.0.4/arch/x86/include/asm/stacktrace.h
9070--- linux-3.0.4/arch/x86/include/asm/stacktrace.h 2011-07-21 22:17:23.000000000 -0400
9071+++ linux-3.0.4/arch/x86/include/asm/stacktrace.h 2011-08-23 21:47:55.000000000 -0400
9072@@ -11,28 +11,20 @@
9073
9074 extern int kstack_depth_to_print;
9075
9076-struct thread_info;
9077+struct task_struct;
9078 struct stacktrace_ops;
9079
9080-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
9081- unsigned long *stack,
9082- unsigned long bp,
9083- const struct stacktrace_ops *ops,
9084- void *data,
9085- unsigned long *end,
9086- int *graph);
9087-
9088-extern unsigned long
9089-print_context_stack(struct thread_info *tinfo,
9090- unsigned long *stack, unsigned long bp,
9091- const struct stacktrace_ops *ops, void *data,
9092- unsigned long *end, int *graph);
9093-
9094-extern unsigned long
9095-print_context_stack_bp(struct thread_info *tinfo,
9096- unsigned long *stack, unsigned long bp,
9097- const struct stacktrace_ops *ops, void *data,
9098- unsigned long *end, int *graph);
9099+typedef unsigned long walk_stack_t(struct task_struct *task,
9100+ void *stack_start,
9101+ unsigned long *stack,
9102+ unsigned long bp,
9103+ const struct stacktrace_ops *ops,
9104+ void *data,
9105+ unsigned long *end,
9106+ int *graph);
9107+
9108+extern walk_stack_t print_context_stack;
9109+extern walk_stack_t print_context_stack_bp;
9110
9111 /* Generic stack tracer with callbacks */
9112
9113@@ -40,7 +32,7 @@ struct stacktrace_ops {
9114 void (*address)(void *data, unsigned long address, int reliable);
9115 /* On negative return stop dumping */
9116 int (*stack)(void *data, char *name);
9117- walk_stack_t walk_stack;
9118+ walk_stack_t *walk_stack;
9119 };
9120
9121 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
9122diff -urNp linux-3.0.4/arch/x86/include/asm/sys_ia32.h linux-3.0.4/arch/x86/include/asm/sys_ia32.h
9123--- linux-3.0.4/arch/x86/include/asm/sys_ia32.h 2011-07-21 22:17:23.000000000 -0400
9124+++ linux-3.0.4/arch/x86/include/asm/sys_ia32.h 2011-10-06 04:17:55.000000000 -0400
9125@@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int
9126 compat_sigset_t __user *, unsigned int);
9127 asmlinkage long sys32_alarm(unsigned int);
9128
9129-asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
9130+asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
9131 asmlinkage long sys32_sysfs(int, u32, u32);
9132
9133 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
9134diff -urNp linux-3.0.4/arch/x86/include/asm/system.h linux-3.0.4/arch/x86/include/asm/system.h
9135--- linux-3.0.4/arch/x86/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
9136+++ linux-3.0.4/arch/x86/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
9137@@ -129,7 +129,7 @@ do { \
9138 "call __switch_to\n\t" \
9139 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
9140 __switch_canary \
9141- "movq %P[thread_info](%%rsi),%%r8\n\t" \
9142+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
9143 "movq %%rax,%%rdi\n\t" \
9144 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
9145 "jnz ret_from_fork\n\t" \
9146@@ -140,7 +140,7 @@ do { \
9147 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
9148 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
9149 [_tif_fork] "i" (_TIF_FORK), \
9150- [thread_info] "i" (offsetof(struct task_struct, stack)), \
9151+ [thread_info] "m" (current_tinfo), \
9152 [current_task] "m" (current_task) \
9153 __switch_canary_iparam \
9154 : "memory", "cc" __EXTRA_CLOBBER)
9155@@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
9156 {
9157 unsigned long __limit;
9158 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
9159- return __limit + 1;
9160+ return __limit;
9161 }
9162
9163 static inline void native_clts(void)
9164@@ -397,12 +397,12 @@ void enable_hlt(void);
9165
9166 void cpu_idle_wait(void);
9167
9168-extern unsigned long arch_align_stack(unsigned long sp);
9169+#define arch_align_stack(x) ((x) & ~0xfUL)
9170 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
9171
9172 void default_idle(void);
9173
9174-void stop_this_cpu(void *dummy);
9175+void stop_this_cpu(void *dummy) __noreturn;
9176
9177 /*
9178 * Force strict CPU ordering.
9179diff -urNp linux-3.0.4/arch/x86/include/asm/thread_info.h linux-3.0.4/arch/x86/include/asm/thread_info.h
9180--- linux-3.0.4/arch/x86/include/asm/thread_info.h 2011-07-21 22:17:23.000000000 -0400
9181+++ linux-3.0.4/arch/x86/include/asm/thread_info.h 2011-08-23 21:47:55.000000000 -0400
9182@@ -10,6 +10,7 @@
9183 #include <linux/compiler.h>
9184 #include <asm/page.h>
9185 #include <asm/types.h>
9186+#include <asm/percpu.h>
9187
9188 /*
9189 * low level task data that entry.S needs immediate access to
9190@@ -24,7 +25,6 @@ struct exec_domain;
9191 #include <asm/atomic.h>
9192
9193 struct thread_info {
9194- struct task_struct *task; /* main task structure */
9195 struct exec_domain *exec_domain; /* execution domain */
9196 __u32 flags; /* low level flags */
9197 __u32 status; /* thread synchronous flags */
9198@@ -34,18 +34,12 @@ struct thread_info {
9199 mm_segment_t addr_limit;
9200 struct restart_block restart_block;
9201 void __user *sysenter_return;
9202-#ifdef CONFIG_X86_32
9203- unsigned long previous_esp; /* ESP of the previous stack in
9204- case of nested (IRQ) stacks
9205- */
9206- __u8 supervisor_stack[0];
9207-#endif
9208+ unsigned long lowest_stack;
9209 int uaccess_err;
9210 };
9211
9212-#define INIT_THREAD_INFO(tsk) \
9213+#define INIT_THREAD_INFO \
9214 { \
9215- .task = &tsk, \
9216 .exec_domain = &default_exec_domain, \
9217 .flags = 0, \
9218 .cpu = 0, \
9219@@ -56,7 +50,7 @@ struct thread_info {
9220 }, \
9221 }
9222
9223-#define init_thread_info (init_thread_union.thread_info)
9224+#define init_thread_info (init_thread_union.stack)
9225 #define init_stack (init_thread_union.stack)
9226
9227 #else /* !__ASSEMBLY__ */
9228@@ -170,6 +164,23 @@ struct thread_info {
9229 ret; \
9230 })
9231
9232+#ifdef __ASSEMBLY__
9233+/* how to get the thread information struct from ASM */
9234+#define GET_THREAD_INFO(reg) \
9235+ mov PER_CPU_VAR(current_tinfo), reg
9236+
9237+/* use this one if reg already contains %esp */
9238+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
9239+#else
9240+/* how to get the thread information struct from C */
9241+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
9242+
9243+static __always_inline struct thread_info *current_thread_info(void)
9244+{
9245+ return percpu_read_stable(current_tinfo);
9246+}
9247+#endif
9248+
9249 #ifdef CONFIG_X86_32
9250
9251 #define STACK_WARN (THREAD_SIZE/8)
9252@@ -180,35 +191,13 @@ struct thread_info {
9253 */
9254 #ifndef __ASSEMBLY__
9255
9256-
9257 /* how to get the current stack pointer from C */
9258 register unsigned long current_stack_pointer asm("esp") __used;
9259
9260-/* how to get the thread information struct from C */
9261-static inline struct thread_info *current_thread_info(void)
9262-{
9263- return (struct thread_info *)
9264- (current_stack_pointer & ~(THREAD_SIZE - 1));
9265-}
9266-
9267-#else /* !__ASSEMBLY__ */
9268-
9269-/* how to get the thread information struct from ASM */
9270-#define GET_THREAD_INFO(reg) \
9271- movl $-THREAD_SIZE, reg; \
9272- andl %esp, reg
9273-
9274-/* use this one if reg already contains %esp */
9275-#define GET_THREAD_INFO_WITH_ESP(reg) \
9276- andl $-THREAD_SIZE, reg
9277-
9278 #endif
9279
9280 #else /* X86_32 */
9281
9282-#include <asm/percpu.h>
9283-#define KERNEL_STACK_OFFSET (5*8)
9284-
9285 /*
9286 * macros/functions for gaining access to the thread information structure
9287 * preempt_count needs to be 1 initially, until the scheduler is functional.
9288@@ -216,21 +205,8 @@ static inline struct thread_info *curren
9289 #ifndef __ASSEMBLY__
9290 DECLARE_PER_CPU(unsigned long, kernel_stack);
9291
9292-static inline struct thread_info *current_thread_info(void)
9293-{
9294- struct thread_info *ti;
9295- ti = (void *)(percpu_read_stable(kernel_stack) +
9296- KERNEL_STACK_OFFSET - THREAD_SIZE);
9297- return ti;
9298-}
9299-
9300-#else /* !__ASSEMBLY__ */
9301-
9302-/* how to get the thread information struct from ASM */
9303-#define GET_THREAD_INFO(reg) \
9304- movq PER_CPU_VAR(kernel_stack),reg ; \
9305- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
9306-
9307+/* how to get the current stack pointer from C */
9308+register unsigned long current_stack_pointer asm("rsp") __used;
9309 #endif
9310
9311 #endif /* !X86_32 */
9312@@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
9313 extern void free_thread_info(struct thread_info *ti);
9314 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
9315 #define arch_task_cache_init arch_task_cache_init
9316+
9317+#define __HAVE_THREAD_FUNCTIONS
9318+#define task_thread_info(task) (&(task)->tinfo)
9319+#define task_stack_page(task) ((task)->stack)
9320+#define setup_thread_stack(p, org) do {} while (0)
9321+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
9322+
9323+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
9324+extern struct task_struct *alloc_task_struct_node(int node);
9325+extern void free_task_struct(struct task_struct *);
9326+
9327 #endif
9328 #endif /* _ASM_X86_THREAD_INFO_H */
9329diff -urNp linux-3.0.4/arch/x86/include/asm/uaccess_32.h linux-3.0.4/arch/x86/include/asm/uaccess_32.h
9330--- linux-3.0.4/arch/x86/include/asm/uaccess_32.h 2011-07-21 22:17:23.000000000 -0400
9331+++ linux-3.0.4/arch/x86/include/asm/uaccess_32.h 2011-08-23 21:48:14.000000000 -0400
9332@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_u
9333 static __always_inline unsigned long __must_check
9334 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
9335 {
9336+ pax_track_stack();
9337+
9338+ if ((long)n < 0)
9339+ return n;
9340+
9341 if (__builtin_constant_p(n)) {
9342 unsigned long ret;
9343
9344@@ -61,6 +66,8 @@ __copy_to_user_inatomic(void __user *to,
9345 return ret;
9346 }
9347 }
9348+ if (!__builtin_constant_p(n))
9349+ check_object_size(from, n, true);
9350 return __copy_to_user_ll(to, from, n);
9351 }
9352
9353@@ -82,12 +89,16 @@ static __always_inline unsigned long __m
9354 __copy_to_user(void __user *to, const void *from, unsigned long n)
9355 {
9356 might_fault();
9357+
9358 return __copy_to_user_inatomic(to, from, n);
9359 }
9360
9361 static __always_inline unsigned long
9362 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
9363 {
9364+ if ((long)n < 0)
9365+ return n;
9366+
9367 /* Avoid zeroing the tail if the copy fails..
9368 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
9369 * but as the zeroing behaviour is only significant when n is not
9370@@ -137,6 +148,12 @@ static __always_inline unsigned long
9371 __copy_from_user(void *to, const void __user *from, unsigned long n)
9372 {
9373 might_fault();
9374+
9375+ pax_track_stack();
9376+
9377+ if ((long)n < 0)
9378+ return n;
9379+
9380 if (__builtin_constant_p(n)) {
9381 unsigned long ret;
9382
9383@@ -152,6 +169,8 @@ __copy_from_user(void *to, const void __
9384 return ret;
9385 }
9386 }
9387+ if (!__builtin_constant_p(n))
9388+ check_object_size(to, n, false);
9389 return __copy_from_user_ll(to, from, n);
9390 }
9391
9392@@ -159,6 +178,10 @@ static __always_inline unsigned long __c
9393 const void __user *from, unsigned long n)
9394 {
9395 might_fault();
9396+
9397+ if ((long)n < 0)
9398+ return n;
9399+
9400 if (__builtin_constant_p(n)) {
9401 unsigned long ret;
9402
9403@@ -181,15 +204,19 @@ static __always_inline unsigned long
9404 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
9405 unsigned long n)
9406 {
9407- return __copy_from_user_ll_nocache_nozero(to, from, n);
9408-}
9409+ if ((long)n < 0)
9410+ return n;
9411
9412-unsigned long __must_check copy_to_user(void __user *to,
9413- const void *from, unsigned long n);
9414-unsigned long __must_check _copy_from_user(void *to,
9415- const void __user *from,
9416- unsigned long n);
9417+ return __copy_from_user_ll_nocache_nozero(to, from, n);
9418+}
9419
9420+extern void copy_to_user_overflow(void)
9421+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9422+ __compiletime_error("copy_to_user() buffer size is not provably correct")
9423+#else
9424+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
9425+#endif
9426+;
9427
9428 extern void copy_from_user_overflow(void)
9429 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9430@@ -199,17 +226,61 @@ extern void copy_from_user_overflow(void
9431 #endif
9432 ;
9433
9434-static inline unsigned long __must_check copy_from_user(void *to,
9435- const void __user *from,
9436- unsigned long n)
9437+/**
9438+ * copy_to_user: - Copy a block of data into user space.
9439+ * @to: Destination address, in user space.
9440+ * @from: Source address, in kernel space.
9441+ * @n: Number of bytes to copy.
9442+ *
9443+ * Context: User context only. This function may sleep.
9444+ *
9445+ * Copy data from kernel space to user space.
9446+ *
9447+ * Returns number of bytes that could not be copied.
9448+ * On success, this will be zero.
9449+ */
9450+static inline unsigned long __must_check
9451+copy_to_user(void __user *to, const void *from, unsigned long n)
9452+{
9453+ int sz = __compiletime_object_size(from);
9454+
9455+ if (unlikely(sz != -1 && sz < n))
9456+ copy_to_user_overflow();
9457+ else if (access_ok(VERIFY_WRITE, to, n))
9458+ n = __copy_to_user(to, from, n);
9459+ return n;
9460+}
9461+
9462+/**
9463+ * copy_from_user: - Copy a block of data from user space.
9464+ * @to: Destination address, in kernel space.
9465+ * @from: Source address, in user space.
9466+ * @n: Number of bytes to copy.
9467+ *
9468+ * Context: User context only. This function may sleep.
9469+ *
9470+ * Copy data from user space to kernel space.
9471+ *
9472+ * Returns number of bytes that could not be copied.
9473+ * On success, this will be zero.
9474+ *
9475+ * If some data could not be copied, this function will pad the copied
9476+ * data to the requested size using zero bytes.
9477+ */
9478+static inline unsigned long __must_check
9479+copy_from_user(void *to, const void __user *from, unsigned long n)
9480 {
9481 int sz = __compiletime_object_size(to);
9482
9483- if (likely(sz == -1 || sz >= n))
9484- n = _copy_from_user(to, from, n);
9485- else
9486+ if (unlikely(sz != -1 && sz < n))
9487 copy_from_user_overflow();
9488-
9489+ else if (access_ok(VERIFY_READ, from, n))
9490+ n = __copy_from_user(to, from, n);
9491+ else if ((long)n > 0) {
9492+ if (!__builtin_constant_p(n))
9493+ check_object_size(to, n, false);
9494+ memset(to, 0, n);
9495+ }
9496 return n;
9497 }
9498
9499diff -urNp linux-3.0.4/arch/x86/include/asm/uaccess_64.h linux-3.0.4/arch/x86/include/asm/uaccess_64.h
9500--- linux-3.0.4/arch/x86/include/asm/uaccess_64.h 2011-07-21 22:17:23.000000000 -0400
9501+++ linux-3.0.4/arch/x86/include/asm/uaccess_64.h 2011-10-06 04:17:55.000000000 -0400
9502@@ -10,6 +10,9 @@
9503 #include <asm/alternative.h>
9504 #include <asm/cpufeature.h>
9505 #include <asm/page.h>
9506+#include <asm/pgtable.h>
9507+
9508+#define set_fs(x) (current_thread_info()->addr_limit = (x))
9509
9510 /*
9511 * Copy To/From Userspace
9512@@ -36,26 +39,26 @@ copy_user_generic(void *to, const void *
9513 return ret;
9514 }
9515
9516-__must_check unsigned long
9517-_copy_to_user(void __user *to, const void *from, unsigned len);
9518-__must_check unsigned long
9519-_copy_from_user(void *to, const void __user *from, unsigned len);
9520+static __always_inline __must_check unsigned long
9521+__copy_to_user(void __user *to, const void *from, unsigned len);
9522+static __always_inline __must_check unsigned long
9523+__copy_from_user(void *to, const void __user *from, unsigned len);
9524 __must_check unsigned long
9525 copy_in_user(void __user *to, const void __user *from, unsigned len);
9526
9527 static inline unsigned long __must_check copy_from_user(void *to,
9528 const void __user *from,
9529- unsigned long n)
9530+ unsigned n)
9531 {
9532- int sz = __compiletime_object_size(to);
9533-
9534 might_fault();
9535- if (likely(sz == -1 || sz >= n))
9536- n = _copy_from_user(to, from, n);
9537-#ifdef CONFIG_DEBUG_VM
9538- else
9539- WARN(1, "Buffer overflow detected!\n");
9540-#endif
9541+
9542+ if (access_ok(VERIFY_READ, from, n))
9543+ n = __copy_from_user(to, from, n);
9544+ else if ((int)n > 0) {
9545+ if (!__builtin_constant_p(n))
9546+ check_object_size(to, n, false);
9547+ memset(to, 0, n);
9548+ }
9549 return n;
9550 }
9551
9552@@ -64,110 +67,198 @@ int copy_to_user(void __user *dst, const
9553 {
9554 might_fault();
9555
9556- return _copy_to_user(dst, src, size);
9557+ if (access_ok(VERIFY_WRITE, dst, size))
9558+ size = __copy_to_user(dst, src, size);
9559+ return size;
9560 }
9561
9562 static __always_inline __must_check
9563-int __copy_from_user(void *dst, const void __user *src, unsigned size)
9564+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
9565 {
9566- int ret = 0;
9567+ int sz = __compiletime_object_size(dst);
9568+ unsigned ret = 0;
9569
9570 might_fault();
9571- if (!__builtin_constant_p(size))
9572- return copy_user_generic(dst, (__force void *)src, size);
9573+
9574+ pax_track_stack();
9575+
9576+ if ((int)size < 0)
9577+ return size;
9578+
9579+#ifdef CONFIG_PAX_MEMORY_UDEREF
9580+ if (!__access_ok(VERIFY_READ, src, size))
9581+ return size;
9582+#endif
9583+
9584+ if (unlikely(sz != -1 && sz < size)) {
9585+#ifdef CONFIG_DEBUG_VM
9586+ WARN(1, "Buffer overflow detected!\n");
9587+#endif
9588+ return size;
9589+ }
9590+
9591+ if (!__builtin_constant_p(size)) {
9592+ check_object_size(dst, size, false);
9593+
9594+#ifdef CONFIG_PAX_MEMORY_UDEREF
9595+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9596+ src += PAX_USER_SHADOW_BASE;
9597+#endif
9598+
9599+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
9600+ }
9601 switch (size) {
9602- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
9603+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
9604 ret, "b", "b", "=q", 1);
9605 return ret;
9606- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
9607+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
9608 ret, "w", "w", "=r", 2);
9609 return ret;
9610- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
9611+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
9612 ret, "l", "k", "=r", 4);
9613 return ret;
9614- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
9615+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9616 ret, "q", "", "=r", 8);
9617 return ret;
9618 case 10:
9619- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9620+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9621 ret, "q", "", "=r", 10);
9622 if (unlikely(ret))
9623 return ret;
9624 __get_user_asm(*(u16 *)(8 + (char *)dst),
9625- (u16 __user *)(8 + (char __user *)src),
9626+ (const u16 __user *)(8 + (const char __user *)src),
9627 ret, "w", "w", "=r", 2);
9628 return ret;
9629 case 16:
9630- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9631+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9632 ret, "q", "", "=r", 16);
9633 if (unlikely(ret))
9634 return ret;
9635 __get_user_asm(*(u64 *)(8 + (char *)dst),
9636- (u64 __user *)(8 + (char __user *)src),
9637+ (const u64 __user *)(8 + (const char __user *)src),
9638 ret, "q", "", "=r", 8);
9639 return ret;
9640 default:
9641- return copy_user_generic(dst, (__force void *)src, size);
9642+
9643+#ifdef CONFIG_PAX_MEMORY_UDEREF
9644+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9645+ src += PAX_USER_SHADOW_BASE;
9646+#endif
9647+
9648+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
9649 }
9650 }
9651
9652 static __always_inline __must_check
9653-int __copy_to_user(void __user *dst, const void *src, unsigned size)
9654+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
9655 {
9656- int ret = 0;
9657+ int sz = __compiletime_object_size(src);
9658+ unsigned ret = 0;
9659
9660 might_fault();
9661- if (!__builtin_constant_p(size))
9662- return copy_user_generic((__force void *)dst, src, size);
9663+
9664+ pax_track_stack();
9665+
9666+ if ((int)size < 0)
9667+ return size;
9668+
9669+#ifdef CONFIG_PAX_MEMORY_UDEREF
9670+ if (!__access_ok(VERIFY_WRITE, dst, size))
9671+ return size;
9672+#endif
9673+
9674+ if (unlikely(sz != -1 && sz < size)) {
9675+#ifdef CONFIG_DEBUG_VM
9676+ WARN(1, "Buffer overflow detected!\n");
9677+#endif
9678+ return size;
9679+ }
9680+
9681+ if (!__builtin_constant_p(size)) {
9682+ check_object_size(src, size, true);
9683+
9684+#ifdef CONFIG_PAX_MEMORY_UDEREF
9685+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9686+ dst += PAX_USER_SHADOW_BASE;
9687+#endif
9688+
9689+ return copy_user_generic((__force_kernel void *)dst, src, size);
9690+ }
9691 switch (size) {
9692- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
9693+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
9694 ret, "b", "b", "iq", 1);
9695 return ret;
9696- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
9697+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
9698 ret, "w", "w", "ir", 2);
9699 return ret;
9700- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
9701+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
9702 ret, "l", "k", "ir", 4);
9703 return ret;
9704- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
9705+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9706 ret, "q", "", "er", 8);
9707 return ret;
9708 case 10:
9709- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9710+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9711 ret, "q", "", "er", 10);
9712 if (unlikely(ret))
9713 return ret;
9714 asm("":::"memory");
9715- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
9716+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
9717 ret, "w", "w", "ir", 2);
9718 return ret;
9719 case 16:
9720- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9721+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9722 ret, "q", "", "er", 16);
9723 if (unlikely(ret))
9724 return ret;
9725 asm("":::"memory");
9726- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
9727+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
9728 ret, "q", "", "er", 8);
9729 return ret;
9730 default:
9731- return copy_user_generic((__force void *)dst, src, size);
9732+
9733+#ifdef CONFIG_PAX_MEMORY_UDEREF
9734+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9735+ dst += PAX_USER_SHADOW_BASE;
9736+#endif
9737+
9738+ return copy_user_generic((__force_kernel void *)dst, src, size);
9739 }
9740 }
9741
9742 static __always_inline __must_check
9743-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9744+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9745 {
9746- int ret = 0;
9747+ unsigned ret = 0;
9748
9749 might_fault();
9750- if (!__builtin_constant_p(size))
9751- return copy_user_generic((__force void *)dst,
9752- (__force void *)src, size);
9753+
9754+ if ((int)size < 0)
9755+ return size;
9756+
9757+#ifdef CONFIG_PAX_MEMORY_UDEREF
9758+ if (!__access_ok(VERIFY_READ, src, size))
9759+ return size;
9760+ if (!__access_ok(VERIFY_WRITE, dst, size))
9761+ return size;
9762+#endif
9763+
9764+ if (!__builtin_constant_p(size)) {
9765+
9766+#ifdef CONFIG_PAX_MEMORY_UDEREF
9767+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9768+ src += PAX_USER_SHADOW_BASE;
9769+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9770+ dst += PAX_USER_SHADOW_BASE;
9771+#endif
9772+
9773+ return copy_user_generic((__force_kernel void *)dst,
9774+ (__force_kernel const void *)src, size);
9775+ }
9776 switch (size) {
9777 case 1: {
9778 u8 tmp;
9779- __get_user_asm(tmp, (u8 __user *)src,
9780+ __get_user_asm(tmp, (const u8 __user *)src,
9781 ret, "b", "b", "=q", 1);
9782 if (likely(!ret))
9783 __put_user_asm(tmp, (u8 __user *)dst,
9784@@ -176,7 +267,7 @@ int __copy_in_user(void __user *dst, con
9785 }
9786 case 2: {
9787 u16 tmp;
9788- __get_user_asm(tmp, (u16 __user *)src,
9789+ __get_user_asm(tmp, (const u16 __user *)src,
9790 ret, "w", "w", "=r", 2);
9791 if (likely(!ret))
9792 __put_user_asm(tmp, (u16 __user *)dst,
9793@@ -186,7 +277,7 @@ int __copy_in_user(void __user *dst, con
9794
9795 case 4: {
9796 u32 tmp;
9797- __get_user_asm(tmp, (u32 __user *)src,
9798+ __get_user_asm(tmp, (const u32 __user *)src,
9799 ret, "l", "k", "=r", 4);
9800 if (likely(!ret))
9801 __put_user_asm(tmp, (u32 __user *)dst,
9802@@ -195,7 +286,7 @@ int __copy_in_user(void __user *dst, con
9803 }
9804 case 8: {
9805 u64 tmp;
9806- __get_user_asm(tmp, (u64 __user *)src,
9807+ __get_user_asm(tmp, (const u64 __user *)src,
9808 ret, "q", "", "=r", 8);
9809 if (likely(!ret))
9810 __put_user_asm(tmp, (u64 __user *)dst,
9811@@ -203,8 +294,16 @@ int __copy_in_user(void __user *dst, con
9812 return ret;
9813 }
9814 default:
9815- return copy_user_generic((__force void *)dst,
9816- (__force void *)src, size);
9817+
9818+#ifdef CONFIG_PAX_MEMORY_UDEREF
9819+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9820+ src += PAX_USER_SHADOW_BASE;
9821+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9822+ dst += PAX_USER_SHADOW_BASE;
9823+#endif
9824+
9825+ return copy_user_generic((__force_kernel void *)dst,
9826+ (__force_kernel const void *)src, size);
9827 }
9828 }
9829
9830@@ -221,33 +320,72 @@ __must_check unsigned long __clear_user(
9831 static __must_check __always_inline int
9832 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
9833 {
9834- return copy_user_generic(dst, (__force const void *)src, size);
9835+ pax_track_stack();
9836+
9837+ if ((int)size < 0)
9838+ return size;
9839+
9840+#ifdef CONFIG_PAX_MEMORY_UDEREF
9841+ if (!__access_ok(VERIFY_READ, src, size))
9842+ return size;
9843+
9844+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9845+ src += PAX_USER_SHADOW_BASE;
9846+#endif
9847+
9848+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
9849 }
9850
9851-static __must_check __always_inline int
9852+static __must_check __always_inline unsigned long
9853 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
9854 {
9855- return copy_user_generic((__force void *)dst, src, size);
9856+ if ((int)size < 0)
9857+ return size;
9858+
9859+#ifdef CONFIG_PAX_MEMORY_UDEREF
9860+ if (!__access_ok(VERIFY_WRITE, dst, size))
9861+ return size;
9862+
9863+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9864+ dst += PAX_USER_SHADOW_BASE;
9865+#endif
9866+
9867+ return copy_user_generic((__force_kernel void *)dst, src, size);
9868 }
9869
9870-extern long __copy_user_nocache(void *dst, const void __user *src,
9871+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
9872 unsigned size, int zerorest);
9873
9874-static inline int
9875-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9876+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9877 {
9878 might_sleep();
9879+
9880+ if ((int)size < 0)
9881+ return size;
9882+
9883+#ifdef CONFIG_PAX_MEMORY_UDEREF
9884+ if (!__access_ok(VERIFY_READ, src, size))
9885+ return size;
9886+#endif
9887+
9888 return __copy_user_nocache(dst, src, size, 1);
9889 }
9890
9891-static inline int
9892-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9893+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9894 unsigned size)
9895 {
9896+ if ((int)size < 0)
9897+ return size;
9898+
9899+#ifdef CONFIG_PAX_MEMORY_UDEREF
9900+ if (!__access_ok(VERIFY_READ, src, size))
9901+ return size;
9902+#endif
9903+
9904 return __copy_user_nocache(dst, src, size, 0);
9905 }
9906
9907-unsigned long
9908-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
9909+extern unsigned long
9910+copy_user_handle_tail(char __user *to, char __user *from, unsigned len, unsigned zerorest);
9911
9912 #endif /* _ASM_X86_UACCESS_64_H */
9913diff -urNp linux-3.0.4/arch/x86/include/asm/uaccess.h linux-3.0.4/arch/x86/include/asm/uaccess.h
9914--- linux-3.0.4/arch/x86/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
9915+++ linux-3.0.4/arch/x86/include/asm/uaccess.h 2011-10-06 04:17:55.000000000 -0400
9916@@ -7,12 +7,15 @@
9917 #include <linux/compiler.h>
9918 #include <linux/thread_info.h>
9919 #include <linux/string.h>
9920+#include <linux/sched.h>
9921 #include <asm/asm.h>
9922 #include <asm/page.h>
9923
9924 #define VERIFY_READ 0
9925 #define VERIFY_WRITE 1
9926
9927+extern void check_object_size(const void *ptr, unsigned long n, bool to);
9928+
9929 /*
9930 * The fs value determines whether argument validity checking should be
9931 * performed or not. If get_fs() == USER_DS, checking is performed, with
9932@@ -28,7 +31,12 @@
9933
9934 #define get_ds() (KERNEL_DS)
9935 #define get_fs() (current_thread_info()->addr_limit)
9936+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9937+void __set_fs(mm_segment_t x);
9938+void set_fs(mm_segment_t x);
9939+#else
9940 #define set_fs(x) (current_thread_info()->addr_limit = (x))
9941+#endif
9942
9943 #define segment_eq(a, b) ((a).seg == (b).seg)
9944
9945@@ -76,7 +84,33 @@
9946 * checks that the pointer is in the user space range - after calling
9947 * this function, memory access functions may still return -EFAULT.
9948 */
9949-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9950+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9951+#define access_ok(type, addr, size) \
9952+({ \
9953+ long __size = size; \
9954+ unsigned long __addr = (unsigned long)addr; \
9955+ unsigned long __addr_ao = __addr & PAGE_MASK; \
9956+ unsigned long __end_ao = __addr + __size - 1; \
9957+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
9958+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
9959+ while(__addr_ao <= __end_ao) { \
9960+ char __c_ao; \
9961+ __addr_ao += PAGE_SIZE; \
9962+ if (__size > PAGE_SIZE) \
9963+ cond_resched(); \
9964+ if (__get_user(__c_ao, (char __user *)__addr)) \
9965+ break; \
9966+ if (type != VERIFY_WRITE) { \
9967+ __addr = __addr_ao; \
9968+ continue; \
9969+ } \
9970+ if (__put_user(__c_ao, (char __user *)__addr)) \
9971+ break; \
9972+ __addr = __addr_ao; \
9973+ } \
9974+ } \
9975+ __ret_ao; \
9976+})
9977
9978 /*
9979 * The exception table consists of pairs of addresses: the first is the
9980@@ -182,12 +216,20 @@ extern int __get_user_bad(void);
9981 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
9982 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
9983
9984-
9985+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9986+#define __copyuser_seg "gs;"
9987+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
9988+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
9989+#else
9990+#define __copyuser_seg
9991+#define __COPYUSER_SET_ES
9992+#define __COPYUSER_RESTORE_ES
9993+#endif
9994
9995 #ifdef CONFIG_X86_32
9996 #define __put_user_asm_u64(x, addr, err, errret) \
9997- asm volatile("1: movl %%eax,0(%2)\n" \
9998- "2: movl %%edx,4(%2)\n" \
9999+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
10000+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
10001 "3:\n" \
10002 ".section .fixup,\"ax\"\n" \
10003 "4: movl %3,%0\n" \
10004@@ -199,8 +241,8 @@ extern int __get_user_bad(void);
10005 : "A" (x), "r" (addr), "i" (errret), "0" (err))
10006
10007 #define __put_user_asm_ex_u64(x, addr) \
10008- asm volatile("1: movl %%eax,0(%1)\n" \
10009- "2: movl %%edx,4(%1)\n" \
10010+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
10011+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
10012 "3:\n" \
10013 _ASM_EXTABLE(1b, 2b - 1b) \
10014 _ASM_EXTABLE(2b, 3b - 2b) \
10015@@ -252,7 +294,7 @@ extern void __put_user_8(void);
10016 __typeof__(*(ptr)) __pu_val; \
10017 __chk_user_ptr(ptr); \
10018 might_fault(); \
10019- __pu_val = x; \
10020+ __pu_val = (x); \
10021 switch (sizeof(*(ptr))) { \
10022 case 1: \
10023 __put_user_x(1, __pu_val, ptr, __ret_pu); \
10024@@ -373,7 +415,7 @@ do { \
10025 } while (0)
10026
10027 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10028- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
10029+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
10030 "2:\n" \
10031 ".section .fixup,\"ax\"\n" \
10032 "3: mov %3,%0\n" \
10033@@ -381,7 +423,7 @@ do { \
10034 " jmp 2b\n" \
10035 ".previous\n" \
10036 _ASM_EXTABLE(1b, 3b) \
10037- : "=r" (err), ltype(x) \
10038+ : "=r" (err), ltype (x) \
10039 : "m" (__m(addr)), "i" (errret), "0" (err))
10040
10041 #define __get_user_size_ex(x, ptr, size) \
10042@@ -406,7 +448,7 @@ do { \
10043 } while (0)
10044
10045 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
10046- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
10047+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
10048 "2:\n" \
10049 _ASM_EXTABLE(1b, 2b - 1b) \
10050 : ltype(x) : "m" (__m(addr)))
10051@@ -423,13 +465,24 @@ do { \
10052 int __gu_err; \
10053 unsigned long __gu_val; \
10054 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
10055- (x) = (__force __typeof__(*(ptr)))__gu_val; \
10056+ (x) = (__typeof__(*(ptr)))__gu_val; \
10057 __gu_err; \
10058 })
10059
10060 /* FIXME: this hack is definitely wrong -AK */
10061 struct __large_struct { unsigned long buf[100]; };
10062-#define __m(x) (*(struct __large_struct __user *)(x))
10063+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10064+#define ____m(x) \
10065+({ \
10066+ unsigned long ____x = (unsigned long)(x); \
10067+ if (____x < PAX_USER_SHADOW_BASE) \
10068+ ____x += PAX_USER_SHADOW_BASE; \
10069+ (void __user *)____x; \
10070+})
10071+#else
10072+#define ____m(x) (x)
10073+#endif
10074+#define __m(x) (*(struct __large_struct __user *)____m(x))
10075
10076 /*
10077 * Tell gcc we read from memory instead of writing: this is because
10078@@ -437,7 +490,7 @@ struct __large_struct { unsigned long bu
10079 * aliasing issues.
10080 */
10081 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10082- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
10083+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
10084 "2:\n" \
10085 ".section .fixup,\"ax\"\n" \
10086 "3: mov %3,%0\n" \
10087@@ -445,10 +498,10 @@ struct __large_struct { unsigned long bu
10088 ".previous\n" \
10089 _ASM_EXTABLE(1b, 3b) \
10090 : "=r"(err) \
10091- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
10092+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
10093
10094 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
10095- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
10096+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
10097 "2:\n" \
10098 _ASM_EXTABLE(1b, 2b - 1b) \
10099 : : ltype(x), "m" (__m(addr)))
10100@@ -487,8 +540,12 @@ struct __large_struct { unsigned long bu
10101 * On error, the variable @x is set to zero.
10102 */
10103
10104+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10105+#define __get_user(x, ptr) get_user((x), (ptr))
10106+#else
10107 #define __get_user(x, ptr) \
10108 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
10109+#endif
10110
10111 /**
10112 * __put_user: - Write a simple value into user space, with less checking.
10113@@ -510,8 +567,12 @@ struct __large_struct { unsigned long bu
10114 * Returns zero on success, or -EFAULT on error.
10115 */
10116
10117+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10118+#define __put_user(x, ptr) put_user((x), (ptr))
10119+#else
10120 #define __put_user(x, ptr) \
10121 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
10122+#endif
10123
10124 #define __get_user_unaligned __get_user
10125 #define __put_user_unaligned __put_user
10126@@ -529,7 +590,7 @@ struct __large_struct { unsigned long bu
10127 #define get_user_ex(x, ptr) do { \
10128 unsigned long __gue_val; \
10129 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
10130- (x) = (__force __typeof__(*(ptr)))__gue_val; \
10131+ (x) = (__typeof__(*(ptr)))__gue_val; \
10132 } while (0)
10133
10134 #ifdef CONFIG_X86_WP_WORKS_OK
10135diff -urNp linux-3.0.4/arch/x86/include/asm/vdso.h linux-3.0.4/arch/x86/include/asm/vdso.h
10136--- linux-3.0.4/arch/x86/include/asm/vdso.h 2011-07-21 22:17:23.000000000 -0400
10137+++ linux-3.0.4/arch/x86/include/asm/vdso.h 2011-10-06 04:17:55.000000000 -0400
10138@@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
10139 #define VDSO32_SYMBOL(base, name) \
10140 ({ \
10141 extern const char VDSO32_##name[]; \
10142- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
10143+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
10144 })
10145 #endif
10146
10147diff -urNp linux-3.0.4/arch/x86/include/asm/x86_init.h linux-3.0.4/arch/x86/include/asm/x86_init.h
10148--- linux-3.0.4/arch/x86/include/asm/x86_init.h 2011-07-21 22:17:23.000000000 -0400
10149+++ linux-3.0.4/arch/x86/include/asm/x86_init.h 2011-08-23 21:47:55.000000000 -0400
10150@@ -28,7 +28,7 @@ struct x86_init_mpparse {
10151 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
10152 void (*find_smp_config)(void);
10153 void (*get_smp_config)(unsigned int early);
10154-};
10155+} __no_const;
10156
10157 /**
10158 * struct x86_init_resources - platform specific resource related ops
10159@@ -42,7 +42,7 @@ struct x86_init_resources {
10160 void (*probe_roms)(void);
10161 void (*reserve_resources)(void);
10162 char *(*memory_setup)(void);
10163-};
10164+} __no_const;
10165
10166 /**
10167 * struct x86_init_irqs - platform specific interrupt setup
10168@@ -55,7 +55,7 @@ struct x86_init_irqs {
10169 void (*pre_vector_init)(void);
10170 void (*intr_init)(void);
10171 void (*trap_init)(void);
10172-};
10173+} __no_const;
10174
10175 /**
10176 * struct x86_init_oem - oem platform specific customizing functions
10177@@ -65,7 +65,7 @@ struct x86_init_irqs {
10178 struct x86_init_oem {
10179 void (*arch_setup)(void);
10180 void (*banner)(void);
10181-};
10182+} __no_const;
10183
10184 /**
10185 * struct x86_init_mapping - platform specific initial kernel pagetable setup
10186@@ -76,7 +76,7 @@ struct x86_init_oem {
10187 */
10188 struct x86_init_mapping {
10189 void (*pagetable_reserve)(u64 start, u64 end);
10190-};
10191+} __no_const;
10192
10193 /**
10194 * struct x86_init_paging - platform specific paging functions
10195@@ -86,7 +86,7 @@ struct x86_init_mapping {
10196 struct x86_init_paging {
10197 void (*pagetable_setup_start)(pgd_t *base);
10198 void (*pagetable_setup_done)(pgd_t *base);
10199-};
10200+} __no_const;
10201
10202 /**
10203 * struct x86_init_timers - platform specific timer setup
10204@@ -101,7 +101,7 @@ struct x86_init_timers {
10205 void (*tsc_pre_init)(void);
10206 void (*timer_init)(void);
10207 void (*wallclock_init)(void);
10208-};
10209+} __no_const;
10210
10211 /**
10212 * struct x86_init_iommu - platform specific iommu setup
10213@@ -109,7 +109,7 @@ struct x86_init_timers {
10214 */
10215 struct x86_init_iommu {
10216 int (*iommu_init)(void);
10217-};
10218+} __no_const;
10219
10220 /**
10221 * struct x86_init_pci - platform specific pci init functions
10222@@ -123,7 +123,7 @@ struct x86_init_pci {
10223 int (*init)(void);
10224 void (*init_irq)(void);
10225 void (*fixup_irqs)(void);
10226-};
10227+} __no_const;
10228
10229 /**
10230 * struct x86_init_ops - functions for platform specific setup
10231@@ -139,7 +139,7 @@ struct x86_init_ops {
10232 struct x86_init_timers timers;
10233 struct x86_init_iommu iommu;
10234 struct x86_init_pci pci;
10235-};
10236+} __no_const;
10237
10238 /**
10239 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
10240@@ -147,7 +147,7 @@ struct x86_init_ops {
10241 */
10242 struct x86_cpuinit_ops {
10243 void (*setup_percpu_clockev)(void);
10244-};
10245+} __no_const;
10246
10247 /**
10248 * struct x86_platform_ops - platform specific runtime functions
10249@@ -166,7 +166,7 @@ struct x86_platform_ops {
10250 bool (*is_untracked_pat_range)(u64 start, u64 end);
10251 void (*nmi_init)(void);
10252 int (*i8042_detect)(void);
10253-};
10254+} __no_const;
10255
10256 struct pci_dev;
10257
10258@@ -174,7 +174,7 @@ struct x86_msi_ops {
10259 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
10260 void (*teardown_msi_irq)(unsigned int irq);
10261 void (*teardown_msi_irqs)(struct pci_dev *dev);
10262-};
10263+} __no_const;
10264
10265 extern struct x86_init_ops x86_init;
10266 extern struct x86_cpuinit_ops x86_cpuinit;
10267diff -urNp linux-3.0.4/arch/x86/include/asm/xsave.h linux-3.0.4/arch/x86/include/asm/xsave.h
10268--- linux-3.0.4/arch/x86/include/asm/xsave.h 2011-07-21 22:17:23.000000000 -0400
10269+++ linux-3.0.4/arch/x86/include/asm/xsave.h 2011-10-06 04:17:55.000000000 -0400
10270@@ -65,6 +65,11 @@ static inline int xsave_user(struct xsav
10271 {
10272 int err;
10273
10274+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10275+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
10276+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
10277+#endif
10278+
10279 /*
10280 * Clear the xsave header first, so that reserved fields are
10281 * initialized to zero.
10282@@ -96,10 +101,15 @@ static inline int xsave_user(struct xsav
10283 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
10284 {
10285 int err;
10286- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
10287+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
10288 u32 lmask = mask;
10289 u32 hmask = mask >> 32;
10290
10291+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10292+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
10293+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
10294+#endif
10295+
10296 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
10297 "2:\n"
10298 ".section .fixup,\"ax\"\n"
10299diff -urNp linux-3.0.4/arch/x86/Kconfig linux-3.0.4/arch/x86/Kconfig
10300--- linux-3.0.4/arch/x86/Kconfig 2011-07-21 22:17:23.000000000 -0400
10301+++ linux-3.0.4/arch/x86/Kconfig 2011-09-17 00:58:36.000000000 -0400
10302@@ -229,7 +229,7 @@ config X86_HT
10303
10304 config X86_32_LAZY_GS
10305 def_bool y
10306- depends on X86_32 && !CC_STACKPROTECTOR
10307+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10308
10309 config ARCH_HWEIGHT_CFLAGS
10310 string
10311@@ -1018,7 +1018,7 @@ choice
10312
10313 config NOHIGHMEM
10314 bool "off"
10315- depends on !X86_NUMAQ
10316+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10317 ---help---
10318 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10319 However, the address space of 32-bit x86 processors is only 4
10320@@ -1055,7 +1055,7 @@ config NOHIGHMEM
10321
10322 config HIGHMEM4G
10323 bool "4GB"
10324- depends on !X86_NUMAQ
10325+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10326 ---help---
10327 Select this if you have a 32-bit processor and between 1 and 4
10328 gigabytes of physical RAM.
10329@@ -1109,7 +1109,7 @@ config PAGE_OFFSET
10330 hex
10331 default 0xB0000000 if VMSPLIT_3G_OPT
10332 default 0x80000000 if VMSPLIT_2G
10333- default 0x78000000 if VMSPLIT_2G_OPT
10334+ default 0x70000000 if VMSPLIT_2G_OPT
10335 default 0x40000000 if VMSPLIT_1G
10336 default 0xC0000000
10337 depends on X86_32
10338@@ -1483,6 +1483,7 @@ config SECCOMP
10339
10340 config CC_STACKPROTECTOR
10341 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
10342+ depends on X86_64 || !PAX_MEMORY_UDEREF
10343 ---help---
10344 This option turns on the -fstack-protector GCC feature. This
10345 feature puts, at the beginning of functions, a canary value on
10346@@ -1540,6 +1541,7 @@ config KEXEC_JUMP
10347 config PHYSICAL_START
10348 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
10349 default "0x1000000"
10350+ range 0x400000 0x40000000
10351 ---help---
10352 This gives the physical address where the kernel is loaded.
10353
10354@@ -1603,6 +1605,7 @@ config X86_NEED_RELOCS
10355 config PHYSICAL_ALIGN
10356 hex "Alignment value to which kernel should be aligned" if X86_32
10357 default "0x1000000"
10358+ range 0x400000 0x1000000 if PAX_KERNEXEC
10359 range 0x2000 0x1000000
10360 ---help---
10361 This value puts the alignment restrictions on physical address
10362@@ -1634,9 +1637,10 @@ config HOTPLUG_CPU
10363 Say N if you want to disable CPU hotplug.
10364
10365 config COMPAT_VDSO
10366- def_bool y
10367+ def_bool n
10368 prompt "Compat VDSO support"
10369 depends on X86_32 || IA32_EMULATION
10370+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
10371 ---help---
10372 Map the 32-bit VDSO to the predictable old-style address too.
10373
10374diff -urNp linux-3.0.4/arch/x86/Kconfig.cpu linux-3.0.4/arch/x86/Kconfig.cpu
10375--- linux-3.0.4/arch/x86/Kconfig.cpu 2011-07-21 22:17:23.000000000 -0400
10376+++ linux-3.0.4/arch/x86/Kconfig.cpu 2011-08-23 21:47:55.000000000 -0400
10377@@ -338,7 +338,7 @@ config X86_PPRO_FENCE
10378
10379 config X86_F00F_BUG
10380 def_bool y
10381- depends on M586MMX || M586TSC || M586 || M486 || M386
10382+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
10383
10384 config X86_INVD_BUG
10385 def_bool y
10386@@ -362,7 +362,7 @@ config X86_POPAD_OK
10387
10388 config X86_ALIGNMENT_16
10389 def_bool y
10390- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10391+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10392
10393 config X86_INTEL_USERCOPY
10394 def_bool y
10395@@ -408,7 +408,7 @@ config X86_CMPXCHG64
10396 # generates cmov.
10397 config X86_CMOV
10398 def_bool y
10399- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10400+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10401
10402 config X86_MINIMUM_CPU_FAMILY
10403 int
10404diff -urNp linux-3.0.4/arch/x86/Kconfig.debug linux-3.0.4/arch/x86/Kconfig.debug
10405--- linux-3.0.4/arch/x86/Kconfig.debug 2011-07-21 22:17:23.000000000 -0400
10406+++ linux-3.0.4/arch/x86/Kconfig.debug 2011-08-23 21:47:55.000000000 -0400
10407@@ -81,7 +81,7 @@ config X86_PTDUMP
10408 config DEBUG_RODATA
10409 bool "Write protect kernel read-only data structures"
10410 default y
10411- depends on DEBUG_KERNEL
10412+ depends on DEBUG_KERNEL && BROKEN
10413 ---help---
10414 Mark the kernel read-only data as write-protected in the pagetables,
10415 in order to catch accidental (and incorrect) writes to such const
10416@@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
10417
10418 config DEBUG_SET_MODULE_RONX
10419 bool "Set loadable kernel module data as NX and text as RO"
10420- depends on MODULES
10421+ depends on MODULES && BROKEN
10422 ---help---
10423 This option helps catch unintended modifications to loadable
10424 kernel module's text and read-only data. It also prevents execution
10425diff -urNp linux-3.0.4/arch/x86/kernel/acpi/realmode/Makefile linux-3.0.4/arch/x86/kernel/acpi/realmode/Makefile
10426--- linux-3.0.4/arch/x86/kernel/acpi/realmode/Makefile 2011-07-21 22:17:23.000000000 -0400
10427+++ linux-3.0.4/arch/x86/kernel/acpi/realmode/Makefile 2011-08-23 21:47:55.000000000 -0400
10428@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
10429 $(call cc-option, -fno-stack-protector) \
10430 $(call cc-option, -mpreferred-stack-boundary=2)
10431 KBUILD_CFLAGS += $(call cc-option, -m32)
10432+ifdef CONSTIFY_PLUGIN
10433+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
10434+endif
10435 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10436 GCOV_PROFILE := n
10437
10438diff -urNp linux-3.0.4/arch/x86/kernel/acpi/realmode/wakeup.S linux-3.0.4/arch/x86/kernel/acpi/realmode/wakeup.S
10439--- linux-3.0.4/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-21 22:17:23.000000000 -0400
10440+++ linux-3.0.4/arch/x86/kernel/acpi/realmode/wakeup.S 2011-08-23 21:48:14.000000000 -0400
10441@@ -108,6 +108,9 @@ wakeup_code:
10442 /* Do any other stuff... */
10443
10444 #ifndef CONFIG_64BIT
10445+ /* Recheck NX bit overrides (64bit path does this in trampoline */
10446+ call verify_cpu
10447+
10448 /* This could also be done in C code... */
10449 movl pmode_cr3, %eax
10450 movl %eax, %cr3
10451@@ -131,6 +134,7 @@ wakeup_code:
10452 movl pmode_cr0, %eax
10453 movl %eax, %cr0
10454 jmp pmode_return
10455+# include "../../verify_cpu.S"
10456 #else
10457 pushw $0
10458 pushw trampoline_segment
10459diff -urNp linux-3.0.4/arch/x86/kernel/acpi/sleep.c linux-3.0.4/arch/x86/kernel/acpi/sleep.c
10460--- linux-3.0.4/arch/x86/kernel/acpi/sleep.c 2011-07-21 22:17:23.000000000 -0400
10461+++ linux-3.0.4/arch/x86/kernel/acpi/sleep.c 2011-08-23 21:47:55.000000000 -0400
10462@@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
10463 header->trampoline_segment = trampoline_address() >> 4;
10464 #ifdef CONFIG_SMP
10465 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
10466+
10467+ pax_open_kernel();
10468 early_gdt_descr.address =
10469 (unsigned long)get_cpu_gdt_table(smp_processor_id());
10470+ pax_close_kernel();
10471+
10472 initial_gs = per_cpu_offset(smp_processor_id());
10473 #endif
10474 initial_code = (unsigned long)wakeup_long64;
10475diff -urNp linux-3.0.4/arch/x86/kernel/acpi/wakeup_32.S linux-3.0.4/arch/x86/kernel/acpi/wakeup_32.S
10476--- linux-3.0.4/arch/x86/kernel/acpi/wakeup_32.S 2011-07-21 22:17:23.000000000 -0400
10477+++ linux-3.0.4/arch/x86/kernel/acpi/wakeup_32.S 2011-08-23 21:47:55.000000000 -0400
10478@@ -30,13 +30,11 @@ wakeup_pmode_return:
10479 # and restore the stack ... but you need gdt for this to work
10480 movl saved_context_esp, %esp
10481
10482- movl %cs:saved_magic, %eax
10483- cmpl $0x12345678, %eax
10484+ cmpl $0x12345678, saved_magic
10485 jne bogus_magic
10486
10487 # jump to place where we left off
10488- movl saved_eip, %eax
10489- jmp *%eax
10490+ jmp *(saved_eip)
10491
10492 bogus_magic:
10493 jmp bogus_magic
10494diff -urNp linux-3.0.4/arch/x86/kernel/alternative.c linux-3.0.4/arch/x86/kernel/alternative.c
10495--- linux-3.0.4/arch/x86/kernel/alternative.c 2011-07-21 22:17:23.000000000 -0400
10496+++ linux-3.0.4/arch/x86/kernel/alternative.c 2011-08-23 21:47:55.000000000 -0400
10497@@ -313,7 +313,7 @@ static void alternatives_smp_lock(const
10498 if (!*poff || ptr < text || ptr >= text_end)
10499 continue;
10500 /* turn DS segment override prefix into lock prefix */
10501- if (*ptr == 0x3e)
10502+ if (*ktla_ktva(ptr) == 0x3e)
10503 text_poke(ptr, ((unsigned char []){0xf0}), 1);
10504 };
10505 mutex_unlock(&text_mutex);
10506@@ -334,7 +334,7 @@ static void alternatives_smp_unlock(cons
10507 if (!*poff || ptr < text || ptr >= text_end)
10508 continue;
10509 /* turn lock prefix into DS segment override prefix */
10510- if (*ptr == 0xf0)
10511+ if (*ktla_ktva(ptr) == 0xf0)
10512 text_poke(ptr, ((unsigned char []){0x3E}), 1);
10513 };
10514 mutex_unlock(&text_mutex);
10515@@ -503,7 +503,7 @@ void __init_or_module apply_paravirt(str
10516
10517 BUG_ON(p->len > MAX_PATCH_LEN);
10518 /* prep the buffer with the original instructions */
10519- memcpy(insnbuf, p->instr, p->len);
10520+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
10521 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
10522 (unsigned long)p->instr, p->len);
10523
10524@@ -571,7 +571,7 @@ void __init alternative_instructions(voi
10525 if (smp_alt_once)
10526 free_init_pages("SMP alternatives",
10527 (unsigned long)__smp_locks,
10528- (unsigned long)__smp_locks_end);
10529+ PAGE_ALIGN((unsigned long)__smp_locks_end));
10530
10531 restart_nmi();
10532 }
10533@@ -588,13 +588,17 @@ void __init alternative_instructions(voi
10534 * instructions. And on the local CPU you need to be protected again NMI or MCE
10535 * handlers seeing an inconsistent instruction while you patch.
10536 */
10537-void *__init_or_module text_poke_early(void *addr, const void *opcode,
10538+void *__kprobes text_poke_early(void *addr, const void *opcode,
10539 size_t len)
10540 {
10541 unsigned long flags;
10542 local_irq_save(flags);
10543- memcpy(addr, opcode, len);
10544+
10545+ pax_open_kernel();
10546+ memcpy(ktla_ktva(addr), opcode, len);
10547 sync_core();
10548+ pax_close_kernel();
10549+
10550 local_irq_restore(flags);
10551 /* Could also do a CLFLUSH here to speed up CPU recovery; but
10552 that causes hangs on some VIA CPUs. */
10553@@ -616,36 +620,22 @@ void *__init_or_module text_poke_early(v
10554 */
10555 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
10556 {
10557- unsigned long flags;
10558- char *vaddr;
10559+ unsigned char *vaddr = ktla_ktva(addr);
10560 struct page *pages[2];
10561- int i;
10562+ size_t i;
10563
10564 if (!core_kernel_text((unsigned long)addr)) {
10565- pages[0] = vmalloc_to_page(addr);
10566- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
10567+ pages[0] = vmalloc_to_page(vaddr);
10568+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
10569 } else {
10570- pages[0] = virt_to_page(addr);
10571+ pages[0] = virt_to_page(vaddr);
10572 WARN_ON(!PageReserved(pages[0]));
10573- pages[1] = virt_to_page(addr + PAGE_SIZE);
10574+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
10575 }
10576 BUG_ON(!pages[0]);
10577- local_irq_save(flags);
10578- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
10579- if (pages[1])
10580- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
10581- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
10582- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
10583- clear_fixmap(FIX_TEXT_POKE0);
10584- if (pages[1])
10585- clear_fixmap(FIX_TEXT_POKE1);
10586- local_flush_tlb();
10587- sync_core();
10588- /* Could also do a CLFLUSH here to speed up CPU recovery; but
10589- that causes hangs on some VIA CPUs. */
10590+ text_poke_early(addr, opcode, len);
10591 for (i = 0; i < len; i++)
10592- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
10593- local_irq_restore(flags);
10594+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
10595 return addr;
10596 }
10597
10598diff -urNp linux-3.0.4/arch/x86/kernel/apic/apic.c linux-3.0.4/arch/x86/kernel/apic/apic.c
10599--- linux-3.0.4/arch/x86/kernel/apic/apic.c 2011-07-21 22:17:23.000000000 -0400
10600+++ linux-3.0.4/arch/x86/kernel/apic/apic.c 2011-08-23 21:48:14.000000000 -0400
10601@@ -173,7 +173,7 @@ int first_system_vector = 0xfe;
10602 /*
10603 * Debug level, exported for io_apic.c
10604 */
10605-unsigned int apic_verbosity;
10606+int apic_verbosity;
10607
10608 int pic_mode;
10609
10610@@ -1834,7 +1834,7 @@ void smp_error_interrupt(struct pt_regs
10611 apic_write(APIC_ESR, 0);
10612 v1 = apic_read(APIC_ESR);
10613 ack_APIC_irq();
10614- atomic_inc(&irq_err_count);
10615+ atomic_inc_unchecked(&irq_err_count);
10616
10617 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
10618 smp_processor_id(), v0 , v1);
10619@@ -2190,6 +2190,8 @@ static int __cpuinit apic_cluster_num(vo
10620 u16 *bios_cpu_apicid;
10621 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
10622
10623+ pax_track_stack();
10624+
10625 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
10626 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
10627
10628diff -urNp linux-3.0.4/arch/x86/kernel/apic/io_apic.c linux-3.0.4/arch/x86/kernel/apic/io_apic.c
10629--- linux-3.0.4/arch/x86/kernel/apic/io_apic.c 2011-07-21 22:17:23.000000000 -0400
10630+++ linux-3.0.4/arch/x86/kernel/apic/io_apic.c 2011-08-23 21:47:55.000000000 -0400
10631@@ -1028,7 +1028,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
10632 }
10633 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
10634
10635-void lock_vector_lock(void)
10636+void lock_vector_lock(void) __acquires(vector_lock)
10637 {
10638 /* Used to the online set of cpus does not change
10639 * during assign_irq_vector.
10640@@ -1036,7 +1036,7 @@ void lock_vector_lock(void)
10641 raw_spin_lock(&vector_lock);
10642 }
10643
10644-void unlock_vector_lock(void)
10645+void unlock_vector_lock(void) __releases(vector_lock)
10646 {
10647 raw_spin_unlock(&vector_lock);
10648 }
10649@@ -2364,7 +2364,7 @@ static void ack_apic_edge(struct irq_dat
10650 ack_APIC_irq();
10651 }
10652
10653-atomic_t irq_mis_count;
10654+atomic_unchecked_t irq_mis_count;
10655
10656 /*
10657 * IO-APIC versions below 0x20 don't support EOI register.
10658@@ -2472,7 +2472,7 @@ static void ack_apic_level(struct irq_da
10659 * at the cpu.
10660 */
10661 if (!(v & (1 << (i & 0x1f)))) {
10662- atomic_inc(&irq_mis_count);
10663+ atomic_inc_unchecked(&irq_mis_count);
10664
10665 eoi_ioapic_irq(irq, cfg);
10666 }
10667diff -urNp linux-3.0.4/arch/x86/kernel/apm_32.c linux-3.0.4/arch/x86/kernel/apm_32.c
10668--- linux-3.0.4/arch/x86/kernel/apm_32.c 2011-07-21 22:17:23.000000000 -0400
10669+++ linux-3.0.4/arch/x86/kernel/apm_32.c 2011-08-23 21:47:55.000000000 -0400
10670@@ -413,7 +413,7 @@ static DEFINE_MUTEX(apm_mutex);
10671 * This is for buggy BIOS's that refer to (real mode) segment 0x40
10672 * even though they are called in protected mode.
10673 */
10674-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
10675+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
10676 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
10677
10678 static const char driver_version[] = "1.16ac"; /* no spaces */
10679@@ -591,7 +591,10 @@ static long __apm_bios_call(void *_call)
10680 BUG_ON(cpu != 0);
10681 gdt = get_cpu_gdt_table(cpu);
10682 save_desc_40 = gdt[0x40 / 8];
10683+
10684+ pax_open_kernel();
10685 gdt[0x40 / 8] = bad_bios_desc;
10686+ pax_close_kernel();
10687
10688 apm_irq_save(flags);
10689 APM_DO_SAVE_SEGS;
10690@@ -600,7 +603,11 @@ static long __apm_bios_call(void *_call)
10691 &call->esi);
10692 APM_DO_RESTORE_SEGS;
10693 apm_irq_restore(flags);
10694+
10695+ pax_open_kernel();
10696 gdt[0x40 / 8] = save_desc_40;
10697+ pax_close_kernel();
10698+
10699 put_cpu();
10700
10701 return call->eax & 0xff;
10702@@ -667,7 +674,10 @@ static long __apm_bios_call_simple(void
10703 BUG_ON(cpu != 0);
10704 gdt = get_cpu_gdt_table(cpu);
10705 save_desc_40 = gdt[0x40 / 8];
10706+
10707+ pax_open_kernel();
10708 gdt[0x40 / 8] = bad_bios_desc;
10709+ pax_close_kernel();
10710
10711 apm_irq_save(flags);
10712 APM_DO_SAVE_SEGS;
10713@@ -675,7 +685,11 @@ static long __apm_bios_call_simple(void
10714 &call->eax);
10715 APM_DO_RESTORE_SEGS;
10716 apm_irq_restore(flags);
10717+
10718+ pax_open_kernel();
10719 gdt[0x40 / 8] = save_desc_40;
10720+ pax_close_kernel();
10721+
10722 put_cpu();
10723 return error;
10724 }
10725@@ -2349,12 +2363,15 @@ static int __init apm_init(void)
10726 * code to that CPU.
10727 */
10728 gdt = get_cpu_gdt_table(0);
10729+
10730+ pax_open_kernel();
10731 set_desc_base(&gdt[APM_CS >> 3],
10732 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
10733 set_desc_base(&gdt[APM_CS_16 >> 3],
10734 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
10735 set_desc_base(&gdt[APM_DS >> 3],
10736 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
10737+ pax_close_kernel();
10738
10739 proc_create("apm", 0, NULL, &apm_file_ops);
10740
10741diff -urNp linux-3.0.4/arch/x86/kernel/asm-offsets_64.c linux-3.0.4/arch/x86/kernel/asm-offsets_64.c
10742--- linux-3.0.4/arch/x86/kernel/asm-offsets_64.c 2011-07-21 22:17:23.000000000 -0400
10743+++ linux-3.0.4/arch/x86/kernel/asm-offsets_64.c 2011-08-23 21:47:55.000000000 -0400
10744@@ -69,6 +69,7 @@ int main(void)
10745 BLANK();
10746 #undef ENTRY
10747
10748+ DEFINE(TSS_size, sizeof(struct tss_struct));
10749 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
10750 BLANK();
10751
10752diff -urNp linux-3.0.4/arch/x86/kernel/asm-offsets.c linux-3.0.4/arch/x86/kernel/asm-offsets.c
10753--- linux-3.0.4/arch/x86/kernel/asm-offsets.c 2011-07-21 22:17:23.000000000 -0400
10754+++ linux-3.0.4/arch/x86/kernel/asm-offsets.c 2011-08-23 21:47:55.000000000 -0400
10755@@ -33,6 +33,8 @@ void common(void) {
10756 OFFSET(TI_status, thread_info, status);
10757 OFFSET(TI_addr_limit, thread_info, addr_limit);
10758 OFFSET(TI_preempt_count, thread_info, preempt_count);
10759+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
10760+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
10761
10762 BLANK();
10763 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
10764@@ -53,8 +55,26 @@ void common(void) {
10765 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
10766 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
10767 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
10768+
10769+#ifdef CONFIG_PAX_KERNEXEC
10770+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
10771+#endif
10772+
10773+#ifdef CONFIG_PAX_MEMORY_UDEREF
10774+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
10775+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
10776+#ifdef CONFIG_X86_64
10777+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
10778+#endif
10779 #endif
10780
10781+#endif
10782+
10783+ BLANK();
10784+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
10785+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
10786+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
10787+
10788 #ifdef CONFIG_XEN
10789 BLANK();
10790 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
10791diff -urNp linux-3.0.4/arch/x86/kernel/cpu/amd.c linux-3.0.4/arch/x86/kernel/cpu/amd.c
10792--- linux-3.0.4/arch/x86/kernel/cpu/amd.c 2011-07-21 22:17:23.000000000 -0400
10793+++ linux-3.0.4/arch/x86/kernel/cpu/amd.c 2011-08-23 21:47:55.000000000 -0400
10794@@ -647,7 +647,7 @@ static unsigned int __cpuinit amd_size_c
10795 unsigned int size)
10796 {
10797 /* AMD errata T13 (order #21922) */
10798- if ((c->x86 == 6)) {
10799+ if (c->x86 == 6) {
10800 /* Duron Rev A0 */
10801 if (c->x86_model == 3 && c->x86_mask == 0)
10802 size = 64;
10803diff -urNp linux-3.0.4/arch/x86/kernel/cpu/common.c linux-3.0.4/arch/x86/kernel/cpu/common.c
10804--- linux-3.0.4/arch/x86/kernel/cpu/common.c 2011-07-21 22:17:23.000000000 -0400
10805+++ linux-3.0.4/arch/x86/kernel/cpu/common.c 2011-08-23 21:47:55.000000000 -0400
10806@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
10807
10808 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
10809
10810-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
10811-#ifdef CONFIG_X86_64
10812- /*
10813- * We need valid kernel segments for data and code in long mode too
10814- * IRET will check the segment types kkeil 2000/10/28
10815- * Also sysret mandates a special GDT layout
10816- *
10817- * TLS descriptors are currently at a different place compared to i386.
10818- * Hopefully nobody expects them at a fixed place (Wine?)
10819- */
10820- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
10821- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
10822- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
10823- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
10824- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
10825- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
10826-#else
10827- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
10828- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10829- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
10830- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
10831- /*
10832- * Segments used for calling PnP BIOS have byte granularity.
10833- * They code segments and data segments have fixed 64k limits,
10834- * the transfer segment sizes are set at run time.
10835- */
10836- /* 32-bit code */
10837- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10838- /* 16-bit code */
10839- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10840- /* 16-bit data */
10841- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
10842- /* 16-bit data */
10843- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
10844- /* 16-bit data */
10845- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
10846- /*
10847- * The APM segments have byte granularity and their bases
10848- * are set at run time. All have 64k limits.
10849- */
10850- /* 32-bit code */
10851- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10852- /* 16-bit code */
10853- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10854- /* data */
10855- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
10856-
10857- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10858- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10859- GDT_STACK_CANARY_INIT
10860-#endif
10861-} };
10862-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
10863-
10864 static int __init x86_xsave_setup(char *s)
10865 {
10866 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
10867@@ -371,7 +317,7 @@ void switch_to_new_gdt(int cpu)
10868 {
10869 struct desc_ptr gdt_descr;
10870
10871- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
10872+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
10873 gdt_descr.size = GDT_SIZE - 1;
10874 load_gdt(&gdt_descr);
10875 /* Reload the per-cpu base */
10876@@ -840,6 +786,10 @@ static void __cpuinit identify_cpu(struc
10877 /* Filter out anything that depends on CPUID levels we don't have */
10878 filter_cpuid_features(c, true);
10879
10880+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
10881+ setup_clear_cpu_cap(X86_FEATURE_SEP);
10882+#endif
10883+
10884 /* If the model name is still unset, do table lookup. */
10885 if (!c->x86_model_id[0]) {
10886 const char *p;
10887@@ -1019,6 +969,9 @@ static __init int setup_disablecpuid(cha
10888 }
10889 __setup("clearcpuid=", setup_disablecpuid);
10890
10891+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
10892+EXPORT_PER_CPU_SYMBOL(current_tinfo);
10893+
10894 #ifdef CONFIG_X86_64
10895 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
10896
10897@@ -1034,7 +987,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
10898 EXPORT_PER_CPU_SYMBOL(current_task);
10899
10900 DEFINE_PER_CPU(unsigned long, kernel_stack) =
10901- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
10902+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
10903 EXPORT_PER_CPU_SYMBOL(kernel_stack);
10904
10905 DEFINE_PER_CPU(char *, irq_stack_ptr) =
10906@@ -1099,7 +1052,7 @@ struct pt_regs * __cpuinit idle_regs(str
10907 {
10908 memset(regs, 0, sizeof(struct pt_regs));
10909 regs->fs = __KERNEL_PERCPU;
10910- regs->gs = __KERNEL_STACK_CANARY;
10911+ savesegment(gs, regs->gs);
10912
10913 return regs;
10914 }
10915@@ -1154,7 +1107,7 @@ void __cpuinit cpu_init(void)
10916 int i;
10917
10918 cpu = stack_smp_processor_id();
10919- t = &per_cpu(init_tss, cpu);
10920+ t = init_tss + cpu;
10921 oist = &per_cpu(orig_ist, cpu);
10922
10923 #ifdef CONFIG_NUMA
10924@@ -1180,7 +1133,7 @@ void __cpuinit cpu_init(void)
10925 switch_to_new_gdt(cpu);
10926 loadsegment(fs, 0);
10927
10928- load_idt((const struct desc_ptr *)&idt_descr);
10929+ load_idt(&idt_descr);
10930
10931 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
10932 syscall_init();
10933@@ -1189,7 +1142,6 @@ void __cpuinit cpu_init(void)
10934 wrmsrl(MSR_KERNEL_GS_BASE, 0);
10935 barrier();
10936
10937- x86_configure_nx();
10938 if (cpu != 0)
10939 enable_x2apic();
10940
10941@@ -1243,7 +1195,7 @@ void __cpuinit cpu_init(void)
10942 {
10943 int cpu = smp_processor_id();
10944 struct task_struct *curr = current;
10945- struct tss_struct *t = &per_cpu(init_tss, cpu);
10946+ struct tss_struct *t = init_tss + cpu;
10947 struct thread_struct *thread = &curr->thread;
10948
10949 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
10950diff -urNp linux-3.0.4/arch/x86/kernel/cpu/intel.c linux-3.0.4/arch/x86/kernel/cpu/intel.c
10951--- linux-3.0.4/arch/x86/kernel/cpu/intel.c 2011-09-02 18:11:26.000000000 -0400
10952+++ linux-3.0.4/arch/x86/kernel/cpu/intel.c 2011-08-29 23:30:14.000000000 -0400
10953@@ -172,7 +172,7 @@ static void __cpuinit trap_init_f00f_bug
10954 * Update the IDT descriptor and reload the IDT so that
10955 * it uses the read-only mapped virtual address.
10956 */
10957- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
10958+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
10959 load_idt(&idt_descr);
10960 }
10961 #endif
10962diff -urNp linux-3.0.4/arch/x86/kernel/cpu/Makefile linux-3.0.4/arch/x86/kernel/cpu/Makefile
10963--- linux-3.0.4/arch/x86/kernel/cpu/Makefile 2011-07-21 22:17:23.000000000 -0400
10964+++ linux-3.0.4/arch/x86/kernel/cpu/Makefile 2011-08-23 21:47:55.000000000 -0400
10965@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
10966 CFLAGS_REMOVE_perf_event.o = -pg
10967 endif
10968
10969-# Make sure load_percpu_segment has no stackprotector
10970-nostackp := $(call cc-option, -fno-stack-protector)
10971-CFLAGS_common.o := $(nostackp)
10972-
10973 obj-y := intel_cacheinfo.o scattered.o topology.o
10974 obj-y += proc.o capflags.o powerflags.o common.o
10975 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
10976diff -urNp linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce.c linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce.c
10977--- linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce.c 2011-07-21 22:17:23.000000000 -0400
10978+++ linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce.c 2011-08-23 21:47:55.000000000 -0400
10979@@ -46,6 +46,7 @@
10980 #include <asm/ipi.h>
10981 #include <asm/mce.h>
10982 #include <asm/msr.h>
10983+#include <asm/local.h>
10984
10985 #include "mce-internal.h"
10986
10987@@ -208,7 +209,7 @@ static void print_mce(struct mce *m)
10988 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
10989 m->cs, m->ip);
10990
10991- if (m->cs == __KERNEL_CS)
10992+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
10993 print_symbol("{%s}", m->ip);
10994 pr_cont("\n");
10995 }
10996@@ -236,10 +237,10 @@ static void print_mce(struct mce *m)
10997
10998 #define PANIC_TIMEOUT 5 /* 5 seconds */
10999
11000-static atomic_t mce_paniced;
11001+static atomic_unchecked_t mce_paniced;
11002
11003 static int fake_panic;
11004-static atomic_t mce_fake_paniced;
11005+static atomic_unchecked_t mce_fake_paniced;
11006
11007 /* Panic in progress. Enable interrupts and wait for final IPI */
11008 static void wait_for_panic(void)
11009@@ -263,7 +264,7 @@ static void mce_panic(char *msg, struct
11010 /*
11011 * Make sure only one CPU runs in machine check panic
11012 */
11013- if (atomic_inc_return(&mce_paniced) > 1)
11014+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
11015 wait_for_panic();
11016 barrier();
11017
11018@@ -271,7 +272,7 @@ static void mce_panic(char *msg, struct
11019 console_verbose();
11020 } else {
11021 /* Don't log too much for fake panic */
11022- if (atomic_inc_return(&mce_fake_paniced) > 1)
11023+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
11024 return;
11025 }
11026 /* First print corrected ones that are still unlogged */
11027@@ -638,7 +639,7 @@ static int mce_timed_out(u64 *t)
11028 * might have been modified by someone else.
11029 */
11030 rmb();
11031- if (atomic_read(&mce_paniced))
11032+ if (atomic_read_unchecked(&mce_paniced))
11033 wait_for_panic();
11034 if (!monarch_timeout)
11035 goto out;
11036@@ -1452,14 +1453,14 @@ void __cpuinit mcheck_cpu_init(struct cp
11037 */
11038
11039 static DEFINE_SPINLOCK(mce_state_lock);
11040-static int open_count; /* #times opened */
11041+static local_t open_count; /* #times opened */
11042 static int open_exclu; /* already open exclusive? */
11043
11044 static int mce_open(struct inode *inode, struct file *file)
11045 {
11046 spin_lock(&mce_state_lock);
11047
11048- if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
11049+ if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
11050 spin_unlock(&mce_state_lock);
11051
11052 return -EBUSY;
11053@@ -1467,7 +1468,7 @@ static int mce_open(struct inode *inode,
11054
11055 if (file->f_flags & O_EXCL)
11056 open_exclu = 1;
11057- open_count++;
11058+ local_inc(&open_count);
11059
11060 spin_unlock(&mce_state_lock);
11061
11062@@ -1478,7 +1479,7 @@ static int mce_release(struct inode *ino
11063 {
11064 spin_lock(&mce_state_lock);
11065
11066- open_count--;
11067+ local_dec(&open_count);
11068 open_exclu = 0;
11069
11070 spin_unlock(&mce_state_lock);
11071@@ -2163,7 +2164,7 @@ struct dentry *mce_get_debugfs_dir(void)
11072 static void mce_reset(void)
11073 {
11074 cpu_missing = 0;
11075- atomic_set(&mce_fake_paniced, 0);
11076+ atomic_set_unchecked(&mce_fake_paniced, 0);
11077 atomic_set(&mce_executing, 0);
11078 atomic_set(&mce_callin, 0);
11079 atomic_set(&global_nwo, 0);
11080diff -urNp linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce-inject.c
11081--- linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-07-21 22:17:23.000000000 -0400
11082+++ linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-08-23 21:47:55.000000000 -0400
11083@@ -215,7 +215,9 @@ static int inject_init(void)
11084 if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
11085 return -ENOMEM;
11086 printk(KERN_INFO "Machine check injector initialized\n");
11087- mce_chrdev_ops.write = mce_write;
11088+ pax_open_kernel();
11089+ *(void **)&mce_chrdev_ops.write = mce_write;
11090+ pax_close_kernel();
11091 register_die_notifier(&mce_raise_nb);
11092 return 0;
11093 }
11094diff -urNp linux-3.0.4/arch/x86/kernel/cpu/mtrr/main.c linux-3.0.4/arch/x86/kernel/cpu/mtrr/main.c
11095--- linux-3.0.4/arch/x86/kernel/cpu/mtrr/main.c 2011-09-02 18:11:26.000000000 -0400
11096+++ linux-3.0.4/arch/x86/kernel/cpu/mtrr/main.c 2011-08-29 23:26:21.000000000 -0400
11097@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
11098 u64 size_or_mask, size_and_mask;
11099 static bool mtrr_aps_delayed_init;
11100
11101-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
11102+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
11103
11104 const struct mtrr_ops *mtrr_if;
11105
11106diff -urNp linux-3.0.4/arch/x86/kernel/cpu/mtrr/mtrr.h linux-3.0.4/arch/x86/kernel/cpu/mtrr/mtrr.h
11107--- linux-3.0.4/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-07-21 22:17:23.000000000 -0400
11108+++ linux-3.0.4/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-08-26 19:49:56.000000000 -0400
11109@@ -25,7 +25,7 @@ struct mtrr_ops {
11110 int (*validate_add_page)(unsigned long base, unsigned long size,
11111 unsigned int type);
11112 int (*have_wrcomb)(void);
11113-};
11114+} __do_const;
11115
11116 extern int generic_get_free_region(unsigned long base, unsigned long size,
11117 int replace_reg);
11118diff -urNp linux-3.0.4/arch/x86/kernel/cpu/perf_event.c linux-3.0.4/arch/x86/kernel/cpu/perf_event.c
11119--- linux-3.0.4/arch/x86/kernel/cpu/perf_event.c 2011-07-21 22:17:23.000000000 -0400
11120+++ linux-3.0.4/arch/x86/kernel/cpu/perf_event.c 2011-10-06 04:17:55.000000000 -0400
11121@@ -781,6 +781,8 @@ static int x86_schedule_events(struct cp
11122 int i, j, w, wmax, num = 0;
11123 struct hw_perf_event *hwc;
11124
11125+ pax_track_stack();
11126+
11127 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
11128
11129 for (i = 0; i < n; i++) {
11130@@ -1872,7 +1874,7 @@ perf_callchain_user(struct perf_callchai
11131 break;
11132
11133 perf_callchain_store(entry, frame.return_address);
11134- fp = frame.next_frame;
11135+ fp = (const void __force_user *)frame.next_frame;
11136 }
11137 }
11138
11139diff -urNp linux-3.0.4/arch/x86/kernel/crash.c linux-3.0.4/arch/x86/kernel/crash.c
11140--- linux-3.0.4/arch/x86/kernel/crash.c 2011-07-21 22:17:23.000000000 -0400
11141+++ linux-3.0.4/arch/x86/kernel/crash.c 2011-08-23 21:47:55.000000000 -0400
11142@@ -42,7 +42,7 @@ static void kdump_nmi_callback(int cpu,
11143 regs = args->regs;
11144
11145 #ifdef CONFIG_X86_32
11146- if (!user_mode_vm(regs)) {
11147+ if (!user_mode(regs)) {
11148 crash_fixup_ss_esp(&fixed_regs, regs);
11149 regs = &fixed_regs;
11150 }
11151diff -urNp linux-3.0.4/arch/x86/kernel/doublefault_32.c linux-3.0.4/arch/x86/kernel/doublefault_32.c
11152--- linux-3.0.4/arch/x86/kernel/doublefault_32.c 2011-07-21 22:17:23.000000000 -0400
11153+++ linux-3.0.4/arch/x86/kernel/doublefault_32.c 2011-08-23 21:47:55.000000000 -0400
11154@@ -11,7 +11,7 @@
11155
11156 #define DOUBLEFAULT_STACKSIZE (1024)
11157 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
11158-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
11159+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
11160
11161 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
11162
11163@@ -21,7 +21,7 @@ static void doublefault_fn(void)
11164 unsigned long gdt, tss;
11165
11166 store_gdt(&gdt_desc);
11167- gdt = gdt_desc.address;
11168+ gdt = (unsigned long)gdt_desc.address;
11169
11170 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
11171
11172@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
11173 /* 0x2 bit is always set */
11174 .flags = X86_EFLAGS_SF | 0x2,
11175 .sp = STACK_START,
11176- .es = __USER_DS,
11177+ .es = __KERNEL_DS,
11178 .cs = __KERNEL_CS,
11179 .ss = __KERNEL_DS,
11180- .ds = __USER_DS,
11181+ .ds = __KERNEL_DS,
11182 .fs = __KERNEL_PERCPU,
11183
11184 .__cr3 = __pa_nodebug(swapper_pg_dir),
11185diff -urNp linux-3.0.4/arch/x86/kernel/dumpstack_32.c linux-3.0.4/arch/x86/kernel/dumpstack_32.c
11186--- linux-3.0.4/arch/x86/kernel/dumpstack_32.c 2011-07-21 22:17:23.000000000 -0400
11187+++ linux-3.0.4/arch/x86/kernel/dumpstack_32.c 2011-08-23 21:47:55.000000000 -0400
11188@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task
11189 bp = stack_frame(task, regs);
11190
11191 for (;;) {
11192- struct thread_info *context;
11193+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11194
11195- context = (struct thread_info *)
11196- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
11197- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
11198+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11199
11200- stack = (unsigned long *)context->previous_esp;
11201- if (!stack)
11202+ if (stack_start == task_stack_page(task))
11203 break;
11204+ stack = *(unsigned long **)stack_start;
11205 if (ops->stack(data, "IRQ") < 0)
11206 break;
11207 touch_nmi_watchdog();
11208@@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs
11209 * When in-kernel, we also print out the stack and code at the
11210 * time of the fault..
11211 */
11212- if (!user_mode_vm(regs)) {
11213+ if (!user_mode(regs)) {
11214 unsigned int code_prologue = code_bytes * 43 / 64;
11215 unsigned int code_len = code_bytes;
11216 unsigned char c;
11217 u8 *ip;
11218+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
11219
11220 printk(KERN_EMERG "Stack:\n");
11221 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
11222
11223 printk(KERN_EMERG "Code: ");
11224
11225- ip = (u8 *)regs->ip - code_prologue;
11226+ ip = (u8 *)regs->ip - code_prologue + cs_base;
11227 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
11228 /* try starting at IP */
11229- ip = (u8 *)regs->ip;
11230+ ip = (u8 *)regs->ip + cs_base;
11231 code_len = code_len - code_prologue + 1;
11232 }
11233 for (i = 0; i < code_len; i++, ip++) {
11234@@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs
11235 printk(" Bad EIP value.");
11236 break;
11237 }
11238- if (ip == (u8 *)regs->ip)
11239+ if (ip == (u8 *)regs->ip + cs_base)
11240 printk("<%02x> ", c);
11241 else
11242 printk("%02x ", c);
11243@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
11244 {
11245 unsigned short ud2;
11246
11247+ ip = ktla_ktva(ip);
11248 if (ip < PAGE_OFFSET)
11249 return 0;
11250 if (probe_kernel_address((unsigned short *)ip, ud2))
11251diff -urNp linux-3.0.4/arch/x86/kernel/dumpstack_64.c linux-3.0.4/arch/x86/kernel/dumpstack_64.c
11252--- linux-3.0.4/arch/x86/kernel/dumpstack_64.c 2011-07-21 22:17:23.000000000 -0400
11253+++ linux-3.0.4/arch/x86/kernel/dumpstack_64.c 2011-08-23 21:47:55.000000000 -0400
11254@@ -147,9 +147,9 @@ void dump_trace(struct task_struct *task
11255 unsigned long *irq_stack_end =
11256 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
11257 unsigned used = 0;
11258- struct thread_info *tinfo;
11259 int graph = 0;
11260 unsigned long dummy;
11261+ void *stack_start;
11262
11263 if (!task)
11264 task = current;
11265@@ -167,10 +167,10 @@ void dump_trace(struct task_struct *task
11266 * current stack address. If the stacks consist of nested
11267 * exceptions
11268 */
11269- tinfo = task_thread_info(task);
11270 for (;;) {
11271 char *id;
11272 unsigned long *estack_end;
11273+
11274 estack_end = in_exception_stack(cpu, (unsigned long)stack,
11275 &used, &id);
11276
11277@@ -178,7 +178,7 @@ void dump_trace(struct task_struct *task
11278 if (ops->stack(data, id) < 0)
11279 break;
11280
11281- bp = ops->walk_stack(tinfo, stack, bp, ops,
11282+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
11283 data, estack_end, &graph);
11284 ops->stack(data, "<EOE>");
11285 /*
11286@@ -197,7 +197,7 @@ void dump_trace(struct task_struct *task
11287 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
11288 if (ops->stack(data, "IRQ") < 0)
11289 break;
11290- bp = ops->walk_stack(tinfo, stack, bp,
11291+ bp = ops->walk_stack(task, irq_stack, stack, bp,
11292 ops, data, irq_stack_end, &graph);
11293 /*
11294 * We link to the next stack (which would be
11295@@ -218,7 +218,8 @@ void dump_trace(struct task_struct *task
11296 /*
11297 * This handles the process stack:
11298 */
11299- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
11300+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11301+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11302 put_cpu();
11303 }
11304 EXPORT_SYMBOL(dump_trace);
11305diff -urNp linux-3.0.4/arch/x86/kernel/dumpstack.c linux-3.0.4/arch/x86/kernel/dumpstack.c
11306--- linux-3.0.4/arch/x86/kernel/dumpstack.c 2011-07-21 22:17:23.000000000 -0400
11307+++ linux-3.0.4/arch/x86/kernel/dumpstack.c 2011-08-23 21:48:14.000000000 -0400
11308@@ -2,6 +2,9 @@
11309 * Copyright (C) 1991, 1992 Linus Torvalds
11310 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
11311 */
11312+#ifdef CONFIG_GRKERNSEC_HIDESYM
11313+#define __INCLUDED_BY_HIDESYM 1
11314+#endif
11315 #include <linux/kallsyms.h>
11316 #include <linux/kprobes.h>
11317 #include <linux/uaccess.h>
11318@@ -35,9 +38,8 @@ void printk_address(unsigned long addres
11319 static void
11320 print_ftrace_graph_addr(unsigned long addr, void *data,
11321 const struct stacktrace_ops *ops,
11322- struct thread_info *tinfo, int *graph)
11323+ struct task_struct *task, int *graph)
11324 {
11325- struct task_struct *task = tinfo->task;
11326 unsigned long ret_addr;
11327 int index = task->curr_ret_stack;
11328
11329@@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long ad
11330 static inline void
11331 print_ftrace_graph_addr(unsigned long addr, void *data,
11332 const struct stacktrace_ops *ops,
11333- struct thread_info *tinfo, int *graph)
11334+ struct task_struct *task, int *graph)
11335 { }
11336 #endif
11337
11338@@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long ad
11339 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
11340 */
11341
11342-static inline int valid_stack_ptr(struct thread_info *tinfo,
11343- void *p, unsigned int size, void *end)
11344+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
11345 {
11346- void *t = tinfo;
11347 if (end) {
11348 if (p < end && p >= (end-THREAD_SIZE))
11349 return 1;
11350@@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct
11351 }
11352
11353 unsigned long
11354-print_context_stack(struct thread_info *tinfo,
11355+print_context_stack(struct task_struct *task, void *stack_start,
11356 unsigned long *stack, unsigned long bp,
11357 const struct stacktrace_ops *ops, void *data,
11358 unsigned long *end, int *graph)
11359 {
11360 struct stack_frame *frame = (struct stack_frame *)bp;
11361
11362- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
11363+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
11364 unsigned long addr;
11365
11366 addr = *stack;
11367@@ -102,7 +102,7 @@ print_context_stack(struct thread_info *
11368 } else {
11369 ops->address(data, addr, 0);
11370 }
11371- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11372+ print_ftrace_graph_addr(addr, data, ops, task, graph);
11373 }
11374 stack++;
11375 }
11376@@ -111,7 +111,7 @@ print_context_stack(struct thread_info *
11377 EXPORT_SYMBOL_GPL(print_context_stack);
11378
11379 unsigned long
11380-print_context_stack_bp(struct thread_info *tinfo,
11381+print_context_stack_bp(struct task_struct *task, void *stack_start,
11382 unsigned long *stack, unsigned long bp,
11383 const struct stacktrace_ops *ops, void *data,
11384 unsigned long *end, int *graph)
11385@@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_inf
11386 struct stack_frame *frame = (struct stack_frame *)bp;
11387 unsigned long *ret_addr = &frame->return_address;
11388
11389- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
11390+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
11391 unsigned long addr = *ret_addr;
11392
11393 if (!__kernel_text_address(addr))
11394@@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_inf
11395 ops->address(data, addr, 1);
11396 frame = frame->next_frame;
11397 ret_addr = &frame->return_address;
11398- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11399+ print_ftrace_graph_addr(addr, data, ops, task, graph);
11400 }
11401
11402 return (unsigned long)frame;
11403@@ -186,7 +186,7 @@ void dump_stack(void)
11404
11405 bp = stack_frame(current, NULL);
11406 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
11407- current->pid, current->comm, print_tainted(),
11408+ task_pid_nr(current), current->comm, print_tainted(),
11409 init_utsname()->release,
11410 (int)strcspn(init_utsname()->version, " "),
11411 init_utsname()->version);
11412@@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
11413 }
11414 EXPORT_SYMBOL_GPL(oops_begin);
11415
11416+extern void gr_handle_kernel_exploit(void);
11417+
11418 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
11419 {
11420 if (regs && kexec_should_crash(current))
11421@@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long fl
11422 panic("Fatal exception in interrupt");
11423 if (panic_on_oops)
11424 panic("Fatal exception");
11425- do_exit(signr);
11426+
11427+ gr_handle_kernel_exploit();
11428+
11429+ do_group_exit(signr);
11430 }
11431
11432 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
11433@@ -269,7 +274,7 @@ int __kprobes __die(const char *str, str
11434
11435 show_registers(regs);
11436 #ifdef CONFIG_X86_32
11437- if (user_mode_vm(regs)) {
11438+ if (user_mode(regs)) {
11439 sp = regs->sp;
11440 ss = regs->ss & 0xffff;
11441 } else {
11442@@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs
11443 unsigned long flags = oops_begin();
11444 int sig = SIGSEGV;
11445
11446- if (!user_mode_vm(regs))
11447+ if (!user_mode(regs))
11448 report_bug(regs->ip, regs);
11449
11450 if (__die(str, regs, err))
11451diff -urNp linux-3.0.4/arch/x86/kernel/early_printk.c linux-3.0.4/arch/x86/kernel/early_printk.c
11452--- linux-3.0.4/arch/x86/kernel/early_printk.c 2011-07-21 22:17:23.000000000 -0400
11453+++ linux-3.0.4/arch/x86/kernel/early_printk.c 2011-08-23 21:48:14.000000000 -0400
11454@@ -7,6 +7,7 @@
11455 #include <linux/pci_regs.h>
11456 #include <linux/pci_ids.h>
11457 #include <linux/errno.h>
11458+#include <linux/sched.h>
11459 #include <asm/io.h>
11460 #include <asm/processor.h>
11461 #include <asm/fcntl.h>
11462@@ -179,6 +180,8 @@ asmlinkage void early_printk(const char
11463 int n;
11464 va_list ap;
11465
11466+ pax_track_stack();
11467+
11468 va_start(ap, fmt);
11469 n = vscnprintf(buf, sizeof(buf), fmt, ap);
11470 early_console->write(early_console, buf, n);
11471diff -urNp linux-3.0.4/arch/x86/kernel/entry_32.S linux-3.0.4/arch/x86/kernel/entry_32.S
11472--- linux-3.0.4/arch/x86/kernel/entry_32.S 2011-07-21 22:17:23.000000000 -0400
11473+++ linux-3.0.4/arch/x86/kernel/entry_32.S 2011-08-30 18:23:52.000000000 -0400
11474@@ -185,13 +185,146 @@
11475 /*CFI_REL_OFFSET gs, PT_GS*/
11476 .endm
11477 .macro SET_KERNEL_GS reg
11478+
11479+#ifdef CONFIG_CC_STACKPROTECTOR
11480 movl $(__KERNEL_STACK_CANARY), \reg
11481+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
11482+ movl $(__USER_DS), \reg
11483+#else
11484+ xorl \reg, \reg
11485+#endif
11486+
11487 movl \reg, %gs
11488 .endm
11489
11490 #endif /* CONFIG_X86_32_LAZY_GS */
11491
11492-.macro SAVE_ALL
11493+.macro pax_enter_kernel
11494+#ifdef CONFIG_PAX_KERNEXEC
11495+ call pax_enter_kernel
11496+#endif
11497+.endm
11498+
11499+.macro pax_exit_kernel
11500+#ifdef CONFIG_PAX_KERNEXEC
11501+ call pax_exit_kernel
11502+#endif
11503+.endm
11504+
11505+#ifdef CONFIG_PAX_KERNEXEC
11506+ENTRY(pax_enter_kernel)
11507+#ifdef CONFIG_PARAVIRT
11508+ pushl %eax
11509+ pushl %ecx
11510+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
11511+ mov %eax, %esi
11512+#else
11513+ mov %cr0, %esi
11514+#endif
11515+ bts $16, %esi
11516+ jnc 1f
11517+ mov %cs, %esi
11518+ cmp $__KERNEL_CS, %esi
11519+ jz 3f
11520+ ljmp $__KERNEL_CS, $3f
11521+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
11522+2:
11523+#ifdef CONFIG_PARAVIRT
11524+ mov %esi, %eax
11525+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11526+#else
11527+ mov %esi, %cr0
11528+#endif
11529+3:
11530+#ifdef CONFIG_PARAVIRT
11531+ popl %ecx
11532+ popl %eax
11533+#endif
11534+ ret
11535+ENDPROC(pax_enter_kernel)
11536+
11537+ENTRY(pax_exit_kernel)
11538+#ifdef CONFIG_PARAVIRT
11539+ pushl %eax
11540+ pushl %ecx
11541+#endif
11542+ mov %cs, %esi
11543+ cmp $__KERNEXEC_KERNEL_CS, %esi
11544+ jnz 2f
11545+#ifdef CONFIG_PARAVIRT
11546+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
11547+ mov %eax, %esi
11548+#else
11549+ mov %cr0, %esi
11550+#endif
11551+ btr $16, %esi
11552+ ljmp $__KERNEL_CS, $1f
11553+1:
11554+#ifdef CONFIG_PARAVIRT
11555+ mov %esi, %eax
11556+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
11557+#else
11558+ mov %esi, %cr0
11559+#endif
11560+2:
11561+#ifdef CONFIG_PARAVIRT
11562+ popl %ecx
11563+ popl %eax
11564+#endif
11565+ ret
11566+ENDPROC(pax_exit_kernel)
11567+#endif
11568+
11569+.macro pax_erase_kstack
11570+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11571+ call pax_erase_kstack
11572+#endif
11573+.endm
11574+
11575+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11576+/*
11577+ * ebp: thread_info
11578+ * ecx, edx: can be clobbered
11579+ */
11580+ENTRY(pax_erase_kstack)
11581+ pushl %edi
11582+ pushl %eax
11583+
11584+ mov TI_lowest_stack(%ebp), %edi
11585+ mov $-0xBEEF, %eax
11586+ std
11587+
11588+1: mov %edi, %ecx
11589+ and $THREAD_SIZE_asm - 1, %ecx
11590+ shr $2, %ecx
11591+ repne scasl
11592+ jecxz 2f
11593+
11594+ cmp $2*16, %ecx
11595+ jc 2f
11596+
11597+ mov $2*16, %ecx
11598+ repe scasl
11599+ jecxz 2f
11600+ jne 1b
11601+
11602+2: cld
11603+ mov %esp, %ecx
11604+ sub %edi, %ecx
11605+ shr $2, %ecx
11606+ rep stosl
11607+
11608+ mov TI_task_thread_sp0(%ebp), %edi
11609+ sub $128, %edi
11610+ mov %edi, TI_lowest_stack(%ebp)
11611+
11612+ popl %eax
11613+ popl %edi
11614+ ret
11615+ENDPROC(pax_erase_kstack)
11616+#endif
11617+
11618+.macro __SAVE_ALL _DS
11619 cld
11620 PUSH_GS
11621 pushl_cfi %fs
11622@@ -214,7 +347,7 @@
11623 CFI_REL_OFFSET ecx, 0
11624 pushl_cfi %ebx
11625 CFI_REL_OFFSET ebx, 0
11626- movl $(__USER_DS), %edx
11627+ movl $\_DS, %edx
11628 movl %edx, %ds
11629 movl %edx, %es
11630 movl $(__KERNEL_PERCPU), %edx
11631@@ -222,6 +355,15 @@
11632 SET_KERNEL_GS %edx
11633 .endm
11634
11635+.macro SAVE_ALL
11636+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
11637+ __SAVE_ALL __KERNEL_DS
11638+ pax_enter_kernel
11639+#else
11640+ __SAVE_ALL __USER_DS
11641+#endif
11642+.endm
11643+
11644 .macro RESTORE_INT_REGS
11645 popl_cfi %ebx
11646 CFI_RESTORE ebx
11647@@ -332,7 +474,15 @@ check_userspace:
11648 movb PT_CS(%esp), %al
11649 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
11650 cmpl $USER_RPL, %eax
11651+
11652+#ifdef CONFIG_PAX_KERNEXEC
11653+ jae resume_userspace
11654+
11655+ PAX_EXIT_KERNEL
11656+ jmp resume_kernel
11657+#else
11658 jb resume_kernel # not returning to v8086 or userspace
11659+#endif
11660
11661 ENTRY(resume_userspace)
11662 LOCKDEP_SYS_EXIT
11663@@ -344,7 +494,7 @@ ENTRY(resume_userspace)
11664 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
11665 # int/exception return?
11666 jne work_pending
11667- jmp restore_all
11668+ jmp restore_all_pax
11669 END(ret_from_exception)
11670
11671 #ifdef CONFIG_PREEMPT
11672@@ -394,23 +544,34 @@ sysenter_past_esp:
11673 /*CFI_REL_OFFSET cs, 0*/
11674 /*
11675 * Push current_thread_info()->sysenter_return to the stack.
11676- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
11677- * pushed above; +8 corresponds to copy_thread's esp0 setting.
11678 */
11679- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
11680+ pushl_cfi $0
11681 CFI_REL_OFFSET eip, 0
11682
11683 pushl_cfi %eax
11684 SAVE_ALL
11685+ GET_THREAD_INFO(%ebp)
11686+ movl TI_sysenter_return(%ebp),%ebp
11687+ movl %ebp,PT_EIP(%esp)
11688 ENABLE_INTERRUPTS(CLBR_NONE)
11689
11690 /*
11691 * Load the potential sixth argument from user stack.
11692 * Careful about security.
11693 */
11694+ movl PT_OLDESP(%esp),%ebp
11695+
11696+#ifdef CONFIG_PAX_MEMORY_UDEREF
11697+ mov PT_OLDSS(%esp),%ds
11698+1: movl %ds:(%ebp),%ebp
11699+ push %ss
11700+ pop %ds
11701+#else
11702 cmpl $__PAGE_OFFSET-3,%ebp
11703 jae syscall_fault
11704 1: movl (%ebp),%ebp
11705+#endif
11706+
11707 movl %ebp,PT_EBP(%esp)
11708 .section __ex_table,"a"
11709 .align 4
11710@@ -433,12 +594,24 @@ sysenter_do_call:
11711 testl $_TIF_ALLWORK_MASK, %ecx
11712 jne sysexit_audit
11713 sysenter_exit:
11714+
11715+#ifdef CONFIG_PAX_RANDKSTACK
11716+ pushl_cfi %eax
11717+ movl %esp, %eax
11718+ call pax_randomize_kstack
11719+ popl_cfi %eax
11720+#endif
11721+
11722+ pax_erase_kstack
11723+
11724 /* if something modifies registers it must also disable sysexit */
11725 movl PT_EIP(%esp), %edx
11726 movl PT_OLDESP(%esp), %ecx
11727 xorl %ebp,%ebp
11728 TRACE_IRQS_ON
11729 1: mov PT_FS(%esp), %fs
11730+2: mov PT_DS(%esp), %ds
11731+3: mov PT_ES(%esp), %es
11732 PTGS_TO_GS
11733 ENABLE_INTERRUPTS_SYSEXIT
11734
11735@@ -455,6 +628,9 @@ sysenter_audit:
11736 movl %eax,%edx /* 2nd arg: syscall number */
11737 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
11738 call audit_syscall_entry
11739+
11740+ pax_erase_kstack
11741+
11742 pushl_cfi %ebx
11743 movl PT_EAX(%esp),%eax /* reload syscall number */
11744 jmp sysenter_do_call
11745@@ -481,11 +657,17 @@ sysexit_audit:
11746
11747 CFI_ENDPROC
11748 .pushsection .fixup,"ax"
11749-2: movl $0,PT_FS(%esp)
11750+4: movl $0,PT_FS(%esp)
11751+ jmp 1b
11752+5: movl $0,PT_DS(%esp)
11753+ jmp 1b
11754+6: movl $0,PT_ES(%esp)
11755 jmp 1b
11756 .section __ex_table,"a"
11757 .align 4
11758- .long 1b,2b
11759+ .long 1b,4b
11760+ .long 2b,5b
11761+ .long 3b,6b
11762 .popsection
11763 PTGS_TO_GS_EX
11764 ENDPROC(ia32_sysenter_target)
11765@@ -518,6 +700,15 @@ syscall_exit:
11766 testl $_TIF_ALLWORK_MASK, %ecx # current->work
11767 jne syscall_exit_work
11768
11769+restore_all_pax:
11770+
11771+#ifdef CONFIG_PAX_RANDKSTACK
11772+ movl %esp, %eax
11773+ call pax_randomize_kstack
11774+#endif
11775+
11776+ pax_erase_kstack
11777+
11778 restore_all:
11779 TRACE_IRQS_IRET
11780 restore_all_notrace:
11781@@ -577,14 +768,34 @@ ldt_ss:
11782 * compensating for the offset by changing to the ESPFIX segment with
11783 * a base address that matches for the difference.
11784 */
11785-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
11786+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
11787 mov %esp, %edx /* load kernel esp */
11788 mov PT_OLDESP(%esp), %eax /* load userspace esp */
11789 mov %dx, %ax /* eax: new kernel esp */
11790 sub %eax, %edx /* offset (low word is 0) */
11791+#ifdef CONFIG_SMP
11792+ movl PER_CPU_VAR(cpu_number), %ebx
11793+ shll $PAGE_SHIFT_asm, %ebx
11794+ addl $cpu_gdt_table, %ebx
11795+#else
11796+ movl $cpu_gdt_table, %ebx
11797+#endif
11798 shr $16, %edx
11799- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
11800- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
11801+
11802+#ifdef CONFIG_PAX_KERNEXEC
11803+ mov %cr0, %esi
11804+ btr $16, %esi
11805+ mov %esi, %cr0
11806+#endif
11807+
11808+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
11809+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
11810+
11811+#ifdef CONFIG_PAX_KERNEXEC
11812+ bts $16, %esi
11813+ mov %esi, %cr0
11814+#endif
11815+
11816 pushl_cfi $__ESPFIX_SS
11817 pushl_cfi %eax /* new kernel esp */
11818 /* Disable interrupts, but do not irqtrace this section: we
11819@@ -613,29 +824,23 @@ work_resched:
11820 movl TI_flags(%ebp), %ecx
11821 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
11822 # than syscall tracing?
11823- jz restore_all
11824+ jz restore_all_pax
11825 testb $_TIF_NEED_RESCHED, %cl
11826 jnz work_resched
11827
11828 work_notifysig: # deal with pending signals and
11829 # notify-resume requests
11830+ movl %esp, %eax
11831 #ifdef CONFIG_VM86
11832 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
11833- movl %esp, %eax
11834- jne work_notifysig_v86 # returning to kernel-space or
11835+ jz 1f # returning to kernel-space or
11836 # vm86-space
11837- xorl %edx, %edx
11838- call do_notify_resume
11839- jmp resume_userspace_sig
11840
11841- ALIGN
11842-work_notifysig_v86:
11843 pushl_cfi %ecx # save ti_flags for do_notify_resume
11844 call save_v86_state # %eax contains pt_regs pointer
11845 popl_cfi %ecx
11846 movl %eax, %esp
11847-#else
11848- movl %esp, %eax
11849+1:
11850 #endif
11851 xorl %edx, %edx
11852 call do_notify_resume
11853@@ -648,6 +853,9 @@ syscall_trace_entry:
11854 movl $-ENOSYS,PT_EAX(%esp)
11855 movl %esp, %eax
11856 call syscall_trace_enter
11857+
11858+ pax_erase_kstack
11859+
11860 /* What it returned is what we'll actually use. */
11861 cmpl $(nr_syscalls), %eax
11862 jnae syscall_call
11863@@ -670,6 +878,10 @@ END(syscall_exit_work)
11864
11865 RING0_INT_FRAME # can't unwind into user space anyway
11866 syscall_fault:
11867+#ifdef CONFIG_PAX_MEMORY_UDEREF
11868+ push %ss
11869+ pop %ds
11870+#endif
11871 GET_THREAD_INFO(%ebp)
11872 movl $-EFAULT,PT_EAX(%esp)
11873 jmp resume_userspace
11874@@ -752,6 +964,36 @@ ptregs_clone:
11875 CFI_ENDPROC
11876 ENDPROC(ptregs_clone)
11877
11878+ ALIGN;
11879+ENTRY(kernel_execve)
11880+ CFI_STARTPROC
11881+ pushl_cfi %ebp
11882+ sub $PT_OLDSS+4,%esp
11883+ pushl_cfi %edi
11884+ pushl_cfi %ecx
11885+ pushl_cfi %eax
11886+ lea 3*4(%esp),%edi
11887+ mov $PT_OLDSS/4+1,%ecx
11888+ xorl %eax,%eax
11889+ rep stosl
11890+ popl_cfi %eax
11891+ popl_cfi %ecx
11892+ popl_cfi %edi
11893+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
11894+ pushl_cfi %esp
11895+ call sys_execve
11896+ add $4,%esp
11897+ CFI_ADJUST_CFA_OFFSET -4
11898+ GET_THREAD_INFO(%ebp)
11899+ test %eax,%eax
11900+ jz syscall_exit
11901+ add $PT_OLDSS+4,%esp
11902+ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
11903+ popl_cfi %ebp
11904+ ret
11905+ CFI_ENDPROC
11906+ENDPROC(kernel_execve)
11907+
11908 .macro FIXUP_ESPFIX_STACK
11909 /*
11910 * Switch back for ESPFIX stack to the normal zerobased stack
11911@@ -761,8 +1003,15 @@ ENDPROC(ptregs_clone)
11912 * normal stack and adjusts ESP with the matching offset.
11913 */
11914 /* fixup the stack */
11915- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
11916- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
11917+#ifdef CONFIG_SMP
11918+ movl PER_CPU_VAR(cpu_number), %ebx
11919+ shll $PAGE_SHIFT_asm, %ebx
11920+ addl $cpu_gdt_table, %ebx
11921+#else
11922+ movl $cpu_gdt_table, %ebx
11923+#endif
11924+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
11925+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
11926 shl $16, %eax
11927 addl %esp, %eax /* the adjusted stack pointer */
11928 pushl_cfi $__KERNEL_DS
11929@@ -1213,7 +1462,6 @@ return_to_handler:
11930 jmp *%ecx
11931 #endif
11932
11933-.section .rodata,"a"
11934 #include "syscall_table_32.S"
11935
11936 syscall_table_size=(.-sys_call_table)
11937@@ -1259,9 +1507,12 @@ error_code:
11938 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
11939 REG_TO_PTGS %ecx
11940 SET_KERNEL_GS %ecx
11941- movl $(__USER_DS), %ecx
11942+ movl $(__KERNEL_DS), %ecx
11943 movl %ecx, %ds
11944 movl %ecx, %es
11945+
11946+ pax_enter_kernel
11947+
11948 TRACE_IRQS_OFF
11949 movl %esp,%eax # pt_regs pointer
11950 call *%edi
11951@@ -1346,6 +1597,9 @@ nmi_stack_correct:
11952 xorl %edx,%edx # zero error code
11953 movl %esp,%eax # pt_regs pointer
11954 call do_nmi
11955+
11956+ pax_exit_kernel
11957+
11958 jmp restore_all_notrace
11959 CFI_ENDPROC
11960
11961@@ -1382,6 +1636,9 @@ nmi_espfix_stack:
11962 FIXUP_ESPFIX_STACK # %eax == %esp
11963 xorl %edx,%edx # zero error code
11964 call do_nmi
11965+
11966+ pax_exit_kernel
11967+
11968 RESTORE_REGS
11969 lss 12+4(%esp), %esp # back to espfix stack
11970 CFI_ADJUST_CFA_OFFSET -24
11971diff -urNp linux-3.0.4/arch/x86/kernel/entry_64.S linux-3.0.4/arch/x86/kernel/entry_64.S
11972--- linux-3.0.4/arch/x86/kernel/entry_64.S 2011-07-21 22:17:23.000000000 -0400
11973+++ linux-3.0.4/arch/x86/kernel/entry_64.S 2011-10-06 04:17:55.000000000 -0400
11974@@ -53,6 +53,8 @@
11975 #include <asm/paravirt.h>
11976 #include <asm/ftrace.h>
11977 #include <asm/percpu.h>
11978+#include <asm/pgtable.h>
11979+#include <asm/alternative-asm.h>
11980
11981 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
11982 #include <linux/elf-em.h>
11983@@ -176,6 +178,264 @@ ENTRY(native_usergs_sysret64)
11984 ENDPROC(native_usergs_sysret64)
11985 #endif /* CONFIG_PARAVIRT */
11986
11987+ .macro ljmpq sel, off
11988+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
11989+ .byte 0x48; ljmp *1234f(%rip)
11990+ .pushsection .rodata
11991+ .align 16
11992+ 1234: .quad \off; .word \sel
11993+ .popsection
11994+#else
11995+ pushq $\sel
11996+ pushq $\off
11997+ lretq
11998+#endif
11999+ .endm
12000+
12001+ .macro pax_enter_kernel
12002+#ifdef CONFIG_PAX_KERNEXEC
12003+ call pax_enter_kernel
12004+#endif
12005+ .endm
12006+
12007+ .macro pax_exit_kernel
12008+#ifdef CONFIG_PAX_KERNEXEC
12009+ call pax_exit_kernel
12010+#endif
12011+ .endm
12012+
12013+#ifdef CONFIG_PAX_KERNEXEC
12014+ENTRY(pax_enter_kernel)
12015+ pushq %rdi
12016+
12017+#ifdef CONFIG_PARAVIRT
12018+ PV_SAVE_REGS(CLBR_RDI)
12019+#endif
12020+
12021+ GET_CR0_INTO_RDI
12022+ bts $16,%rdi
12023+ jnc 1f
12024+ mov %cs,%edi
12025+ cmp $__KERNEL_CS,%edi
12026+ jz 3f
12027+ ljmpq __KERNEL_CS,3f
12028+1: ljmpq __KERNEXEC_KERNEL_CS,2f
12029+2: SET_RDI_INTO_CR0
12030+3:
12031+
12032+#ifdef CONFIG_PARAVIRT
12033+ PV_RESTORE_REGS(CLBR_RDI)
12034+#endif
12035+
12036+ popq %rdi
12037+ retq
12038+ENDPROC(pax_enter_kernel)
12039+
12040+ENTRY(pax_exit_kernel)
12041+ pushq %rdi
12042+
12043+#ifdef CONFIG_PARAVIRT
12044+ PV_SAVE_REGS(CLBR_RDI)
12045+#endif
12046+
12047+ mov %cs,%rdi
12048+ cmp $__KERNEXEC_KERNEL_CS,%edi
12049+ jnz 2f
12050+ GET_CR0_INTO_RDI
12051+ btr $16,%rdi
12052+ ljmpq __KERNEL_CS,1f
12053+1: SET_RDI_INTO_CR0
12054+2:
12055+
12056+#ifdef CONFIG_PARAVIRT
12057+ PV_RESTORE_REGS(CLBR_RDI);
12058+#endif
12059+
12060+ popq %rdi
12061+ retq
12062+ENDPROC(pax_exit_kernel)
12063+#endif
12064+
12065+ .macro pax_enter_kernel_user
12066+#ifdef CONFIG_PAX_MEMORY_UDEREF
12067+ call pax_enter_kernel_user
12068+#endif
12069+ .endm
12070+
12071+ .macro pax_exit_kernel_user
12072+#ifdef CONFIG_PAX_MEMORY_UDEREF
12073+ call pax_exit_kernel_user
12074+#endif
12075+#ifdef CONFIG_PAX_RANDKSTACK
12076+ push %rax
12077+ call pax_randomize_kstack
12078+ pop %rax
12079+#endif
12080+ .endm
12081+
12082+#ifdef CONFIG_PAX_MEMORY_UDEREF
12083+ENTRY(pax_enter_kernel_user)
12084+ pushq %rdi
12085+ pushq %rbx
12086+
12087+#ifdef CONFIG_PARAVIRT
12088+ PV_SAVE_REGS(CLBR_RDI)
12089+#endif
12090+
12091+ GET_CR3_INTO_RDI
12092+ mov %rdi,%rbx
12093+ add $__START_KERNEL_map,%rbx
12094+ sub phys_base(%rip),%rbx
12095+
12096+#ifdef CONFIG_PARAVIRT
12097+ pushq %rdi
12098+ cmpl $0, pv_info+PARAVIRT_enabled
12099+ jz 1f
12100+ i = 0
12101+ .rept USER_PGD_PTRS
12102+ mov i*8(%rbx),%rsi
12103+ mov $0,%sil
12104+ lea i*8(%rbx),%rdi
12105+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
12106+ i = i + 1
12107+ .endr
12108+ jmp 2f
12109+1:
12110+#endif
12111+
12112+ i = 0
12113+ .rept USER_PGD_PTRS
12114+ movb $0,i*8(%rbx)
12115+ i = i + 1
12116+ .endr
12117+
12118+#ifdef CONFIG_PARAVIRT
12119+2: popq %rdi
12120+#endif
12121+ SET_RDI_INTO_CR3
12122+
12123+#ifdef CONFIG_PAX_KERNEXEC
12124+ GET_CR0_INTO_RDI
12125+ bts $16,%rdi
12126+ SET_RDI_INTO_CR0
12127+#endif
12128+
12129+#ifdef CONFIG_PARAVIRT
12130+ PV_RESTORE_REGS(CLBR_RDI)
12131+#endif
12132+
12133+ popq %rbx
12134+ popq %rdi
12135+ retq
12136+ENDPROC(pax_enter_kernel_user)
12137+
12138+ENTRY(pax_exit_kernel_user)
12139+ push %rdi
12140+
12141+#ifdef CONFIG_PARAVIRT
12142+ pushq %rbx
12143+ PV_SAVE_REGS(CLBR_RDI)
12144+#endif
12145+
12146+#ifdef CONFIG_PAX_KERNEXEC
12147+ GET_CR0_INTO_RDI
12148+ btr $16,%rdi
12149+ SET_RDI_INTO_CR0
12150+#endif
12151+
12152+ GET_CR3_INTO_RDI
12153+ add $__START_KERNEL_map,%rdi
12154+ sub phys_base(%rip),%rdi
12155+
12156+#ifdef CONFIG_PARAVIRT
12157+ cmpl $0, pv_info+PARAVIRT_enabled
12158+ jz 1f
12159+ mov %rdi,%rbx
12160+ i = 0
12161+ .rept USER_PGD_PTRS
12162+ mov i*8(%rbx),%rsi
12163+ mov $0x67,%sil
12164+ lea i*8(%rbx),%rdi
12165+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
12166+ i = i + 1
12167+ .endr
12168+ jmp 2f
12169+1:
12170+#endif
12171+
12172+ i = 0
12173+ .rept USER_PGD_PTRS
12174+ movb $0x67,i*8(%rdi)
12175+ i = i + 1
12176+ .endr
12177+
12178+#ifdef CONFIG_PARAVIRT
12179+2: PV_RESTORE_REGS(CLBR_RDI)
12180+ popq %rbx
12181+#endif
12182+
12183+ popq %rdi
12184+ retq
12185+ENDPROC(pax_exit_kernel_user)
12186+#endif
12187+
12188+ .macro pax_erase_kstack
12189+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12190+ call pax_erase_kstack
12191+#endif
12192+ .endm
12193+
12194+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12195+/*
12196+ * r10: thread_info
12197+ * rcx, rdx: can be clobbered
12198+ */
12199+ENTRY(pax_erase_kstack)
12200+ pushq %rdi
12201+ pushq %rax
12202+ pushq %r10
12203+
12204+ GET_THREAD_INFO(%r10)
12205+ mov TI_lowest_stack(%r10), %rdi
12206+ mov $-0xBEEF, %rax
12207+ std
12208+
12209+1: mov %edi, %ecx
12210+ and $THREAD_SIZE_asm - 1, %ecx
12211+ shr $3, %ecx
12212+ repne scasq
12213+ jecxz 2f
12214+
12215+ cmp $2*8, %ecx
12216+ jc 2f
12217+
12218+ mov $2*8, %ecx
12219+ repe scasq
12220+ jecxz 2f
12221+ jne 1b
12222+
12223+2: cld
12224+ mov %esp, %ecx
12225+ sub %edi, %ecx
12226+
12227+ cmp $THREAD_SIZE_asm, %rcx
12228+ jb 3f
12229+ ud2
12230+3:
12231+
12232+ shr $3, %ecx
12233+ rep stosq
12234+
12235+ mov TI_task_thread_sp0(%r10), %rdi
12236+ sub $256, %rdi
12237+ mov %rdi, TI_lowest_stack(%r10)
12238+
12239+ popq %r10
12240+ popq %rax
12241+ popq %rdi
12242+ ret
12243+ENDPROC(pax_erase_kstack)
12244+#endif
12245
12246 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
12247 #ifdef CONFIG_TRACE_IRQFLAGS
12248@@ -318,7 +578,7 @@ ENTRY(save_args)
12249 leaq -RBP+8(%rsp),%rdi /* arg1 for handler */
12250 movq_cfi rbp, 8 /* push %rbp */
12251 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
12252- testl $3, CS(%rdi)
12253+ testb $3, CS(%rdi)
12254 je 1f
12255 SWAPGS
12256 /*
12257@@ -409,7 +669,7 @@ ENTRY(ret_from_fork)
12258
12259 RESTORE_REST
12260
12261- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12262+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12263 je int_ret_from_sys_call
12264
12265 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
12266@@ -455,7 +715,7 @@ END(ret_from_fork)
12267 ENTRY(system_call)
12268 CFI_STARTPROC simple
12269 CFI_SIGNAL_FRAME
12270- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
12271+ CFI_DEF_CFA rsp,0
12272 CFI_REGISTER rip,rcx
12273 /*CFI_REGISTER rflags,r11*/
12274 SWAPGS_UNSAFE_STACK
12275@@ -468,12 +728,13 @@ ENTRY(system_call_after_swapgs)
12276
12277 movq %rsp,PER_CPU_VAR(old_rsp)
12278 movq PER_CPU_VAR(kernel_stack),%rsp
12279+ pax_enter_kernel_user
12280 /*
12281 * No need to follow this irqs off/on section - it's straight
12282 * and short:
12283 */
12284 ENABLE_INTERRUPTS(CLBR_NONE)
12285- SAVE_ARGS 8,1
12286+ SAVE_ARGS 8*6,1
12287 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
12288 movq %rcx,RIP-ARGOFFSET(%rsp)
12289 CFI_REL_OFFSET rip,RIP-ARGOFFSET
12290@@ -502,6 +763,8 @@ sysret_check:
12291 andl %edi,%edx
12292 jnz sysret_careful
12293 CFI_REMEMBER_STATE
12294+ pax_exit_kernel_user
12295+ pax_erase_kstack
12296 /*
12297 * sysretq will re-enable interrupts:
12298 */
12299@@ -560,6 +823,9 @@ auditsys:
12300 movq %rax,%rsi /* 2nd arg: syscall number */
12301 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
12302 call audit_syscall_entry
12303+
12304+ pax_erase_kstack
12305+
12306 LOAD_ARGS 0 /* reload call-clobbered registers */
12307 jmp system_call_fastpath
12308
12309@@ -590,6 +856,9 @@ tracesys:
12310 FIXUP_TOP_OF_STACK %rdi
12311 movq %rsp,%rdi
12312 call syscall_trace_enter
12313+
12314+ pax_erase_kstack
12315+
12316 /*
12317 * Reload arg registers from stack in case ptrace changed them.
12318 * We don't reload %rax because syscall_trace_enter() returned
12319@@ -611,7 +880,7 @@ tracesys:
12320 GLOBAL(int_ret_from_sys_call)
12321 DISABLE_INTERRUPTS(CLBR_NONE)
12322 TRACE_IRQS_OFF
12323- testl $3,CS-ARGOFFSET(%rsp)
12324+ testb $3,CS-ARGOFFSET(%rsp)
12325 je retint_restore_args
12326 movl $_TIF_ALLWORK_MASK,%edi
12327 /* edi: mask to check */
12328@@ -793,6 +1062,16 @@ END(interrupt)
12329 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
12330 call save_args
12331 PARTIAL_FRAME 0
12332+#ifdef CONFIG_PAX_MEMORY_UDEREF
12333+ testb $3, CS(%rdi)
12334+ jnz 1f
12335+ pax_enter_kernel
12336+ jmp 2f
12337+1: pax_enter_kernel_user
12338+2:
12339+#else
12340+ pax_enter_kernel
12341+#endif
12342 call \func
12343 .endm
12344
12345@@ -825,7 +1104,7 @@ ret_from_intr:
12346 CFI_ADJUST_CFA_OFFSET -8
12347 exit_intr:
12348 GET_THREAD_INFO(%rcx)
12349- testl $3,CS-ARGOFFSET(%rsp)
12350+ testb $3,CS-ARGOFFSET(%rsp)
12351 je retint_kernel
12352
12353 /* Interrupt came from user space */
12354@@ -847,12 +1126,16 @@ retint_swapgs: /* return to user-space
12355 * The iretq could re-enable interrupts:
12356 */
12357 DISABLE_INTERRUPTS(CLBR_ANY)
12358+ pax_exit_kernel_user
12359+ pax_erase_kstack
12360 TRACE_IRQS_IRETQ
12361 SWAPGS
12362 jmp restore_args
12363
12364 retint_restore_args: /* return to kernel space */
12365 DISABLE_INTERRUPTS(CLBR_ANY)
12366+ pax_exit_kernel
12367+ pax_force_retaddr RIP-ARGOFFSET
12368 /*
12369 * The iretq could re-enable interrupts:
12370 */
12371@@ -1027,6 +1310,16 @@ ENTRY(\sym)
12372 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12373 call error_entry
12374 DEFAULT_FRAME 0
12375+#ifdef CONFIG_PAX_MEMORY_UDEREF
12376+ testb $3, CS(%rsp)
12377+ jnz 1f
12378+ pax_enter_kernel
12379+ jmp 2f
12380+1: pax_enter_kernel_user
12381+2:
12382+#else
12383+ pax_enter_kernel
12384+#endif
12385 movq %rsp,%rdi /* pt_regs pointer */
12386 xorl %esi,%esi /* no error code */
12387 call \do_sym
12388@@ -1044,6 +1337,16 @@ ENTRY(\sym)
12389 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12390 call save_paranoid
12391 TRACE_IRQS_OFF
12392+#ifdef CONFIG_PAX_MEMORY_UDEREF
12393+ testb $3, CS(%rsp)
12394+ jnz 1f
12395+ pax_enter_kernel
12396+ jmp 2f
12397+1: pax_enter_kernel_user
12398+2:
12399+#else
12400+ pax_enter_kernel
12401+#endif
12402 movq %rsp,%rdi /* pt_regs pointer */
12403 xorl %esi,%esi /* no error code */
12404 call \do_sym
12405@@ -1052,7 +1355,7 @@ ENTRY(\sym)
12406 END(\sym)
12407 .endm
12408
12409-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
12410+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
12411 .macro paranoidzeroentry_ist sym do_sym ist
12412 ENTRY(\sym)
12413 INTR_FRAME
12414@@ -1062,8 +1365,24 @@ ENTRY(\sym)
12415 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12416 call save_paranoid
12417 TRACE_IRQS_OFF
12418+#ifdef CONFIG_PAX_MEMORY_UDEREF
12419+ testb $3, CS(%rsp)
12420+ jnz 1f
12421+ pax_enter_kernel
12422+ jmp 2f
12423+1: pax_enter_kernel_user
12424+2:
12425+#else
12426+ pax_enter_kernel
12427+#endif
12428 movq %rsp,%rdi /* pt_regs pointer */
12429 xorl %esi,%esi /* no error code */
12430+#ifdef CONFIG_SMP
12431+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
12432+ lea init_tss(%r12), %r12
12433+#else
12434+ lea init_tss(%rip), %r12
12435+#endif
12436 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12437 call \do_sym
12438 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12439@@ -1080,6 +1399,16 @@ ENTRY(\sym)
12440 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12441 call error_entry
12442 DEFAULT_FRAME 0
12443+#ifdef CONFIG_PAX_MEMORY_UDEREF
12444+ testb $3, CS(%rsp)
12445+ jnz 1f
12446+ pax_enter_kernel
12447+ jmp 2f
12448+1: pax_enter_kernel_user
12449+2:
12450+#else
12451+ pax_enter_kernel
12452+#endif
12453 movq %rsp,%rdi /* pt_regs pointer */
12454 movq ORIG_RAX(%rsp),%rsi /* get error code */
12455 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12456@@ -1099,6 +1428,16 @@ ENTRY(\sym)
12457 call save_paranoid
12458 DEFAULT_FRAME 0
12459 TRACE_IRQS_OFF
12460+#ifdef CONFIG_PAX_MEMORY_UDEREF
12461+ testb $3, CS(%rsp)
12462+ jnz 1f
12463+ pax_enter_kernel
12464+ jmp 2f
12465+1: pax_enter_kernel_user
12466+2:
12467+#else
12468+ pax_enter_kernel
12469+#endif
12470 movq %rsp,%rdi /* pt_regs pointer */
12471 movq ORIG_RAX(%rsp),%rsi /* get error code */
12472 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12473@@ -1361,16 +1700,31 @@ ENTRY(paranoid_exit)
12474 TRACE_IRQS_OFF
12475 testl %ebx,%ebx /* swapgs needed? */
12476 jnz paranoid_restore
12477- testl $3,CS(%rsp)
12478+ testb $3,CS(%rsp)
12479 jnz paranoid_userspace
12480+#ifdef CONFIG_PAX_MEMORY_UDEREF
12481+ pax_exit_kernel
12482+ TRACE_IRQS_IRETQ 0
12483+ SWAPGS_UNSAFE_STACK
12484+ RESTORE_ALL 8
12485+ pax_force_retaddr
12486+ jmp irq_return
12487+#endif
12488 paranoid_swapgs:
12489+#ifdef CONFIG_PAX_MEMORY_UDEREF
12490+ pax_exit_kernel_user
12491+#else
12492+ pax_exit_kernel
12493+#endif
12494 TRACE_IRQS_IRETQ 0
12495 SWAPGS_UNSAFE_STACK
12496 RESTORE_ALL 8
12497 jmp irq_return
12498 paranoid_restore:
12499+ pax_exit_kernel
12500 TRACE_IRQS_IRETQ 0
12501 RESTORE_ALL 8
12502+ pax_force_retaddr
12503 jmp irq_return
12504 paranoid_userspace:
12505 GET_THREAD_INFO(%rcx)
12506@@ -1426,7 +1780,7 @@ ENTRY(error_entry)
12507 movq_cfi r14, R14+8
12508 movq_cfi r15, R15+8
12509 xorl %ebx,%ebx
12510- testl $3,CS+8(%rsp)
12511+ testb $3,CS+8(%rsp)
12512 je error_kernelspace
12513 error_swapgs:
12514 SWAPGS
12515@@ -1490,6 +1844,16 @@ ENTRY(nmi)
12516 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12517 call save_paranoid
12518 DEFAULT_FRAME 0
12519+#ifdef CONFIG_PAX_MEMORY_UDEREF
12520+ testb $3, CS(%rsp)
12521+ jnz 1f
12522+ pax_enter_kernel
12523+ jmp 2f
12524+1: pax_enter_kernel_user
12525+2:
12526+#else
12527+ pax_enter_kernel
12528+#endif
12529 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
12530 movq %rsp,%rdi
12531 movq $-1,%rsi
12532@@ -1500,12 +1864,28 @@ ENTRY(nmi)
12533 DISABLE_INTERRUPTS(CLBR_NONE)
12534 testl %ebx,%ebx /* swapgs needed? */
12535 jnz nmi_restore
12536- testl $3,CS(%rsp)
12537+ testb $3,CS(%rsp)
12538 jnz nmi_userspace
12539+#ifdef CONFIG_PAX_MEMORY_UDEREF
12540+ pax_exit_kernel
12541+ SWAPGS_UNSAFE_STACK
12542+ RESTORE_ALL 8
12543+ pax_force_retaddr
12544+ jmp irq_return
12545+#endif
12546 nmi_swapgs:
12547+#ifdef CONFIG_PAX_MEMORY_UDEREF
12548+ pax_exit_kernel_user
12549+#else
12550+ pax_exit_kernel
12551+#endif
12552 SWAPGS_UNSAFE_STACK
12553+ RESTORE_ALL 8
12554+ jmp irq_return
12555 nmi_restore:
12556+ pax_exit_kernel
12557 RESTORE_ALL 8
12558+ pax_force_retaddr
12559 jmp irq_return
12560 nmi_userspace:
12561 GET_THREAD_INFO(%rcx)
12562diff -urNp linux-3.0.4/arch/x86/kernel/ftrace.c linux-3.0.4/arch/x86/kernel/ftrace.c
12563--- linux-3.0.4/arch/x86/kernel/ftrace.c 2011-07-21 22:17:23.000000000 -0400
12564+++ linux-3.0.4/arch/x86/kernel/ftrace.c 2011-08-23 21:47:55.000000000 -0400
12565@@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the
12566 static const void *mod_code_newcode; /* holds the text to write to the IP */
12567
12568 static unsigned nmi_wait_count;
12569-static atomic_t nmi_update_count = ATOMIC_INIT(0);
12570+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
12571
12572 int ftrace_arch_read_dyn_info(char *buf, int size)
12573 {
12574@@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf,
12575
12576 r = snprintf(buf, size, "%u %u",
12577 nmi_wait_count,
12578- atomic_read(&nmi_update_count));
12579+ atomic_read_unchecked(&nmi_update_count));
12580 return r;
12581 }
12582
12583@@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
12584
12585 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
12586 smp_rmb();
12587+ pax_open_kernel();
12588 ftrace_mod_code();
12589- atomic_inc(&nmi_update_count);
12590+ pax_close_kernel();
12591+ atomic_inc_unchecked(&nmi_update_count);
12592 }
12593 /* Must have previous changes seen before executions */
12594 smp_mb();
12595@@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, uns
12596 {
12597 unsigned char replaced[MCOUNT_INSN_SIZE];
12598
12599+ ip = ktla_ktva(ip);
12600+
12601 /*
12602 * Note: Due to modules and __init, code can
12603 * disappear and change, we need to protect against faulting
12604@@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_fun
12605 unsigned char old[MCOUNT_INSN_SIZE], *new;
12606 int ret;
12607
12608- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
12609+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
12610 new = ftrace_call_replace(ip, (unsigned long)func);
12611 ret = ftrace_modify_code(ip, old, new);
12612
12613@@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long
12614 {
12615 unsigned char code[MCOUNT_INSN_SIZE];
12616
12617+ ip = ktla_ktva(ip);
12618+
12619 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
12620 return -EFAULT;
12621
12622diff -urNp linux-3.0.4/arch/x86/kernel/head32.c linux-3.0.4/arch/x86/kernel/head32.c
12623--- linux-3.0.4/arch/x86/kernel/head32.c 2011-07-21 22:17:23.000000000 -0400
12624+++ linux-3.0.4/arch/x86/kernel/head32.c 2011-08-23 21:47:55.000000000 -0400
12625@@ -19,6 +19,7 @@
12626 #include <asm/io_apic.h>
12627 #include <asm/bios_ebda.h>
12628 #include <asm/tlbflush.h>
12629+#include <asm/boot.h>
12630
12631 static void __init i386_default_early_setup(void)
12632 {
12633@@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
12634 {
12635 memblock_init();
12636
12637- memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12638+ memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12639
12640 #ifdef CONFIG_BLK_DEV_INITRD
12641 /* Reserve INITRD */
12642diff -urNp linux-3.0.4/arch/x86/kernel/head_32.S linux-3.0.4/arch/x86/kernel/head_32.S
12643--- linux-3.0.4/arch/x86/kernel/head_32.S 2011-07-21 22:17:23.000000000 -0400
12644+++ linux-3.0.4/arch/x86/kernel/head_32.S 2011-08-23 21:47:55.000000000 -0400
12645@@ -25,6 +25,12 @@
12646 /* Physical address */
12647 #define pa(X) ((X) - __PAGE_OFFSET)
12648
12649+#ifdef CONFIG_PAX_KERNEXEC
12650+#define ta(X) (X)
12651+#else
12652+#define ta(X) ((X) - __PAGE_OFFSET)
12653+#endif
12654+
12655 /*
12656 * References to members of the new_cpu_data structure.
12657 */
12658@@ -54,11 +60,7 @@
12659 * and small than max_low_pfn, otherwise will waste some page table entries
12660 */
12661
12662-#if PTRS_PER_PMD > 1
12663-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
12664-#else
12665-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
12666-#endif
12667+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
12668
12669 /* Number of possible pages in the lowmem region */
12670 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
12671@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
12672 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12673
12674 /*
12675+ * Real beginning of normal "text" segment
12676+ */
12677+ENTRY(stext)
12678+ENTRY(_stext)
12679+
12680+/*
12681 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
12682 * %esi points to the real-mode code as a 32-bit pointer.
12683 * CS and DS must be 4 GB flat segments, but we don't depend on
12684@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12685 * can.
12686 */
12687 __HEAD
12688+
12689+#ifdef CONFIG_PAX_KERNEXEC
12690+ jmp startup_32
12691+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
12692+.fill PAGE_SIZE-5,1,0xcc
12693+#endif
12694+
12695 ENTRY(startup_32)
12696 movl pa(stack_start),%ecx
12697
12698@@ -105,6 +120,57 @@ ENTRY(startup_32)
12699 2:
12700 leal -__PAGE_OFFSET(%ecx),%esp
12701
12702+#ifdef CONFIG_SMP
12703+ movl $pa(cpu_gdt_table),%edi
12704+ movl $__per_cpu_load,%eax
12705+ movw %ax,__KERNEL_PERCPU + 2(%edi)
12706+ rorl $16,%eax
12707+ movb %al,__KERNEL_PERCPU + 4(%edi)
12708+ movb %ah,__KERNEL_PERCPU + 7(%edi)
12709+ movl $__per_cpu_end - 1,%eax
12710+ subl $__per_cpu_start,%eax
12711+ movw %ax,__KERNEL_PERCPU + 0(%edi)
12712+#endif
12713+
12714+#ifdef CONFIG_PAX_MEMORY_UDEREF
12715+ movl $NR_CPUS,%ecx
12716+ movl $pa(cpu_gdt_table),%edi
12717+1:
12718+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
12719+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
12720+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
12721+ addl $PAGE_SIZE_asm,%edi
12722+ loop 1b
12723+#endif
12724+
12725+#ifdef CONFIG_PAX_KERNEXEC
12726+ movl $pa(boot_gdt),%edi
12727+ movl $__LOAD_PHYSICAL_ADDR,%eax
12728+ movw %ax,__BOOT_CS + 2(%edi)
12729+ rorl $16,%eax
12730+ movb %al,__BOOT_CS + 4(%edi)
12731+ movb %ah,__BOOT_CS + 7(%edi)
12732+ rorl $16,%eax
12733+
12734+ ljmp $(__BOOT_CS),$1f
12735+1:
12736+
12737+ movl $NR_CPUS,%ecx
12738+ movl $pa(cpu_gdt_table),%edi
12739+ addl $__PAGE_OFFSET,%eax
12740+1:
12741+ movw %ax,__KERNEL_CS + 2(%edi)
12742+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
12743+ rorl $16,%eax
12744+ movb %al,__KERNEL_CS + 4(%edi)
12745+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
12746+ movb %ah,__KERNEL_CS + 7(%edi)
12747+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
12748+ rorl $16,%eax
12749+ addl $PAGE_SIZE_asm,%edi
12750+ loop 1b
12751+#endif
12752+
12753 /*
12754 * Clear BSS first so that there are no surprises...
12755 */
12756@@ -195,8 +261,11 @@ ENTRY(startup_32)
12757 movl %eax, pa(max_pfn_mapped)
12758
12759 /* Do early initialization of the fixmap area */
12760- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12761- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
12762+#ifdef CONFIG_COMPAT_VDSO
12763+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
12764+#else
12765+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
12766+#endif
12767 #else /* Not PAE */
12768
12769 page_pde_offset = (__PAGE_OFFSET >> 20);
12770@@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12771 movl %eax, pa(max_pfn_mapped)
12772
12773 /* Do early initialization of the fixmap area */
12774- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12775- movl %eax,pa(initial_page_table+0xffc)
12776+#ifdef CONFIG_COMPAT_VDSO
12777+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
12778+#else
12779+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
12780+#endif
12781 #endif
12782
12783 #ifdef CONFIG_PARAVIRT
12784@@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12785 cmpl $num_subarch_entries, %eax
12786 jae bad_subarch
12787
12788- movl pa(subarch_entries)(,%eax,4), %eax
12789- subl $__PAGE_OFFSET, %eax
12790- jmp *%eax
12791+ jmp *pa(subarch_entries)(,%eax,4)
12792
12793 bad_subarch:
12794 WEAK(lguest_entry)
12795@@ -255,10 +325,10 @@ WEAK(xen_entry)
12796 __INITDATA
12797
12798 subarch_entries:
12799- .long default_entry /* normal x86/PC */
12800- .long lguest_entry /* lguest hypervisor */
12801- .long xen_entry /* Xen hypervisor */
12802- .long default_entry /* Moorestown MID */
12803+ .long ta(default_entry) /* normal x86/PC */
12804+ .long ta(lguest_entry) /* lguest hypervisor */
12805+ .long ta(xen_entry) /* Xen hypervisor */
12806+ .long ta(default_entry) /* Moorestown MID */
12807 num_subarch_entries = (. - subarch_entries) / 4
12808 .previous
12809 #else
12810@@ -312,6 +382,7 @@ default_entry:
12811 orl %edx,%eax
12812 movl %eax,%cr4
12813
12814+#ifdef CONFIG_X86_PAE
12815 testb $X86_CR4_PAE, %al # check if PAE is enabled
12816 jz 6f
12817
12818@@ -340,6 +411,9 @@ default_entry:
12819 /* Make changes effective */
12820 wrmsr
12821
12822+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
12823+#endif
12824+
12825 6:
12826
12827 /*
12828@@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
12829 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
12830 movl %eax,%ss # after changing gdt.
12831
12832- movl $(__USER_DS),%eax # DS/ES contains default USER segment
12833+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
12834 movl %eax,%ds
12835 movl %eax,%es
12836
12837@@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
12838 */
12839 cmpb $0,ready
12840 jne 1f
12841- movl $gdt_page,%eax
12842+ movl $cpu_gdt_table,%eax
12843 movl $stack_canary,%ecx
12844+#ifdef CONFIG_SMP
12845+ addl $__per_cpu_load,%ecx
12846+#endif
12847 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
12848 shrl $16, %ecx
12849 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
12850 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
12851 1:
12852-#endif
12853 movl $(__KERNEL_STACK_CANARY),%eax
12854+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12855+ movl $(__USER_DS),%eax
12856+#else
12857+ xorl %eax,%eax
12858+#endif
12859 movl %eax,%gs
12860
12861 xorl %eax,%eax # Clear LDT
12862@@ -558,22 +639,22 @@ early_page_fault:
12863 jmp early_fault
12864
12865 early_fault:
12866- cld
12867 #ifdef CONFIG_PRINTK
12868+ cmpl $1,%ss:early_recursion_flag
12869+ je hlt_loop
12870+ incl %ss:early_recursion_flag
12871+ cld
12872 pusha
12873 movl $(__KERNEL_DS),%eax
12874 movl %eax,%ds
12875 movl %eax,%es
12876- cmpl $2,early_recursion_flag
12877- je hlt_loop
12878- incl early_recursion_flag
12879 movl %cr2,%eax
12880 pushl %eax
12881 pushl %edx /* trapno */
12882 pushl $fault_msg
12883 call printk
12884+; call dump_stack
12885 #endif
12886- call dump_stack
12887 hlt_loop:
12888 hlt
12889 jmp hlt_loop
12890@@ -581,8 +662,11 @@ hlt_loop:
12891 /* This is the default interrupt "handler" :-) */
12892 ALIGN
12893 ignore_int:
12894- cld
12895 #ifdef CONFIG_PRINTK
12896+ cmpl $2,%ss:early_recursion_flag
12897+ je hlt_loop
12898+ incl %ss:early_recursion_flag
12899+ cld
12900 pushl %eax
12901 pushl %ecx
12902 pushl %edx
12903@@ -591,9 +675,6 @@ ignore_int:
12904 movl $(__KERNEL_DS),%eax
12905 movl %eax,%ds
12906 movl %eax,%es
12907- cmpl $2,early_recursion_flag
12908- je hlt_loop
12909- incl early_recursion_flag
12910 pushl 16(%esp)
12911 pushl 24(%esp)
12912 pushl 32(%esp)
12913@@ -622,29 +703,43 @@ ENTRY(initial_code)
12914 /*
12915 * BSS section
12916 */
12917-__PAGE_ALIGNED_BSS
12918- .align PAGE_SIZE
12919 #ifdef CONFIG_X86_PAE
12920+.section .initial_pg_pmd,"a",@progbits
12921 initial_pg_pmd:
12922 .fill 1024*KPMDS,4,0
12923 #else
12924+.section .initial_page_table,"a",@progbits
12925 ENTRY(initial_page_table)
12926 .fill 1024,4,0
12927 #endif
12928+.section .initial_pg_fixmap,"a",@progbits
12929 initial_pg_fixmap:
12930 .fill 1024,4,0
12931+.section .empty_zero_page,"a",@progbits
12932 ENTRY(empty_zero_page)
12933 .fill 4096,1,0
12934+.section .swapper_pg_dir,"a",@progbits
12935 ENTRY(swapper_pg_dir)
12936+#ifdef CONFIG_X86_PAE
12937+ .fill 4,8,0
12938+#else
12939 .fill 1024,4,0
12940+#endif
12941+
12942+/*
12943+ * The IDT has to be page-aligned to simplify the Pentium
12944+ * F0 0F bug workaround.. We have a special link segment
12945+ * for this.
12946+ */
12947+.section .idt,"a",@progbits
12948+ENTRY(idt_table)
12949+ .fill 256,8,0
12950
12951 /*
12952 * This starts the data section.
12953 */
12954 #ifdef CONFIG_X86_PAE
12955-__PAGE_ALIGNED_DATA
12956- /* Page-aligned for the benefit of paravirt? */
12957- .align PAGE_SIZE
12958+.section .initial_page_table,"a",@progbits
12959 ENTRY(initial_page_table)
12960 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
12961 # if KPMDS == 3
12962@@ -663,18 +758,27 @@ ENTRY(initial_page_table)
12963 # error "Kernel PMDs should be 1, 2 or 3"
12964 # endif
12965 .align PAGE_SIZE /* needs to be page-sized too */
12966+
12967+#ifdef CONFIG_PAX_PER_CPU_PGD
12968+ENTRY(cpu_pgd)
12969+ .rept NR_CPUS
12970+ .fill 4,8,0
12971+ .endr
12972+#endif
12973+
12974 #endif
12975
12976 .data
12977 .balign 4
12978 ENTRY(stack_start)
12979- .long init_thread_union+THREAD_SIZE
12980+ .long init_thread_union+THREAD_SIZE-8
12981+
12982+ready: .byte 0
12983
12984+.section .rodata,"a",@progbits
12985 early_recursion_flag:
12986 .long 0
12987
12988-ready: .byte 0
12989-
12990 int_msg:
12991 .asciz "Unknown interrupt or fault at: %p %p %p\n"
12992
12993@@ -707,7 +811,7 @@ fault_msg:
12994 .word 0 # 32 bit align gdt_desc.address
12995 boot_gdt_descr:
12996 .word __BOOT_DS+7
12997- .long boot_gdt - __PAGE_OFFSET
12998+ .long pa(boot_gdt)
12999
13000 .word 0 # 32-bit align idt_desc.address
13001 idt_descr:
13002@@ -718,7 +822,7 @@ idt_descr:
13003 .word 0 # 32 bit align gdt_desc.address
13004 ENTRY(early_gdt_descr)
13005 .word GDT_ENTRIES*8-1
13006- .long gdt_page /* Overwritten for secondary CPUs */
13007+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
13008
13009 /*
13010 * The boot_gdt must mirror the equivalent in setup.S and is
13011@@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
13012 .align L1_CACHE_BYTES
13013 ENTRY(boot_gdt)
13014 .fill GDT_ENTRY_BOOT_CS,8,0
13015- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
13016- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
13017+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
13018+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
13019+
13020+ .align PAGE_SIZE_asm
13021+ENTRY(cpu_gdt_table)
13022+ .rept NR_CPUS
13023+ .quad 0x0000000000000000 /* NULL descriptor */
13024+ .quad 0x0000000000000000 /* 0x0b reserved */
13025+ .quad 0x0000000000000000 /* 0x13 reserved */
13026+ .quad 0x0000000000000000 /* 0x1b reserved */
13027+
13028+#ifdef CONFIG_PAX_KERNEXEC
13029+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
13030+#else
13031+ .quad 0x0000000000000000 /* 0x20 unused */
13032+#endif
13033+
13034+ .quad 0x0000000000000000 /* 0x28 unused */
13035+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
13036+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
13037+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
13038+ .quad 0x0000000000000000 /* 0x4b reserved */
13039+ .quad 0x0000000000000000 /* 0x53 reserved */
13040+ .quad 0x0000000000000000 /* 0x5b reserved */
13041+
13042+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
13043+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
13044+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
13045+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
13046+
13047+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
13048+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
13049+
13050+ /*
13051+ * Segments used for calling PnP BIOS have byte granularity.
13052+ * The code segments and data segments have fixed 64k limits,
13053+ * the transfer segment sizes are set at run time.
13054+ */
13055+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
13056+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
13057+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
13058+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
13059+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
13060+
13061+ /*
13062+ * The APM segments have byte granularity and their bases
13063+ * are set at run time. All have 64k limits.
13064+ */
13065+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
13066+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
13067+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
13068+
13069+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
13070+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
13071+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
13072+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
13073+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
13074+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
13075+
13076+ /* Be sure this is zeroed to avoid false validations in Xen */
13077+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
13078+ .endr
13079diff -urNp linux-3.0.4/arch/x86/kernel/head_64.S linux-3.0.4/arch/x86/kernel/head_64.S
13080--- linux-3.0.4/arch/x86/kernel/head_64.S 2011-07-21 22:17:23.000000000 -0400
13081+++ linux-3.0.4/arch/x86/kernel/head_64.S 2011-08-23 21:47:55.000000000 -0400
13082@@ -19,6 +19,7 @@
13083 #include <asm/cache.h>
13084 #include <asm/processor-flags.h>
13085 #include <asm/percpu.h>
13086+#include <asm/cpufeature.h>
13087
13088 #ifdef CONFIG_PARAVIRT
13089 #include <asm/asm-offsets.h>
13090@@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
13091 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
13092 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
13093 L3_START_KERNEL = pud_index(__START_KERNEL_map)
13094+L4_VMALLOC_START = pgd_index(VMALLOC_START)
13095+L3_VMALLOC_START = pud_index(VMALLOC_START)
13096+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
13097+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
13098
13099 .text
13100 __HEAD
13101@@ -85,35 +90,22 @@ startup_64:
13102 */
13103 addq %rbp, init_level4_pgt + 0(%rip)
13104 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
13105+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
13106+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
13107 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
13108
13109 addq %rbp, level3_ident_pgt + 0(%rip)
13110+#ifndef CONFIG_XEN
13111+ addq %rbp, level3_ident_pgt + 8(%rip)
13112+#endif
13113
13114- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
13115- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
13116+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
13117
13118- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
13119+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
13120+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
13121
13122- /* Add an Identity mapping if I am above 1G */
13123- leaq _text(%rip), %rdi
13124- andq $PMD_PAGE_MASK, %rdi
13125-
13126- movq %rdi, %rax
13127- shrq $PUD_SHIFT, %rax
13128- andq $(PTRS_PER_PUD - 1), %rax
13129- jz ident_complete
13130-
13131- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
13132- leaq level3_ident_pgt(%rip), %rbx
13133- movq %rdx, 0(%rbx, %rax, 8)
13134-
13135- movq %rdi, %rax
13136- shrq $PMD_SHIFT, %rax
13137- andq $(PTRS_PER_PMD - 1), %rax
13138- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
13139- leaq level2_spare_pgt(%rip), %rbx
13140- movq %rdx, 0(%rbx, %rax, 8)
13141-ident_complete:
13142+ addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
13143+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
13144
13145 /*
13146 * Fixup the kernel text+data virtual addresses. Note that
13147@@ -160,8 +152,8 @@ ENTRY(secondary_startup_64)
13148 * after the boot processor executes this code.
13149 */
13150
13151- /* Enable PAE mode and PGE */
13152- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
13153+ /* Enable PAE mode and PSE/PGE */
13154+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
13155 movq %rax, %cr4
13156
13157 /* Setup early boot stage 4 level pagetables. */
13158@@ -183,9 +175,14 @@ ENTRY(secondary_startup_64)
13159 movl $MSR_EFER, %ecx
13160 rdmsr
13161 btsl $_EFER_SCE, %eax /* Enable System Call */
13162- btl $20,%edi /* No Execute supported? */
13163+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
13164 jnc 1f
13165 btsl $_EFER_NX, %eax
13166+ leaq init_level4_pgt(%rip), %rdi
13167+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
13168+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
13169+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
13170+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
13171 1: wrmsr /* Make changes effective */
13172
13173 /* Setup cr0 */
13174@@ -269,7 +266,7 @@ ENTRY(secondary_startup_64)
13175 bad_address:
13176 jmp bad_address
13177
13178- .section ".init.text","ax"
13179+ __INIT
13180 #ifdef CONFIG_EARLY_PRINTK
13181 .globl early_idt_handlers
13182 early_idt_handlers:
13183@@ -314,18 +311,23 @@ ENTRY(early_idt_handler)
13184 #endif /* EARLY_PRINTK */
13185 1: hlt
13186 jmp 1b
13187+ .previous
13188
13189 #ifdef CONFIG_EARLY_PRINTK
13190+ __INITDATA
13191 early_recursion_flag:
13192 .long 0
13193+ .previous
13194
13195+ .section .rodata,"a",@progbits
13196 early_idt_msg:
13197 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
13198 early_idt_ripmsg:
13199 .asciz "RIP %s\n"
13200-#endif /* CONFIG_EARLY_PRINTK */
13201 .previous
13202+#endif /* CONFIG_EARLY_PRINTK */
13203
13204+ .section .rodata,"a",@progbits
13205 #define NEXT_PAGE(name) \
13206 .balign PAGE_SIZE; \
13207 ENTRY(name)
13208@@ -338,7 +340,6 @@ ENTRY(name)
13209 i = i + 1 ; \
13210 .endr
13211
13212- .data
13213 /*
13214 * This default setting generates an ident mapping at address 0x100000
13215 * and a mapping for the kernel that precisely maps virtual address
13216@@ -349,13 +350,36 @@ NEXT_PAGE(init_level4_pgt)
13217 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13218 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
13219 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13220+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
13221+ .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
13222+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
13223+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13224 .org init_level4_pgt + L4_START_KERNEL*8, 0
13225 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
13226 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
13227
13228+#ifdef CONFIG_PAX_PER_CPU_PGD
13229+NEXT_PAGE(cpu_pgd)
13230+ .rept NR_CPUS
13231+ .fill 512,8,0
13232+ .endr
13233+#endif
13234+
13235 NEXT_PAGE(level3_ident_pgt)
13236 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13237+#ifdef CONFIG_XEN
13238 .fill 511,8,0
13239+#else
13240+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
13241+ .fill 510,8,0
13242+#endif
13243+
13244+NEXT_PAGE(level3_vmalloc_pgt)
13245+ .fill 512,8,0
13246+
13247+NEXT_PAGE(level3_vmemmap_pgt)
13248+ .fill L3_VMEMMAP_START,8,0
13249+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13250
13251 NEXT_PAGE(level3_kernel_pgt)
13252 .fill L3_START_KERNEL,8,0
13253@@ -363,20 +387,23 @@ NEXT_PAGE(level3_kernel_pgt)
13254 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
13255 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13256
13257+NEXT_PAGE(level2_vmemmap_pgt)
13258+ .fill 512,8,0
13259+
13260 NEXT_PAGE(level2_fixmap_pgt)
13261- .fill 506,8,0
13262- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13263- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
13264- .fill 5,8,0
13265+ .fill 507,8,0
13266+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
13267+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
13268+ .fill 4,8,0
13269
13270-NEXT_PAGE(level1_fixmap_pgt)
13271+NEXT_PAGE(level1_vsyscall_pgt)
13272 .fill 512,8,0
13273
13274-NEXT_PAGE(level2_ident_pgt)
13275- /* Since I easily can, map the first 1G.
13276+ /* Since I easily can, map the first 2G.
13277 * Don't set NX because code runs from these pages.
13278 */
13279- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
13280+NEXT_PAGE(level2_ident_pgt)
13281+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
13282
13283 NEXT_PAGE(level2_kernel_pgt)
13284 /*
13285@@ -389,33 +416,55 @@ NEXT_PAGE(level2_kernel_pgt)
13286 * If you want to increase this then increase MODULES_VADDR
13287 * too.)
13288 */
13289- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
13290- KERNEL_IMAGE_SIZE/PMD_SIZE)
13291-
13292-NEXT_PAGE(level2_spare_pgt)
13293- .fill 512, 8, 0
13294+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
13295
13296 #undef PMDS
13297 #undef NEXT_PAGE
13298
13299- .data
13300+ .align PAGE_SIZE
13301+ENTRY(cpu_gdt_table)
13302+ .rept NR_CPUS
13303+ .quad 0x0000000000000000 /* NULL descriptor */
13304+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
13305+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
13306+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
13307+ .quad 0x00cffb000000ffff /* __USER32_CS */
13308+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
13309+ .quad 0x00affb000000ffff /* __USER_CS */
13310+
13311+#ifdef CONFIG_PAX_KERNEXEC
13312+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
13313+#else
13314+ .quad 0x0 /* unused */
13315+#endif
13316+
13317+ .quad 0,0 /* TSS */
13318+ .quad 0,0 /* LDT */
13319+ .quad 0,0,0 /* three TLS descriptors */
13320+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
13321+ /* asm/segment.h:GDT_ENTRIES must match this */
13322+
13323+ /* zero the remaining page */
13324+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
13325+ .endr
13326+
13327 .align 16
13328 .globl early_gdt_descr
13329 early_gdt_descr:
13330 .word GDT_ENTRIES*8-1
13331 early_gdt_descr_base:
13332- .quad INIT_PER_CPU_VAR(gdt_page)
13333+ .quad cpu_gdt_table
13334
13335 ENTRY(phys_base)
13336 /* This must match the first entry in level2_kernel_pgt */
13337 .quad 0x0000000000000000
13338
13339 #include "../../x86/xen/xen-head.S"
13340-
13341- .section .bss, "aw", @nobits
13342+
13343+ .section .rodata,"a",@progbits
13344 .align L1_CACHE_BYTES
13345 ENTRY(idt_table)
13346- .skip IDT_ENTRIES * 16
13347+ .fill 512,8,0
13348
13349 __PAGE_ALIGNED_BSS
13350 .align PAGE_SIZE
13351diff -urNp linux-3.0.4/arch/x86/kernel/i386_ksyms_32.c linux-3.0.4/arch/x86/kernel/i386_ksyms_32.c
13352--- linux-3.0.4/arch/x86/kernel/i386_ksyms_32.c 2011-07-21 22:17:23.000000000 -0400
13353+++ linux-3.0.4/arch/x86/kernel/i386_ksyms_32.c 2011-08-23 21:47:55.000000000 -0400
13354@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
13355 EXPORT_SYMBOL(cmpxchg8b_emu);
13356 #endif
13357
13358+EXPORT_SYMBOL_GPL(cpu_gdt_table);
13359+
13360 /* Networking helper routines. */
13361 EXPORT_SYMBOL(csum_partial_copy_generic);
13362+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
13363+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
13364
13365 EXPORT_SYMBOL(__get_user_1);
13366 EXPORT_SYMBOL(__get_user_2);
13367@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
13368
13369 EXPORT_SYMBOL(csum_partial);
13370 EXPORT_SYMBOL(empty_zero_page);
13371+
13372+#ifdef CONFIG_PAX_KERNEXEC
13373+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
13374+#endif
13375diff -urNp linux-3.0.4/arch/x86/kernel/i8259.c linux-3.0.4/arch/x86/kernel/i8259.c
13376--- linux-3.0.4/arch/x86/kernel/i8259.c 2011-07-21 22:17:23.000000000 -0400
13377+++ linux-3.0.4/arch/x86/kernel/i8259.c 2011-08-23 21:47:55.000000000 -0400
13378@@ -210,7 +210,7 @@ spurious_8259A_irq:
13379 "spurious 8259A interrupt: IRQ%d.\n", irq);
13380 spurious_irq_mask |= irqmask;
13381 }
13382- atomic_inc(&irq_err_count);
13383+ atomic_inc_unchecked(&irq_err_count);
13384 /*
13385 * Theoretically we do not have to handle this IRQ,
13386 * but in Linux this does not cause problems and is
13387diff -urNp linux-3.0.4/arch/x86/kernel/init_task.c linux-3.0.4/arch/x86/kernel/init_task.c
13388--- linux-3.0.4/arch/x86/kernel/init_task.c 2011-07-21 22:17:23.000000000 -0400
13389+++ linux-3.0.4/arch/x86/kernel/init_task.c 2011-08-23 21:47:55.000000000 -0400
13390@@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
13391 * way process stacks are handled. This is done by having a special
13392 * "init_task" linker map entry..
13393 */
13394-union thread_union init_thread_union __init_task_data =
13395- { INIT_THREAD_INFO(init_task) };
13396+union thread_union init_thread_union __init_task_data;
13397
13398 /*
13399 * Initial task structure.
13400@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
13401 * section. Since TSS's are completely CPU-local, we want them
13402 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
13403 */
13404-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
13405-
13406+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
13407+EXPORT_SYMBOL(init_tss);
13408diff -urNp linux-3.0.4/arch/x86/kernel/ioport.c linux-3.0.4/arch/x86/kernel/ioport.c
13409--- linux-3.0.4/arch/x86/kernel/ioport.c 2011-07-21 22:17:23.000000000 -0400
13410+++ linux-3.0.4/arch/x86/kernel/ioport.c 2011-08-23 21:48:14.000000000 -0400
13411@@ -6,6 +6,7 @@
13412 #include <linux/sched.h>
13413 #include <linux/kernel.h>
13414 #include <linux/capability.h>
13415+#include <linux/security.h>
13416 #include <linux/errno.h>
13417 #include <linux/types.h>
13418 #include <linux/ioport.h>
13419@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long
13420
13421 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
13422 return -EINVAL;
13423+#ifdef CONFIG_GRKERNSEC_IO
13424+ if (turn_on && grsec_disable_privio) {
13425+ gr_handle_ioperm();
13426+ return -EPERM;
13427+ }
13428+#endif
13429 if (turn_on && !capable(CAP_SYS_RAWIO))
13430 return -EPERM;
13431
13432@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long
13433 * because the ->io_bitmap_max value must match the bitmap
13434 * contents:
13435 */
13436- tss = &per_cpu(init_tss, get_cpu());
13437+ tss = init_tss + get_cpu();
13438
13439 if (turn_on)
13440 bitmap_clear(t->io_bitmap_ptr, from, num);
13441@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct
13442 return -EINVAL;
13443 /* Trying to gain more privileges? */
13444 if (level > old) {
13445+#ifdef CONFIG_GRKERNSEC_IO
13446+ if (grsec_disable_privio) {
13447+ gr_handle_iopl();
13448+ return -EPERM;
13449+ }
13450+#endif
13451 if (!capable(CAP_SYS_RAWIO))
13452 return -EPERM;
13453 }
13454diff -urNp linux-3.0.4/arch/x86/kernel/irq_32.c linux-3.0.4/arch/x86/kernel/irq_32.c
13455--- linux-3.0.4/arch/x86/kernel/irq_32.c 2011-07-21 22:17:23.000000000 -0400
13456+++ linux-3.0.4/arch/x86/kernel/irq_32.c 2011-08-23 21:47:55.000000000 -0400
13457@@ -36,7 +36,7 @@ static int check_stack_overflow(void)
13458 __asm__ __volatile__("andl %%esp,%0" :
13459 "=r" (sp) : "0" (THREAD_SIZE - 1));
13460
13461- return sp < (sizeof(struct thread_info) + STACK_WARN);
13462+ return sp < STACK_WARN;
13463 }
13464
13465 static void print_stack_overflow(void)
13466@@ -54,8 +54,8 @@ static inline void print_stack_overflow(
13467 * per-CPU IRQ handling contexts (thread information and stack)
13468 */
13469 union irq_ctx {
13470- struct thread_info tinfo;
13471- u32 stack[THREAD_SIZE/sizeof(u32)];
13472+ unsigned long previous_esp;
13473+ u32 stack[THREAD_SIZE/sizeof(u32)];
13474 } __attribute__((aligned(THREAD_SIZE)));
13475
13476 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
13477@@ -75,10 +75,9 @@ static void call_on_stack(void *func, vo
13478 static inline int
13479 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
13480 {
13481- union irq_ctx *curctx, *irqctx;
13482+ union irq_ctx *irqctx;
13483 u32 *isp, arg1, arg2;
13484
13485- curctx = (union irq_ctx *) current_thread_info();
13486 irqctx = __this_cpu_read(hardirq_ctx);
13487
13488 /*
13489@@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struc
13490 * handler) we can't do that and just have to keep using the
13491 * current stack (which is the irq stack already after all)
13492 */
13493- if (unlikely(curctx == irqctx))
13494+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
13495 return 0;
13496
13497 /* build the stack frame on the IRQ stack */
13498- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13499- irqctx->tinfo.task = curctx->tinfo.task;
13500- irqctx->tinfo.previous_esp = current_stack_pointer;
13501+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13502+ irqctx->previous_esp = current_stack_pointer;
13503
13504- /*
13505- * Copy the softirq bits in preempt_count so that the
13506- * softirq checks work in the hardirq context.
13507- */
13508- irqctx->tinfo.preempt_count =
13509- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
13510- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
13511+#ifdef CONFIG_PAX_MEMORY_UDEREF
13512+ __set_fs(MAKE_MM_SEG(0));
13513+#endif
13514
13515 if (unlikely(overflow))
13516 call_on_stack(print_stack_overflow, isp);
13517@@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struc
13518 : "0" (irq), "1" (desc), "2" (isp),
13519 "D" (desc->handle_irq)
13520 : "memory", "cc", "ecx");
13521+
13522+#ifdef CONFIG_PAX_MEMORY_UDEREF
13523+ __set_fs(current_thread_info()->addr_limit);
13524+#endif
13525+
13526 return 1;
13527 }
13528
13529@@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struc
13530 */
13531 void __cpuinit irq_ctx_init(int cpu)
13532 {
13533- union irq_ctx *irqctx;
13534-
13535 if (per_cpu(hardirq_ctx, cpu))
13536 return;
13537
13538- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13539- THREAD_FLAGS,
13540- THREAD_ORDER));
13541- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13542- irqctx->tinfo.cpu = cpu;
13543- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
13544- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13545-
13546- per_cpu(hardirq_ctx, cpu) = irqctx;
13547-
13548- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13549- THREAD_FLAGS,
13550- THREAD_ORDER));
13551- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13552- irqctx->tinfo.cpu = cpu;
13553- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13554-
13555- per_cpu(softirq_ctx, cpu) = irqctx;
13556+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13557+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13558
13559 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
13560 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
13561@@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
13562 asmlinkage void do_softirq(void)
13563 {
13564 unsigned long flags;
13565- struct thread_info *curctx;
13566 union irq_ctx *irqctx;
13567 u32 *isp;
13568
13569@@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
13570 local_irq_save(flags);
13571
13572 if (local_softirq_pending()) {
13573- curctx = current_thread_info();
13574 irqctx = __this_cpu_read(softirq_ctx);
13575- irqctx->tinfo.task = curctx->task;
13576- irqctx->tinfo.previous_esp = current_stack_pointer;
13577+ irqctx->previous_esp = current_stack_pointer;
13578
13579 /* build the stack frame on the softirq stack */
13580- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13581+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13582+
13583+#ifdef CONFIG_PAX_MEMORY_UDEREF
13584+ __set_fs(MAKE_MM_SEG(0));
13585+#endif
13586
13587 call_on_stack(__do_softirq, isp);
13588+
13589+#ifdef CONFIG_PAX_MEMORY_UDEREF
13590+ __set_fs(current_thread_info()->addr_limit);
13591+#endif
13592+
13593 /*
13594 * Shouldn't happen, we returned above if in_interrupt():
13595 */
13596diff -urNp linux-3.0.4/arch/x86/kernel/irq.c linux-3.0.4/arch/x86/kernel/irq.c
13597--- linux-3.0.4/arch/x86/kernel/irq.c 2011-07-21 22:17:23.000000000 -0400
13598+++ linux-3.0.4/arch/x86/kernel/irq.c 2011-08-23 21:47:55.000000000 -0400
13599@@ -17,7 +17,7 @@
13600 #include <asm/mce.h>
13601 #include <asm/hw_irq.h>
13602
13603-atomic_t irq_err_count;
13604+atomic_unchecked_t irq_err_count;
13605
13606 /* Function pointer for generic interrupt vector handling */
13607 void (*x86_platform_ipi_callback)(void) = NULL;
13608@@ -116,9 +116,9 @@ int arch_show_interrupts(struct seq_file
13609 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
13610 seq_printf(p, " Machine check polls\n");
13611 #endif
13612- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
13613+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
13614 #if defined(CONFIG_X86_IO_APIC)
13615- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
13616+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
13617 #endif
13618 return 0;
13619 }
13620@@ -158,10 +158,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
13621
13622 u64 arch_irq_stat(void)
13623 {
13624- u64 sum = atomic_read(&irq_err_count);
13625+ u64 sum = atomic_read_unchecked(&irq_err_count);
13626
13627 #ifdef CONFIG_X86_IO_APIC
13628- sum += atomic_read(&irq_mis_count);
13629+ sum += atomic_read_unchecked(&irq_mis_count);
13630 #endif
13631 return sum;
13632 }
13633diff -urNp linux-3.0.4/arch/x86/kernel/kgdb.c linux-3.0.4/arch/x86/kernel/kgdb.c
13634--- linux-3.0.4/arch/x86/kernel/kgdb.c 2011-07-21 22:17:23.000000000 -0400
13635+++ linux-3.0.4/arch/x86/kernel/kgdb.c 2011-08-23 21:47:55.000000000 -0400
13636@@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem,
13637 #ifdef CONFIG_X86_32
13638 switch (regno) {
13639 case GDB_SS:
13640- if (!user_mode_vm(regs))
13641+ if (!user_mode(regs))
13642 *(unsigned long *)mem = __KERNEL_DS;
13643 break;
13644 case GDB_SP:
13645- if (!user_mode_vm(regs))
13646+ if (!user_mode(regs))
13647 *(unsigned long *)mem = kernel_stack_pointer(regs);
13648 break;
13649 case GDB_GS:
13650@@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vec
13651 case 'k':
13652 /* clear the trace bit */
13653 linux_regs->flags &= ~X86_EFLAGS_TF;
13654- atomic_set(&kgdb_cpu_doing_single_step, -1);
13655+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
13656
13657 /* set the trace bit if we're stepping */
13658 if (remcomInBuffer[0] == 's') {
13659 linux_regs->flags |= X86_EFLAGS_TF;
13660- atomic_set(&kgdb_cpu_doing_single_step,
13661+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
13662 raw_smp_processor_id());
13663 }
13664
13665@@ -534,7 +534,7 @@ static int __kgdb_notify(struct die_args
13666 return NOTIFY_DONE;
13667
13668 case DIE_DEBUG:
13669- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
13670+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
13671 if (user_mode(regs))
13672 return single_step_cont(regs, args);
13673 break;
13674diff -urNp linux-3.0.4/arch/x86/kernel/kprobes.c linux-3.0.4/arch/x86/kernel/kprobes.c
13675--- linux-3.0.4/arch/x86/kernel/kprobes.c 2011-07-21 22:17:23.000000000 -0400
13676+++ linux-3.0.4/arch/x86/kernel/kprobes.c 2011-08-23 21:47:55.000000000 -0400
13677@@ -115,8 +115,11 @@ static void __kprobes __synthesize_relat
13678 } __attribute__((packed)) *insn;
13679
13680 insn = (struct __arch_relative_insn *)from;
13681+
13682+ pax_open_kernel();
13683 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
13684 insn->op = op;
13685+ pax_close_kernel();
13686 }
13687
13688 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
13689@@ -153,7 +156,7 @@ static int __kprobes can_boost(kprobe_op
13690 kprobe_opcode_t opcode;
13691 kprobe_opcode_t *orig_opcodes = opcodes;
13692
13693- if (search_exception_tables((unsigned long)opcodes))
13694+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
13695 return 0; /* Page fault may occur on this address. */
13696
13697 retry:
13698@@ -314,7 +317,9 @@ static int __kprobes __copy_instruction(
13699 }
13700 }
13701 insn_get_length(&insn);
13702+ pax_open_kernel();
13703 memcpy(dest, insn.kaddr, insn.length);
13704+ pax_close_kernel();
13705
13706 #ifdef CONFIG_X86_64
13707 if (insn_rip_relative(&insn)) {
13708@@ -338,7 +343,9 @@ static int __kprobes __copy_instruction(
13709 (u8 *) dest;
13710 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
13711 disp = (u8 *) dest + insn_offset_displacement(&insn);
13712+ pax_open_kernel();
13713 *(s32 *) disp = (s32) newdisp;
13714+ pax_close_kernel();
13715 }
13716 #endif
13717 return insn.length;
13718@@ -352,12 +359,12 @@ static void __kprobes arch_copy_kprobe(s
13719 */
13720 __copy_instruction(p->ainsn.insn, p->addr, 0);
13721
13722- if (can_boost(p->addr))
13723+ if (can_boost(ktla_ktva(p->addr)))
13724 p->ainsn.boostable = 0;
13725 else
13726 p->ainsn.boostable = -1;
13727
13728- p->opcode = *p->addr;
13729+ p->opcode = *(ktla_ktva(p->addr));
13730 }
13731
13732 int __kprobes arch_prepare_kprobe(struct kprobe *p)
13733@@ -474,7 +481,7 @@ static void __kprobes setup_singlestep(s
13734 * nor set current_kprobe, because it doesn't use single
13735 * stepping.
13736 */
13737- regs->ip = (unsigned long)p->ainsn.insn;
13738+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13739 preempt_enable_no_resched();
13740 return;
13741 }
13742@@ -493,7 +500,7 @@ static void __kprobes setup_singlestep(s
13743 if (p->opcode == BREAKPOINT_INSTRUCTION)
13744 regs->ip = (unsigned long)p->addr;
13745 else
13746- regs->ip = (unsigned long)p->ainsn.insn;
13747+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13748 }
13749
13750 /*
13751@@ -572,7 +579,7 @@ static int __kprobes kprobe_handler(stru
13752 setup_singlestep(p, regs, kcb, 0);
13753 return 1;
13754 }
13755- } else if (*addr != BREAKPOINT_INSTRUCTION) {
13756+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
13757 /*
13758 * The breakpoint instruction was removed right
13759 * after we hit it. Another cpu has removed
13760@@ -817,7 +824,7 @@ static void __kprobes resume_execution(s
13761 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
13762 {
13763 unsigned long *tos = stack_addr(regs);
13764- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
13765+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
13766 unsigned long orig_ip = (unsigned long)p->addr;
13767 kprobe_opcode_t *insn = p->ainsn.insn;
13768
13769@@ -999,7 +1006,7 @@ int __kprobes kprobe_exceptions_notify(s
13770 struct die_args *args = data;
13771 int ret = NOTIFY_DONE;
13772
13773- if (args->regs && user_mode_vm(args->regs))
13774+ if (args->regs && user_mode(args->regs))
13775 return ret;
13776
13777 switch (val) {
13778@@ -1381,7 +1388,7 @@ int __kprobes arch_prepare_optimized_kpr
13779 * Verify if the address gap is in 2GB range, because this uses
13780 * a relative jump.
13781 */
13782- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
13783+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
13784 if (abs(rel) > 0x7fffffff)
13785 return -ERANGE;
13786
13787@@ -1402,11 +1409,11 @@ int __kprobes arch_prepare_optimized_kpr
13788 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
13789
13790 /* Set probe function call */
13791- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
13792+ synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
13793
13794 /* Set returning jmp instruction at the tail of out-of-line buffer */
13795 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
13796- (u8 *)op->kp.addr + op->optinsn.size);
13797+ (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
13798
13799 flush_icache_range((unsigned long) buf,
13800 (unsigned long) buf + TMPL_END_IDX +
13801@@ -1428,7 +1435,7 @@ static void __kprobes setup_optimize_kpr
13802 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
13803
13804 /* Backup instructions which will be replaced by jump address */
13805- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
13806+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
13807 RELATIVE_ADDR_SIZE);
13808
13809 insn_buf[0] = RELATIVEJUMP_OPCODE;
13810diff -urNp linux-3.0.4/arch/x86/kernel/kvm.c linux-3.0.4/arch/x86/kernel/kvm.c
13811--- linux-3.0.4/arch/x86/kernel/kvm.c 2011-07-21 22:17:23.000000000 -0400
13812+++ linux-3.0.4/arch/x86/kernel/kvm.c 2011-08-24 18:10:12.000000000 -0400
13813@@ -426,6 +426,7 @@ static void __init paravirt_ops_setup(vo
13814 pv_mmu_ops.set_pud = kvm_set_pud;
13815 #if PAGETABLE_LEVELS == 4
13816 pv_mmu_ops.set_pgd = kvm_set_pgd;
13817+ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
13818 #endif
13819 #endif
13820 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
13821diff -urNp linux-3.0.4/arch/x86/kernel/ldt.c linux-3.0.4/arch/x86/kernel/ldt.c
13822--- linux-3.0.4/arch/x86/kernel/ldt.c 2011-07-21 22:17:23.000000000 -0400
13823+++ linux-3.0.4/arch/x86/kernel/ldt.c 2011-08-23 21:47:55.000000000 -0400
13824@@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, i
13825 if (reload) {
13826 #ifdef CONFIG_SMP
13827 preempt_disable();
13828- load_LDT(pc);
13829+ load_LDT_nolock(pc);
13830 if (!cpumask_equal(mm_cpumask(current->mm),
13831 cpumask_of(smp_processor_id())))
13832 smp_call_function(flush_ldt, current->mm, 1);
13833 preempt_enable();
13834 #else
13835- load_LDT(pc);
13836+ load_LDT_nolock(pc);
13837 #endif
13838 }
13839 if (oldsize) {
13840@@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t
13841 return err;
13842
13843 for (i = 0; i < old->size; i++)
13844- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
13845+ write_ldt_entry(new->ldt, i, old->ldt + i);
13846 return 0;
13847 }
13848
13849@@ -116,6 +116,24 @@ int init_new_context(struct task_struct
13850 retval = copy_ldt(&mm->context, &old_mm->context);
13851 mutex_unlock(&old_mm->context.lock);
13852 }
13853+
13854+ if (tsk == current) {
13855+ mm->context.vdso = 0;
13856+
13857+#ifdef CONFIG_X86_32
13858+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
13859+ mm->context.user_cs_base = 0UL;
13860+ mm->context.user_cs_limit = ~0UL;
13861+
13862+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
13863+ cpus_clear(mm->context.cpu_user_cs_mask);
13864+#endif
13865+
13866+#endif
13867+#endif
13868+
13869+ }
13870+
13871 return retval;
13872 }
13873
13874@@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, u
13875 }
13876 }
13877
13878+#ifdef CONFIG_PAX_SEGMEXEC
13879+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
13880+ error = -EINVAL;
13881+ goto out_unlock;
13882+ }
13883+#endif
13884+
13885 fill_ldt(&ldt, &ldt_info);
13886 if (oldmode)
13887 ldt.avl = 0;
13888diff -urNp linux-3.0.4/arch/x86/kernel/machine_kexec_32.c linux-3.0.4/arch/x86/kernel/machine_kexec_32.c
13889--- linux-3.0.4/arch/x86/kernel/machine_kexec_32.c 2011-07-21 22:17:23.000000000 -0400
13890+++ linux-3.0.4/arch/x86/kernel/machine_kexec_32.c 2011-08-23 21:47:55.000000000 -0400
13891@@ -27,7 +27,7 @@
13892 #include <asm/cacheflush.h>
13893 #include <asm/debugreg.h>
13894
13895-static void set_idt(void *newidt, __u16 limit)
13896+static void set_idt(struct desc_struct *newidt, __u16 limit)
13897 {
13898 struct desc_ptr curidt;
13899
13900@@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16
13901 }
13902
13903
13904-static void set_gdt(void *newgdt, __u16 limit)
13905+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
13906 {
13907 struct desc_ptr curgdt;
13908
13909@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
13910 }
13911
13912 control_page = page_address(image->control_code_page);
13913- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
13914+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
13915
13916 relocate_kernel_ptr = control_page;
13917 page_list[PA_CONTROL_PAGE] = __pa(control_page);
13918diff -urNp linux-3.0.4/arch/x86/kernel/microcode_intel.c linux-3.0.4/arch/x86/kernel/microcode_intel.c
13919--- linux-3.0.4/arch/x86/kernel/microcode_intel.c 2011-07-21 22:17:23.000000000 -0400
13920+++ linux-3.0.4/arch/x86/kernel/microcode_intel.c 2011-10-06 04:17:55.000000000 -0400
13921@@ -440,13 +440,13 @@ static enum ucode_state request_microcod
13922
13923 static int get_ucode_user(void *to, const void *from, size_t n)
13924 {
13925- return copy_from_user(to, from, n);
13926+ return copy_from_user(to, (const void __force_user *)from, n);
13927 }
13928
13929 static enum ucode_state
13930 request_microcode_user(int cpu, const void __user *buf, size_t size)
13931 {
13932- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
13933+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
13934 }
13935
13936 static void microcode_fini_cpu(int cpu)
13937diff -urNp linux-3.0.4/arch/x86/kernel/module.c linux-3.0.4/arch/x86/kernel/module.c
13938--- linux-3.0.4/arch/x86/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
13939+++ linux-3.0.4/arch/x86/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
13940@@ -36,21 +36,66 @@
13941 #define DEBUGP(fmt...)
13942 #endif
13943
13944-void *module_alloc(unsigned long size)
13945+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
13946 {
13947 if (PAGE_ALIGN(size) > MODULES_LEN)
13948 return NULL;
13949 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
13950- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
13951+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
13952 -1, __builtin_return_address(0));
13953 }
13954
13955+void *module_alloc(unsigned long size)
13956+{
13957+
13958+#ifdef CONFIG_PAX_KERNEXEC
13959+ return __module_alloc(size, PAGE_KERNEL);
13960+#else
13961+ return __module_alloc(size, PAGE_KERNEL_EXEC);
13962+#endif
13963+
13964+}
13965+
13966 /* Free memory returned from module_alloc */
13967 void module_free(struct module *mod, void *module_region)
13968 {
13969 vfree(module_region);
13970 }
13971
13972+#ifdef CONFIG_PAX_KERNEXEC
13973+#ifdef CONFIG_X86_32
13974+void *module_alloc_exec(unsigned long size)
13975+{
13976+ struct vm_struct *area;
13977+
13978+ if (size == 0)
13979+ return NULL;
13980+
13981+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
13982+ return area ? area->addr : NULL;
13983+}
13984+EXPORT_SYMBOL(module_alloc_exec);
13985+
13986+void module_free_exec(struct module *mod, void *module_region)
13987+{
13988+ vunmap(module_region);
13989+}
13990+EXPORT_SYMBOL(module_free_exec);
13991+#else
13992+void module_free_exec(struct module *mod, void *module_region)
13993+{
13994+ module_free(mod, module_region);
13995+}
13996+EXPORT_SYMBOL(module_free_exec);
13997+
13998+void *module_alloc_exec(unsigned long size)
13999+{
14000+ return __module_alloc(size, PAGE_KERNEL_RX);
14001+}
14002+EXPORT_SYMBOL(module_alloc_exec);
14003+#endif
14004+#endif
14005+
14006 /* We don't need anything special. */
14007 int module_frob_arch_sections(Elf_Ehdr *hdr,
14008 Elf_Shdr *sechdrs,
14009@@ -70,14 +115,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
14010 unsigned int i;
14011 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
14012 Elf32_Sym *sym;
14013- uint32_t *location;
14014+ uint32_t *plocation, location;
14015
14016 DEBUGP("Applying relocate section %u to %u\n", relsec,
14017 sechdrs[relsec].sh_info);
14018 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
14019 /* This is where to make the change */
14020- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
14021- + rel[i].r_offset;
14022+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
14023+ location = (uint32_t)plocation;
14024+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
14025+ plocation = ktla_ktva((void *)plocation);
14026 /* This is the symbol it is referring to. Note that all
14027 undefined symbols have been resolved. */
14028 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
14029@@ -86,11 +133,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
14030 switch (ELF32_R_TYPE(rel[i].r_info)) {
14031 case R_386_32:
14032 /* We add the value into the location given */
14033- *location += sym->st_value;
14034+ pax_open_kernel();
14035+ *plocation += sym->st_value;
14036+ pax_close_kernel();
14037 break;
14038 case R_386_PC32:
14039 /* Add the value, subtract its postition */
14040- *location += sym->st_value - (uint32_t)location;
14041+ pax_open_kernel();
14042+ *plocation += sym->st_value - location;
14043+ pax_close_kernel();
14044 break;
14045 default:
14046 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
14047@@ -146,21 +197,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
14048 case R_X86_64_NONE:
14049 break;
14050 case R_X86_64_64:
14051+ pax_open_kernel();
14052 *(u64 *)loc = val;
14053+ pax_close_kernel();
14054 break;
14055 case R_X86_64_32:
14056+ pax_open_kernel();
14057 *(u32 *)loc = val;
14058+ pax_close_kernel();
14059 if (val != *(u32 *)loc)
14060 goto overflow;
14061 break;
14062 case R_X86_64_32S:
14063+ pax_open_kernel();
14064 *(s32 *)loc = val;
14065+ pax_close_kernel();
14066 if ((s64)val != *(s32 *)loc)
14067 goto overflow;
14068 break;
14069 case R_X86_64_PC32:
14070 val -= (u64)loc;
14071+ pax_open_kernel();
14072 *(u32 *)loc = val;
14073+ pax_close_kernel();
14074+
14075 #if 0
14076 if ((s64)val != *(s32 *)loc)
14077 goto overflow;
14078diff -urNp linux-3.0.4/arch/x86/kernel/paravirt.c linux-3.0.4/arch/x86/kernel/paravirt.c
14079--- linux-3.0.4/arch/x86/kernel/paravirt.c 2011-07-21 22:17:23.000000000 -0400
14080+++ linux-3.0.4/arch/x86/kernel/paravirt.c 2011-08-23 21:48:14.000000000 -0400
14081@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
14082 {
14083 return x;
14084 }
14085+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
14086+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
14087+#endif
14088
14089 void __init default_banner(void)
14090 {
14091@@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
14092 * corresponding structure. */
14093 static void *get_call_destination(u8 type)
14094 {
14095- struct paravirt_patch_template tmpl = {
14096+ const struct paravirt_patch_template tmpl = {
14097 .pv_init_ops = pv_init_ops,
14098 .pv_time_ops = pv_time_ops,
14099 .pv_cpu_ops = pv_cpu_ops,
14100@@ -133,6 +136,9 @@ static void *get_call_destination(u8 typ
14101 .pv_lock_ops = pv_lock_ops,
14102 #endif
14103 };
14104+
14105+ pax_track_stack();
14106+
14107 return *((void **)&tmpl + type);
14108 }
14109
14110@@ -145,15 +151,19 @@ unsigned paravirt_patch_default(u8 type,
14111 if (opfunc == NULL)
14112 /* If there's no function, patch it with a ud2a (BUG) */
14113 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
14114- else if (opfunc == _paravirt_nop)
14115+ else if (opfunc == (void *)_paravirt_nop)
14116 /* If the operation is a nop, then nop the callsite */
14117 ret = paravirt_patch_nop();
14118
14119 /* identity functions just return their single argument */
14120- else if (opfunc == _paravirt_ident_32)
14121+ else if (opfunc == (void *)_paravirt_ident_32)
14122 ret = paravirt_patch_ident_32(insnbuf, len);
14123- else if (opfunc == _paravirt_ident_64)
14124+ else if (opfunc == (void *)_paravirt_ident_64)
14125 ret = paravirt_patch_ident_64(insnbuf, len);
14126+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
14127+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
14128+ ret = paravirt_patch_ident_64(insnbuf, len);
14129+#endif
14130
14131 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
14132 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
14133@@ -178,7 +188,7 @@ unsigned paravirt_patch_insns(void *insn
14134 if (insn_len > len || start == NULL)
14135 insn_len = len;
14136 else
14137- memcpy(insnbuf, start, insn_len);
14138+ memcpy(insnbuf, ktla_ktva(start), insn_len);
14139
14140 return insn_len;
14141 }
14142@@ -294,22 +304,22 @@ void arch_flush_lazy_mmu_mode(void)
14143 preempt_enable();
14144 }
14145
14146-struct pv_info pv_info = {
14147+struct pv_info pv_info __read_only = {
14148 .name = "bare hardware",
14149 .paravirt_enabled = 0,
14150 .kernel_rpl = 0,
14151 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
14152 };
14153
14154-struct pv_init_ops pv_init_ops = {
14155+struct pv_init_ops pv_init_ops __read_only = {
14156 .patch = native_patch,
14157 };
14158
14159-struct pv_time_ops pv_time_ops = {
14160+struct pv_time_ops pv_time_ops __read_only = {
14161 .sched_clock = native_sched_clock,
14162 };
14163
14164-struct pv_irq_ops pv_irq_ops = {
14165+struct pv_irq_ops pv_irq_ops __read_only = {
14166 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
14167 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
14168 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
14169@@ -321,7 +331,7 @@ struct pv_irq_ops pv_irq_ops = {
14170 #endif
14171 };
14172
14173-struct pv_cpu_ops pv_cpu_ops = {
14174+struct pv_cpu_ops pv_cpu_ops __read_only = {
14175 .cpuid = native_cpuid,
14176 .get_debugreg = native_get_debugreg,
14177 .set_debugreg = native_set_debugreg,
14178@@ -382,21 +392,26 @@ struct pv_cpu_ops pv_cpu_ops = {
14179 .end_context_switch = paravirt_nop,
14180 };
14181
14182-struct pv_apic_ops pv_apic_ops = {
14183+struct pv_apic_ops pv_apic_ops __read_only = {
14184 #ifdef CONFIG_X86_LOCAL_APIC
14185 .startup_ipi_hook = paravirt_nop,
14186 #endif
14187 };
14188
14189-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
14190+#ifdef CONFIG_X86_32
14191+#ifdef CONFIG_X86_PAE
14192+/* 64-bit pagetable entries */
14193+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
14194+#else
14195 /* 32-bit pagetable entries */
14196 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
14197+#endif
14198 #else
14199 /* 64-bit pagetable entries */
14200 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
14201 #endif
14202
14203-struct pv_mmu_ops pv_mmu_ops = {
14204+struct pv_mmu_ops pv_mmu_ops __read_only = {
14205
14206 .read_cr2 = native_read_cr2,
14207 .write_cr2 = native_write_cr2,
14208@@ -446,6 +461,7 @@ struct pv_mmu_ops pv_mmu_ops = {
14209 .make_pud = PTE_IDENT,
14210
14211 .set_pgd = native_set_pgd,
14212+ .set_pgd_batched = native_set_pgd_batched,
14213 #endif
14214 #endif /* PAGETABLE_LEVELS >= 3 */
14215
14216@@ -465,6 +481,12 @@ struct pv_mmu_ops pv_mmu_ops = {
14217 },
14218
14219 .set_fixmap = native_set_fixmap,
14220+
14221+#ifdef CONFIG_PAX_KERNEXEC
14222+ .pax_open_kernel = native_pax_open_kernel,
14223+ .pax_close_kernel = native_pax_close_kernel,
14224+#endif
14225+
14226 };
14227
14228 EXPORT_SYMBOL_GPL(pv_time_ops);
14229diff -urNp linux-3.0.4/arch/x86/kernel/paravirt-spinlocks.c linux-3.0.4/arch/x86/kernel/paravirt-spinlocks.c
14230--- linux-3.0.4/arch/x86/kernel/paravirt-spinlocks.c 2011-07-21 22:17:23.000000000 -0400
14231+++ linux-3.0.4/arch/x86/kernel/paravirt-spinlocks.c 2011-08-23 21:47:55.000000000 -0400
14232@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t
14233 arch_spin_lock(lock);
14234 }
14235
14236-struct pv_lock_ops pv_lock_ops = {
14237+struct pv_lock_ops pv_lock_ops __read_only = {
14238 #ifdef CONFIG_SMP
14239 .spin_is_locked = __ticket_spin_is_locked,
14240 .spin_is_contended = __ticket_spin_is_contended,
14241diff -urNp linux-3.0.4/arch/x86/kernel/pci-iommu_table.c linux-3.0.4/arch/x86/kernel/pci-iommu_table.c
14242--- linux-3.0.4/arch/x86/kernel/pci-iommu_table.c 2011-07-21 22:17:23.000000000 -0400
14243+++ linux-3.0.4/arch/x86/kernel/pci-iommu_table.c 2011-08-23 21:48:14.000000000 -0400
14244@@ -2,7 +2,7 @@
14245 #include <asm/iommu_table.h>
14246 #include <linux/string.h>
14247 #include <linux/kallsyms.h>
14248-
14249+#include <linux/sched.h>
14250
14251 #define DEBUG 1
14252
14253@@ -51,6 +51,8 @@ void __init check_iommu_entries(struct i
14254 {
14255 struct iommu_table_entry *p, *q, *x;
14256
14257+ pax_track_stack();
14258+
14259 /* Simple cyclic dependency checker. */
14260 for (p = start; p < finish; p++) {
14261 q = find_dependents_of(start, finish, p);
14262diff -urNp linux-3.0.4/arch/x86/kernel/process_32.c linux-3.0.4/arch/x86/kernel/process_32.c
14263--- linux-3.0.4/arch/x86/kernel/process_32.c 2011-07-21 22:17:23.000000000 -0400
14264+++ linux-3.0.4/arch/x86/kernel/process_32.c 2011-08-23 21:47:55.000000000 -0400
14265@@ -65,6 +65,7 @@ asmlinkage void ret_from_fork(void) __as
14266 unsigned long thread_saved_pc(struct task_struct *tsk)
14267 {
14268 return ((unsigned long *)tsk->thread.sp)[3];
14269+//XXX return tsk->thread.eip;
14270 }
14271
14272 #ifndef CONFIG_SMP
14273@@ -126,15 +127,14 @@ void __show_regs(struct pt_regs *regs, i
14274 unsigned long sp;
14275 unsigned short ss, gs;
14276
14277- if (user_mode_vm(regs)) {
14278+ if (user_mode(regs)) {
14279 sp = regs->sp;
14280 ss = regs->ss & 0xffff;
14281- gs = get_user_gs(regs);
14282 } else {
14283 sp = kernel_stack_pointer(regs);
14284 savesegment(ss, ss);
14285- savesegment(gs, gs);
14286 }
14287+ gs = get_user_gs(regs);
14288
14289 show_regs_common();
14290
14291@@ -196,13 +196,14 @@ int copy_thread(unsigned long clone_flag
14292 struct task_struct *tsk;
14293 int err;
14294
14295- childregs = task_pt_regs(p);
14296+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
14297 *childregs = *regs;
14298 childregs->ax = 0;
14299 childregs->sp = sp;
14300
14301 p->thread.sp = (unsigned long) childregs;
14302 p->thread.sp0 = (unsigned long) (childregs+1);
14303+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14304
14305 p->thread.ip = (unsigned long) ret_from_fork;
14306
14307@@ -292,7 +293,7 @@ __switch_to(struct task_struct *prev_p,
14308 struct thread_struct *prev = &prev_p->thread,
14309 *next = &next_p->thread;
14310 int cpu = smp_processor_id();
14311- struct tss_struct *tss = &per_cpu(init_tss, cpu);
14312+ struct tss_struct *tss = init_tss + cpu;
14313 bool preload_fpu;
14314
14315 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
14316@@ -327,6 +328,10 @@ __switch_to(struct task_struct *prev_p,
14317 */
14318 lazy_save_gs(prev->gs);
14319
14320+#ifdef CONFIG_PAX_MEMORY_UDEREF
14321+ __set_fs(task_thread_info(next_p)->addr_limit);
14322+#endif
14323+
14324 /*
14325 * Load the per-thread Thread-Local Storage descriptor.
14326 */
14327@@ -362,6 +367,9 @@ __switch_to(struct task_struct *prev_p,
14328 */
14329 arch_end_context_switch(next_p);
14330
14331+ percpu_write(current_task, next_p);
14332+ percpu_write(current_tinfo, &next_p->tinfo);
14333+
14334 if (preload_fpu)
14335 __math_state_restore();
14336
14337@@ -371,8 +379,6 @@ __switch_to(struct task_struct *prev_p,
14338 if (prev->gs | next->gs)
14339 lazy_load_gs(next->gs);
14340
14341- percpu_write(current_task, next_p);
14342-
14343 return prev_p;
14344 }
14345
14346@@ -402,4 +408,3 @@ unsigned long get_wchan(struct task_stru
14347 } while (count++ < 16);
14348 return 0;
14349 }
14350-
14351diff -urNp linux-3.0.4/arch/x86/kernel/process_64.c linux-3.0.4/arch/x86/kernel/process_64.c
14352--- linux-3.0.4/arch/x86/kernel/process_64.c 2011-07-21 22:17:23.000000000 -0400
14353+++ linux-3.0.4/arch/x86/kernel/process_64.c 2011-08-23 21:47:55.000000000 -0400
14354@@ -87,7 +87,7 @@ static void __exit_idle(void)
14355 void exit_idle(void)
14356 {
14357 /* idle loop has pid 0 */
14358- if (current->pid)
14359+ if (task_pid_nr(current))
14360 return;
14361 __exit_idle();
14362 }
14363@@ -260,8 +260,7 @@ int copy_thread(unsigned long clone_flag
14364 struct pt_regs *childregs;
14365 struct task_struct *me = current;
14366
14367- childregs = ((struct pt_regs *)
14368- (THREAD_SIZE + task_stack_page(p))) - 1;
14369+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
14370 *childregs = *regs;
14371
14372 childregs->ax = 0;
14373@@ -273,6 +272,7 @@ int copy_thread(unsigned long clone_flag
14374 p->thread.sp = (unsigned long) childregs;
14375 p->thread.sp0 = (unsigned long) (childregs+1);
14376 p->thread.usersp = me->thread.usersp;
14377+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14378
14379 set_tsk_thread_flag(p, TIF_FORK);
14380
14381@@ -375,7 +375,7 @@ __switch_to(struct task_struct *prev_p,
14382 struct thread_struct *prev = &prev_p->thread;
14383 struct thread_struct *next = &next_p->thread;
14384 int cpu = smp_processor_id();
14385- struct tss_struct *tss = &per_cpu(init_tss, cpu);
14386+ struct tss_struct *tss = init_tss + cpu;
14387 unsigned fsindex, gsindex;
14388 bool preload_fpu;
14389
14390@@ -471,10 +471,9 @@ __switch_to(struct task_struct *prev_p,
14391 prev->usersp = percpu_read(old_rsp);
14392 percpu_write(old_rsp, next->usersp);
14393 percpu_write(current_task, next_p);
14394+ percpu_write(current_tinfo, &next_p->tinfo);
14395
14396- percpu_write(kernel_stack,
14397- (unsigned long)task_stack_page(next_p) +
14398- THREAD_SIZE - KERNEL_STACK_OFFSET);
14399+ percpu_write(kernel_stack, next->sp0);
14400
14401 /*
14402 * Now maybe reload the debug registers and handle I/O bitmaps
14403@@ -536,12 +535,11 @@ unsigned long get_wchan(struct task_stru
14404 if (!p || p == current || p->state == TASK_RUNNING)
14405 return 0;
14406 stack = (unsigned long)task_stack_page(p);
14407- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
14408+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
14409 return 0;
14410 fp = *(u64 *)(p->thread.sp);
14411 do {
14412- if (fp < (unsigned long)stack ||
14413- fp >= (unsigned long)stack+THREAD_SIZE)
14414+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
14415 return 0;
14416 ip = *(u64 *)(fp+8);
14417 if (!in_sched_functions(ip))
14418diff -urNp linux-3.0.4/arch/x86/kernel/process.c linux-3.0.4/arch/x86/kernel/process.c
14419--- linux-3.0.4/arch/x86/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
14420+++ linux-3.0.4/arch/x86/kernel/process.c 2011-08-30 18:23:52.000000000 -0400
14421@@ -48,16 +48,33 @@ void free_thread_xstate(struct task_stru
14422
14423 void free_thread_info(struct thread_info *ti)
14424 {
14425- free_thread_xstate(ti->task);
14426 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
14427 }
14428
14429+static struct kmem_cache *task_struct_cachep;
14430+
14431 void arch_task_cache_init(void)
14432 {
14433- task_xstate_cachep =
14434- kmem_cache_create("task_xstate", xstate_size,
14435+ /* create a slab on which task_structs can be allocated */
14436+ task_struct_cachep =
14437+ kmem_cache_create("task_struct", sizeof(struct task_struct),
14438+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
14439+
14440+ task_xstate_cachep =
14441+ kmem_cache_create("task_xstate", xstate_size,
14442 __alignof__(union thread_xstate),
14443- SLAB_PANIC | SLAB_NOTRACK, NULL);
14444+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
14445+}
14446+
14447+struct task_struct *alloc_task_struct_node(int node)
14448+{
14449+ return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
14450+}
14451+
14452+void free_task_struct(struct task_struct *task)
14453+{
14454+ free_thread_xstate(task);
14455+ kmem_cache_free(task_struct_cachep, task);
14456 }
14457
14458 /*
14459@@ -70,7 +87,7 @@ void exit_thread(void)
14460 unsigned long *bp = t->io_bitmap_ptr;
14461
14462 if (bp) {
14463- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
14464+ struct tss_struct *tss = init_tss + get_cpu();
14465
14466 t->io_bitmap_ptr = NULL;
14467 clear_thread_flag(TIF_IO_BITMAP);
14468@@ -106,7 +123,7 @@ void show_regs_common(void)
14469
14470 printk(KERN_CONT "\n");
14471 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
14472- current->pid, current->comm, print_tainted(),
14473+ task_pid_nr(current), current->comm, print_tainted(),
14474 init_utsname()->release,
14475 (int)strcspn(init_utsname()->version, " "),
14476 init_utsname()->version);
14477@@ -120,6 +137,9 @@ void flush_thread(void)
14478 {
14479 struct task_struct *tsk = current;
14480
14481+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
14482+ loadsegment(gs, 0);
14483+#endif
14484 flush_ptrace_hw_breakpoint(tsk);
14485 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
14486 /*
14487@@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), voi
14488 regs.di = (unsigned long) arg;
14489
14490 #ifdef CONFIG_X86_32
14491- regs.ds = __USER_DS;
14492- regs.es = __USER_DS;
14493+ regs.ds = __KERNEL_DS;
14494+ regs.es = __KERNEL_DS;
14495 regs.fs = __KERNEL_PERCPU;
14496- regs.gs = __KERNEL_STACK_CANARY;
14497+ savesegment(gs, regs.gs);
14498 #else
14499 regs.ss = __KERNEL_DS;
14500 #endif
14501@@ -403,7 +423,7 @@ void default_idle(void)
14502 EXPORT_SYMBOL(default_idle);
14503 #endif
14504
14505-void stop_this_cpu(void *dummy)
14506+__noreturn void stop_this_cpu(void *dummy)
14507 {
14508 local_irq_disable();
14509 /*
14510@@ -668,16 +688,37 @@ static int __init idle_setup(char *str)
14511 }
14512 early_param("idle", idle_setup);
14513
14514-unsigned long arch_align_stack(unsigned long sp)
14515+#ifdef CONFIG_PAX_RANDKSTACK
14516+void pax_randomize_kstack(struct pt_regs *regs)
14517 {
14518- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
14519- sp -= get_random_int() % 8192;
14520- return sp & ~0xf;
14521-}
14522+ struct thread_struct *thread = &current->thread;
14523+ unsigned long time;
14524
14525-unsigned long arch_randomize_brk(struct mm_struct *mm)
14526-{
14527- unsigned long range_end = mm->brk + 0x02000000;
14528- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
14529-}
14530+ if (!randomize_va_space)
14531+ return;
14532+
14533+ if (v8086_mode(regs))
14534+ return;
14535
14536+ rdtscl(time);
14537+
14538+ /* P4 seems to return a 0 LSB, ignore it */
14539+#ifdef CONFIG_MPENTIUM4
14540+ time &= 0x3EUL;
14541+ time <<= 2;
14542+#elif defined(CONFIG_X86_64)
14543+ time &= 0xFUL;
14544+ time <<= 4;
14545+#else
14546+ time &= 0x1FUL;
14547+ time <<= 3;
14548+#endif
14549+
14550+ thread->sp0 ^= time;
14551+ load_sp0(init_tss + smp_processor_id(), thread);
14552+
14553+#ifdef CONFIG_X86_64
14554+ percpu_write(kernel_stack, thread->sp0);
14555+#endif
14556+}
14557+#endif
14558diff -urNp linux-3.0.4/arch/x86/kernel/ptrace.c linux-3.0.4/arch/x86/kernel/ptrace.c
14559--- linux-3.0.4/arch/x86/kernel/ptrace.c 2011-07-21 22:17:23.000000000 -0400
14560+++ linux-3.0.4/arch/x86/kernel/ptrace.c 2011-08-23 21:47:55.000000000 -0400
14561@@ -821,7 +821,7 @@ long arch_ptrace(struct task_struct *chi
14562 unsigned long addr, unsigned long data)
14563 {
14564 int ret;
14565- unsigned long __user *datap = (unsigned long __user *)data;
14566+ unsigned long __user *datap = (__force unsigned long __user *)data;
14567
14568 switch (request) {
14569 /* read the word at location addr in the USER area. */
14570@@ -906,14 +906,14 @@ long arch_ptrace(struct task_struct *chi
14571 if ((int) addr < 0)
14572 return -EIO;
14573 ret = do_get_thread_area(child, addr,
14574- (struct user_desc __user *)data);
14575+ (__force struct user_desc __user *) data);
14576 break;
14577
14578 case PTRACE_SET_THREAD_AREA:
14579 if ((int) addr < 0)
14580 return -EIO;
14581 ret = do_set_thread_area(child, addr,
14582- (struct user_desc __user *)data, 0);
14583+ (__force struct user_desc __user *) data, 0);
14584 break;
14585 #endif
14586
14587@@ -1330,7 +1330,7 @@ static void fill_sigtrap_info(struct tas
14588 memset(info, 0, sizeof(*info));
14589 info->si_signo = SIGTRAP;
14590 info->si_code = si_code;
14591- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
14592+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
14593 }
14594
14595 void user_single_step_siginfo(struct task_struct *tsk,
14596diff -urNp linux-3.0.4/arch/x86/kernel/pvclock.c linux-3.0.4/arch/x86/kernel/pvclock.c
14597--- linux-3.0.4/arch/x86/kernel/pvclock.c 2011-07-21 22:17:23.000000000 -0400
14598+++ linux-3.0.4/arch/x86/kernel/pvclock.c 2011-08-23 21:47:55.000000000 -0400
14599@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvc
14600 return pv_tsc_khz;
14601 }
14602
14603-static atomic64_t last_value = ATOMIC64_INIT(0);
14604+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
14605
14606 void pvclock_resume(void)
14607 {
14608- atomic64_set(&last_value, 0);
14609+ atomic64_set_unchecked(&last_value, 0);
14610 }
14611
14612 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
14613@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct
14614 * updating at the same time, and one of them could be slightly behind,
14615 * making the assumption that last_value always go forward fail to hold.
14616 */
14617- last = atomic64_read(&last_value);
14618+ last = atomic64_read_unchecked(&last_value);
14619 do {
14620 if (ret < last)
14621 return last;
14622- last = atomic64_cmpxchg(&last_value, last, ret);
14623+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
14624 } while (unlikely(last != ret));
14625
14626 return ret;
14627diff -urNp linux-3.0.4/arch/x86/kernel/reboot.c linux-3.0.4/arch/x86/kernel/reboot.c
14628--- linux-3.0.4/arch/x86/kernel/reboot.c 2011-07-21 22:17:23.000000000 -0400
14629+++ linux-3.0.4/arch/x86/kernel/reboot.c 2011-08-23 21:47:55.000000000 -0400
14630@@ -35,7 +35,7 @@ void (*pm_power_off)(void);
14631 EXPORT_SYMBOL(pm_power_off);
14632
14633 static const struct desc_ptr no_idt = {};
14634-static int reboot_mode;
14635+static unsigned short reboot_mode;
14636 enum reboot_type reboot_type = BOOT_ACPI;
14637 int reboot_force;
14638
14639@@ -315,13 +315,17 @@ core_initcall(reboot_init);
14640 extern const unsigned char machine_real_restart_asm[];
14641 extern const u64 machine_real_restart_gdt[3];
14642
14643-void machine_real_restart(unsigned int type)
14644+__noreturn void machine_real_restart(unsigned int type)
14645 {
14646 void *restart_va;
14647 unsigned long restart_pa;
14648- void (*restart_lowmem)(unsigned int);
14649+ void (* __noreturn restart_lowmem)(unsigned int);
14650 u64 *lowmem_gdt;
14651
14652+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14653+ struct desc_struct *gdt;
14654+#endif
14655+
14656 local_irq_disable();
14657
14658 /* Write zero to CMOS register number 0x0f, which the BIOS POST
14659@@ -347,14 +351,14 @@ void machine_real_restart(unsigned int t
14660 boot)". This seems like a fairly standard thing that gets set by
14661 REBOOT.COM programs, and the previous reset routine did this
14662 too. */
14663- *((unsigned short *)0x472) = reboot_mode;
14664+ *(unsigned short *)(__va(0x472)) = reboot_mode;
14665
14666 /* Patch the GDT in the low memory trampoline */
14667 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
14668
14669 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
14670 restart_pa = virt_to_phys(restart_va);
14671- restart_lowmem = (void (*)(unsigned int))restart_pa;
14672+ restart_lowmem = (void *)restart_pa;
14673
14674 /* GDT[0]: GDT self-pointer */
14675 lowmem_gdt[0] =
14676@@ -365,7 +369,33 @@ void machine_real_restart(unsigned int t
14677 GDT_ENTRY(0x009b, restart_pa, 0xffff);
14678
14679 /* Jump to the identity-mapped low memory code */
14680+
14681+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14682+ gdt = get_cpu_gdt_table(smp_processor_id());
14683+ pax_open_kernel();
14684+#ifdef CONFIG_PAX_MEMORY_UDEREF
14685+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
14686+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
14687+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
14688+#endif
14689+#ifdef CONFIG_PAX_KERNEXEC
14690+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
14691+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
14692+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
14693+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
14694+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
14695+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
14696+#endif
14697+ pax_close_kernel();
14698+#endif
14699+
14700+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14701+ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
14702+ unreachable();
14703+#else
14704 restart_lowmem(type);
14705+#endif
14706+
14707 }
14708 #ifdef CONFIG_APM_MODULE
14709 EXPORT_SYMBOL(machine_real_restart);
14710@@ -523,7 +553,7 @@ void __attribute__((weak)) mach_reboot_f
14711 * try to force a triple fault and then cycle between hitting the keyboard
14712 * controller and doing that
14713 */
14714-static void native_machine_emergency_restart(void)
14715+__noreturn static void native_machine_emergency_restart(void)
14716 {
14717 int i;
14718 int attempt = 0;
14719@@ -647,13 +677,13 @@ void native_machine_shutdown(void)
14720 #endif
14721 }
14722
14723-static void __machine_emergency_restart(int emergency)
14724+static __noreturn void __machine_emergency_restart(int emergency)
14725 {
14726 reboot_emergency = emergency;
14727 machine_ops.emergency_restart();
14728 }
14729
14730-static void native_machine_restart(char *__unused)
14731+static __noreturn void native_machine_restart(char *__unused)
14732 {
14733 printk("machine restart\n");
14734
14735@@ -662,7 +692,7 @@ static void native_machine_restart(char
14736 __machine_emergency_restart(0);
14737 }
14738
14739-static void native_machine_halt(void)
14740+static __noreturn void native_machine_halt(void)
14741 {
14742 /* stop other cpus and apics */
14743 machine_shutdown();
14744@@ -673,7 +703,7 @@ static void native_machine_halt(void)
14745 stop_this_cpu(NULL);
14746 }
14747
14748-static void native_machine_power_off(void)
14749+__noreturn static void native_machine_power_off(void)
14750 {
14751 if (pm_power_off) {
14752 if (!reboot_force)
14753@@ -682,6 +712,7 @@ static void native_machine_power_off(voi
14754 }
14755 /* a fallback in case there is no PM info available */
14756 tboot_shutdown(TB_SHUTDOWN_HALT);
14757+ unreachable();
14758 }
14759
14760 struct machine_ops machine_ops = {
14761diff -urNp linux-3.0.4/arch/x86/kernel/setup.c linux-3.0.4/arch/x86/kernel/setup.c
14762--- linux-3.0.4/arch/x86/kernel/setup.c 2011-07-21 22:17:23.000000000 -0400
14763+++ linux-3.0.4/arch/x86/kernel/setup.c 2011-10-06 04:17:55.000000000 -0400
14764@@ -447,7 +447,7 @@ static void __init parse_setup_data(void
14765
14766 switch (data->type) {
14767 case SETUP_E820_EXT:
14768- parse_e820_ext(data);
14769+ parse_e820_ext((struct setup_data __force_kernel *)data);
14770 break;
14771 case SETUP_DTB:
14772 add_dtb(pa_data);
14773@@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
14774 * area (640->1Mb) as ram even though it is not.
14775 * take them out.
14776 */
14777- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
14778+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
14779 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
14780 }
14781
14782@@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
14783
14784 if (!boot_params.hdr.root_flags)
14785 root_mountflags &= ~MS_RDONLY;
14786- init_mm.start_code = (unsigned long) _text;
14787- init_mm.end_code = (unsigned long) _etext;
14788+ init_mm.start_code = ktla_ktva((unsigned long) _text);
14789+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
14790 init_mm.end_data = (unsigned long) _edata;
14791 init_mm.brk = _brk_end;
14792
14793- code_resource.start = virt_to_phys(_text);
14794- code_resource.end = virt_to_phys(_etext)-1;
14795- data_resource.start = virt_to_phys(_etext);
14796+ code_resource.start = virt_to_phys(ktla_ktva(_text));
14797+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
14798+ data_resource.start = virt_to_phys(_sdata);
14799 data_resource.end = virt_to_phys(_edata)-1;
14800 bss_resource.start = virt_to_phys(&__bss_start);
14801 bss_resource.end = virt_to_phys(&__bss_stop)-1;
14802diff -urNp linux-3.0.4/arch/x86/kernel/setup_percpu.c linux-3.0.4/arch/x86/kernel/setup_percpu.c
14803--- linux-3.0.4/arch/x86/kernel/setup_percpu.c 2011-07-21 22:17:23.000000000 -0400
14804+++ linux-3.0.4/arch/x86/kernel/setup_percpu.c 2011-08-23 21:47:55.000000000 -0400
14805@@ -21,19 +21,17 @@
14806 #include <asm/cpu.h>
14807 #include <asm/stackprotector.h>
14808
14809-DEFINE_PER_CPU(int, cpu_number);
14810+#ifdef CONFIG_SMP
14811+DEFINE_PER_CPU(unsigned int, cpu_number);
14812 EXPORT_PER_CPU_SYMBOL(cpu_number);
14813+#endif
14814
14815-#ifdef CONFIG_X86_64
14816 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
14817-#else
14818-#define BOOT_PERCPU_OFFSET 0
14819-#endif
14820
14821 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
14822 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
14823
14824-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
14825+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
14826 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
14827 };
14828 EXPORT_SYMBOL(__per_cpu_offset);
14829@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(
14830 {
14831 #ifdef CONFIG_X86_32
14832 struct desc_struct gdt;
14833+ unsigned long base = per_cpu_offset(cpu);
14834
14835- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
14836- 0x2 | DESCTYPE_S, 0x8);
14837- gdt.s = 1;
14838+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
14839+ 0x83 | DESCTYPE_S, 0xC);
14840 write_gdt_entry(get_cpu_gdt_table(cpu),
14841 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
14842 #endif
14843@@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
14844 /* alrighty, percpu areas up and running */
14845 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
14846 for_each_possible_cpu(cpu) {
14847+#ifdef CONFIG_CC_STACKPROTECTOR
14848+#ifdef CONFIG_X86_32
14849+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
14850+#endif
14851+#endif
14852 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
14853 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
14854 per_cpu(cpu_number, cpu) = cpu;
14855@@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
14856 */
14857 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
14858 #endif
14859+#ifdef CONFIG_CC_STACKPROTECTOR
14860+#ifdef CONFIG_X86_32
14861+ if (!cpu)
14862+ per_cpu(stack_canary.canary, cpu) = canary;
14863+#endif
14864+#endif
14865 /*
14866 * Up to this point, the boot CPU has been using .init.data
14867 * area. Reload any changed state for the boot CPU.
14868diff -urNp linux-3.0.4/arch/x86/kernel/signal.c linux-3.0.4/arch/x86/kernel/signal.c
14869--- linux-3.0.4/arch/x86/kernel/signal.c 2011-07-21 22:17:23.000000000 -0400
14870+++ linux-3.0.4/arch/x86/kernel/signal.c 2011-08-23 21:48:14.000000000 -0400
14871@@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsi
14872 * Align the stack pointer according to the i386 ABI,
14873 * i.e. so that on function entry ((sp + 4) & 15) == 0.
14874 */
14875- sp = ((sp + 4) & -16ul) - 4;
14876+ sp = ((sp - 12) & -16ul) - 4;
14877 #else /* !CONFIG_X86_32 */
14878 sp = round_down(sp, 16) - 8;
14879 #endif
14880@@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, str
14881 * Return an always-bogus address instead so we will die with SIGSEGV.
14882 */
14883 if (onsigstack && !likely(on_sig_stack(sp)))
14884- return (void __user *)-1L;
14885+ return (__force void __user *)-1L;
14886
14887 /* save i387 state */
14888 if (used_math() && save_i387_xstate(*fpstate) < 0)
14889- return (void __user *)-1L;
14890+ return (__force void __user *)-1L;
14891
14892 return (void __user *)sp;
14893 }
14894@@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigactio
14895 }
14896
14897 if (current->mm->context.vdso)
14898- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
14899+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
14900 else
14901- restorer = &frame->retcode;
14902+ restorer = (void __user *)&frame->retcode;
14903 if (ka->sa.sa_flags & SA_RESTORER)
14904 restorer = ka->sa.sa_restorer;
14905
14906@@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigactio
14907 * reasons and because gdb uses it as a signature to notice
14908 * signal handler stack frames.
14909 */
14910- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
14911+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
14912
14913 if (err)
14914 return -EFAULT;
14915@@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, str
14916 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
14917
14918 /* Set up to return from userspace. */
14919- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14920+ if (current->mm->context.vdso)
14921+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14922+ else
14923+ restorer = (void __user *)&frame->retcode;
14924 if (ka->sa.sa_flags & SA_RESTORER)
14925 restorer = ka->sa.sa_restorer;
14926 put_user_ex(restorer, &frame->pretcode);
14927@@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, str
14928 * reasons and because gdb uses it as a signature to notice
14929 * signal handler stack frames.
14930 */
14931- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
14932+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
14933 } put_user_catch(err);
14934
14935 if (err)
14936@@ -769,6 +772,8 @@ static void do_signal(struct pt_regs *re
14937 int signr;
14938 sigset_t *oldset;
14939
14940+ pax_track_stack();
14941+
14942 /*
14943 * We want the common case to go fast, which is why we may in certain
14944 * cases get here from kernel mode. Just return without doing anything
14945@@ -776,7 +781,7 @@ static void do_signal(struct pt_regs *re
14946 * X86_32: vm86 regs switched out by assembly code before reaching
14947 * here, so testing against kernel CS suffices.
14948 */
14949- if (!user_mode(regs))
14950+ if (!user_mode_novm(regs))
14951 return;
14952
14953 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
14954diff -urNp linux-3.0.4/arch/x86/kernel/smpboot.c linux-3.0.4/arch/x86/kernel/smpboot.c
14955--- linux-3.0.4/arch/x86/kernel/smpboot.c 2011-07-21 22:17:23.000000000 -0400
14956+++ linux-3.0.4/arch/x86/kernel/smpboot.c 2011-08-23 21:47:55.000000000 -0400
14957@@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int api
14958 set_idle_for_cpu(cpu, c_idle.idle);
14959 do_rest:
14960 per_cpu(current_task, cpu) = c_idle.idle;
14961+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
14962 #ifdef CONFIG_X86_32
14963 /* Stack for startup_32 can be just as for start_secondary onwards */
14964 irq_ctx_init(cpu);
14965 #else
14966 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
14967 initial_gs = per_cpu_offset(cpu);
14968- per_cpu(kernel_stack, cpu) =
14969- (unsigned long)task_stack_page(c_idle.idle) -
14970- KERNEL_STACK_OFFSET + THREAD_SIZE;
14971+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
14972 #endif
14973+
14974+ pax_open_kernel();
14975 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
14976+ pax_close_kernel();
14977+
14978 initial_code = (unsigned long)start_secondary;
14979 stack_start = c_idle.idle->thread.sp;
14980
14981@@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int
14982
14983 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
14984
14985+#ifdef CONFIG_PAX_PER_CPU_PGD
14986+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
14987+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
14988+ KERNEL_PGD_PTRS);
14989+#endif
14990+
14991 err = do_boot_cpu(apicid, cpu);
14992 if (err) {
14993 pr_debug("do_boot_cpu failed %d\n", err);
14994diff -urNp linux-3.0.4/arch/x86/kernel/step.c linux-3.0.4/arch/x86/kernel/step.c
14995--- linux-3.0.4/arch/x86/kernel/step.c 2011-07-21 22:17:23.000000000 -0400
14996+++ linux-3.0.4/arch/x86/kernel/step.c 2011-08-23 21:47:55.000000000 -0400
14997@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
14998 struct desc_struct *desc;
14999 unsigned long base;
15000
15001- seg &= ~7UL;
15002+ seg >>= 3;
15003
15004 mutex_lock(&child->mm->context.lock);
15005- if (unlikely((seg >> 3) >= child->mm->context.size))
15006+ if (unlikely(seg >= child->mm->context.size))
15007 addr = -1L; /* bogus selector, access would fault */
15008 else {
15009 desc = child->mm->context.ldt + seg;
15010@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
15011 addr += base;
15012 }
15013 mutex_unlock(&child->mm->context.lock);
15014- }
15015+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
15016+ addr = ktla_ktva(addr);
15017
15018 return addr;
15019 }
15020@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
15021 unsigned char opcode[15];
15022 unsigned long addr = convert_ip_to_linear(child, regs);
15023
15024+ if (addr == -EINVAL)
15025+ return 0;
15026+
15027 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
15028 for (i = 0; i < copied; i++) {
15029 switch (opcode[i]) {
15030@@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
15031
15032 #ifdef CONFIG_X86_64
15033 case 0x40 ... 0x4f:
15034- if (regs->cs != __USER_CS)
15035+ if ((regs->cs & 0xffff) != __USER_CS)
15036 /* 32-bit mode: register increment */
15037 return 0;
15038 /* 64-bit mode: REX prefix */
15039diff -urNp linux-3.0.4/arch/x86/kernel/syscall_table_32.S linux-3.0.4/arch/x86/kernel/syscall_table_32.S
15040--- linux-3.0.4/arch/x86/kernel/syscall_table_32.S 2011-07-21 22:17:23.000000000 -0400
15041+++ linux-3.0.4/arch/x86/kernel/syscall_table_32.S 2011-08-23 21:47:55.000000000 -0400
15042@@ -1,3 +1,4 @@
15043+.section .rodata,"a",@progbits
15044 ENTRY(sys_call_table)
15045 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
15046 .long sys_exit
15047diff -urNp linux-3.0.4/arch/x86/kernel/sys_i386_32.c linux-3.0.4/arch/x86/kernel/sys_i386_32.c
15048--- linux-3.0.4/arch/x86/kernel/sys_i386_32.c 2011-07-21 22:17:23.000000000 -0400
15049+++ linux-3.0.4/arch/x86/kernel/sys_i386_32.c 2011-08-23 21:47:55.000000000 -0400
15050@@ -24,17 +24,224 @@
15051
15052 #include <asm/syscalls.h>
15053
15054-/*
15055- * Do a system call from kernel instead of calling sys_execve so we
15056- * end up with proper pt_regs.
15057- */
15058-int kernel_execve(const char *filename,
15059- const char *const argv[],
15060- const char *const envp[])
15061+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
15062 {
15063- long __res;
15064- asm volatile ("int $0x80"
15065- : "=a" (__res)
15066- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
15067- return __res;
15068+ unsigned long pax_task_size = TASK_SIZE;
15069+
15070+#ifdef CONFIG_PAX_SEGMEXEC
15071+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
15072+ pax_task_size = SEGMEXEC_TASK_SIZE;
15073+#endif
15074+
15075+ if (len > pax_task_size || addr > pax_task_size - len)
15076+ return -EINVAL;
15077+
15078+ return 0;
15079+}
15080+
15081+unsigned long
15082+arch_get_unmapped_area(struct file *filp, unsigned long addr,
15083+ unsigned long len, unsigned long pgoff, unsigned long flags)
15084+{
15085+ struct mm_struct *mm = current->mm;
15086+ struct vm_area_struct *vma;
15087+ unsigned long start_addr, pax_task_size = TASK_SIZE;
15088+
15089+#ifdef CONFIG_PAX_SEGMEXEC
15090+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
15091+ pax_task_size = SEGMEXEC_TASK_SIZE;
15092+#endif
15093+
15094+ pax_task_size -= PAGE_SIZE;
15095+
15096+ if (len > pax_task_size)
15097+ return -ENOMEM;
15098+
15099+ if (flags & MAP_FIXED)
15100+ return addr;
15101+
15102+#ifdef CONFIG_PAX_RANDMMAP
15103+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15104+#endif
15105+
15106+ if (addr) {
15107+ addr = PAGE_ALIGN(addr);
15108+ if (pax_task_size - len >= addr) {
15109+ vma = find_vma(mm, addr);
15110+ if (check_heap_stack_gap(vma, addr, len))
15111+ return addr;
15112+ }
15113+ }
15114+ if (len > mm->cached_hole_size) {
15115+ start_addr = addr = mm->free_area_cache;
15116+ } else {
15117+ start_addr = addr = mm->mmap_base;
15118+ mm->cached_hole_size = 0;
15119+ }
15120+
15121+#ifdef CONFIG_PAX_PAGEEXEC
15122+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
15123+ start_addr = 0x00110000UL;
15124+
15125+#ifdef CONFIG_PAX_RANDMMAP
15126+ if (mm->pax_flags & MF_PAX_RANDMMAP)
15127+ start_addr += mm->delta_mmap & 0x03FFF000UL;
15128+#endif
15129+
15130+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
15131+ start_addr = addr = mm->mmap_base;
15132+ else
15133+ addr = start_addr;
15134+ }
15135+#endif
15136+
15137+full_search:
15138+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
15139+ /* At this point: (!vma || addr < vma->vm_end). */
15140+ if (pax_task_size - len < addr) {
15141+ /*
15142+ * Start a new search - just in case we missed
15143+ * some holes.
15144+ */
15145+ if (start_addr != mm->mmap_base) {
15146+ start_addr = addr = mm->mmap_base;
15147+ mm->cached_hole_size = 0;
15148+ goto full_search;
15149+ }
15150+ return -ENOMEM;
15151+ }
15152+ if (check_heap_stack_gap(vma, addr, len))
15153+ break;
15154+ if (addr + mm->cached_hole_size < vma->vm_start)
15155+ mm->cached_hole_size = vma->vm_start - addr;
15156+ addr = vma->vm_end;
15157+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
15158+ start_addr = addr = mm->mmap_base;
15159+ mm->cached_hole_size = 0;
15160+ goto full_search;
15161+ }
15162+ }
15163+
15164+ /*
15165+ * Remember the place where we stopped the search:
15166+ */
15167+ mm->free_area_cache = addr + len;
15168+ return addr;
15169+}
15170+
15171+unsigned long
15172+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
15173+ const unsigned long len, const unsigned long pgoff,
15174+ const unsigned long flags)
15175+{
15176+ struct vm_area_struct *vma;
15177+ struct mm_struct *mm = current->mm;
15178+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
15179+
15180+#ifdef CONFIG_PAX_SEGMEXEC
15181+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
15182+ pax_task_size = SEGMEXEC_TASK_SIZE;
15183+#endif
15184+
15185+ pax_task_size -= PAGE_SIZE;
15186+
15187+ /* requested length too big for entire address space */
15188+ if (len > pax_task_size)
15189+ return -ENOMEM;
15190+
15191+ if (flags & MAP_FIXED)
15192+ return addr;
15193+
15194+#ifdef CONFIG_PAX_PAGEEXEC
15195+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
15196+ goto bottomup;
15197+#endif
15198+
15199+#ifdef CONFIG_PAX_RANDMMAP
15200+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15201+#endif
15202+
15203+ /* requesting a specific address */
15204+ if (addr) {
15205+ addr = PAGE_ALIGN(addr);
15206+ if (pax_task_size - len >= addr) {
15207+ vma = find_vma(mm, addr);
15208+ if (check_heap_stack_gap(vma, addr, len))
15209+ return addr;
15210+ }
15211+ }
15212+
15213+ /* check if free_area_cache is useful for us */
15214+ if (len <= mm->cached_hole_size) {
15215+ mm->cached_hole_size = 0;
15216+ mm->free_area_cache = mm->mmap_base;
15217+ }
15218+
15219+ /* either no address requested or can't fit in requested address hole */
15220+ addr = mm->free_area_cache;
15221+
15222+ /* make sure it can fit in the remaining address space */
15223+ if (addr > len) {
15224+ vma = find_vma(mm, addr-len);
15225+ if (check_heap_stack_gap(vma, addr - len, len))
15226+ /* remember the address as a hint for next time */
15227+ return (mm->free_area_cache = addr-len);
15228+ }
15229+
15230+ if (mm->mmap_base < len)
15231+ goto bottomup;
15232+
15233+ addr = mm->mmap_base-len;
15234+
15235+ do {
15236+ /*
15237+ * Lookup failure means no vma is above this address,
15238+ * else if new region fits below vma->vm_start,
15239+ * return with success:
15240+ */
15241+ vma = find_vma(mm, addr);
15242+ if (check_heap_stack_gap(vma, addr, len))
15243+ /* remember the address as a hint for next time */
15244+ return (mm->free_area_cache = addr);
15245+
15246+ /* remember the largest hole we saw so far */
15247+ if (addr + mm->cached_hole_size < vma->vm_start)
15248+ mm->cached_hole_size = vma->vm_start - addr;
15249+
15250+ /* try just below the current vma->vm_start */
15251+ addr = skip_heap_stack_gap(vma, len);
15252+ } while (!IS_ERR_VALUE(addr));
15253+
15254+bottomup:
15255+ /*
15256+ * A failed mmap() very likely causes application failure,
15257+ * so fall back to the bottom-up function here. This scenario
15258+ * can happen with large stack limits and large mmap()
15259+ * allocations.
15260+ */
15261+
15262+#ifdef CONFIG_PAX_SEGMEXEC
15263+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
15264+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
15265+ else
15266+#endif
15267+
15268+ mm->mmap_base = TASK_UNMAPPED_BASE;
15269+
15270+#ifdef CONFIG_PAX_RANDMMAP
15271+ if (mm->pax_flags & MF_PAX_RANDMMAP)
15272+ mm->mmap_base += mm->delta_mmap;
15273+#endif
15274+
15275+ mm->free_area_cache = mm->mmap_base;
15276+ mm->cached_hole_size = ~0UL;
15277+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15278+ /*
15279+ * Restore the topdown base:
15280+ */
15281+ mm->mmap_base = base;
15282+ mm->free_area_cache = base;
15283+ mm->cached_hole_size = ~0UL;
15284+
15285+ return addr;
15286 }
15287diff -urNp linux-3.0.4/arch/x86/kernel/sys_x86_64.c linux-3.0.4/arch/x86/kernel/sys_x86_64.c
15288--- linux-3.0.4/arch/x86/kernel/sys_x86_64.c 2011-07-21 22:17:23.000000000 -0400
15289+++ linux-3.0.4/arch/x86/kernel/sys_x86_64.c 2011-08-23 21:47:55.000000000 -0400
15290@@ -32,8 +32,8 @@ out:
15291 return error;
15292 }
15293
15294-static void find_start_end(unsigned long flags, unsigned long *begin,
15295- unsigned long *end)
15296+static void find_start_end(struct mm_struct *mm, unsigned long flags,
15297+ unsigned long *begin, unsigned long *end)
15298 {
15299 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
15300 unsigned long new_begin;
15301@@ -52,7 +52,7 @@ static void find_start_end(unsigned long
15302 *begin = new_begin;
15303 }
15304 } else {
15305- *begin = TASK_UNMAPPED_BASE;
15306+ *begin = mm->mmap_base;
15307 *end = TASK_SIZE;
15308 }
15309 }
15310@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
15311 if (flags & MAP_FIXED)
15312 return addr;
15313
15314- find_start_end(flags, &begin, &end);
15315+ find_start_end(mm, flags, &begin, &end);
15316
15317 if (len > end)
15318 return -ENOMEM;
15319
15320+#ifdef CONFIG_PAX_RANDMMAP
15321+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15322+#endif
15323+
15324 if (addr) {
15325 addr = PAGE_ALIGN(addr);
15326 vma = find_vma(mm, addr);
15327- if (end - len >= addr &&
15328- (!vma || addr + len <= vma->vm_start))
15329+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
15330 return addr;
15331 }
15332 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
15333@@ -106,7 +109,7 @@ full_search:
15334 }
15335 return -ENOMEM;
15336 }
15337- if (!vma || addr + len <= vma->vm_start) {
15338+ if (check_heap_stack_gap(vma, addr, len)) {
15339 /*
15340 * Remember the place where we stopped the search:
15341 */
15342@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
15343 {
15344 struct vm_area_struct *vma;
15345 struct mm_struct *mm = current->mm;
15346- unsigned long addr = addr0;
15347+ unsigned long base = mm->mmap_base, addr = addr0;
15348
15349 /* requested length too big for entire address space */
15350 if (len > TASK_SIZE)
15351@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
15352 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
15353 goto bottomup;
15354
15355+#ifdef CONFIG_PAX_RANDMMAP
15356+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15357+#endif
15358+
15359 /* requesting a specific address */
15360 if (addr) {
15361 addr = PAGE_ALIGN(addr);
15362- vma = find_vma(mm, addr);
15363- if (TASK_SIZE - len >= addr &&
15364- (!vma || addr + len <= vma->vm_start))
15365- return addr;
15366+ if (TASK_SIZE - len >= addr) {
15367+ vma = find_vma(mm, addr);
15368+ if (check_heap_stack_gap(vma, addr, len))
15369+ return addr;
15370+ }
15371 }
15372
15373 /* check if free_area_cache is useful for us */
15374@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
15375 /* make sure it can fit in the remaining address space */
15376 if (addr > len) {
15377 vma = find_vma(mm, addr-len);
15378- if (!vma || addr <= vma->vm_start)
15379+ if (check_heap_stack_gap(vma, addr - len, len))
15380 /* remember the address as a hint for next time */
15381 return mm->free_area_cache = addr-len;
15382 }
15383@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
15384 * return with success:
15385 */
15386 vma = find_vma(mm, addr);
15387- if (!vma || addr+len <= vma->vm_start)
15388+ if (check_heap_stack_gap(vma, addr, len))
15389 /* remember the address as a hint for next time */
15390 return mm->free_area_cache = addr;
15391
15392@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
15393 mm->cached_hole_size = vma->vm_start - addr;
15394
15395 /* try just below the current vma->vm_start */
15396- addr = vma->vm_start-len;
15397- } while (len < vma->vm_start);
15398+ addr = skip_heap_stack_gap(vma, len);
15399+ } while (!IS_ERR_VALUE(addr));
15400
15401 bottomup:
15402 /*
15403@@ -198,13 +206,21 @@ bottomup:
15404 * can happen with large stack limits and large mmap()
15405 * allocations.
15406 */
15407+ mm->mmap_base = TASK_UNMAPPED_BASE;
15408+
15409+#ifdef CONFIG_PAX_RANDMMAP
15410+ if (mm->pax_flags & MF_PAX_RANDMMAP)
15411+ mm->mmap_base += mm->delta_mmap;
15412+#endif
15413+
15414+ mm->free_area_cache = mm->mmap_base;
15415 mm->cached_hole_size = ~0UL;
15416- mm->free_area_cache = TASK_UNMAPPED_BASE;
15417 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15418 /*
15419 * Restore the topdown base:
15420 */
15421- mm->free_area_cache = mm->mmap_base;
15422+ mm->mmap_base = base;
15423+ mm->free_area_cache = base;
15424 mm->cached_hole_size = ~0UL;
15425
15426 return addr;
15427diff -urNp linux-3.0.4/arch/x86/kernel/tboot.c linux-3.0.4/arch/x86/kernel/tboot.c
15428--- linux-3.0.4/arch/x86/kernel/tboot.c 2011-07-21 22:17:23.000000000 -0400
15429+++ linux-3.0.4/arch/x86/kernel/tboot.c 2011-08-23 21:47:55.000000000 -0400
15430@@ -217,7 +217,7 @@ static int tboot_setup_sleep(void)
15431
15432 void tboot_shutdown(u32 shutdown_type)
15433 {
15434- void (*shutdown)(void);
15435+ void (* __noreturn shutdown)(void);
15436
15437 if (!tboot_enabled())
15438 return;
15439@@ -239,7 +239,7 @@ void tboot_shutdown(u32 shutdown_type)
15440
15441 switch_to_tboot_pt();
15442
15443- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
15444+ shutdown = (void *)tboot->shutdown_entry;
15445 shutdown();
15446
15447 /* should not reach here */
15448@@ -296,7 +296,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
15449 tboot_shutdown(acpi_shutdown_map[sleep_state]);
15450 }
15451
15452-static atomic_t ap_wfs_count;
15453+static atomic_unchecked_t ap_wfs_count;
15454
15455 static int tboot_wait_for_aps(int num_aps)
15456 {
15457@@ -320,9 +320,9 @@ static int __cpuinit tboot_cpu_callback(
15458 {
15459 switch (action) {
15460 case CPU_DYING:
15461- atomic_inc(&ap_wfs_count);
15462+ atomic_inc_unchecked(&ap_wfs_count);
15463 if (num_online_cpus() == 1)
15464- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
15465+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
15466 return NOTIFY_BAD;
15467 break;
15468 }
15469@@ -341,7 +341,7 @@ static __init int tboot_late_init(void)
15470
15471 tboot_create_trampoline();
15472
15473- atomic_set(&ap_wfs_count, 0);
15474+ atomic_set_unchecked(&ap_wfs_count, 0);
15475 register_hotcpu_notifier(&tboot_cpu_notifier);
15476 return 0;
15477 }
15478diff -urNp linux-3.0.4/arch/x86/kernel/time.c linux-3.0.4/arch/x86/kernel/time.c
15479--- linux-3.0.4/arch/x86/kernel/time.c 2011-07-21 22:17:23.000000000 -0400
15480+++ linux-3.0.4/arch/x86/kernel/time.c 2011-08-23 21:47:55.000000000 -0400
15481@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs
15482 {
15483 unsigned long pc = instruction_pointer(regs);
15484
15485- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
15486+ if (!user_mode(regs) && in_lock_functions(pc)) {
15487 #ifdef CONFIG_FRAME_POINTER
15488- return *(unsigned long *)(regs->bp + sizeof(long));
15489+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
15490 #else
15491 unsigned long *sp =
15492 (unsigned long *)kernel_stack_pointer(regs);
15493@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
15494 * or above a saved flags. Eflags has bits 22-31 zero,
15495 * kernel addresses don't.
15496 */
15497+
15498+#ifdef CONFIG_PAX_KERNEXEC
15499+ return ktla_ktva(sp[0]);
15500+#else
15501 if (sp[0] >> 22)
15502 return sp[0];
15503 if (sp[1] >> 22)
15504 return sp[1];
15505 #endif
15506+
15507+#endif
15508 }
15509 return pc;
15510 }
15511diff -urNp linux-3.0.4/arch/x86/kernel/tls.c linux-3.0.4/arch/x86/kernel/tls.c
15512--- linux-3.0.4/arch/x86/kernel/tls.c 2011-07-21 22:17:23.000000000 -0400
15513+++ linux-3.0.4/arch/x86/kernel/tls.c 2011-08-23 21:47:55.000000000 -0400
15514@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
15515 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
15516 return -EINVAL;
15517
15518+#ifdef CONFIG_PAX_SEGMEXEC
15519+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
15520+ return -EINVAL;
15521+#endif
15522+
15523 set_tls_desc(p, idx, &info, 1);
15524
15525 return 0;
15526diff -urNp linux-3.0.4/arch/x86/kernel/trampoline_32.S linux-3.0.4/arch/x86/kernel/trampoline_32.S
15527--- linux-3.0.4/arch/x86/kernel/trampoline_32.S 2011-07-21 22:17:23.000000000 -0400
15528+++ linux-3.0.4/arch/x86/kernel/trampoline_32.S 2011-08-23 21:47:55.000000000 -0400
15529@@ -32,6 +32,12 @@
15530 #include <asm/segment.h>
15531 #include <asm/page_types.h>
15532
15533+#ifdef CONFIG_PAX_KERNEXEC
15534+#define ta(X) (X)
15535+#else
15536+#define ta(X) ((X) - __PAGE_OFFSET)
15537+#endif
15538+
15539 #ifdef CONFIG_SMP
15540
15541 .section ".x86_trampoline","a"
15542@@ -62,7 +68,7 @@ r_base = .
15543 inc %ax # protected mode (PE) bit
15544 lmsw %ax # into protected mode
15545 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
15546- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
15547+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
15548
15549 # These need to be in the same 64K segment as the above;
15550 # hence we don't use the boot_gdt_descr defined in head.S
15551diff -urNp linux-3.0.4/arch/x86/kernel/trampoline_64.S linux-3.0.4/arch/x86/kernel/trampoline_64.S
15552--- linux-3.0.4/arch/x86/kernel/trampoline_64.S 2011-07-21 22:17:23.000000000 -0400
15553+++ linux-3.0.4/arch/x86/kernel/trampoline_64.S 2011-08-23 21:47:55.000000000 -0400
15554@@ -90,7 +90,7 @@ startup_32:
15555 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
15556 movl %eax, %ds
15557
15558- movl $X86_CR4_PAE, %eax
15559+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15560 movl %eax, %cr4 # Enable PAE mode
15561
15562 # Setup trampoline 4 level pagetables
15563@@ -138,7 +138,7 @@ tidt:
15564 # so the kernel can live anywhere
15565 .balign 4
15566 tgdt:
15567- .short tgdt_end - tgdt # gdt limit
15568+ .short tgdt_end - tgdt - 1 # gdt limit
15569 .long tgdt - r_base
15570 .short 0
15571 .quad 0x00cf9b000000ffff # __KERNEL32_CS
15572diff -urNp linux-3.0.4/arch/x86/kernel/traps.c linux-3.0.4/arch/x86/kernel/traps.c
15573--- linux-3.0.4/arch/x86/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
15574+++ linux-3.0.4/arch/x86/kernel/traps.c 2011-08-23 21:47:55.000000000 -0400
15575@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
15576
15577 /* Do we ignore FPU interrupts ? */
15578 char ignore_fpu_irq;
15579-
15580-/*
15581- * The IDT has to be page-aligned to simplify the Pentium
15582- * F0 0F bug workaround.
15583- */
15584-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
15585 #endif
15586
15587 DECLARE_BITMAP(used_vectors, NR_VECTORS);
15588@@ -117,13 +111,13 @@ static inline void preempt_conditional_c
15589 }
15590
15591 static void __kprobes
15592-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
15593+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
15594 long error_code, siginfo_t *info)
15595 {
15596 struct task_struct *tsk = current;
15597
15598 #ifdef CONFIG_X86_32
15599- if (regs->flags & X86_VM_MASK) {
15600+ if (v8086_mode(regs)) {
15601 /*
15602 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
15603 * On nmi (interrupt 2), do_trap should not be called.
15604@@ -134,7 +128,7 @@ do_trap(int trapnr, int signr, char *str
15605 }
15606 #endif
15607
15608- if (!user_mode(regs))
15609+ if (!user_mode_novm(regs))
15610 goto kernel_trap;
15611
15612 #ifdef CONFIG_X86_32
15613@@ -157,7 +151,7 @@ trap_signal:
15614 printk_ratelimit()) {
15615 printk(KERN_INFO
15616 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
15617- tsk->comm, tsk->pid, str,
15618+ tsk->comm, task_pid_nr(tsk), str,
15619 regs->ip, regs->sp, error_code);
15620 print_vma_addr(" in ", regs->ip);
15621 printk("\n");
15622@@ -174,8 +168,20 @@ kernel_trap:
15623 if (!fixup_exception(regs)) {
15624 tsk->thread.error_code = error_code;
15625 tsk->thread.trap_no = trapnr;
15626+
15627+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15628+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
15629+ str = "PAX: suspicious stack segment fault";
15630+#endif
15631+
15632 die(str, regs, error_code);
15633 }
15634+
15635+#ifdef CONFIG_PAX_REFCOUNT
15636+ if (trapnr == 4)
15637+ pax_report_refcount_overflow(regs);
15638+#endif
15639+
15640 return;
15641
15642 #ifdef CONFIG_X86_32
15643@@ -264,14 +270,30 @@ do_general_protection(struct pt_regs *re
15644 conditional_sti(regs);
15645
15646 #ifdef CONFIG_X86_32
15647- if (regs->flags & X86_VM_MASK)
15648+ if (v8086_mode(regs))
15649 goto gp_in_vm86;
15650 #endif
15651
15652 tsk = current;
15653- if (!user_mode(regs))
15654+ if (!user_mode_novm(regs))
15655 goto gp_in_kernel;
15656
15657+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
15658+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
15659+ struct mm_struct *mm = tsk->mm;
15660+ unsigned long limit;
15661+
15662+ down_write(&mm->mmap_sem);
15663+ limit = mm->context.user_cs_limit;
15664+ if (limit < TASK_SIZE) {
15665+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
15666+ up_write(&mm->mmap_sem);
15667+ return;
15668+ }
15669+ up_write(&mm->mmap_sem);
15670+ }
15671+#endif
15672+
15673 tsk->thread.error_code = error_code;
15674 tsk->thread.trap_no = 13;
15675
15676@@ -304,6 +326,13 @@ gp_in_kernel:
15677 if (notify_die(DIE_GPF, "general protection fault", regs,
15678 error_code, 13, SIGSEGV) == NOTIFY_STOP)
15679 return;
15680+
15681+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15682+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
15683+ die("PAX: suspicious general protection fault", regs, error_code);
15684+ else
15685+#endif
15686+
15687 die("general protection fault", regs, error_code);
15688 }
15689
15690@@ -433,6 +462,17 @@ static notrace __kprobes void default_do
15691 dotraplinkage notrace __kprobes void
15692 do_nmi(struct pt_regs *regs, long error_code)
15693 {
15694+
15695+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15696+ if (!user_mode(regs)) {
15697+ unsigned long cs = regs->cs & 0xFFFF;
15698+ unsigned long ip = ktva_ktla(regs->ip);
15699+
15700+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
15701+ regs->ip = ip;
15702+ }
15703+#endif
15704+
15705 nmi_enter();
15706
15707 inc_irq_stat(__nmi_count);
15708@@ -569,7 +609,7 @@ dotraplinkage void __kprobes do_debug(st
15709 /* It's safe to allow irq's after DR6 has been saved */
15710 preempt_conditional_sti(regs);
15711
15712- if (regs->flags & X86_VM_MASK) {
15713+ if (v8086_mode(regs)) {
15714 handle_vm86_trap((struct kernel_vm86_regs *) regs,
15715 error_code, 1);
15716 preempt_conditional_cli(regs);
15717@@ -583,7 +623,7 @@ dotraplinkage void __kprobes do_debug(st
15718 * We already checked v86 mode above, so we can check for kernel mode
15719 * by just checking the CPL of CS.
15720 */
15721- if ((dr6 & DR_STEP) && !user_mode(regs)) {
15722+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
15723 tsk->thread.debugreg6 &= ~DR_STEP;
15724 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
15725 regs->flags &= ~X86_EFLAGS_TF;
15726@@ -612,7 +652,7 @@ void math_error(struct pt_regs *regs, in
15727 return;
15728 conditional_sti(regs);
15729
15730- if (!user_mode_vm(regs))
15731+ if (!user_mode(regs))
15732 {
15733 if (!fixup_exception(regs)) {
15734 task->thread.error_code = error_code;
15735@@ -723,7 +763,7 @@ asmlinkage void __attribute__((weak)) sm
15736 void __math_state_restore(void)
15737 {
15738 struct thread_info *thread = current_thread_info();
15739- struct task_struct *tsk = thread->task;
15740+ struct task_struct *tsk = current;
15741
15742 /*
15743 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
15744@@ -750,8 +790,7 @@ void __math_state_restore(void)
15745 */
15746 asmlinkage void math_state_restore(void)
15747 {
15748- struct thread_info *thread = current_thread_info();
15749- struct task_struct *tsk = thread->task;
15750+ struct task_struct *tsk = current;
15751
15752 if (!tsk_used_math(tsk)) {
15753 local_irq_enable();
15754diff -urNp linux-3.0.4/arch/x86/kernel/verify_cpu.S linux-3.0.4/arch/x86/kernel/verify_cpu.S
15755--- linux-3.0.4/arch/x86/kernel/verify_cpu.S 2011-07-21 22:17:23.000000000 -0400
15756+++ linux-3.0.4/arch/x86/kernel/verify_cpu.S 2011-08-23 21:48:14.000000000 -0400
15757@@ -20,6 +20,7 @@
15758 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
15759 * arch/x86/kernel/trampoline_64.S: secondary processor verification
15760 * arch/x86/kernel/head_32.S: processor startup
15761+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
15762 *
15763 * verify_cpu, returns the status of longmode and SSE in register %eax.
15764 * 0: Success 1: Failure
15765diff -urNp linux-3.0.4/arch/x86/kernel/vm86_32.c linux-3.0.4/arch/x86/kernel/vm86_32.c
15766--- linux-3.0.4/arch/x86/kernel/vm86_32.c 2011-07-21 22:17:23.000000000 -0400
15767+++ linux-3.0.4/arch/x86/kernel/vm86_32.c 2011-08-23 21:48:14.000000000 -0400
15768@@ -41,6 +41,7 @@
15769 #include <linux/ptrace.h>
15770 #include <linux/audit.h>
15771 #include <linux/stddef.h>
15772+#include <linux/grsecurity.h>
15773
15774 #include <asm/uaccess.h>
15775 #include <asm/io.h>
15776@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
15777 do_exit(SIGSEGV);
15778 }
15779
15780- tss = &per_cpu(init_tss, get_cpu());
15781+ tss = init_tss + get_cpu();
15782 current->thread.sp0 = current->thread.saved_sp0;
15783 current->thread.sysenter_cs = __KERNEL_CS;
15784 load_sp0(tss, &current->thread);
15785@@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __use
15786 struct task_struct *tsk;
15787 int tmp, ret = -EPERM;
15788
15789+#ifdef CONFIG_GRKERNSEC_VM86
15790+ if (!capable(CAP_SYS_RAWIO)) {
15791+ gr_handle_vm86();
15792+ goto out;
15793+ }
15794+#endif
15795+
15796 tsk = current;
15797 if (tsk->thread.saved_sp0)
15798 goto out;
15799@@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned
15800 int tmp, ret;
15801 struct vm86plus_struct __user *v86;
15802
15803+#ifdef CONFIG_GRKERNSEC_VM86
15804+ if (!capable(CAP_SYS_RAWIO)) {
15805+ gr_handle_vm86();
15806+ ret = -EPERM;
15807+ goto out;
15808+ }
15809+#endif
15810+
15811 tsk = current;
15812 switch (cmd) {
15813 case VM86_REQUEST_IRQ:
15814@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
15815 tsk->thread.saved_fs = info->regs32->fs;
15816 tsk->thread.saved_gs = get_user_gs(info->regs32);
15817
15818- tss = &per_cpu(init_tss, get_cpu());
15819+ tss = init_tss + get_cpu();
15820 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
15821 if (cpu_has_sep)
15822 tsk->thread.sysenter_cs = 0;
15823@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
15824 goto cannot_handle;
15825 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
15826 goto cannot_handle;
15827- intr_ptr = (unsigned long __user *) (i << 2);
15828+ intr_ptr = (__force unsigned long __user *) (i << 2);
15829 if (get_user(segoffs, intr_ptr))
15830 goto cannot_handle;
15831 if ((segoffs >> 16) == BIOSSEG)
15832diff -urNp linux-3.0.4/arch/x86/kernel/vmlinux.lds.S linux-3.0.4/arch/x86/kernel/vmlinux.lds.S
15833--- linux-3.0.4/arch/x86/kernel/vmlinux.lds.S 2011-07-21 22:17:23.000000000 -0400
15834+++ linux-3.0.4/arch/x86/kernel/vmlinux.lds.S 2011-08-23 21:47:55.000000000 -0400
15835@@ -26,6 +26,13 @@
15836 #include <asm/page_types.h>
15837 #include <asm/cache.h>
15838 #include <asm/boot.h>
15839+#include <asm/segment.h>
15840+
15841+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15842+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
15843+#else
15844+#define __KERNEL_TEXT_OFFSET 0
15845+#endif
15846
15847 #undef i386 /* in case the preprocessor is a 32bit one */
15848
15849@@ -69,31 +76,46 @@ jiffies_64 = jiffies;
15850
15851 PHDRS {
15852 text PT_LOAD FLAGS(5); /* R_E */
15853+#ifdef CONFIG_X86_32
15854+ module PT_LOAD FLAGS(5); /* R_E */
15855+#endif
15856+#ifdef CONFIG_XEN
15857+ rodata PT_LOAD FLAGS(5); /* R_E */
15858+#else
15859+ rodata PT_LOAD FLAGS(4); /* R__ */
15860+#endif
15861 data PT_LOAD FLAGS(6); /* RW_ */
15862 #ifdef CONFIG_X86_64
15863 user PT_LOAD FLAGS(5); /* R_E */
15864+#endif
15865+ init.begin PT_LOAD FLAGS(6); /* RW_ */
15866 #ifdef CONFIG_SMP
15867 percpu PT_LOAD FLAGS(6); /* RW_ */
15868 #endif
15869+ text.init PT_LOAD FLAGS(5); /* R_E */
15870+ text.exit PT_LOAD FLAGS(5); /* R_E */
15871 init PT_LOAD FLAGS(7); /* RWE */
15872-#endif
15873 note PT_NOTE FLAGS(0); /* ___ */
15874 }
15875
15876 SECTIONS
15877 {
15878 #ifdef CONFIG_X86_32
15879- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
15880- phys_startup_32 = startup_32 - LOAD_OFFSET;
15881+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
15882 #else
15883- . = __START_KERNEL;
15884- phys_startup_64 = startup_64 - LOAD_OFFSET;
15885+ . = __START_KERNEL;
15886 #endif
15887
15888 /* Text and read-only data */
15889- .text : AT(ADDR(.text) - LOAD_OFFSET) {
15890- _text = .;
15891+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
15892 /* bootstrapping code */
15893+#ifdef CONFIG_X86_32
15894+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15895+#else
15896+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15897+#endif
15898+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15899+ _text = .;
15900 HEAD_TEXT
15901 #ifdef CONFIG_X86_32
15902 . = ALIGN(PAGE_SIZE);
15903@@ -109,13 +131,47 @@ SECTIONS
15904 IRQENTRY_TEXT
15905 *(.fixup)
15906 *(.gnu.warning)
15907- /* End of text section */
15908- _etext = .;
15909 } :text = 0x9090
15910
15911- NOTES :text :note
15912+ . += __KERNEL_TEXT_OFFSET;
15913+
15914+#ifdef CONFIG_X86_32
15915+ . = ALIGN(PAGE_SIZE);
15916+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
15917+
15918+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
15919+ MODULES_EXEC_VADDR = .;
15920+ BYTE(0)
15921+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
15922+ . = ALIGN(HPAGE_SIZE);
15923+ MODULES_EXEC_END = . - 1;
15924+#endif
15925+
15926+ } :module
15927+#endif
15928+
15929+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
15930+ /* End of text section */
15931+ _etext = . - __KERNEL_TEXT_OFFSET;
15932+ }
15933+
15934+#ifdef CONFIG_X86_32
15935+ . = ALIGN(PAGE_SIZE);
15936+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
15937+ *(.idt)
15938+ . = ALIGN(PAGE_SIZE);
15939+ *(.empty_zero_page)
15940+ *(.initial_pg_fixmap)
15941+ *(.initial_pg_pmd)
15942+ *(.initial_page_table)
15943+ *(.swapper_pg_dir)
15944+ } :rodata
15945+#endif
15946+
15947+ . = ALIGN(PAGE_SIZE);
15948+ NOTES :rodata :note
15949
15950- EXCEPTION_TABLE(16) :text = 0x9090
15951+ EXCEPTION_TABLE(16) :rodata
15952
15953 #if defined(CONFIG_DEBUG_RODATA)
15954 /* .text should occupy whole number of pages */
15955@@ -127,16 +183,20 @@ SECTIONS
15956
15957 /* Data */
15958 .data : AT(ADDR(.data) - LOAD_OFFSET) {
15959+
15960+#ifdef CONFIG_PAX_KERNEXEC
15961+ . = ALIGN(HPAGE_SIZE);
15962+#else
15963+ . = ALIGN(PAGE_SIZE);
15964+#endif
15965+
15966 /* Start of data section */
15967 _sdata = .;
15968
15969 /* init_task */
15970 INIT_TASK_DATA(THREAD_SIZE)
15971
15972-#ifdef CONFIG_X86_32
15973- /* 32 bit has nosave before _edata */
15974 NOSAVE_DATA
15975-#endif
15976
15977 PAGE_ALIGNED_DATA(PAGE_SIZE)
15978
15979@@ -208,12 +268,19 @@ SECTIONS
15980 #endif /* CONFIG_X86_64 */
15981
15982 /* Init code and data - will be freed after init */
15983- . = ALIGN(PAGE_SIZE);
15984 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
15985+ BYTE(0)
15986+
15987+#ifdef CONFIG_PAX_KERNEXEC
15988+ . = ALIGN(HPAGE_SIZE);
15989+#else
15990+ . = ALIGN(PAGE_SIZE);
15991+#endif
15992+
15993 __init_begin = .; /* paired with __init_end */
15994- }
15995+ } :init.begin
15996
15997-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
15998+#ifdef CONFIG_SMP
15999 /*
16000 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
16001 * output PHDR, so the next output section - .init.text - should
16002@@ -222,12 +289,27 @@ SECTIONS
16003 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
16004 #endif
16005
16006- INIT_TEXT_SECTION(PAGE_SIZE)
16007-#ifdef CONFIG_X86_64
16008- :init
16009-#endif
16010+ . = ALIGN(PAGE_SIZE);
16011+ init_begin = .;
16012+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
16013+ VMLINUX_SYMBOL(_sinittext) = .;
16014+ INIT_TEXT
16015+ VMLINUX_SYMBOL(_einittext) = .;
16016+ . = ALIGN(PAGE_SIZE);
16017+ } :text.init
16018
16019- INIT_DATA_SECTION(16)
16020+ /*
16021+ * .exit.text is discard at runtime, not link time, to deal with
16022+ * references from .altinstructions and .eh_frame
16023+ */
16024+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
16025+ EXIT_TEXT
16026+ . = ALIGN(16);
16027+ } :text.exit
16028+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
16029+
16030+ . = ALIGN(PAGE_SIZE);
16031+ INIT_DATA_SECTION(16) :init
16032
16033 /*
16034 * Code and data for a variety of lowlevel trampolines, to be
16035@@ -301,19 +383,12 @@ SECTIONS
16036 }
16037
16038 . = ALIGN(8);
16039- /*
16040- * .exit.text is discard at runtime, not link time, to deal with
16041- * references from .altinstructions and .eh_frame
16042- */
16043- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
16044- EXIT_TEXT
16045- }
16046
16047 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
16048 EXIT_DATA
16049 }
16050
16051-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
16052+#ifndef CONFIG_SMP
16053 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
16054 #endif
16055
16056@@ -332,16 +407,10 @@ SECTIONS
16057 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
16058 __smp_locks = .;
16059 *(.smp_locks)
16060- . = ALIGN(PAGE_SIZE);
16061 __smp_locks_end = .;
16062+ . = ALIGN(PAGE_SIZE);
16063 }
16064
16065-#ifdef CONFIG_X86_64
16066- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
16067- NOSAVE_DATA
16068- }
16069-#endif
16070-
16071 /* BSS */
16072 . = ALIGN(PAGE_SIZE);
16073 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
16074@@ -357,6 +426,7 @@ SECTIONS
16075 __brk_base = .;
16076 . += 64 * 1024; /* 64k alignment slop space */
16077 *(.brk_reservation) /* areas brk users have reserved */
16078+ . = ALIGN(HPAGE_SIZE);
16079 __brk_limit = .;
16080 }
16081
16082@@ -383,13 +453,12 @@ SECTIONS
16083 * for the boot processor.
16084 */
16085 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
16086-INIT_PER_CPU(gdt_page);
16087 INIT_PER_CPU(irq_stack_union);
16088
16089 /*
16090 * Build-time check on the image size:
16091 */
16092-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
16093+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
16094 "kernel image bigger than KERNEL_IMAGE_SIZE");
16095
16096 #ifdef CONFIG_SMP
16097diff -urNp linux-3.0.4/arch/x86/kernel/vsyscall_64.c linux-3.0.4/arch/x86/kernel/vsyscall_64.c
16098--- linux-3.0.4/arch/x86/kernel/vsyscall_64.c 2011-07-21 22:17:23.000000000 -0400
16099+++ linux-3.0.4/arch/x86/kernel/vsyscall_64.c 2011-08-23 21:47:55.000000000 -0400
16100@@ -53,7 +53,7 @@ DEFINE_VVAR(int, vgetcpu_mode);
16101 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
16102 {
16103 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
16104- .sysctl_enabled = 1,
16105+ .sysctl_enabled = 0,
16106 };
16107
16108 void update_vsyscall_tz(void)
16109@@ -231,7 +231,7 @@ static long __vsyscall(3) venosys_1(void
16110 static ctl_table kernel_table2[] = {
16111 { .procname = "vsyscall64",
16112 .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int),
16113- .mode = 0644,
16114+ .mode = 0444,
16115 .proc_handler = proc_dointvec },
16116 {}
16117 };
16118diff -urNp linux-3.0.4/arch/x86/kernel/x8664_ksyms_64.c linux-3.0.4/arch/x86/kernel/x8664_ksyms_64.c
16119--- linux-3.0.4/arch/x86/kernel/x8664_ksyms_64.c 2011-07-21 22:17:23.000000000 -0400
16120+++ linux-3.0.4/arch/x86/kernel/x8664_ksyms_64.c 2011-08-23 21:47:55.000000000 -0400
16121@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
16122 EXPORT_SYMBOL(copy_user_generic_string);
16123 EXPORT_SYMBOL(copy_user_generic_unrolled);
16124 EXPORT_SYMBOL(__copy_user_nocache);
16125-EXPORT_SYMBOL(_copy_from_user);
16126-EXPORT_SYMBOL(_copy_to_user);
16127
16128 EXPORT_SYMBOL(copy_page);
16129 EXPORT_SYMBOL(clear_page);
16130diff -urNp linux-3.0.4/arch/x86/kernel/xsave.c linux-3.0.4/arch/x86/kernel/xsave.c
16131--- linux-3.0.4/arch/x86/kernel/xsave.c 2011-07-21 22:17:23.000000000 -0400
16132+++ linux-3.0.4/arch/x86/kernel/xsave.c 2011-10-06 04:17:55.000000000 -0400
16133@@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_
16134 fx_sw_user->xstate_size > fx_sw_user->extended_size)
16135 return -EINVAL;
16136
16137- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
16138+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
16139 fx_sw_user->extended_size -
16140 FP_XSTATE_MAGIC2_SIZE));
16141 if (err)
16142@@ -267,7 +267,7 @@ fx_only:
16143 * the other extended state.
16144 */
16145 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
16146- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
16147+ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
16148 }
16149
16150 /*
16151@@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf
16152 if (use_xsave())
16153 err = restore_user_xstate(buf);
16154 else
16155- err = fxrstor_checking((__force struct i387_fxsave_struct *)
16156+ err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
16157 buf);
16158 if (unlikely(err)) {
16159 /*
16160diff -urNp linux-3.0.4/arch/x86/kvm/emulate.c linux-3.0.4/arch/x86/kvm/emulate.c
16161--- linux-3.0.4/arch/x86/kvm/emulate.c 2011-07-21 22:17:23.000000000 -0400
16162+++ linux-3.0.4/arch/x86/kvm/emulate.c 2011-08-23 21:47:55.000000000 -0400
16163@@ -96,7 +96,7 @@
16164 #define Src2ImmByte (2<<29)
16165 #define Src2One (3<<29)
16166 #define Src2Imm (4<<29)
16167-#define Src2Mask (7<<29)
16168+#define Src2Mask (7U<<29)
16169
16170 #define X2(x...) x, x
16171 #define X3(x...) X2(x), x
16172@@ -207,6 +207,7 @@ struct gprefix {
16173
16174 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
16175 do { \
16176+ unsigned long _tmp; \
16177 __asm__ __volatile__ ( \
16178 _PRE_EFLAGS("0", "4", "2") \
16179 _op _suffix " %"_x"3,%1; " \
16180@@ -220,8 +221,6 @@ struct gprefix {
16181 /* Raw emulation: instruction has two explicit operands. */
16182 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
16183 do { \
16184- unsigned long _tmp; \
16185- \
16186 switch ((_dst).bytes) { \
16187 case 2: \
16188 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
16189@@ -237,7 +236,6 @@ struct gprefix {
16190
16191 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
16192 do { \
16193- unsigned long _tmp; \
16194 switch ((_dst).bytes) { \
16195 case 1: \
16196 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
16197diff -urNp linux-3.0.4/arch/x86/kvm/lapic.c linux-3.0.4/arch/x86/kvm/lapic.c
16198--- linux-3.0.4/arch/x86/kvm/lapic.c 2011-07-21 22:17:23.000000000 -0400
16199+++ linux-3.0.4/arch/x86/kvm/lapic.c 2011-08-23 21:47:55.000000000 -0400
16200@@ -53,7 +53,7 @@
16201 #define APIC_BUS_CYCLE_NS 1
16202
16203 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
16204-#define apic_debug(fmt, arg...)
16205+#define apic_debug(fmt, arg...) do {} while (0)
16206
16207 #define APIC_LVT_NUM 6
16208 /* 14 is the version for Xeon and Pentium 8.4.8*/
16209diff -urNp linux-3.0.4/arch/x86/kvm/mmu.c linux-3.0.4/arch/x86/kvm/mmu.c
16210--- linux-3.0.4/arch/x86/kvm/mmu.c 2011-07-21 22:17:23.000000000 -0400
16211+++ linux-3.0.4/arch/x86/kvm/mmu.c 2011-08-23 21:47:55.000000000 -0400
16212@@ -3238,7 +3238,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
16213
16214 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
16215
16216- invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
16217+ invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
16218
16219 /*
16220 * Assume that the pte write on a page table of the same type
16221@@ -3270,7 +3270,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
16222 }
16223
16224 spin_lock(&vcpu->kvm->mmu_lock);
16225- if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
16226+ if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
16227 gentry = 0;
16228 kvm_mmu_free_some_pages(vcpu);
16229 ++vcpu->kvm->stat.mmu_pte_write;
16230diff -urNp linux-3.0.4/arch/x86/kvm/paging_tmpl.h linux-3.0.4/arch/x86/kvm/paging_tmpl.h
16231--- linux-3.0.4/arch/x86/kvm/paging_tmpl.h 2011-07-21 22:17:23.000000000 -0400
16232+++ linux-3.0.4/arch/x86/kvm/paging_tmpl.h 2011-10-06 04:17:55.000000000 -0400
16233@@ -182,7 +182,7 @@ walk:
16234 break;
16235 }
16236
16237- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
16238+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
16239 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte)))) {
16240 present = false;
16241 break;
16242@@ -583,6 +583,8 @@ static int FNAME(page_fault)(struct kvm_
16243 unsigned long mmu_seq;
16244 bool map_writable;
16245
16246+ pax_track_stack();
16247+
16248 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
16249
16250 r = mmu_topup_memory_caches(vcpu);
16251@@ -703,7 +705,7 @@ static void FNAME(invlpg)(struct kvm_vcp
16252 if (need_flush)
16253 kvm_flush_remote_tlbs(vcpu->kvm);
16254
16255- atomic_inc(&vcpu->kvm->arch.invlpg_counter);
16256+ atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
16257
16258 spin_unlock(&vcpu->kvm->mmu_lock);
16259
16260diff -urNp linux-3.0.4/arch/x86/kvm/svm.c linux-3.0.4/arch/x86/kvm/svm.c
16261--- linux-3.0.4/arch/x86/kvm/svm.c 2011-07-21 22:17:23.000000000 -0400
16262+++ linux-3.0.4/arch/x86/kvm/svm.c 2011-08-23 21:47:55.000000000 -0400
16263@@ -3377,7 +3377,11 @@ static void reload_tss(struct kvm_vcpu *
16264 int cpu = raw_smp_processor_id();
16265
16266 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
16267+
16268+ pax_open_kernel();
16269 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
16270+ pax_close_kernel();
16271+
16272 load_TR_desc();
16273 }
16274
16275@@ -3755,6 +3759,10 @@ static void svm_vcpu_run(struct kvm_vcpu
16276 #endif
16277 #endif
16278
16279+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16280+ __set_fs(current_thread_info()->addr_limit);
16281+#endif
16282+
16283 reload_tss(vcpu);
16284
16285 local_irq_disable();
16286diff -urNp linux-3.0.4/arch/x86/kvm/vmx.c linux-3.0.4/arch/x86/kvm/vmx.c
16287--- linux-3.0.4/arch/x86/kvm/vmx.c 2011-07-21 22:17:23.000000000 -0400
16288+++ linux-3.0.4/arch/x86/kvm/vmx.c 2011-08-23 21:47:55.000000000 -0400
16289@@ -797,7 +797,11 @@ static void reload_tss(void)
16290 struct desc_struct *descs;
16291
16292 descs = (void *)gdt->address;
16293+
16294+ pax_open_kernel();
16295 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
16296+ pax_close_kernel();
16297+
16298 load_TR_desc();
16299 }
16300
16301@@ -1747,8 +1751,11 @@ static __init int hardware_setup(void)
16302 if (!cpu_has_vmx_flexpriority())
16303 flexpriority_enabled = 0;
16304
16305- if (!cpu_has_vmx_tpr_shadow())
16306- kvm_x86_ops->update_cr8_intercept = NULL;
16307+ if (!cpu_has_vmx_tpr_shadow()) {
16308+ pax_open_kernel();
16309+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
16310+ pax_close_kernel();
16311+ }
16312
16313 if (enable_ept && !cpu_has_vmx_ept_2m_page())
16314 kvm_disable_largepages();
16315@@ -2814,7 +2821,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
16316 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
16317
16318 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
16319- vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
16320+ vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
16321 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
16322 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
16323 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
16324@@ -4211,6 +4218,12 @@ static void __noclone vmx_vcpu_run(struc
16325 "jmp .Lkvm_vmx_return \n\t"
16326 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
16327 ".Lkvm_vmx_return: "
16328+
16329+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16330+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
16331+ ".Lkvm_vmx_return2: "
16332+#endif
16333+
16334 /* Save guest registers, load host registers, keep flags */
16335 "mov %0, %c[wordsize](%%"R"sp) \n\t"
16336 "pop %0 \n\t"
16337@@ -4259,6 +4272,11 @@ static void __noclone vmx_vcpu_run(struc
16338 #endif
16339 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
16340 [wordsize]"i"(sizeof(ulong))
16341+
16342+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16343+ ,[cs]"i"(__KERNEL_CS)
16344+#endif
16345+
16346 : "cc", "memory"
16347 , R"ax", R"bx", R"di", R"si"
16348 #ifdef CONFIG_X86_64
16349@@ -4276,7 +4294,16 @@ static void __noclone vmx_vcpu_run(struc
16350
16351 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
16352
16353- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
16354+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
16355+
16356+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16357+ loadsegment(fs, __KERNEL_PERCPU);
16358+#endif
16359+
16360+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16361+ __set_fs(current_thread_info()->addr_limit);
16362+#endif
16363+
16364 vmx->launched = 1;
16365
16366 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
16367diff -urNp linux-3.0.4/arch/x86/kvm/x86.c linux-3.0.4/arch/x86/kvm/x86.c
16368--- linux-3.0.4/arch/x86/kvm/x86.c 2011-07-21 22:17:23.000000000 -0400
16369+++ linux-3.0.4/arch/x86/kvm/x86.c 2011-10-06 04:17:55.000000000 -0400
16370@@ -1313,8 +1313,8 @@ static int xen_hvm_config(struct kvm_vcp
16371 {
16372 struct kvm *kvm = vcpu->kvm;
16373 int lm = is_long_mode(vcpu);
16374- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
16375- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
16376+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
16377+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
16378 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
16379 : kvm->arch.xen_hvm_config.blob_size_32;
16380 u32 page_num = data & ~PAGE_MASK;
16381@@ -2057,6 +2057,8 @@ long kvm_arch_dev_ioctl(struct file *fil
16382 if (n < msr_list.nmsrs)
16383 goto out;
16384 r = -EFAULT;
16385+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
16386+ goto out;
16387 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
16388 num_msrs_to_save * sizeof(u32)))
16389 goto out;
16390@@ -2229,15 +2231,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
16391 struct kvm_cpuid2 *cpuid,
16392 struct kvm_cpuid_entry2 __user *entries)
16393 {
16394- int r;
16395+ int r, i;
16396
16397 r = -E2BIG;
16398 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
16399 goto out;
16400 r = -EFAULT;
16401- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
16402- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16403+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16404 goto out;
16405+ for (i = 0; i < cpuid->nent; ++i) {
16406+ struct kvm_cpuid_entry2 cpuid_entry;
16407+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
16408+ goto out;
16409+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
16410+ }
16411 vcpu->arch.cpuid_nent = cpuid->nent;
16412 kvm_apic_set_version(vcpu);
16413 kvm_x86_ops->cpuid_update(vcpu);
16414@@ -2252,15 +2259,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
16415 struct kvm_cpuid2 *cpuid,
16416 struct kvm_cpuid_entry2 __user *entries)
16417 {
16418- int r;
16419+ int r, i;
16420
16421 r = -E2BIG;
16422 if (cpuid->nent < vcpu->arch.cpuid_nent)
16423 goto out;
16424 r = -EFAULT;
16425- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
16426- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16427+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16428 goto out;
16429+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
16430+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
16431+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
16432+ goto out;
16433+ }
16434 return 0;
16435
16436 out:
16437@@ -2579,7 +2590,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
16438 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
16439 struct kvm_interrupt *irq)
16440 {
16441- if (irq->irq < 0 || irq->irq >= 256)
16442+ if (irq->irq >= 256)
16443 return -EINVAL;
16444 if (irqchip_in_kernel(vcpu->kvm))
16445 return -ENXIO;
16446@@ -4878,7 +4889,7 @@ void kvm_after_handle_nmi(struct kvm_vcp
16447 }
16448 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
16449
16450-int kvm_arch_init(void *opaque)
16451+int kvm_arch_init(const void *opaque)
16452 {
16453 int r;
16454 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
16455diff -urNp linux-3.0.4/arch/x86/lguest/boot.c linux-3.0.4/arch/x86/lguest/boot.c
16456--- linux-3.0.4/arch/x86/lguest/boot.c 2011-07-21 22:17:23.000000000 -0400
16457+++ linux-3.0.4/arch/x86/lguest/boot.c 2011-08-23 21:47:55.000000000 -0400
16458@@ -1176,9 +1176,10 @@ static __init int early_put_chars(u32 vt
16459 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
16460 * Launcher to reboot us.
16461 */
16462-static void lguest_restart(char *reason)
16463+static __noreturn void lguest_restart(char *reason)
16464 {
16465 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
16466+ BUG();
16467 }
16468
16469 /*G:050
16470diff -urNp linux-3.0.4/arch/x86/lib/atomic64_32.c linux-3.0.4/arch/x86/lib/atomic64_32.c
16471--- linux-3.0.4/arch/x86/lib/atomic64_32.c 2011-07-21 22:17:23.000000000 -0400
16472+++ linux-3.0.4/arch/x86/lib/atomic64_32.c 2011-08-23 21:47:55.000000000 -0400
16473@@ -8,18 +8,30 @@
16474
16475 long long atomic64_read_cx8(long long, const atomic64_t *v);
16476 EXPORT_SYMBOL(atomic64_read_cx8);
16477+long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16478+EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
16479 long long atomic64_set_cx8(long long, const atomic64_t *v);
16480 EXPORT_SYMBOL(atomic64_set_cx8);
16481+long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16482+EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
16483 long long atomic64_xchg_cx8(long long, unsigned high);
16484 EXPORT_SYMBOL(atomic64_xchg_cx8);
16485 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
16486 EXPORT_SYMBOL(atomic64_add_return_cx8);
16487+long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16488+EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
16489 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
16490 EXPORT_SYMBOL(atomic64_sub_return_cx8);
16491+long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16492+EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
16493 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
16494 EXPORT_SYMBOL(atomic64_inc_return_cx8);
16495+long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16496+EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
16497 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
16498 EXPORT_SYMBOL(atomic64_dec_return_cx8);
16499+long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16500+EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
16501 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
16502 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
16503 int atomic64_inc_not_zero_cx8(atomic64_t *v);
16504@@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
16505 #ifndef CONFIG_X86_CMPXCHG64
16506 long long atomic64_read_386(long long, const atomic64_t *v);
16507 EXPORT_SYMBOL(atomic64_read_386);
16508+long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
16509+EXPORT_SYMBOL(atomic64_read_unchecked_386);
16510 long long atomic64_set_386(long long, const atomic64_t *v);
16511 EXPORT_SYMBOL(atomic64_set_386);
16512+long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
16513+EXPORT_SYMBOL(atomic64_set_unchecked_386);
16514 long long atomic64_xchg_386(long long, unsigned high);
16515 EXPORT_SYMBOL(atomic64_xchg_386);
16516 long long atomic64_add_return_386(long long a, atomic64_t *v);
16517 EXPORT_SYMBOL(atomic64_add_return_386);
16518+long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16519+EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
16520 long long atomic64_sub_return_386(long long a, atomic64_t *v);
16521 EXPORT_SYMBOL(atomic64_sub_return_386);
16522+long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16523+EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
16524 long long atomic64_inc_return_386(long long a, atomic64_t *v);
16525 EXPORT_SYMBOL(atomic64_inc_return_386);
16526+long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16527+EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
16528 long long atomic64_dec_return_386(long long a, atomic64_t *v);
16529 EXPORT_SYMBOL(atomic64_dec_return_386);
16530+long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16531+EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
16532 long long atomic64_add_386(long long a, atomic64_t *v);
16533 EXPORT_SYMBOL(atomic64_add_386);
16534+long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
16535+EXPORT_SYMBOL(atomic64_add_unchecked_386);
16536 long long atomic64_sub_386(long long a, atomic64_t *v);
16537 EXPORT_SYMBOL(atomic64_sub_386);
16538+long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
16539+EXPORT_SYMBOL(atomic64_sub_unchecked_386);
16540 long long atomic64_inc_386(long long a, atomic64_t *v);
16541 EXPORT_SYMBOL(atomic64_inc_386);
16542+long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
16543+EXPORT_SYMBOL(atomic64_inc_unchecked_386);
16544 long long atomic64_dec_386(long long a, atomic64_t *v);
16545 EXPORT_SYMBOL(atomic64_dec_386);
16546+long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
16547+EXPORT_SYMBOL(atomic64_dec_unchecked_386);
16548 long long atomic64_dec_if_positive_386(atomic64_t *v);
16549 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
16550 int atomic64_inc_not_zero_386(atomic64_t *v);
16551diff -urNp linux-3.0.4/arch/x86/lib/atomic64_386_32.S linux-3.0.4/arch/x86/lib/atomic64_386_32.S
16552--- linux-3.0.4/arch/x86/lib/atomic64_386_32.S 2011-07-21 22:17:23.000000000 -0400
16553+++ linux-3.0.4/arch/x86/lib/atomic64_386_32.S 2011-08-23 21:47:55.000000000 -0400
16554@@ -48,6 +48,10 @@ BEGIN(read)
16555 movl (v), %eax
16556 movl 4(v), %edx
16557 RET_ENDP
16558+BEGIN(read_unchecked)
16559+ movl (v), %eax
16560+ movl 4(v), %edx
16561+RET_ENDP
16562 #undef v
16563
16564 #define v %esi
16565@@ -55,6 +59,10 @@ BEGIN(set)
16566 movl %ebx, (v)
16567 movl %ecx, 4(v)
16568 RET_ENDP
16569+BEGIN(set_unchecked)
16570+ movl %ebx, (v)
16571+ movl %ecx, 4(v)
16572+RET_ENDP
16573 #undef v
16574
16575 #define v %esi
16576@@ -70,6 +78,20 @@ RET_ENDP
16577 BEGIN(add)
16578 addl %eax, (v)
16579 adcl %edx, 4(v)
16580+
16581+#ifdef CONFIG_PAX_REFCOUNT
16582+ jno 0f
16583+ subl %eax, (v)
16584+ sbbl %edx, 4(v)
16585+ int $4
16586+0:
16587+ _ASM_EXTABLE(0b, 0b)
16588+#endif
16589+
16590+RET_ENDP
16591+BEGIN(add_unchecked)
16592+ addl %eax, (v)
16593+ adcl %edx, 4(v)
16594 RET_ENDP
16595 #undef v
16596
16597@@ -77,6 +99,24 @@ RET_ENDP
16598 BEGIN(add_return)
16599 addl (v), %eax
16600 adcl 4(v), %edx
16601+
16602+#ifdef CONFIG_PAX_REFCOUNT
16603+ into
16604+1234:
16605+ _ASM_EXTABLE(1234b, 2f)
16606+#endif
16607+
16608+ movl %eax, (v)
16609+ movl %edx, 4(v)
16610+
16611+#ifdef CONFIG_PAX_REFCOUNT
16612+2:
16613+#endif
16614+
16615+RET_ENDP
16616+BEGIN(add_return_unchecked)
16617+ addl (v), %eax
16618+ adcl 4(v), %edx
16619 movl %eax, (v)
16620 movl %edx, 4(v)
16621 RET_ENDP
16622@@ -86,6 +126,20 @@ RET_ENDP
16623 BEGIN(sub)
16624 subl %eax, (v)
16625 sbbl %edx, 4(v)
16626+
16627+#ifdef CONFIG_PAX_REFCOUNT
16628+ jno 0f
16629+ addl %eax, (v)
16630+ adcl %edx, 4(v)
16631+ int $4
16632+0:
16633+ _ASM_EXTABLE(0b, 0b)
16634+#endif
16635+
16636+RET_ENDP
16637+BEGIN(sub_unchecked)
16638+ subl %eax, (v)
16639+ sbbl %edx, 4(v)
16640 RET_ENDP
16641 #undef v
16642
16643@@ -96,6 +150,27 @@ BEGIN(sub_return)
16644 sbbl $0, %edx
16645 addl (v), %eax
16646 adcl 4(v), %edx
16647+
16648+#ifdef CONFIG_PAX_REFCOUNT
16649+ into
16650+1234:
16651+ _ASM_EXTABLE(1234b, 2f)
16652+#endif
16653+
16654+ movl %eax, (v)
16655+ movl %edx, 4(v)
16656+
16657+#ifdef CONFIG_PAX_REFCOUNT
16658+2:
16659+#endif
16660+
16661+RET_ENDP
16662+BEGIN(sub_return_unchecked)
16663+ negl %edx
16664+ negl %eax
16665+ sbbl $0, %edx
16666+ addl (v), %eax
16667+ adcl 4(v), %edx
16668 movl %eax, (v)
16669 movl %edx, 4(v)
16670 RET_ENDP
16671@@ -105,6 +180,20 @@ RET_ENDP
16672 BEGIN(inc)
16673 addl $1, (v)
16674 adcl $0, 4(v)
16675+
16676+#ifdef CONFIG_PAX_REFCOUNT
16677+ jno 0f
16678+ subl $1, (v)
16679+ sbbl $0, 4(v)
16680+ int $4
16681+0:
16682+ _ASM_EXTABLE(0b, 0b)
16683+#endif
16684+
16685+RET_ENDP
16686+BEGIN(inc_unchecked)
16687+ addl $1, (v)
16688+ adcl $0, 4(v)
16689 RET_ENDP
16690 #undef v
16691
16692@@ -114,6 +203,26 @@ BEGIN(inc_return)
16693 movl 4(v), %edx
16694 addl $1, %eax
16695 adcl $0, %edx
16696+
16697+#ifdef CONFIG_PAX_REFCOUNT
16698+ into
16699+1234:
16700+ _ASM_EXTABLE(1234b, 2f)
16701+#endif
16702+
16703+ movl %eax, (v)
16704+ movl %edx, 4(v)
16705+
16706+#ifdef CONFIG_PAX_REFCOUNT
16707+2:
16708+#endif
16709+
16710+RET_ENDP
16711+BEGIN(inc_return_unchecked)
16712+ movl (v), %eax
16713+ movl 4(v), %edx
16714+ addl $1, %eax
16715+ adcl $0, %edx
16716 movl %eax, (v)
16717 movl %edx, 4(v)
16718 RET_ENDP
16719@@ -123,6 +232,20 @@ RET_ENDP
16720 BEGIN(dec)
16721 subl $1, (v)
16722 sbbl $0, 4(v)
16723+
16724+#ifdef CONFIG_PAX_REFCOUNT
16725+ jno 0f
16726+ addl $1, (v)
16727+ adcl $0, 4(v)
16728+ int $4
16729+0:
16730+ _ASM_EXTABLE(0b, 0b)
16731+#endif
16732+
16733+RET_ENDP
16734+BEGIN(dec_unchecked)
16735+ subl $1, (v)
16736+ sbbl $0, 4(v)
16737 RET_ENDP
16738 #undef v
16739
16740@@ -132,6 +255,26 @@ BEGIN(dec_return)
16741 movl 4(v), %edx
16742 subl $1, %eax
16743 sbbl $0, %edx
16744+
16745+#ifdef CONFIG_PAX_REFCOUNT
16746+ into
16747+1234:
16748+ _ASM_EXTABLE(1234b, 2f)
16749+#endif
16750+
16751+ movl %eax, (v)
16752+ movl %edx, 4(v)
16753+
16754+#ifdef CONFIG_PAX_REFCOUNT
16755+2:
16756+#endif
16757+
16758+RET_ENDP
16759+BEGIN(dec_return_unchecked)
16760+ movl (v), %eax
16761+ movl 4(v), %edx
16762+ subl $1, %eax
16763+ sbbl $0, %edx
16764 movl %eax, (v)
16765 movl %edx, 4(v)
16766 RET_ENDP
16767@@ -143,6 +286,13 @@ BEGIN(add_unless)
16768 adcl %edx, %edi
16769 addl (v), %eax
16770 adcl 4(v), %edx
16771+
16772+#ifdef CONFIG_PAX_REFCOUNT
16773+ into
16774+1234:
16775+ _ASM_EXTABLE(1234b, 2f)
16776+#endif
16777+
16778 cmpl %eax, %esi
16779 je 3f
16780 1:
16781@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
16782 1:
16783 addl $1, %eax
16784 adcl $0, %edx
16785+
16786+#ifdef CONFIG_PAX_REFCOUNT
16787+ into
16788+1234:
16789+ _ASM_EXTABLE(1234b, 2f)
16790+#endif
16791+
16792 movl %eax, (v)
16793 movl %edx, 4(v)
16794 movl $1, %eax
16795@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
16796 movl 4(v), %edx
16797 subl $1, %eax
16798 sbbl $0, %edx
16799+
16800+#ifdef CONFIG_PAX_REFCOUNT
16801+ into
16802+1234:
16803+ _ASM_EXTABLE(1234b, 1f)
16804+#endif
16805+
16806 js 1f
16807 movl %eax, (v)
16808 movl %edx, 4(v)
16809diff -urNp linux-3.0.4/arch/x86/lib/atomic64_cx8_32.S linux-3.0.4/arch/x86/lib/atomic64_cx8_32.S
16810--- linux-3.0.4/arch/x86/lib/atomic64_cx8_32.S 2011-07-21 22:17:23.000000000 -0400
16811+++ linux-3.0.4/arch/x86/lib/atomic64_cx8_32.S 2011-10-06 04:17:55.000000000 -0400
16812@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
16813 CFI_STARTPROC
16814
16815 read64 %ecx
16816+ pax_force_retaddr
16817 ret
16818 CFI_ENDPROC
16819 ENDPROC(atomic64_read_cx8)
16820
16821+ENTRY(atomic64_read_unchecked_cx8)
16822+ CFI_STARTPROC
16823+
16824+ read64 %ecx
16825+ pax_force_retaddr
16826+ ret
16827+ CFI_ENDPROC
16828+ENDPROC(atomic64_read_unchecked_cx8)
16829+
16830 ENTRY(atomic64_set_cx8)
16831 CFI_STARTPROC
16832
16833@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
16834 cmpxchg8b (%esi)
16835 jne 1b
16836
16837+ pax_force_retaddr
16838 ret
16839 CFI_ENDPROC
16840 ENDPROC(atomic64_set_cx8)
16841
16842+ENTRY(atomic64_set_unchecked_cx8)
16843+ CFI_STARTPROC
16844+
16845+1:
16846+/* we don't need LOCK_PREFIX since aligned 64-bit writes
16847+ * are atomic on 586 and newer */
16848+ cmpxchg8b (%esi)
16849+ jne 1b
16850+
16851+ pax_force_retaddr
16852+ ret
16853+ CFI_ENDPROC
16854+ENDPROC(atomic64_set_unchecked_cx8)
16855+
16856 ENTRY(atomic64_xchg_cx8)
16857 CFI_STARTPROC
16858
16859@@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
16860 cmpxchg8b (%esi)
16861 jne 1b
16862
16863+ pax_force_retaddr
16864 ret
16865 CFI_ENDPROC
16866 ENDPROC(atomic64_xchg_cx8)
16867
16868-.macro addsub_return func ins insc
16869-ENTRY(atomic64_\func\()_return_cx8)
16870+.macro addsub_return func ins insc unchecked=""
16871+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
16872 CFI_STARTPROC
16873 SAVE ebp
16874 SAVE ebx
16875@@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
16876 movl %edx, %ecx
16877 \ins\()l %esi, %ebx
16878 \insc\()l %edi, %ecx
16879+
16880+.ifb \unchecked
16881+#ifdef CONFIG_PAX_REFCOUNT
16882+ into
16883+2:
16884+ _ASM_EXTABLE(2b, 3f)
16885+#endif
16886+.endif
16887+
16888 LOCK_PREFIX
16889 cmpxchg8b (%ebp)
16890 jne 1b
16891-
16892-10:
16893 movl %ebx, %eax
16894 movl %ecx, %edx
16895+
16896+.ifb \unchecked
16897+#ifdef CONFIG_PAX_REFCOUNT
16898+3:
16899+#endif
16900+.endif
16901+
16902 RESTORE edi
16903 RESTORE esi
16904 RESTORE ebx
16905 RESTORE ebp
16906+ pax_force_retaddr
16907 ret
16908 CFI_ENDPROC
16909-ENDPROC(atomic64_\func\()_return_cx8)
16910+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
16911 .endm
16912
16913 addsub_return add add adc
16914 addsub_return sub sub sbb
16915+addsub_return add add adc _unchecked
16916+addsub_return sub sub sbb _unchecked
16917
16918-.macro incdec_return func ins insc
16919-ENTRY(atomic64_\func\()_return_cx8)
16920+.macro incdec_return func ins insc unchecked
16921+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
16922 CFI_STARTPROC
16923 SAVE ebx
16924
16925@@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
16926 movl %edx, %ecx
16927 \ins\()l $1, %ebx
16928 \insc\()l $0, %ecx
16929+
16930+.ifb \unchecked
16931+#ifdef CONFIG_PAX_REFCOUNT
16932+ into
16933+2:
16934+ _ASM_EXTABLE(2b, 3f)
16935+#endif
16936+.endif
16937+
16938 LOCK_PREFIX
16939 cmpxchg8b (%esi)
16940 jne 1b
16941
16942-10:
16943 movl %ebx, %eax
16944 movl %ecx, %edx
16945+
16946+.ifb \unchecked
16947+#ifdef CONFIG_PAX_REFCOUNT
16948+3:
16949+#endif
16950+.endif
16951+
16952 RESTORE ebx
16953+ pax_force_retaddr
16954 ret
16955 CFI_ENDPROC
16956-ENDPROC(atomic64_\func\()_return_cx8)
16957+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
16958 .endm
16959
16960 incdec_return inc add adc
16961 incdec_return dec sub sbb
16962+incdec_return inc add adc _unchecked
16963+incdec_return dec sub sbb _unchecked
16964
16965 ENTRY(atomic64_dec_if_positive_cx8)
16966 CFI_STARTPROC
16967@@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
16968 movl %edx, %ecx
16969 subl $1, %ebx
16970 sbb $0, %ecx
16971+
16972+#ifdef CONFIG_PAX_REFCOUNT
16973+ into
16974+1234:
16975+ _ASM_EXTABLE(1234b, 2f)
16976+#endif
16977+
16978 js 2f
16979 LOCK_PREFIX
16980 cmpxchg8b (%esi)
16981@@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
16982 movl %ebx, %eax
16983 movl %ecx, %edx
16984 RESTORE ebx
16985+ pax_force_retaddr
16986 ret
16987 CFI_ENDPROC
16988 ENDPROC(atomic64_dec_if_positive_cx8)
16989@@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
16990 movl %edx, %ecx
16991 addl %esi, %ebx
16992 adcl %edi, %ecx
16993+
16994+#ifdef CONFIG_PAX_REFCOUNT
16995+ into
16996+1234:
16997+ _ASM_EXTABLE(1234b, 3f)
16998+#endif
16999+
17000 LOCK_PREFIX
17001 cmpxchg8b (%ebp)
17002 jne 1b
17003@@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
17004 CFI_ADJUST_CFA_OFFSET -8
17005 RESTORE ebx
17006 RESTORE ebp
17007+ pax_force_retaddr
17008 ret
17009 4:
17010 cmpl %edx, 4(%esp)
17011@@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
17012 movl %edx, %ecx
17013 addl $1, %ebx
17014 adcl $0, %ecx
17015+
17016+#ifdef CONFIG_PAX_REFCOUNT
17017+ into
17018+1234:
17019+ _ASM_EXTABLE(1234b, 3f)
17020+#endif
17021+
17022 LOCK_PREFIX
17023 cmpxchg8b (%esi)
17024 jne 1b
17025@@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
17026 movl $1, %eax
17027 3:
17028 RESTORE ebx
17029+ pax_force_retaddr
17030 ret
17031 4:
17032 testl %edx, %edx
17033diff -urNp linux-3.0.4/arch/x86/lib/checksum_32.S linux-3.0.4/arch/x86/lib/checksum_32.S
17034--- linux-3.0.4/arch/x86/lib/checksum_32.S 2011-07-21 22:17:23.000000000 -0400
17035+++ linux-3.0.4/arch/x86/lib/checksum_32.S 2011-08-23 21:47:55.000000000 -0400
17036@@ -28,7 +28,8 @@
17037 #include <linux/linkage.h>
17038 #include <asm/dwarf2.h>
17039 #include <asm/errno.h>
17040-
17041+#include <asm/segment.h>
17042+
17043 /*
17044 * computes a partial checksum, e.g. for TCP/UDP fragments
17045 */
17046@@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (
17047
17048 #define ARGBASE 16
17049 #define FP 12
17050-
17051-ENTRY(csum_partial_copy_generic)
17052+
17053+ENTRY(csum_partial_copy_generic_to_user)
17054 CFI_STARTPROC
17055+
17056+#ifdef CONFIG_PAX_MEMORY_UDEREF
17057+ pushl_cfi %gs
17058+ popl_cfi %es
17059+ jmp csum_partial_copy_generic
17060+#endif
17061+
17062+ENTRY(csum_partial_copy_generic_from_user)
17063+
17064+#ifdef CONFIG_PAX_MEMORY_UDEREF
17065+ pushl_cfi %gs
17066+ popl_cfi %ds
17067+#endif
17068+
17069+ENTRY(csum_partial_copy_generic)
17070 subl $4,%esp
17071 CFI_ADJUST_CFA_OFFSET 4
17072 pushl_cfi %edi
17073@@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
17074 jmp 4f
17075 SRC(1: movw (%esi), %bx )
17076 addl $2, %esi
17077-DST( movw %bx, (%edi) )
17078+DST( movw %bx, %es:(%edi) )
17079 addl $2, %edi
17080 addw %bx, %ax
17081 adcl $0, %eax
17082@@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
17083 SRC(1: movl (%esi), %ebx )
17084 SRC( movl 4(%esi), %edx )
17085 adcl %ebx, %eax
17086-DST( movl %ebx, (%edi) )
17087+DST( movl %ebx, %es:(%edi) )
17088 adcl %edx, %eax
17089-DST( movl %edx, 4(%edi) )
17090+DST( movl %edx, %es:4(%edi) )
17091
17092 SRC( movl 8(%esi), %ebx )
17093 SRC( movl 12(%esi), %edx )
17094 adcl %ebx, %eax
17095-DST( movl %ebx, 8(%edi) )
17096+DST( movl %ebx, %es:8(%edi) )
17097 adcl %edx, %eax
17098-DST( movl %edx, 12(%edi) )
17099+DST( movl %edx, %es:12(%edi) )
17100
17101 SRC( movl 16(%esi), %ebx )
17102 SRC( movl 20(%esi), %edx )
17103 adcl %ebx, %eax
17104-DST( movl %ebx, 16(%edi) )
17105+DST( movl %ebx, %es:16(%edi) )
17106 adcl %edx, %eax
17107-DST( movl %edx, 20(%edi) )
17108+DST( movl %edx, %es:20(%edi) )
17109
17110 SRC( movl 24(%esi), %ebx )
17111 SRC( movl 28(%esi), %edx )
17112 adcl %ebx, %eax
17113-DST( movl %ebx, 24(%edi) )
17114+DST( movl %ebx, %es:24(%edi) )
17115 adcl %edx, %eax
17116-DST( movl %edx, 28(%edi) )
17117+DST( movl %edx, %es:28(%edi) )
17118
17119 lea 32(%esi), %esi
17120 lea 32(%edi), %edi
17121@@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
17122 shrl $2, %edx # This clears CF
17123 SRC(3: movl (%esi), %ebx )
17124 adcl %ebx, %eax
17125-DST( movl %ebx, (%edi) )
17126+DST( movl %ebx, %es:(%edi) )
17127 lea 4(%esi), %esi
17128 lea 4(%edi), %edi
17129 dec %edx
17130@@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
17131 jb 5f
17132 SRC( movw (%esi), %cx )
17133 leal 2(%esi), %esi
17134-DST( movw %cx, (%edi) )
17135+DST( movw %cx, %es:(%edi) )
17136 leal 2(%edi), %edi
17137 je 6f
17138 shll $16,%ecx
17139 SRC(5: movb (%esi), %cl )
17140-DST( movb %cl, (%edi) )
17141+DST( movb %cl, %es:(%edi) )
17142 6: addl %ecx, %eax
17143 adcl $0, %eax
17144 7:
17145@@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
17146
17147 6001:
17148 movl ARGBASE+20(%esp), %ebx # src_err_ptr
17149- movl $-EFAULT, (%ebx)
17150+ movl $-EFAULT, %ss:(%ebx)
17151
17152 # zero the complete destination - computing the rest
17153 # is too much work
17154@@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
17155
17156 6002:
17157 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
17158- movl $-EFAULT,(%ebx)
17159+ movl $-EFAULT,%ss:(%ebx)
17160 jmp 5000b
17161
17162 .previous
17163
17164+ pushl_cfi %ss
17165+ popl_cfi %ds
17166+ pushl_cfi %ss
17167+ popl_cfi %es
17168 popl_cfi %ebx
17169 CFI_RESTORE ebx
17170 popl_cfi %esi
17171@@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
17172 popl_cfi %ecx # equivalent to addl $4,%esp
17173 ret
17174 CFI_ENDPROC
17175-ENDPROC(csum_partial_copy_generic)
17176+ENDPROC(csum_partial_copy_generic_to_user)
17177
17178 #else
17179
17180 /* Version for PentiumII/PPro */
17181
17182 #define ROUND1(x) \
17183+ nop; nop; nop; \
17184 SRC(movl x(%esi), %ebx ) ; \
17185 addl %ebx, %eax ; \
17186- DST(movl %ebx, x(%edi) ) ;
17187+ DST(movl %ebx, %es:x(%edi)) ;
17188
17189 #define ROUND(x) \
17190+ nop; nop; nop; \
17191 SRC(movl x(%esi), %ebx ) ; \
17192 adcl %ebx, %eax ; \
17193- DST(movl %ebx, x(%edi) ) ;
17194+ DST(movl %ebx, %es:x(%edi)) ;
17195
17196 #define ARGBASE 12
17197-
17198-ENTRY(csum_partial_copy_generic)
17199+
17200+ENTRY(csum_partial_copy_generic_to_user)
17201 CFI_STARTPROC
17202+
17203+#ifdef CONFIG_PAX_MEMORY_UDEREF
17204+ pushl_cfi %gs
17205+ popl_cfi %es
17206+ jmp csum_partial_copy_generic
17207+#endif
17208+
17209+ENTRY(csum_partial_copy_generic_from_user)
17210+
17211+#ifdef CONFIG_PAX_MEMORY_UDEREF
17212+ pushl_cfi %gs
17213+ popl_cfi %ds
17214+#endif
17215+
17216+ENTRY(csum_partial_copy_generic)
17217 pushl_cfi %ebx
17218 CFI_REL_OFFSET ebx, 0
17219 pushl_cfi %edi
17220@@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
17221 subl %ebx, %edi
17222 lea -1(%esi),%edx
17223 andl $-32,%edx
17224- lea 3f(%ebx,%ebx), %ebx
17225+ lea 3f(%ebx,%ebx,2), %ebx
17226 testl %esi, %esi
17227 jmp *%ebx
17228 1: addl $64,%esi
17229@@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
17230 jb 5f
17231 SRC( movw (%esi), %dx )
17232 leal 2(%esi), %esi
17233-DST( movw %dx, (%edi) )
17234+DST( movw %dx, %es:(%edi) )
17235 leal 2(%edi), %edi
17236 je 6f
17237 shll $16,%edx
17238 5:
17239 SRC( movb (%esi), %dl )
17240-DST( movb %dl, (%edi) )
17241+DST( movb %dl, %es:(%edi) )
17242 6: addl %edx, %eax
17243 adcl $0, %eax
17244 7:
17245 .section .fixup, "ax"
17246 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
17247- movl $-EFAULT, (%ebx)
17248+ movl $-EFAULT, %ss:(%ebx)
17249 # zero the complete destination (computing the rest is too much work)
17250 movl ARGBASE+8(%esp),%edi # dst
17251 movl ARGBASE+12(%esp),%ecx # len
17252@@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
17253 rep; stosb
17254 jmp 7b
17255 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
17256- movl $-EFAULT, (%ebx)
17257+ movl $-EFAULT, %ss:(%ebx)
17258 jmp 7b
17259 .previous
17260
17261+#ifdef CONFIG_PAX_MEMORY_UDEREF
17262+ pushl_cfi %ss
17263+ popl_cfi %ds
17264+ pushl_cfi %ss
17265+ popl_cfi %es
17266+#endif
17267+
17268 popl_cfi %esi
17269 CFI_RESTORE esi
17270 popl_cfi %edi
17271@@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
17272 CFI_RESTORE ebx
17273 ret
17274 CFI_ENDPROC
17275-ENDPROC(csum_partial_copy_generic)
17276+ENDPROC(csum_partial_copy_generic_to_user)
17277
17278 #undef ROUND
17279 #undef ROUND1
17280diff -urNp linux-3.0.4/arch/x86/lib/clear_page_64.S linux-3.0.4/arch/x86/lib/clear_page_64.S
17281--- linux-3.0.4/arch/x86/lib/clear_page_64.S 2011-07-21 22:17:23.000000000 -0400
17282+++ linux-3.0.4/arch/x86/lib/clear_page_64.S 2011-10-06 04:17:55.000000000 -0400
17283@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
17284 movl $4096/8,%ecx
17285 xorl %eax,%eax
17286 rep stosq
17287+ pax_force_retaddr
17288 ret
17289 CFI_ENDPROC
17290 ENDPROC(clear_page_c)
17291@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
17292 movl $4096,%ecx
17293 xorl %eax,%eax
17294 rep stosb
17295+ pax_force_retaddr
17296 ret
17297 CFI_ENDPROC
17298 ENDPROC(clear_page_c_e)
17299@@ -43,6 +45,7 @@ ENTRY(clear_page)
17300 leaq 64(%rdi),%rdi
17301 jnz .Lloop
17302 nop
17303+ pax_force_retaddr
17304 ret
17305 CFI_ENDPROC
17306 .Lclear_page_end:
17307@@ -58,7 +61,7 @@ ENDPROC(clear_page)
17308
17309 #include <asm/cpufeature.h>
17310
17311- .section .altinstr_replacement,"ax"
17312+ .section .altinstr_replacement,"a"
17313 1: .byte 0xeb /* jmp <disp8> */
17314 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
17315 2: .byte 0xeb /* jmp <disp8> */
17316diff -urNp linux-3.0.4/arch/x86/lib/copy_page_64.S linux-3.0.4/arch/x86/lib/copy_page_64.S
17317--- linux-3.0.4/arch/x86/lib/copy_page_64.S 2011-07-21 22:17:23.000000000 -0400
17318+++ linux-3.0.4/arch/x86/lib/copy_page_64.S 2011-10-06 04:17:55.000000000 -0400
17319@@ -2,12 +2,14 @@
17320
17321 #include <linux/linkage.h>
17322 #include <asm/dwarf2.h>
17323+#include <asm/alternative-asm.h>
17324
17325 ALIGN
17326 copy_page_c:
17327 CFI_STARTPROC
17328 movl $4096/8,%ecx
17329 rep movsq
17330+ pax_force_retaddr
17331 ret
17332 CFI_ENDPROC
17333 ENDPROC(copy_page_c)
17334@@ -94,6 +96,7 @@ ENTRY(copy_page)
17335 CFI_RESTORE r13
17336 addq $3*8,%rsp
17337 CFI_ADJUST_CFA_OFFSET -3*8
17338+ pax_force_retaddr
17339 ret
17340 .Lcopy_page_end:
17341 CFI_ENDPROC
17342@@ -104,7 +107,7 @@ ENDPROC(copy_page)
17343
17344 #include <asm/cpufeature.h>
17345
17346- .section .altinstr_replacement,"ax"
17347+ .section .altinstr_replacement,"a"
17348 1: .byte 0xeb /* jmp <disp8> */
17349 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
17350 2:
17351diff -urNp linux-3.0.4/arch/x86/lib/copy_user_64.S linux-3.0.4/arch/x86/lib/copy_user_64.S
17352--- linux-3.0.4/arch/x86/lib/copy_user_64.S 2011-07-21 22:17:23.000000000 -0400
17353+++ linux-3.0.4/arch/x86/lib/copy_user_64.S 2011-10-06 04:17:55.000000000 -0400
17354@@ -16,6 +16,7 @@
17355 #include <asm/thread_info.h>
17356 #include <asm/cpufeature.h>
17357 #include <asm/alternative-asm.h>
17358+#include <asm/pgtable.h>
17359
17360 /*
17361 * By placing feature2 after feature1 in altinstructions section, we logically
17362@@ -29,7 +30,7 @@
17363 .byte 0xe9 /* 32bit jump */
17364 .long \orig-1f /* by default jump to orig */
17365 1:
17366- .section .altinstr_replacement,"ax"
17367+ .section .altinstr_replacement,"a"
17368 2: .byte 0xe9 /* near jump with 32bit immediate */
17369 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
17370 3: .byte 0xe9 /* near jump with 32bit immediate */
17371@@ -71,47 +72,20 @@
17372 #endif
17373 .endm
17374
17375-/* Standard copy_to_user with segment limit checking */
17376-ENTRY(_copy_to_user)
17377- CFI_STARTPROC
17378- GET_THREAD_INFO(%rax)
17379- movq %rdi,%rcx
17380- addq %rdx,%rcx
17381- jc bad_to_user
17382- cmpq TI_addr_limit(%rax),%rcx
17383- ja bad_to_user
17384- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
17385- copy_user_generic_unrolled,copy_user_generic_string, \
17386- copy_user_enhanced_fast_string
17387- CFI_ENDPROC
17388-ENDPROC(_copy_to_user)
17389-
17390-/* Standard copy_from_user with segment limit checking */
17391-ENTRY(_copy_from_user)
17392- CFI_STARTPROC
17393- GET_THREAD_INFO(%rax)
17394- movq %rsi,%rcx
17395- addq %rdx,%rcx
17396- jc bad_from_user
17397- cmpq TI_addr_limit(%rax),%rcx
17398- ja bad_from_user
17399- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
17400- copy_user_generic_unrolled,copy_user_generic_string, \
17401- copy_user_enhanced_fast_string
17402- CFI_ENDPROC
17403-ENDPROC(_copy_from_user)
17404-
17405 .section .fixup,"ax"
17406 /* must zero dest */
17407 ENTRY(bad_from_user)
17408 bad_from_user:
17409 CFI_STARTPROC
17410+ testl %edx,%edx
17411+ js bad_to_user
17412 movl %edx,%ecx
17413 xorl %eax,%eax
17414 rep
17415 stosb
17416 bad_to_user:
17417 movl %edx,%eax
17418+ pax_force_retaddr
17419 ret
17420 CFI_ENDPROC
17421 ENDPROC(bad_from_user)
17422@@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
17423 decl %ecx
17424 jnz 21b
17425 23: xor %eax,%eax
17426+ pax_force_retaddr
17427 ret
17428
17429 .section .fixup,"ax"
17430@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
17431 3: rep
17432 movsb
17433 4: xorl %eax,%eax
17434+ pax_force_retaddr
17435 ret
17436
17437 .section .fixup,"ax"
17438@@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
17439 1: rep
17440 movsb
17441 2: xorl %eax,%eax
17442+ pax_force_retaddr
17443 ret
17444
17445 .section .fixup,"ax"
17446diff -urNp linux-3.0.4/arch/x86/lib/copy_user_nocache_64.S linux-3.0.4/arch/x86/lib/copy_user_nocache_64.S
17447--- linux-3.0.4/arch/x86/lib/copy_user_nocache_64.S 2011-07-21 22:17:23.000000000 -0400
17448+++ linux-3.0.4/arch/x86/lib/copy_user_nocache_64.S 2011-10-06 04:17:55.000000000 -0400
17449@@ -8,12 +8,14 @@
17450
17451 #include <linux/linkage.h>
17452 #include <asm/dwarf2.h>
17453+#include <asm/alternative-asm.h>
17454
17455 #define FIX_ALIGNMENT 1
17456
17457 #include <asm/current.h>
17458 #include <asm/asm-offsets.h>
17459 #include <asm/thread_info.h>
17460+#include <asm/pgtable.h>
17461
17462 .macro ALIGN_DESTINATION
17463 #ifdef FIX_ALIGNMENT
17464@@ -50,6 +52,15 @@
17465 */
17466 ENTRY(__copy_user_nocache)
17467 CFI_STARTPROC
17468+
17469+#ifdef CONFIG_PAX_MEMORY_UDEREF
17470+ mov $PAX_USER_SHADOW_BASE,%rcx
17471+ cmp %rcx,%rsi
17472+ jae 1f
17473+ add %rcx,%rsi
17474+1:
17475+#endif
17476+
17477 cmpl $8,%edx
17478 jb 20f /* less then 8 bytes, go to byte copy loop */
17479 ALIGN_DESTINATION
17480@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
17481 jnz 21b
17482 23: xorl %eax,%eax
17483 sfence
17484+ pax_force_retaddr
17485 ret
17486
17487 .section .fixup,"ax"
17488diff -urNp linux-3.0.4/arch/x86/lib/csum-copy_64.S linux-3.0.4/arch/x86/lib/csum-copy_64.S
17489--- linux-3.0.4/arch/x86/lib/csum-copy_64.S 2011-07-21 22:17:23.000000000 -0400
17490+++ linux-3.0.4/arch/x86/lib/csum-copy_64.S 2011-10-06 04:17:55.000000000 -0400
17491@@ -8,6 +8,7 @@
17492 #include <linux/linkage.h>
17493 #include <asm/dwarf2.h>
17494 #include <asm/errno.h>
17495+#include <asm/alternative-asm.h>
17496
17497 /*
17498 * Checksum copy with exception handling.
17499@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
17500 CFI_RESTORE rbp
17501 addq $7*8, %rsp
17502 CFI_ADJUST_CFA_OFFSET -7*8
17503+ pax_force_retaddr
17504 ret
17505 CFI_RESTORE_STATE
17506
17507diff -urNp linux-3.0.4/arch/x86/lib/csum-wrappers_64.c linux-3.0.4/arch/x86/lib/csum-wrappers_64.c
17508--- linux-3.0.4/arch/x86/lib/csum-wrappers_64.c 2011-07-21 22:17:23.000000000 -0400
17509+++ linux-3.0.4/arch/x86/lib/csum-wrappers_64.c 2011-10-06 04:17:55.000000000 -0400
17510@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void _
17511 len -= 2;
17512 }
17513 }
17514- isum = csum_partial_copy_generic((__force const void *)src,
17515+
17516+#ifdef CONFIG_PAX_MEMORY_UDEREF
17517+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
17518+ src += PAX_USER_SHADOW_BASE;
17519+#endif
17520+
17521+ isum = csum_partial_copy_generic((const void __force_kernel *)src,
17522 dst, len, isum, errp, NULL);
17523 if (unlikely(*errp))
17524 goto out_err;
17525@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *sr
17526 }
17527
17528 *errp = 0;
17529- return csum_partial_copy_generic(src, (void __force *)dst,
17530+
17531+#ifdef CONFIG_PAX_MEMORY_UDEREF
17532+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
17533+ dst += PAX_USER_SHADOW_BASE;
17534+#endif
17535+
17536+ return csum_partial_copy_generic(src, (void __force_kernel *)dst,
17537 len, isum, NULL, errp);
17538 }
17539 EXPORT_SYMBOL(csum_partial_copy_to_user);
17540diff -urNp linux-3.0.4/arch/x86/lib/getuser.S linux-3.0.4/arch/x86/lib/getuser.S
17541--- linux-3.0.4/arch/x86/lib/getuser.S 2011-07-21 22:17:23.000000000 -0400
17542+++ linux-3.0.4/arch/x86/lib/getuser.S 2011-08-23 21:47:55.000000000 -0400
17543@@ -33,14 +33,35 @@
17544 #include <asm/asm-offsets.h>
17545 #include <asm/thread_info.h>
17546 #include <asm/asm.h>
17547+#include <asm/segment.h>
17548+#include <asm/pgtable.h>
17549+
17550+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17551+#define __copyuser_seg gs;
17552+#else
17553+#define __copyuser_seg
17554+#endif
17555
17556 .text
17557 ENTRY(__get_user_1)
17558 CFI_STARTPROC
17559+
17560+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17561 GET_THREAD_INFO(%_ASM_DX)
17562 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17563 jae bad_get_user
17564-1: movzb (%_ASM_AX),%edx
17565+
17566+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17567+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17568+ cmp %_ASM_DX,%_ASM_AX
17569+ jae 1234f
17570+ add %_ASM_DX,%_ASM_AX
17571+1234:
17572+#endif
17573+
17574+#endif
17575+
17576+1: __copyuser_seg movzb (%_ASM_AX),%edx
17577 xor %eax,%eax
17578 ret
17579 CFI_ENDPROC
17580@@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
17581 ENTRY(__get_user_2)
17582 CFI_STARTPROC
17583 add $1,%_ASM_AX
17584+
17585+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17586 jc bad_get_user
17587 GET_THREAD_INFO(%_ASM_DX)
17588 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17589 jae bad_get_user
17590-2: movzwl -1(%_ASM_AX),%edx
17591+
17592+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17593+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17594+ cmp %_ASM_DX,%_ASM_AX
17595+ jae 1234f
17596+ add %_ASM_DX,%_ASM_AX
17597+1234:
17598+#endif
17599+
17600+#endif
17601+
17602+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
17603 xor %eax,%eax
17604 ret
17605 CFI_ENDPROC
17606@@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
17607 ENTRY(__get_user_4)
17608 CFI_STARTPROC
17609 add $3,%_ASM_AX
17610+
17611+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17612 jc bad_get_user
17613 GET_THREAD_INFO(%_ASM_DX)
17614 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17615 jae bad_get_user
17616-3: mov -3(%_ASM_AX),%edx
17617+
17618+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17619+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17620+ cmp %_ASM_DX,%_ASM_AX
17621+ jae 1234f
17622+ add %_ASM_DX,%_ASM_AX
17623+1234:
17624+#endif
17625+
17626+#endif
17627+
17628+3: __copyuser_seg mov -3(%_ASM_AX),%edx
17629 xor %eax,%eax
17630 ret
17631 CFI_ENDPROC
17632@@ -80,6 +127,15 @@ ENTRY(__get_user_8)
17633 GET_THREAD_INFO(%_ASM_DX)
17634 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17635 jae bad_get_user
17636+
17637+#ifdef CONFIG_PAX_MEMORY_UDEREF
17638+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17639+ cmp %_ASM_DX,%_ASM_AX
17640+ jae 1234f
17641+ add %_ASM_DX,%_ASM_AX
17642+1234:
17643+#endif
17644+
17645 4: movq -7(%_ASM_AX),%_ASM_DX
17646 xor %eax,%eax
17647 ret
17648diff -urNp linux-3.0.4/arch/x86/lib/insn.c linux-3.0.4/arch/x86/lib/insn.c
17649--- linux-3.0.4/arch/x86/lib/insn.c 2011-07-21 22:17:23.000000000 -0400
17650+++ linux-3.0.4/arch/x86/lib/insn.c 2011-08-23 21:47:55.000000000 -0400
17651@@ -21,6 +21,11 @@
17652 #include <linux/string.h>
17653 #include <asm/inat.h>
17654 #include <asm/insn.h>
17655+#ifdef __KERNEL__
17656+#include <asm/pgtable_types.h>
17657+#else
17658+#define ktla_ktva(addr) addr
17659+#endif
17660
17661 #define get_next(t, insn) \
17662 ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
17663@@ -40,8 +45,8 @@
17664 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
17665 {
17666 memset(insn, 0, sizeof(*insn));
17667- insn->kaddr = kaddr;
17668- insn->next_byte = kaddr;
17669+ insn->kaddr = ktla_ktva(kaddr);
17670+ insn->next_byte = ktla_ktva(kaddr);
17671 insn->x86_64 = x86_64 ? 1 : 0;
17672 insn->opnd_bytes = 4;
17673 if (x86_64)
17674diff -urNp linux-3.0.4/arch/x86/lib/iomap_copy_64.S linux-3.0.4/arch/x86/lib/iomap_copy_64.S
17675--- linux-3.0.4/arch/x86/lib/iomap_copy_64.S 2011-07-21 22:17:23.000000000 -0400
17676+++ linux-3.0.4/arch/x86/lib/iomap_copy_64.S 2011-10-06 04:17:55.000000000 -0400
17677@@ -17,6 +17,7 @@
17678
17679 #include <linux/linkage.h>
17680 #include <asm/dwarf2.h>
17681+#include <asm/alternative-asm.h>
17682
17683 /*
17684 * override generic version in lib/iomap_copy.c
17685@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
17686 CFI_STARTPROC
17687 movl %edx,%ecx
17688 rep movsd
17689+ pax_force_retaddr
17690 ret
17691 CFI_ENDPROC
17692 ENDPROC(__iowrite32_copy)
17693diff -urNp linux-3.0.4/arch/x86/lib/memcpy_64.S linux-3.0.4/arch/x86/lib/memcpy_64.S
17694--- linux-3.0.4/arch/x86/lib/memcpy_64.S 2011-07-21 22:17:23.000000000 -0400
17695+++ linux-3.0.4/arch/x86/lib/memcpy_64.S 2011-10-06 04:17:55.000000000 -0400
17696@@ -34,6 +34,7 @@
17697 rep movsq
17698 movl %edx, %ecx
17699 rep movsb
17700+ pax_force_retaddr
17701 ret
17702 .Lmemcpy_e:
17703 .previous
17704@@ -51,6 +52,7 @@
17705
17706 movl %edx, %ecx
17707 rep movsb
17708+ pax_force_retaddr
17709 ret
17710 .Lmemcpy_e_e:
17711 .previous
17712@@ -141,6 +143,7 @@ ENTRY(memcpy)
17713 movq %r9, 1*8(%rdi)
17714 movq %r10, -2*8(%rdi, %rdx)
17715 movq %r11, -1*8(%rdi, %rdx)
17716+ pax_force_retaddr
17717 retq
17718 .p2align 4
17719 .Lless_16bytes:
17720@@ -153,6 +156,7 @@ ENTRY(memcpy)
17721 movq -1*8(%rsi, %rdx), %r9
17722 movq %r8, 0*8(%rdi)
17723 movq %r9, -1*8(%rdi, %rdx)
17724+ pax_force_retaddr
17725 retq
17726 .p2align 4
17727 .Lless_8bytes:
17728@@ -166,6 +170,7 @@ ENTRY(memcpy)
17729 movl -4(%rsi, %rdx), %r8d
17730 movl %ecx, (%rdi)
17731 movl %r8d, -4(%rdi, %rdx)
17732+ pax_force_retaddr
17733 retq
17734 .p2align 4
17735 .Lless_3bytes:
17736@@ -183,6 +188,7 @@ ENTRY(memcpy)
17737 jnz .Lloop_1
17738
17739 .Lend:
17740+ pax_force_retaddr
17741 retq
17742 CFI_ENDPROC
17743 ENDPROC(memcpy)
17744diff -urNp linux-3.0.4/arch/x86/lib/memmove_64.S linux-3.0.4/arch/x86/lib/memmove_64.S
17745--- linux-3.0.4/arch/x86/lib/memmove_64.S 2011-07-21 22:17:23.000000000 -0400
17746+++ linux-3.0.4/arch/x86/lib/memmove_64.S 2011-10-06 04:17:55.000000000 -0400
17747@@ -9,6 +9,7 @@
17748 #include <linux/linkage.h>
17749 #include <asm/dwarf2.h>
17750 #include <asm/cpufeature.h>
17751+#include <asm/alternative-asm.h>
17752
17753 #undef memmove
17754
17755@@ -201,6 +202,7 @@ ENTRY(memmove)
17756 movb (%rsi), %r11b
17757 movb %r11b, (%rdi)
17758 13:
17759+ pax_force_retaddr
17760 retq
17761 CFI_ENDPROC
17762
17763@@ -209,6 +211,7 @@ ENTRY(memmove)
17764 /* Forward moving data. */
17765 movq %rdx, %rcx
17766 rep movsb
17767+ pax_force_retaddr
17768 retq
17769 .Lmemmove_end_forward_efs:
17770 .previous
17771diff -urNp linux-3.0.4/arch/x86/lib/memset_64.S linux-3.0.4/arch/x86/lib/memset_64.S
17772--- linux-3.0.4/arch/x86/lib/memset_64.S 2011-07-21 22:17:23.000000000 -0400
17773+++ linux-3.0.4/arch/x86/lib/memset_64.S 2011-10-06 04:17:55.000000000 -0400
17774@@ -31,6 +31,7 @@
17775 movl %r8d,%ecx
17776 rep stosb
17777 movq %r9,%rax
17778+ pax_force_retaddr
17779 ret
17780 .Lmemset_e:
17781 .previous
17782@@ -53,6 +54,7 @@
17783 movl %edx,%ecx
17784 rep stosb
17785 movq %r9,%rax
17786+ pax_force_retaddr
17787 ret
17788 .Lmemset_e_e:
17789 .previous
17790@@ -121,6 +123,7 @@ ENTRY(__memset)
17791
17792 .Lende:
17793 movq %r10,%rax
17794+ pax_force_retaddr
17795 ret
17796
17797 CFI_RESTORE_STATE
17798diff -urNp linux-3.0.4/arch/x86/lib/mmx_32.c linux-3.0.4/arch/x86/lib/mmx_32.c
17799--- linux-3.0.4/arch/x86/lib/mmx_32.c 2011-07-21 22:17:23.000000000 -0400
17800+++ linux-3.0.4/arch/x86/lib/mmx_32.c 2011-08-23 21:47:55.000000000 -0400
17801@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
17802 {
17803 void *p;
17804 int i;
17805+ unsigned long cr0;
17806
17807 if (unlikely(in_interrupt()))
17808 return __memcpy(to, from, len);
17809@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
17810 kernel_fpu_begin();
17811
17812 __asm__ __volatile__ (
17813- "1: prefetch (%0)\n" /* This set is 28 bytes */
17814- " prefetch 64(%0)\n"
17815- " prefetch 128(%0)\n"
17816- " prefetch 192(%0)\n"
17817- " prefetch 256(%0)\n"
17818+ "1: prefetch (%1)\n" /* This set is 28 bytes */
17819+ " prefetch 64(%1)\n"
17820+ " prefetch 128(%1)\n"
17821+ " prefetch 192(%1)\n"
17822+ " prefetch 256(%1)\n"
17823 "2: \n"
17824 ".section .fixup, \"ax\"\n"
17825- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17826+ "3: \n"
17827+
17828+#ifdef CONFIG_PAX_KERNEXEC
17829+ " movl %%cr0, %0\n"
17830+ " movl %0, %%eax\n"
17831+ " andl $0xFFFEFFFF, %%eax\n"
17832+ " movl %%eax, %%cr0\n"
17833+#endif
17834+
17835+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17836+
17837+#ifdef CONFIG_PAX_KERNEXEC
17838+ " movl %0, %%cr0\n"
17839+#endif
17840+
17841 " jmp 2b\n"
17842 ".previous\n"
17843 _ASM_EXTABLE(1b, 3b)
17844- : : "r" (from));
17845+ : "=&r" (cr0) : "r" (from) : "ax");
17846
17847 for ( ; i > 5; i--) {
17848 __asm__ __volatile__ (
17849- "1: prefetch 320(%0)\n"
17850- "2: movq (%0), %%mm0\n"
17851- " movq 8(%0), %%mm1\n"
17852- " movq 16(%0), %%mm2\n"
17853- " movq 24(%0), %%mm3\n"
17854- " movq %%mm0, (%1)\n"
17855- " movq %%mm1, 8(%1)\n"
17856- " movq %%mm2, 16(%1)\n"
17857- " movq %%mm3, 24(%1)\n"
17858- " movq 32(%0), %%mm0\n"
17859- " movq 40(%0), %%mm1\n"
17860- " movq 48(%0), %%mm2\n"
17861- " movq 56(%0), %%mm3\n"
17862- " movq %%mm0, 32(%1)\n"
17863- " movq %%mm1, 40(%1)\n"
17864- " movq %%mm2, 48(%1)\n"
17865- " movq %%mm3, 56(%1)\n"
17866+ "1: prefetch 320(%1)\n"
17867+ "2: movq (%1), %%mm0\n"
17868+ " movq 8(%1), %%mm1\n"
17869+ " movq 16(%1), %%mm2\n"
17870+ " movq 24(%1), %%mm3\n"
17871+ " movq %%mm0, (%2)\n"
17872+ " movq %%mm1, 8(%2)\n"
17873+ " movq %%mm2, 16(%2)\n"
17874+ " movq %%mm3, 24(%2)\n"
17875+ " movq 32(%1), %%mm0\n"
17876+ " movq 40(%1), %%mm1\n"
17877+ " movq 48(%1), %%mm2\n"
17878+ " movq 56(%1), %%mm3\n"
17879+ " movq %%mm0, 32(%2)\n"
17880+ " movq %%mm1, 40(%2)\n"
17881+ " movq %%mm2, 48(%2)\n"
17882+ " movq %%mm3, 56(%2)\n"
17883 ".section .fixup, \"ax\"\n"
17884- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17885+ "3:\n"
17886+
17887+#ifdef CONFIG_PAX_KERNEXEC
17888+ " movl %%cr0, %0\n"
17889+ " movl %0, %%eax\n"
17890+ " andl $0xFFFEFFFF, %%eax\n"
17891+ " movl %%eax, %%cr0\n"
17892+#endif
17893+
17894+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17895+
17896+#ifdef CONFIG_PAX_KERNEXEC
17897+ " movl %0, %%cr0\n"
17898+#endif
17899+
17900 " jmp 2b\n"
17901 ".previous\n"
17902 _ASM_EXTABLE(1b, 3b)
17903- : : "r" (from), "r" (to) : "memory");
17904+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17905
17906 from += 64;
17907 to += 64;
17908@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
17909 static void fast_copy_page(void *to, void *from)
17910 {
17911 int i;
17912+ unsigned long cr0;
17913
17914 kernel_fpu_begin();
17915
17916@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
17917 * but that is for later. -AV
17918 */
17919 __asm__ __volatile__(
17920- "1: prefetch (%0)\n"
17921- " prefetch 64(%0)\n"
17922- " prefetch 128(%0)\n"
17923- " prefetch 192(%0)\n"
17924- " prefetch 256(%0)\n"
17925+ "1: prefetch (%1)\n"
17926+ " prefetch 64(%1)\n"
17927+ " prefetch 128(%1)\n"
17928+ " prefetch 192(%1)\n"
17929+ " prefetch 256(%1)\n"
17930 "2: \n"
17931 ".section .fixup, \"ax\"\n"
17932- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17933+ "3: \n"
17934+
17935+#ifdef CONFIG_PAX_KERNEXEC
17936+ " movl %%cr0, %0\n"
17937+ " movl %0, %%eax\n"
17938+ " andl $0xFFFEFFFF, %%eax\n"
17939+ " movl %%eax, %%cr0\n"
17940+#endif
17941+
17942+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17943+
17944+#ifdef CONFIG_PAX_KERNEXEC
17945+ " movl %0, %%cr0\n"
17946+#endif
17947+
17948 " jmp 2b\n"
17949 ".previous\n"
17950- _ASM_EXTABLE(1b, 3b) : : "r" (from));
17951+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
17952
17953 for (i = 0; i < (4096-320)/64; i++) {
17954 __asm__ __volatile__ (
17955- "1: prefetch 320(%0)\n"
17956- "2: movq (%0), %%mm0\n"
17957- " movntq %%mm0, (%1)\n"
17958- " movq 8(%0), %%mm1\n"
17959- " movntq %%mm1, 8(%1)\n"
17960- " movq 16(%0), %%mm2\n"
17961- " movntq %%mm2, 16(%1)\n"
17962- " movq 24(%0), %%mm3\n"
17963- " movntq %%mm3, 24(%1)\n"
17964- " movq 32(%0), %%mm4\n"
17965- " movntq %%mm4, 32(%1)\n"
17966- " movq 40(%0), %%mm5\n"
17967- " movntq %%mm5, 40(%1)\n"
17968- " movq 48(%0), %%mm6\n"
17969- " movntq %%mm6, 48(%1)\n"
17970- " movq 56(%0), %%mm7\n"
17971- " movntq %%mm7, 56(%1)\n"
17972+ "1: prefetch 320(%1)\n"
17973+ "2: movq (%1), %%mm0\n"
17974+ " movntq %%mm0, (%2)\n"
17975+ " movq 8(%1), %%mm1\n"
17976+ " movntq %%mm1, 8(%2)\n"
17977+ " movq 16(%1), %%mm2\n"
17978+ " movntq %%mm2, 16(%2)\n"
17979+ " movq 24(%1), %%mm3\n"
17980+ " movntq %%mm3, 24(%2)\n"
17981+ " movq 32(%1), %%mm4\n"
17982+ " movntq %%mm4, 32(%2)\n"
17983+ " movq 40(%1), %%mm5\n"
17984+ " movntq %%mm5, 40(%2)\n"
17985+ " movq 48(%1), %%mm6\n"
17986+ " movntq %%mm6, 48(%2)\n"
17987+ " movq 56(%1), %%mm7\n"
17988+ " movntq %%mm7, 56(%2)\n"
17989 ".section .fixup, \"ax\"\n"
17990- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17991+ "3:\n"
17992+
17993+#ifdef CONFIG_PAX_KERNEXEC
17994+ " movl %%cr0, %0\n"
17995+ " movl %0, %%eax\n"
17996+ " andl $0xFFFEFFFF, %%eax\n"
17997+ " movl %%eax, %%cr0\n"
17998+#endif
17999+
18000+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18001+
18002+#ifdef CONFIG_PAX_KERNEXEC
18003+ " movl %0, %%cr0\n"
18004+#endif
18005+
18006 " jmp 2b\n"
18007 ".previous\n"
18008- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
18009+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18010
18011 from += 64;
18012 to += 64;
18013@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
18014 static void fast_copy_page(void *to, void *from)
18015 {
18016 int i;
18017+ unsigned long cr0;
18018
18019 kernel_fpu_begin();
18020
18021 __asm__ __volatile__ (
18022- "1: prefetch (%0)\n"
18023- " prefetch 64(%0)\n"
18024- " prefetch 128(%0)\n"
18025- " prefetch 192(%0)\n"
18026- " prefetch 256(%0)\n"
18027+ "1: prefetch (%1)\n"
18028+ " prefetch 64(%1)\n"
18029+ " prefetch 128(%1)\n"
18030+ " prefetch 192(%1)\n"
18031+ " prefetch 256(%1)\n"
18032 "2: \n"
18033 ".section .fixup, \"ax\"\n"
18034- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18035+ "3: \n"
18036+
18037+#ifdef CONFIG_PAX_KERNEXEC
18038+ " movl %%cr0, %0\n"
18039+ " movl %0, %%eax\n"
18040+ " andl $0xFFFEFFFF, %%eax\n"
18041+ " movl %%eax, %%cr0\n"
18042+#endif
18043+
18044+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18045+
18046+#ifdef CONFIG_PAX_KERNEXEC
18047+ " movl %0, %%cr0\n"
18048+#endif
18049+
18050 " jmp 2b\n"
18051 ".previous\n"
18052- _ASM_EXTABLE(1b, 3b) : : "r" (from));
18053+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
18054
18055 for (i = 0; i < 4096/64; i++) {
18056 __asm__ __volatile__ (
18057- "1: prefetch 320(%0)\n"
18058- "2: movq (%0), %%mm0\n"
18059- " movq 8(%0), %%mm1\n"
18060- " movq 16(%0), %%mm2\n"
18061- " movq 24(%0), %%mm3\n"
18062- " movq %%mm0, (%1)\n"
18063- " movq %%mm1, 8(%1)\n"
18064- " movq %%mm2, 16(%1)\n"
18065- " movq %%mm3, 24(%1)\n"
18066- " movq 32(%0), %%mm0\n"
18067- " movq 40(%0), %%mm1\n"
18068- " movq 48(%0), %%mm2\n"
18069- " movq 56(%0), %%mm3\n"
18070- " movq %%mm0, 32(%1)\n"
18071- " movq %%mm1, 40(%1)\n"
18072- " movq %%mm2, 48(%1)\n"
18073- " movq %%mm3, 56(%1)\n"
18074+ "1: prefetch 320(%1)\n"
18075+ "2: movq (%1), %%mm0\n"
18076+ " movq 8(%1), %%mm1\n"
18077+ " movq 16(%1), %%mm2\n"
18078+ " movq 24(%1), %%mm3\n"
18079+ " movq %%mm0, (%2)\n"
18080+ " movq %%mm1, 8(%2)\n"
18081+ " movq %%mm2, 16(%2)\n"
18082+ " movq %%mm3, 24(%2)\n"
18083+ " movq 32(%1), %%mm0\n"
18084+ " movq 40(%1), %%mm1\n"
18085+ " movq 48(%1), %%mm2\n"
18086+ " movq 56(%1), %%mm3\n"
18087+ " movq %%mm0, 32(%2)\n"
18088+ " movq %%mm1, 40(%2)\n"
18089+ " movq %%mm2, 48(%2)\n"
18090+ " movq %%mm3, 56(%2)\n"
18091 ".section .fixup, \"ax\"\n"
18092- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18093+ "3:\n"
18094+
18095+#ifdef CONFIG_PAX_KERNEXEC
18096+ " movl %%cr0, %0\n"
18097+ " movl %0, %%eax\n"
18098+ " andl $0xFFFEFFFF, %%eax\n"
18099+ " movl %%eax, %%cr0\n"
18100+#endif
18101+
18102+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18103+
18104+#ifdef CONFIG_PAX_KERNEXEC
18105+ " movl %0, %%cr0\n"
18106+#endif
18107+
18108 " jmp 2b\n"
18109 ".previous\n"
18110 _ASM_EXTABLE(1b, 3b)
18111- : : "r" (from), "r" (to) : "memory");
18112+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18113
18114 from += 64;
18115 to += 64;
18116diff -urNp linux-3.0.4/arch/x86/lib/putuser.S linux-3.0.4/arch/x86/lib/putuser.S
18117--- linux-3.0.4/arch/x86/lib/putuser.S 2011-07-21 22:17:23.000000000 -0400
18118+++ linux-3.0.4/arch/x86/lib/putuser.S 2011-08-23 21:47:55.000000000 -0400
18119@@ -15,7 +15,8 @@
18120 #include <asm/thread_info.h>
18121 #include <asm/errno.h>
18122 #include <asm/asm.h>
18123-
18124+#include <asm/segment.h>
18125+#include <asm/pgtable.h>
18126
18127 /*
18128 * __put_user_X
18129@@ -29,52 +30,119 @@
18130 * as they get called from within inline assembly.
18131 */
18132
18133-#define ENTER CFI_STARTPROC ; \
18134- GET_THREAD_INFO(%_ASM_BX)
18135+#define ENTER CFI_STARTPROC
18136 #define EXIT ret ; \
18137 CFI_ENDPROC
18138
18139+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18140+#define _DEST %_ASM_CX,%_ASM_BX
18141+#else
18142+#define _DEST %_ASM_CX
18143+#endif
18144+
18145+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18146+#define __copyuser_seg gs;
18147+#else
18148+#define __copyuser_seg
18149+#endif
18150+
18151 .text
18152 ENTRY(__put_user_1)
18153 ENTER
18154+
18155+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18156+ GET_THREAD_INFO(%_ASM_BX)
18157 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
18158 jae bad_put_user
18159-1: movb %al,(%_ASM_CX)
18160+
18161+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18162+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18163+ cmp %_ASM_BX,%_ASM_CX
18164+ jb 1234f
18165+ xor %ebx,%ebx
18166+1234:
18167+#endif
18168+
18169+#endif
18170+
18171+1: __copyuser_seg movb %al,(_DEST)
18172 xor %eax,%eax
18173 EXIT
18174 ENDPROC(__put_user_1)
18175
18176 ENTRY(__put_user_2)
18177 ENTER
18178+
18179+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18180+ GET_THREAD_INFO(%_ASM_BX)
18181 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18182 sub $1,%_ASM_BX
18183 cmp %_ASM_BX,%_ASM_CX
18184 jae bad_put_user
18185-2: movw %ax,(%_ASM_CX)
18186+
18187+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18188+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18189+ cmp %_ASM_BX,%_ASM_CX
18190+ jb 1234f
18191+ xor %ebx,%ebx
18192+1234:
18193+#endif
18194+
18195+#endif
18196+
18197+2: __copyuser_seg movw %ax,(_DEST)
18198 xor %eax,%eax
18199 EXIT
18200 ENDPROC(__put_user_2)
18201
18202 ENTRY(__put_user_4)
18203 ENTER
18204+
18205+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18206+ GET_THREAD_INFO(%_ASM_BX)
18207 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18208 sub $3,%_ASM_BX
18209 cmp %_ASM_BX,%_ASM_CX
18210 jae bad_put_user
18211-3: movl %eax,(%_ASM_CX)
18212+
18213+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18214+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18215+ cmp %_ASM_BX,%_ASM_CX
18216+ jb 1234f
18217+ xor %ebx,%ebx
18218+1234:
18219+#endif
18220+
18221+#endif
18222+
18223+3: __copyuser_seg movl %eax,(_DEST)
18224 xor %eax,%eax
18225 EXIT
18226 ENDPROC(__put_user_4)
18227
18228 ENTRY(__put_user_8)
18229 ENTER
18230+
18231+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18232+ GET_THREAD_INFO(%_ASM_BX)
18233 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18234 sub $7,%_ASM_BX
18235 cmp %_ASM_BX,%_ASM_CX
18236 jae bad_put_user
18237-4: mov %_ASM_AX,(%_ASM_CX)
18238+
18239+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18240+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18241+ cmp %_ASM_BX,%_ASM_CX
18242+ jb 1234f
18243+ xor %ebx,%ebx
18244+1234:
18245+#endif
18246+
18247+#endif
18248+
18249+4: __copyuser_seg mov %_ASM_AX,(_DEST)
18250 #ifdef CONFIG_X86_32
18251-5: movl %edx,4(%_ASM_CX)
18252+5: __copyuser_seg movl %edx,4(_DEST)
18253 #endif
18254 xor %eax,%eax
18255 EXIT
18256diff -urNp linux-3.0.4/arch/x86/lib/rwlock_64.S linux-3.0.4/arch/x86/lib/rwlock_64.S
18257--- linux-3.0.4/arch/x86/lib/rwlock_64.S 2011-07-21 22:17:23.000000000 -0400
18258+++ linux-3.0.4/arch/x86/lib/rwlock_64.S 2011-10-06 04:17:55.000000000 -0400
18259@@ -17,6 +17,7 @@ ENTRY(__write_lock_failed)
18260 LOCK_PREFIX
18261 subl $RW_LOCK_BIAS,(%rdi)
18262 jnz __write_lock_failed
18263+ pax_force_retaddr
18264 ret
18265 CFI_ENDPROC
18266 END(__write_lock_failed)
18267@@ -33,6 +34,7 @@ ENTRY(__read_lock_failed)
18268 LOCK_PREFIX
18269 decl (%rdi)
18270 js __read_lock_failed
18271+ pax_force_retaddr
18272 ret
18273 CFI_ENDPROC
18274 END(__read_lock_failed)
18275diff -urNp linux-3.0.4/arch/x86/lib/rwsem_64.S linux-3.0.4/arch/x86/lib/rwsem_64.S
18276--- linux-3.0.4/arch/x86/lib/rwsem_64.S 2011-07-21 22:17:23.000000000 -0400
18277+++ linux-3.0.4/arch/x86/lib/rwsem_64.S 2011-10-06 04:17:55.000000000 -0400
18278@@ -51,6 +51,7 @@ ENTRY(call_rwsem_down_read_failed)
18279 popq_cfi %rdx
18280 CFI_RESTORE rdx
18281 restore_common_regs
18282+ pax_force_retaddr
18283 ret
18284 CFI_ENDPROC
18285 ENDPROC(call_rwsem_down_read_failed)
18286@@ -61,6 +62,7 @@ ENTRY(call_rwsem_down_write_failed)
18287 movq %rax,%rdi
18288 call rwsem_down_write_failed
18289 restore_common_regs
18290+ pax_force_retaddr
18291 ret
18292 CFI_ENDPROC
18293 ENDPROC(call_rwsem_down_write_failed)
18294@@ -73,6 +75,7 @@ ENTRY(call_rwsem_wake)
18295 movq %rax,%rdi
18296 call rwsem_wake
18297 restore_common_regs
18298+ pax_force_retaddr
18299 1: ret
18300 CFI_ENDPROC
18301 ENDPROC(call_rwsem_wake)
18302@@ -88,6 +91,7 @@ ENTRY(call_rwsem_downgrade_wake)
18303 popq_cfi %rdx
18304 CFI_RESTORE rdx
18305 restore_common_regs
18306+ pax_force_retaddr
18307 ret
18308 CFI_ENDPROC
18309 ENDPROC(call_rwsem_downgrade_wake)
18310diff -urNp linux-3.0.4/arch/x86/lib/thunk_64.S linux-3.0.4/arch/x86/lib/thunk_64.S
18311--- linux-3.0.4/arch/x86/lib/thunk_64.S 2011-07-21 22:17:23.000000000 -0400
18312+++ linux-3.0.4/arch/x86/lib/thunk_64.S 2011-10-06 04:17:55.000000000 -0400
18313@@ -10,7 +10,8 @@
18314 #include <asm/dwarf2.h>
18315 #include <asm/calling.h>
18316 #include <asm/rwlock.h>
18317-
18318+ #include <asm/alternative-asm.h>
18319+
18320 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
18321 .macro thunk name,func
18322 .globl \name
18323@@ -50,5 +51,6 @@
18324 SAVE_ARGS
18325 restore:
18326 RESTORE_ARGS
18327- ret
18328+ pax_force_retaddr
18329+ ret
18330 CFI_ENDPROC
18331diff -urNp linux-3.0.4/arch/x86/lib/usercopy_32.c linux-3.0.4/arch/x86/lib/usercopy_32.c
18332--- linux-3.0.4/arch/x86/lib/usercopy_32.c 2011-07-21 22:17:23.000000000 -0400
18333+++ linux-3.0.4/arch/x86/lib/usercopy_32.c 2011-08-23 21:47:55.000000000 -0400
18334@@ -43,7 +43,7 @@ do { \
18335 __asm__ __volatile__( \
18336 " testl %1,%1\n" \
18337 " jz 2f\n" \
18338- "0: lodsb\n" \
18339+ "0: "__copyuser_seg"lodsb\n" \
18340 " stosb\n" \
18341 " testb %%al,%%al\n" \
18342 " jz 1f\n" \
18343@@ -128,10 +128,12 @@ do { \
18344 int __d0; \
18345 might_fault(); \
18346 __asm__ __volatile__( \
18347+ __COPYUSER_SET_ES \
18348 "0: rep; stosl\n" \
18349 " movl %2,%0\n" \
18350 "1: rep; stosb\n" \
18351 "2:\n" \
18352+ __COPYUSER_RESTORE_ES \
18353 ".section .fixup,\"ax\"\n" \
18354 "3: lea 0(%2,%0,4),%0\n" \
18355 " jmp 2b\n" \
18356@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
18357 might_fault();
18358
18359 __asm__ __volatile__(
18360+ __COPYUSER_SET_ES
18361 " testl %0, %0\n"
18362 " jz 3f\n"
18363 " andl %0,%%ecx\n"
18364@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
18365 " subl %%ecx,%0\n"
18366 " addl %0,%%eax\n"
18367 "1:\n"
18368+ __COPYUSER_RESTORE_ES
18369 ".section .fixup,\"ax\"\n"
18370 "2: xorl %%eax,%%eax\n"
18371 " jmp 1b\n"
18372@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
18373
18374 #ifdef CONFIG_X86_INTEL_USERCOPY
18375 static unsigned long
18376-__copy_user_intel(void __user *to, const void *from, unsigned long size)
18377+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
18378 {
18379 int d0, d1;
18380 __asm__ __volatile__(
18381@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
18382 " .align 2,0x90\n"
18383 "3: movl 0(%4), %%eax\n"
18384 "4: movl 4(%4), %%edx\n"
18385- "5: movl %%eax, 0(%3)\n"
18386- "6: movl %%edx, 4(%3)\n"
18387+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
18388+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
18389 "7: movl 8(%4), %%eax\n"
18390 "8: movl 12(%4),%%edx\n"
18391- "9: movl %%eax, 8(%3)\n"
18392- "10: movl %%edx, 12(%3)\n"
18393+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
18394+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
18395 "11: movl 16(%4), %%eax\n"
18396 "12: movl 20(%4), %%edx\n"
18397- "13: movl %%eax, 16(%3)\n"
18398- "14: movl %%edx, 20(%3)\n"
18399+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
18400+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
18401 "15: movl 24(%4), %%eax\n"
18402 "16: movl 28(%4), %%edx\n"
18403- "17: movl %%eax, 24(%3)\n"
18404- "18: movl %%edx, 28(%3)\n"
18405+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
18406+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
18407 "19: movl 32(%4), %%eax\n"
18408 "20: movl 36(%4), %%edx\n"
18409- "21: movl %%eax, 32(%3)\n"
18410- "22: movl %%edx, 36(%3)\n"
18411+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
18412+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
18413 "23: movl 40(%4), %%eax\n"
18414 "24: movl 44(%4), %%edx\n"
18415- "25: movl %%eax, 40(%3)\n"
18416- "26: movl %%edx, 44(%3)\n"
18417+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
18418+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
18419 "27: movl 48(%4), %%eax\n"
18420 "28: movl 52(%4), %%edx\n"
18421- "29: movl %%eax, 48(%3)\n"
18422- "30: movl %%edx, 52(%3)\n"
18423+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
18424+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
18425 "31: movl 56(%4), %%eax\n"
18426 "32: movl 60(%4), %%edx\n"
18427- "33: movl %%eax, 56(%3)\n"
18428- "34: movl %%edx, 60(%3)\n"
18429+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
18430+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
18431 " addl $-64, %0\n"
18432 " addl $64, %4\n"
18433 " addl $64, %3\n"
18434@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
18435 " shrl $2, %0\n"
18436 " andl $3, %%eax\n"
18437 " cld\n"
18438+ __COPYUSER_SET_ES
18439 "99: rep; movsl\n"
18440 "36: movl %%eax, %0\n"
18441 "37: rep; movsb\n"
18442 "100:\n"
18443+ __COPYUSER_RESTORE_ES
18444+ ".section .fixup,\"ax\"\n"
18445+ "101: lea 0(%%eax,%0,4),%0\n"
18446+ " jmp 100b\n"
18447+ ".previous\n"
18448+ ".section __ex_table,\"a\"\n"
18449+ " .align 4\n"
18450+ " .long 1b,100b\n"
18451+ " .long 2b,100b\n"
18452+ " .long 3b,100b\n"
18453+ " .long 4b,100b\n"
18454+ " .long 5b,100b\n"
18455+ " .long 6b,100b\n"
18456+ " .long 7b,100b\n"
18457+ " .long 8b,100b\n"
18458+ " .long 9b,100b\n"
18459+ " .long 10b,100b\n"
18460+ " .long 11b,100b\n"
18461+ " .long 12b,100b\n"
18462+ " .long 13b,100b\n"
18463+ " .long 14b,100b\n"
18464+ " .long 15b,100b\n"
18465+ " .long 16b,100b\n"
18466+ " .long 17b,100b\n"
18467+ " .long 18b,100b\n"
18468+ " .long 19b,100b\n"
18469+ " .long 20b,100b\n"
18470+ " .long 21b,100b\n"
18471+ " .long 22b,100b\n"
18472+ " .long 23b,100b\n"
18473+ " .long 24b,100b\n"
18474+ " .long 25b,100b\n"
18475+ " .long 26b,100b\n"
18476+ " .long 27b,100b\n"
18477+ " .long 28b,100b\n"
18478+ " .long 29b,100b\n"
18479+ " .long 30b,100b\n"
18480+ " .long 31b,100b\n"
18481+ " .long 32b,100b\n"
18482+ " .long 33b,100b\n"
18483+ " .long 34b,100b\n"
18484+ " .long 35b,100b\n"
18485+ " .long 36b,100b\n"
18486+ " .long 37b,100b\n"
18487+ " .long 99b,101b\n"
18488+ ".previous"
18489+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
18490+ : "1"(to), "2"(from), "0"(size)
18491+ : "eax", "edx", "memory");
18492+ return size;
18493+}
18494+
18495+static unsigned long
18496+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
18497+{
18498+ int d0, d1;
18499+ __asm__ __volatile__(
18500+ " .align 2,0x90\n"
18501+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
18502+ " cmpl $67, %0\n"
18503+ " jbe 3f\n"
18504+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
18505+ " .align 2,0x90\n"
18506+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
18507+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
18508+ "5: movl %%eax, 0(%3)\n"
18509+ "6: movl %%edx, 4(%3)\n"
18510+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
18511+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
18512+ "9: movl %%eax, 8(%3)\n"
18513+ "10: movl %%edx, 12(%3)\n"
18514+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
18515+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
18516+ "13: movl %%eax, 16(%3)\n"
18517+ "14: movl %%edx, 20(%3)\n"
18518+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
18519+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
18520+ "17: movl %%eax, 24(%3)\n"
18521+ "18: movl %%edx, 28(%3)\n"
18522+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
18523+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
18524+ "21: movl %%eax, 32(%3)\n"
18525+ "22: movl %%edx, 36(%3)\n"
18526+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
18527+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
18528+ "25: movl %%eax, 40(%3)\n"
18529+ "26: movl %%edx, 44(%3)\n"
18530+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
18531+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
18532+ "29: movl %%eax, 48(%3)\n"
18533+ "30: movl %%edx, 52(%3)\n"
18534+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
18535+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
18536+ "33: movl %%eax, 56(%3)\n"
18537+ "34: movl %%edx, 60(%3)\n"
18538+ " addl $-64, %0\n"
18539+ " addl $64, %4\n"
18540+ " addl $64, %3\n"
18541+ " cmpl $63, %0\n"
18542+ " ja 1b\n"
18543+ "35: movl %0, %%eax\n"
18544+ " shrl $2, %0\n"
18545+ " andl $3, %%eax\n"
18546+ " cld\n"
18547+ "99: rep; "__copyuser_seg" movsl\n"
18548+ "36: movl %%eax, %0\n"
18549+ "37: rep; "__copyuser_seg" movsb\n"
18550+ "100:\n"
18551 ".section .fixup,\"ax\"\n"
18552 "101: lea 0(%%eax,%0,4),%0\n"
18553 " jmp 100b\n"
18554@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
18555 int d0, d1;
18556 __asm__ __volatile__(
18557 " .align 2,0x90\n"
18558- "0: movl 32(%4), %%eax\n"
18559+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18560 " cmpl $67, %0\n"
18561 " jbe 2f\n"
18562- "1: movl 64(%4), %%eax\n"
18563+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18564 " .align 2,0x90\n"
18565- "2: movl 0(%4), %%eax\n"
18566- "21: movl 4(%4), %%edx\n"
18567+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18568+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18569 " movl %%eax, 0(%3)\n"
18570 " movl %%edx, 4(%3)\n"
18571- "3: movl 8(%4), %%eax\n"
18572- "31: movl 12(%4),%%edx\n"
18573+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18574+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18575 " movl %%eax, 8(%3)\n"
18576 " movl %%edx, 12(%3)\n"
18577- "4: movl 16(%4), %%eax\n"
18578- "41: movl 20(%4), %%edx\n"
18579+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18580+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18581 " movl %%eax, 16(%3)\n"
18582 " movl %%edx, 20(%3)\n"
18583- "10: movl 24(%4), %%eax\n"
18584- "51: movl 28(%4), %%edx\n"
18585+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18586+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18587 " movl %%eax, 24(%3)\n"
18588 " movl %%edx, 28(%3)\n"
18589- "11: movl 32(%4), %%eax\n"
18590- "61: movl 36(%4), %%edx\n"
18591+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18592+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18593 " movl %%eax, 32(%3)\n"
18594 " movl %%edx, 36(%3)\n"
18595- "12: movl 40(%4), %%eax\n"
18596- "71: movl 44(%4), %%edx\n"
18597+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18598+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18599 " movl %%eax, 40(%3)\n"
18600 " movl %%edx, 44(%3)\n"
18601- "13: movl 48(%4), %%eax\n"
18602- "81: movl 52(%4), %%edx\n"
18603+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18604+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18605 " movl %%eax, 48(%3)\n"
18606 " movl %%edx, 52(%3)\n"
18607- "14: movl 56(%4), %%eax\n"
18608- "91: movl 60(%4), %%edx\n"
18609+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18610+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18611 " movl %%eax, 56(%3)\n"
18612 " movl %%edx, 60(%3)\n"
18613 " addl $-64, %0\n"
18614@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
18615 " shrl $2, %0\n"
18616 " andl $3, %%eax\n"
18617 " cld\n"
18618- "6: rep; movsl\n"
18619+ "6: rep; "__copyuser_seg" movsl\n"
18620 " movl %%eax,%0\n"
18621- "7: rep; movsb\n"
18622+ "7: rep; "__copyuser_seg" movsb\n"
18623 "8:\n"
18624 ".section .fixup,\"ax\"\n"
18625 "9: lea 0(%%eax,%0,4),%0\n"
18626@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
18627
18628 __asm__ __volatile__(
18629 " .align 2,0x90\n"
18630- "0: movl 32(%4), %%eax\n"
18631+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18632 " cmpl $67, %0\n"
18633 " jbe 2f\n"
18634- "1: movl 64(%4), %%eax\n"
18635+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18636 " .align 2,0x90\n"
18637- "2: movl 0(%4), %%eax\n"
18638- "21: movl 4(%4), %%edx\n"
18639+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18640+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18641 " movnti %%eax, 0(%3)\n"
18642 " movnti %%edx, 4(%3)\n"
18643- "3: movl 8(%4), %%eax\n"
18644- "31: movl 12(%4),%%edx\n"
18645+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18646+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18647 " movnti %%eax, 8(%3)\n"
18648 " movnti %%edx, 12(%3)\n"
18649- "4: movl 16(%4), %%eax\n"
18650- "41: movl 20(%4), %%edx\n"
18651+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18652+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18653 " movnti %%eax, 16(%3)\n"
18654 " movnti %%edx, 20(%3)\n"
18655- "10: movl 24(%4), %%eax\n"
18656- "51: movl 28(%4), %%edx\n"
18657+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18658+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18659 " movnti %%eax, 24(%3)\n"
18660 " movnti %%edx, 28(%3)\n"
18661- "11: movl 32(%4), %%eax\n"
18662- "61: movl 36(%4), %%edx\n"
18663+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18664+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18665 " movnti %%eax, 32(%3)\n"
18666 " movnti %%edx, 36(%3)\n"
18667- "12: movl 40(%4), %%eax\n"
18668- "71: movl 44(%4), %%edx\n"
18669+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18670+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18671 " movnti %%eax, 40(%3)\n"
18672 " movnti %%edx, 44(%3)\n"
18673- "13: movl 48(%4), %%eax\n"
18674- "81: movl 52(%4), %%edx\n"
18675+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18676+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18677 " movnti %%eax, 48(%3)\n"
18678 " movnti %%edx, 52(%3)\n"
18679- "14: movl 56(%4), %%eax\n"
18680- "91: movl 60(%4), %%edx\n"
18681+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18682+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18683 " movnti %%eax, 56(%3)\n"
18684 " movnti %%edx, 60(%3)\n"
18685 " addl $-64, %0\n"
18686@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
18687 " shrl $2, %0\n"
18688 " andl $3, %%eax\n"
18689 " cld\n"
18690- "6: rep; movsl\n"
18691+ "6: rep; "__copyuser_seg" movsl\n"
18692 " movl %%eax,%0\n"
18693- "7: rep; movsb\n"
18694+ "7: rep; "__copyuser_seg" movsb\n"
18695 "8:\n"
18696 ".section .fixup,\"ax\"\n"
18697 "9: lea 0(%%eax,%0,4),%0\n"
18698@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
18699
18700 __asm__ __volatile__(
18701 " .align 2,0x90\n"
18702- "0: movl 32(%4), %%eax\n"
18703+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18704 " cmpl $67, %0\n"
18705 " jbe 2f\n"
18706- "1: movl 64(%4), %%eax\n"
18707+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18708 " .align 2,0x90\n"
18709- "2: movl 0(%4), %%eax\n"
18710- "21: movl 4(%4), %%edx\n"
18711+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18712+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18713 " movnti %%eax, 0(%3)\n"
18714 " movnti %%edx, 4(%3)\n"
18715- "3: movl 8(%4), %%eax\n"
18716- "31: movl 12(%4),%%edx\n"
18717+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18718+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18719 " movnti %%eax, 8(%3)\n"
18720 " movnti %%edx, 12(%3)\n"
18721- "4: movl 16(%4), %%eax\n"
18722- "41: movl 20(%4), %%edx\n"
18723+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18724+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18725 " movnti %%eax, 16(%3)\n"
18726 " movnti %%edx, 20(%3)\n"
18727- "10: movl 24(%4), %%eax\n"
18728- "51: movl 28(%4), %%edx\n"
18729+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18730+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18731 " movnti %%eax, 24(%3)\n"
18732 " movnti %%edx, 28(%3)\n"
18733- "11: movl 32(%4), %%eax\n"
18734- "61: movl 36(%4), %%edx\n"
18735+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18736+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18737 " movnti %%eax, 32(%3)\n"
18738 " movnti %%edx, 36(%3)\n"
18739- "12: movl 40(%4), %%eax\n"
18740- "71: movl 44(%4), %%edx\n"
18741+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18742+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18743 " movnti %%eax, 40(%3)\n"
18744 " movnti %%edx, 44(%3)\n"
18745- "13: movl 48(%4), %%eax\n"
18746- "81: movl 52(%4), %%edx\n"
18747+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18748+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18749 " movnti %%eax, 48(%3)\n"
18750 " movnti %%edx, 52(%3)\n"
18751- "14: movl 56(%4), %%eax\n"
18752- "91: movl 60(%4), %%edx\n"
18753+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18754+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18755 " movnti %%eax, 56(%3)\n"
18756 " movnti %%edx, 60(%3)\n"
18757 " addl $-64, %0\n"
18758@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
18759 " shrl $2, %0\n"
18760 " andl $3, %%eax\n"
18761 " cld\n"
18762- "6: rep; movsl\n"
18763+ "6: rep; "__copyuser_seg" movsl\n"
18764 " movl %%eax,%0\n"
18765- "7: rep; movsb\n"
18766+ "7: rep; "__copyuser_seg" movsb\n"
18767 "8:\n"
18768 ".section .fixup,\"ax\"\n"
18769 "9: lea 0(%%eax,%0,4),%0\n"
18770@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
18771 */
18772 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
18773 unsigned long size);
18774-unsigned long __copy_user_intel(void __user *to, const void *from,
18775+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
18776+ unsigned long size);
18777+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
18778 unsigned long size);
18779 unsigned long __copy_user_zeroing_intel_nocache(void *to,
18780 const void __user *from, unsigned long size);
18781 #endif /* CONFIG_X86_INTEL_USERCOPY */
18782
18783 /* Generic arbitrary sized copy. */
18784-#define __copy_user(to, from, size) \
18785+#define __copy_user(to, from, size, prefix, set, restore) \
18786 do { \
18787 int __d0, __d1, __d2; \
18788 __asm__ __volatile__( \
18789+ set \
18790 " cmp $7,%0\n" \
18791 " jbe 1f\n" \
18792 " movl %1,%0\n" \
18793 " negl %0\n" \
18794 " andl $7,%0\n" \
18795 " subl %0,%3\n" \
18796- "4: rep; movsb\n" \
18797+ "4: rep; "prefix"movsb\n" \
18798 " movl %3,%0\n" \
18799 " shrl $2,%0\n" \
18800 " andl $3,%3\n" \
18801 " .align 2,0x90\n" \
18802- "0: rep; movsl\n" \
18803+ "0: rep; "prefix"movsl\n" \
18804 " movl %3,%0\n" \
18805- "1: rep; movsb\n" \
18806+ "1: rep; "prefix"movsb\n" \
18807 "2:\n" \
18808+ restore \
18809 ".section .fixup,\"ax\"\n" \
18810 "5: addl %3,%0\n" \
18811 " jmp 2b\n" \
18812@@ -682,14 +799,14 @@ do { \
18813 " negl %0\n" \
18814 " andl $7,%0\n" \
18815 " subl %0,%3\n" \
18816- "4: rep; movsb\n" \
18817+ "4: rep; "__copyuser_seg"movsb\n" \
18818 " movl %3,%0\n" \
18819 " shrl $2,%0\n" \
18820 " andl $3,%3\n" \
18821 " .align 2,0x90\n" \
18822- "0: rep; movsl\n" \
18823+ "0: rep; "__copyuser_seg"movsl\n" \
18824 " movl %3,%0\n" \
18825- "1: rep; movsb\n" \
18826+ "1: rep; "__copyuser_seg"movsb\n" \
18827 "2:\n" \
18828 ".section .fixup,\"ax\"\n" \
18829 "5: addl %3,%0\n" \
18830@@ -775,9 +892,9 @@ survive:
18831 }
18832 #endif
18833 if (movsl_is_ok(to, from, n))
18834- __copy_user(to, from, n);
18835+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
18836 else
18837- n = __copy_user_intel(to, from, n);
18838+ n = __generic_copy_to_user_intel(to, from, n);
18839 return n;
18840 }
18841 EXPORT_SYMBOL(__copy_to_user_ll);
18842@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
18843 unsigned long n)
18844 {
18845 if (movsl_is_ok(to, from, n))
18846- __copy_user(to, from, n);
18847+ __copy_user(to, from, n, __copyuser_seg, "", "");
18848 else
18849- n = __copy_user_intel((void __user *)to,
18850- (const void *)from, n);
18851+ n = __generic_copy_from_user_intel(to, from, n);
18852 return n;
18853 }
18854 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
18855@@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocach
18856 if (n > 64 && cpu_has_xmm2)
18857 n = __copy_user_intel_nocache(to, from, n);
18858 else
18859- __copy_user(to, from, n);
18860+ __copy_user(to, from, n, __copyuser_seg, "", "");
18861 #else
18862- __copy_user(to, from, n);
18863+ __copy_user(to, from, n, __copyuser_seg, "", "");
18864 #endif
18865 return n;
18866 }
18867 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
18868
18869-/**
18870- * copy_to_user: - Copy a block of data into user space.
18871- * @to: Destination address, in user space.
18872- * @from: Source address, in kernel space.
18873- * @n: Number of bytes to copy.
18874- *
18875- * Context: User context only. This function may sleep.
18876- *
18877- * Copy data from kernel space to user space.
18878- *
18879- * Returns number of bytes that could not be copied.
18880- * On success, this will be zero.
18881- */
18882-unsigned long
18883-copy_to_user(void __user *to, const void *from, unsigned long n)
18884+void copy_from_user_overflow(void)
18885 {
18886- if (access_ok(VERIFY_WRITE, to, n))
18887- n = __copy_to_user(to, from, n);
18888- return n;
18889+ WARN(1, "Buffer overflow detected!\n");
18890 }
18891-EXPORT_SYMBOL(copy_to_user);
18892+EXPORT_SYMBOL(copy_from_user_overflow);
18893
18894-/**
18895- * copy_from_user: - Copy a block of data from user space.
18896- * @to: Destination address, in kernel space.
18897- * @from: Source address, in user space.
18898- * @n: Number of bytes to copy.
18899- *
18900- * Context: User context only. This function may sleep.
18901- *
18902- * Copy data from user space to kernel space.
18903- *
18904- * Returns number of bytes that could not be copied.
18905- * On success, this will be zero.
18906- *
18907- * If some data could not be copied, this function will pad the copied
18908- * data to the requested size using zero bytes.
18909- */
18910-unsigned long
18911-_copy_from_user(void *to, const void __user *from, unsigned long n)
18912+void copy_to_user_overflow(void)
18913 {
18914- if (access_ok(VERIFY_READ, from, n))
18915- n = __copy_from_user(to, from, n);
18916- else
18917- memset(to, 0, n);
18918- return n;
18919+ WARN(1, "Buffer overflow detected!\n");
18920 }
18921-EXPORT_SYMBOL(_copy_from_user);
18922+EXPORT_SYMBOL(copy_to_user_overflow);
18923
18924-void copy_from_user_overflow(void)
18925+#ifdef CONFIG_PAX_MEMORY_UDEREF
18926+void __set_fs(mm_segment_t x)
18927 {
18928- WARN(1, "Buffer overflow detected!\n");
18929+ switch (x.seg) {
18930+ case 0:
18931+ loadsegment(gs, 0);
18932+ break;
18933+ case TASK_SIZE_MAX:
18934+ loadsegment(gs, __USER_DS);
18935+ break;
18936+ case -1UL:
18937+ loadsegment(gs, __KERNEL_DS);
18938+ break;
18939+ default:
18940+ BUG();
18941+ }
18942+ return;
18943 }
18944-EXPORT_SYMBOL(copy_from_user_overflow);
18945+EXPORT_SYMBOL(__set_fs);
18946+
18947+void set_fs(mm_segment_t x)
18948+{
18949+ current_thread_info()->addr_limit = x;
18950+ __set_fs(x);
18951+}
18952+EXPORT_SYMBOL(set_fs);
18953+#endif
18954diff -urNp linux-3.0.4/arch/x86/lib/usercopy_64.c linux-3.0.4/arch/x86/lib/usercopy_64.c
18955--- linux-3.0.4/arch/x86/lib/usercopy_64.c 2011-07-21 22:17:23.000000000 -0400
18956+++ linux-3.0.4/arch/x86/lib/usercopy_64.c 2011-10-06 04:17:55.000000000 -0400
18957@@ -42,6 +42,12 @@ long
18958 __strncpy_from_user(char *dst, const char __user *src, long count)
18959 {
18960 long res;
18961+
18962+#ifdef CONFIG_PAX_MEMORY_UDEREF
18963+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
18964+ src += PAX_USER_SHADOW_BASE;
18965+#endif
18966+
18967 __do_strncpy_from_user(dst, src, count, res);
18968 return res;
18969 }
18970@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
18971 {
18972 long __d0;
18973 might_fault();
18974+
18975+#ifdef CONFIG_PAX_MEMORY_UDEREF
18976+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
18977+ addr += PAX_USER_SHADOW_BASE;
18978+#endif
18979+
18980 /* no memory constraint because it doesn't change any memory gcc knows
18981 about */
18982 asm volatile(
18983@@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
18984
18985 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
18986 {
18987- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
18988- return copy_user_generic((__force void *)to, (__force void *)from, len);
18989- }
18990- return len;
18991+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
18992+
18993+#ifdef CONFIG_PAX_MEMORY_UDEREF
18994+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
18995+ to += PAX_USER_SHADOW_BASE;
18996+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
18997+ from += PAX_USER_SHADOW_BASE;
18998+#endif
18999+
19000+ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
19001+ }
19002+ return len;
19003 }
19004 EXPORT_SYMBOL(copy_in_user);
19005
19006@@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
19007 * it is not necessary to optimize tail handling.
19008 */
19009 unsigned long
19010-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
19011+copy_user_handle_tail(char __user *to, char __user *from, unsigned len, unsigned zerorest)
19012 {
19013 char c;
19014 unsigned zero_len;
19015diff -urNp linux-3.0.4/arch/x86/Makefile linux-3.0.4/arch/x86/Makefile
19016--- linux-3.0.4/arch/x86/Makefile 2011-07-21 22:17:23.000000000 -0400
19017+++ linux-3.0.4/arch/x86/Makefile 2011-08-23 21:48:14.000000000 -0400
19018@@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
19019 else
19020 BITS := 64
19021 UTS_MACHINE := x86_64
19022+ biarch := $(call cc-option,-m64)
19023 CHECKFLAGS += -D__x86_64__ -m64
19024
19025 KBUILD_AFLAGS += -m64
19026@@ -195,3 +196,12 @@ define archhelp
19027 echo ' FDARGS="..." arguments for the booted kernel'
19028 echo ' FDINITRD=file initrd for the booted kernel'
19029 endef
19030+
19031+define OLD_LD
19032+
19033+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
19034+*** Please upgrade your binutils to 2.18 or newer
19035+endef
19036+
19037+archprepare:
19038+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
19039diff -urNp linux-3.0.4/arch/x86/mm/extable.c linux-3.0.4/arch/x86/mm/extable.c
19040--- linux-3.0.4/arch/x86/mm/extable.c 2011-07-21 22:17:23.000000000 -0400
19041+++ linux-3.0.4/arch/x86/mm/extable.c 2011-08-23 21:47:55.000000000 -0400
19042@@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs
19043 const struct exception_table_entry *fixup;
19044
19045 #ifdef CONFIG_PNPBIOS
19046- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
19047+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
19048 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
19049 extern u32 pnp_bios_is_utter_crap;
19050 pnp_bios_is_utter_crap = 1;
19051diff -urNp linux-3.0.4/arch/x86/mm/fault.c linux-3.0.4/arch/x86/mm/fault.c
19052--- linux-3.0.4/arch/x86/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
19053+++ linux-3.0.4/arch/x86/mm/fault.c 2011-10-06 04:17:55.000000000 -0400
19054@@ -13,10 +13,18 @@
19055 #include <linux/perf_event.h> /* perf_sw_event */
19056 #include <linux/hugetlb.h> /* hstate_index_to_shift */
19057 #include <linux/prefetch.h> /* prefetchw */
19058+#include <linux/unistd.h>
19059+#include <linux/compiler.h>
19060
19061 #include <asm/traps.h> /* dotraplinkage, ... */
19062 #include <asm/pgalloc.h> /* pgd_*(), ... */
19063 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
19064+#include <asm/vsyscall.h>
19065+#include <asm/tlbflush.h>
19066+
19067+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19068+#include <asm/stacktrace.h>
19069+#endif
19070
19071 /*
19072 * Page fault error code bits:
19073@@ -54,7 +62,7 @@ static inline int __kprobes notify_page_
19074 int ret = 0;
19075
19076 /* kprobe_running() needs smp_processor_id() */
19077- if (kprobes_built_in() && !user_mode_vm(regs)) {
19078+ if (kprobes_built_in() && !user_mode(regs)) {
19079 preempt_disable();
19080 if (kprobe_running() && kprobe_fault_handler(regs, 14))
19081 ret = 1;
19082@@ -115,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *re
19083 return !instr_lo || (instr_lo>>1) == 1;
19084 case 0x00:
19085 /* Prefetch instruction is 0x0F0D or 0x0F18 */
19086- if (probe_kernel_address(instr, opcode))
19087+ if (user_mode(regs)) {
19088+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
19089+ return 0;
19090+ } else if (probe_kernel_address(instr, opcode))
19091 return 0;
19092
19093 *prefetch = (instr_lo == 0xF) &&
19094@@ -149,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsign
19095 while (instr < max_instr) {
19096 unsigned char opcode;
19097
19098- if (probe_kernel_address(instr, opcode))
19099+ if (user_mode(regs)) {
19100+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
19101+ break;
19102+ } else if (probe_kernel_address(instr, opcode))
19103 break;
19104
19105 instr++;
19106@@ -180,6 +194,30 @@ force_sig_info_fault(int si_signo, int s
19107 force_sig_info(si_signo, &info, tsk);
19108 }
19109
19110+#ifdef CONFIG_PAX_EMUTRAMP
19111+static int pax_handle_fetch_fault(struct pt_regs *regs);
19112+#endif
19113+
19114+#ifdef CONFIG_PAX_PAGEEXEC
19115+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
19116+{
19117+ pgd_t *pgd;
19118+ pud_t *pud;
19119+ pmd_t *pmd;
19120+
19121+ pgd = pgd_offset(mm, address);
19122+ if (!pgd_present(*pgd))
19123+ return NULL;
19124+ pud = pud_offset(pgd, address);
19125+ if (!pud_present(*pud))
19126+ return NULL;
19127+ pmd = pmd_offset(pud, address);
19128+ if (!pmd_present(*pmd))
19129+ return NULL;
19130+ return pmd;
19131+}
19132+#endif
19133+
19134 DEFINE_SPINLOCK(pgd_lock);
19135 LIST_HEAD(pgd_list);
19136
19137@@ -230,10 +268,22 @@ void vmalloc_sync_all(void)
19138 for (address = VMALLOC_START & PMD_MASK;
19139 address >= TASK_SIZE && address < FIXADDR_TOP;
19140 address += PMD_SIZE) {
19141+
19142+#ifdef CONFIG_PAX_PER_CPU_PGD
19143+ unsigned long cpu;
19144+#else
19145 struct page *page;
19146+#endif
19147
19148 spin_lock(&pgd_lock);
19149+
19150+#ifdef CONFIG_PAX_PER_CPU_PGD
19151+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
19152+ pgd_t *pgd = get_cpu_pgd(cpu);
19153+ pmd_t *ret;
19154+#else
19155 list_for_each_entry(page, &pgd_list, lru) {
19156+ pgd_t *pgd = page_address(page);
19157 spinlock_t *pgt_lock;
19158 pmd_t *ret;
19159
19160@@ -241,8 +291,13 @@ void vmalloc_sync_all(void)
19161 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
19162
19163 spin_lock(pgt_lock);
19164- ret = vmalloc_sync_one(page_address(page), address);
19165+#endif
19166+
19167+ ret = vmalloc_sync_one(pgd, address);
19168+
19169+#ifndef CONFIG_PAX_PER_CPU_PGD
19170 spin_unlock(pgt_lock);
19171+#endif
19172
19173 if (!ret)
19174 break;
19175@@ -276,6 +331,11 @@ static noinline __kprobes int vmalloc_fa
19176 * an interrupt in the middle of a task switch..
19177 */
19178 pgd_paddr = read_cr3();
19179+
19180+#ifdef CONFIG_PAX_PER_CPU_PGD
19181+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
19182+#endif
19183+
19184 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
19185 if (!pmd_k)
19186 return -1;
19187@@ -371,7 +431,14 @@ static noinline __kprobes int vmalloc_fa
19188 * happen within a race in page table update. In the later
19189 * case just flush:
19190 */
19191+
19192+#ifdef CONFIG_PAX_PER_CPU_PGD
19193+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
19194+ pgd = pgd_offset_cpu(smp_processor_id(), address);
19195+#else
19196 pgd = pgd_offset(current->active_mm, address);
19197+#endif
19198+
19199 pgd_ref = pgd_offset_k(address);
19200 if (pgd_none(*pgd_ref))
19201 return -1;
19202@@ -533,7 +600,7 @@ static int is_errata93(struct pt_regs *r
19203 static int is_errata100(struct pt_regs *regs, unsigned long address)
19204 {
19205 #ifdef CONFIG_X86_64
19206- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
19207+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
19208 return 1;
19209 #endif
19210 return 0;
19211@@ -560,7 +627,7 @@ static int is_f00f_bug(struct pt_regs *r
19212 }
19213
19214 static const char nx_warning[] = KERN_CRIT
19215-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
19216+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
19217
19218 static void
19219 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
19220@@ -569,14 +636,25 @@ show_fault_oops(struct pt_regs *regs, un
19221 if (!oops_may_print())
19222 return;
19223
19224- if (error_code & PF_INSTR) {
19225+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
19226 unsigned int level;
19227
19228 pte_t *pte = lookup_address(address, &level);
19229
19230 if (pte && pte_present(*pte) && !pte_exec(*pte))
19231- printk(nx_warning, current_uid());
19232+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
19233+ }
19234+
19235+#ifdef CONFIG_PAX_KERNEXEC
19236+ if (init_mm.start_code <= address && address < init_mm.end_code) {
19237+ if (current->signal->curr_ip)
19238+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
19239+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
19240+ else
19241+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
19242+ current->comm, task_pid_nr(current), current_uid(), current_euid());
19243 }
19244+#endif
19245
19246 printk(KERN_ALERT "BUG: unable to handle kernel ");
19247 if (address < PAGE_SIZE)
19248@@ -702,6 +780,66 @@ __bad_area_nosemaphore(struct pt_regs *r
19249 unsigned long address, int si_code)
19250 {
19251 struct task_struct *tsk = current;
19252+#if defined(CONFIG_X86_64) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19253+ struct mm_struct *mm = tsk->mm;
19254+#endif
19255+
19256+#ifdef CONFIG_X86_64
19257+ if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
19258+ if (regs->ip == VSYSCALL_ADDR(__NR_vgettimeofday) ||
19259+ regs->ip == VSYSCALL_ADDR(__NR_vtime) ||
19260+ regs->ip == VSYSCALL_ADDR(__NR_vgetcpu)) {
19261+ regs->ip += mm->context.vdso - PAGE_SIZE - VSYSCALL_START;
19262+ return;
19263+ }
19264+ }
19265+#endif
19266+
19267+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19268+ if (mm && (error_code & PF_USER)) {
19269+ unsigned long ip = regs->ip;
19270+
19271+ if (v8086_mode(regs))
19272+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
19273+
19274+ /*
19275+ * It's possible to have interrupts off here:
19276+ */
19277+ local_irq_enable();
19278+
19279+#ifdef CONFIG_PAX_PAGEEXEC
19280+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
19281+ (((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
19282+
19283+#ifdef CONFIG_PAX_EMUTRAMP
19284+ switch (pax_handle_fetch_fault(regs)) {
19285+ case 2:
19286+ return;
19287+ }
19288+#endif
19289+
19290+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
19291+ do_group_exit(SIGKILL);
19292+ }
19293+#endif
19294+
19295+#ifdef CONFIG_PAX_SEGMEXEC
19296+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
19297+
19298+#ifdef CONFIG_PAX_EMUTRAMP
19299+ switch (pax_handle_fetch_fault(regs)) {
19300+ case 2:
19301+ return;
19302+ }
19303+#endif
19304+
19305+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
19306+ do_group_exit(SIGKILL);
19307+ }
19308+#endif
19309+
19310+ }
19311+#endif
19312
19313 /* User mode accesses just cause a SIGSEGV */
19314 if (error_code & PF_USER) {
19315@@ -871,6 +1009,99 @@ static int spurious_fault_check(unsigned
19316 return 1;
19317 }
19318
19319+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
19320+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
19321+{
19322+ pte_t *pte;
19323+ pmd_t *pmd;
19324+ spinlock_t *ptl;
19325+ unsigned char pte_mask;
19326+
19327+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
19328+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
19329+ return 0;
19330+
19331+ /* PaX: it's our fault, let's handle it if we can */
19332+
19333+ /* PaX: take a look at read faults before acquiring any locks */
19334+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
19335+ /* instruction fetch attempt from a protected page in user mode */
19336+ up_read(&mm->mmap_sem);
19337+
19338+#ifdef CONFIG_PAX_EMUTRAMP
19339+ switch (pax_handle_fetch_fault(regs)) {
19340+ case 2:
19341+ return 1;
19342+ }
19343+#endif
19344+
19345+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
19346+ do_group_exit(SIGKILL);
19347+ }
19348+
19349+ pmd = pax_get_pmd(mm, address);
19350+ if (unlikely(!pmd))
19351+ return 0;
19352+
19353+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
19354+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
19355+ pte_unmap_unlock(pte, ptl);
19356+ return 0;
19357+ }
19358+
19359+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
19360+ /* write attempt to a protected page in user mode */
19361+ pte_unmap_unlock(pte, ptl);
19362+ return 0;
19363+ }
19364+
19365+#ifdef CONFIG_SMP
19366+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
19367+#else
19368+ if (likely(address > get_limit(regs->cs)))
19369+#endif
19370+ {
19371+ set_pte(pte, pte_mkread(*pte));
19372+ __flush_tlb_one(address);
19373+ pte_unmap_unlock(pte, ptl);
19374+ up_read(&mm->mmap_sem);
19375+ return 1;
19376+ }
19377+
19378+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
19379+
19380+ /*
19381+ * PaX: fill DTLB with user rights and retry
19382+ */
19383+ __asm__ __volatile__ (
19384+ "orb %2,(%1)\n"
19385+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
19386+/*
19387+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
19388+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
19389+ * page fault when examined during a TLB load attempt. this is true not only
19390+ * for PTEs holding a non-present entry but also present entries that will
19391+ * raise a page fault (such as those set up by PaX, or the copy-on-write
19392+ * mechanism). in effect it means that we do *not* need to flush the TLBs
19393+ * for our target pages since their PTEs are simply not in the TLBs at all.
19394+
19395+ * the best thing in omitting it is that we gain around 15-20% speed in the
19396+ * fast path of the page fault handler and can get rid of tracing since we
19397+ * can no longer flush unintended entries.
19398+ */
19399+ "invlpg (%0)\n"
19400+#endif
19401+ __copyuser_seg"testb $0,(%0)\n"
19402+ "xorb %3,(%1)\n"
19403+ :
19404+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
19405+ : "memory", "cc");
19406+ pte_unmap_unlock(pte, ptl);
19407+ up_read(&mm->mmap_sem);
19408+ return 1;
19409+}
19410+#endif
19411+
19412 /*
19413 * Handle a spurious fault caused by a stale TLB entry.
19414 *
19415@@ -943,6 +1174,9 @@ int show_unhandled_signals = 1;
19416 static inline int
19417 access_error(unsigned long error_code, struct vm_area_struct *vma)
19418 {
19419+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
19420+ return 1;
19421+
19422 if (error_code & PF_WRITE) {
19423 /* write, present and write, not present: */
19424 if (unlikely(!(vma->vm_flags & VM_WRITE)))
19425@@ -976,19 +1210,33 @@ do_page_fault(struct pt_regs *regs, unsi
19426 {
19427 struct vm_area_struct *vma;
19428 struct task_struct *tsk;
19429- unsigned long address;
19430 struct mm_struct *mm;
19431 int fault;
19432 int write = error_code & PF_WRITE;
19433 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
19434 (write ? FAULT_FLAG_WRITE : 0);
19435
19436+ /* Get the faulting address: */
19437+ unsigned long address = read_cr2();
19438+
19439+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19440+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
19441+ if (!search_exception_tables(regs->ip)) {
19442+ bad_area_nosemaphore(regs, error_code, address);
19443+ return;
19444+ }
19445+ if (address < PAX_USER_SHADOW_BASE) {
19446+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
19447+ printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
19448+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
19449+ } else
19450+ address -= PAX_USER_SHADOW_BASE;
19451+ }
19452+#endif
19453+
19454 tsk = current;
19455 mm = tsk->mm;
19456
19457- /* Get the faulting address: */
19458- address = read_cr2();
19459-
19460 /*
19461 * Detect and handle instructions that would cause a page fault for
19462 * both a tracked kernel page and a userspace page.
19463@@ -1048,7 +1296,7 @@ do_page_fault(struct pt_regs *regs, unsi
19464 * User-mode registers count as a user access even for any
19465 * potential system fault or CPU buglet:
19466 */
19467- if (user_mode_vm(regs)) {
19468+ if (user_mode(regs)) {
19469 local_irq_enable();
19470 error_code |= PF_USER;
19471 } else {
19472@@ -1103,6 +1351,11 @@ retry:
19473 might_sleep();
19474 }
19475
19476+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
19477+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
19478+ return;
19479+#endif
19480+
19481 vma = find_vma(mm, address);
19482 if (unlikely(!vma)) {
19483 bad_area(regs, error_code, address);
19484@@ -1114,18 +1367,24 @@ retry:
19485 bad_area(regs, error_code, address);
19486 return;
19487 }
19488- if (error_code & PF_USER) {
19489- /*
19490- * Accessing the stack below %sp is always a bug.
19491- * The large cushion allows instructions like enter
19492- * and pusha to work. ("enter $65535, $31" pushes
19493- * 32 pointers and then decrements %sp by 65535.)
19494- */
19495- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
19496- bad_area(regs, error_code, address);
19497- return;
19498- }
19499+ /*
19500+ * Accessing the stack below %sp is always a bug.
19501+ * The large cushion allows instructions like enter
19502+ * and pusha to work. ("enter $65535, $31" pushes
19503+ * 32 pointers and then decrements %sp by 65535.)
19504+ */
19505+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
19506+ bad_area(regs, error_code, address);
19507+ return;
19508 }
19509+
19510+#ifdef CONFIG_PAX_SEGMEXEC
19511+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
19512+ bad_area(regs, error_code, address);
19513+ return;
19514+ }
19515+#endif
19516+
19517 if (unlikely(expand_stack(vma, address))) {
19518 bad_area(regs, error_code, address);
19519 return;
19520@@ -1180,3 +1439,199 @@ good_area:
19521
19522 up_read(&mm->mmap_sem);
19523 }
19524+
19525+#ifdef CONFIG_PAX_EMUTRAMP
19526+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
19527+{
19528+ int err;
19529+
19530+ do { /* PaX: gcc trampoline emulation #1 */
19531+ unsigned char mov1, mov2;
19532+ unsigned short jmp;
19533+ unsigned int addr1, addr2;
19534+
19535+#ifdef CONFIG_X86_64
19536+ if ((regs->ip + 11) >> 32)
19537+ break;
19538+#endif
19539+
19540+ err = get_user(mov1, (unsigned char __user *)regs->ip);
19541+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
19542+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
19543+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
19544+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
19545+
19546+ if (err)
19547+ break;
19548+
19549+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
19550+ regs->cx = addr1;
19551+ regs->ax = addr2;
19552+ regs->ip = addr2;
19553+ return 2;
19554+ }
19555+ } while (0);
19556+
19557+ do { /* PaX: gcc trampoline emulation #2 */
19558+ unsigned char mov, jmp;
19559+ unsigned int addr1, addr2;
19560+
19561+#ifdef CONFIG_X86_64
19562+ if ((regs->ip + 9) >> 32)
19563+ break;
19564+#endif
19565+
19566+ err = get_user(mov, (unsigned char __user *)regs->ip);
19567+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
19568+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
19569+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
19570+
19571+ if (err)
19572+ break;
19573+
19574+ if (mov == 0xB9 && jmp == 0xE9) {
19575+ regs->cx = addr1;
19576+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
19577+ return 2;
19578+ }
19579+ } while (0);
19580+
19581+ return 1; /* PaX in action */
19582+}
19583+
19584+#ifdef CONFIG_X86_64
19585+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
19586+{
19587+ int err;
19588+
19589+ do { /* PaX: gcc trampoline emulation #1 */
19590+ unsigned short mov1, mov2, jmp1;
19591+ unsigned char jmp2;
19592+ unsigned int addr1;
19593+ unsigned long addr2;
19594+
19595+ err = get_user(mov1, (unsigned short __user *)regs->ip);
19596+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
19597+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
19598+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
19599+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
19600+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
19601+
19602+ if (err)
19603+ break;
19604+
19605+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
19606+ regs->r11 = addr1;
19607+ regs->r10 = addr2;
19608+ regs->ip = addr1;
19609+ return 2;
19610+ }
19611+ } while (0);
19612+
19613+ do { /* PaX: gcc trampoline emulation #2 */
19614+ unsigned short mov1, mov2, jmp1;
19615+ unsigned char jmp2;
19616+ unsigned long addr1, addr2;
19617+
19618+ err = get_user(mov1, (unsigned short __user *)regs->ip);
19619+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
19620+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
19621+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
19622+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
19623+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
19624+
19625+ if (err)
19626+ break;
19627+
19628+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
19629+ regs->r11 = addr1;
19630+ regs->r10 = addr2;
19631+ regs->ip = addr1;
19632+ return 2;
19633+ }
19634+ } while (0);
19635+
19636+ return 1; /* PaX in action */
19637+}
19638+#endif
19639+
19640+/*
19641+ * PaX: decide what to do with offenders (regs->ip = fault address)
19642+ *
19643+ * returns 1 when task should be killed
19644+ * 2 when gcc trampoline was detected
19645+ */
19646+static int pax_handle_fetch_fault(struct pt_regs *regs)
19647+{
19648+ if (v8086_mode(regs))
19649+ return 1;
19650+
19651+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
19652+ return 1;
19653+
19654+#ifdef CONFIG_X86_32
19655+ return pax_handle_fetch_fault_32(regs);
19656+#else
19657+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
19658+ return pax_handle_fetch_fault_32(regs);
19659+ else
19660+ return pax_handle_fetch_fault_64(regs);
19661+#endif
19662+}
19663+#endif
19664+
19665+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19666+void pax_report_insns(void *pc, void *sp)
19667+{
19668+ long i;
19669+
19670+ printk(KERN_ERR "PAX: bytes at PC: ");
19671+ for (i = 0; i < 20; i++) {
19672+ unsigned char c;
19673+ if (get_user(c, (unsigned char __force_user *)pc+i))
19674+ printk(KERN_CONT "?? ");
19675+ else
19676+ printk(KERN_CONT "%02x ", c);
19677+ }
19678+ printk("\n");
19679+
19680+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
19681+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
19682+ unsigned long c;
19683+ if (get_user(c, (unsigned long __force_user *)sp+i))
19684+#ifdef CONFIG_X86_32
19685+ printk(KERN_CONT "???????? ");
19686+#else
19687+ printk(KERN_CONT "???????????????? ");
19688+#endif
19689+ else
19690+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
19691+ }
19692+ printk("\n");
19693+}
19694+#endif
19695+
19696+/**
19697+ * probe_kernel_write(): safely attempt to write to a location
19698+ * @dst: address to write to
19699+ * @src: pointer to the data that shall be written
19700+ * @size: size of the data chunk
19701+ *
19702+ * Safely write to address @dst from the buffer at @src. If a kernel fault
19703+ * happens, handle that and return -EFAULT.
19704+ */
19705+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
19706+{
19707+ long ret;
19708+ mm_segment_t old_fs = get_fs();
19709+
19710+ set_fs(KERNEL_DS);
19711+ pagefault_disable();
19712+ pax_open_kernel();
19713+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
19714+ pax_close_kernel();
19715+ pagefault_enable();
19716+ set_fs(old_fs);
19717+
19718+ return ret ? -EFAULT : 0;
19719+}
19720diff -urNp linux-3.0.4/arch/x86/mm/gup.c linux-3.0.4/arch/x86/mm/gup.c
19721--- linux-3.0.4/arch/x86/mm/gup.c 2011-07-21 22:17:23.000000000 -0400
19722+++ linux-3.0.4/arch/x86/mm/gup.c 2011-08-23 21:47:55.000000000 -0400
19723@@ -263,7 +263,7 @@ int __get_user_pages_fast(unsigned long
19724 addr = start;
19725 len = (unsigned long) nr_pages << PAGE_SHIFT;
19726 end = start + len;
19727- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19728+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19729 (void __user *)start, len)))
19730 return 0;
19731
19732diff -urNp linux-3.0.4/arch/x86/mm/highmem_32.c linux-3.0.4/arch/x86/mm/highmem_32.c
19733--- linux-3.0.4/arch/x86/mm/highmem_32.c 2011-07-21 22:17:23.000000000 -0400
19734+++ linux-3.0.4/arch/x86/mm/highmem_32.c 2011-08-23 21:47:55.000000000 -0400
19735@@ -44,7 +44,10 @@ void *kmap_atomic_prot(struct page *page
19736 idx = type + KM_TYPE_NR*smp_processor_id();
19737 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
19738 BUG_ON(!pte_none(*(kmap_pte-idx)));
19739+
19740+ pax_open_kernel();
19741 set_pte(kmap_pte-idx, mk_pte(page, prot));
19742+ pax_close_kernel();
19743
19744 return (void *)vaddr;
19745 }
19746diff -urNp linux-3.0.4/arch/x86/mm/hugetlbpage.c linux-3.0.4/arch/x86/mm/hugetlbpage.c
19747--- linux-3.0.4/arch/x86/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
19748+++ linux-3.0.4/arch/x86/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
19749@@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmappe
19750 struct hstate *h = hstate_file(file);
19751 struct mm_struct *mm = current->mm;
19752 struct vm_area_struct *vma;
19753- unsigned long start_addr;
19754+ unsigned long start_addr, pax_task_size = TASK_SIZE;
19755+
19756+#ifdef CONFIG_PAX_SEGMEXEC
19757+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19758+ pax_task_size = SEGMEXEC_TASK_SIZE;
19759+#endif
19760+
19761+ pax_task_size -= PAGE_SIZE;
19762
19763 if (len > mm->cached_hole_size) {
19764- start_addr = mm->free_area_cache;
19765+ start_addr = mm->free_area_cache;
19766 } else {
19767- start_addr = TASK_UNMAPPED_BASE;
19768- mm->cached_hole_size = 0;
19769+ start_addr = mm->mmap_base;
19770+ mm->cached_hole_size = 0;
19771 }
19772
19773 full_search:
19774@@ -280,26 +287,27 @@ full_search:
19775
19776 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19777 /* At this point: (!vma || addr < vma->vm_end). */
19778- if (TASK_SIZE - len < addr) {
19779+ if (pax_task_size - len < addr) {
19780 /*
19781 * Start a new search - just in case we missed
19782 * some holes.
19783 */
19784- if (start_addr != TASK_UNMAPPED_BASE) {
19785- start_addr = TASK_UNMAPPED_BASE;
19786+ if (start_addr != mm->mmap_base) {
19787+ start_addr = mm->mmap_base;
19788 mm->cached_hole_size = 0;
19789 goto full_search;
19790 }
19791 return -ENOMEM;
19792 }
19793- if (!vma || addr + len <= vma->vm_start) {
19794- mm->free_area_cache = addr + len;
19795- return addr;
19796- }
19797+ if (check_heap_stack_gap(vma, addr, len))
19798+ break;
19799 if (addr + mm->cached_hole_size < vma->vm_start)
19800 mm->cached_hole_size = vma->vm_start - addr;
19801 addr = ALIGN(vma->vm_end, huge_page_size(h));
19802 }
19803+
19804+ mm->free_area_cache = addr + len;
19805+ return addr;
19806 }
19807
19808 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
19809@@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmappe
19810 {
19811 struct hstate *h = hstate_file(file);
19812 struct mm_struct *mm = current->mm;
19813- struct vm_area_struct *vma, *prev_vma;
19814- unsigned long base = mm->mmap_base, addr = addr0;
19815+ struct vm_area_struct *vma;
19816+ unsigned long base = mm->mmap_base, addr;
19817 unsigned long largest_hole = mm->cached_hole_size;
19818- int first_time = 1;
19819
19820 /* don't allow allocations above current base */
19821 if (mm->free_area_cache > base)
19822@@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmappe
19823 largest_hole = 0;
19824 mm->free_area_cache = base;
19825 }
19826-try_again:
19827+
19828 /* make sure it can fit in the remaining address space */
19829 if (mm->free_area_cache < len)
19830 goto fail;
19831
19832 /* either no address requested or can't fit in requested address hole */
19833- addr = (mm->free_area_cache - len) & huge_page_mask(h);
19834+ addr = (mm->free_area_cache - len);
19835 do {
19836+ addr &= huge_page_mask(h);
19837+ vma = find_vma(mm, addr);
19838 /*
19839 * Lookup failure means no vma is above this address,
19840 * i.e. return with success:
19841- */
19842- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
19843- return addr;
19844-
19845- /*
19846 * new region fits between prev_vma->vm_end and
19847 * vma->vm_start, use it:
19848 */
19849- if (addr + len <= vma->vm_start &&
19850- (!prev_vma || (addr >= prev_vma->vm_end))) {
19851+ if (check_heap_stack_gap(vma, addr, len)) {
19852 /* remember the address as a hint for next time */
19853- mm->cached_hole_size = largest_hole;
19854- return (mm->free_area_cache = addr);
19855- } else {
19856- /* pull free_area_cache down to the first hole */
19857- if (mm->free_area_cache == vma->vm_end) {
19858- mm->free_area_cache = vma->vm_start;
19859- mm->cached_hole_size = largest_hole;
19860- }
19861+ mm->cached_hole_size = largest_hole;
19862+ return (mm->free_area_cache = addr);
19863+ }
19864+ /* pull free_area_cache down to the first hole */
19865+ if (mm->free_area_cache == vma->vm_end) {
19866+ mm->free_area_cache = vma->vm_start;
19867+ mm->cached_hole_size = largest_hole;
19868 }
19869
19870 /* remember the largest hole we saw so far */
19871 if (addr + largest_hole < vma->vm_start)
19872- largest_hole = vma->vm_start - addr;
19873+ largest_hole = vma->vm_start - addr;
19874
19875 /* try just below the current vma->vm_start */
19876- addr = (vma->vm_start - len) & huge_page_mask(h);
19877- } while (len <= vma->vm_start);
19878+ addr = skip_heap_stack_gap(vma, len);
19879+ } while (!IS_ERR_VALUE(addr));
19880
19881 fail:
19882 /*
19883- * if hint left us with no space for the requested
19884- * mapping then try again:
19885- */
19886- if (first_time) {
19887- mm->free_area_cache = base;
19888- largest_hole = 0;
19889- first_time = 0;
19890- goto try_again;
19891- }
19892- /*
19893 * A failed mmap() very likely causes application failure,
19894 * so fall back to the bottom-up function here. This scenario
19895 * can happen with large stack limits and large mmap()
19896 * allocations.
19897 */
19898- mm->free_area_cache = TASK_UNMAPPED_BASE;
19899+
19900+#ifdef CONFIG_PAX_SEGMEXEC
19901+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19902+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19903+ else
19904+#endif
19905+
19906+ mm->mmap_base = TASK_UNMAPPED_BASE;
19907+
19908+#ifdef CONFIG_PAX_RANDMMAP
19909+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19910+ mm->mmap_base += mm->delta_mmap;
19911+#endif
19912+
19913+ mm->free_area_cache = mm->mmap_base;
19914 mm->cached_hole_size = ~0UL;
19915 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
19916 len, pgoff, flags);
19917@@ -386,6 +392,7 @@ fail:
19918 /*
19919 * Restore the topdown base:
19920 */
19921+ mm->mmap_base = base;
19922 mm->free_area_cache = base;
19923 mm->cached_hole_size = ~0UL;
19924
19925@@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *f
19926 struct hstate *h = hstate_file(file);
19927 struct mm_struct *mm = current->mm;
19928 struct vm_area_struct *vma;
19929+ unsigned long pax_task_size = TASK_SIZE;
19930
19931 if (len & ~huge_page_mask(h))
19932 return -EINVAL;
19933- if (len > TASK_SIZE)
19934+
19935+#ifdef CONFIG_PAX_SEGMEXEC
19936+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19937+ pax_task_size = SEGMEXEC_TASK_SIZE;
19938+#endif
19939+
19940+ pax_task_size -= PAGE_SIZE;
19941+
19942+ if (len > pax_task_size)
19943 return -ENOMEM;
19944
19945 if (flags & MAP_FIXED) {
19946@@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *f
19947 if (addr) {
19948 addr = ALIGN(addr, huge_page_size(h));
19949 vma = find_vma(mm, addr);
19950- if (TASK_SIZE - len >= addr &&
19951- (!vma || addr + len <= vma->vm_start))
19952+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
19953 return addr;
19954 }
19955 if (mm->get_unmapped_area == arch_get_unmapped_area)
19956diff -urNp linux-3.0.4/arch/x86/mm/init_32.c linux-3.0.4/arch/x86/mm/init_32.c
19957--- linux-3.0.4/arch/x86/mm/init_32.c 2011-07-21 22:17:23.000000000 -0400
19958+++ linux-3.0.4/arch/x86/mm/init_32.c 2011-08-23 21:47:55.000000000 -0400
19959@@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
19960 }
19961
19962 /*
19963- * Creates a middle page table and puts a pointer to it in the
19964- * given global directory entry. This only returns the gd entry
19965- * in non-PAE compilation mode, since the middle layer is folded.
19966- */
19967-static pmd_t * __init one_md_table_init(pgd_t *pgd)
19968-{
19969- pud_t *pud;
19970- pmd_t *pmd_table;
19971-
19972-#ifdef CONFIG_X86_PAE
19973- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
19974- if (after_bootmem)
19975- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
19976- else
19977- pmd_table = (pmd_t *)alloc_low_page();
19978- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
19979- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
19980- pud = pud_offset(pgd, 0);
19981- BUG_ON(pmd_table != pmd_offset(pud, 0));
19982-
19983- return pmd_table;
19984- }
19985-#endif
19986- pud = pud_offset(pgd, 0);
19987- pmd_table = pmd_offset(pud, 0);
19988-
19989- return pmd_table;
19990-}
19991-
19992-/*
19993 * Create a page table and place a pointer to it in a middle page
19994 * directory entry:
19995 */
19996@@ -123,13 +93,28 @@ static pte_t * __init one_page_table_ini
19997 page_table = (pte_t *)alloc_low_page();
19998
19999 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
20000+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20001+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
20002+#else
20003 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
20004+#endif
20005 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
20006 }
20007
20008 return pte_offset_kernel(pmd, 0);
20009 }
20010
20011+static pmd_t * __init one_md_table_init(pgd_t *pgd)
20012+{
20013+ pud_t *pud;
20014+ pmd_t *pmd_table;
20015+
20016+ pud = pud_offset(pgd, 0);
20017+ pmd_table = pmd_offset(pud, 0);
20018+
20019+ return pmd_table;
20020+}
20021+
20022 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
20023 {
20024 int pgd_idx = pgd_index(vaddr);
20025@@ -203,6 +188,7 @@ page_table_range_init(unsigned long star
20026 int pgd_idx, pmd_idx;
20027 unsigned long vaddr;
20028 pgd_t *pgd;
20029+ pud_t *pud;
20030 pmd_t *pmd;
20031 pte_t *pte = NULL;
20032
20033@@ -212,8 +198,13 @@ page_table_range_init(unsigned long star
20034 pgd = pgd_base + pgd_idx;
20035
20036 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
20037- pmd = one_md_table_init(pgd);
20038- pmd = pmd + pmd_index(vaddr);
20039+ pud = pud_offset(pgd, vaddr);
20040+ pmd = pmd_offset(pud, vaddr);
20041+
20042+#ifdef CONFIG_X86_PAE
20043+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
20044+#endif
20045+
20046 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
20047 pmd++, pmd_idx++) {
20048 pte = page_table_kmap_check(one_page_table_init(pmd),
20049@@ -225,11 +216,20 @@ page_table_range_init(unsigned long star
20050 }
20051 }
20052
20053-static inline int is_kernel_text(unsigned long addr)
20054+static inline int is_kernel_text(unsigned long start, unsigned long end)
20055 {
20056- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
20057- return 1;
20058- return 0;
20059+ if ((start > ktla_ktva((unsigned long)_etext) ||
20060+ end <= ktla_ktva((unsigned long)_stext)) &&
20061+ (start > ktla_ktva((unsigned long)_einittext) ||
20062+ end <= ktla_ktva((unsigned long)_sinittext)) &&
20063+
20064+#ifdef CONFIG_ACPI_SLEEP
20065+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
20066+#endif
20067+
20068+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
20069+ return 0;
20070+ return 1;
20071 }
20072
20073 /*
20074@@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned lo
20075 unsigned long last_map_addr = end;
20076 unsigned long start_pfn, end_pfn;
20077 pgd_t *pgd_base = swapper_pg_dir;
20078- int pgd_idx, pmd_idx, pte_ofs;
20079+ unsigned int pgd_idx, pmd_idx, pte_ofs;
20080 unsigned long pfn;
20081 pgd_t *pgd;
20082+ pud_t *pud;
20083 pmd_t *pmd;
20084 pte_t *pte;
20085 unsigned pages_2m, pages_4k;
20086@@ -281,8 +282,13 @@ repeat:
20087 pfn = start_pfn;
20088 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
20089 pgd = pgd_base + pgd_idx;
20090- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
20091- pmd = one_md_table_init(pgd);
20092+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
20093+ pud = pud_offset(pgd, 0);
20094+ pmd = pmd_offset(pud, 0);
20095+
20096+#ifdef CONFIG_X86_PAE
20097+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
20098+#endif
20099
20100 if (pfn >= end_pfn)
20101 continue;
20102@@ -294,14 +300,13 @@ repeat:
20103 #endif
20104 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
20105 pmd++, pmd_idx++) {
20106- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
20107+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
20108
20109 /*
20110 * Map with big pages if possible, otherwise
20111 * create normal page tables:
20112 */
20113 if (use_pse) {
20114- unsigned int addr2;
20115 pgprot_t prot = PAGE_KERNEL_LARGE;
20116 /*
20117 * first pass will use the same initial
20118@@ -311,11 +316,7 @@ repeat:
20119 __pgprot(PTE_IDENT_ATTR |
20120 _PAGE_PSE);
20121
20122- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
20123- PAGE_OFFSET + PAGE_SIZE-1;
20124-
20125- if (is_kernel_text(addr) ||
20126- is_kernel_text(addr2))
20127+ if (is_kernel_text(address, address + PMD_SIZE))
20128 prot = PAGE_KERNEL_LARGE_EXEC;
20129
20130 pages_2m++;
20131@@ -332,7 +333,7 @@ repeat:
20132 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
20133 pte += pte_ofs;
20134 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
20135- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
20136+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
20137 pgprot_t prot = PAGE_KERNEL;
20138 /*
20139 * first pass will use the same initial
20140@@ -340,7 +341,7 @@ repeat:
20141 */
20142 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
20143
20144- if (is_kernel_text(addr))
20145+ if (is_kernel_text(address, address + PAGE_SIZE))
20146 prot = PAGE_KERNEL_EXEC;
20147
20148 pages_4k++;
20149@@ -472,7 +473,7 @@ void __init native_pagetable_setup_start
20150
20151 pud = pud_offset(pgd, va);
20152 pmd = pmd_offset(pud, va);
20153- if (!pmd_present(*pmd))
20154+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
20155 break;
20156
20157 pte = pte_offset_kernel(pmd, va);
20158@@ -524,12 +525,10 @@ void __init early_ioremap_page_table_ran
20159
20160 static void __init pagetable_init(void)
20161 {
20162- pgd_t *pgd_base = swapper_pg_dir;
20163-
20164- permanent_kmaps_init(pgd_base);
20165+ permanent_kmaps_init(swapper_pg_dir);
20166 }
20167
20168-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
20169+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
20170 EXPORT_SYMBOL_GPL(__supported_pte_mask);
20171
20172 /* user-defined highmem size */
20173@@ -757,6 +756,12 @@ void __init mem_init(void)
20174
20175 pci_iommu_alloc();
20176
20177+#ifdef CONFIG_PAX_PER_CPU_PGD
20178+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
20179+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20180+ KERNEL_PGD_PTRS);
20181+#endif
20182+
20183 #ifdef CONFIG_FLATMEM
20184 BUG_ON(!mem_map);
20185 #endif
20186@@ -774,7 +779,7 @@ void __init mem_init(void)
20187 set_highmem_pages_init();
20188
20189 codesize = (unsigned long) &_etext - (unsigned long) &_text;
20190- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
20191+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
20192 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
20193
20194 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
20195@@ -815,10 +820,10 @@ void __init mem_init(void)
20196 ((unsigned long)&__init_end -
20197 (unsigned long)&__init_begin) >> 10,
20198
20199- (unsigned long)&_etext, (unsigned long)&_edata,
20200- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
20201+ (unsigned long)&_sdata, (unsigned long)&_edata,
20202+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
20203
20204- (unsigned long)&_text, (unsigned long)&_etext,
20205+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
20206 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
20207
20208 /*
20209@@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
20210 if (!kernel_set_to_readonly)
20211 return;
20212
20213+ start = ktla_ktva(start);
20214 pr_debug("Set kernel text: %lx - %lx for read write\n",
20215 start, start+size);
20216
20217@@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
20218 if (!kernel_set_to_readonly)
20219 return;
20220
20221+ start = ktla_ktva(start);
20222 pr_debug("Set kernel text: %lx - %lx for read only\n",
20223 start, start+size);
20224
20225@@ -938,6 +945,7 @@ void mark_rodata_ro(void)
20226 unsigned long start = PFN_ALIGN(_text);
20227 unsigned long size = PFN_ALIGN(_etext) - start;
20228
20229+ start = ktla_ktva(start);
20230 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
20231 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
20232 size >> 10);
20233diff -urNp linux-3.0.4/arch/x86/mm/init_64.c linux-3.0.4/arch/x86/mm/init_64.c
20234--- linux-3.0.4/arch/x86/mm/init_64.c 2011-07-21 22:17:23.000000000 -0400
20235+++ linux-3.0.4/arch/x86/mm/init_64.c 2011-10-06 04:17:55.000000000 -0400
20236@@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpa
20237 * around without checking the pgd every time.
20238 */
20239
20240-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
20241+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
20242 EXPORT_SYMBOL_GPL(__supported_pte_mask);
20243
20244 int force_personality32;
20245@@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long star
20246
20247 for (address = start; address <= end; address += PGDIR_SIZE) {
20248 const pgd_t *pgd_ref = pgd_offset_k(address);
20249+
20250+#ifdef CONFIG_PAX_PER_CPU_PGD
20251+ unsigned long cpu;
20252+#else
20253 struct page *page;
20254+#endif
20255
20256 if (pgd_none(*pgd_ref))
20257 continue;
20258
20259 spin_lock(&pgd_lock);
20260+
20261+#ifdef CONFIG_PAX_PER_CPU_PGD
20262+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20263+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
20264+#else
20265 list_for_each_entry(page, &pgd_list, lru) {
20266 pgd_t *pgd;
20267 spinlock_t *pgt_lock;
20268@@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long star
20269 /* the pgt_lock only for Xen */
20270 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
20271 spin_lock(pgt_lock);
20272+#endif
20273
20274 if (pgd_none(*pgd))
20275 set_pgd(pgd, *pgd_ref);
20276@@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long star
20277 BUG_ON(pgd_page_vaddr(*pgd)
20278 != pgd_page_vaddr(*pgd_ref));
20279
20280+#ifndef CONFIG_PAX_PER_CPU_PGD
20281 spin_unlock(pgt_lock);
20282+#endif
20283+
20284 }
20285 spin_unlock(&pgd_lock);
20286 }
20287@@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
20288 pmd = fill_pmd(pud, vaddr);
20289 pte = fill_pte(pmd, vaddr);
20290
20291+ pax_open_kernel();
20292 set_pte(pte, new_pte);
20293+ pax_close_kernel();
20294
20295 /*
20296 * It's enough to flush this one mapping.
20297@@ -262,14 +278,12 @@ static void __init __init_extra_mapping(
20298 pgd = pgd_offset_k((unsigned long)__va(phys));
20299 if (pgd_none(*pgd)) {
20300 pud = (pud_t *) spp_getpage();
20301- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
20302- _PAGE_USER));
20303+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
20304 }
20305 pud = pud_offset(pgd, (unsigned long)__va(phys));
20306 if (pud_none(*pud)) {
20307 pmd = (pmd_t *) spp_getpage();
20308- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
20309- _PAGE_USER));
20310+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
20311 }
20312 pmd = pmd_offset(pud, phys);
20313 BUG_ON(!pmd_none(*pmd));
20314@@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsign
20315 if (pfn >= pgt_buf_top)
20316 panic("alloc_low_page: ran out of memory");
20317
20318- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
20319+ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
20320 clear_page(adr);
20321 *phys = pfn * PAGE_SIZE;
20322 return adr;
20323@@ -346,7 +360,7 @@ static __ref void *map_low_page(void *vi
20324
20325 phys = __pa(virt);
20326 left = phys & (PAGE_SIZE - 1);
20327- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
20328+ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
20329 adr = (void *)(((unsigned long)adr) | left);
20330
20331 return adr;
20332@@ -693,6 +707,12 @@ void __init mem_init(void)
20333
20334 pci_iommu_alloc();
20335
20336+#ifdef CONFIG_PAX_PER_CPU_PGD
20337+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
20338+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20339+ KERNEL_PGD_PTRS);
20340+#endif
20341+
20342 /* clear_bss() already clear the empty_zero_page */
20343
20344 reservedpages = 0;
20345@@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
20346 static struct vm_area_struct gate_vma = {
20347 .vm_start = VSYSCALL_START,
20348 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
20349- .vm_page_prot = PAGE_READONLY_EXEC,
20350- .vm_flags = VM_READ | VM_EXEC
20351+ .vm_page_prot = PAGE_READONLY,
20352+ .vm_flags = VM_READ
20353 };
20354
20355 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
20356@@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long add
20357
20358 const char *arch_vma_name(struct vm_area_struct *vma)
20359 {
20360- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
20361+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
20362 return "[vdso]";
20363 if (vma == &gate_vma)
20364 return "[vsyscall]";
20365diff -urNp linux-3.0.4/arch/x86/mm/init.c linux-3.0.4/arch/x86/mm/init.c
20366--- linux-3.0.4/arch/x86/mm/init.c 2011-07-21 22:17:23.000000000 -0400
20367+++ linux-3.0.4/arch/x86/mm/init.c 2011-08-23 21:48:14.000000000 -0400
20368@@ -31,7 +31,7 @@ int direct_gbpages
20369 static void __init find_early_table_space(unsigned long end, int use_pse,
20370 int use_gbpages)
20371 {
20372- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
20373+ unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
20374 phys_addr_t base;
20375
20376 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
20377@@ -313,12 +313,34 @@ unsigned long __init_refok init_memory_m
20378 */
20379 int devmem_is_allowed(unsigned long pagenr)
20380 {
20381- if (pagenr <= 256)
20382+#ifdef CONFIG_GRKERNSEC_KMEM
20383+ /* allow BDA */
20384+ if (!pagenr)
20385+ return 1;
20386+ /* allow EBDA */
20387+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
20388+ return 1;
20389+#else
20390+ if (!pagenr)
20391+ return 1;
20392+#ifdef CONFIG_VM86
20393+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
20394+ return 1;
20395+#endif
20396+#endif
20397+
20398+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
20399 return 1;
20400+#ifdef CONFIG_GRKERNSEC_KMEM
20401+ /* throw out everything else below 1MB */
20402+ if (pagenr <= 256)
20403+ return 0;
20404+#endif
20405 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
20406 return 0;
20407 if (!page_is_ram(pagenr))
20408 return 1;
20409+
20410 return 0;
20411 }
20412
20413@@ -373,6 +395,86 @@ void free_init_pages(char *what, unsigne
20414
20415 void free_initmem(void)
20416 {
20417+
20418+#ifdef CONFIG_PAX_KERNEXEC
20419+#ifdef CONFIG_X86_32
20420+ /* PaX: limit KERNEL_CS to actual size */
20421+ unsigned long addr, limit;
20422+ struct desc_struct d;
20423+ int cpu;
20424+
20425+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
20426+ limit = (limit - 1UL) >> PAGE_SHIFT;
20427+
20428+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
20429+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
20430+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
20431+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
20432+ }
20433+
20434+ /* PaX: make KERNEL_CS read-only */
20435+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
20436+ if (!paravirt_enabled())
20437+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
20438+/*
20439+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
20440+ pgd = pgd_offset_k(addr);
20441+ pud = pud_offset(pgd, addr);
20442+ pmd = pmd_offset(pud, addr);
20443+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
20444+ }
20445+*/
20446+#ifdef CONFIG_X86_PAE
20447+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
20448+/*
20449+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
20450+ pgd = pgd_offset_k(addr);
20451+ pud = pud_offset(pgd, addr);
20452+ pmd = pmd_offset(pud, addr);
20453+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
20454+ }
20455+*/
20456+#endif
20457+
20458+#ifdef CONFIG_MODULES
20459+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
20460+#endif
20461+
20462+#else
20463+ pgd_t *pgd;
20464+ pud_t *pud;
20465+ pmd_t *pmd;
20466+ unsigned long addr, end;
20467+
20468+ /* PaX: make kernel code/rodata read-only, rest non-executable */
20469+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
20470+ pgd = pgd_offset_k(addr);
20471+ pud = pud_offset(pgd, addr);
20472+ pmd = pmd_offset(pud, addr);
20473+ if (!pmd_present(*pmd))
20474+ continue;
20475+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
20476+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
20477+ else
20478+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
20479+ }
20480+
20481+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
20482+ end = addr + KERNEL_IMAGE_SIZE;
20483+ for (; addr < end; addr += PMD_SIZE) {
20484+ pgd = pgd_offset_k(addr);
20485+ pud = pud_offset(pgd, addr);
20486+ pmd = pmd_offset(pud, addr);
20487+ if (!pmd_present(*pmd))
20488+ continue;
20489+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
20490+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
20491+ }
20492+#endif
20493+
20494+ flush_tlb_all();
20495+#endif
20496+
20497 free_init_pages("unused kernel memory",
20498 (unsigned long)(&__init_begin),
20499 (unsigned long)(&__init_end));
20500diff -urNp linux-3.0.4/arch/x86/mm/iomap_32.c linux-3.0.4/arch/x86/mm/iomap_32.c
20501--- linux-3.0.4/arch/x86/mm/iomap_32.c 2011-07-21 22:17:23.000000000 -0400
20502+++ linux-3.0.4/arch/x86/mm/iomap_32.c 2011-08-23 21:47:55.000000000 -0400
20503@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long
20504 type = kmap_atomic_idx_push();
20505 idx = type + KM_TYPE_NR * smp_processor_id();
20506 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
20507+
20508+ pax_open_kernel();
20509 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
20510+ pax_close_kernel();
20511+
20512 arch_flush_lazy_mmu_mode();
20513
20514 return (void *)vaddr;
20515diff -urNp linux-3.0.4/arch/x86/mm/ioremap.c linux-3.0.4/arch/x86/mm/ioremap.c
20516--- linux-3.0.4/arch/x86/mm/ioremap.c 2011-07-21 22:17:23.000000000 -0400
20517+++ linux-3.0.4/arch/x86/mm/ioremap.c 2011-08-23 21:47:55.000000000 -0400
20518@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(re
20519 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
20520 int is_ram = page_is_ram(pfn);
20521
20522- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
20523+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
20524 return NULL;
20525 WARN_ON_ONCE(is_ram);
20526 }
20527@@ -344,7 +344,7 @@ static int __init early_ioremap_debug_se
20528 early_param("early_ioremap_debug", early_ioremap_debug_setup);
20529
20530 static __initdata int after_paging_init;
20531-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
20532+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
20533
20534 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
20535 {
20536@@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
20537 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
20538
20539 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
20540- memset(bm_pte, 0, sizeof(bm_pte));
20541- pmd_populate_kernel(&init_mm, pmd, bm_pte);
20542+ pmd_populate_user(&init_mm, pmd, bm_pte);
20543
20544 /*
20545 * The boot-ioremap range spans multiple pmds, for which
20546diff -urNp linux-3.0.4/arch/x86/mm/kmemcheck/kmemcheck.c linux-3.0.4/arch/x86/mm/kmemcheck/kmemcheck.c
20547--- linux-3.0.4/arch/x86/mm/kmemcheck/kmemcheck.c 2011-07-21 22:17:23.000000000 -0400
20548+++ linux-3.0.4/arch/x86/mm/kmemcheck/kmemcheck.c 2011-08-23 21:47:55.000000000 -0400
20549@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
20550 * memory (e.g. tracked pages)? For now, we need this to avoid
20551 * invoking kmemcheck for PnP BIOS calls.
20552 */
20553- if (regs->flags & X86_VM_MASK)
20554+ if (v8086_mode(regs))
20555 return false;
20556- if (regs->cs != __KERNEL_CS)
20557+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
20558 return false;
20559
20560 pte = kmemcheck_pte_lookup(address);
20561diff -urNp linux-3.0.4/arch/x86/mm/mmap.c linux-3.0.4/arch/x86/mm/mmap.c
20562--- linux-3.0.4/arch/x86/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
20563+++ linux-3.0.4/arch/x86/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
20564@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
20565 * Leave an at least ~128 MB hole with possible stack randomization.
20566 */
20567 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
20568-#define MAX_GAP (TASK_SIZE/6*5)
20569+#define MAX_GAP (pax_task_size/6*5)
20570
20571 /*
20572 * True on X86_32 or when emulating IA32 on X86_64
20573@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
20574 return rnd << PAGE_SHIFT;
20575 }
20576
20577-static unsigned long mmap_base(void)
20578+static unsigned long mmap_base(struct mm_struct *mm)
20579 {
20580 unsigned long gap = rlimit(RLIMIT_STACK);
20581+ unsigned long pax_task_size = TASK_SIZE;
20582+
20583+#ifdef CONFIG_PAX_SEGMEXEC
20584+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20585+ pax_task_size = SEGMEXEC_TASK_SIZE;
20586+#endif
20587
20588 if (gap < MIN_GAP)
20589 gap = MIN_GAP;
20590 else if (gap > MAX_GAP)
20591 gap = MAX_GAP;
20592
20593- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
20594+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
20595 }
20596
20597 /*
20598 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
20599 * does, but not when emulating X86_32
20600 */
20601-static unsigned long mmap_legacy_base(void)
20602+static unsigned long mmap_legacy_base(struct mm_struct *mm)
20603 {
20604- if (mmap_is_ia32())
20605+ if (mmap_is_ia32()) {
20606+
20607+#ifdef CONFIG_PAX_SEGMEXEC
20608+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20609+ return SEGMEXEC_TASK_UNMAPPED_BASE;
20610+ else
20611+#endif
20612+
20613 return TASK_UNMAPPED_BASE;
20614- else
20615+ } else
20616 return TASK_UNMAPPED_BASE + mmap_rnd();
20617 }
20618
20619@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
20620 void arch_pick_mmap_layout(struct mm_struct *mm)
20621 {
20622 if (mmap_is_legacy()) {
20623- mm->mmap_base = mmap_legacy_base();
20624+ mm->mmap_base = mmap_legacy_base(mm);
20625+
20626+#ifdef CONFIG_PAX_RANDMMAP
20627+ if (mm->pax_flags & MF_PAX_RANDMMAP)
20628+ mm->mmap_base += mm->delta_mmap;
20629+#endif
20630+
20631 mm->get_unmapped_area = arch_get_unmapped_area;
20632 mm->unmap_area = arch_unmap_area;
20633 } else {
20634- mm->mmap_base = mmap_base();
20635+ mm->mmap_base = mmap_base(mm);
20636+
20637+#ifdef CONFIG_PAX_RANDMMAP
20638+ if (mm->pax_flags & MF_PAX_RANDMMAP)
20639+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
20640+#endif
20641+
20642 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
20643 mm->unmap_area = arch_unmap_area_topdown;
20644 }
20645diff -urNp linux-3.0.4/arch/x86/mm/mmio-mod.c linux-3.0.4/arch/x86/mm/mmio-mod.c
20646--- linux-3.0.4/arch/x86/mm/mmio-mod.c 2011-07-21 22:17:23.000000000 -0400
20647+++ linux-3.0.4/arch/x86/mm/mmio-mod.c 2011-08-23 21:47:55.000000000 -0400
20648@@ -195,7 +195,7 @@ static void pre(struct kmmio_probe *p, s
20649 break;
20650 default:
20651 {
20652- unsigned char *ip = (unsigned char *)instptr;
20653+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
20654 my_trace->opcode = MMIO_UNKNOWN_OP;
20655 my_trace->width = 0;
20656 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
20657@@ -235,7 +235,7 @@ static void post(struct kmmio_probe *p,
20658 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
20659 void __iomem *addr)
20660 {
20661- static atomic_t next_id;
20662+ static atomic_unchecked_t next_id;
20663 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
20664 /* These are page-unaligned. */
20665 struct mmiotrace_map map = {
20666@@ -259,7 +259,7 @@ static void ioremap_trace_core(resource_
20667 .private = trace
20668 },
20669 .phys = offset,
20670- .id = atomic_inc_return(&next_id)
20671+ .id = atomic_inc_return_unchecked(&next_id)
20672 };
20673 map.map_id = trace->id;
20674
20675diff -urNp linux-3.0.4/arch/x86/mm/pageattr.c linux-3.0.4/arch/x86/mm/pageattr.c
20676--- linux-3.0.4/arch/x86/mm/pageattr.c 2011-07-21 22:17:23.000000000 -0400
20677+++ linux-3.0.4/arch/x86/mm/pageattr.c 2011-08-23 21:47:55.000000000 -0400
20678@@ -261,7 +261,7 @@ static inline pgprot_t static_protection
20679 */
20680 #ifdef CONFIG_PCI_BIOS
20681 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
20682- pgprot_val(forbidden) |= _PAGE_NX;
20683+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20684 #endif
20685
20686 /*
20687@@ -269,9 +269,10 @@ static inline pgprot_t static_protection
20688 * Does not cover __inittext since that is gone later on. On
20689 * 64bit we do not enforce !NX on the low mapping
20690 */
20691- if (within(address, (unsigned long)_text, (unsigned long)_etext))
20692- pgprot_val(forbidden) |= _PAGE_NX;
20693+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
20694+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20695
20696+#ifdef CONFIG_DEBUG_RODATA
20697 /*
20698 * The .rodata section needs to be read-only. Using the pfn
20699 * catches all aliases.
20700@@ -279,6 +280,7 @@ static inline pgprot_t static_protection
20701 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
20702 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
20703 pgprot_val(forbidden) |= _PAGE_RW;
20704+#endif
20705
20706 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
20707 /*
20708@@ -317,6 +319,13 @@ static inline pgprot_t static_protection
20709 }
20710 #endif
20711
20712+#ifdef CONFIG_PAX_KERNEXEC
20713+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
20714+ pgprot_val(forbidden) |= _PAGE_RW;
20715+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20716+ }
20717+#endif
20718+
20719 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
20720
20721 return prot;
20722@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
20723 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
20724 {
20725 /* change init_mm */
20726+ pax_open_kernel();
20727 set_pte_atomic(kpte, pte);
20728+
20729 #ifdef CONFIG_X86_32
20730 if (!SHARED_KERNEL_PMD) {
20731+
20732+#ifdef CONFIG_PAX_PER_CPU_PGD
20733+ unsigned long cpu;
20734+#else
20735 struct page *page;
20736+#endif
20737
20738+#ifdef CONFIG_PAX_PER_CPU_PGD
20739+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20740+ pgd_t *pgd = get_cpu_pgd(cpu);
20741+#else
20742 list_for_each_entry(page, &pgd_list, lru) {
20743- pgd_t *pgd;
20744+ pgd_t *pgd = (pgd_t *)page_address(page);
20745+#endif
20746+
20747 pud_t *pud;
20748 pmd_t *pmd;
20749
20750- pgd = (pgd_t *)page_address(page) + pgd_index(address);
20751+ pgd += pgd_index(address);
20752 pud = pud_offset(pgd, address);
20753 pmd = pmd_offset(pud, address);
20754 set_pte_atomic((pte_t *)pmd, pte);
20755 }
20756 }
20757 #endif
20758+ pax_close_kernel();
20759 }
20760
20761 static int
20762diff -urNp linux-3.0.4/arch/x86/mm/pageattr-test.c linux-3.0.4/arch/x86/mm/pageattr-test.c
20763--- linux-3.0.4/arch/x86/mm/pageattr-test.c 2011-07-21 22:17:23.000000000 -0400
20764+++ linux-3.0.4/arch/x86/mm/pageattr-test.c 2011-08-23 21:47:55.000000000 -0400
20765@@ -36,7 +36,7 @@ enum {
20766
20767 static int pte_testbit(pte_t pte)
20768 {
20769- return pte_flags(pte) & _PAGE_UNUSED1;
20770+ return pte_flags(pte) & _PAGE_CPA_TEST;
20771 }
20772
20773 struct split_state {
20774diff -urNp linux-3.0.4/arch/x86/mm/pat.c linux-3.0.4/arch/x86/mm/pat.c
20775--- linux-3.0.4/arch/x86/mm/pat.c 2011-07-21 22:17:23.000000000 -0400
20776+++ linux-3.0.4/arch/x86/mm/pat.c 2011-08-23 21:47:55.000000000 -0400
20777@@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
20778
20779 if (!entry) {
20780 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
20781- current->comm, current->pid, start, end);
20782+ current->comm, task_pid_nr(current), start, end);
20783 return -EINVAL;
20784 }
20785
20786@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsig
20787 while (cursor < to) {
20788 if (!devmem_is_allowed(pfn)) {
20789 printk(KERN_INFO
20790- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
20791- current->comm, from, to);
20792+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
20793+ current->comm, from, to, cursor);
20794 return 0;
20795 }
20796 cursor += PAGE_SIZE;
20797@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, un
20798 printk(KERN_INFO
20799 "%s:%d ioremap_change_attr failed %s "
20800 "for %Lx-%Lx\n",
20801- current->comm, current->pid,
20802+ current->comm, task_pid_nr(current),
20803 cattr_name(flags),
20804 base, (unsigned long long)(base + size));
20805 return -EINVAL;
20806@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr,
20807 if (want_flags != flags) {
20808 printk(KERN_WARNING
20809 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
20810- current->comm, current->pid,
20811+ current->comm, task_pid_nr(current),
20812 cattr_name(want_flags),
20813 (unsigned long long)paddr,
20814 (unsigned long long)(paddr + size),
20815@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr,
20816 free_memtype(paddr, paddr + size);
20817 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
20818 " for %Lx-%Lx, got %s\n",
20819- current->comm, current->pid,
20820+ current->comm, task_pid_nr(current),
20821 cattr_name(want_flags),
20822 (unsigned long long)paddr,
20823 (unsigned long long)(paddr + size),
20824diff -urNp linux-3.0.4/arch/x86/mm/pf_in.c linux-3.0.4/arch/x86/mm/pf_in.c
20825--- linux-3.0.4/arch/x86/mm/pf_in.c 2011-07-21 22:17:23.000000000 -0400
20826+++ linux-3.0.4/arch/x86/mm/pf_in.c 2011-08-23 21:47:55.000000000 -0400
20827@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
20828 int i;
20829 enum reason_type rv = OTHERS;
20830
20831- p = (unsigned char *)ins_addr;
20832+ p = (unsigned char *)ktla_ktva(ins_addr);
20833 p += skip_prefix(p, &prf);
20834 p += get_opcode(p, &opcode);
20835
20836@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
20837 struct prefix_bits prf;
20838 int i;
20839
20840- p = (unsigned char *)ins_addr;
20841+ p = (unsigned char *)ktla_ktva(ins_addr);
20842 p += skip_prefix(p, &prf);
20843 p += get_opcode(p, &opcode);
20844
20845@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
20846 struct prefix_bits prf;
20847 int i;
20848
20849- p = (unsigned char *)ins_addr;
20850+ p = (unsigned char *)ktla_ktva(ins_addr);
20851 p += skip_prefix(p, &prf);
20852 p += get_opcode(p, &opcode);
20853
20854@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned l
20855 struct prefix_bits prf;
20856 int i;
20857
20858- p = (unsigned char *)ins_addr;
20859+ p = (unsigned char *)ktla_ktva(ins_addr);
20860 p += skip_prefix(p, &prf);
20861 p += get_opcode(p, &opcode);
20862 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
20863@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned l
20864 struct prefix_bits prf;
20865 int i;
20866
20867- p = (unsigned char *)ins_addr;
20868+ p = (unsigned char *)ktla_ktva(ins_addr);
20869 p += skip_prefix(p, &prf);
20870 p += get_opcode(p, &opcode);
20871 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
20872diff -urNp linux-3.0.4/arch/x86/mm/pgtable_32.c linux-3.0.4/arch/x86/mm/pgtable_32.c
20873--- linux-3.0.4/arch/x86/mm/pgtable_32.c 2011-07-21 22:17:23.000000000 -0400
20874+++ linux-3.0.4/arch/x86/mm/pgtable_32.c 2011-08-23 21:47:55.000000000 -0400
20875@@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr,
20876 return;
20877 }
20878 pte = pte_offset_kernel(pmd, vaddr);
20879+
20880+ pax_open_kernel();
20881 if (pte_val(pteval))
20882 set_pte_at(&init_mm, vaddr, pte, pteval);
20883 else
20884 pte_clear(&init_mm, vaddr, pte);
20885+ pax_close_kernel();
20886
20887 /*
20888 * It's enough to flush this one mapping.
20889diff -urNp linux-3.0.4/arch/x86/mm/pgtable.c linux-3.0.4/arch/x86/mm/pgtable.c
20890--- linux-3.0.4/arch/x86/mm/pgtable.c 2011-07-21 22:17:23.000000000 -0400
20891+++ linux-3.0.4/arch/x86/mm/pgtable.c 2011-08-23 21:47:55.000000000 -0400
20892@@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *p
20893 list_del(&page->lru);
20894 }
20895
20896-#define UNSHARED_PTRS_PER_PGD \
20897- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
20898+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20899+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
20900
20901+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
20902+{
20903+ while (count--)
20904+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
20905+}
20906+#endif
20907+
20908+#ifdef CONFIG_PAX_PER_CPU_PGD
20909+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
20910+{
20911+ while (count--)
20912+
20913+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20914+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
20915+#else
20916+ *dst++ = *src++;
20917+#endif
20918
20919+}
20920+#endif
20921+
20922+#ifdef CONFIG_X86_64
20923+#define pxd_t pud_t
20924+#define pyd_t pgd_t
20925+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
20926+#define pxd_free(mm, pud) pud_free((mm), (pud))
20927+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
20928+#define pyd_offset(mm ,address) pgd_offset((mm), (address))
20929+#define PYD_SIZE PGDIR_SIZE
20930+#else
20931+#define pxd_t pmd_t
20932+#define pyd_t pud_t
20933+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
20934+#define pxd_free(mm, pud) pmd_free((mm), (pud))
20935+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
20936+#define pyd_offset(mm ,address) pud_offset((mm), (address))
20937+#define PYD_SIZE PUD_SIZE
20938+#endif
20939+
20940+#ifdef CONFIG_PAX_PER_CPU_PGD
20941+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
20942+static inline void pgd_dtor(pgd_t *pgd) {}
20943+#else
20944 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
20945 {
20946 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
20947@@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
20948 pgd_list_del(pgd);
20949 spin_unlock(&pgd_lock);
20950 }
20951+#endif
20952
20953 /*
20954 * List of all pgd's needed for non-PAE so it can invalidate entries
20955@@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
20956 * -- wli
20957 */
20958
20959-#ifdef CONFIG_X86_PAE
20960+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
20961 /*
20962 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
20963 * updating the top-level pagetable entries to guarantee the
20964@@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
20965 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
20966 * and initialize the kernel pmds here.
20967 */
20968-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
20969+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
20970
20971 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
20972 {
20973@@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm,
20974 */
20975 flush_tlb_mm(mm);
20976 }
20977+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
20978+#define PREALLOCATED_PXDS USER_PGD_PTRS
20979 #else /* !CONFIG_X86_PAE */
20980
20981 /* No need to prepopulate any pagetable entries in non-PAE modes. */
20982-#define PREALLOCATED_PMDS 0
20983+#define PREALLOCATED_PXDS 0
20984
20985 #endif /* CONFIG_X86_PAE */
20986
20987-static void free_pmds(pmd_t *pmds[])
20988+static void free_pxds(pxd_t *pxds[])
20989 {
20990 int i;
20991
20992- for(i = 0; i < PREALLOCATED_PMDS; i++)
20993- if (pmds[i])
20994- free_page((unsigned long)pmds[i]);
20995+ for(i = 0; i < PREALLOCATED_PXDS; i++)
20996+ if (pxds[i])
20997+ free_page((unsigned long)pxds[i]);
20998 }
20999
21000-static int preallocate_pmds(pmd_t *pmds[])
21001+static int preallocate_pxds(pxd_t *pxds[])
21002 {
21003 int i;
21004 bool failed = false;
21005
21006- for(i = 0; i < PREALLOCATED_PMDS; i++) {
21007- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
21008- if (pmd == NULL)
21009+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
21010+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
21011+ if (pxd == NULL)
21012 failed = true;
21013- pmds[i] = pmd;
21014+ pxds[i] = pxd;
21015 }
21016
21017 if (failed) {
21018- free_pmds(pmds);
21019+ free_pxds(pxds);
21020 return -ENOMEM;
21021 }
21022
21023@@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[
21024 * preallocate which never got a corresponding vma will need to be
21025 * freed manually.
21026 */
21027-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
21028+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
21029 {
21030 int i;
21031
21032- for(i = 0; i < PREALLOCATED_PMDS; i++) {
21033+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
21034 pgd_t pgd = pgdp[i];
21035
21036 if (pgd_val(pgd) != 0) {
21037- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
21038+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
21039
21040- pgdp[i] = native_make_pgd(0);
21041+ set_pgd(pgdp + i, native_make_pgd(0));
21042
21043- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
21044- pmd_free(mm, pmd);
21045+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
21046+ pxd_free(mm, pxd);
21047 }
21048 }
21049 }
21050
21051-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
21052+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
21053 {
21054- pud_t *pud;
21055+ pyd_t *pyd;
21056 unsigned long addr;
21057 int i;
21058
21059- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
21060+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
21061 return;
21062
21063- pud = pud_offset(pgd, 0);
21064+#ifdef CONFIG_X86_64
21065+ pyd = pyd_offset(mm, 0L);
21066+#else
21067+ pyd = pyd_offset(pgd, 0L);
21068+#endif
21069
21070- for (addr = i = 0; i < PREALLOCATED_PMDS;
21071- i++, pud++, addr += PUD_SIZE) {
21072- pmd_t *pmd = pmds[i];
21073+ for (addr = i = 0; i < PREALLOCATED_PXDS;
21074+ i++, pyd++, addr += PYD_SIZE) {
21075+ pxd_t *pxd = pxds[i];
21076
21077 if (i >= KERNEL_PGD_BOUNDARY)
21078- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
21079- sizeof(pmd_t) * PTRS_PER_PMD);
21080+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
21081+ sizeof(pxd_t) * PTRS_PER_PMD);
21082
21083- pud_populate(mm, pud, pmd);
21084+ pyd_populate(mm, pyd, pxd);
21085 }
21086 }
21087
21088 pgd_t *pgd_alloc(struct mm_struct *mm)
21089 {
21090 pgd_t *pgd;
21091- pmd_t *pmds[PREALLOCATED_PMDS];
21092+ pxd_t *pxds[PREALLOCATED_PXDS];
21093
21094 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
21095
21096@@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
21097
21098 mm->pgd = pgd;
21099
21100- if (preallocate_pmds(pmds) != 0)
21101+ if (preallocate_pxds(pxds) != 0)
21102 goto out_free_pgd;
21103
21104 if (paravirt_pgd_alloc(mm) != 0)
21105- goto out_free_pmds;
21106+ goto out_free_pxds;
21107
21108 /*
21109 * Make sure that pre-populating the pmds is atomic with
21110@@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
21111 spin_lock(&pgd_lock);
21112
21113 pgd_ctor(mm, pgd);
21114- pgd_prepopulate_pmd(mm, pgd, pmds);
21115+ pgd_prepopulate_pxd(mm, pgd, pxds);
21116
21117 spin_unlock(&pgd_lock);
21118
21119 return pgd;
21120
21121-out_free_pmds:
21122- free_pmds(pmds);
21123+out_free_pxds:
21124+ free_pxds(pxds);
21125 out_free_pgd:
21126 free_page((unsigned long)pgd);
21127 out:
21128@@ -295,7 +344,7 @@ out:
21129
21130 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
21131 {
21132- pgd_mop_up_pmds(mm, pgd);
21133+ pgd_mop_up_pxds(mm, pgd);
21134 pgd_dtor(pgd);
21135 paravirt_pgd_free(mm, pgd);
21136 free_page((unsigned long)pgd);
21137diff -urNp linux-3.0.4/arch/x86/mm/setup_nx.c linux-3.0.4/arch/x86/mm/setup_nx.c
21138--- linux-3.0.4/arch/x86/mm/setup_nx.c 2011-07-21 22:17:23.000000000 -0400
21139+++ linux-3.0.4/arch/x86/mm/setup_nx.c 2011-08-23 21:47:55.000000000 -0400
21140@@ -5,8 +5,10 @@
21141 #include <asm/pgtable.h>
21142 #include <asm/proto.h>
21143
21144+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21145 static int disable_nx __cpuinitdata;
21146
21147+#ifndef CONFIG_PAX_PAGEEXEC
21148 /*
21149 * noexec = on|off
21150 *
21151@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str
21152 return 0;
21153 }
21154 early_param("noexec", noexec_setup);
21155+#endif
21156+
21157+#endif
21158
21159 void __cpuinit x86_configure_nx(void)
21160 {
21161+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21162 if (cpu_has_nx && !disable_nx)
21163 __supported_pte_mask |= _PAGE_NX;
21164 else
21165+#endif
21166 __supported_pte_mask &= ~_PAGE_NX;
21167 }
21168
21169diff -urNp linux-3.0.4/arch/x86/mm/tlb.c linux-3.0.4/arch/x86/mm/tlb.c
21170--- linux-3.0.4/arch/x86/mm/tlb.c 2011-07-21 22:17:23.000000000 -0400
21171+++ linux-3.0.4/arch/x86/mm/tlb.c 2011-08-23 21:47:55.000000000 -0400
21172@@ -65,7 +65,11 @@ void leave_mm(int cpu)
21173 BUG();
21174 cpumask_clear_cpu(cpu,
21175 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
21176+
21177+#ifndef CONFIG_PAX_PER_CPU_PGD
21178 load_cr3(swapper_pg_dir);
21179+#endif
21180+
21181 }
21182 EXPORT_SYMBOL_GPL(leave_mm);
21183
21184diff -urNp linux-3.0.4/arch/x86/net/bpf_jit_comp.c linux-3.0.4/arch/x86/net/bpf_jit_comp.c
21185--- linux-3.0.4/arch/x86/net/bpf_jit_comp.c 2011-07-21 22:17:23.000000000 -0400
21186+++ linux-3.0.4/arch/x86/net/bpf_jit_comp.c 2011-08-23 21:47:55.000000000 -0400
21187@@ -589,7 +589,9 @@ cond_branch: f_offset = addrs[i + filt
21188 module_free(NULL, image);
21189 return;
21190 }
21191+ pax_open_kernel();
21192 memcpy(image + proglen, temp, ilen);
21193+ pax_close_kernel();
21194 }
21195 proglen += ilen;
21196 addrs[i] = proglen;
21197@@ -609,7 +611,7 @@ cond_branch: f_offset = addrs[i + filt
21198 break;
21199 }
21200 if (proglen == oldproglen) {
21201- image = module_alloc(max_t(unsigned int,
21202+ image = module_alloc_exec(max_t(unsigned int,
21203 proglen,
21204 sizeof(struct work_struct)));
21205 if (!image)
21206diff -urNp linux-3.0.4/arch/x86/oprofile/backtrace.c linux-3.0.4/arch/x86/oprofile/backtrace.c
21207--- linux-3.0.4/arch/x86/oprofile/backtrace.c 2011-09-02 18:11:21.000000000 -0400
21208+++ linux-3.0.4/arch/x86/oprofile/backtrace.c 2011-10-06 04:17:55.000000000 -0400
21209@@ -83,11 +83,11 @@ dump_user_backtrace_32(struct stack_fram
21210 struct stack_frame_ia32 *fp;
21211 unsigned long bytes;
21212
21213- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
21214+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
21215 if (bytes != sizeof(bufhead))
21216 return NULL;
21217
21218- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
21219+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
21220
21221 oprofile_add_trace(bufhead[0].return_address);
21222
21223@@ -129,7 +129,7 @@ static struct stack_frame *dump_user_bac
21224 struct stack_frame bufhead[2];
21225 unsigned long bytes;
21226
21227- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
21228+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
21229 if (bytes != sizeof(bufhead))
21230 return NULL;
21231
21232@@ -148,7 +148,7 @@ x86_backtrace(struct pt_regs * const reg
21233 {
21234 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
21235
21236- if (!user_mode_vm(regs)) {
21237+ if (!user_mode(regs)) {
21238 unsigned long stack = kernel_stack_pointer(regs);
21239 if (depth)
21240 dump_trace(NULL, regs, (unsigned long *)stack, 0,
21241diff -urNp linux-3.0.4/arch/x86/pci/mrst.c linux-3.0.4/arch/x86/pci/mrst.c
21242--- linux-3.0.4/arch/x86/pci/mrst.c 2011-07-21 22:17:23.000000000 -0400
21243+++ linux-3.0.4/arch/x86/pci/mrst.c 2011-08-23 21:47:55.000000000 -0400
21244@@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
21245 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
21246 pci_mmcfg_late_init();
21247 pcibios_enable_irq = mrst_pci_irq_enable;
21248- pci_root_ops = pci_mrst_ops;
21249+ pax_open_kernel();
21250+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
21251+ pax_close_kernel();
21252 /* Continue with standard init */
21253 return 1;
21254 }
21255diff -urNp linux-3.0.4/arch/x86/pci/pcbios.c linux-3.0.4/arch/x86/pci/pcbios.c
21256--- linux-3.0.4/arch/x86/pci/pcbios.c 2011-07-21 22:17:23.000000000 -0400
21257+++ linux-3.0.4/arch/x86/pci/pcbios.c 2011-08-23 21:47:55.000000000 -0400
21258@@ -79,50 +79,93 @@ union bios32 {
21259 static struct {
21260 unsigned long address;
21261 unsigned short segment;
21262-} bios32_indirect = { 0, __KERNEL_CS };
21263+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
21264
21265 /*
21266 * Returns the entry point for the given service, NULL on error
21267 */
21268
21269-static unsigned long bios32_service(unsigned long service)
21270+static unsigned long __devinit bios32_service(unsigned long service)
21271 {
21272 unsigned char return_code; /* %al */
21273 unsigned long address; /* %ebx */
21274 unsigned long length; /* %ecx */
21275 unsigned long entry; /* %edx */
21276 unsigned long flags;
21277+ struct desc_struct d, *gdt;
21278
21279 local_irq_save(flags);
21280- __asm__("lcall *(%%edi); cld"
21281+
21282+ gdt = get_cpu_gdt_table(smp_processor_id());
21283+
21284+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
21285+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
21286+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
21287+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
21288+
21289+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
21290 : "=a" (return_code),
21291 "=b" (address),
21292 "=c" (length),
21293 "=d" (entry)
21294 : "0" (service),
21295 "1" (0),
21296- "D" (&bios32_indirect));
21297+ "D" (&bios32_indirect),
21298+ "r"(__PCIBIOS_DS)
21299+ : "memory");
21300+
21301+ pax_open_kernel();
21302+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
21303+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
21304+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
21305+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
21306+ pax_close_kernel();
21307+
21308 local_irq_restore(flags);
21309
21310 switch (return_code) {
21311- case 0:
21312- return address + entry;
21313- case 0x80: /* Not present */
21314- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
21315- return 0;
21316- default: /* Shouldn't happen */
21317- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
21318- service, return_code);
21319+ case 0: {
21320+ int cpu;
21321+ unsigned char flags;
21322+
21323+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
21324+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
21325+ printk(KERN_WARNING "bios32_service: not valid\n");
21326 return 0;
21327+ }
21328+ address = address + PAGE_OFFSET;
21329+ length += 16UL; /* some BIOSs underreport this... */
21330+ flags = 4;
21331+ if (length >= 64*1024*1024) {
21332+ length >>= PAGE_SHIFT;
21333+ flags |= 8;
21334+ }
21335+
21336+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
21337+ gdt = get_cpu_gdt_table(cpu);
21338+ pack_descriptor(&d, address, length, 0x9b, flags);
21339+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
21340+ pack_descriptor(&d, address, length, 0x93, flags);
21341+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
21342+ }
21343+ return entry;
21344+ }
21345+ case 0x80: /* Not present */
21346+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
21347+ return 0;
21348+ default: /* Shouldn't happen */
21349+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
21350+ service, return_code);
21351+ return 0;
21352 }
21353 }
21354
21355 static struct {
21356 unsigned long address;
21357 unsigned short segment;
21358-} pci_indirect = { 0, __KERNEL_CS };
21359+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
21360
21361-static int pci_bios_present;
21362+static int pci_bios_present __read_only;
21363
21364 static int __devinit check_pcibios(void)
21365 {
21366@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
21367 unsigned long flags, pcibios_entry;
21368
21369 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
21370- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
21371+ pci_indirect.address = pcibios_entry;
21372
21373 local_irq_save(flags);
21374- __asm__(
21375- "lcall *(%%edi); cld\n\t"
21376+ __asm__("movw %w6, %%ds\n\t"
21377+ "lcall *%%ss:(%%edi); cld\n\t"
21378+ "push %%ss\n\t"
21379+ "pop %%ds\n\t"
21380 "jc 1f\n\t"
21381 "xor %%ah, %%ah\n"
21382 "1:"
21383@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
21384 "=b" (ebx),
21385 "=c" (ecx)
21386 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
21387- "D" (&pci_indirect)
21388+ "D" (&pci_indirect),
21389+ "r" (__PCIBIOS_DS)
21390 : "memory");
21391 local_irq_restore(flags);
21392
21393@@ -188,7 +234,10 @@ static int pci_bios_read(unsigned int se
21394
21395 switch (len) {
21396 case 1:
21397- __asm__("lcall *(%%esi); cld\n\t"
21398+ __asm__("movw %w6, %%ds\n\t"
21399+ "lcall *%%ss:(%%esi); cld\n\t"
21400+ "push %%ss\n\t"
21401+ "pop %%ds\n\t"
21402 "jc 1f\n\t"
21403 "xor %%ah, %%ah\n"
21404 "1:"
21405@@ -197,7 +246,8 @@ static int pci_bios_read(unsigned int se
21406 : "1" (PCIBIOS_READ_CONFIG_BYTE),
21407 "b" (bx),
21408 "D" ((long)reg),
21409- "S" (&pci_indirect));
21410+ "S" (&pci_indirect),
21411+ "r" (__PCIBIOS_DS));
21412 /*
21413 * Zero-extend the result beyond 8 bits, do not trust the
21414 * BIOS having done it:
21415@@ -205,7 +255,10 @@ static int pci_bios_read(unsigned int se
21416 *value &= 0xff;
21417 break;
21418 case 2:
21419- __asm__("lcall *(%%esi); cld\n\t"
21420+ __asm__("movw %w6, %%ds\n\t"
21421+ "lcall *%%ss:(%%esi); cld\n\t"
21422+ "push %%ss\n\t"
21423+ "pop %%ds\n\t"
21424 "jc 1f\n\t"
21425 "xor %%ah, %%ah\n"
21426 "1:"
21427@@ -214,7 +267,8 @@ static int pci_bios_read(unsigned int se
21428 : "1" (PCIBIOS_READ_CONFIG_WORD),
21429 "b" (bx),
21430 "D" ((long)reg),
21431- "S" (&pci_indirect));
21432+ "S" (&pci_indirect),
21433+ "r" (__PCIBIOS_DS));
21434 /*
21435 * Zero-extend the result beyond 16 bits, do not trust the
21436 * BIOS having done it:
21437@@ -222,7 +276,10 @@ static int pci_bios_read(unsigned int se
21438 *value &= 0xffff;
21439 break;
21440 case 4:
21441- __asm__("lcall *(%%esi); cld\n\t"
21442+ __asm__("movw %w6, %%ds\n\t"
21443+ "lcall *%%ss:(%%esi); cld\n\t"
21444+ "push %%ss\n\t"
21445+ "pop %%ds\n\t"
21446 "jc 1f\n\t"
21447 "xor %%ah, %%ah\n"
21448 "1:"
21449@@ -231,7 +288,8 @@ static int pci_bios_read(unsigned int se
21450 : "1" (PCIBIOS_READ_CONFIG_DWORD),
21451 "b" (bx),
21452 "D" ((long)reg),
21453- "S" (&pci_indirect));
21454+ "S" (&pci_indirect),
21455+ "r" (__PCIBIOS_DS));
21456 break;
21457 }
21458
21459@@ -254,7 +312,10 @@ static int pci_bios_write(unsigned int s
21460
21461 switch (len) {
21462 case 1:
21463- __asm__("lcall *(%%esi); cld\n\t"
21464+ __asm__("movw %w6, %%ds\n\t"
21465+ "lcall *%%ss:(%%esi); cld\n\t"
21466+ "push %%ss\n\t"
21467+ "pop %%ds\n\t"
21468 "jc 1f\n\t"
21469 "xor %%ah, %%ah\n"
21470 "1:"
21471@@ -263,10 +324,14 @@ static int pci_bios_write(unsigned int s
21472 "c" (value),
21473 "b" (bx),
21474 "D" ((long)reg),
21475- "S" (&pci_indirect));
21476+ "S" (&pci_indirect),
21477+ "r" (__PCIBIOS_DS));
21478 break;
21479 case 2:
21480- __asm__("lcall *(%%esi); cld\n\t"
21481+ __asm__("movw %w6, %%ds\n\t"
21482+ "lcall *%%ss:(%%esi); cld\n\t"
21483+ "push %%ss\n\t"
21484+ "pop %%ds\n\t"
21485 "jc 1f\n\t"
21486 "xor %%ah, %%ah\n"
21487 "1:"
21488@@ -275,10 +340,14 @@ static int pci_bios_write(unsigned int s
21489 "c" (value),
21490 "b" (bx),
21491 "D" ((long)reg),
21492- "S" (&pci_indirect));
21493+ "S" (&pci_indirect),
21494+ "r" (__PCIBIOS_DS));
21495 break;
21496 case 4:
21497- __asm__("lcall *(%%esi); cld\n\t"
21498+ __asm__("movw %w6, %%ds\n\t"
21499+ "lcall *%%ss:(%%esi); cld\n\t"
21500+ "push %%ss\n\t"
21501+ "pop %%ds\n\t"
21502 "jc 1f\n\t"
21503 "xor %%ah, %%ah\n"
21504 "1:"
21505@@ -287,7 +356,8 @@ static int pci_bios_write(unsigned int s
21506 "c" (value),
21507 "b" (bx),
21508 "D" ((long)reg),
21509- "S" (&pci_indirect));
21510+ "S" (&pci_indirect),
21511+ "r" (__PCIBIOS_DS));
21512 break;
21513 }
21514
21515@@ -392,10 +462,13 @@ struct irq_routing_table * pcibios_get_i
21516
21517 DBG("PCI: Fetching IRQ routing table... ");
21518 __asm__("push %%es\n\t"
21519+ "movw %w8, %%ds\n\t"
21520 "push %%ds\n\t"
21521 "pop %%es\n\t"
21522- "lcall *(%%esi); cld\n\t"
21523+ "lcall *%%ss:(%%esi); cld\n\t"
21524 "pop %%es\n\t"
21525+ "push %%ss\n\t"
21526+ "pop %%ds\n"
21527 "jc 1f\n\t"
21528 "xor %%ah, %%ah\n"
21529 "1:"
21530@@ -406,7 +479,8 @@ struct irq_routing_table * pcibios_get_i
21531 "1" (0),
21532 "D" ((long) &opt),
21533 "S" (&pci_indirect),
21534- "m" (opt)
21535+ "m" (opt),
21536+ "r" (__PCIBIOS_DS)
21537 : "memory");
21538 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
21539 if (ret & 0xff00)
21540@@ -430,7 +504,10 @@ int pcibios_set_irq_routing(struct pci_d
21541 {
21542 int ret;
21543
21544- __asm__("lcall *(%%esi); cld\n\t"
21545+ __asm__("movw %w5, %%ds\n\t"
21546+ "lcall *%%ss:(%%esi); cld\n\t"
21547+ "push %%ss\n\t"
21548+ "pop %%ds\n"
21549 "jc 1f\n\t"
21550 "xor %%ah, %%ah\n"
21551 "1:"
21552@@ -438,7 +515,8 @@ int pcibios_set_irq_routing(struct pci_d
21553 : "0" (PCIBIOS_SET_PCI_HW_INT),
21554 "b" ((dev->bus->number << 8) | dev->devfn),
21555 "c" ((irq << 8) | (pin + 10)),
21556- "S" (&pci_indirect));
21557+ "S" (&pci_indirect),
21558+ "r" (__PCIBIOS_DS));
21559 return !(ret & 0xff00);
21560 }
21561 EXPORT_SYMBOL(pcibios_set_irq_routing);
21562diff -urNp linux-3.0.4/arch/x86/platform/efi/efi_32.c linux-3.0.4/arch/x86/platform/efi/efi_32.c
21563--- linux-3.0.4/arch/x86/platform/efi/efi_32.c 2011-07-21 22:17:23.000000000 -0400
21564+++ linux-3.0.4/arch/x86/platform/efi/efi_32.c 2011-10-06 04:17:55.000000000 -0400
21565@@ -38,70 +38,56 @@
21566 */
21567
21568 static unsigned long efi_rt_eflags;
21569-static pgd_t efi_bak_pg_dir_pointer[2];
21570+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
21571
21572-void efi_call_phys_prelog(void)
21573+void __init efi_call_phys_prelog(void)
21574 {
21575- unsigned long cr4;
21576- unsigned long temp;
21577 struct desc_ptr gdt_descr;
21578
21579- local_irq_save(efi_rt_eflags);
21580+#ifdef CONFIG_PAX_KERNEXEC
21581+ struct desc_struct d;
21582+#endif
21583
21584- /*
21585- * If I don't have PAE, I should just duplicate two entries in page
21586- * directory. If I have PAE, I just need to duplicate one entry in
21587- * page directory.
21588- */
21589- cr4 = read_cr4_safe();
21590+ local_irq_save(efi_rt_eflags);
21591
21592- if (cr4 & X86_CR4_PAE) {
21593- efi_bak_pg_dir_pointer[0].pgd =
21594- swapper_pg_dir[pgd_index(0)].pgd;
21595- swapper_pg_dir[0].pgd =
21596- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
21597- } else {
21598- efi_bak_pg_dir_pointer[0].pgd =
21599- swapper_pg_dir[pgd_index(0)].pgd;
21600- efi_bak_pg_dir_pointer[1].pgd =
21601- swapper_pg_dir[pgd_index(0x400000)].pgd;
21602- swapper_pg_dir[pgd_index(0)].pgd =
21603- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
21604- temp = PAGE_OFFSET + 0x400000;
21605- swapper_pg_dir[pgd_index(0x400000)].pgd =
21606- swapper_pg_dir[pgd_index(temp)].pgd;
21607- }
21608+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
21609+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21610+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
21611
21612 /*
21613 * After the lock is released, the original page table is restored.
21614 */
21615 __flush_tlb_all();
21616
21617+#ifdef CONFIG_PAX_KERNEXEC
21618+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
21619+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
21620+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
21621+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
21622+#endif
21623+
21624 gdt_descr.address = __pa(get_cpu_gdt_table(0));
21625 gdt_descr.size = GDT_SIZE - 1;
21626 load_gdt(&gdt_descr);
21627 }
21628
21629-void efi_call_phys_epilog(void)
21630+void __init efi_call_phys_epilog(void)
21631 {
21632- unsigned long cr4;
21633 struct desc_ptr gdt_descr;
21634
21635+#ifdef CONFIG_PAX_KERNEXEC
21636+ struct desc_struct d;
21637+
21638+ memset(&d, 0, sizeof d);
21639+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
21640+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
21641+#endif
21642+
21643 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
21644 gdt_descr.size = GDT_SIZE - 1;
21645 load_gdt(&gdt_descr);
21646
21647- cr4 = read_cr4_safe();
21648-
21649- if (cr4 & X86_CR4_PAE) {
21650- swapper_pg_dir[pgd_index(0)].pgd =
21651- efi_bak_pg_dir_pointer[0].pgd;
21652- } else {
21653- swapper_pg_dir[pgd_index(0)].pgd =
21654- efi_bak_pg_dir_pointer[0].pgd;
21655- swapper_pg_dir[pgd_index(0x400000)].pgd =
21656- efi_bak_pg_dir_pointer[1].pgd;
21657- }
21658+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
21659
21660 /*
21661 * After the lock is released, the original page table is restored.
21662diff -urNp linux-3.0.4/arch/x86/platform/efi/efi_stub_32.S linux-3.0.4/arch/x86/platform/efi/efi_stub_32.S
21663--- linux-3.0.4/arch/x86/platform/efi/efi_stub_32.S 2011-07-21 22:17:23.000000000 -0400
21664+++ linux-3.0.4/arch/x86/platform/efi/efi_stub_32.S 2011-09-19 09:16:58.000000000 -0400
21665@@ -6,7 +6,9 @@
21666 */
21667
21668 #include <linux/linkage.h>
21669+#include <linux/init.h>
21670 #include <asm/page_types.h>
21671+#include <asm/segment.h>
21672
21673 /*
21674 * efi_call_phys(void *, ...) is a function with variable parameters.
21675@@ -20,7 +22,7 @@
21676 * service functions will comply with gcc calling convention, too.
21677 */
21678
21679-.text
21680+__INIT
21681 ENTRY(efi_call_phys)
21682 /*
21683 * 0. The function can only be called in Linux kernel. So CS has been
21684@@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
21685 * The mapping of lower virtual memory has been created in prelog and
21686 * epilog.
21687 */
21688- movl $1f, %edx
21689- subl $__PAGE_OFFSET, %edx
21690- jmp *%edx
21691+ movl $(__KERNEXEC_EFI_DS), %edx
21692+ mov %edx, %ds
21693+ mov %edx, %es
21694+ mov %edx, %ss
21695+ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
21696 1:
21697
21698 /*
21699@@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
21700 * parameter 2, ..., param n. To make things easy, we save the return
21701 * address of efi_call_phys in a global variable.
21702 */
21703- popl %edx
21704- movl %edx, saved_return_addr
21705- /* get the function pointer into ECX*/
21706- popl %ecx
21707- movl %ecx, efi_rt_function_ptr
21708- movl $2f, %edx
21709- subl $__PAGE_OFFSET, %edx
21710- pushl %edx
21711+ popl (saved_return_addr)
21712+ popl (efi_rt_function_ptr)
21713
21714 /*
21715 * 3. Clear PG bit in %CR0.
21716@@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
21717 /*
21718 * 5. Call the physical function.
21719 */
21720- jmp *%ecx
21721+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
21722
21723-2:
21724 /*
21725 * 6. After EFI runtime service returns, control will return to
21726 * following instruction. We'd better readjust stack pointer first.
21727@@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
21728 movl %cr0, %edx
21729 orl $0x80000000, %edx
21730 movl %edx, %cr0
21731- jmp 1f
21732-1:
21733+
21734 /*
21735 * 8. Now restore the virtual mode from flat mode by
21736 * adding EIP with PAGE_OFFSET.
21737 */
21738- movl $1f, %edx
21739- jmp *%edx
21740+ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
21741 1:
21742+ movl $(__KERNEL_DS), %edx
21743+ mov %edx, %ds
21744+ mov %edx, %es
21745+ mov %edx, %ss
21746
21747 /*
21748 * 9. Balance the stack. And because EAX contain the return value,
21749 * we'd better not clobber it.
21750 */
21751- leal efi_rt_function_ptr, %edx
21752- movl (%edx), %ecx
21753- pushl %ecx
21754+ pushl (efi_rt_function_ptr)
21755
21756 /*
21757- * 10. Push the saved return address onto the stack and return.
21758+ * 10. Return to the saved return address.
21759 */
21760- leal saved_return_addr, %edx
21761- movl (%edx), %ecx
21762- pushl %ecx
21763- ret
21764+ jmpl *(saved_return_addr)
21765 ENDPROC(efi_call_phys)
21766 .previous
21767
21768-.data
21769+__INITDATA
21770 saved_return_addr:
21771 .long 0
21772 efi_rt_function_ptr:
21773diff -urNp linux-3.0.4/arch/x86/platform/efi/efi_stub_64.S linux-3.0.4/arch/x86/platform/efi/efi_stub_64.S
21774--- linux-3.0.4/arch/x86/platform/efi/efi_stub_64.S 2011-07-21 22:17:23.000000000 -0400
21775+++ linux-3.0.4/arch/x86/platform/efi/efi_stub_64.S 2011-10-06 04:17:55.000000000 -0400
21776@@ -7,6 +7,7 @@
21777 */
21778
21779 #include <linux/linkage.h>
21780+#include <asm/alternative-asm.h>
21781
21782 #define SAVE_XMM \
21783 mov %rsp, %rax; \
21784@@ -40,6 +41,7 @@ ENTRY(efi_call0)
21785 call *%rdi
21786 addq $32, %rsp
21787 RESTORE_XMM
21788+ pax_force_retaddr
21789 ret
21790 ENDPROC(efi_call0)
21791
21792@@ -50,6 +52,7 @@ ENTRY(efi_call1)
21793 call *%rdi
21794 addq $32, %rsp
21795 RESTORE_XMM
21796+ pax_force_retaddr
21797 ret
21798 ENDPROC(efi_call1)
21799
21800@@ -60,6 +63,7 @@ ENTRY(efi_call2)
21801 call *%rdi
21802 addq $32, %rsp
21803 RESTORE_XMM
21804+ pax_force_retaddr
21805 ret
21806 ENDPROC(efi_call2)
21807
21808@@ -71,6 +75,7 @@ ENTRY(efi_call3)
21809 call *%rdi
21810 addq $32, %rsp
21811 RESTORE_XMM
21812+ pax_force_retaddr
21813 ret
21814 ENDPROC(efi_call3)
21815
21816@@ -83,6 +88,7 @@ ENTRY(efi_call4)
21817 call *%rdi
21818 addq $32, %rsp
21819 RESTORE_XMM
21820+ pax_force_retaddr
21821 ret
21822 ENDPROC(efi_call4)
21823
21824@@ -96,6 +102,7 @@ ENTRY(efi_call5)
21825 call *%rdi
21826 addq $48, %rsp
21827 RESTORE_XMM
21828+ pax_force_retaddr
21829 ret
21830 ENDPROC(efi_call5)
21831
21832@@ -112,5 +119,6 @@ ENTRY(efi_call6)
21833 call *%rdi
21834 addq $48, %rsp
21835 RESTORE_XMM
21836+ pax_force_retaddr
21837 ret
21838 ENDPROC(efi_call6)
21839diff -urNp linux-3.0.4/arch/x86/platform/mrst/mrst.c linux-3.0.4/arch/x86/platform/mrst/mrst.c
21840--- linux-3.0.4/arch/x86/platform/mrst/mrst.c 2011-07-21 22:17:23.000000000 -0400
21841+++ linux-3.0.4/arch/x86/platform/mrst/mrst.c 2011-08-23 21:47:55.000000000 -0400
21842@@ -239,14 +239,16 @@ static int mrst_i8042_detect(void)
21843 }
21844
21845 /* Reboot and power off are handled by the SCU on a MID device */
21846-static void mrst_power_off(void)
21847+static __noreturn void mrst_power_off(void)
21848 {
21849 intel_scu_ipc_simple_command(0xf1, 1);
21850+ BUG();
21851 }
21852
21853-static void mrst_reboot(void)
21854+static __noreturn void mrst_reboot(void)
21855 {
21856 intel_scu_ipc_simple_command(0xf1, 0);
21857+ BUG();
21858 }
21859
21860 /*
21861diff -urNp linux-3.0.4/arch/x86/platform/uv/tlb_uv.c linux-3.0.4/arch/x86/platform/uv/tlb_uv.c
21862--- linux-3.0.4/arch/x86/platform/uv/tlb_uv.c 2011-07-21 22:17:23.000000000 -0400
21863+++ linux-3.0.4/arch/x86/platform/uv/tlb_uv.c 2011-08-23 21:48:14.000000000 -0400
21864@@ -373,6 +373,8 @@ static void reset_with_ipi(struct bau_ta
21865 cpumask_t mask;
21866 struct reset_args reset_args;
21867
21868+ pax_track_stack();
21869+
21870 reset_args.sender = sender;
21871 cpus_clear(mask);
21872 /* find a single cpu for each uvhub in this distribution mask */
21873diff -urNp linux-3.0.4/arch/x86/power/cpu.c linux-3.0.4/arch/x86/power/cpu.c
21874--- linux-3.0.4/arch/x86/power/cpu.c 2011-07-21 22:17:23.000000000 -0400
21875+++ linux-3.0.4/arch/x86/power/cpu.c 2011-08-23 21:47:55.000000000 -0400
21876@@ -130,7 +130,7 @@ static void do_fpu_end(void)
21877 static void fix_processor_context(void)
21878 {
21879 int cpu = smp_processor_id();
21880- struct tss_struct *t = &per_cpu(init_tss, cpu);
21881+ struct tss_struct *t = init_tss + cpu;
21882
21883 set_tss_desc(cpu, t); /*
21884 * This just modifies memory; should not be
21885@@ -140,7 +140,9 @@ static void fix_processor_context(void)
21886 */
21887
21888 #ifdef CONFIG_X86_64
21889+ pax_open_kernel();
21890 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
21891+ pax_close_kernel();
21892
21893 syscall_init(); /* This sets MSR_*STAR and related */
21894 #endif
21895diff -urNp linux-3.0.4/arch/x86/vdso/Makefile linux-3.0.4/arch/x86/vdso/Makefile
21896--- linux-3.0.4/arch/x86/vdso/Makefile 2011-07-21 22:17:23.000000000 -0400
21897+++ linux-3.0.4/arch/x86/vdso/Makefile 2011-08-23 21:47:55.000000000 -0400
21898@@ -136,7 +136,7 @@ quiet_cmd_vdso = VDSO $@
21899 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
21900 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
21901
21902-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
21903+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
21904 GCOV_PROFILE := n
21905
21906 #
21907diff -urNp linux-3.0.4/arch/x86/vdso/vdso32-setup.c linux-3.0.4/arch/x86/vdso/vdso32-setup.c
21908--- linux-3.0.4/arch/x86/vdso/vdso32-setup.c 2011-07-21 22:17:23.000000000 -0400
21909+++ linux-3.0.4/arch/x86/vdso/vdso32-setup.c 2011-08-23 21:47:55.000000000 -0400
21910@@ -25,6 +25,7 @@
21911 #include <asm/tlbflush.h>
21912 #include <asm/vdso.h>
21913 #include <asm/proto.h>
21914+#include <asm/mman.h>
21915
21916 enum {
21917 VDSO_DISABLED = 0,
21918@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
21919 void enable_sep_cpu(void)
21920 {
21921 int cpu = get_cpu();
21922- struct tss_struct *tss = &per_cpu(init_tss, cpu);
21923+ struct tss_struct *tss = init_tss + cpu;
21924
21925 if (!boot_cpu_has(X86_FEATURE_SEP)) {
21926 put_cpu();
21927@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
21928 gate_vma.vm_start = FIXADDR_USER_START;
21929 gate_vma.vm_end = FIXADDR_USER_END;
21930 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
21931- gate_vma.vm_page_prot = __P101;
21932+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
21933 /*
21934 * Make sure the vDSO gets into every core dump.
21935 * Dumping its contents makes post-mortem fully interpretable later
21936@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
21937 if (compat)
21938 addr = VDSO_HIGH_BASE;
21939 else {
21940- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
21941+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
21942 if (IS_ERR_VALUE(addr)) {
21943 ret = addr;
21944 goto up_fail;
21945 }
21946 }
21947
21948- current->mm->context.vdso = (void *)addr;
21949+ current->mm->context.vdso = addr;
21950
21951 if (compat_uses_vma || !compat) {
21952 /*
21953@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
21954 }
21955
21956 current_thread_info()->sysenter_return =
21957- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
21958+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
21959
21960 up_fail:
21961 if (ret)
21962- current->mm->context.vdso = NULL;
21963+ current->mm->context.vdso = 0;
21964
21965 up_write(&mm->mmap_sem);
21966
21967@@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
21968
21969 const char *arch_vma_name(struct vm_area_struct *vma)
21970 {
21971- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
21972+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
21973 return "[vdso]";
21974+
21975+#ifdef CONFIG_PAX_SEGMEXEC
21976+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
21977+ return "[vdso]";
21978+#endif
21979+
21980 return NULL;
21981 }
21982
21983@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
21984 * Check to see if the corresponding task was created in compat vdso
21985 * mode.
21986 */
21987- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
21988+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
21989 return &gate_vma;
21990 return NULL;
21991 }
21992diff -urNp linux-3.0.4/arch/x86/vdso/vma.c linux-3.0.4/arch/x86/vdso/vma.c
21993--- linux-3.0.4/arch/x86/vdso/vma.c 2011-07-21 22:17:23.000000000 -0400
21994+++ linux-3.0.4/arch/x86/vdso/vma.c 2011-08-23 21:47:55.000000000 -0400
21995@@ -15,18 +15,19 @@
21996 #include <asm/proto.h>
21997 #include <asm/vdso.h>
21998
21999-unsigned int __read_mostly vdso_enabled = 1;
22000-
22001 extern char vdso_start[], vdso_end[];
22002 extern unsigned short vdso_sync_cpuid;
22003+extern char __vsyscall_0;
22004
22005 static struct page **vdso_pages;
22006+static struct page *vsyscall_page;
22007 static unsigned vdso_size;
22008
22009 static int __init init_vdso_vars(void)
22010 {
22011- int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
22012- int i;
22013+ size_t nbytes = vdso_end - vdso_start;
22014+ size_t npages = (nbytes + PAGE_SIZE - 1) / PAGE_SIZE;
22015+ size_t i;
22016
22017 vdso_size = npages << PAGE_SHIFT;
22018 vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
22019@@ -34,19 +35,19 @@ static int __init init_vdso_vars(void)
22020 goto oom;
22021 for (i = 0; i < npages; i++) {
22022 struct page *p;
22023- p = alloc_page(GFP_KERNEL);
22024+ p = alloc_page(GFP_KERNEL | __GFP_ZERO);
22025 if (!p)
22026 goto oom;
22027 vdso_pages[i] = p;
22028- copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
22029+ memcpy(page_address(p), vdso_start + i*PAGE_SIZE, nbytes > PAGE_SIZE ? PAGE_SIZE : nbytes);
22030+ nbytes -= PAGE_SIZE;
22031 }
22032+ vsyscall_page = pfn_to_page((__pa_symbol(&__vsyscall_0)) >> PAGE_SHIFT);
22033
22034 return 0;
22035
22036 oom:
22037- printk("Cannot allocate vdso\n");
22038- vdso_enabled = 0;
22039- return -ENOMEM;
22040+ panic("Cannot allocate vdso\n");
22041 }
22042 subsys_initcall(init_vdso_vars);
22043
22044@@ -80,37 +81,35 @@ int arch_setup_additional_pages(struct l
22045 unsigned long addr;
22046 int ret;
22047
22048- if (!vdso_enabled)
22049- return 0;
22050-
22051 down_write(&mm->mmap_sem);
22052- addr = vdso_addr(mm->start_stack, vdso_size);
22053- addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
22054+ addr = vdso_addr(mm->start_stack, vdso_size + PAGE_SIZE);
22055+ addr = get_unmapped_area(NULL, addr, vdso_size + PAGE_SIZE, 0, 0);
22056 if (IS_ERR_VALUE(addr)) {
22057 ret = addr;
22058 goto up_fail;
22059 }
22060
22061- current->mm->context.vdso = (void *)addr;
22062+ mm->context.vdso = addr + PAGE_SIZE;
22063
22064- ret = install_special_mapping(mm, addr, vdso_size,
22065+ ret = install_special_mapping(mm, addr, PAGE_SIZE,
22066 VM_READ|VM_EXEC|
22067- VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
22068+ VM_MAYREAD|VM_MAYEXEC|
22069 VM_ALWAYSDUMP,
22070- vdso_pages);
22071+ &vsyscall_page);
22072 if (ret) {
22073- current->mm->context.vdso = NULL;
22074+ mm->context.vdso = 0;
22075 goto up_fail;
22076 }
22077
22078+ ret = install_special_mapping(mm, addr + PAGE_SIZE, vdso_size,
22079+ VM_READ|VM_EXEC|
22080+ VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
22081+ VM_ALWAYSDUMP,
22082+ vdso_pages);
22083+ if (ret)
22084+ mm->context.vdso = 0;
22085+
22086 up_fail:
22087 up_write(&mm->mmap_sem);
22088 return ret;
22089 }
22090-
22091-static __init int vdso_setup(char *s)
22092-{
22093- vdso_enabled = simple_strtoul(s, NULL, 0);
22094- return 0;
22095-}
22096-__setup("vdso=", vdso_setup);
22097diff -urNp linux-3.0.4/arch/x86/xen/enlighten.c linux-3.0.4/arch/x86/xen/enlighten.c
22098--- linux-3.0.4/arch/x86/xen/enlighten.c 2011-09-02 18:11:26.000000000 -0400
22099+++ linux-3.0.4/arch/x86/xen/enlighten.c 2011-08-29 23:26:21.000000000 -0400
22100@@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
22101
22102 struct shared_info xen_dummy_shared_info;
22103
22104-void *xen_initial_gdt;
22105-
22106 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
22107 __read_mostly int xen_have_vector_callback;
22108 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
22109@@ -1010,7 +1008,7 @@ static const struct pv_apic_ops xen_apic
22110 #endif
22111 };
22112
22113-static void xen_reboot(int reason)
22114+static __noreturn void xen_reboot(int reason)
22115 {
22116 struct sched_shutdown r = { .reason = reason };
22117
22118@@ -1018,17 +1016,17 @@ static void xen_reboot(int reason)
22119 BUG();
22120 }
22121
22122-static void xen_restart(char *msg)
22123+static __noreturn void xen_restart(char *msg)
22124 {
22125 xen_reboot(SHUTDOWN_reboot);
22126 }
22127
22128-static void xen_emergency_restart(void)
22129+static __noreturn void xen_emergency_restart(void)
22130 {
22131 xen_reboot(SHUTDOWN_reboot);
22132 }
22133
22134-static void xen_machine_halt(void)
22135+static __noreturn void xen_machine_halt(void)
22136 {
22137 xen_reboot(SHUTDOWN_poweroff);
22138 }
22139@@ -1134,7 +1132,17 @@ asmlinkage void __init xen_start_kernel(
22140 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
22141
22142 /* Work out if we support NX */
22143- x86_configure_nx();
22144+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
22145+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
22146+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
22147+ unsigned l, h;
22148+
22149+ __supported_pte_mask |= _PAGE_NX;
22150+ rdmsr(MSR_EFER, l, h);
22151+ l |= EFER_NX;
22152+ wrmsr(MSR_EFER, l, h);
22153+ }
22154+#endif
22155
22156 xen_setup_features();
22157
22158@@ -1165,13 +1173,6 @@ asmlinkage void __init xen_start_kernel(
22159
22160 machine_ops = xen_machine_ops;
22161
22162- /*
22163- * The only reliable way to retain the initial address of the
22164- * percpu gdt_page is to remember it here, so we can go and
22165- * mark it RW later, when the initial percpu area is freed.
22166- */
22167- xen_initial_gdt = &per_cpu(gdt_page, 0);
22168-
22169 xen_smp_init();
22170
22171 #ifdef CONFIG_ACPI_NUMA
22172diff -urNp linux-3.0.4/arch/x86/xen/mmu.c linux-3.0.4/arch/x86/xen/mmu.c
22173--- linux-3.0.4/arch/x86/xen/mmu.c 2011-09-02 18:11:26.000000000 -0400
22174+++ linux-3.0.4/arch/x86/xen/mmu.c 2011-08-29 23:26:21.000000000 -0400
22175@@ -1683,6 +1683,8 @@ pgd_t * __init xen_setup_kernel_pagetabl
22176 convert_pfn_mfn(init_level4_pgt);
22177 convert_pfn_mfn(level3_ident_pgt);
22178 convert_pfn_mfn(level3_kernel_pgt);
22179+ convert_pfn_mfn(level3_vmalloc_pgt);
22180+ convert_pfn_mfn(level3_vmemmap_pgt);
22181
22182 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
22183 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
22184@@ -1701,7 +1703,10 @@ pgd_t * __init xen_setup_kernel_pagetabl
22185 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
22186 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
22187 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
22188+ set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
22189+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
22190 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
22191+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
22192 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
22193 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
22194
22195@@ -1913,6 +1918,7 @@ static void __init xen_post_allocator_in
22196 pv_mmu_ops.set_pud = xen_set_pud;
22197 #if PAGETABLE_LEVELS == 4
22198 pv_mmu_ops.set_pgd = xen_set_pgd;
22199+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
22200 #endif
22201
22202 /* This will work as long as patching hasn't happened yet
22203@@ -1994,6 +2000,7 @@ static const struct pv_mmu_ops xen_mmu_o
22204 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
22205 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
22206 .set_pgd = xen_set_pgd_hyper,
22207+ .set_pgd_batched = xen_set_pgd_hyper,
22208
22209 .alloc_pud = xen_alloc_pmd_init,
22210 .release_pud = xen_release_pmd_init,
22211diff -urNp linux-3.0.4/arch/x86/xen/smp.c linux-3.0.4/arch/x86/xen/smp.c
22212--- linux-3.0.4/arch/x86/xen/smp.c 2011-09-02 18:11:26.000000000 -0400
22213+++ linux-3.0.4/arch/x86/xen/smp.c 2011-08-29 23:26:21.000000000 -0400
22214@@ -193,11 +193,6 @@ static void __init xen_smp_prepare_boot_
22215 {
22216 BUG_ON(smp_processor_id() != 0);
22217 native_smp_prepare_boot_cpu();
22218-
22219- /* We've switched to the "real" per-cpu gdt, so make sure the
22220- old memory can be recycled */
22221- make_lowmem_page_readwrite(xen_initial_gdt);
22222-
22223 xen_filter_cpu_maps();
22224 xen_setup_vcpu_info_placement();
22225 }
22226@@ -265,12 +260,12 @@ cpu_initialize_context(unsigned int cpu,
22227 gdt = get_cpu_gdt_table(cpu);
22228
22229 ctxt->flags = VGCF_IN_KERNEL;
22230- ctxt->user_regs.ds = __USER_DS;
22231- ctxt->user_regs.es = __USER_DS;
22232+ ctxt->user_regs.ds = __KERNEL_DS;
22233+ ctxt->user_regs.es = __KERNEL_DS;
22234 ctxt->user_regs.ss = __KERNEL_DS;
22235 #ifdef CONFIG_X86_32
22236 ctxt->user_regs.fs = __KERNEL_PERCPU;
22237- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
22238+ savesegment(gs, ctxt->user_regs.gs);
22239 #else
22240 ctxt->gs_base_kernel = per_cpu_offset(cpu);
22241 #endif
22242@@ -321,13 +316,12 @@ static int __cpuinit xen_cpu_up(unsigned
22243 int rc;
22244
22245 per_cpu(current_task, cpu) = idle;
22246+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
22247 #ifdef CONFIG_X86_32
22248 irq_ctx_init(cpu);
22249 #else
22250 clear_tsk_thread_flag(idle, TIF_FORK);
22251- per_cpu(kernel_stack, cpu) =
22252- (unsigned long)task_stack_page(idle) -
22253- KERNEL_STACK_OFFSET + THREAD_SIZE;
22254+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
22255 #endif
22256 xen_setup_runstate_info(cpu);
22257 xen_setup_timer(cpu);
22258diff -urNp linux-3.0.4/arch/x86/xen/xen-asm_32.S linux-3.0.4/arch/x86/xen/xen-asm_32.S
22259--- linux-3.0.4/arch/x86/xen/xen-asm_32.S 2011-07-21 22:17:23.000000000 -0400
22260+++ linux-3.0.4/arch/x86/xen/xen-asm_32.S 2011-08-23 21:47:55.000000000 -0400
22261@@ -83,14 +83,14 @@ ENTRY(xen_iret)
22262 ESP_OFFSET=4 # bytes pushed onto stack
22263
22264 /*
22265- * Store vcpu_info pointer for easy access. Do it this way to
22266- * avoid having to reload %fs
22267+ * Store vcpu_info pointer for easy access.
22268 */
22269 #ifdef CONFIG_SMP
22270- GET_THREAD_INFO(%eax)
22271- movl TI_cpu(%eax), %eax
22272- movl __per_cpu_offset(,%eax,4), %eax
22273- mov xen_vcpu(%eax), %eax
22274+ push %fs
22275+ mov $(__KERNEL_PERCPU), %eax
22276+ mov %eax, %fs
22277+ mov PER_CPU_VAR(xen_vcpu), %eax
22278+ pop %fs
22279 #else
22280 movl xen_vcpu, %eax
22281 #endif
22282diff -urNp linux-3.0.4/arch/x86/xen/xen-head.S linux-3.0.4/arch/x86/xen/xen-head.S
22283--- linux-3.0.4/arch/x86/xen/xen-head.S 2011-07-21 22:17:23.000000000 -0400
22284+++ linux-3.0.4/arch/x86/xen/xen-head.S 2011-08-23 21:47:55.000000000 -0400
22285@@ -19,6 +19,17 @@ ENTRY(startup_xen)
22286 #ifdef CONFIG_X86_32
22287 mov %esi,xen_start_info
22288 mov $init_thread_union+THREAD_SIZE,%esp
22289+#ifdef CONFIG_SMP
22290+ movl $cpu_gdt_table,%edi
22291+ movl $__per_cpu_load,%eax
22292+ movw %ax,__KERNEL_PERCPU + 2(%edi)
22293+ rorl $16,%eax
22294+ movb %al,__KERNEL_PERCPU + 4(%edi)
22295+ movb %ah,__KERNEL_PERCPU + 7(%edi)
22296+ movl $__per_cpu_end - 1,%eax
22297+ subl $__per_cpu_start,%eax
22298+ movw %ax,__KERNEL_PERCPU + 0(%edi)
22299+#endif
22300 #else
22301 mov %rsi,xen_start_info
22302 mov $init_thread_union+THREAD_SIZE,%rsp
22303diff -urNp linux-3.0.4/arch/x86/xen/xen-ops.h linux-3.0.4/arch/x86/xen/xen-ops.h
22304--- linux-3.0.4/arch/x86/xen/xen-ops.h 2011-09-02 18:11:21.000000000 -0400
22305+++ linux-3.0.4/arch/x86/xen/xen-ops.h 2011-08-23 21:47:55.000000000 -0400
22306@@ -10,8 +10,6 @@
22307 extern const char xen_hypervisor_callback[];
22308 extern const char xen_failsafe_callback[];
22309
22310-extern void *xen_initial_gdt;
22311-
22312 struct trap_info;
22313 void xen_copy_trap_info(struct trap_info *traps);
22314
22315diff -urNp linux-3.0.4/block/blk-iopoll.c linux-3.0.4/block/blk-iopoll.c
22316--- linux-3.0.4/block/blk-iopoll.c 2011-07-21 22:17:23.000000000 -0400
22317+++ linux-3.0.4/block/blk-iopoll.c 2011-08-23 21:47:55.000000000 -0400
22318@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
22319 }
22320 EXPORT_SYMBOL(blk_iopoll_complete);
22321
22322-static void blk_iopoll_softirq(struct softirq_action *h)
22323+static void blk_iopoll_softirq(void)
22324 {
22325 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
22326 int rearm = 0, budget = blk_iopoll_budget;
22327diff -urNp linux-3.0.4/block/blk-map.c linux-3.0.4/block/blk-map.c
22328--- linux-3.0.4/block/blk-map.c 2011-07-21 22:17:23.000000000 -0400
22329+++ linux-3.0.4/block/blk-map.c 2011-08-23 21:47:55.000000000 -0400
22330@@ -301,7 +301,7 @@ int blk_rq_map_kern(struct request_queue
22331 if (!len || !kbuf)
22332 return -EINVAL;
22333
22334- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
22335+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
22336 if (do_copy)
22337 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
22338 else
22339diff -urNp linux-3.0.4/block/blk-softirq.c linux-3.0.4/block/blk-softirq.c
22340--- linux-3.0.4/block/blk-softirq.c 2011-07-21 22:17:23.000000000 -0400
22341+++ linux-3.0.4/block/blk-softirq.c 2011-08-23 21:47:55.000000000 -0400
22342@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
22343 * Softirq action handler - move entries to local list and loop over them
22344 * while passing them to the queue registered handler.
22345 */
22346-static void blk_done_softirq(struct softirq_action *h)
22347+static void blk_done_softirq(void)
22348 {
22349 struct list_head *cpu_list, local_list;
22350
22351diff -urNp linux-3.0.4/block/bsg.c linux-3.0.4/block/bsg.c
22352--- linux-3.0.4/block/bsg.c 2011-07-21 22:17:23.000000000 -0400
22353+++ linux-3.0.4/block/bsg.c 2011-10-06 04:17:55.000000000 -0400
22354@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
22355 struct sg_io_v4 *hdr, struct bsg_device *bd,
22356 fmode_t has_write_perm)
22357 {
22358+ unsigned char tmpcmd[sizeof(rq->__cmd)];
22359+ unsigned char *cmdptr;
22360+
22361 if (hdr->request_len > BLK_MAX_CDB) {
22362 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
22363 if (!rq->cmd)
22364 return -ENOMEM;
22365- }
22366+ cmdptr = rq->cmd;
22367+ } else
22368+ cmdptr = tmpcmd;
22369
22370- if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
22371+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
22372 hdr->request_len))
22373 return -EFAULT;
22374
22375+ if (cmdptr != rq->cmd)
22376+ memcpy(rq->cmd, cmdptr, hdr->request_len);
22377+
22378 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
22379 if (blk_verify_command(rq->cmd, has_write_perm))
22380 return -EPERM;
22381@@ -249,7 +257,7 @@ bsg_map_hdr(struct bsg_device *bd, struc
22382 struct request *rq, *next_rq = NULL;
22383 int ret, rw;
22384 unsigned int dxfer_len;
22385- void *dxferp = NULL;
22386+ void __user *dxferp = NULL;
22387 struct bsg_class_device *bcd = &q->bsg_dev;
22388
22389 /* if the LLD has been removed then the bsg_unregister_queue will
22390@@ -291,7 +299,7 @@ bsg_map_hdr(struct bsg_device *bd, struc
22391 rq->next_rq = next_rq;
22392 next_rq->cmd_type = rq->cmd_type;
22393
22394- dxferp = (void*)(unsigned long)hdr->din_xferp;
22395+ dxferp = (void __user *)(unsigned long)hdr->din_xferp;
22396 ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
22397 hdr->din_xfer_len, GFP_KERNEL);
22398 if (ret)
22399@@ -300,10 +308,10 @@ bsg_map_hdr(struct bsg_device *bd, struc
22400
22401 if (hdr->dout_xfer_len) {
22402 dxfer_len = hdr->dout_xfer_len;
22403- dxferp = (void*)(unsigned long)hdr->dout_xferp;
22404+ dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
22405 } else if (hdr->din_xfer_len) {
22406 dxfer_len = hdr->din_xfer_len;
22407- dxferp = (void*)(unsigned long)hdr->din_xferp;
22408+ dxferp = (void __user *)(unsigned long)hdr->din_xferp;
22409 } else
22410 dxfer_len = 0;
22411
22412@@ -445,7 +453,7 @@ static int blk_complete_sgv4_hdr_rq(stru
22413 int len = min_t(unsigned int, hdr->max_response_len,
22414 rq->sense_len);
22415
22416- ret = copy_to_user((void*)(unsigned long)hdr->response,
22417+ ret = copy_to_user((void __user *)(unsigned long)hdr->response,
22418 rq->sense, len);
22419 if (!ret)
22420 hdr->response_len = len;
22421diff -urNp linux-3.0.4/block/compat_ioctl.c linux-3.0.4/block/compat_ioctl.c
22422--- linux-3.0.4/block/compat_ioctl.c 2011-07-21 22:17:23.000000000 -0400
22423+++ linux-3.0.4/block/compat_ioctl.c 2011-10-06 04:17:55.000000000 -0400
22424@@ -354,7 +354,7 @@ static int compat_fd_ioctl(struct block_
22425 err |= __get_user(f->spec1, &uf->spec1);
22426 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
22427 err |= __get_user(name, &uf->name);
22428- f->name = compat_ptr(name);
22429+ f->name = (void __force_kernel *)compat_ptr(name);
22430 if (err) {
22431 err = -EFAULT;
22432 goto out;
22433diff -urNp linux-3.0.4/block/scsi_ioctl.c linux-3.0.4/block/scsi_ioctl.c
22434--- linux-3.0.4/block/scsi_ioctl.c 2011-07-21 22:17:23.000000000 -0400
22435+++ linux-3.0.4/block/scsi_ioctl.c 2011-08-23 21:47:55.000000000 -0400
22436@@ -222,8 +222,20 @@ EXPORT_SYMBOL(blk_verify_command);
22437 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
22438 struct sg_io_hdr *hdr, fmode_t mode)
22439 {
22440- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
22441+ unsigned char tmpcmd[sizeof(rq->__cmd)];
22442+ unsigned char *cmdptr;
22443+
22444+ if (rq->cmd != rq->__cmd)
22445+ cmdptr = rq->cmd;
22446+ else
22447+ cmdptr = tmpcmd;
22448+
22449+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
22450 return -EFAULT;
22451+
22452+ if (cmdptr != rq->cmd)
22453+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
22454+
22455 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
22456 return -EPERM;
22457
22458@@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue *
22459 int err;
22460 unsigned int in_len, out_len, bytes, opcode, cmdlen;
22461 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
22462+ unsigned char tmpcmd[sizeof(rq->__cmd)];
22463+ unsigned char *cmdptr;
22464
22465 if (!sic)
22466 return -EINVAL;
22467@@ -465,9 +479,18 @@ int sg_scsi_ioctl(struct request_queue *
22468 */
22469 err = -EFAULT;
22470 rq->cmd_len = cmdlen;
22471- if (copy_from_user(rq->cmd, sic->data, cmdlen))
22472+
22473+ if (rq->cmd != rq->__cmd)
22474+ cmdptr = rq->cmd;
22475+ else
22476+ cmdptr = tmpcmd;
22477+
22478+ if (copy_from_user(cmdptr, sic->data, cmdlen))
22479 goto error;
22480
22481+ if (rq->cmd != cmdptr)
22482+ memcpy(rq->cmd, cmdptr, cmdlen);
22483+
22484 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
22485 goto error;
22486
22487diff -urNp linux-3.0.4/crypto/cryptd.c linux-3.0.4/crypto/cryptd.c
22488--- linux-3.0.4/crypto/cryptd.c 2011-07-21 22:17:23.000000000 -0400
22489+++ linux-3.0.4/crypto/cryptd.c 2011-08-23 21:47:55.000000000 -0400
22490@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
22491
22492 struct cryptd_blkcipher_request_ctx {
22493 crypto_completion_t complete;
22494-};
22495+} __no_const;
22496
22497 struct cryptd_hash_ctx {
22498 struct crypto_shash *child;
22499@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
22500
22501 struct cryptd_aead_request_ctx {
22502 crypto_completion_t complete;
22503-};
22504+} __no_const;
22505
22506 static void cryptd_queue_worker(struct work_struct *work);
22507
22508diff -urNp linux-3.0.4/crypto/gf128mul.c linux-3.0.4/crypto/gf128mul.c
22509--- linux-3.0.4/crypto/gf128mul.c 2011-07-21 22:17:23.000000000 -0400
22510+++ linux-3.0.4/crypto/gf128mul.c 2011-08-23 21:47:55.000000000 -0400
22511@@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
22512 for (i = 0; i < 7; ++i)
22513 gf128mul_x_lle(&p[i + 1], &p[i]);
22514
22515- memset(r, 0, sizeof(r));
22516+ memset(r, 0, sizeof(*r));
22517 for (i = 0;;) {
22518 u8 ch = ((u8 *)b)[15 - i];
22519
22520@@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
22521 for (i = 0; i < 7; ++i)
22522 gf128mul_x_bbe(&p[i + 1], &p[i]);
22523
22524- memset(r, 0, sizeof(r));
22525+ memset(r, 0, sizeof(*r));
22526 for (i = 0;;) {
22527 u8 ch = ((u8 *)b)[i];
22528
22529diff -urNp linux-3.0.4/crypto/serpent.c linux-3.0.4/crypto/serpent.c
22530--- linux-3.0.4/crypto/serpent.c 2011-07-21 22:17:23.000000000 -0400
22531+++ linux-3.0.4/crypto/serpent.c 2011-08-23 21:48:14.000000000 -0400
22532@@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
22533 u32 r0,r1,r2,r3,r4;
22534 int i;
22535
22536+ pax_track_stack();
22537+
22538 /* Copy key, add padding */
22539
22540 for (i = 0; i < keylen; ++i)
22541diff -urNp linux-3.0.4/Documentation/dontdiff linux-3.0.4/Documentation/dontdiff
22542--- linux-3.0.4/Documentation/dontdiff 2011-07-21 22:17:23.000000000 -0400
22543+++ linux-3.0.4/Documentation/dontdiff 2011-08-23 21:47:55.000000000 -0400
22544@@ -5,6 +5,7 @@
22545 *.cis
22546 *.cpio
22547 *.csp
22548+*.dbg
22549 *.dsp
22550 *.dvi
22551 *.elf
22552@@ -48,9 +49,11 @@
22553 *.tab.h
22554 *.tex
22555 *.ver
22556+*.vim
22557 *.xml
22558 *.xz
22559 *_MODULES
22560+*_reg_safe.h
22561 *_vga16.c
22562 *~
22563 \#*#
22564@@ -70,6 +73,7 @@ Kerntypes
22565 Module.markers
22566 Module.symvers
22567 PENDING
22568+PERF*
22569 SCCS
22570 System.map*
22571 TAGS
22572@@ -98,6 +102,8 @@ bzImage*
22573 capability_names.h
22574 capflags.c
22575 classlist.h*
22576+clut_vga16.c
22577+common-cmds.h
22578 comp*.log
22579 compile.h*
22580 conf
22581@@ -126,12 +132,14 @@ fore200e_pca_fw.c*
22582 gconf
22583 gconf.glade.h
22584 gen-devlist
22585+gen-kdb_cmds.c
22586 gen_crc32table
22587 gen_init_cpio
22588 generated
22589 genheaders
22590 genksyms
22591 *_gray256.c
22592+hash
22593 hpet_example
22594 hugepage-mmap
22595 hugepage-shm
22596@@ -146,7 +154,6 @@ int32.c
22597 int4.c
22598 int8.c
22599 kallsyms
22600-kconfig
22601 keywords.c
22602 ksym.c*
22603 ksym.h*
22604@@ -154,7 +161,6 @@ kxgettext
22605 lkc_defs.h
22606 lex.c
22607 lex.*.c
22608-linux
22609 logo_*.c
22610 logo_*_clut224.c
22611 logo_*_mono.c
22612@@ -174,6 +180,7 @@ mkboot
22613 mkbugboot
22614 mkcpustr
22615 mkdep
22616+mkpiggy
22617 mkprep
22618 mkregtable
22619 mktables
22620@@ -209,6 +216,7 @@ r300_reg_safe.h
22621 r420_reg_safe.h
22622 r600_reg_safe.h
22623 recordmcount
22624+regdb.c
22625 relocs
22626 rlim_names.h
22627 rn50_reg_safe.h
22628@@ -219,6 +227,7 @@ setup
22629 setup.bin
22630 setup.elf
22631 sImage
22632+slabinfo
22633 sm_tbl*
22634 split-include
22635 syscalltab.h
22636@@ -246,7 +255,9 @@ vmlinux
22637 vmlinux-*
22638 vmlinux.aout
22639 vmlinux.bin.all
22640+vmlinux.bin.bz2
22641 vmlinux.lds
22642+vmlinux.relocs
22643 vmlinuz
22644 voffset.h
22645 vsyscall.lds
22646@@ -254,6 +265,7 @@ vsyscall_32.lds
22647 wanxlfw.inc
22648 uImage
22649 unifdef
22650+utsrelease.h
22651 wakeup.bin
22652 wakeup.elf
22653 wakeup.lds
22654diff -urNp linux-3.0.4/Documentation/kernel-parameters.txt linux-3.0.4/Documentation/kernel-parameters.txt
22655--- linux-3.0.4/Documentation/kernel-parameters.txt 2011-07-21 22:17:23.000000000 -0400
22656+++ linux-3.0.4/Documentation/kernel-parameters.txt 2011-08-23 21:47:55.000000000 -0400
22657@@ -1883,6 +1883,13 @@ bytes respectively. Such letter suffixes
22658 the specified number of seconds. This is to be used if
22659 your oopses keep scrolling off the screen.
22660
22661+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
22662+ virtualization environments that don't cope well with the
22663+ expand down segment used by UDEREF on X86-32 or the frequent
22664+ page table updates on X86-64.
22665+
22666+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
22667+
22668 pcbit= [HW,ISDN]
22669
22670 pcd. [PARIDE]
22671diff -urNp linux-3.0.4/drivers/acpi/apei/cper.c linux-3.0.4/drivers/acpi/apei/cper.c
22672--- linux-3.0.4/drivers/acpi/apei/cper.c 2011-07-21 22:17:23.000000000 -0400
22673+++ linux-3.0.4/drivers/acpi/apei/cper.c 2011-08-23 21:47:55.000000000 -0400
22674@@ -38,12 +38,12 @@
22675 */
22676 u64 cper_next_record_id(void)
22677 {
22678- static atomic64_t seq;
22679+ static atomic64_unchecked_t seq;
22680
22681- if (!atomic64_read(&seq))
22682- atomic64_set(&seq, ((u64)get_seconds()) << 32);
22683+ if (!atomic64_read_unchecked(&seq))
22684+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
22685
22686- return atomic64_inc_return(&seq);
22687+ return atomic64_inc_return_unchecked(&seq);
22688 }
22689 EXPORT_SYMBOL_GPL(cper_next_record_id);
22690
22691diff -urNp linux-3.0.4/drivers/acpi/ec_sys.c linux-3.0.4/drivers/acpi/ec_sys.c
22692--- linux-3.0.4/drivers/acpi/ec_sys.c 2011-07-21 22:17:23.000000000 -0400
22693+++ linux-3.0.4/drivers/acpi/ec_sys.c 2011-08-24 19:06:55.000000000 -0400
22694@@ -11,6 +11,7 @@
22695 #include <linux/kernel.h>
22696 #include <linux/acpi.h>
22697 #include <linux/debugfs.h>
22698+#include <asm/uaccess.h>
22699 #include "internal.h"
22700
22701 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
22702@@ -39,7 +40,7 @@ static ssize_t acpi_ec_read_io(struct fi
22703 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
22704 */
22705 unsigned int size = EC_SPACE_SIZE;
22706- u8 *data = (u8 *) buf;
22707+ u8 data;
22708 loff_t init_off = *off;
22709 int err = 0;
22710
22711@@ -52,9 +53,11 @@ static ssize_t acpi_ec_read_io(struct fi
22712 size = count;
22713
22714 while (size) {
22715- err = ec_read(*off, &data[*off - init_off]);
22716+ err = ec_read(*off, &data);
22717 if (err)
22718 return err;
22719+ if (put_user(data, &buf[*off - init_off]))
22720+ return -EFAULT;
22721 *off += 1;
22722 size--;
22723 }
22724@@ -70,7 +73,6 @@ static ssize_t acpi_ec_write_io(struct f
22725
22726 unsigned int size = count;
22727 loff_t init_off = *off;
22728- u8 *data = (u8 *) buf;
22729 int err = 0;
22730
22731 if (*off >= EC_SPACE_SIZE)
22732@@ -81,7 +83,9 @@ static ssize_t acpi_ec_write_io(struct f
22733 }
22734
22735 while (size) {
22736- u8 byte_write = data[*off - init_off];
22737+ u8 byte_write;
22738+ if (get_user(byte_write, &buf[*off - init_off]))
22739+ return -EFAULT;
22740 err = ec_write(*off, byte_write);
22741 if (err)
22742 return err;
22743diff -urNp linux-3.0.4/drivers/acpi/proc.c linux-3.0.4/drivers/acpi/proc.c
22744--- linux-3.0.4/drivers/acpi/proc.c 2011-07-21 22:17:23.000000000 -0400
22745+++ linux-3.0.4/drivers/acpi/proc.c 2011-08-23 21:47:55.000000000 -0400
22746@@ -342,19 +342,13 @@ acpi_system_write_wakeup_device(struct f
22747 size_t count, loff_t * ppos)
22748 {
22749 struct list_head *node, *next;
22750- char strbuf[5];
22751- char str[5] = "";
22752- unsigned int len = count;
22753-
22754- if (len > 4)
22755- len = 4;
22756- if (len < 0)
22757- return -EFAULT;
22758+ char strbuf[5] = {0};
22759
22760- if (copy_from_user(strbuf, buffer, len))
22761+ if (count > 4)
22762+ count = 4;
22763+ if (copy_from_user(strbuf, buffer, count))
22764 return -EFAULT;
22765- strbuf[len] = '\0';
22766- sscanf(strbuf, "%s", str);
22767+ strbuf[count] = '\0';
22768
22769 mutex_lock(&acpi_device_lock);
22770 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
22771@@ -363,7 +357,7 @@ acpi_system_write_wakeup_device(struct f
22772 if (!dev->wakeup.flags.valid)
22773 continue;
22774
22775- if (!strncmp(dev->pnp.bus_id, str, 4)) {
22776+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
22777 if (device_can_wakeup(&dev->dev)) {
22778 bool enable = !device_may_wakeup(&dev->dev);
22779 device_set_wakeup_enable(&dev->dev, enable);
22780diff -urNp linux-3.0.4/drivers/acpi/processor_driver.c linux-3.0.4/drivers/acpi/processor_driver.c
22781--- linux-3.0.4/drivers/acpi/processor_driver.c 2011-07-21 22:17:23.000000000 -0400
22782+++ linux-3.0.4/drivers/acpi/processor_driver.c 2011-08-23 21:47:55.000000000 -0400
22783@@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(
22784 return 0;
22785 #endif
22786
22787- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
22788+ BUG_ON(pr->id >= nr_cpu_ids);
22789
22790 /*
22791 * Buggy BIOS check
22792diff -urNp linux-3.0.4/drivers/ata/libata-core.c linux-3.0.4/drivers/ata/libata-core.c
22793--- linux-3.0.4/drivers/ata/libata-core.c 2011-07-21 22:17:23.000000000 -0400
22794+++ linux-3.0.4/drivers/ata/libata-core.c 2011-08-23 21:47:55.000000000 -0400
22795@@ -4753,7 +4753,7 @@ void ata_qc_free(struct ata_queued_cmd *
22796 struct ata_port *ap;
22797 unsigned int tag;
22798
22799- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22800+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22801 ap = qc->ap;
22802
22803 qc->flags = 0;
22804@@ -4769,7 +4769,7 @@ void __ata_qc_complete(struct ata_queued
22805 struct ata_port *ap;
22806 struct ata_link *link;
22807
22808- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22809+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22810 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
22811 ap = qc->ap;
22812 link = qc->dev->link;
22813@@ -5774,6 +5774,7 @@ static void ata_finalize_port_ops(struct
22814 return;
22815
22816 spin_lock(&lock);
22817+ pax_open_kernel();
22818
22819 for (cur = ops->inherits; cur; cur = cur->inherits) {
22820 void **inherit = (void **)cur;
22821@@ -5787,8 +5788,9 @@ static void ata_finalize_port_ops(struct
22822 if (IS_ERR(*pp))
22823 *pp = NULL;
22824
22825- ops->inherits = NULL;
22826+ *(struct ata_port_operations **)&ops->inherits = NULL;
22827
22828+ pax_close_kernel();
22829 spin_unlock(&lock);
22830 }
22831
22832diff -urNp linux-3.0.4/drivers/ata/libata-eh.c linux-3.0.4/drivers/ata/libata-eh.c
22833--- linux-3.0.4/drivers/ata/libata-eh.c 2011-07-21 22:17:23.000000000 -0400
22834+++ linux-3.0.4/drivers/ata/libata-eh.c 2011-08-23 21:48:14.000000000 -0400
22835@@ -2518,6 +2518,8 @@ void ata_eh_report(struct ata_port *ap)
22836 {
22837 struct ata_link *link;
22838
22839+ pax_track_stack();
22840+
22841 ata_for_each_link(link, ap, HOST_FIRST)
22842 ata_eh_link_report(link);
22843 }
22844diff -urNp linux-3.0.4/drivers/ata/pata_arasan_cf.c linux-3.0.4/drivers/ata/pata_arasan_cf.c
22845--- linux-3.0.4/drivers/ata/pata_arasan_cf.c 2011-07-21 22:17:23.000000000 -0400
22846+++ linux-3.0.4/drivers/ata/pata_arasan_cf.c 2011-08-23 21:47:55.000000000 -0400
22847@@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(str
22848 /* Handle platform specific quirks */
22849 if (pdata->quirk) {
22850 if (pdata->quirk & CF_BROKEN_PIO) {
22851- ap->ops->set_piomode = NULL;
22852+ pax_open_kernel();
22853+ *(void **)&ap->ops->set_piomode = NULL;
22854+ pax_close_kernel();
22855 ap->pio_mask = 0;
22856 }
22857 if (pdata->quirk & CF_BROKEN_MWDMA)
22858diff -urNp linux-3.0.4/drivers/atm/adummy.c linux-3.0.4/drivers/atm/adummy.c
22859--- linux-3.0.4/drivers/atm/adummy.c 2011-07-21 22:17:23.000000000 -0400
22860+++ linux-3.0.4/drivers/atm/adummy.c 2011-08-23 21:47:55.000000000 -0400
22861@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct
22862 vcc->pop(vcc, skb);
22863 else
22864 dev_kfree_skb_any(skb);
22865- atomic_inc(&vcc->stats->tx);
22866+ atomic_inc_unchecked(&vcc->stats->tx);
22867
22868 return 0;
22869 }
22870diff -urNp linux-3.0.4/drivers/atm/ambassador.c linux-3.0.4/drivers/atm/ambassador.c
22871--- linux-3.0.4/drivers/atm/ambassador.c 2011-07-21 22:17:23.000000000 -0400
22872+++ linux-3.0.4/drivers/atm/ambassador.c 2011-08-23 21:47:55.000000000 -0400
22873@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev,
22874 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
22875
22876 // VC layer stats
22877- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22878+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22879
22880 // free the descriptor
22881 kfree (tx_descr);
22882@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev,
22883 dump_skb ("<<<", vc, skb);
22884
22885 // VC layer stats
22886- atomic_inc(&atm_vcc->stats->rx);
22887+ atomic_inc_unchecked(&atm_vcc->stats->rx);
22888 __net_timestamp(skb);
22889 // end of our responsibility
22890 atm_vcc->push (atm_vcc, skb);
22891@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev,
22892 } else {
22893 PRINTK (KERN_INFO, "dropped over-size frame");
22894 // should we count this?
22895- atomic_inc(&atm_vcc->stats->rx_drop);
22896+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22897 }
22898
22899 } else {
22900@@ -1342,7 +1342,7 @@ static int amb_send (struct atm_vcc * at
22901 }
22902
22903 if (check_area (skb->data, skb->len)) {
22904- atomic_inc(&atm_vcc->stats->tx_err);
22905+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
22906 return -ENOMEM; // ?
22907 }
22908
22909diff -urNp linux-3.0.4/drivers/atm/atmtcp.c linux-3.0.4/drivers/atm/atmtcp.c
22910--- linux-3.0.4/drivers/atm/atmtcp.c 2011-07-21 22:17:23.000000000 -0400
22911+++ linux-3.0.4/drivers/atm/atmtcp.c 2011-08-23 21:47:55.000000000 -0400
22912@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc
22913 if (vcc->pop) vcc->pop(vcc,skb);
22914 else dev_kfree_skb(skb);
22915 if (dev_data) return 0;
22916- atomic_inc(&vcc->stats->tx_err);
22917+ atomic_inc_unchecked(&vcc->stats->tx_err);
22918 return -ENOLINK;
22919 }
22920 size = skb->len+sizeof(struct atmtcp_hdr);
22921@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc
22922 if (!new_skb) {
22923 if (vcc->pop) vcc->pop(vcc,skb);
22924 else dev_kfree_skb(skb);
22925- atomic_inc(&vcc->stats->tx_err);
22926+ atomic_inc_unchecked(&vcc->stats->tx_err);
22927 return -ENOBUFS;
22928 }
22929 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
22930@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc
22931 if (vcc->pop) vcc->pop(vcc,skb);
22932 else dev_kfree_skb(skb);
22933 out_vcc->push(out_vcc,new_skb);
22934- atomic_inc(&vcc->stats->tx);
22935- atomic_inc(&out_vcc->stats->rx);
22936+ atomic_inc_unchecked(&vcc->stats->tx);
22937+ atomic_inc_unchecked(&out_vcc->stats->rx);
22938 return 0;
22939 }
22940
22941@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc
22942 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
22943 read_unlock(&vcc_sklist_lock);
22944 if (!out_vcc) {
22945- atomic_inc(&vcc->stats->tx_err);
22946+ atomic_inc_unchecked(&vcc->stats->tx_err);
22947 goto done;
22948 }
22949 skb_pull(skb,sizeof(struct atmtcp_hdr));
22950@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc
22951 __net_timestamp(new_skb);
22952 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
22953 out_vcc->push(out_vcc,new_skb);
22954- atomic_inc(&vcc->stats->tx);
22955- atomic_inc(&out_vcc->stats->rx);
22956+ atomic_inc_unchecked(&vcc->stats->tx);
22957+ atomic_inc_unchecked(&out_vcc->stats->rx);
22958 done:
22959 if (vcc->pop) vcc->pop(vcc,skb);
22960 else dev_kfree_skb(skb);
22961diff -urNp linux-3.0.4/drivers/atm/eni.c linux-3.0.4/drivers/atm/eni.c
22962--- linux-3.0.4/drivers/atm/eni.c 2011-07-21 22:17:23.000000000 -0400
22963+++ linux-3.0.4/drivers/atm/eni.c 2011-08-23 21:47:55.000000000 -0400
22964@@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
22965 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
22966 vcc->dev->number);
22967 length = 0;
22968- atomic_inc(&vcc->stats->rx_err);
22969+ atomic_inc_unchecked(&vcc->stats->rx_err);
22970 }
22971 else {
22972 length = ATM_CELL_SIZE-1; /* no HEC */
22973@@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22974 size);
22975 }
22976 eff = length = 0;
22977- atomic_inc(&vcc->stats->rx_err);
22978+ atomic_inc_unchecked(&vcc->stats->rx_err);
22979 }
22980 else {
22981 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
22982@@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22983 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
22984 vcc->dev->number,vcc->vci,length,size << 2,descr);
22985 length = eff = 0;
22986- atomic_inc(&vcc->stats->rx_err);
22987+ atomic_inc_unchecked(&vcc->stats->rx_err);
22988 }
22989 }
22990 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
22991@@ -771,7 +771,7 @@ rx_dequeued++;
22992 vcc->push(vcc,skb);
22993 pushed++;
22994 }
22995- atomic_inc(&vcc->stats->rx);
22996+ atomic_inc_unchecked(&vcc->stats->rx);
22997 }
22998 wake_up(&eni_dev->rx_wait);
22999 }
23000@@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *d
23001 PCI_DMA_TODEVICE);
23002 if (vcc->pop) vcc->pop(vcc,skb);
23003 else dev_kfree_skb_irq(skb);
23004- atomic_inc(&vcc->stats->tx);
23005+ atomic_inc_unchecked(&vcc->stats->tx);
23006 wake_up(&eni_dev->tx_wait);
23007 dma_complete++;
23008 }
23009diff -urNp linux-3.0.4/drivers/atm/firestream.c linux-3.0.4/drivers/atm/firestream.c
23010--- linux-3.0.4/drivers/atm/firestream.c 2011-07-21 22:17:23.000000000 -0400
23011+++ linux-3.0.4/drivers/atm/firestream.c 2011-08-23 21:47:55.000000000 -0400
23012@@ -749,7 +749,7 @@ static void process_txdone_queue (struct
23013 }
23014 }
23015
23016- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
23017+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
23018
23019 fs_dprintk (FS_DEBUG_TXMEM, "i");
23020 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
23021@@ -816,7 +816,7 @@ static void process_incoming (struct fs_
23022 #endif
23023 skb_put (skb, qe->p1 & 0xffff);
23024 ATM_SKB(skb)->vcc = atm_vcc;
23025- atomic_inc(&atm_vcc->stats->rx);
23026+ atomic_inc_unchecked(&atm_vcc->stats->rx);
23027 __net_timestamp(skb);
23028 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
23029 atm_vcc->push (atm_vcc, skb);
23030@@ -837,12 +837,12 @@ static void process_incoming (struct fs_
23031 kfree (pe);
23032 }
23033 if (atm_vcc)
23034- atomic_inc(&atm_vcc->stats->rx_drop);
23035+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
23036 break;
23037 case 0x1f: /* Reassembly abort: no buffers. */
23038 /* Silently increment error counter. */
23039 if (atm_vcc)
23040- atomic_inc(&atm_vcc->stats->rx_drop);
23041+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
23042 break;
23043 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
23044 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
23045diff -urNp linux-3.0.4/drivers/atm/fore200e.c linux-3.0.4/drivers/atm/fore200e.c
23046--- linux-3.0.4/drivers/atm/fore200e.c 2011-07-21 22:17:23.000000000 -0400
23047+++ linux-3.0.4/drivers/atm/fore200e.c 2011-08-23 21:47:55.000000000 -0400
23048@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200
23049 #endif
23050 /* check error condition */
23051 if (*entry->status & STATUS_ERROR)
23052- atomic_inc(&vcc->stats->tx_err);
23053+ atomic_inc_unchecked(&vcc->stats->tx_err);
23054 else
23055- atomic_inc(&vcc->stats->tx);
23056+ atomic_inc_unchecked(&vcc->stats->tx);
23057 }
23058 }
23059
23060@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore2
23061 if (skb == NULL) {
23062 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
23063
23064- atomic_inc(&vcc->stats->rx_drop);
23065+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23066 return -ENOMEM;
23067 }
23068
23069@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore2
23070
23071 dev_kfree_skb_any(skb);
23072
23073- atomic_inc(&vcc->stats->rx_drop);
23074+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23075 return -ENOMEM;
23076 }
23077
23078 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
23079
23080 vcc->push(vcc, skb);
23081- atomic_inc(&vcc->stats->rx);
23082+ atomic_inc_unchecked(&vcc->stats->rx);
23083
23084 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
23085
23086@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200
23087 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
23088 fore200e->atm_dev->number,
23089 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
23090- atomic_inc(&vcc->stats->rx_err);
23091+ atomic_inc_unchecked(&vcc->stats->rx_err);
23092 }
23093 }
23094
23095@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struc
23096 goto retry_here;
23097 }
23098
23099- atomic_inc(&vcc->stats->tx_err);
23100+ atomic_inc_unchecked(&vcc->stats->tx_err);
23101
23102 fore200e->tx_sat++;
23103 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
23104diff -urNp linux-3.0.4/drivers/atm/he.c linux-3.0.4/drivers/atm/he.c
23105--- linux-3.0.4/drivers/atm/he.c 2011-07-21 22:17:23.000000000 -0400
23106+++ linux-3.0.4/drivers/atm/he.c 2011-08-23 21:47:55.000000000 -0400
23107@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, i
23108
23109 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
23110 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
23111- atomic_inc(&vcc->stats->rx_drop);
23112+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23113 goto return_host_buffers;
23114 }
23115
23116@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, i
23117 RBRQ_LEN_ERR(he_dev->rbrq_head)
23118 ? "LEN_ERR" : "",
23119 vcc->vpi, vcc->vci);
23120- atomic_inc(&vcc->stats->rx_err);
23121+ atomic_inc_unchecked(&vcc->stats->rx_err);
23122 goto return_host_buffers;
23123 }
23124
23125@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, i
23126 vcc->push(vcc, skb);
23127 spin_lock(&he_dev->global_lock);
23128
23129- atomic_inc(&vcc->stats->rx);
23130+ atomic_inc_unchecked(&vcc->stats->rx);
23131
23132 return_host_buffers:
23133 ++pdus_assembled;
23134@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
23135 tpd->vcc->pop(tpd->vcc, tpd->skb);
23136 else
23137 dev_kfree_skb_any(tpd->skb);
23138- atomic_inc(&tpd->vcc->stats->tx_err);
23139+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
23140 }
23141 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
23142 return;
23143@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
23144 vcc->pop(vcc, skb);
23145 else
23146 dev_kfree_skb_any(skb);
23147- atomic_inc(&vcc->stats->tx_err);
23148+ atomic_inc_unchecked(&vcc->stats->tx_err);
23149 return -EINVAL;
23150 }
23151
23152@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
23153 vcc->pop(vcc, skb);
23154 else
23155 dev_kfree_skb_any(skb);
23156- atomic_inc(&vcc->stats->tx_err);
23157+ atomic_inc_unchecked(&vcc->stats->tx_err);
23158 return -EINVAL;
23159 }
23160 #endif
23161@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
23162 vcc->pop(vcc, skb);
23163 else
23164 dev_kfree_skb_any(skb);
23165- atomic_inc(&vcc->stats->tx_err);
23166+ atomic_inc_unchecked(&vcc->stats->tx_err);
23167 spin_unlock_irqrestore(&he_dev->global_lock, flags);
23168 return -ENOMEM;
23169 }
23170@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
23171 vcc->pop(vcc, skb);
23172 else
23173 dev_kfree_skb_any(skb);
23174- atomic_inc(&vcc->stats->tx_err);
23175+ atomic_inc_unchecked(&vcc->stats->tx_err);
23176 spin_unlock_irqrestore(&he_dev->global_lock, flags);
23177 return -ENOMEM;
23178 }
23179@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
23180 __enqueue_tpd(he_dev, tpd, cid);
23181 spin_unlock_irqrestore(&he_dev->global_lock, flags);
23182
23183- atomic_inc(&vcc->stats->tx);
23184+ atomic_inc_unchecked(&vcc->stats->tx);
23185
23186 return 0;
23187 }
23188diff -urNp linux-3.0.4/drivers/atm/horizon.c linux-3.0.4/drivers/atm/horizon.c
23189--- linux-3.0.4/drivers/atm/horizon.c 2011-07-21 22:17:23.000000000 -0400
23190+++ linux-3.0.4/drivers/atm/horizon.c 2011-08-23 21:47:55.000000000 -0400
23191@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev,
23192 {
23193 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
23194 // VC layer stats
23195- atomic_inc(&vcc->stats->rx);
23196+ atomic_inc_unchecked(&vcc->stats->rx);
23197 __net_timestamp(skb);
23198 // end of our responsibility
23199 vcc->push (vcc, skb);
23200@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const
23201 dev->tx_iovec = NULL;
23202
23203 // VC layer stats
23204- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
23205+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
23206
23207 // free the skb
23208 hrz_kfree_skb (skb);
23209diff -urNp linux-3.0.4/drivers/atm/idt77252.c linux-3.0.4/drivers/atm/idt77252.c
23210--- linux-3.0.4/drivers/atm/idt77252.c 2011-07-21 22:17:23.000000000 -0400
23211+++ linux-3.0.4/drivers/atm/idt77252.c 2011-08-23 21:47:55.000000000 -0400
23212@@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, str
23213 else
23214 dev_kfree_skb(skb);
23215
23216- atomic_inc(&vcc->stats->tx);
23217+ atomic_inc_unchecked(&vcc->stats->tx);
23218 }
23219
23220 atomic_dec(&scq->used);
23221@@ -1074,13 +1074,13 @@ dequeue_rx(struct idt77252_dev *card, st
23222 if ((sb = dev_alloc_skb(64)) == NULL) {
23223 printk("%s: Can't allocate buffers for aal0.\n",
23224 card->name);
23225- atomic_add(i, &vcc->stats->rx_drop);
23226+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
23227 break;
23228 }
23229 if (!atm_charge(vcc, sb->truesize)) {
23230 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
23231 card->name);
23232- atomic_add(i - 1, &vcc->stats->rx_drop);
23233+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
23234 dev_kfree_skb(sb);
23235 break;
23236 }
23237@@ -1097,7 +1097,7 @@ dequeue_rx(struct idt77252_dev *card, st
23238 ATM_SKB(sb)->vcc = vcc;
23239 __net_timestamp(sb);
23240 vcc->push(vcc, sb);
23241- atomic_inc(&vcc->stats->rx);
23242+ atomic_inc_unchecked(&vcc->stats->rx);
23243
23244 cell += ATM_CELL_PAYLOAD;
23245 }
23246@@ -1134,13 +1134,13 @@ dequeue_rx(struct idt77252_dev *card, st
23247 "(CDC: %08x)\n",
23248 card->name, len, rpp->len, readl(SAR_REG_CDC));
23249 recycle_rx_pool_skb(card, rpp);
23250- atomic_inc(&vcc->stats->rx_err);
23251+ atomic_inc_unchecked(&vcc->stats->rx_err);
23252 return;
23253 }
23254 if (stat & SAR_RSQE_CRC) {
23255 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
23256 recycle_rx_pool_skb(card, rpp);
23257- atomic_inc(&vcc->stats->rx_err);
23258+ atomic_inc_unchecked(&vcc->stats->rx_err);
23259 return;
23260 }
23261 if (skb_queue_len(&rpp->queue) > 1) {
23262@@ -1151,7 +1151,7 @@ dequeue_rx(struct idt77252_dev *card, st
23263 RXPRINTK("%s: Can't alloc RX skb.\n",
23264 card->name);
23265 recycle_rx_pool_skb(card, rpp);
23266- atomic_inc(&vcc->stats->rx_err);
23267+ atomic_inc_unchecked(&vcc->stats->rx_err);
23268 return;
23269 }
23270 if (!atm_charge(vcc, skb->truesize)) {
23271@@ -1170,7 +1170,7 @@ dequeue_rx(struct idt77252_dev *card, st
23272 __net_timestamp(skb);
23273
23274 vcc->push(vcc, skb);
23275- atomic_inc(&vcc->stats->rx);
23276+ atomic_inc_unchecked(&vcc->stats->rx);
23277
23278 return;
23279 }
23280@@ -1192,7 +1192,7 @@ dequeue_rx(struct idt77252_dev *card, st
23281 __net_timestamp(skb);
23282
23283 vcc->push(vcc, skb);
23284- atomic_inc(&vcc->stats->rx);
23285+ atomic_inc_unchecked(&vcc->stats->rx);
23286
23287 if (skb->truesize > SAR_FB_SIZE_3)
23288 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
23289@@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
23290 if (vcc->qos.aal != ATM_AAL0) {
23291 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
23292 card->name, vpi, vci);
23293- atomic_inc(&vcc->stats->rx_drop);
23294+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23295 goto drop;
23296 }
23297
23298 if ((sb = dev_alloc_skb(64)) == NULL) {
23299 printk("%s: Can't allocate buffers for AAL0.\n",
23300 card->name);
23301- atomic_inc(&vcc->stats->rx_err);
23302+ atomic_inc_unchecked(&vcc->stats->rx_err);
23303 goto drop;
23304 }
23305
23306@@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
23307 ATM_SKB(sb)->vcc = vcc;
23308 __net_timestamp(sb);
23309 vcc->push(vcc, sb);
23310- atomic_inc(&vcc->stats->rx);
23311+ atomic_inc_unchecked(&vcc->stats->rx);
23312
23313 drop:
23314 skb_pull(queue, 64);
23315@@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
23316
23317 if (vc == NULL) {
23318 printk("%s: NULL connection in send().\n", card->name);
23319- atomic_inc(&vcc->stats->tx_err);
23320+ atomic_inc_unchecked(&vcc->stats->tx_err);
23321 dev_kfree_skb(skb);
23322 return -EINVAL;
23323 }
23324 if (!test_bit(VCF_TX, &vc->flags)) {
23325 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
23326- atomic_inc(&vcc->stats->tx_err);
23327+ atomic_inc_unchecked(&vcc->stats->tx_err);
23328 dev_kfree_skb(skb);
23329 return -EINVAL;
23330 }
23331@@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
23332 break;
23333 default:
23334 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
23335- atomic_inc(&vcc->stats->tx_err);
23336+ atomic_inc_unchecked(&vcc->stats->tx_err);
23337 dev_kfree_skb(skb);
23338 return -EINVAL;
23339 }
23340
23341 if (skb_shinfo(skb)->nr_frags != 0) {
23342 printk("%s: No scatter-gather yet.\n", card->name);
23343- atomic_inc(&vcc->stats->tx_err);
23344+ atomic_inc_unchecked(&vcc->stats->tx_err);
23345 dev_kfree_skb(skb);
23346 return -EINVAL;
23347 }
23348@@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
23349
23350 err = queue_skb(card, vc, skb, oam);
23351 if (err) {
23352- atomic_inc(&vcc->stats->tx_err);
23353+ atomic_inc_unchecked(&vcc->stats->tx_err);
23354 dev_kfree_skb(skb);
23355 return err;
23356 }
23357@@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
23358 skb = dev_alloc_skb(64);
23359 if (!skb) {
23360 printk("%s: Out of memory in send_oam().\n", card->name);
23361- atomic_inc(&vcc->stats->tx_err);
23362+ atomic_inc_unchecked(&vcc->stats->tx_err);
23363 return -ENOMEM;
23364 }
23365 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
23366diff -urNp linux-3.0.4/drivers/atm/iphase.c linux-3.0.4/drivers/atm/iphase.c
23367--- linux-3.0.4/drivers/atm/iphase.c 2011-07-21 22:17:23.000000000 -0400
23368+++ linux-3.0.4/drivers/atm/iphase.c 2011-08-23 21:47:55.000000000 -0400
23369@@ -1120,7 +1120,7 @@ static int rx_pkt(struct atm_dev *dev)
23370 status = (u_short) (buf_desc_ptr->desc_mode);
23371 if (status & (RX_CER | RX_PTE | RX_OFL))
23372 {
23373- atomic_inc(&vcc->stats->rx_err);
23374+ atomic_inc_unchecked(&vcc->stats->rx_err);
23375 IF_ERR(printk("IA: bad packet, dropping it");)
23376 if (status & RX_CER) {
23377 IF_ERR(printk(" cause: packet CRC error\n");)
23378@@ -1143,7 +1143,7 @@ static int rx_pkt(struct atm_dev *dev)
23379 len = dma_addr - buf_addr;
23380 if (len > iadev->rx_buf_sz) {
23381 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
23382- atomic_inc(&vcc->stats->rx_err);
23383+ atomic_inc_unchecked(&vcc->stats->rx_err);
23384 goto out_free_desc;
23385 }
23386
23387@@ -1293,7 +1293,7 @@ static void rx_dle_intr(struct atm_dev *
23388 ia_vcc = INPH_IA_VCC(vcc);
23389 if (ia_vcc == NULL)
23390 {
23391- atomic_inc(&vcc->stats->rx_err);
23392+ atomic_inc_unchecked(&vcc->stats->rx_err);
23393 dev_kfree_skb_any(skb);
23394 atm_return(vcc, atm_guess_pdu2truesize(len));
23395 goto INCR_DLE;
23396@@ -1305,7 +1305,7 @@ static void rx_dle_intr(struct atm_dev *
23397 if ((length > iadev->rx_buf_sz) || (length >
23398 (skb->len - sizeof(struct cpcs_trailer))))
23399 {
23400- atomic_inc(&vcc->stats->rx_err);
23401+ atomic_inc_unchecked(&vcc->stats->rx_err);
23402 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
23403 length, skb->len);)
23404 dev_kfree_skb_any(skb);
23405@@ -1321,7 +1321,7 @@ static void rx_dle_intr(struct atm_dev *
23406
23407 IF_RX(printk("rx_dle_intr: skb push");)
23408 vcc->push(vcc,skb);
23409- atomic_inc(&vcc->stats->rx);
23410+ atomic_inc_unchecked(&vcc->stats->rx);
23411 iadev->rx_pkt_cnt++;
23412 }
23413 INCR_DLE:
23414@@ -2801,15 +2801,15 @@ static int ia_ioctl(struct atm_dev *dev,
23415 {
23416 struct k_sonet_stats *stats;
23417 stats = &PRIV(_ia_dev[board])->sonet_stats;
23418- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
23419- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
23420- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
23421- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
23422- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
23423- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
23424- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
23425- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
23426- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
23427+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
23428+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
23429+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
23430+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
23431+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
23432+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
23433+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
23434+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
23435+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
23436 }
23437 ia_cmds.status = 0;
23438 break;
23439@@ -2914,7 +2914,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
23440 if ((desc == 0) || (desc > iadev->num_tx_desc))
23441 {
23442 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
23443- atomic_inc(&vcc->stats->tx);
23444+ atomic_inc_unchecked(&vcc->stats->tx);
23445 if (vcc->pop)
23446 vcc->pop(vcc, skb);
23447 else
23448@@ -3019,14 +3019,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
23449 ATM_DESC(skb) = vcc->vci;
23450 skb_queue_tail(&iadev->tx_dma_q, skb);
23451
23452- atomic_inc(&vcc->stats->tx);
23453+ atomic_inc_unchecked(&vcc->stats->tx);
23454 iadev->tx_pkt_cnt++;
23455 /* Increment transaction counter */
23456 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
23457
23458 #if 0
23459 /* add flow control logic */
23460- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
23461+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
23462 if (iavcc->vc_desc_cnt > 10) {
23463 vcc->tx_quota = vcc->tx_quota * 3 / 4;
23464 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
23465diff -urNp linux-3.0.4/drivers/atm/lanai.c linux-3.0.4/drivers/atm/lanai.c
23466--- linux-3.0.4/drivers/atm/lanai.c 2011-07-21 22:17:23.000000000 -0400
23467+++ linux-3.0.4/drivers/atm/lanai.c 2011-08-23 21:47:55.000000000 -0400
23468@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct l
23469 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
23470 lanai_endtx(lanai, lvcc);
23471 lanai_free_skb(lvcc->tx.atmvcc, skb);
23472- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
23473+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
23474 }
23475
23476 /* Try to fill the buffer - don't call unless there is backlog */
23477@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc
23478 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
23479 __net_timestamp(skb);
23480 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
23481- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
23482+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
23483 out:
23484 lvcc->rx.buf.ptr = end;
23485 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
23486@@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_d
23487 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
23488 "vcc %d\n", lanai->number, (unsigned int) s, vci);
23489 lanai->stats.service_rxnotaal5++;
23490- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
23491+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
23492 return 0;
23493 }
23494 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
23495@@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_d
23496 int bytes;
23497 read_unlock(&vcc_sklist_lock);
23498 DPRINTK("got trashed rx pdu on vci %d\n", vci);
23499- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
23500+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
23501 lvcc->stats.x.aal5.service_trash++;
23502 bytes = (SERVICE_GET_END(s) * 16) -
23503 (((unsigned long) lvcc->rx.buf.ptr) -
23504@@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_d
23505 }
23506 if (s & SERVICE_STREAM) {
23507 read_unlock(&vcc_sklist_lock);
23508- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
23509+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
23510 lvcc->stats.x.aal5.service_stream++;
23511 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
23512 "PDU on VCI %d!\n", lanai->number, vci);
23513@@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_d
23514 return 0;
23515 }
23516 DPRINTK("got rx crc error on vci %d\n", vci);
23517- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
23518+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
23519 lvcc->stats.x.aal5.service_rxcrc++;
23520 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
23521 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
23522diff -urNp linux-3.0.4/drivers/atm/nicstar.c linux-3.0.4/drivers/atm/nicstar.c
23523--- linux-3.0.4/drivers/atm/nicstar.c 2011-07-21 22:17:23.000000000 -0400
23524+++ linux-3.0.4/drivers/atm/nicstar.c 2011-08-23 21:47:55.000000000 -0400
23525@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc,
23526 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
23527 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
23528 card->index);
23529- atomic_inc(&vcc->stats->tx_err);
23530+ atomic_inc_unchecked(&vcc->stats->tx_err);
23531 dev_kfree_skb_any(skb);
23532 return -EINVAL;
23533 }
23534@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc,
23535 if (!vc->tx) {
23536 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
23537 card->index);
23538- atomic_inc(&vcc->stats->tx_err);
23539+ atomic_inc_unchecked(&vcc->stats->tx_err);
23540 dev_kfree_skb_any(skb);
23541 return -EINVAL;
23542 }
23543@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc,
23544 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
23545 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
23546 card->index);
23547- atomic_inc(&vcc->stats->tx_err);
23548+ atomic_inc_unchecked(&vcc->stats->tx_err);
23549 dev_kfree_skb_any(skb);
23550 return -EINVAL;
23551 }
23552
23553 if (skb_shinfo(skb)->nr_frags != 0) {
23554 printk("nicstar%d: No scatter-gather yet.\n", card->index);
23555- atomic_inc(&vcc->stats->tx_err);
23556+ atomic_inc_unchecked(&vcc->stats->tx_err);
23557 dev_kfree_skb_any(skb);
23558 return -EINVAL;
23559 }
23560@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc,
23561 }
23562
23563 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
23564- atomic_inc(&vcc->stats->tx_err);
23565+ atomic_inc_unchecked(&vcc->stats->tx_err);
23566 dev_kfree_skb_any(skb);
23567 return -EIO;
23568 }
23569- atomic_inc(&vcc->stats->tx);
23570+ atomic_inc_unchecked(&vcc->stats->tx);
23571
23572 return 0;
23573 }
23574@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns
23575 printk
23576 ("nicstar%d: Can't allocate buffers for aal0.\n",
23577 card->index);
23578- atomic_add(i, &vcc->stats->rx_drop);
23579+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
23580 break;
23581 }
23582 if (!atm_charge(vcc, sb->truesize)) {
23583 RXPRINTK
23584 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
23585 card->index);
23586- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
23587+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
23588 dev_kfree_skb_any(sb);
23589 break;
23590 }
23591@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns
23592 ATM_SKB(sb)->vcc = vcc;
23593 __net_timestamp(sb);
23594 vcc->push(vcc, sb);
23595- atomic_inc(&vcc->stats->rx);
23596+ atomic_inc_unchecked(&vcc->stats->rx);
23597 cell += ATM_CELL_PAYLOAD;
23598 }
23599
23600@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns
23601 if (iovb == NULL) {
23602 printk("nicstar%d: Out of iovec buffers.\n",
23603 card->index);
23604- atomic_inc(&vcc->stats->rx_drop);
23605+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23606 recycle_rx_buf(card, skb);
23607 return;
23608 }
23609@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns
23610 small or large buffer itself. */
23611 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
23612 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
23613- atomic_inc(&vcc->stats->rx_err);
23614+ atomic_inc_unchecked(&vcc->stats->rx_err);
23615 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
23616 NS_MAX_IOVECS);
23617 NS_PRV_IOVCNT(iovb) = 0;
23618@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns
23619 ("nicstar%d: Expected a small buffer, and this is not one.\n",
23620 card->index);
23621 which_list(card, skb);
23622- atomic_inc(&vcc->stats->rx_err);
23623+ atomic_inc_unchecked(&vcc->stats->rx_err);
23624 recycle_rx_buf(card, skb);
23625 vc->rx_iov = NULL;
23626 recycle_iov_buf(card, iovb);
23627@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns
23628 ("nicstar%d: Expected a large buffer, and this is not one.\n",
23629 card->index);
23630 which_list(card, skb);
23631- atomic_inc(&vcc->stats->rx_err);
23632+ atomic_inc_unchecked(&vcc->stats->rx_err);
23633 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
23634 NS_PRV_IOVCNT(iovb));
23635 vc->rx_iov = NULL;
23636@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns
23637 printk(" - PDU size mismatch.\n");
23638 else
23639 printk(".\n");
23640- atomic_inc(&vcc->stats->rx_err);
23641+ atomic_inc_unchecked(&vcc->stats->rx_err);
23642 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
23643 NS_PRV_IOVCNT(iovb));
23644 vc->rx_iov = NULL;
23645@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns
23646 /* skb points to a small buffer */
23647 if (!atm_charge(vcc, skb->truesize)) {
23648 push_rxbufs(card, skb);
23649- atomic_inc(&vcc->stats->rx_drop);
23650+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23651 } else {
23652 skb_put(skb, len);
23653 dequeue_sm_buf(card, skb);
23654@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns
23655 ATM_SKB(skb)->vcc = vcc;
23656 __net_timestamp(skb);
23657 vcc->push(vcc, skb);
23658- atomic_inc(&vcc->stats->rx);
23659+ atomic_inc_unchecked(&vcc->stats->rx);
23660 }
23661 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
23662 struct sk_buff *sb;
23663@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns
23664 if (len <= NS_SMBUFSIZE) {
23665 if (!atm_charge(vcc, sb->truesize)) {
23666 push_rxbufs(card, sb);
23667- atomic_inc(&vcc->stats->rx_drop);
23668+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23669 } else {
23670 skb_put(sb, len);
23671 dequeue_sm_buf(card, sb);
23672@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns
23673 ATM_SKB(sb)->vcc = vcc;
23674 __net_timestamp(sb);
23675 vcc->push(vcc, sb);
23676- atomic_inc(&vcc->stats->rx);
23677+ atomic_inc_unchecked(&vcc->stats->rx);
23678 }
23679
23680 push_rxbufs(card, skb);
23681@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns
23682
23683 if (!atm_charge(vcc, skb->truesize)) {
23684 push_rxbufs(card, skb);
23685- atomic_inc(&vcc->stats->rx_drop);
23686+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23687 } else {
23688 dequeue_lg_buf(card, skb);
23689 #ifdef NS_USE_DESTRUCTORS
23690@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns
23691 ATM_SKB(skb)->vcc = vcc;
23692 __net_timestamp(skb);
23693 vcc->push(vcc, skb);
23694- atomic_inc(&vcc->stats->rx);
23695+ atomic_inc_unchecked(&vcc->stats->rx);
23696 }
23697
23698 push_rxbufs(card, sb);
23699@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns
23700 printk
23701 ("nicstar%d: Out of huge buffers.\n",
23702 card->index);
23703- atomic_inc(&vcc->stats->rx_drop);
23704+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23705 recycle_iovec_rx_bufs(card,
23706 (struct iovec *)
23707 iovb->data,
23708@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns
23709 card->hbpool.count++;
23710 } else
23711 dev_kfree_skb_any(hb);
23712- atomic_inc(&vcc->stats->rx_drop);
23713+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23714 } else {
23715 /* Copy the small buffer to the huge buffer */
23716 sb = (struct sk_buff *)iov->iov_base;
23717@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns
23718 #endif /* NS_USE_DESTRUCTORS */
23719 __net_timestamp(hb);
23720 vcc->push(vcc, hb);
23721- atomic_inc(&vcc->stats->rx);
23722+ atomic_inc_unchecked(&vcc->stats->rx);
23723 }
23724 }
23725
23726diff -urNp linux-3.0.4/drivers/atm/solos-pci.c linux-3.0.4/drivers/atm/solos-pci.c
23727--- linux-3.0.4/drivers/atm/solos-pci.c 2011-07-21 22:17:23.000000000 -0400
23728+++ linux-3.0.4/drivers/atm/solos-pci.c 2011-08-23 21:48:14.000000000 -0400
23729@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
23730 }
23731 atm_charge(vcc, skb->truesize);
23732 vcc->push(vcc, skb);
23733- atomic_inc(&vcc->stats->rx);
23734+ atomic_inc_unchecked(&vcc->stats->rx);
23735 break;
23736
23737 case PKT_STATUS:
23738@@ -899,6 +899,8 @@ static int print_buffer(struct sk_buff *
23739 char msg[500];
23740 char item[10];
23741
23742+ pax_track_stack();
23743+
23744 len = buf->len;
23745 for (i = 0; i < len; i++){
23746 if(i % 8 == 0)
23747@@ -1008,7 +1010,7 @@ static uint32_t fpga_tx(struct solos_car
23748 vcc = SKB_CB(oldskb)->vcc;
23749
23750 if (vcc) {
23751- atomic_inc(&vcc->stats->tx);
23752+ atomic_inc_unchecked(&vcc->stats->tx);
23753 solos_pop(vcc, oldskb);
23754 } else
23755 dev_kfree_skb_irq(oldskb);
23756diff -urNp linux-3.0.4/drivers/atm/suni.c linux-3.0.4/drivers/atm/suni.c
23757--- linux-3.0.4/drivers/atm/suni.c 2011-07-21 22:17:23.000000000 -0400
23758+++ linux-3.0.4/drivers/atm/suni.c 2011-08-23 21:47:55.000000000 -0400
23759@@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
23760
23761
23762 #define ADD_LIMITED(s,v) \
23763- atomic_add((v),&stats->s); \
23764- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
23765+ atomic_add_unchecked((v),&stats->s); \
23766+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
23767
23768
23769 static void suni_hz(unsigned long from_timer)
23770diff -urNp linux-3.0.4/drivers/atm/uPD98402.c linux-3.0.4/drivers/atm/uPD98402.c
23771--- linux-3.0.4/drivers/atm/uPD98402.c 2011-07-21 22:17:23.000000000 -0400
23772+++ linux-3.0.4/drivers/atm/uPD98402.c 2011-08-23 21:47:55.000000000 -0400
23773@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *d
23774 struct sonet_stats tmp;
23775 int error = 0;
23776
23777- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
23778+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
23779 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
23780 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
23781 if (zero && !error) {
23782@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev
23783
23784
23785 #define ADD_LIMITED(s,v) \
23786- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
23787- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
23788- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
23789+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
23790+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
23791+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
23792
23793
23794 static void stat_event(struct atm_dev *dev)
23795@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev
23796 if (reason & uPD98402_INT_PFM) stat_event(dev);
23797 if (reason & uPD98402_INT_PCO) {
23798 (void) GET(PCOCR); /* clear interrupt cause */
23799- atomic_add(GET(HECCT),
23800+ atomic_add_unchecked(GET(HECCT),
23801 &PRIV(dev)->sonet_stats.uncorr_hcs);
23802 }
23803 if ((reason & uPD98402_INT_RFO) &&
23804@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev
23805 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
23806 uPD98402_INT_LOS),PIMR); /* enable them */
23807 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
23808- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
23809- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
23810- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
23811+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
23812+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
23813+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
23814 return 0;
23815 }
23816
23817diff -urNp linux-3.0.4/drivers/atm/zatm.c linux-3.0.4/drivers/atm/zatm.c
23818--- linux-3.0.4/drivers/atm/zatm.c 2011-07-21 22:17:23.000000000 -0400
23819+++ linux-3.0.4/drivers/atm/zatm.c 2011-08-23 21:47:55.000000000 -0400
23820@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
23821 }
23822 if (!size) {
23823 dev_kfree_skb_irq(skb);
23824- if (vcc) atomic_inc(&vcc->stats->rx_err);
23825+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
23826 continue;
23827 }
23828 if (!atm_charge(vcc,skb->truesize)) {
23829@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
23830 skb->len = size;
23831 ATM_SKB(skb)->vcc = vcc;
23832 vcc->push(vcc,skb);
23833- atomic_inc(&vcc->stats->rx);
23834+ atomic_inc_unchecked(&vcc->stats->rx);
23835 }
23836 zout(pos & 0xffff,MTA(mbx));
23837 #if 0 /* probably a stupid idea */
23838@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
23839 skb_queue_head(&zatm_vcc->backlog,skb);
23840 break;
23841 }
23842- atomic_inc(&vcc->stats->tx);
23843+ atomic_inc_unchecked(&vcc->stats->tx);
23844 wake_up(&zatm_vcc->tx_wait);
23845 }
23846
23847diff -urNp linux-3.0.4/drivers/base/devtmpfs.c linux-3.0.4/drivers/base/devtmpfs.c
23848--- linux-3.0.4/drivers/base/devtmpfs.c 2011-07-21 22:17:23.000000000 -0400
23849+++ linux-3.0.4/drivers/base/devtmpfs.c 2011-10-06 04:17:55.000000000 -0400
23850@@ -357,7 +357,7 @@ int devtmpfs_mount(const char *mntdir)
23851 if (!dev_mnt)
23852 return 0;
23853
23854- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
23855+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
23856 if (err)
23857 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
23858 else
23859diff -urNp linux-3.0.4/drivers/base/power/wakeup.c linux-3.0.4/drivers/base/power/wakeup.c
23860--- linux-3.0.4/drivers/base/power/wakeup.c 2011-07-21 22:17:23.000000000 -0400
23861+++ linux-3.0.4/drivers/base/power/wakeup.c 2011-08-23 21:47:55.000000000 -0400
23862@@ -29,14 +29,14 @@ bool events_check_enabled;
23863 * They need to be modified together atomically, so it's better to use one
23864 * atomic variable to hold them both.
23865 */
23866-static atomic_t combined_event_count = ATOMIC_INIT(0);
23867+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
23868
23869 #define IN_PROGRESS_BITS (sizeof(int) * 4)
23870 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
23871
23872 static void split_counters(unsigned int *cnt, unsigned int *inpr)
23873 {
23874- unsigned int comb = atomic_read(&combined_event_count);
23875+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
23876
23877 *cnt = (comb >> IN_PROGRESS_BITS);
23878 *inpr = comb & MAX_IN_PROGRESS;
23879@@ -350,7 +350,7 @@ static void wakeup_source_activate(struc
23880 ws->last_time = ktime_get();
23881
23882 /* Increment the counter of events in progress. */
23883- atomic_inc(&combined_event_count);
23884+ atomic_inc_unchecked(&combined_event_count);
23885 }
23886
23887 /**
23888@@ -440,7 +440,7 @@ static void wakeup_source_deactivate(str
23889 * Increment the counter of registered wakeup events and decrement the
23890 * couter of wakeup events in progress simultaneously.
23891 */
23892- atomic_add(MAX_IN_PROGRESS, &combined_event_count);
23893+ atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
23894 }
23895
23896 /**
23897diff -urNp linux-3.0.4/drivers/block/cciss.c linux-3.0.4/drivers/block/cciss.c
23898--- linux-3.0.4/drivers/block/cciss.c 2011-07-21 22:17:23.000000000 -0400
23899+++ linux-3.0.4/drivers/block/cciss.c 2011-08-23 21:48:14.000000000 -0400
23900@@ -1179,6 +1179,8 @@ static int cciss_ioctl32_passthru(struct
23901 int err;
23902 u32 cp;
23903
23904+ memset(&arg64, 0, sizeof(arg64));
23905+
23906 err = 0;
23907 err |=
23908 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
23909@@ -2986,7 +2988,7 @@ static void start_io(ctlr_info_t *h)
23910 while (!list_empty(&h->reqQ)) {
23911 c = list_entry(h->reqQ.next, CommandList_struct, list);
23912 /* can't do anything if fifo is full */
23913- if ((h->access.fifo_full(h))) {
23914+ if ((h->access->fifo_full(h))) {
23915 dev_warn(&h->pdev->dev, "fifo full\n");
23916 break;
23917 }
23918@@ -2996,7 +2998,7 @@ static void start_io(ctlr_info_t *h)
23919 h->Qdepth--;
23920
23921 /* Tell the controller execute command */
23922- h->access.submit_command(h, c);
23923+ h->access->submit_command(h, c);
23924
23925 /* Put job onto the completed Q */
23926 addQ(&h->cmpQ, c);
23927@@ -3422,17 +3424,17 @@ startio:
23928
23929 static inline unsigned long get_next_completion(ctlr_info_t *h)
23930 {
23931- return h->access.command_completed(h);
23932+ return h->access->command_completed(h);
23933 }
23934
23935 static inline int interrupt_pending(ctlr_info_t *h)
23936 {
23937- return h->access.intr_pending(h);
23938+ return h->access->intr_pending(h);
23939 }
23940
23941 static inline long interrupt_not_for_us(ctlr_info_t *h)
23942 {
23943- return ((h->access.intr_pending(h) == 0) ||
23944+ return ((h->access->intr_pending(h) == 0) ||
23945 (h->interrupts_enabled == 0));
23946 }
23947
23948@@ -3465,7 +3467,7 @@ static inline u32 next_command(ctlr_info
23949 u32 a;
23950
23951 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
23952- return h->access.command_completed(h);
23953+ return h->access->command_completed(h);
23954
23955 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
23956 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
23957@@ -4020,7 +4022,7 @@ static void __devinit cciss_put_controll
23958 trans_support & CFGTBL_Trans_use_short_tags);
23959
23960 /* Change the access methods to the performant access methods */
23961- h->access = SA5_performant_access;
23962+ h->access = &SA5_performant_access;
23963 h->transMethod = CFGTBL_Trans_Performant;
23964
23965 return;
23966@@ -4292,7 +4294,7 @@ static int __devinit cciss_pci_init(ctlr
23967 if (prod_index < 0)
23968 return -ENODEV;
23969 h->product_name = products[prod_index].product_name;
23970- h->access = *(products[prod_index].access);
23971+ h->access = products[prod_index].access;
23972
23973 if (cciss_board_disabled(h)) {
23974 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
23975@@ -5002,7 +5004,7 @@ reinit_after_soft_reset:
23976 }
23977
23978 /* make sure the board interrupts are off */
23979- h->access.set_intr_mask(h, CCISS_INTR_OFF);
23980+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
23981 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
23982 if (rc)
23983 goto clean2;
23984@@ -5054,7 +5056,7 @@ reinit_after_soft_reset:
23985 * fake ones to scoop up any residual completions.
23986 */
23987 spin_lock_irqsave(&h->lock, flags);
23988- h->access.set_intr_mask(h, CCISS_INTR_OFF);
23989+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
23990 spin_unlock_irqrestore(&h->lock, flags);
23991 free_irq(h->intr[PERF_MODE_INT], h);
23992 rc = cciss_request_irq(h, cciss_msix_discard_completions,
23993@@ -5074,9 +5076,9 @@ reinit_after_soft_reset:
23994 dev_info(&h->pdev->dev, "Board READY.\n");
23995 dev_info(&h->pdev->dev,
23996 "Waiting for stale completions to drain.\n");
23997- h->access.set_intr_mask(h, CCISS_INTR_ON);
23998+ h->access->set_intr_mask(h, CCISS_INTR_ON);
23999 msleep(10000);
24000- h->access.set_intr_mask(h, CCISS_INTR_OFF);
24001+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
24002
24003 rc = controller_reset_failed(h->cfgtable);
24004 if (rc)
24005@@ -5099,7 +5101,7 @@ reinit_after_soft_reset:
24006 cciss_scsi_setup(h);
24007
24008 /* Turn the interrupts on so we can service requests */
24009- h->access.set_intr_mask(h, CCISS_INTR_ON);
24010+ h->access->set_intr_mask(h, CCISS_INTR_ON);
24011
24012 /* Get the firmware version */
24013 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
24014@@ -5171,7 +5173,7 @@ static void cciss_shutdown(struct pci_de
24015 kfree(flush_buf);
24016 if (return_code != IO_OK)
24017 dev_warn(&h->pdev->dev, "Error flushing cache\n");
24018- h->access.set_intr_mask(h, CCISS_INTR_OFF);
24019+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
24020 free_irq(h->intr[PERF_MODE_INT], h);
24021 }
24022
24023diff -urNp linux-3.0.4/drivers/block/cciss.h linux-3.0.4/drivers/block/cciss.h
24024--- linux-3.0.4/drivers/block/cciss.h 2011-09-02 18:11:21.000000000 -0400
24025+++ linux-3.0.4/drivers/block/cciss.h 2011-08-23 21:47:55.000000000 -0400
24026@@ -100,7 +100,7 @@ struct ctlr_info
24027 /* information about each logical volume */
24028 drive_info_struct *drv[CISS_MAX_LUN];
24029
24030- struct access_method access;
24031+ struct access_method *access;
24032
24033 /* queue and queue Info */
24034 struct list_head reqQ;
24035diff -urNp linux-3.0.4/drivers/block/cpqarray.c linux-3.0.4/drivers/block/cpqarray.c
24036--- linux-3.0.4/drivers/block/cpqarray.c 2011-07-21 22:17:23.000000000 -0400
24037+++ linux-3.0.4/drivers/block/cpqarray.c 2011-08-23 21:48:14.000000000 -0400
24038@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_c
24039 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
24040 goto Enomem4;
24041 }
24042- hba[i]->access.set_intr_mask(hba[i], 0);
24043+ hba[i]->access->set_intr_mask(hba[i], 0);
24044 if (request_irq(hba[i]->intr, do_ida_intr,
24045 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
24046 {
24047@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_c
24048 add_timer(&hba[i]->timer);
24049
24050 /* Enable IRQ now that spinlock and rate limit timer are set up */
24051- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
24052+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
24053
24054 for(j=0; j<NWD; j++) {
24055 struct gendisk *disk = ida_gendisk[i][j];
24056@@ -694,7 +694,7 @@ DBGINFO(
24057 for(i=0; i<NR_PRODUCTS; i++) {
24058 if (board_id == products[i].board_id) {
24059 c->product_name = products[i].product_name;
24060- c->access = *(products[i].access);
24061+ c->access = products[i].access;
24062 break;
24063 }
24064 }
24065@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detec
24066 hba[ctlr]->intr = intr;
24067 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
24068 hba[ctlr]->product_name = products[j].product_name;
24069- hba[ctlr]->access = *(products[j].access);
24070+ hba[ctlr]->access = products[j].access;
24071 hba[ctlr]->ctlr = ctlr;
24072 hba[ctlr]->board_id = board_id;
24073 hba[ctlr]->pci_dev = NULL; /* not PCI */
24074@@ -911,6 +911,8 @@ static void do_ida_request(struct reques
24075 struct scatterlist tmp_sg[SG_MAX];
24076 int i, dir, seg;
24077
24078+ pax_track_stack();
24079+
24080 queue_next:
24081 creq = blk_peek_request(q);
24082 if (!creq)
24083@@ -980,7 +982,7 @@ static void start_io(ctlr_info_t *h)
24084
24085 while((c = h->reqQ) != NULL) {
24086 /* Can't do anything if we're busy */
24087- if (h->access.fifo_full(h) == 0)
24088+ if (h->access->fifo_full(h) == 0)
24089 return;
24090
24091 /* Get the first entry from the request Q */
24092@@ -988,7 +990,7 @@ static void start_io(ctlr_info_t *h)
24093 h->Qdepth--;
24094
24095 /* Tell the controller to do our bidding */
24096- h->access.submit_command(h, c);
24097+ h->access->submit_command(h, c);
24098
24099 /* Get onto the completion Q */
24100 addQ(&h->cmpQ, c);
24101@@ -1050,7 +1052,7 @@ static irqreturn_t do_ida_intr(int irq,
24102 unsigned long flags;
24103 __u32 a,a1;
24104
24105- istat = h->access.intr_pending(h);
24106+ istat = h->access->intr_pending(h);
24107 /* Is this interrupt for us? */
24108 if (istat == 0)
24109 return IRQ_NONE;
24110@@ -1061,7 +1063,7 @@ static irqreturn_t do_ida_intr(int irq,
24111 */
24112 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
24113 if (istat & FIFO_NOT_EMPTY) {
24114- while((a = h->access.command_completed(h))) {
24115+ while((a = h->access->command_completed(h))) {
24116 a1 = a; a &= ~3;
24117 if ((c = h->cmpQ) == NULL)
24118 {
24119@@ -1449,11 +1451,11 @@ static int sendcmd(
24120 /*
24121 * Disable interrupt
24122 */
24123- info_p->access.set_intr_mask(info_p, 0);
24124+ info_p->access->set_intr_mask(info_p, 0);
24125 /* Make sure there is room in the command FIFO */
24126 /* Actually it should be completely empty at this time. */
24127 for (i = 200000; i > 0; i--) {
24128- temp = info_p->access.fifo_full(info_p);
24129+ temp = info_p->access->fifo_full(info_p);
24130 if (temp != 0) {
24131 break;
24132 }
24133@@ -1466,7 +1468,7 @@ DBG(
24134 /*
24135 * Send the cmd
24136 */
24137- info_p->access.submit_command(info_p, c);
24138+ info_p->access->submit_command(info_p, c);
24139 complete = pollcomplete(ctlr);
24140
24141 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
24142@@ -1549,9 +1551,9 @@ static int revalidate_allvol(ctlr_info_t
24143 * we check the new geometry. Then turn interrupts back on when
24144 * we're done.
24145 */
24146- host->access.set_intr_mask(host, 0);
24147+ host->access->set_intr_mask(host, 0);
24148 getgeometry(ctlr);
24149- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
24150+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
24151
24152 for(i=0; i<NWD; i++) {
24153 struct gendisk *disk = ida_gendisk[ctlr][i];
24154@@ -1591,7 +1593,7 @@ static int pollcomplete(int ctlr)
24155 /* Wait (up to 2 seconds) for a command to complete */
24156
24157 for (i = 200000; i > 0; i--) {
24158- done = hba[ctlr]->access.command_completed(hba[ctlr]);
24159+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
24160 if (done == 0) {
24161 udelay(10); /* a short fixed delay */
24162 } else
24163diff -urNp linux-3.0.4/drivers/block/cpqarray.h linux-3.0.4/drivers/block/cpqarray.h
24164--- linux-3.0.4/drivers/block/cpqarray.h 2011-07-21 22:17:23.000000000 -0400
24165+++ linux-3.0.4/drivers/block/cpqarray.h 2011-08-23 21:47:55.000000000 -0400
24166@@ -99,7 +99,7 @@ struct ctlr_info {
24167 drv_info_t drv[NWD];
24168 struct proc_dir_entry *proc;
24169
24170- struct access_method access;
24171+ struct access_method *access;
24172
24173 cmdlist_t *reqQ;
24174 cmdlist_t *cmpQ;
24175diff -urNp linux-3.0.4/drivers/block/DAC960.c linux-3.0.4/drivers/block/DAC960.c
24176--- linux-3.0.4/drivers/block/DAC960.c 2011-07-21 22:17:23.000000000 -0400
24177+++ linux-3.0.4/drivers/block/DAC960.c 2011-08-23 21:48:14.000000000 -0400
24178@@ -1980,6 +1980,8 @@ static bool DAC960_V1_ReadDeviceConfigur
24179 unsigned long flags;
24180 int Channel, TargetID;
24181
24182+ pax_track_stack();
24183+
24184 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
24185 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
24186 sizeof(DAC960_SCSI_Inquiry_T) +
24187diff -urNp linux-3.0.4/drivers/block/drbd/drbd_int.h linux-3.0.4/drivers/block/drbd/drbd_int.h
24188--- linux-3.0.4/drivers/block/drbd/drbd_int.h 2011-07-21 22:17:23.000000000 -0400
24189+++ linux-3.0.4/drivers/block/drbd/drbd_int.h 2011-10-06 04:17:55.000000000 -0400
24190@@ -737,7 +737,7 @@ struct drbd_request;
24191 struct drbd_epoch {
24192 struct list_head list;
24193 unsigned int barrier_nr;
24194- atomic_t epoch_size; /* increased on every request added. */
24195+ atomic_unchecked_t epoch_size; /* increased on every request added. */
24196 atomic_t active; /* increased on every req. added, and dec on every finished. */
24197 unsigned long flags;
24198 };
24199@@ -1109,7 +1109,7 @@ struct drbd_conf {
24200 void *int_dig_in;
24201 void *int_dig_vv;
24202 wait_queue_head_t seq_wait;
24203- atomic_t packet_seq;
24204+ atomic_unchecked_t packet_seq;
24205 unsigned int peer_seq;
24206 spinlock_t peer_seq_lock;
24207 unsigned int minor;
24208@@ -1618,30 +1618,30 @@ static inline int drbd_setsockopt(struct
24209
24210 static inline void drbd_tcp_cork(struct socket *sock)
24211 {
24212- int __user val = 1;
24213+ int val = 1;
24214 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
24215- (char __user *)&val, sizeof(val));
24216+ (char __force_user *)&val, sizeof(val));
24217 }
24218
24219 static inline void drbd_tcp_uncork(struct socket *sock)
24220 {
24221- int __user val = 0;
24222+ int val = 0;
24223 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
24224- (char __user *)&val, sizeof(val));
24225+ (char __force_user *)&val, sizeof(val));
24226 }
24227
24228 static inline void drbd_tcp_nodelay(struct socket *sock)
24229 {
24230- int __user val = 1;
24231+ int val = 1;
24232 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
24233- (char __user *)&val, sizeof(val));
24234+ (char __force_user *)&val, sizeof(val));
24235 }
24236
24237 static inline void drbd_tcp_quickack(struct socket *sock)
24238 {
24239- int __user val = 2;
24240+ int val = 2;
24241 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
24242- (char __user *)&val, sizeof(val));
24243+ (char __force_user *)&val, sizeof(val));
24244 }
24245
24246 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
24247diff -urNp linux-3.0.4/drivers/block/drbd/drbd_main.c linux-3.0.4/drivers/block/drbd/drbd_main.c
24248--- linux-3.0.4/drivers/block/drbd/drbd_main.c 2011-07-21 22:17:23.000000000 -0400
24249+++ linux-3.0.4/drivers/block/drbd/drbd_main.c 2011-08-23 21:47:55.000000000 -0400
24250@@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_co
24251 p.sector = sector;
24252 p.block_id = block_id;
24253 p.blksize = blksize;
24254- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
24255+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
24256
24257 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
24258 return false;
24259@@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *m
24260 p.sector = cpu_to_be64(req->sector);
24261 p.block_id = (unsigned long)req;
24262 p.seq_num = cpu_to_be32(req->seq_num =
24263- atomic_add_return(1, &mdev->packet_seq));
24264+ atomic_add_return_unchecked(1, &mdev->packet_seq));
24265
24266 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
24267
24268@@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_
24269 atomic_set(&mdev->unacked_cnt, 0);
24270 atomic_set(&mdev->local_cnt, 0);
24271 atomic_set(&mdev->net_cnt, 0);
24272- atomic_set(&mdev->packet_seq, 0);
24273+ atomic_set_unchecked(&mdev->packet_seq, 0);
24274 atomic_set(&mdev->pp_in_use, 0);
24275 atomic_set(&mdev->pp_in_use_by_net, 0);
24276 atomic_set(&mdev->rs_sect_in, 0);
24277@@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf
24278 mdev->receiver.t_state);
24279
24280 /* no need to lock it, I'm the only thread alive */
24281- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
24282- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
24283+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
24284+ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
24285 mdev->al_writ_cnt =
24286 mdev->bm_writ_cnt =
24287 mdev->read_cnt =
24288diff -urNp linux-3.0.4/drivers/block/drbd/drbd_nl.c linux-3.0.4/drivers/block/drbd/drbd_nl.c
24289--- linux-3.0.4/drivers/block/drbd/drbd_nl.c 2011-07-21 22:17:23.000000000 -0400
24290+++ linux-3.0.4/drivers/block/drbd/drbd_nl.c 2011-08-23 21:47:55.000000000 -0400
24291@@ -2359,7 +2359,7 @@ static void drbd_connector_callback(stru
24292 module_put(THIS_MODULE);
24293 }
24294
24295-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
24296+static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
24297
24298 static unsigned short *
24299 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
24300@@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *
24301 cn_reply->id.idx = CN_IDX_DRBD;
24302 cn_reply->id.val = CN_VAL_DRBD;
24303
24304- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
24305+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
24306 cn_reply->ack = 0; /* not used here. */
24307 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
24308 (int)((char *)tl - (char *)reply->tag_list);
24309@@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_co
24310 cn_reply->id.idx = CN_IDX_DRBD;
24311 cn_reply->id.val = CN_VAL_DRBD;
24312
24313- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
24314+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
24315 cn_reply->ack = 0; /* not used here. */
24316 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
24317 (int)((char *)tl - (char *)reply->tag_list);
24318@@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mde
24319 cn_reply->id.idx = CN_IDX_DRBD;
24320 cn_reply->id.val = CN_VAL_DRBD;
24321
24322- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
24323+ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
24324 cn_reply->ack = 0; // not used here.
24325 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
24326 (int)((char*)tl - (char*)reply->tag_list);
24327@@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drb
24328 cn_reply->id.idx = CN_IDX_DRBD;
24329 cn_reply->id.val = CN_VAL_DRBD;
24330
24331- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
24332+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
24333 cn_reply->ack = 0; /* not used here. */
24334 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
24335 (int)((char *)tl - (char *)reply->tag_list);
24336diff -urNp linux-3.0.4/drivers/block/drbd/drbd_receiver.c linux-3.0.4/drivers/block/drbd/drbd_receiver.c
24337--- linux-3.0.4/drivers/block/drbd/drbd_receiver.c 2011-07-21 22:17:23.000000000 -0400
24338+++ linux-3.0.4/drivers/block/drbd/drbd_receiver.c 2011-08-23 21:47:55.000000000 -0400
24339@@ -894,7 +894,7 @@ retry:
24340 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
24341 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
24342
24343- atomic_set(&mdev->packet_seq, 0);
24344+ atomic_set_unchecked(&mdev->packet_seq, 0);
24345 mdev->peer_seq = 0;
24346
24347 drbd_thread_start(&mdev->asender);
24348@@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish
24349 do {
24350 next_epoch = NULL;
24351
24352- epoch_size = atomic_read(&epoch->epoch_size);
24353+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
24354
24355 switch (ev & ~EV_CLEANUP) {
24356 case EV_PUT:
24357@@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish
24358 rv = FE_DESTROYED;
24359 } else {
24360 epoch->flags = 0;
24361- atomic_set(&epoch->epoch_size, 0);
24362+ atomic_set_unchecked(&epoch->epoch_size, 0);
24363 /* atomic_set(&epoch->active, 0); is already zero */
24364 if (rv == FE_STILL_LIVE)
24365 rv = FE_RECYCLED;
24366@@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_c
24367 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
24368 drbd_flush(mdev);
24369
24370- if (atomic_read(&mdev->current_epoch->epoch_size)) {
24371+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
24372 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
24373 if (epoch)
24374 break;
24375 }
24376
24377 epoch = mdev->current_epoch;
24378- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
24379+ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
24380
24381 D_ASSERT(atomic_read(&epoch->active) == 0);
24382 D_ASSERT(epoch->flags == 0);
24383@@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_c
24384 }
24385
24386 epoch->flags = 0;
24387- atomic_set(&epoch->epoch_size, 0);
24388+ atomic_set_unchecked(&epoch->epoch_size, 0);
24389 atomic_set(&epoch->active, 0);
24390
24391 spin_lock(&mdev->epoch_lock);
24392- if (atomic_read(&mdev->current_epoch->epoch_size)) {
24393+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
24394 list_add(&epoch->list, &mdev->current_epoch->list);
24395 mdev->current_epoch = epoch;
24396 mdev->epochs++;
24397@@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf
24398 spin_unlock(&mdev->peer_seq_lock);
24399
24400 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
24401- atomic_inc(&mdev->current_epoch->epoch_size);
24402+ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
24403 return drbd_drain_block(mdev, data_size);
24404 }
24405
24406@@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf
24407
24408 spin_lock(&mdev->epoch_lock);
24409 e->epoch = mdev->current_epoch;
24410- atomic_inc(&e->epoch->epoch_size);
24411+ atomic_inc_unchecked(&e->epoch->epoch_size);
24412 atomic_inc(&e->epoch->active);
24413 spin_unlock(&mdev->epoch_lock);
24414
24415@@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_
24416 D_ASSERT(list_empty(&mdev->done_ee));
24417
24418 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
24419- atomic_set(&mdev->current_epoch->epoch_size, 0);
24420+ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
24421 D_ASSERT(list_empty(&mdev->current_epoch->list));
24422 }
24423
24424diff -urNp linux-3.0.4/drivers/block/loop.c linux-3.0.4/drivers/block/loop.c
24425--- linux-3.0.4/drivers/block/loop.c 2011-09-02 18:11:26.000000000 -0400
24426+++ linux-3.0.4/drivers/block/loop.c 2011-10-06 04:17:55.000000000 -0400
24427@@ -283,7 +283,7 @@ static int __do_lo_send_write(struct fil
24428 mm_segment_t old_fs = get_fs();
24429
24430 set_fs(get_ds());
24431- bw = file->f_op->write(file, buf, len, &pos);
24432+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
24433 set_fs(old_fs);
24434 if (likely(bw == len))
24435 return 0;
24436diff -urNp linux-3.0.4/drivers/block/nbd.c linux-3.0.4/drivers/block/nbd.c
24437--- linux-3.0.4/drivers/block/nbd.c 2011-07-21 22:17:23.000000000 -0400
24438+++ linux-3.0.4/drivers/block/nbd.c 2011-08-23 21:48:14.000000000 -0400
24439@@ -157,6 +157,8 @@ static int sock_xmit(struct nbd_device *
24440 struct kvec iov;
24441 sigset_t blocked, oldset;
24442
24443+ pax_track_stack();
24444+
24445 if (unlikely(!sock)) {
24446 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
24447 lo->disk->disk_name, (send ? "send" : "recv"));
24448@@ -572,6 +574,8 @@ static void do_nbd_request(struct reques
24449 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
24450 unsigned int cmd, unsigned long arg)
24451 {
24452+ pax_track_stack();
24453+
24454 switch (cmd) {
24455 case NBD_DISCONNECT: {
24456 struct request sreq;
24457diff -urNp linux-3.0.4/drivers/char/agp/frontend.c linux-3.0.4/drivers/char/agp/frontend.c
24458--- linux-3.0.4/drivers/char/agp/frontend.c 2011-07-21 22:17:23.000000000 -0400
24459+++ linux-3.0.4/drivers/char/agp/frontend.c 2011-08-23 21:47:55.000000000 -0400
24460@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct ag
24461 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
24462 return -EFAULT;
24463
24464- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
24465+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
24466 return -EFAULT;
24467
24468 client = agp_find_client_by_pid(reserve.pid);
24469diff -urNp linux-3.0.4/drivers/char/briq_panel.c linux-3.0.4/drivers/char/briq_panel.c
24470--- linux-3.0.4/drivers/char/briq_panel.c 2011-07-21 22:17:23.000000000 -0400
24471+++ linux-3.0.4/drivers/char/briq_panel.c 2011-08-23 21:48:14.000000000 -0400
24472@@ -9,6 +9,7 @@
24473 #include <linux/types.h>
24474 #include <linux/errno.h>
24475 #include <linux/tty.h>
24476+#include <linux/mutex.h>
24477 #include <linux/timer.h>
24478 #include <linux/kernel.h>
24479 #include <linux/wait.h>
24480@@ -34,6 +35,7 @@ static int vfd_is_open;
24481 static unsigned char vfd[40];
24482 static int vfd_cursor;
24483 static unsigned char ledpb, led;
24484+static DEFINE_MUTEX(vfd_mutex);
24485
24486 static void update_vfd(void)
24487 {
24488@@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct f
24489 if (!vfd_is_open)
24490 return -EBUSY;
24491
24492+ mutex_lock(&vfd_mutex);
24493 for (;;) {
24494 char c;
24495 if (!indx)
24496 break;
24497- if (get_user(c, buf))
24498+ if (get_user(c, buf)) {
24499+ mutex_unlock(&vfd_mutex);
24500 return -EFAULT;
24501+ }
24502 if (esc) {
24503 set_led(c);
24504 esc = 0;
24505@@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct f
24506 buf++;
24507 }
24508 update_vfd();
24509+ mutex_unlock(&vfd_mutex);
24510
24511 return len;
24512 }
24513diff -urNp linux-3.0.4/drivers/char/genrtc.c linux-3.0.4/drivers/char/genrtc.c
24514--- linux-3.0.4/drivers/char/genrtc.c 2011-07-21 22:17:23.000000000 -0400
24515+++ linux-3.0.4/drivers/char/genrtc.c 2011-08-23 21:48:14.000000000 -0400
24516@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *fi
24517 switch (cmd) {
24518
24519 case RTC_PLL_GET:
24520+ memset(&pll, 0, sizeof(pll));
24521 if (get_rtc_pll(&pll))
24522 return -EINVAL;
24523 else
24524diff -urNp linux-3.0.4/drivers/char/hpet.c linux-3.0.4/drivers/char/hpet.c
24525--- linux-3.0.4/drivers/char/hpet.c 2011-07-21 22:17:23.000000000 -0400
24526+++ linux-3.0.4/drivers/char/hpet.c 2011-08-23 21:47:55.000000000 -0400
24527@@ -572,7 +572,7 @@ static inline unsigned long hpet_time_di
24528 }
24529
24530 static int
24531-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
24532+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
24533 struct hpet_info *info)
24534 {
24535 struct hpet_timer __iomem *timer;
24536diff -urNp linux-3.0.4/drivers/char/ipmi/ipmi_msghandler.c linux-3.0.4/drivers/char/ipmi/ipmi_msghandler.c
24537--- linux-3.0.4/drivers/char/ipmi/ipmi_msghandler.c 2011-07-21 22:17:23.000000000 -0400
24538+++ linux-3.0.4/drivers/char/ipmi/ipmi_msghandler.c 2011-08-23 21:48:14.000000000 -0400
24539@@ -415,7 +415,7 @@ struct ipmi_smi {
24540 struct proc_dir_entry *proc_dir;
24541 char proc_dir_name[10];
24542
24543- atomic_t stats[IPMI_NUM_STATS];
24544+ atomic_unchecked_t stats[IPMI_NUM_STATS];
24545
24546 /*
24547 * run_to_completion duplicate of smb_info, smi_info
24548@@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
24549
24550
24551 #define ipmi_inc_stat(intf, stat) \
24552- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
24553+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
24554 #define ipmi_get_stat(intf, stat) \
24555- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
24556+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
24557
24558 static int is_lan_addr(struct ipmi_addr *addr)
24559 {
24560@@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
24561 INIT_LIST_HEAD(&intf->cmd_rcvrs);
24562 init_waitqueue_head(&intf->waitq);
24563 for (i = 0; i < IPMI_NUM_STATS; i++)
24564- atomic_set(&intf->stats[i], 0);
24565+ atomic_set_unchecked(&intf->stats[i], 0);
24566
24567 intf->proc_dir = NULL;
24568
24569@@ -4220,6 +4220,8 @@ static void send_panic_events(char *str)
24570 struct ipmi_smi_msg smi_msg;
24571 struct ipmi_recv_msg recv_msg;
24572
24573+ pax_track_stack();
24574+
24575 si = (struct ipmi_system_interface_addr *) &addr;
24576 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
24577 si->channel = IPMI_BMC_CHANNEL;
24578diff -urNp linux-3.0.4/drivers/char/ipmi/ipmi_si_intf.c linux-3.0.4/drivers/char/ipmi/ipmi_si_intf.c
24579--- linux-3.0.4/drivers/char/ipmi/ipmi_si_intf.c 2011-07-21 22:17:23.000000000 -0400
24580+++ linux-3.0.4/drivers/char/ipmi/ipmi_si_intf.c 2011-08-23 21:47:55.000000000 -0400
24581@@ -277,7 +277,7 @@ struct smi_info {
24582 unsigned char slave_addr;
24583
24584 /* Counters and things for the proc filesystem. */
24585- atomic_t stats[SI_NUM_STATS];
24586+ atomic_unchecked_t stats[SI_NUM_STATS];
24587
24588 struct task_struct *thread;
24589
24590@@ -286,9 +286,9 @@ struct smi_info {
24591 };
24592
24593 #define smi_inc_stat(smi, stat) \
24594- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
24595+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
24596 #define smi_get_stat(smi, stat) \
24597- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
24598+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
24599
24600 #define SI_MAX_PARMS 4
24601
24602@@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info
24603 atomic_set(&new_smi->req_events, 0);
24604 new_smi->run_to_completion = 0;
24605 for (i = 0; i < SI_NUM_STATS; i++)
24606- atomic_set(&new_smi->stats[i], 0);
24607+ atomic_set_unchecked(&new_smi->stats[i], 0);
24608
24609 new_smi->interrupt_disabled = 1;
24610 atomic_set(&new_smi->stop_operation, 0);
24611diff -urNp linux-3.0.4/drivers/char/Kconfig linux-3.0.4/drivers/char/Kconfig
24612--- linux-3.0.4/drivers/char/Kconfig 2011-07-21 22:17:23.000000000 -0400
24613+++ linux-3.0.4/drivers/char/Kconfig 2011-08-23 21:48:14.000000000 -0400
24614@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
24615
24616 config DEVKMEM
24617 bool "/dev/kmem virtual device support"
24618- default y
24619+ default n
24620+ depends on !GRKERNSEC_KMEM
24621 help
24622 Say Y here if you want to support the /dev/kmem device. The
24623 /dev/kmem device is rarely used, but can be used for certain
24624@@ -596,6 +597,7 @@ config DEVPORT
24625 bool
24626 depends on !M68K
24627 depends on ISA || PCI
24628+ depends on !GRKERNSEC_KMEM
24629 default y
24630
24631 source "drivers/s390/char/Kconfig"
24632diff -urNp linux-3.0.4/drivers/char/mem.c linux-3.0.4/drivers/char/mem.c
24633--- linux-3.0.4/drivers/char/mem.c 2011-07-21 22:17:23.000000000 -0400
24634+++ linux-3.0.4/drivers/char/mem.c 2011-08-23 21:48:14.000000000 -0400
24635@@ -18,6 +18,7 @@
24636 #include <linux/raw.h>
24637 #include <linux/tty.h>
24638 #include <linux/capability.h>
24639+#include <linux/security.h>
24640 #include <linux/ptrace.h>
24641 #include <linux/device.h>
24642 #include <linux/highmem.h>
24643@@ -34,6 +35,10 @@
24644 # include <linux/efi.h>
24645 #endif
24646
24647+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
24648+extern struct file_operations grsec_fops;
24649+#endif
24650+
24651 static inline unsigned long size_inside_page(unsigned long start,
24652 unsigned long size)
24653 {
24654@@ -65,9 +70,13 @@ static inline int range_is_allowed(unsig
24655
24656 while (cursor < to) {
24657 if (!devmem_is_allowed(pfn)) {
24658+#ifdef CONFIG_GRKERNSEC_KMEM
24659+ gr_handle_mem_readwrite(from, to);
24660+#else
24661 printk(KERN_INFO
24662 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
24663 current->comm, from, to);
24664+#endif
24665 return 0;
24666 }
24667 cursor += PAGE_SIZE;
24668@@ -75,6 +84,11 @@ static inline int range_is_allowed(unsig
24669 }
24670 return 1;
24671 }
24672+#elif defined(CONFIG_GRKERNSEC_KMEM)
24673+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
24674+{
24675+ return 0;
24676+}
24677 #else
24678 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
24679 {
24680@@ -117,6 +131,7 @@ static ssize_t read_mem(struct file *fil
24681
24682 while (count > 0) {
24683 unsigned long remaining;
24684+ char *temp;
24685
24686 sz = size_inside_page(p, count);
24687
24688@@ -132,7 +147,23 @@ static ssize_t read_mem(struct file *fil
24689 if (!ptr)
24690 return -EFAULT;
24691
24692- remaining = copy_to_user(buf, ptr, sz);
24693+#ifdef CONFIG_PAX_USERCOPY
24694+ temp = kmalloc(sz, GFP_KERNEL);
24695+ if (!temp) {
24696+ unxlate_dev_mem_ptr(p, ptr);
24697+ return -ENOMEM;
24698+ }
24699+ memcpy(temp, ptr, sz);
24700+#else
24701+ temp = ptr;
24702+#endif
24703+
24704+ remaining = copy_to_user(buf, temp, sz);
24705+
24706+#ifdef CONFIG_PAX_USERCOPY
24707+ kfree(temp);
24708+#endif
24709+
24710 unxlate_dev_mem_ptr(p, ptr);
24711 if (remaining)
24712 return -EFAULT;
24713@@ -395,9 +426,8 @@ static ssize_t read_kmem(struct file *fi
24714 size_t count, loff_t *ppos)
24715 {
24716 unsigned long p = *ppos;
24717- ssize_t low_count, read, sz;
24718+ ssize_t low_count, read, sz, err = 0;
24719 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
24720- int err = 0;
24721
24722 read = 0;
24723 if (p < (unsigned long) high_memory) {
24724@@ -419,6 +449,8 @@ static ssize_t read_kmem(struct file *fi
24725 }
24726 #endif
24727 while (low_count > 0) {
24728+ char *temp;
24729+
24730 sz = size_inside_page(p, low_count);
24731
24732 /*
24733@@ -428,7 +460,22 @@ static ssize_t read_kmem(struct file *fi
24734 */
24735 kbuf = xlate_dev_kmem_ptr((char *)p);
24736
24737- if (copy_to_user(buf, kbuf, sz))
24738+#ifdef CONFIG_PAX_USERCOPY
24739+ temp = kmalloc(sz, GFP_KERNEL);
24740+ if (!temp)
24741+ return -ENOMEM;
24742+ memcpy(temp, kbuf, sz);
24743+#else
24744+ temp = kbuf;
24745+#endif
24746+
24747+ err = copy_to_user(buf, temp, sz);
24748+
24749+#ifdef CONFIG_PAX_USERCOPY
24750+ kfree(temp);
24751+#endif
24752+
24753+ if (err)
24754 return -EFAULT;
24755 buf += sz;
24756 p += sz;
24757@@ -866,6 +913,9 @@ static const struct memdev {
24758 #ifdef CONFIG_CRASH_DUMP
24759 [12] = { "oldmem", 0, &oldmem_fops, NULL },
24760 #endif
24761+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
24762+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
24763+#endif
24764 };
24765
24766 static int memory_open(struct inode *inode, struct file *filp)
24767diff -urNp linux-3.0.4/drivers/char/nvram.c linux-3.0.4/drivers/char/nvram.c
24768--- linux-3.0.4/drivers/char/nvram.c 2011-07-21 22:17:23.000000000 -0400
24769+++ linux-3.0.4/drivers/char/nvram.c 2011-08-23 21:47:55.000000000 -0400
24770@@ -246,7 +246,7 @@ static ssize_t nvram_read(struct file *f
24771
24772 spin_unlock_irq(&rtc_lock);
24773
24774- if (copy_to_user(buf, contents, tmp - contents))
24775+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
24776 return -EFAULT;
24777
24778 *ppos = i;
24779diff -urNp linux-3.0.4/drivers/char/random.c linux-3.0.4/drivers/char/random.c
24780--- linux-3.0.4/drivers/char/random.c 2011-09-02 18:11:21.000000000 -0400
24781+++ linux-3.0.4/drivers/char/random.c 2011-08-23 21:48:14.000000000 -0400
24782@@ -261,8 +261,13 @@
24783 /*
24784 * Configuration information
24785 */
24786+#ifdef CONFIG_GRKERNSEC_RANDNET
24787+#define INPUT_POOL_WORDS 512
24788+#define OUTPUT_POOL_WORDS 128
24789+#else
24790 #define INPUT_POOL_WORDS 128
24791 #define OUTPUT_POOL_WORDS 32
24792+#endif
24793 #define SEC_XFER_SIZE 512
24794 #define EXTRACT_SIZE 10
24795
24796@@ -300,10 +305,17 @@ static struct poolinfo {
24797 int poolwords;
24798 int tap1, tap2, tap3, tap4, tap5;
24799 } poolinfo_table[] = {
24800+#ifdef CONFIG_GRKERNSEC_RANDNET
24801+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
24802+ { 512, 411, 308, 208, 104, 1 },
24803+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
24804+ { 128, 103, 76, 51, 25, 1 },
24805+#else
24806 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
24807 { 128, 103, 76, 51, 25, 1 },
24808 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
24809 { 32, 26, 20, 14, 7, 1 },
24810+#endif
24811 #if 0
24812 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
24813 { 2048, 1638, 1231, 819, 411, 1 },
24814@@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(stru
24815
24816 extract_buf(r, tmp);
24817 i = min_t(int, nbytes, EXTRACT_SIZE);
24818- if (copy_to_user(buf, tmp, i)) {
24819+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
24820 ret = -EFAULT;
24821 break;
24822 }
24823@@ -1214,7 +1226,7 @@ EXPORT_SYMBOL(generate_random_uuid);
24824 #include <linux/sysctl.h>
24825
24826 static int min_read_thresh = 8, min_write_thresh;
24827-static int max_read_thresh = INPUT_POOL_WORDS * 32;
24828+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
24829 static int max_write_thresh = INPUT_POOL_WORDS * 32;
24830 static char sysctl_bootid[16];
24831
24832diff -urNp linux-3.0.4/drivers/char/sonypi.c linux-3.0.4/drivers/char/sonypi.c
24833--- linux-3.0.4/drivers/char/sonypi.c 2011-07-21 22:17:23.000000000 -0400
24834+++ linux-3.0.4/drivers/char/sonypi.c 2011-08-23 21:47:55.000000000 -0400
24835@@ -55,6 +55,7 @@
24836 #include <asm/uaccess.h>
24837 #include <asm/io.h>
24838 #include <asm/system.h>
24839+#include <asm/local.h>
24840
24841 #include <linux/sonypi.h>
24842
24843@@ -491,7 +492,7 @@ static struct sonypi_device {
24844 spinlock_t fifo_lock;
24845 wait_queue_head_t fifo_proc_list;
24846 struct fasync_struct *fifo_async;
24847- int open_count;
24848+ local_t open_count;
24849 int model;
24850 struct input_dev *input_jog_dev;
24851 struct input_dev *input_key_dev;
24852@@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, st
24853 static int sonypi_misc_release(struct inode *inode, struct file *file)
24854 {
24855 mutex_lock(&sonypi_device.lock);
24856- sonypi_device.open_count--;
24857+ local_dec(&sonypi_device.open_count);
24858 mutex_unlock(&sonypi_device.lock);
24859 return 0;
24860 }
24861@@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode
24862 {
24863 mutex_lock(&sonypi_device.lock);
24864 /* Flush input queue on first open */
24865- if (!sonypi_device.open_count)
24866+ if (!local_read(&sonypi_device.open_count))
24867 kfifo_reset(&sonypi_device.fifo);
24868- sonypi_device.open_count++;
24869+ local_inc(&sonypi_device.open_count);
24870 mutex_unlock(&sonypi_device.lock);
24871
24872 return 0;
24873diff -urNp linux-3.0.4/drivers/char/tpm/tpm_bios.c linux-3.0.4/drivers/char/tpm/tpm_bios.c
24874--- linux-3.0.4/drivers/char/tpm/tpm_bios.c 2011-07-21 22:17:23.000000000 -0400
24875+++ linux-3.0.4/drivers/char/tpm/tpm_bios.c 2011-10-06 04:17:55.000000000 -0400
24876@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start
24877 event = addr;
24878
24879 if ((event->event_type == 0 && event->event_size == 0) ||
24880- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
24881+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
24882 return NULL;
24883
24884 return addr;
24885@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(
24886 return NULL;
24887
24888 if ((event->event_type == 0 && event->event_size == 0) ||
24889- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
24890+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
24891 return NULL;
24892
24893 (*pos)++;
24894@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_
24895 int i;
24896
24897 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
24898- seq_putc(m, data[i]);
24899+ if (!seq_putc(m, data[i]))
24900+ return -EFAULT;
24901
24902 return 0;
24903 }
24904@@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log
24905 log->bios_event_log_end = log->bios_event_log + len;
24906
24907 virt = acpi_os_map_memory(start, len);
24908+ if (!virt) {
24909+ kfree(log->bios_event_log);
24910+ log->bios_event_log = NULL;
24911+ return -EFAULT;
24912+ }
24913
24914- memcpy(log->bios_event_log, virt, len);
24915+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
24916
24917 acpi_os_unmap_memory(virt, len);
24918 return 0;
24919diff -urNp linux-3.0.4/drivers/char/tpm/tpm.c linux-3.0.4/drivers/char/tpm/tpm.c
24920--- linux-3.0.4/drivers/char/tpm/tpm.c 2011-07-21 22:17:23.000000000 -0400
24921+++ linux-3.0.4/drivers/char/tpm/tpm.c 2011-08-23 21:48:14.000000000 -0400
24922@@ -411,7 +411,7 @@ static ssize_t tpm_transmit(struct tpm_c
24923 chip->vendor.req_complete_val)
24924 goto out_recv;
24925
24926- if ((status == chip->vendor.req_canceled)) {
24927+ if (status == chip->vendor.req_canceled) {
24928 dev_err(chip->dev, "Operation Canceled\n");
24929 rc = -ECANCELED;
24930 goto out;
24931@@ -844,6 +844,8 @@ ssize_t tpm_show_pubek(struct device *de
24932
24933 struct tpm_chip *chip = dev_get_drvdata(dev);
24934
24935+ pax_track_stack();
24936+
24937 tpm_cmd.header.in = tpm_readpubek_header;
24938 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
24939 "attempting to read the PUBEK");
24940diff -urNp linux-3.0.4/drivers/char/virtio_console.c linux-3.0.4/drivers/char/virtio_console.c
24941--- linux-3.0.4/drivers/char/virtio_console.c 2011-07-21 22:17:23.000000000 -0400
24942+++ linux-3.0.4/drivers/char/virtio_console.c 2011-10-06 04:17:55.000000000 -0400
24943@@ -555,7 +555,7 @@ static ssize_t fill_readbuf(struct port
24944 if (to_user) {
24945 ssize_t ret;
24946
24947- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
24948+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
24949 if (ret)
24950 return -EFAULT;
24951 } else {
24952@@ -654,7 +654,7 @@ static ssize_t port_fops_read(struct fil
24953 if (!port_has_data(port) && !port->host_connected)
24954 return 0;
24955
24956- return fill_readbuf(port, ubuf, count, true);
24957+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
24958 }
24959
24960 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
24961diff -urNp linux-3.0.4/drivers/crypto/hifn_795x.c linux-3.0.4/drivers/crypto/hifn_795x.c
24962--- linux-3.0.4/drivers/crypto/hifn_795x.c 2011-07-21 22:17:23.000000000 -0400
24963+++ linux-3.0.4/drivers/crypto/hifn_795x.c 2011-08-23 21:48:14.000000000 -0400
24964@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
24965 0xCA, 0x34, 0x2B, 0x2E};
24966 struct scatterlist sg;
24967
24968+ pax_track_stack();
24969+
24970 memset(src, 0, sizeof(src));
24971 memset(ctx.key, 0, sizeof(ctx.key));
24972
24973diff -urNp linux-3.0.4/drivers/crypto/padlock-aes.c linux-3.0.4/drivers/crypto/padlock-aes.c
24974--- linux-3.0.4/drivers/crypto/padlock-aes.c 2011-07-21 22:17:23.000000000 -0400
24975+++ linux-3.0.4/drivers/crypto/padlock-aes.c 2011-08-23 21:48:14.000000000 -0400
24976@@ -109,6 +109,8 @@ static int aes_set_key(struct crypto_tfm
24977 struct crypto_aes_ctx gen_aes;
24978 int cpu;
24979
24980+ pax_track_stack();
24981+
24982 if (key_len % 8) {
24983 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
24984 return -EINVAL;
24985diff -urNp linux-3.0.4/drivers/edac/edac_pci_sysfs.c linux-3.0.4/drivers/edac/edac_pci_sysfs.c
24986--- linux-3.0.4/drivers/edac/edac_pci_sysfs.c 2011-07-21 22:17:23.000000000 -0400
24987+++ linux-3.0.4/drivers/edac/edac_pci_sysfs.c 2011-08-23 21:47:55.000000000 -0400
24988@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log
24989 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
24990 static int edac_pci_poll_msec = 1000; /* one second workq period */
24991
24992-static atomic_t pci_parity_count = ATOMIC_INIT(0);
24993-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
24994+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
24995+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
24996
24997 static struct kobject *edac_pci_top_main_kobj;
24998 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
24999@@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(str
25000 edac_printk(KERN_CRIT, EDAC_PCI,
25001 "Signaled System Error on %s\n",
25002 pci_name(dev));
25003- atomic_inc(&pci_nonparity_count);
25004+ atomic_inc_unchecked(&pci_nonparity_count);
25005 }
25006
25007 if (status & (PCI_STATUS_PARITY)) {
25008@@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(str
25009 "Master Data Parity Error on %s\n",
25010 pci_name(dev));
25011
25012- atomic_inc(&pci_parity_count);
25013+ atomic_inc_unchecked(&pci_parity_count);
25014 }
25015
25016 if (status & (PCI_STATUS_DETECTED_PARITY)) {
25017@@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(str
25018 "Detected Parity Error on %s\n",
25019 pci_name(dev));
25020
25021- atomic_inc(&pci_parity_count);
25022+ atomic_inc_unchecked(&pci_parity_count);
25023 }
25024 }
25025
25026@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(str
25027 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
25028 "Signaled System Error on %s\n",
25029 pci_name(dev));
25030- atomic_inc(&pci_nonparity_count);
25031+ atomic_inc_unchecked(&pci_nonparity_count);
25032 }
25033
25034 if (status & (PCI_STATUS_PARITY)) {
25035@@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(str
25036 "Master Data Parity Error on "
25037 "%s\n", pci_name(dev));
25038
25039- atomic_inc(&pci_parity_count);
25040+ atomic_inc_unchecked(&pci_parity_count);
25041 }
25042
25043 if (status & (PCI_STATUS_DETECTED_PARITY)) {
25044@@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(str
25045 "Detected Parity Error on %s\n",
25046 pci_name(dev));
25047
25048- atomic_inc(&pci_parity_count);
25049+ atomic_inc_unchecked(&pci_parity_count);
25050 }
25051 }
25052 }
25053@@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
25054 if (!check_pci_errors)
25055 return;
25056
25057- before_count = atomic_read(&pci_parity_count);
25058+ before_count = atomic_read_unchecked(&pci_parity_count);
25059
25060 /* scan all PCI devices looking for a Parity Error on devices and
25061 * bridges.
25062@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
25063 /* Only if operator has selected panic on PCI Error */
25064 if (edac_pci_get_panic_on_pe()) {
25065 /* If the count is different 'after' from 'before' */
25066- if (before_count != atomic_read(&pci_parity_count))
25067+ if (before_count != atomic_read_unchecked(&pci_parity_count))
25068 panic("EDAC: PCI Parity Error");
25069 }
25070 }
25071diff -urNp linux-3.0.4/drivers/edac/mce_amd.h linux-3.0.4/drivers/edac/mce_amd.h
25072--- linux-3.0.4/drivers/edac/mce_amd.h 2011-07-21 22:17:23.000000000 -0400
25073+++ linux-3.0.4/drivers/edac/mce_amd.h 2011-08-23 21:47:55.000000000 -0400
25074@@ -83,7 +83,7 @@ struct amd_decoder_ops {
25075 bool (*dc_mce)(u16, u8);
25076 bool (*ic_mce)(u16, u8);
25077 bool (*nb_mce)(u16, u8);
25078-};
25079+} __no_const;
25080
25081 void amd_report_gart_errors(bool);
25082 void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32));
25083diff -urNp linux-3.0.4/drivers/firewire/core-card.c linux-3.0.4/drivers/firewire/core-card.c
25084--- linux-3.0.4/drivers/firewire/core-card.c 2011-07-21 22:17:23.000000000 -0400
25085+++ linux-3.0.4/drivers/firewire/core-card.c 2011-08-23 21:47:55.000000000 -0400
25086@@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
25087
25088 void fw_core_remove_card(struct fw_card *card)
25089 {
25090- struct fw_card_driver dummy_driver = dummy_driver_template;
25091+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
25092
25093 card->driver->update_phy_reg(card, 4,
25094 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
25095diff -urNp linux-3.0.4/drivers/firewire/core-cdev.c linux-3.0.4/drivers/firewire/core-cdev.c
25096--- linux-3.0.4/drivers/firewire/core-cdev.c 2011-09-02 18:11:21.000000000 -0400
25097+++ linux-3.0.4/drivers/firewire/core-cdev.c 2011-08-23 21:47:55.000000000 -0400
25098@@ -1313,8 +1313,7 @@ static int init_iso_resource(struct clie
25099 int ret;
25100
25101 if ((request->channels == 0 && request->bandwidth == 0) ||
25102- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
25103- request->bandwidth < 0)
25104+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
25105 return -EINVAL;
25106
25107 r = kmalloc(sizeof(*r), GFP_KERNEL);
25108diff -urNp linux-3.0.4/drivers/firewire/core.h linux-3.0.4/drivers/firewire/core.h
25109--- linux-3.0.4/drivers/firewire/core.h 2011-07-21 22:17:23.000000000 -0400
25110+++ linux-3.0.4/drivers/firewire/core.h 2011-08-23 21:47:55.000000000 -0400
25111@@ -101,6 +101,7 @@ struct fw_card_driver {
25112
25113 int (*stop_iso)(struct fw_iso_context *ctx);
25114 };
25115+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
25116
25117 void fw_card_initialize(struct fw_card *card,
25118 const struct fw_card_driver *driver, struct device *device);
25119diff -urNp linux-3.0.4/drivers/firewire/core-transaction.c linux-3.0.4/drivers/firewire/core-transaction.c
25120--- linux-3.0.4/drivers/firewire/core-transaction.c 2011-07-21 22:17:23.000000000 -0400
25121+++ linux-3.0.4/drivers/firewire/core-transaction.c 2011-08-23 21:48:14.000000000 -0400
25122@@ -37,6 +37,7 @@
25123 #include <linux/timer.h>
25124 #include <linux/types.h>
25125 #include <linux/workqueue.h>
25126+#include <linux/sched.h>
25127
25128 #include <asm/byteorder.h>
25129
25130@@ -422,6 +423,8 @@ int fw_run_transaction(struct fw_card *c
25131 struct transaction_callback_data d;
25132 struct fw_transaction t;
25133
25134+ pax_track_stack();
25135+
25136 init_timer_on_stack(&t.split_timeout_timer);
25137 init_completion(&d.done);
25138 d.payload = payload;
25139diff -urNp linux-3.0.4/drivers/firmware/dmi_scan.c linux-3.0.4/drivers/firmware/dmi_scan.c
25140--- linux-3.0.4/drivers/firmware/dmi_scan.c 2011-07-21 22:17:23.000000000 -0400
25141+++ linux-3.0.4/drivers/firmware/dmi_scan.c 2011-10-06 04:17:55.000000000 -0400
25142@@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
25143 }
25144 }
25145 else {
25146- /*
25147- * no iounmap() for that ioremap(); it would be a no-op, but
25148- * it's so early in setup that sucker gets confused into doing
25149- * what it shouldn't if we actually call it.
25150- */
25151 p = dmi_ioremap(0xF0000, 0x10000);
25152 if (p == NULL)
25153 goto error;
25154@@ -725,7 +720,7 @@ int dmi_walk(void (*decode)(const struct
25155 if (buf == NULL)
25156 return -1;
25157
25158- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
25159+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
25160
25161 iounmap(buf);
25162 return 0;
25163diff -urNp linux-3.0.4/drivers/gpio/vr41xx_giu.c linux-3.0.4/drivers/gpio/vr41xx_giu.c
25164--- linux-3.0.4/drivers/gpio/vr41xx_giu.c 2011-07-21 22:17:23.000000000 -0400
25165+++ linux-3.0.4/drivers/gpio/vr41xx_giu.c 2011-08-23 21:47:55.000000000 -0400
25166@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
25167 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
25168 maskl, pendl, maskh, pendh);
25169
25170- atomic_inc(&irq_err_count);
25171+ atomic_inc_unchecked(&irq_err_count);
25172
25173 return -EINVAL;
25174 }
25175diff -urNp linux-3.0.4/drivers/gpu/drm/drm_crtc.c linux-3.0.4/drivers/gpu/drm/drm_crtc.c
25176--- linux-3.0.4/drivers/gpu/drm/drm_crtc.c 2011-07-21 22:17:23.000000000 -0400
25177+++ linux-3.0.4/drivers/gpu/drm/drm_crtc.c 2011-10-06 04:17:55.000000000 -0400
25178@@ -1372,7 +1372,7 @@ int drm_mode_getconnector(struct drm_dev
25179 */
25180 if ((out_resp->count_modes >= mode_count) && mode_count) {
25181 copied = 0;
25182- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
25183+ mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
25184 list_for_each_entry(mode, &connector->modes, head) {
25185 drm_crtc_convert_to_umode(&u_mode, mode);
25186 if (copy_to_user(mode_ptr + copied,
25187@@ -1387,8 +1387,8 @@ int drm_mode_getconnector(struct drm_dev
25188
25189 if ((out_resp->count_props >= props_count) && props_count) {
25190 copied = 0;
25191- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
25192- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
25193+ prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
25194+ prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
25195 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
25196 if (connector->property_ids[i] != 0) {
25197 if (put_user(connector->property_ids[i],
25198@@ -1410,7 +1410,7 @@ int drm_mode_getconnector(struct drm_dev
25199
25200 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
25201 copied = 0;
25202- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
25203+ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
25204 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
25205 if (connector->encoder_ids[i] != 0) {
25206 if (put_user(connector->encoder_ids[i],
25207@@ -1569,7 +1569,7 @@ int drm_mode_setcrtc(struct drm_device *
25208 }
25209
25210 for (i = 0; i < crtc_req->count_connectors; i++) {
25211- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
25212+ set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
25213 if (get_user(out_id, &set_connectors_ptr[i])) {
25214 ret = -EFAULT;
25215 goto out;
25216@@ -1850,7 +1850,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_de
25217 fb = obj_to_fb(obj);
25218
25219 num_clips = r->num_clips;
25220- clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
25221+ clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
25222
25223 if (!num_clips != !clips_ptr) {
25224 ret = -EINVAL;
25225@@ -2270,7 +2270,7 @@ int drm_mode_getproperty_ioctl(struct dr
25226 out_resp->flags = property->flags;
25227
25228 if ((out_resp->count_values >= value_count) && value_count) {
25229- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
25230+ values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
25231 for (i = 0; i < value_count; i++) {
25232 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
25233 ret = -EFAULT;
25234@@ -2283,7 +2283,7 @@ int drm_mode_getproperty_ioctl(struct dr
25235 if (property->flags & DRM_MODE_PROP_ENUM) {
25236 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
25237 copied = 0;
25238- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
25239+ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
25240 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
25241
25242 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
25243@@ -2306,7 +2306,7 @@ int drm_mode_getproperty_ioctl(struct dr
25244 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
25245 copied = 0;
25246 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
25247- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
25248+ blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
25249
25250 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
25251 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
25252@@ -2367,7 +2367,7 @@ int drm_mode_getblob_ioctl(struct drm_de
25253 struct drm_mode_get_blob *out_resp = data;
25254 struct drm_property_blob *blob;
25255 int ret = 0;
25256- void *blob_ptr;
25257+ void __user *blob_ptr;
25258
25259 if (!drm_core_check_feature(dev, DRIVER_MODESET))
25260 return -EINVAL;
25261@@ -2381,7 +2381,7 @@ int drm_mode_getblob_ioctl(struct drm_de
25262 blob = obj_to_blob(obj);
25263
25264 if (out_resp->length == blob->length) {
25265- blob_ptr = (void *)(unsigned long)out_resp->data;
25266+ blob_ptr = (void __user *)(unsigned long)out_resp->data;
25267 if (copy_to_user(blob_ptr, blob->data, blob->length)){
25268 ret = -EFAULT;
25269 goto done;
25270diff -urNp linux-3.0.4/drivers/gpu/drm/drm_crtc_helper.c linux-3.0.4/drivers/gpu/drm/drm_crtc_helper.c
25271--- linux-3.0.4/drivers/gpu/drm/drm_crtc_helper.c 2011-07-21 22:17:23.000000000 -0400
25272+++ linux-3.0.4/drivers/gpu/drm/drm_crtc_helper.c 2011-08-23 21:48:14.000000000 -0400
25273@@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct d
25274 struct drm_crtc *tmp;
25275 int crtc_mask = 1;
25276
25277- WARN(!crtc, "checking null crtc?\n");
25278+ BUG_ON(!crtc);
25279
25280 dev = crtc->dev;
25281
25282@@ -343,6 +343,8 @@ bool drm_crtc_helper_set_mode(struct drm
25283 struct drm_encoder *encoder;
25284 bool ret = true;
25285
25286+ pax_track_stack();
25287+
25288 crtc->enabled = drm_helper_crtc_in_use(crtc);
25289 if (!crtc->enabled)
25290 return true;
25291diff -urNp linux-3.0.4/drivers/gpu/drm/drm_drv.c linux-3.0.4/drivers/gpu/drm/drm_drv.c
25292--- linux-3.0.4/drivers/gpu/drm/drm_drv.c 2011-07-21 22:17:23.000000000 -0400
25293+++ linux-3.0.4/drivers/gpu/drm/drm_drv.c 2011-10-06 04:17:55.000000000 -0400
25294@@ -307,7 +307,7 @@ module_exit(drm_core_exit);
25295 /**
25296 * Copy and IOCTL return string to user space
25297 */
25298-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
25299+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
25300 {
25301 int len;
25302
25303@@ -386,7 +386,7 @@ long drm_ioctl(struct file *filp,
25304
25305 dev = file_priv->minor->dev;
25306 atomic_inc(&dev->ioctl_count);
25307- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
25308+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
25309 ++file_priv->ioctl_count;
25310
25311 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
25312diff -urNp linux-3.0.4/drivers/gpu/drm/drm_fops.c linux-3.0.4/drivers/gpu/drm/drm_fops.c
25313--- linux-3.0.4/drivers/gpu/drm/drm_fops.c 2011-07-21 22:17:23.000000000 -0400
25314+++ linux-3.0.4/drivers/gpu/drm/drm_fops.c 2011-08-23 21:47:55.000000000 -0400
25315@@ -70,7 +70,7 @@ static int drm_setup(struct drm_device *
25316 }
25317
25318 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
25319- atomic_set(&dev->counts[i], 0);
25320+ atomic_set_unchecked(&dev->counts[i], 0);
25321
25322 dev->sigdata.lock = NULL;
25323
25324@@ -134,8 +134,8 @@ int drm_open(struct inode *inode, struct
25325
25326 retcode = drm_open_helper(inode, filp, dev);
25327 if (!retcode) {
25328- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
25329- if (!dev->open_count++)
25330+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
25331+ if (local_inc_return(&dev->open_count) == 1)
25332 retcode = drm_setup(dev);
25333 }
25334 if (!retcode) {
25335@@ -472,7 +472,7 @@ int drm_release(struct inode *inode, str
25336
25337 mutex_lock(&drm_global_mutex);
25338
25339- DRM_DEBUG("open_count = %d\n", dev->open_count);
25340+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
25341
25342 if (dev->driver->preclose)
25343 dev->driver->preclose(dev, file_priv);
25344@@ -484,7 +484,7 @@ int drm_release(struct inode *inode, str
25345 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
25346 task_pid_nr(current),
25347 (long)old_encode_dev(file_priv->minor->device),
25348- dev->open_count);
25349+ local_read(&dev->open_count));
25350
25351 /* if the master has gone away we can't do anything with the lock */
25352 if (file_priv->minor->master)
25353@@ -565,8 +565,8 @@ int drm_release(struct inode *inode, str
25354 * End inline drm_release
25355 */
25356
25357- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
25358- if (!--dev->open_count) {
25359+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
25360+ if (local_dec_and_test(&dev->open_count)) {
25361 if (atomic_read(&dev->ioctl_count)) {
25362 DRM_ERROR("Device busy: %d\n",
25363 atomic_read(&dev->ioctl_count));
25364diff -urNp linux-3.0.4/drivers/gpu/drm/drm_global.c linux-3.0.4/drivers/gpu/drm/drm_global.c
25365--- linux-3.0.4/drivers/gpu/drm/drm_global.c 2011-07-21 22:17:23.000000000 -0400
25366+++ linux-3.0.4/drivers/gpu/drm/drm_global.c 2011-08-23 21:47:55.000000000 -0400
25367@@ -36,7 +36,7 @@
25368 struct drm_global_item {
25369 struct mutex mutex;
25370 void *object;
25371- int refcount;
25372+ atomic_t refcount;
25373 };
25374
25375 static struct drm_global_item glob[DRM_GLOBAL_NUM];
25376@@ -49,7 +49,7 @@ void drm_global_init(void)
25377 struct drm_global_item *item = &glob[i];
25378 mutex_init(&item->mutex);
25379 item->object = NULL;
25380- item->refcount = 0;
25381+ atomic_set(&item->refcount, 0);
25382 }
25383 }
25384
25385@@ -59,7 +59,7 @@ void drm_global_release(void)
25386 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
25387 struct drm_global_item *item = &glob[i];
25388 BUG_ON(item->object != NULL);
25389- BUG_ON(item->refcount != 0);
25390+ BUG_ON(atomic_read(&item->refcount) != 0);
25391 }
25392 }
25393
25394@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_globa
25395 void *object;
25396
25397 mutex_lock(&item->mutex);
25398- if (item->refcount == 0) {
25399+ if (atomic_read(&item->refcount) == 0) {
25400 item->object = kzalloc(ref->size, GFP_KERNEL);
25401 if (unlikely(item->object == NULL)) {
25402 ret = -ENOMEM;
25403@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_globa
25404 goto out_err;
25405
25406 }
25407- ++item->refcount;
25408+ atomic_inc(&item->refcount);
25409 ref->object = item->object;
25410 object = item->object;
25411 mutex_unlock(&item->mutex);
25412@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_gl
25413 struct drm_global_item *item = &glob[ref->global_type];
25414
25415 mutex_lock(&item->mutex);
25416- BUG_ON(item->refcount == 0);
25417+ BUG_ON(atomic_read(&item->refcount) == 0);
25418 BUG_ON(ref->object != item->object);
25419- if (--item->refcount == 0) {
25420+ if (atomic_dec_and_test(&item->refcount)) {
25421 ref->release(ref);
25422 item->object = NULL;
25423 }
25424diff -urNp linux-3.0.4/drivers/gpu/drm/drm_info.c linux-3.0.4/drivers/gpu/drm/drm_info.c
25425--- linux-3.0.4/drivers/gpu/drm/drm_info.c 2011-07-21 22:17:23.000000000 -0400
25426+++ linux-3.0.4/drivers/gpu/drm/drm_info.c 2011-08-23 21:48:14.000000000 -0400
25427@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
25428 struct drm_local_map *map;
25429 struct drm_map_list *r_list;
25430
25431- /* Hardcoded from _DRM_FRAME_BUFFER,
25432- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
25433- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
25434- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
25435+ static const char * const types[] = {
25436+ [_DRM_FRAME_BUFFER] = "FB",
25437+ [_DRM_REGISTERS] = "REG",
25438+ [_DRM_SHM] = "SHM",
25439+ [_DRM_AGP] = "AGP",
25440+ [_DRM_SCATTER_GATHER] = "SG",
25441+ [_DRM_CONSISTENT] = "PCI",
25442+ [_DRM_GEM] = "GEM" };
25443 const char *type;
25444 int i;
25445
25446@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
25447 map = r_list->map;
25448 if (!map)
25449 continue;
25450- if (map->type < 0 || map->type > 5)
25451+ if (map->type >= ARRAY_SIZE(types))
25452 type = "??";
25453 else
25454 type = types[map->type];
25455@@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, voi
25456 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
25457 vma->vm_flags & VM_LOCKED ? 'l' : '-',
25458 vma->vm_flags & VM_IO ? 'i' : '-',
25459+#ifdef CONFIG_GRKERNSEC_HIDESYM
25460+ 0);
25461+#else
25462 vma->vm_pgoff);
25463+#endif
25464
25465 #if defined(__i386__)
25466 pgprot = pgprot_val(vma->vm_page_prot);
25467diff -urNp linux-3.0.4/drivers/gpu/drm/drm_ioc32.c linux-3.0.4/drivers/gpu/drm/drm_ioc32.c
25468--- linux-3.0.4/drivers/gpu/drm/drm_ioc32.c 2011-07-21 22:17:23.000000000 -0400
25469+++ linux-3.0.4/drivers/gpu/drm/drm_ioc32.c 2011-10-06 04:17:55.000000000 -0400
25470@@ -455,7 +455,7 @@ static int compat_drm_infobufs(struct fi
25471 request = compat_alloc_user_space(nbytes);
25472 if (!access_ok(VERIFY_WRITE, request, nbytes))
25473 return -EFAULT;
25474- list = (struct drm_buf_desc *) (request + 1);
25475+ list = (struct drm_buf_desc __user *) (request + 1);
25476
25477 if (__put_user(count, &request->count)
25478 || __put_user(list, &request->list))
25479@@ -516,7 +516,7 @@ static int compat_drm_mapbufs(struct fil
25480 request = compat_alloc_user_space(nbytes);
25481 if (!access_ok(VERIFY_WRITE, request, nbytes))
25482 return -EFAULT;
25483- list = (struct drm_buf_pub *) (request + 1);
25484+ list = (struct drm_buf_pub __user *) (request + 1);
25485
25486 if (__put_user(count, &request->count)
25487 || __put_user(list, &request->list))
25488diff -urNp linux-3.0.4/drivers/gpu/drm/drm_ioctl.c linux-3.0.4/drivers/gpu/drm/drm_ioctl.c
25489--- linux-3.0.4/drivers/gpu/drm/drm_ioctl.c 2011-07-21 22:17:23.000000000 -0400
25490+++ linux-3.0.4/drivers/gpu/drm/drm_ioctl.c 2011-08-23 21:47:55.000000000 -0400
25491@@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev,
25492 stats->data[i].value =
25493 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
25494 else
25495- stats->data[i].value = atomic_read(&dev->counts[i]);
25496+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
25497 stats->data[i].type = dev->types[i];
25498 }
25499
25500diff -urNp linux-3.0.4/drivers/gpu/drm/drm_lock.c linux-3.0.4/drivers/gpu/drm/drm_lock.c
25501--- linux-3.0.4/drivers/gpu/drm/drm_lock.c 2011-07-21 22:17:23.000000000 -0400
25502+++ linux-3.0.4/drivers/gpu/drm/drm_lock.c 2011-08-23 21:47:55.000000000 -0400
25503@@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, voi
25504 if (drm_lock_take(&master->lock, lock->context)) {
25505 master->lock.file_priv = file_priv;
25506 master->lock.lock_time = jiffies;
25507- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
25508+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
25509 break; /* Got lock */
25510 }
25511
25512@@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, v
25513 return -EINVAL;
25514 }
25515
25516- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
25517+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
25518
25519 if (drm_lock_free(&master->lock, lock->context)) {
25520 /* FIXME: Should really bail out here. */
25521diff -urNp linux-3.0.4/drivers/gpu/drm/i810/i810_dma.c linux-3.0.4/drivers/gpu/drm/i810/i810_dma.c
25522--- linux-3.0.4/drivers/gpu/drm/i810/i810_dma.c 2011-07-21 22:17:23.000000000 -0400
25523+++ linux-3.0.4/drivers/gpu/drm/i810/i810_dma.c 2011-08-23 21:47:55.000000000 -0400
25524@@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_de
25525 dma->buflist[vertex->idx],
25526 vertex->discard, vertex->used);
25527
25528- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
25529- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
25530+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
25531+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
25532 sarea_priv->last_enqueue = dev_priv->counter - 1;
25533 sarea_priv->last_dispatch = (int)hw_status[5];
25534
25535@@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device
25536 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
25537 mc->last_render);
25538
25539- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
25540- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
25541+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
25542+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
25543 sarea_priv->last_enqueue = dev_priv->counter - 1;
25544 sarea_priv->last_dispatch = (int)hw_status[5];
25545
25546diff -urNp linux-3.0.4/drivers/gpu/drm/i810/i810_drv.h linux-3.0.4/drivers/gpu/drm/i810/i810_drv.h
25547--- linux-3.0.4/drivers/gpu/drm/i810/i810_drv.h 2011-07-21 22:17:23.000000000 -0400
25548+++ linux-3.0.4/drivers/gpu/drm/i810/i810_drv.h 2011-08-23 21:47:55.000000000 -0400
25549@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
25550 int page_flipping;
25551
25552 wait_queue_head_t irq_queue;
25553- atomic_t irq_received;
25554- atomic_t irq_emitted;
25555+ atomic_unchecked_t irq_received;
25556+ atomic_unchecked_t irq_emitted;
25557
25558 int front_offset;
25559 } drm_i810_private_t;
25560diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_debugfs.c linux-3.0.4/drivers/gpu/drm/i915/i915_debugfs.c
25561--- linux-3.0.4/drivers/gpu/drm/i915/i915_debugfs.c 2011-07-21 22:17:23.000000000 -0400
25562+++ linux-3.0.4/drivers/gpu/drm/i915/i915_debugfs.c 2011-10-06 04:17:55.000000000 -0400
25563@@ -497,7 +497,7 @@ static int i915_interrupt_info(struct se
25564 I915_READ(GTIMR));
25565 }
25566 seq_printf(m, "Interrupts received: %d\n",
25567- atomic_read(&dev_priv->irq_received));
25568+ atomic_read_unchecked(&dev_priv->irq_received));
25569 for (i = 0; i < I915_NUM_RINGS; i++) {
25570 if (IS_GEN6(dev)) {
25571 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
25572@@ -1147,7 +1147,7 @@ static int i915_opregion(struct seq_file
25573 return ret;
25574
25575 if (opregion->header)
25576- seq_write(m, opregion->header, OPREGION_SIZE);
25577+ seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
25578
25579 mutex_unlock(&dev->struct_mutex);
25580
25581diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_dma.c linux-3.0.4/drivers/gpu/drm/i915/i915_dma.c
25582--- linux-3.0.4/drivers/gpu/drm/i915/i915_dma.c 2011-09-02 18:11:21.000000000 -0400
25583+++ linux-3.0.4/drivers/gpu/drm/i915/i915_dma.c 2011-08-23 21:47:55.000000000 -0400
25584@@ -1169,7 +1169,7 @@ static bool i915_switcheroo_can_switch(s
25585 bool can_switch;
25586
25587 spin_lock(&dev->count_lock);
25588- can_switch = (dev->open_count == 0);
25589+ can_switch = (local_read(&dev->open_count) == 0);
25590 spin_unlock(&dev->count_lock);
25591 return can_switch;
25592 }
25593diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_drv.h linux-3.0.4/drivers/gpu/drm/i915/i915_drv.h
25594--- linux-3.0.4/drivers/gpu/drm/i915/i915_drv.h 2011-07-21 22:17:23.000000000 -0400
25595+++ linux-3.0.4/drivers/gpu/drm/i915/i915_drv.h 2011-08-23 21:47:55.000000000 -0400
25596@@ -219,7 +219,7 @@ struct drm_i915_display_funcs {
25597 /* render clock increase/decrease */
25598 /* display clock increase/decrease */
25599 /* pll clock increase/decrease */
25600-};
25601+} __no_const;
25602
25603 struct intel_device_info {
25604 u8 gen;
25605@@ -300,7 +300,7 @@ typedef struct drm_i915_private {
25606 int current_page;
25607 int page_flipping;
25608
25609- atomic_t irq_received;
25610+ atomic_unchecked_t irq_received;
25611
25612 /* protects the irq masks */
25613 spinlock_t irq_lock;
25614@@ -874,7 +874,7 @@ struct drm_i915_gem_object {
25615 * will be page flipped away on the next vblank. When it
25616 * reaches 0, dev_priv->pending_flip_queue will be woken up.
25617 */
25618- atomic_t pending_flip;
25619+ atomic_unchecked_t pending_flip;
25620 };
25621
25622 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
25623@@ -1247,7 +1247,7 @@ extern int intel_setup_gmbus(struct drm_
25624 extern void intel_teardown_gmbus(struct drm_device *dev);
25625 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
25626 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
25627-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
25628+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
25629 {
25630 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
25631 }
25632diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-3.0.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c
25633--- linux-3.0.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-07-21 22:17:23.000000000 -0400
25634+++ linux-3.0.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-08-23 21:47:55.000000000 -0400
25635@@ -188,7 +188,7 @@ i915_gem_object_set_to_gpu_domain(struct
25636 i915_gem_clflush_object(obj);
25637
25638 if (obj->base.pending_write_domain)
25639- cd->flips |= atomic_read(&obj->pending_flip);
25640+ cd->flips |= atomic_read_unchecked(&obj->pending_flip);
25641
25642 /* The actual obj->write_domain will be updated with
25643 * pending_write_domain after we emit the accumulated flush for all
25644diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_irq.c linux-3.0.4/drivers/gpu/drm/i915/i915_irq.c
25645--- linux-3.0.4/drivers/gpu/drm/i915/i915_irq.c 2011-09-02 18:11:21.000000000 -0400
25646+++ linux-3.0.4/drivers/gpu/drm/i915/i915_irq.c 2011-08-23 21:47:55.000000000 -0400
25647@@ -473,7 +473,7 @@ static irqreturn_t ivybridge_irq_handler
25648 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
25649 struct drm_i915_master_private *master_priv;
25650
25651- atomic_inc(&dev_priv->irq_received);
25652+ atomic_inc_unchecked(&dev_priv->irq_received);
25653
25654 /* disable master interrupt before clearing iir */
25655 de_ier = I915_READ(DEIER);
25656@@ -563,7 +563,7 @@ static irqreturn_t ironlake_irq_handler(
25657 struct drm_i915_master_private *master_priv;
25658 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
25659
25660- atomic_inc(&dev_priv->irq_received);
25661+ atomic_inc_unchecked(&dev_priv->irq_received);
25662
25663 if (IS_GEN6(dev))
25664 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
25665@@ -1226,7 +1226,7 @@ static irqreturn_t i915_driver_irq_handl
25666 int ret = IRQ_NONE, pipe;
25667 bool blc_event = false;
25668
25669- atomic_inc(&dev_priv->irq_received);
25670+ atomic_inc_unchecked(&dev_priv->irq_received);
25671
25672 iir = I915_READ(IIR);
25673
25674@@ -1735,7 +1735,7 @@ static void ironlake_irq_preinstall(stru
25675 {
25676 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
25677
25678- atomic_set(&dev_priv->irq_received, 0);
25679+ atomic_set_unchecked(&dev_priv->irq_received, 0);
25680
25681 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
25682 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
25683@@ -1899,7 +1899,7 @@ static void i915_driver_irq_preinstall(s
25684 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
25685 int pipe;
25686
25687- atomic_set(&dev_priv->irq_received, 0);
25688+ atomic_set_unchecked(&dev_priv->irq_received, 0);
25689
25690 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
25691 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
25692diff -urNp linux-3.0.4/drivers/gpu/drm/i915/intel_display.c linux-3.0.4/drivers/gpu/drm/i915/intel_display.c
25693--- linux-3.0.4/drivers/gpu/drm/i915/intel_display.c 2011-09-02 18:11:21.000000000 -0400
25694+++ linux-3.0.4/drivers/gpu/drm/i915/intel_display.c 2011-08-23 21:47:55.000000000 -0400
25695@@ -1961,7 +1961,7 @@ intel_pipe_set_base(struct drm_crtc *crt
25696
25697 wait_event(dev_priv->pending_flip_queue,
25698 atomic_read(&dev_priv->mm.wedged) ||
25699- atomic_read(&obj->pending_flip) == 0);
25700+ atomic_read_unchecked(&obj->pending_flip) == 0);
25701
25702 /* Big Hammer, we also need to ensure that any pending
25703 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
25704@@ -2548,7 +2548,7 @@ static void intel_crtc_wait_for_pending_
25705 obj = to_intel_framebuffer(crtc->fb)->obj;
25706 dev_priv = crtc->dev->dev_private;
25707 wait_event(dev_priv->pending_flip_queue,
25708- atomic_read(&obj->pending_flip) == 0);
25709+ atomic_read_unchecked(&obj->pending_flip) == 0);
25710 }
25711
25712 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
25713@@ -6225,7 +6225,7 @@ static void do_intel_finish_page_flip(st
25714
25715 atomic_clear_mask(1 << intel_crtc->plane,
25716 &obj->pending_flip.counter);
25717- if (atomic_read(&obj->pending_flip) == 0)
25718+ if (atomic_read_unchecked(&obj->pending_flip) == 0)
25719 wake_up(&dev_priv->pending_flip_queue);
25720
25721 schedule_work(&work->work);
25722@@ -6514,7 +6514,7 @@ static int intel_crtc_page_flip(struct d
25723 /* Block clients from rendering to the new back buffer until
25724 * the flip occurs and the object is no longer visible.
25725 */
25726- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
25727+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
25728
25729 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
25730 if (ret)
25731@@ -6527,7 +6527,7 @@ static int intel_crtc_page_flip(struct d
25732 return 0;
25733
25734 cleanup_pending:
25735- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
25736+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
25737 cleanup_objs:
25738 drm_gem_object_unreference(&work->old_fb_obj->base);
25739 drm_gem_object_unreference(&obj->base);
25740diff -urNp linux-3.0.4/drivers/gpu/drm/mga/mga_drv.h linux-3.0.4/drivers/gpu/drm/mga/mga_drv.h
25741--- linux-3.0.4/drivers/gpu/drm/mga/mga_drv.h 2011-07-21 22:17:23.000000000 -0400
25742+++ linux-3.0.4/drivers/gpu/drm/mga/mga_drv.h 2011-08-23 21:47:55.000000000 -0400
25743@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
25744 u32 clear_cmd;
25745 u32 maccess;
25746
25747- atomic_t vbl_received; /**< Number of vblanks received. */
25748+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
25749 wait_queue_head_t fence_queue;
25750- atomic_t last_fence_retired;
25751+ atomic_unchecked_t last_fence_retired;
25752 u32 next_fence_to_post;
25753
25754 unsigned int fb_cpp;
25755diff -urNp linux-3.0.4/drivers/gpu/drm/mga/mga_irq.c linux-3.0.4/drivers/gpu/drm/mga/mga_irq.c
25756--- linux-3.0.4/drivers/gpu/drm/mga/mga_irq.c 2011-07-21 22:17:23.000000000 -0400
25757+++ linux-3.0.4/drivers/gpu/drm/mga/mga_irq.c 2011-08-23 21:47:55.000000000 -0400
25758@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
25759 if (crtc != 0)
25760 return 0;
25761
25762- return atomic_read(&dev_priv->vbl_received);
25763+ return atomic_read_unchecked(&dev_priv->vbl_received);
25764 }
25765
25766
25767@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
25768 /* VBLANK interrupt */
25769 if (status & MGA_VLINEPEN) {
25770 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
25771- atomic_inc(&dev_priv->vbl_received);
25772+ atomic_inc_unchecked(&dev_priv->vbl_received);
25773 drm_handle_vblank(dev, 0);
25774 handled = 1;
25775 }
25776@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
25777 if ((prim_start & ~0x03) != (prim_end & ~0x03))
25778 MGA_WRITE(MGA_PRIMEND, prim_end);
25779
25780- atomic_inc(&dev_priv->last_fence_retired);
25781+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
25782 DRM_WAKEUP(&dev_priv->fence_queue);
25783 handled = 1;
25784 }
25785@@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_dev
25786 * using fences.
25787 */
25788 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
25789- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
25790+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
25791 - *sequence) <= (1 << 23)));
25792
25793 *sequence = cur_fence;
25794diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_bios.c linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_bios.c
25795--- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-07-21 22:17:23.000000000 -0400
25796+++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-08-26 19:49:56.000000000 -0400
25797@@ -200,7 +200,7 @@ struct methods {
25798 const char desc[8];
25799 void (*loadbios)(struct drm_device *, uint8_t *);
25800 const bool rw;
25801-};
25802+} __do_const;
25803
25804 static struct methods shadow_methods[] = {
25805 { "PRAMIN", load_vbios_pramin, true },
25806@@ -5488,7 +5488,7 @@ parse_bit_displayport_tbl_entry(struct d
25807 struct bit_table {
25808 const char id;
25809 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
25810-};
25811+} __no_const;
25812
25813 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
25814
25815diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_drv.h linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_drv.h
25816--- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-07-21 22:17:23.000000000 -0400
25817+++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-08-23 21:47:55.000000000 -0400
25818@@ -227,7 +227,7 @@ struct nouveau_channel {
25819 struct list_head pending;
25820 uint32_t sequence;
25821 uint32_t sequence_ack;
25822- atomic_t last_sequence_irq;
25823+ atomic_unchecked_t last_sequence_irq;
25824 } fence;
25825
25826 /* DMA push buffer */
25827@@ -304,7 +304,7 @@ struct nouveau_exec_engine {
25828 u32 handle, u16 class);
25829 void (*set_tile_region)(struct drm_device *dev, int i);
25830 void (*tlb_flush)(struct drm_device *, int engine);
25831-};
25832+} __no_const;
25833
25834 struct nouveau_instmem_engine {
25835 void *priv;
25836@@ -325,13 +325,13 @@ struct nouveau_instmem_engine {
25837 struct nouveau_mc_engine {
25838 int (*init)(struct drm_device *dev);
25839 void (*takedown)(struct drm_device *dev);
25840-};
25841+} __no_const;
25842
25843 struct nouveau_timer_engine {
25844 int (*init)(struct drm_device *dev);
25845 void (*takedown)(struct drm_device *dev);
25846 uint64_t (*read)(struct drm_device *dev);
25847-};
25848+} __no_const;
25849
25850 struct nouveau_fb_engine {
25851 int num_tiles;
25852@@ -494,7 +494,7 @@ struct nouveau_vram_engine {
25853 void (*put)(struct drm_device *, struct nouveau_mem **);
25854
25855 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
25856-};
25857+} __no_const;
25858
25859 struct nouveau_engine {
25860 struct nouveau_instmem_engine instmem;
25861@@ -640,7 +640,7 @@ struct drm_nouveau_private {
25862 struct drm_global_reference mem_global_ref;
25863 struct ttm_bo_global_ref bo_global_ref;
25864 struct ttm_bo_device bdev;
25865- atomic_t validate_sequence;
25866+ atomic_unchecked_t validate_sequence;
25867 } ttm;
25868
25869 struct {
25870diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_fence.c linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_fence.c
25871--- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-07-21 22:17:23.000000000 -0400
25872+++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-08-23 21:47:55.000000000 -0400
25873@@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_chan
25874 if (USE_REFCNT(dev))
25875 sequence = nvchan_rd32(chan, 0x48);
25876 else
25877- sequence = atomic_read(&chan->fence.last_sequence_irq);
25878+ sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
25879
25880 if (chan->fence.sequence_ack == sequence)
25881 goto out;
25882@@ -544,7 +544,7 @@ nouveau_fence_channel_init(struct nouvea
25883
25884 INIT_LIST_HEAD(&chan->fence.pending);
25885 spin_lock_init(&chan->fence.lock);
25886- atomic_set(&chan->fence.last_sequence_irq, 0);
25887+ atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
25888 return 0;
25889 }
25890
25891diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_gem.c linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_gem.c
25892--- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-07-21 22:17:23.000000000 -0400
25893+++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-08-23 21:47:55.000000000 -0400
25894@@ -249,7 +249,7 @@ validate_init(struct nouveau_channel *ch
25895 int trycnt = 0;
25896 int ret, i;
25897
25898- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
25899+ sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
25900 retry:
25901 if (++trycnt > 100000) {
25902 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
25903diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_state.c linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_state.c
25904--- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_state.c 2011-07-21 22:17:23.000000000 -0400
25905+++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_state.c 2011-08-23 21:47:55.000000000 -0400
25906@@ -488,7 +488,7 @@ static bool nouveau_switcheroo_can_switc
25907 bool can_switch;
25908
25909 spin_lock(&dev->count_lock);
25910- can_switch = (dev->open_count == 0);
25911+ can_switch = (local_read(&dev->open_count) == 0);
25912 spin_unlock(&dev->count_lock);
25913 return can_switch;
25914 }
25915diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nv04_graph.c linux-3.0.4/drivers/gpu/drm/nouveau/nv04_graph.c
25916--- linux-3.0.4/drivers/gpu/drm/nouveau/nv04_graph.c 2011-07-21 22:17:23.000000000 -0400
25917+++ linux-3.0.4/drivers/gpu/drm/nouveau/nv04_graph.c 2011-08-23 21:47:55.000000000 -0400
25918@@ -560,7 +560,7 @@ static int
25919 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
25920 u32 class, u32 mthd, u32 data)
25921 {
25922- atomic_set(&chan->fence.last_sequence_irq, data);
25923+ atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
25924 return 0;
25925 }
25926
25927diff -urNp linux-3.0.4/drivers/gpu/drm/r128/r128_cce.c linux-3.0.4/drivers/gpu/drm/r128/r128_cce.c
25928--- linux-3.0.4/drivers/gpu/drm/r128/r128_cce.c 2011-07-21 22:17:23.000000000 -0400
25929+++ linux-3.0.4/drivers/gpu/drm/r128/r128_cce.c 2011-08-23 21:47:55.000000000 -0400
25930@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
25931
25932 /* GH: Simple idle check.
25933 */
25934- atomic_set(&dev_priv->idle_count, 0);
25935+ atomic_set_unchecked(&dev_priv->idle_count, 0);
25936
25937 /* We don't support anything other than bus-mastering ring mode,
25938 * but the ring can be in either AGP or PCI space for the ring
25939diff -urNp linux-3.0.4/drivers/gpu/drm/r128/r128_drv.h linux-3.0.4/drivers/gpu/drm/r128/r128_drv.h
25940--- linux-3.0.4/drivers/gpu/drm/r128/r128_drv.h 2011-07-21 22:17:23.000000000 -0400
25941+++ linux-3.0.4/drivers/gpu/drm/r128/r128_drv.h 2011-08-23 21:47:55.000000000 -0400
25942@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
25943 int is_pci;
25944 unsigned long cce_buffers_offset;
25945
25946- atomic_t idle_count;
25947+ atomic_unchecked_t idle_count;
25948
25949 int page_flipping;
25950 int current_page;
25951 u32 crtc_offset;
25952 u32 crtc_offset_cntl;
25953
25954- atomic_t vbl_received;
25955+ atomic_unchecked_t vbl_received;
25956
25957 u32 color_fmt;
25958 unsigned int front_offset;
25959diff -urNp linux-3.0.4/drivers/gpu/drm/r128/r128_irq.c linux-3.0.4/drivers/gpu/drm/r128/r128_irq.c
25960--- linux-3.0.4/drivers/gpu/drm/r128/r128_irq.c 2011-07-21 22:17:23.000000000 -0400
25961+++ linux-3.0.4/drivers/gpu/drm/r128/r128_irq.c 2011-08-23 21:47:55.000000000 -0400
25962@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
25963 if (crtc != 0)
25964 return 0;
25965
25966- return atomic_read(&dev_priv->vbl_received);
25967+ return atomic_read_unchecked(&dev_priv->vbl_received);
25968 }
25969
25970 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
25971@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
25972 /* VBLANK interrupt */
25973 if (status & R128_CRTC_VBLANK_INT) {
25974 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
25975- atomic_inc(&dev_priv->vbl_received);
25976+ atomic_inc_unchecked(&dev_priv->vbl_received);
25977 drm_handle_vblank(dev, 0);
25978 return IRQ_HANDLED;
25979 }
25980diff -urNp linux-3.0.4/drivers/gpu/drm/r128/r128_state.c linux-3.0.4/drivers/gpu/drm/r128/r128_state.c
25981--- linux-3.0.4/drivers/gpu/drm/r128/r128_state.c 2011-07-21 22:17:23.000000000 -0400
25982+++ linux-3.0.4/drivers/gpu/drm/r128/r128_state.c 2011-08-23 21:47:55.000000000 -0400
25983@@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_priv
25984
25985 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
25986 {
25987- if (atomic_read(&dev_priv->idle_count) == 0)
25988+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
25989 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
25990 else
25991- atomic_set(&dev_priv->idle_count, 0);
25992+ atomic_set_unchecked(&dev_priv->idle_count, 0);
25993 }
25994
25995 #endif
25996diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/atom.c linux-3.0.4/drivers/gpu/drm/radeon/atom.c
25997--- linux-3.0.4/drivers/gpu/drm/radeon/atom.c 2011-07-21 22:17:23.000000000 -0400
25998+++ linux-3.0.4/drivers/gpu/drm/radeon/atom.c 2011-08-23 21:48:14.000000000 -0400
25999@@ -1245,6 +1245,8 @@ struct atom_context *atom_parse(struct c
26000 char name[512];
26001 int i;
26002
26003+ pax_track_stack();
26004+
26005 ctx->card = card;
26006 ctx->bios = bios;
26007
26008diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/mkregtable.c linux-3.0.4/drivers/gpu/drm/radeon/mkregtable.c
26009--- linux-3.0.4/drivers/gpu/drm/radeon/mkregtable.c 2011-07-21 22:17:23.000000000 -0400
26010+++ linux-3.0.4/drivers/gpu/drm/radeon/mkregtable.c 2011-08-23 21:47:55.000000000 -0400
26011@@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
26012 regex_t mask_rex;
26013 regmatch_t match[4];
26014 char buf[1024];
26015- size_t end;
26016+ long end;
26017 int len;
26018 int done = 0;
26019 int r;
26020 unsigned o;
26021 struct offset *offset;
26022 char last_reg_s[10];
26023- int last_reg;
26024+ unsigned long last_reg;
26025
26026 if (regcomp
26027 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
26028diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_atombios.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_atombios.c
26029--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_atombios.c 2011-07-21 22:17:23.000000000 -0400
26030+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_atombios.c 2011-08-23 21:48:14.000000000 -0400
26031@@ -545,6 +545,8 @@ bool radeon_get_atom_connector_info_from
26032 struct radeon_gpio_rec gpio;
26033 struct radeon_hpd hpd;
26034
26035+ pax_track_stack();
26036+
26037 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
26038 return false;
26039
26040diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_device.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_device.c
26041--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_device.c 2011-09-02 18:11:21.000000000 -0400
26042+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_device.c 2011-08-23 21:47:55.000000000 -0400
26043@@ -678,7 +678,7 @@ static bool radeon_switcheroo_can_switch
26044 bool can_switch;
26045
26046 spin_lock(&dev->count_lock);
26047- can_switch = (dev->open_count == 0);
26048+ can_switch = (local_read(&dev->open_count) == 0);
26049 spin_unlock(&dev->count_lock);
26050 return can_switch;
26051 }
26052diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_display.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_display.c
26053--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_display.c 2011-09-02 18:11:21.000000000 -0400
26054+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_display.c 2011-08-23 21:48:14.000000000 -0400
26055@@ -946,6 +946,8 @@ void radeon_compute_pll_legacy(struct ra
26056 uint32_t post_div;
26057 u32 pll_out_min, pll_out_max;
26058
26059+ pax_track_stack();
26060+
26061 DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
26062 freq = freq * 1000;
26063
26064diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_drv.h linux-3.0.4/drivers/gpu/drm/radeon/radeon_drv.h
26065--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_drv.h 2011-07-21 22:17:23.000000000 -0400
26066+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_drv.h 2011-08-23 21:47:55.000000000 -0400
26067@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
26068
26069 /* SW interrupt */
26070 wait_queue_head_t swi_queue;
26071- atomic_t swi_emitted;
26072+ atomic_unchecked_t swi_emitted;
26073 int vblank_crtc;
26074 uint32_t irq_enable_reg;
26075 uint32_t r500_disp_irq_reg;
26076diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_fence.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_fence.c
26077--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_fence.c 2011-07-21 22:17:23.000000000 -0400
26078+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_fence.c 2011-08-23 21:47:55.000000000 -0400
26079@@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_devi
26080 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
26081 return 0;
26082 }
26083- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
26084+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
26085 if (!rdev->cp.ready)
26086 /* FIXME: cp is not running assume everythings is done right
26087 * away
26088@@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct rade
26089 return r;
26090 }
26091 radeon_fence_write(rdev, 0);
26092- atomic_set(&rdev->fence_drv.seq, 0);
26093+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
26094 INIT_LIST_HEAD(&rdev->fence_drv.created);
26095 INIT_LIST_HEAD(&rdev->fence_drv.emited);
26096 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
26097diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon.h linux-3.0.4/drivers/gpu/drm/radeon/radeon.h
26098--- linux-3.0.4/drivers/gpu/drm/radeon/radeon.h 2011-07-21 22:17:23.000000000 -0400
26099+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon.h 2011-08-23 21:47:55.000000000 -0400
26100@@ -191,7 +191,7 @@ extern int sumo_get_temp(struct radeon_d
26101 */
26102 struct radeon_fence_driver {
26103 uint32_t scratch_reg;
26104- atomic_t seq;
26105+ atomic_unchecked_t seq;
26106 uint32_t last_seq;
26107 unsigned long last_jiffies;
26108 unsigned long last_timeout;
26109@@ -960,7 +960,7 @@ struct radeon_asic {
26110 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
26111 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
26112 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
26113-};
26114+} __no_const;
26115
26116 /*
26117 * Asic structures
26118diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_ioc32.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_ioc32.c
26119--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-07-21 22:17:23.000000000 -0400
26120+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-08-23 21:47:55.000000000 -0400
26121@@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(str
26122 request = compat_alloc_user_space(sizeof(*request));
26123 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
26124 || __put_user(req32.param, &request->param)
26125- || __put_user((void __user *)(unsigned long)req32.value,
26126+ || __put_user((unsigned long)req32.value,
26127 &request->value))
26128 return -EFAULT;
26129
26130diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_irq.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_irq.c
26131--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_irq.c 2011-07-21 22:17:23.000000000 -0400
26132+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_irq.c 2011-08-23 21:47:55.000000000 -0400
26133@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
26134 unsigned int ret;
26135 RING_LOCALS;
26136
26137- atomic_inc(&dev_priv->swi_emitted);
26138- ret = atomic_read(&dev_priv->swi_emitted);
26139+ atomic_inc_unchecked(&dev_priv->swi_emitted);
26140+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
26141
26142 BEGIN_RING(4);
26143 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
26144@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
26145 drm_radeon_private_t *dev_priv =
26146 (drm_radeon_private_t *) dev->dev_private;
26147
26148- atomic_set(&dev_priv->swi_emitted, 0);
26149+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
26150 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
26151
26152 dev->max_vblank_count = 0x001fffff;
26153diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_state.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_state.c
26154--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_state.c 2011-07-21 22:17:23.000000000 -0400
26155+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_state.c 2011-08-23 21:47:55.000000000 -0400
26156@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_de
26157 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
26158 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
26159
26160- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
26161+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
26162 sarea_priv->nbox * sizeof(depth_boxes[0])))
26163 return -EFAULT;
26164
26165@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm
26166 {
26167 drm_radeon_private_t *dev_priv = dev->dev_private;
26168 drm_radeon_getparam_t *param = data;
26169- int value;
26170+ int value = 0;
26171
26172 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
26173
26174diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_ttm.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_ttm.c
26175--- linux-3.0.4/drivers/gpu/drm/radeon/radeon_ttm.c 2011-07-21 22:17:23.000000000 -0400
26176+++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_ttm.c 2011-08-23 21:47:55.000000000 -0400
26177@@ -644,8 +644,10 @@ int radeon_mmap(struct file *filp, struc
26178 }
26179 if (unlikely(ttm_vm_ops == NULL)) {
26180 ttm_vm_ops = vma->vm_ops;
26181- radeon_ttm_vm_ops = *ttm_vm_ops;
26182- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
26183+ pax_open_kernel();
26184+ memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
26185+ *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
26186+ pax_close_kernel();
26187 }
26188 vma->vm_ops = &radeon_ttm_vm_ops;
26189 return 0;
26190diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/rs690.c linux-3.0.4/drivers/gpu/drm/radeon/rs690.c
26191--- linux-3.0.4/drivers/gpu/drm/radeon/rs690.c 2011-07-21 22:17:23.000000000 -0400
26192+++ linux-3.0.4/drivers/gpu/drm/radeon/rs690.c 2011-08-23 21:47:55.000000000 -0400
26193@@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct
26194 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
26195 rdev->pm.sideport_bandwidth.full)
26196 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
26197- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
26198+ read_delay_latency.full = dfixed_const(800 * 1000);
26199 read_delay_latency.full = dfixed_div(read_delay_latency,
26200 rdev->pm.igp_sideport_mclk);
26201+ a.full = dfixed_const(370);
26202+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
26203 } else {
26204 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
26205 rdev->pm.k8_bandwidth.full)
26206diff -urNp linux-3.0.4/drivers/gpu/drm/ttm/ttm_page_alloc.c linux-3.0.4/drivers/gpu/drm/ttm/ttm_page_alloc.c
26207--- linux-3.0.4/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-07-21 22:17:23.000000000 -0400
26208+++ linux-3.0.4/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-08-23 21:47:55.000000000 -0400
26209@@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages
26210 static int ttm_pool_mm_shrink(struct shrinker *shrink,
26211 struct shrink_control *sc)
26212 {
26213- static atomic_t start_pool = ATOMIC_INIT(0);
26214+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
26215 unsigned i;
26216- unsigned pool_offset = atomic_add_return(1, &start_pool);
26217+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
26218 struct ttm_page_pool *pool;
26219 int shrink_pages = sc->nr_to_scan;
26220
26221diff -urNp linux-3.0.4/drivers/gpu/drm/via/via_drv.h linux-3.0.4/drivers/gpu/drm/via/via_drv.h
26222--- linux-3.0.4/drivers/gpu/drm/via/via_drv.h 2011-07-21 22:17:23.000000000 -0400
26223+++ linux-3.0.4/drivers/gpu/drm/via/via_drv.h 2011-08-23 21:47:55.000000000 -0400
26224@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
26225 typedef uint32_t maskarray_t[5];
26226
26227 typedef struct drm_via_irq {
26228- atomic_t irq_received;
26229+ atomic_unchecked_t irq_received;
26230 uint32_t pending_mask;
26231 uint32_t enable_mask;
26232 wait_queue_head_t irq_queue;
26233@@ -75,7 +75,7 @@ typedef struct drm_via_private {
26234 struct timeval last_vblank;
26235 int last_vblank_valid;
26236 unsigned usec_per_vblank;
26237- atomic_t vbl_received;
26238+ atomic_unchecked_t vbl_received;
26239 drm_via_state_t hc_state;
26240 char pci_buf[VIA_PCI_BUF_SIZE];
26241 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
26242diff -urNp linux-3.0.4/drivers/gpu/drm/via/via_irq.c linux-3.0.4/drivers/gpu/drm/via/via_irq.c
26243--- linux-3.0.4/drivers/gpu/drm/via/via_irq.c 2011-07-21 22:17:23.000000000 -0400
26244+++ linux-3.0.4/drivers/gpu/drm/via/via_irq.c 2011-08-23 21:47:55.000000000 -0400
26245@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
26246 if (crtc != 0)
26247 return 0;
26248
26249- return atomic_read(&dev_priv->vbl_received);
26250+ return atomic_read_unchecked(&dev_priv->vbl_received);
26251 }
26252
26253 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
26254@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
26255
26256 status = VIA_READ(VIA_REG_INTERRUPT);
26257 if (status & VIA_IRQ_VBLANK_PENDING) {
26258- atomic_inc(&dev_priv->vbl_received);
26259- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
26260+ atomic_inc_unchecked(&dev_priv->vbl_received);
26261+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
26262 do_gettimeofday(&cur_vblank);
26263 if (dev_priv->last_vblank_valid) {
26264 dev_priv->usec_per_vblank =
26265@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
26266 dev_priv->last_vblank = cur_vblank;
26267 dev_priv->last_vblank_valid = 1;
26268 }
26269- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
26270+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
26271 DRM_DEBUG("US per vblank is: %u\n",
26272 dev_priv->usec_per_vblank);
26273 }
26274@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
26275
26276 for (i = 0; i < dev_priv->num_irqs; ++i) {
26277 if (status & cur_irq->pending_mask) {
26278- atomic_inc(&cur_irq->irq_received);
26279+ atomic_inc_unchecked(&cur_irq->irq_received);
26280 DRM_WAKEUP(&cur_irq->irq_queue);
26281 handled = 1;
26282 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
26283@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *d
26284 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
26285 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
26286 masks[irq][4]));
26287- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
26288+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
26289 } else {
26290 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
26291 (((cur_irq_sequence =
26292- atomic_read(&cur_irq->irq_received)) -
26293+ atomic_read_unchecked(&cur_irq->irq_received)) -
26294 *sequence) <= (1 << 23)));
26295 }
26296 *sequence = cur_irq_sequence;
26297@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct dr
26298 }
26299
26300 for (i = 0; i < dev_priv->num_irqs; ++i) {
26301- atomic_set(&cur_irq->irq_received, 0);
26302+ atomic_set_unchecked(&cur_irq->irq_received, 0);
26303 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
26304 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
26305 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
26306@@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev,
26307 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
26308 case VIA_IRQ_RELATIVE:
26309 irqwait->request.sequence +=
26310- atomic_read(&cur_irq->irq_received);
26311+ atomic_read_unchecked(&cur_irq->irq_received);
26312 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
26313 case VIA_IRQ_ABSOLUTE:
26314 break;
26315diff -urNp linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
26316--- linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-07-21 22:17:23.000000000 -0400
26317+++ linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-08-23 21:47:55.000000000 -0400
26318@@ -240,7 +240,7 @@ struct vmw_private {
26319 * Fencing and IRQs.
26320 */
26321
26322- atomic_t fence_seq;
26323+ atomic_unchecked_t fence_seq;
26324 wait_queue_head_t fence_queue;
26325 wait_queue_head_t fifo_queue;
26326 atomic_t fence_queue_waiters;
26327diff -urNp linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
26328--- linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c 2011-07-21 22:17:23.000000000 -0400
26329+++ linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c 2011-10-06 04:17:55.000000000 -0400
26330@@ -610,7 +610,7 @@ int vmw_execbuf_ioctl(struct drm_device
26331 struct drm_vmw_fence_rep fence_rep;
26332 struct drm_vmw_fence_rep __user *user_fence_rep;
26333 int ret;
26334- void *user_cmd;
26335+ void __user *user_cmd;
26336 void *cmd;
26337 uint32_t sequence;
26338 struct vmw_sw_context *sw_context = &dev_priv->ctx;
26339diff -urNp linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
26340--- linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-07-21 22:17:23.000000000 -0400
26341+++ linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-08-23 21:47:55.000000000 -0400
26342@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev
26343 while (!vmw_lag_lt(queue, us)) {
26344 spin_lock(&queue->lock);
26345 if (list_empty(&queue->head))
26346- sequence = atomic_read(&dev_priv->fence_seq);
26347+ sequence = atomic_read_unchecked(&dev_priv->fence_seq);
26348 else {
26349 fence = list_first_entry(&queue->head,
26350 struct vmw_fence, head);
26351diff -urNp linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
26352--- linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-07-21 22:17:23.000000000 -0400
26353+++ linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-10-06 04:17:55.000000000 -0400
26354@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *de
26355 (unsigned int) min,
26356 (unsigned int) fifo->capabilities);
26357
26358- atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
26359+ atomic_set_unchecked(&dev_priv->fence_seq, dev_priv->last_read_sequence);
26360 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
26361 vmw_fence_queue_init(&fifo->fence_queue);
26362 return vmw_fifo_send_fence(dev_priv, &dummy);
26363@@ -356,7 +356,7 @@ void *vmw_fifo_reserve(struct vmw_privat
26364 if (reserveable)
26365 iowrite32(bytes, fifo_mem +
26366 SVGA_FIFO_RESERVED);
26367- return fifo_mem + (next_cmd >> 2);
26368+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
26369 } else {
26370 need_bounce = true;
26371 }
26372@@ -476,7 +476,7 @@ int vmw_fifo_send_fence(struct vmw_priva
26373
26374 fm = vmw_fifo_reserve(dev_priv, bytes);
26375 if (unlikely(fm == NULL)) {
26376- *sequence = atomic_read(&dev_priv->fence_seq);
26377+ *sequence = atomic_read_unchecked(&dev_priv->fence_seq);
26378 ret = -ENOMEM;
26379 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
26380 false, 3*HZ);
26381@@ -484,7 +484,7 @@ int vmw_fifo_send_fence(struct vmw_priva
26382 }
26383
26384 do {
26385- *sequence = atomic_add_return(1, &dev_priv->fence_seq);
26386+ *sequence = atomic_add_return_unchecked(1, &dev_priv->fence_seq);
26387 } while (*sequence == 0);
26388
26389 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
26390diff -urNp linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
26391--- linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-07-21 22:17:23.000000000 -0400
26392+++ linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-08-23 21:47:55.000000000 -0400
26393@@ -100,7 +100,7 @@ bool vmw_fence_signaled(struct vmw_priva
26394 * emitted. Then the fence is stale and signaled.
26395 */
26396
26397- ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
26398+ ret = ((atomic_read_unchecked(&dev_priv->fence_seq) - sequence)
26399 > VMW_FENCE_WRAP);
26400
26401 return ret;
26402@@ -131,7 +131,7 @@ int vmw_fallback_wait(struct vmw_private
26403
26404 if (fifo_idle)
26405 down_read(&fifo_state->rwsem);
26406- signal_seq = atomic_read(&dev_priv->fence_seq);
26407+ signal_seq = atomic_read_unchecked(&dev_priv->fence_seq);
26408 ret = 0;
26409
26410 for (;;) {
26411diff -urNp linux-3.0.4/drivers/hid/hid-core.c linux-3.0.4/drivers/hid/hid-core.c
26412--- linux-3.0.4/drivers/hid/hid-core.c 2011-07-21 22:17:23.000000000 -0400
26413+++ linux-3.0.4/drivers/hid/hid-core.c 2011-08-23 21:47:55.000000000 -0400
26414@@ -1923,7 +1923,7 @@ static bool hid_ignore(struct hid_device
26415
26416 int hid_add_device(struct hid_device *hdev)
26417 {
26418- static atomic_t id = ATOMIC_INIT(0);
26419+ static atomic_unchecked_t id = ATOMIC_INIT(0);
26420 int ret;
26421
26422 if (WARN_ON(hdev->status & HID_STAT_ADDED))
26423@@ -1938,7 +1938,7 @@ int hid_add_device(struct hid_device *hd
26424 /* XXX hack, any other cleaner solution after the driver core
26425 * is converted to allow more than 20 bytes as the device name? */
26426 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
26427- hdev->vendor, hdev->product, atomic_inc_return(&id));
26428+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
26429
26430 hid_debug_register(hdev, dev_name(&hdev->dev));
26431 ret = device_add(&hdev->dev);
26432diff -urNp linux-3.0.4/drivers/hid/usbhid/hiddev.c linux-3.0.4/drivers/hid/usbhid/hiddev.c
26433--- linux-3.0.4/drivers/hid/usbhid/hiddev.c 2011-07-21 22:17:23.000000000 -0400
26434+++ linux-3.0.4/drivers/hid/usbhid/hiddev.c 2011-08-23 21:47:55.000000000 -0400
26435@@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *fi
26436 break;
26437
26438 case HIDIOCAPPLICATION:
26439- if (arg < 0 || arg >= hid->maxapplication)
26440+ if (arg >= hid->maxapplication)
26441 break;
26442
26443 for (i = 0; i < hid->maxcollection; i++)
26444diff -urNp linux-3.0.4/drivers/hwmon/acpi_power_meter.c linux-3.0.4/drivers/hwmon/acpi_power_meter.c
26445--- linux-3.0.4/drivers/hwmon/acpi_power_meter.c 2011-07-21 22:17:23.000000000 -0400
26446+++ linux-3.0.4/drivers/hwmon/acpi_power_meter.c 2011-08-23 21:47:55.000000000 -0400
26447@@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *d
26448 return res;
26449
26450 temp /= 1000;
26451- if (temp < 0)
26452- return -EINVAL;
26453
26454 mutex_lock(&resource->lock);
26455 resource->trip[attr->index - 7] = temp;
26456diff -urNp linux-3.0.4/drivers/hwmon/sht15.c linux-3.0.4/drivers/hwmon/sht15.c
26457--- linux-3.0.4/drivers/hwmon/sht15.c 2011-07-21 22:17:23.000000000 -0400
26458+++ linux-3.0.4/drivers/hwmon/sht15.c 2011-08-23 21:47:55.000000000 -0400
26459@@ -166,7 +166,7 @@ struct sht15_data {
26460 int supply_uV;
26461 bool supply_uV_valid;
26462 struct work_struct update_supply_work;
26463- atomic_t interrupt_handled;
26464+ atomic_unchecked_t interrupt_handled;
26465 };
26466
26467 /**
26468@@ -509,13 +509,13 @@ static int sht15_measurement(struct sht1
26469 return ret;
26470
26471 gpio_direction_input(data->pdata->gpio_data);
26472- atomic_set(&data->interrupt_handled, 0);
26473+ atomic_set_unchecked(&data->interrupt_handled, 0);
26474
26475 enable_irq(gpio_to_irq(data->pdata->gpio_data));
26476 if (gpio_get_value(data->pdata->gpio_data) == 0) {
26477 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
26478 /* Only relevant if the interrupt hasn't occurred. */
26479- if (!atomic_read(&data->interrupt_handled))
26480+ if (!atomic_read_unchecked(&data->interrupt_handled))
26481 schedule_work(&data->read_work);
26482 }
26483 ret = wait_event_timeout(data->wait_queue,
26484@@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired
26485
26486 /* First disable the interrupt */
26487 disable_irq_nosync(irq);
26488- atomic_inc(&data->interrupt_handled);
26489+ atomic_inc_unchecked(&data->interrupt_handled);
26490 /* Then schedule a reading work struct */
26491 if (data->state != SHT15_READING_NOTHING)
26492 schedule_work(&data->read_work);
26493@@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct wo
26494 * If not, then start the interrupt again - care here as could
26495 * have gone low in meantime so verify it hasn't!
26496 */
26497- atomic_set(&data->interrupt_handled, 0);
26498+ atomic_set_unchecked(&data->interrupt_handled, 0);
26499 enable_irq(gpio_to_irq(data->pdata->gpio_data));
26500 /* If still not occurred or another handler has been scheduled */
26501 if (gpio_get_value(data->pdata->gpio_data)
26502- || atomic_read(&data->interrupt_handled))
26503+ || atomic_read_unchecked(&data->interrupt_handled))
26504 return;
26505 }
26506
26507diff -urNp linux-3.0.4/drivers/hwmon/w83791d.c linux-3.0.4/drivers/hwmon/w83791d.c
26508--- linux-3.0.4/drivers/hwmon/w83791d.c 2011-07-21 22:17:23.000000000 -0400
26509+++ linux-3.0.4/drivers/hwmon/w83791d.c 2011-08-23 21:47:55.000000000 -0400
26510@@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_cli
26511 struct i2c_board_info *info);
26512 static int w83791d_remove(struct i2c_client *client);
26513
26514-static int w83791d_read(struct i2c_client *client, u8 register);
26515-static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
26516+static int w83791d_read(struct i2c_client *client, u8 reg);
26517+static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
26518 static struct w83791d_data *w83791d_update_device(struct device *dev);
26519
26520 #ifdef DEBUG
26521diff -urNp linux-3.0.4/drivers/i2c/busses/i2c-amd756-s4882.c linux-3.0.4/drivers/i2c/busses/i2c-amd756-s4882.c
26522--- linux-3.0.4/drivers/i2c/busses/i2c-amd756-s4882.c 2011-07-21 22:17:23.000000000 -0400
26523+++ linux-3.0.4/drivers/i2c/busses/i2c-amd756-s4882.c 2011-08-23 21:47:55.000000000 -0400
26524@@ -43,7 +43,7 @@
26525 extern struct i2c_adapter amd756_smbus;
26526
26527 static struct i2c_adapter *s4882_adapter;
26528-static struct i2c_algorithm *s4882_algo;
26529+static i2c_algorithm_no_const *s4882_algo;
26530
26531 /* Wrapper access functions for multiplexed SMBus */
26532 static DEFINE_MUTEX(amd756_lock);
26533diff -urNp linux-3.0.4/drivers/i2c/busses/i2c-nforce2-s4985.c linux-3.0.4/drivers/i2c/busses/i2c-nforce2-s4985.c
26534--- linux-3.0.4/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-07-21 22:17:23.000000000 -0400
26535+++ linux-3.0.4/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-08-23 21:47:55.000000000 -0400
26536@@ -41,7 +41,7 @@
26537 extern struct i2c_adapter *nforce2_smbus;
26538
26539 static struct i2c_adapter *s4985_adapter;
26540-static struct i2c_algorithm *s4985_algo;
26541+static i2c_algorithm_no_const *s4985_algo;
26542
26543 /* Wrapper access functions for multiplexed SMBus */
26544 static DEFINE_MUTEX(nforce2_lock);
26545diff -urNp linux-3.0.4/drivers/i2c/i2c-mux.c linux-3.0.4/drivers/i2c/i2c-mux.c
26546--- linux-3.0.4/drivers/i2c/i2c-mux.c 2011-07-21 22:17:23.000000000 -0400
26547+++ linux-3.0.4/drivers/i2c/i2c-mux.c 2011-08-23 21:47:55.000000000 -0400
26548@@ -28,7 +28,7 @@
26549 /* multiplexer per channel data */
26550 struct i2c_mux_priv {
26551 struct i2c_adapter adap;
26552- struct i2c_algorithm algo;
26553+ i2c_algorithm_no_const algo;
26554
26555 struct i2c_adapter *parent;
26556 void *mux_dev; /* the mux chip/device */
26557diff -urNp linux-3.0.4/drivers/ide/ide-cd.c linux-3.0.4/drivers/ide/ide-cd.c
26558--- linux-3.0.4/drivers/ide/ide-cd.c 2011-07-21 22:17:23.000000000 -0400
26559+++ linux-3.0.4/drivers/ide/ide-cd.c 2011-08-23 21:47:55.000000000 -0400
26560@@ -769,7 +769,7 @@ static void cdrom_do_block_pc(ide_drive_
26561 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
26562 if ((unsigned long)buf & alignment
26563 || blk_rq_bytes(rq) & q->dma_pad_mask
26564- || object_is_on_stack(buf))
26565+ || object_starts_on_stack(buf))
26566 drive->dma = 0;
26567 }
26568 }
26569diff -urNp linux-3.0.4/drivers/ide/ide-floppy.c linux-3.0.4/drivers/ide/ide-floppy.c
26570--- linux-3.0.4/drivers/ide/ide-floppy.c 2011-07-21 22:17:23.000000000 -0400
26571+++ linux-3.0.4/drivers/ide/ide-floppy.c 2011-08-23 21:48:14.000000000 -0400
26572@@ -379,6 +379,8 @@ static int ide_floppy_get_capacity(ide_d
26573 u8 pc_buf[256], header_len, desc_cnt;
26574 int i, rc = 1, blocks, length;
26575
26576+ pax_track_stack();
26577+
26578 ide_debug_log(IDE_DBG_FUNC, "enter");
26579
26580 drive->bios_cyl = 0;
26581diff -urNp linux-3.0.4/drivers/ide/setup-pci.c linux-3.0.4/drivers/ide/setup-pci.c
26582--- linux-3.0.4/drivers/ide/setup-pci.c 2011-07-21 22:17:23.000000000 -0400
26583+++ linux-3.0.4/drivers/ide/setup-pci.c 2011-08-23 21:48:14.000000000 -0400
26584@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
26585 int ret, i, n_ports = dev2 ? 4 : 2;
26586 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
26587
26588+ pax_track_stack();
26589+
26590 for (i = 0; i < n_ports / 2; i++) {
26591 ret = ide_setup_pci_controller(pdev[i], d, !i);
26592 if (ret < 0)
26593diff -urNp linux-3.0.4/drivers/infiniband/core/cm.c linux-3.0.4/drivers/infiniband/core/cm.c
26594--- linux-3.0.4/drivers/infiniband/core/cm.c 2011-07-21 22:17:23.000000000 -0400
26595+++ linux-3.0.4/drivers/infiniband/core/cm.c 2011-08-23 21:47:55.000000000 -0400
26596@@ -113,7 +113,7 @@ static char const counter_group_names[CM
26597
26598 struct cm_counter_group {
26599 struct kobject obj;
26600- atomic_long_t counter[CM_ATTR_COUNT];
26601+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
26602 };
26603
26604 struct cm_counter_attribute {
26605@@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm
26606 struct ib_mad_send_buf *msg = NULL;
26607 int ret;
26608
26609- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
26610+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
26611 counter[CM_REQ_COUNTER]);
26612
26613 /* Quick state check to discard duplicate REQs. */
26614@@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm
26615 if (!cm_id_priv)
26616 return;
26617
26618- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
26619+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
26620 counter[CM_REP_COUNTER]);
26621 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
26622 if (ret)
26623@@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work
26624 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
26625 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
26626 spin_unlock_irq(&cm_id_priv->lock);
26627- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
26628+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
26629 counter[CM_RTU_COUNTER]);
26630 goto out;
26631 }
26632@@ -2115,7 +2115,7 @@ static int cm_dreq_handler(struct cm_wor
26633 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
26634 dreq_msg->local_comm_id);
26635 if (!cm_id_priv) {
26636- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
26637+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
26638 counter[CM_DREQ_COUNTER]);
26639 cm_issue_drep(work->port, work->mad_recv_wc);
26640 return -EINVAL;
26641@@ -2140,7 +2140,7 @@ static int cm_dreq_handler(struct cm_wor
26642 case IB_CM_MRA_REP_RCVD:
26643 break;
26644 case IB_CM_TIMEWAIT:
26645- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
26646+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
26647 counter[CM_DREQ_COUNTER]);
26648 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
26649 goto unlock;
26650@@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_wor
26651 cm_free_msg(msg);
26652 goto deref;
26653 case IB_CM_DREQ_RCVD:
26654- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
26655+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
26656 counter[CM_DREQ_COUNTER]);
26657 goto unlock;
26658 default:
26659@@ -2521,7 +2521,7 @@ static int cm_mra_handler(struct cm_work
26660 ib_modify_mad(cm_id_priv->av.port->mad_agent,
26661 cm_id_priv->msg, timeout)) {
26662 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
26663- atomic_long_inc(&work->port->
26664+ atomic_long_inc_unchecked(&work->port->
26665 counter_group[CM_RECV_DUPLICATES].
26666 counter[CM_MRA_COUNTER]);
26667 goto out;
26668@@ -2530,7 +2530,7 @@ static int cm_mra_handler(struct cm_work
26669 break;
26670 case IB_CM_MRA_REQ_RCVD:
26671 case IB_CM_MRA_REP_RCVD:
26672- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
26673+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
26674 counter[CM_MRA_COUNTER]);
26675 /* fall through */
26676 default:
26677@@ -2692,7 +2692,7 @@ static int cm_lap_handler(struct cm_work
26678 case IB_CM_LAP_IDLE:
26679 break;
26680 case IB_CM_MRA_LAP_SENT:
26681- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
26682+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
26683 counter[CM_LAP_COUNTER]);
26684 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
26685 goto unlock;
26686@@ -2708,7 +2708,7 @@ static int cm_lap_handler(struct cm_work
26687 cm_free_msg(msg);
26688 goto deref;
26689 case IB_CM_LAP_RCVD:
26690- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
26691+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
26692 counter[CM_LAP_COUNTER]);
26693 goto unlock;
26694 default:
26695@@ -2992,7 +2992,7 @@ static int cm_sidr_req_handler(struct cm
26696 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
26697 if (cur_cm_id_priv) {
26698 spin_unlock_irq(&cm.lock);
26699- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
26700+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
26701 counter[CM_SIDR_REQ_COUNTER]);
26702 goto out; /* Duplicate message. */
26703 }
26704@@ -3204,10 +3204,10 @@ static void cm_send_handler(struct ib_ma
26705 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
26706 msg->retries = 1;
26707
26708- atomic_long_add(1 + msg->retries,
26709+ atomic_long_add_unchecked(1 + msg->retries,
26710 &port->counter_group[CM_XMIT].counter[attr_index]);
26711 if (msg->retries)
26712- atomic_long_add(msg->retries,
26713+ atomic_long_add_unchecked(msg->retries,
26714 &port->counter_group[CM_XMIT_RETRIES].
26715 counter[attr_index]);
26716
26717@@ -3417,7 +3417,7 @@ static void cm_recv_handler(struct ib_ma
26718 }
26719
26720 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
26721- atomic_long_inc(&port->counter_group[CM_RECV].
26722+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
26723 counter[attr_id - CM_ATTR_ID_OFFSET]);
26724
26725 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
26726@@ -3615,7 +3615,7 @@ static ssize_t cm_show_counter(struct ko
26727 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
26728
26729 return sprintf(buf, "%ld\n",
26730- atomic_long_read(&group->counter[cm_attr->index]));
26731+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
26732 }
26733
26734 static const struct sysfs_ops cm_counter_ops = {
26735diff -urNp linux-3.0.4/drivers/infiniband/core/fmr_pool.c linux-3.0.4/drivers/infiniband/core/fmr_pool.c
26736--- linux-3.0.4/drivers/infiniband/core/fmr_pool.c 2011-07-21 22:17:23.000000000 -0400
26737+++ linux-3.0.4/drivers/infiniband/core/fmr_pool.c 2011-08-23 21:47:55.000000000 -0400
26738@@ -97,8 +97,8 @@ struct ib_fmr_pool {
26739
26740 struct task_struct *thread;
26741
26742- atomic_t req_ser;
26743- atomic_t flush_ser;
26744+ atomic_unchecked_t req_ser;
26745+ atomic_unchecked_t flush_ser;
26746
26747 wait_queue_head_t force_wait;
26748 };
26749@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
26750 struct ib_fmr_pool *pool = pool_ptr;
26751
26752 do {
26753- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
26754+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
26755 ib_fmr_batch_release(pool);
26756
26757- atomic_inc(&pool->flush_ser);
26758+ atomic_inc_unchecked(&pool->flush_ser);
26759 wake_up_interruptible(&pool->force_wait);
26760
26761 if (pool->flush_function)
26762@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
26763 }
26764
26765 set_current_state(TASK_INTERRUPTIBLE);
26766- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
26767+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
26768 !kthread_should_stop())
26769 schedule();
26770 __set_current_state(TASK_RUNNING);
26771@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
26772 pool->dirty_watermark = params->dirty_watermark;
26773 pool->dirty_len = 0;
26774 spin_lock_init(&pool->pool_lock);
26775- atomic_set(&pool->req_ser, 0);
26776- atomic_set(&pool->flush_ser, 0);
26777+ atomic_set_unchecked(&pool->req_ser, 0);
26778+ atomic_set_unchecked(&pool->flush_ser, 0);
26779 init_waitqueue_head(&pool->force_wait);
26780
26781 pool->thread = kthread_run(ib_fmr_cleanup_thread,
26782@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
26783 }
26784 spin_unlock_irq(&pool->pool_lock);
26785
26786- serial = atomic_inc_return(&pool->req_ser);
26787+ serial = atomic_inc_return_unchecked(&pool->req_ser);
26788 wake_up_process(pool->thread);
26789
26790 if (wait_event_interruptible(pool->force_wait,
26791- atomic_read(&pool->flush_ser) - serial >= 0))
26792+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
26793 return -EINTR;
26794
26795 return 0;
26796@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
26797 } else {
26798 list_add_tail(&fmr->list, &pool->dirty_list);
26799 if (++pool->dirty_len >= pool->dirty_watermark) {
26800- atomic_inc(&pool->req_ser);
26801+ atomic_inc_unchecked(&pool->req_ser);
26802 wake_up_process(pool->thread);
26803 }
26804 }
26805diff -urNp linux-3.0.4/drivers/infiniband/hw/cxgb4/mem.c linux-3.0.4/drivers/infiniband/hw/cxgb4/mem.c
26806--- linux-3.0.4/drivers/infiniband/hw/cxgb4/mem.c 2011-07-21 22:17:23.000000000 -0400
26807+++ linux-3.0.4/drivers/infiniband/hw/cxgb4/mem.c 2011-08-23 21:47:55.000000000 -0400
26808@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_r
26809 int err;
26810 struct fw_ri_tpte tpt;
26811 u32 stag_idx;
26812- static atomic_t key;
26813+ static atomic_unchecked_t key;
26814
26815 if (c4iw_fatal_error(rdev))
26816 return -EIO;
26817@@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_r
26818 &rdev->resource.tpt_fifo_lock);
26819 if (!stag_idx)
26820 return -ENOMEM;
26821- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
26822+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
26823 }
26824 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
26825 __func__, stag_state, type, pdid, stag_idx);
26826diff -urNp linux-3.0.4/drivers/infiniband/hw/ipath/ipath_fs.c linux-3.0.4/drivers/infiniband/hw/ipath/ipath_fs.c
26827--- linux-3.0.4/drivers/infiniband/hw/ipath/ipath_fs.c 2011-07-21 22:17:23.000000000 -0400
26828+++ linux-3.0.4/drivers/infiniband/hw/ipath/ipath_fs.c 2011-08-23 21:48:14.000000000 -0400
26829@@ -113,6 +113,8 @@ static ssize_t atomic_counters_read(stru
26830 struct infinipath_counters counters;
26831 struct ipath_devdata *dd;
26832
26833+ pax_track_stack();
26834+
26835 dd = file->f_path.dentry->d_inode->i_private;
26836 dd->ipath_f_read_counters(dd, &counters);
26837
26838diff -urNp linux-3.0.4/drivers/infiniband/hw/ipath/ipath_rc.c linux-3.0.4/drivers/infiniband/hw/ipath/ipath_rc.c
26839--- linux-3.0.4/drivers/infiniband/hw/ipath/ipath_rc.c 2011-07-21 22:17:23.000000000 -0400
26840+++ linux-3.0.4/drivers/infiniband/hw/ipath/ipath_rc.c 2011-08-23 21:47:55.000000000 -0400
26841@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *de
26842 struct ib_atomic_eth *ateth;
26843 struct ipath_ack_entry *e;
26844 u64 vaddr;
26845- atomic64_t *maddr;
26846+ atomic64_unchecked_t *maddr;
26847 u64 sdata;
26848 u32 rkey;
26849 u8 next;
26850@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *de
26851 IB_ACCESS_REMOTE_ATOMIC)))
26852 goto nack_acc_unlck;
26853 /* Perform atomic OP and save result. */
26854- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
26855+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
26856 sdata = be64_to_cpu(ateth->swap_data);
26857 e = &qp->s_ack_queue[qp->r_head_ack_queue];
26858 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
26859- (u64) atomic64_add_return(sdata, maddr) - sdata :
26860+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
26861 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
26862 be64_to_cpu(ateth->compare_data),
26863 sdata);
26864diff -urNp linux-3.0.4/drivers/infiniband/hw/ipath/ipath_ruc.c linux-3.0.4/drivers/infiniband/hw/ipath/ipath_ruc.c
26865--- linux-3.0.4/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-07-21 22:17:23.000000000 -0400
26866+++ linux-3.0.4/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-08-23 21:47:55.000000000 -0400
26867@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ip
26868 unsigned long flags;
26869 struct ib_wc wc;
26870 u64 sdata;
26871- atomic64_t *maddr;
26872+ atomic64_unchecked_t *maddr;
26873 enum ib_wc_status send_status;
26874
26875 /*
26876@@ -382,11 +382,11 @@ again:
26877 IB_ACCESS_REMOTE_ATOMIC)))
26878 goto acc_err;
26879 /* Perform atomic OP and save result. */
26880- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
26881+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
26882 sdata = wqe->wr.wr.atomic.compare_add;
26883 *(u64 *) sqp->s_sge.sge.vaddr =
26884 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
26885- (u64) atomic64_add_return(sdata, maddr) - sdata :
26886+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
26887 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
26888 sdata, wqe->wr.wr.atomic.swap);
26889 goto send_comp;
26890diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes.c linux-3.0.4/drivers/infiniband/hw/nes/nes.c
26891--- linux-3.0.4/drivers/infiniband/hw/nes/nes.c 2011-07-21 22:17:23.000000000 -0400
26892+++ linux-3.0.4/drivers/infiniband/hw/nes/nes.c 2011-08-23 21:47:55.000000000 -0400
26893@@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
26894 LIST_HEAD(nes_adapter_list);
26895 static LIST_HEAD(nes_dev_list);
26896
26897-atomic_t qps_destroyed;
26898+atomic_unchecked_t qps_destroyed;
26899
26900 static unsigned int ee_flsh_adapter;
26901 static unsigned int sysfs_nonidx_addr;
26902@@ -275,7 +275,7 @@ static void nes_cqp_rem_ref_callback(str
26903 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
26904 struct nes_adapter *nesadapter = nesdev->nesadapter;
26905
26906- atomic_inc(&qps_destroyed);
26907+ atomic_inc_unchecked(&qps_destroyed);
26908
26909 /* Free the control structures */
26910
26911diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes_cm.c linux-3.0.4/drivers/infiniband/hw/nes/nes_cm.c
26912--- linux-3.0.4/drivers/infiniband/hw/nes/nes_cm.c 2011-07-21 22:17:23.000000000 -0400
26913+++ linux-3.0.4/drivers/infiniband/hw/nes/nes_cm.c 2011-08-23 21:47:55.000000000 -0400
26914@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
26915 u32 cm_packets_retrans;
26916 u32 cm_packets_created;
26917 u32 cm_packets_received;
26918-atomic_t cm_listens_created;
26919-atomic_t cm_listens_destroyed;
26920+atomic_unchecked_t cm_listens_created;
26921+atomic_unchecked_t cm_listens_destroyed;
26922 u32 cm_backlog_drops;
26923-atomic_t cm_loopbacks;
26924-atomic_t cm_nodes_created;
26925-atomic_t cm_nodes_destroyed;
26926-atomic_t cm_accel_dropped_pkts;
26927-atomic_t cm_resets_recvd;
26928+atomic_unchecked_t cm_loopbacks;
26929+atomic_unchecked_t cm_nodes_created;
26930+atomic_unchecked_t cm_nodes_destroyed;
26931+atomic_unchecked_t cm_accel_dropped_pkts;
26932+atomic_unchecked_t cm_resets_recvd;
26933
26934 static inline int mini_cm_accelerated(struct nes_cm_core *,
26935 struct nes_cm_node *);
26936@@ -151,13 +151,13 @@ static struct nes_cm_ops nes_cm_api = {
26937
26938 static struct nes_cm_core *g_cm_core;
26939
26940-atomic_t cm_connects;
26941-atomic_t cm_accepts;
26942-atomic_t cm_disconnects;
26943-atomic_t cm_closes;
26944-atomic_t cm_connecteds;
26945-atomic_t cm_connect_reqs;
26946-atomic_t cm_rejects;
26947+atomic_unchecked_t cm_connects;
26948+atomic_unchecked_t cm_accepts;
26949+atomic_unchecked_t cm_disconnects;
26950+atomic_unchecked_t cm_closes;
26951+atomic_unchecked_t cm_connecteds;
26952+atomic_unchecked_t cm_connect_reqs;
26953+atomic_unchecked_t cm_rejects;
26954
26955
26956 /**
26957@@ -1045,7 +1045,7 @@ static int mini_cm_dec_refcnt_listen(str
26958 kfree(listener);
26959 listener = NULL;
26960 ret = 0;
26961- atomic_inc(&cm_listens_destroyed);
26962+ atomic_inc_unchecked(&cm_listens_destroyed);
26963 } else {
26964 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
26965 }
26966@@ -1240,7 +1240,7 @@ static struct nes_cm_node *make_cm_node(
26967 cm_node->rem_mac);
26968
26969 add_hte_node(cm_core, cm_node);
26970- atomic_inc(&cm_nodes_created);
26971+ atomic_inc_unchecked(&cm_nodes_created);
26972
26973 return cm_node;
26974 }
26975@@ -1298,7 +1298,7 @@ static int rem_ref_cm_node(struct nes_cm
26976 }
26977
26978 atomic_dec(&cm_core->node_cnt);
26979- atomic_inc(&cm_nodes_destroyed);
26980+ atomic_inc_unchecked(&cm_nodes_destroyed);
26981 nesqp = cm_node->nesqp;
26982 if (nesqp) {
26983 nesqp->cm_node = NULL;
26984@@ -1365,7 +1365,7 @@ static int process_options(struct nes_cm
26985
26986 static void drop_packet(struct sk_buff *skb)
26987 {
26988- atomic_inc(&cm_accel_dropped_pkts);
26989+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
26990 dev_kfree_skb_any(skb);
26991 }
26992
26993@@ -1428,7 +1428,7 @@ static void handle_rst_pkt(struct nes_cm
26994 {
26995
26996 int reset = 0; /* whether to send reset in case of err.. */
26997- atomic_inc(&cm_resets_recvd);
26998+ atomic_inc_unchecked(&cm_resets_recvd);
26999 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
27000 " refcnt=%d\n", cm_node, cm_node->state,
27001 atomic_read(&cm_node->ref_count));
27002@@ -2057,7 +2057,7 @@ static struct nes_cm_node *mini_cm_conne
27003 rem_ref_cm_node(cm_node->cm_core, cm_node);
27004 return NULL;
27005 }
27006- atomic_inc(&cm_loopbacks);
27007+ atomic_inc_unchecked(&cm_loopbacks);
27008 loopbackremotenode->loopbackpartner = cm_node;
27009 loopbackremotenode->tcp_cntxt.rcv_wscale =
27010 NES_CM_DEFAULT_RCV_WND_SCALE;
27011@@ -2332,7 +2332,7 @@ static int mini_cm_recv_pkt(struct nes_c
27012 add_ref_cm_node(cm_node);
27013 } else if (cm_node->state == NES_CM_STATE_TSA) {
27014 rem_ref_cm_node(cm_core, cm_node);
27015- atomic_inc(&cm_accel_dropped_pkts);
27016+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
27017 dev_kfree_skb_any(skb);
27018 break;
27019 }
27020@@ -2638,7 +2638,7 @@ static int nes_cm_disconn_true(struct ne
27021
27022 if ((cm_id) && (cm_id->event_handler)) {
27023 if (issue_disconn) {
27024- atomic_inc(&cm_disconnects);
27025+ atomic_inc_unchecked(&cm_disconnects);
27026 cm_event.event = IW_CM_EVENT_DISCONNECT;
27027 cm_event.status = disconn_status;
27028 cm_event.local_addr = cm_id->local_addr;
27029@@ -2660,7 +2660,7 @@ static int nes_cm_disconn_true(struct ne
27030 }
27031
27032 if (issue_close) {
27033- atomic_inc(&cm_closes);
27034+ atomic_inc_unchecked(&cm_closes);
27035 nes_disconnect(nesqp, 1);
27036
27037 cm_id->provider_data = nesqp;
27038@@ -2791,7 +2791,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
27039
27040 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
27041 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
27042- atomic_inc(&cm_accepts);
27043+ atomic_inc_unchecked(&cm_accepts);
27044
27045 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
27046 netdev_refcnt_read(nesvnic->netdev));
27047@@ -3001,7 +3001,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
27048
27049 struct nes_cm_core *cm_core;
27050
27051- atomic_inc(&cm_rejects);
27052+ atomic_inc_unchecked(&cm_rejects);
27053 cm_node = (struct nes_cm_node *) cm_id->provider_data;
27054 loopback = cm_node->loopbackpartner;
27055 cm_core = cm_node->cm_core;
27056@@ -3067,7 +3067,7 @@ int nes_connect(struct iw_cm_id *cm_id,
27057 ntohl(cm_id->local_addr.sin_addr.s_addr),
27058 ntohs(cm_id->local_addr.sin_port));
27059
27060- atomic_inc(&cm_connects);
27061+ atomic_inc_unchecked(&cm_connects);
27062 nesqp->active_conn = 1;
27063
27064 /* cache the cm_id in the qp */
27065@@ -3173,7 +3173,7 @@ int nes_create_listen(struct iw_cm_id *c
27066 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
27067 return err;
27068 }
27069- atomic_inc(&cm_listens_created);
27070+ atomic_inc_unchecked(&cm_listens_created);
27071 }
27072
27073 cm_id->add_ref(cm_id);
27074@@ -3278,7 +3278,7 @@ static void cm_event_connected(struct ne
27075 if (nesqp->destroyed) {
27076 return;
27077 }
27078- atomic_inc(&cm_connecteds);
27079+ atomic_inc_unchecked(&cm_connecteds);
27080 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
27081 " local port 0x%04X. jiffies = %lu.\n",
27082 nesqp->hwqp.qp_id,
27083@@ -3493,7 +3493,7 @@ static void cm_event_reset(struct nes_cm
27084
27085 cm_id->add_ref(cm_id);
27086 ret = cm_id->event_handler(cm_id, &cm_event);
27087- atomic_inc(&cm_closes);
27088+ atomic_inc_unchecked(&cm_closes);
27089 cm_event.event = IW_CM_EVENT_CLOSE;
27090 cm_event.status = 0;
27091 cm_event.provider_data = cm_id->provider_data;
27092@@ -3529,7 +3529,7 @@ static void cm_event_mpa_req(struct nes_
27093 return;
27094 cm_id = cm_node->cm_id;
27095
27096- atomic_inc(&cm_connect_reqs);
27097+ atomic_inc_unchecked(&cm_connect_reqs);
27098 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
27099 cm_node, cm_id, jiffies);
27100
27101@@ -3567,7 +3567,7 @@ static void cm_event_mpa_reject(struct n
27102 return;
27103 cm_id = cm_node->cm_id;
27104
27105- atomic_inc(&cm_connect_reqs);
27106+ atomic_inc_unchecked(&cm_connect_reqs);
27107 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
27108 cm_node, cm_id, jiffies);
27109
27110diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes.h linux-3.0.4/drivers/infiniband/hw/nes/nes.h
27111--- linux-3.0.4/drivers/infiniband/hw/nes/nes.h 2011-07-21 22:17:23.000000000 -0400
27112+++ linux-3.0.4/drivers/infiniband/hw/nes/nes.h 2011-08-23 21:47:55.000000000 -0400
27113@@ -175,17 +175,17 @@ extern unsigned int nes_debug_level;
27114 extern unsigned int wqm_quanta;
27115 extern struct list_head nes_adapter_list;
27116
27117-extern atomic_t cm_connects;
27118-extern atomic_t cm_accepts;
27119-extern atomic_t cm_disconnects;
27120-extern atomic_t cm_closes;
27121-extern atomic_t cm_connecteds;
27122-extern atomic_t cm_connect_reqs;
27123-extern atomic_t cm_rejects;
27124-extern atomic_t mod_qp_timouts;
27125-extern atomic_t qps_created;
27126-extern atomic_t qps_destroyed;
27127-extern atomic_t sw_qps_destroyed;
27128+extern atomic_unchecked_t cm_connects;
27129+extern atomic_unchecked_t cm_accepts;
27130+extern atomic_unchecked_t cm_disconnects;
27131+extern atomic_unchecked_t cm_closes;
27132+extern atomic_unchecked_t cm_connecteds;
27133+extern atomic_unchecked_t cm_connect_reqs;
27134+extern atomic_unchecked_t cm_rejects;
27135+extern atomic_unchecked_t mod_qp_timouts;
27136+extern atomic_unchecked_t qps_created;
27137+extern atomic_unchecked_t qps_destroyed;
27138+extern atomic_unchecked_t sw_qps_destroyed;
27139 extern u32 mh_detected;
27140 extern u32 mh_pauses_sent;
27141 extern u32 cm_packets_sent;
27142@@ -194,14 +194,14 @@ extern u32 cm_packets_created;
27143 extern u32 cm_packets_received;
27144 extern u32 cm_packets_dropped;
27145 extern u32 cm_packets_retrans;
27146-extern atomic_t cm_listens_created;
27147-extern atomic_t cm_listens_destroyed;
27148+extern atomic_unchecked_t cm_listens_created;
27149+extern atomic_unchecked_t cm_listens_destroyed;
27150 extern u32 cm_backlog_drops;
27151-extern atomic_t cm_loopbacks;
27152-extern atomic_t cm_nodes_created;
27153-extern atomic_t cm_nodes_destroyed;
27154-extern atomic_t cm_accel_dropped_pkts;
27155-extern atomic_t cm_resets_recvd;
27156+extern atomic_unchecked_t cm_loopbacks;
27157+extern atomic_unchecked_t cm_nodes_created;
27158+extern atomic_unchecked_t cm_nodes_destroyed;
27159+extern atomic_unchecked_t cm_accel_dropped_pkts;
27160+extern atomic_unchecked_t cm_resets_recvd;
27161
27162 extern u32 int_mod_timer_init;
27163 extern u32 int_mod_cq_depth_256;
27164diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes_nic.c linux-3.0.4/drivers/infiniband/hw/nes/nes_nic.c
27165--- linux-3.0.4/drivers/infiniband/hw/nes/nes_nic.c 2011-07-21 22:17:23.000000000 -0400
27166+++ linux-3.0.4/drivers/infiniband/hw/nes/nes_nic.c 2011-08-23 21:47:55.000000000 -0400
27167@@ -1274,31 +1274,31 @@ static void nes_netdev_get_ethtool_stats
27168 target_stat_values[++index] = mh_detected;
27169 target_stat_values[++index] = mh_pauses_sent;
27170 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
27171- target_stat_values[++index] = atomic_read(&cm_connects);
27172- target_stat_values[++index] = atomic_read(&cm_accepts);
27173- target_stat_values[++index] = atomic_read(&cm_disconnects);
27174- target_stat_values[++index] = atomic_read(&cm_connecteds);
27175- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
27176- target_stat_values[++index] = atomic_read(&cm_rejects);
27177- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
27178- target_stat_values[++index] = atomic_read(&qps_created);
27179- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
27180- target_stat_values[++index] = atomic_read(&qps_destroyed);
27181- target_stat_values[++index] = atomic_read(&cm_closes);
27182+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
27183+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
27184+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
27185+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
27186+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
27187+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
27188+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
27189+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
27190+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
27191+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
27192+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
27193 target_stat_values[++index] = cm_packets_sent;
27194 target_stat_values[++index] = cm_packets_bounced;
27195 target_stat_values[++index] = cm_packets_created;
27196 target_stat_values[++index] = cm_packets_received;
27197 target_stat_values[++index] = cm_packets_dropped;
27198 target_stat_values[++index] = cm_packets_retrans;
27199- target_stat_values[++index] = atomic_read(&cm_listens_created);
27200- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
27201+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
27202+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
27203 target_stat_values[++index] = cm_backlog_drops;
27204- target_stat_values[++index] = atomic_read(&cm_loopbacks);
27205- target_stat_values[++index] = atomic_read(&cm_nodes_created);
27206- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
27207- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
27208- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
27209+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
27210+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
27211+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
27212+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
27213+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
27214 target_stat_values[++index] = nesadapter->free_4kpbl;
27215 target_stat_values[++index] = nesadapter->free_256pbl;
27216 target_stat_values[++index] = int_mod_timer_init;
27217diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes_verbs.c linux-3.0.4/drivers/infiniband/hw/nes/nes_verbs.c
27218--- linux-3.0.4/drivers/infiniband/hw/nes/nes_verbs.c 2011-07-21 22:17:23.000000000 -0400
27219+++ linux-3.0.4/drivers/infiniband/hw/nes/nes_verbs.c 2011-08-23 21:47:55.000000000 -0400
27220@@ -46,9 +46,9 @@
27221
27222 #include <rdma/ib_umem.h>
27223
27224-atomic_t mod_qp_timouts;
27225-atomic_t qps_created;
27226-atomic_t sw_qps_destroyed;
27227+atomic_unchecked_t mod_qp_timouts;
27228+atomic_unchecked_t qps_created;
27229+atomic_unchecked_t sw_qps_destroyed;
27230
27231 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
27232
27233@@ -1141,7 +1141,7 @@ static struct ib_qp *nes_create_qp(struc
27234 if (init_attr->create_flags)
27235 return ERR_PTR(-EINVAL);
27236
27237- atomic_inc(&qps_created);
27238+ atomic_inc_unchecked(&qps_created);
27239 switch (init_attr->qp_type) {
27240 case IB_QPT_RC:
27241 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
27242@@ -1470,7 +1470,7 @@ static int nes_destroy_qp(struct ib_qp *
27243 struct iw_cm_event cm_event;
27244 int ret;
27245
27246- atomic_inc(&sw_qps_destroyed);
27247+ atomic_inc_unchecked(&sw_qps_destroyed);
27248 nesqp->destroyed = 1;
27249
27250 /* Blow away the connection if it exists. */
27251diff -urNp linux-3.0.4/drivers/infiniband/hw/qib/qib.h linux-3.0.4/drivers/infiniband/hw/qib/qib.h
27252--- linux-3.0.4/drivers/infiniband/hw/qib/qib.h 2011-07-21 22:17:23.000000000 -0400
27253+++ linux-3.0.4/drivers/infiniband/hw/qib/qib.h 2011-08-23 21:47:55.000000000 -0400
27254@@ -51,6 +51,7 @@
27255 #include <linux/completion.h>
27256 #include <linux/kref.h>
27257 #include <linux/sched.h>
27258+#include <linux/slab.h>
27259
27260 #include "qib_common.h"
27261 #include "qib_verbs.h"
27262diff -urNp linux-3.0.4/drivers/input/gameport/gameport.c linux-3.0.4/drivers/input/gameport/gameport.c
27263--- linux-3.0.4/drivers/input/gameport/gameport.c 2011-07-21 22:17:23.000000000 -0400
27264+++ linux-3.0.4/drivers/input/gameport/gameport.c 2011-08-23 21:47:55.000000000 -0400
27265@@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
27266 */
27267 static void gameport_init_port(struct gameport *gameport)
27268 {
27269- static atomic_t gameport_no = ATOMIC_INIT(0);
27270+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
27271
27272 __module_get(THIS_MODULE);
27273
27274 mutex_init(&gameport->drv_mutex);
27275 device_initialize(&gameport->dev);
27276 dev_set_name(&gameport->dev, "gameport%lu",
27277- (unsigned long)atomic_inc_return(&gameport_no) - 1);
27278+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
27279 gameport->dev.bus = &gameport_bus;
27280 gameport->dev.release = gameport_release_port;
27281 if (gameport->parent)
27282diff -urNp linux-3.0.4/drivers/input/input.c linux-3.0.4/drivers/input/input.c
27283--- linux-3.0.4/drivers/input/input.c 2011-07-21 22:17:23.000000000 -0400
27284+++ linux-3.0.4/drivers/input/input.c 2011-08-23 21:47:55.000000000 -0400
27285@@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struc
27286 */
27287 int input_register_device(struct input_dev *dev)
27288 {
27289- static atomic_t input_no = ATOMIC_INIT(0);
27290+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
27291 struct input_handler *handler;
27292 const char *path;
27293 int error;
27294@@ -1851,7 +1851,7 @@ int input_register_device(struct input_d
27295 dev->setkeycode = input_default_setkeycode;
27296
27297 dev_set_name(&dev->dev, "input%ld",
27298- (unsigned long) atomic_inc_return(&input_no) - 1);
27299+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
27300
27301 error = device_add(&dev->dev);
27302 if (error)
27303diff -urNp linux-3.0.4/drivers/input/joystick/sidewinder.c linux-3.0.4/drivers/input/joystick/sidewinder.c
27304--- linux-3.0.4/drivers/input/joystick/sidewinder.c 2011-07-21 22:17:23.000000000 -0400
27305+++ linux-3.0.4/drivers/input/joystick/sidewinder.c 2011-08-23 21:48:14.000000000 -0400
27306@@ -30,6 +30,7 @@
27307 #include <linux/kernel.h>
27308 #include <linux/module.h>
27309 #include <linux/slab.h>
27310+#include <linux/sched.h>
27311 #include <linux/init.h>
27312 #include <linux/input.h>
27313 #include <linux/gameport.h>
27314@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
27315 unsigned char buf[SW_LENGTH];
27316 int i;
27317
27318+ pax_track_stack();
27319+
27320 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
27321
27322 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
27323diff -urNp linux-3.0.4/drivers/input/joystick/xpad.c linux-3.0.4/drivers/input/joystick/xpad.c
27324--- linux-3.0.4/drivers/input/joystick/xpad.c 2011-07-21 22:17:23.000000000 -0400
27325+++ linux-3.0.4/drivers/input/joystick/xpad.c 2011-08-23 21:47:55.000000000 -0400
27326@@ -689,7 +689,7 @@ static void xpad_led_set(struct led_clas
27327
27328 static int xpad_led_probe(struct usb_xpad *xpad)
27329 {
27330- static atomic_t led_seq = ATOMIC_INIT(0);
27331+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
27332 long led_no;
27333 struct xpad_led *led;
27334 struct led_classdev *led_cdev;
27335@@ -702,7 +702,7 @@ static int xpad_led_probe(struct usb_xpa
27336 if (!led)
27337 return -ENOMEM;
27338
27339- led_no = (long)atomic_inc_return(&led_seq) - 1;
27340+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
27341
27342 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
27343 led->xpad = xpad;
27344diff -urNp linux-3.0.4/drivers/input/mousedev.c linux-3.0.4/drivers/input/mousedev.c
27345--- linux-3.0.4/drivers/input/mousedev.c 2011-07-21 22:17:23.000000000 -0400
27346+++ linux-3.0.4/drivers/input/mousedev.c 2011-08-23 21:47:55.000000000 -0400
27347@@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file
27348
27349 spin_unlock_irq(&client->packet_lock);
27350
27351- if (copy_to_user(buffer, data, count))
27352+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
27353 return -EFAULT;
27354
27355 return count;
27356diff -urNp linux-3.0.4/drivers/input/serio/serio.c linux-3.0.4/drivers/input/serio/serio.c
27357--- linux-3.0.4/drivers/input/serio/serio.c 2011-07-21 22:17:23.000000000 -0400
27358+++ linux-3.0.4/drivers/input/serio/serio.c 2011-08-23 21:47:55.000000000 -0400
27359@@ -497,7 +497,7 @@ static void serio_release_port(struct de
27360 */
27361 static void serio_init_port(struct serio *serio)
27362 {
27363- static atomic_t serio_no = ATOMIC_INIT(0);
27364+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
27365
27366 __module_get(THIS_MODULE);
27367
27368@@ -508,7 +508,7 @@ static void serio_init_port(struct serio
27369 mutex_init(&serio->drv_mutex);
27370 device_initialize(&serio->dev);
27371 dev_set_name(&serio->dev, "serio%ld",
27372- (long)atomic_inc_return(&serio_no) - 1);
27373+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
27374 serio->dev.bus = &serio_bus;
27375 serio->dev.release = serio_release_port;
27376 serio->dev.groups = serio_device_attr_groups;
27377diff -urNp linux-3.0.4/drivers/isdn/capi/capi.c linux-3.0.4/drivers/isdn/capi/capi.c
27378--- linux-3.0.4/drivers/isdn/capi/capi.c 2011-07-21 22:17:23.000000000 -0400
27379+++ linux-3.0.4/drivers/isdn/capi/capi.c 2011-08-23 21:47:55.000000000 -0400
27380@@ -83,8 +83,8 @@ struct capiminor {
27381
27382 struct capi20_appl *ap;
27383 u32 ncci;
27384- atomic_t datahandle;
27385- atomic_t msgid;
27386+ atomic_unchecked_t datahandle;
27387+ atomic_unchecked_t msgid;
27388
27389 struct tty_port port;
27390 int ttyinstop;
27391@@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *m
27392 capimsg_setu16(s, 2, mp->ap->applid);
27393 capimsg_setu8 (s, 4, CAPI_DATA_B3);
27394 capimsg_setu8 (s, 5, CAPI_RESP);
27395- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
27396+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
27397 capimsg_setu32(s, 8, mp->ncci);
27398 capimsg_setu16(s, 12, datahandle);
27399 }
27400@@ -518,14 +518,14 @@ static void handle_minor_send(struct cap
27401 mp->outbytes -= len;
27402 spin_unlock_bh(&mp->outlock);
27403
27404- datahandle = atomic_inc_return(&mp->datahandle);
27405+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
27406 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
27407 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
27408 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
27409 capimsg_setu16(skb->data, 2, mp->ap->applid);
27410 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
27411 capimsg_setu8 (skb->data, 5, CAPI_REQ);
27412- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
27413+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
27414 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
27415 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
27416 capimsg_setu16(skb->data, 16, len); /* Data length */
27417diff -urNp linux-3.0.4/drivers/isdn/gigaset/common.c linux-3.0.4/drivers/isdn/gigaset/common.c
27418--- linux-3.0.4/drivers/isdn/gigaset/common.c 2011-07-21 22:17:23.000000000 -0400
27419+++ linux-3.0.4/drivers/isdn/gigaset/common.c 2011-08-23 21:47:55.000000000 -0400
27420@@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct
27421 cs->commands_pending = 0;
27422 cs->cur_at_seq = 0;
27423 cs->gotfwver = -1;
27424- cs->open_count = 0;
27425+ local_set(&cs->open_count, 0);
27426 cs->dev = NULL;
27427 cs->tty = NULL;
27428 cs->tty_dev = NULL;
27429diff -urNp linux-3.0.4/drivers/isdn/gigaset/gigaset.h linux-3.0.4/drivers/isdn/gigaset/gigaset.h
27430--- linux-3.0.4/drivers/isdn/gigaset/gigaset.h 2011-07-21 22:17:23.000000000 -0400
27431+++ linux-3.0.4/drivers/isdn/gigaset/gigaset.h 2011-08-23 21:47:55.000000000 -0400
27432@@ -35,6 +35,7 @@
27433 #include <linux/tty_driver.h>
27434 #include <linux/list.h>
27435 #include <asm/atomic.h>
27436+#include <asm/local.h>
27437
27438 #define GIG_VERSION {0, 5, 0, 0}
27439 #define GIG_COMPAT {0, 4, 0, 0}
27440@@ -433,7 +434,7 @@ struct cardstate {
27441 spinlock_t cmdlock;
27442 unsigned curlen, cmdbytes;
27443
27444- unsigned open_count;
27445+ local_t open_count;
27446 struct tty_struct *tty;
27447 struct tasklet_struct if_wake_tasklet;
27448 unsigned control_state;
27449diff -urNp linux-3.0.4/drivers/isdn/gigaset/interface.c linux-3.0.4/drivers/isdn/gigaset/interface.c
27450--- linux-3.0.4/drivers/isdn/gigaset/interface.c 2011-07-21 22:17:23.000000000 -0400
27451+++ linux-3.0.4/drivers/isdn/gigaset/interface.c 2011-08-23 21:47:55.000000000 -0400
27452@@ -162,9 +162,7 @@ static int if_open(struct tty_struct *tt
27453 }
27454 tty->driver_data = cs;
27455
27456- ++cs->open_count;
27457-
27458- if (cs->open_count == 1) {
27459+ if (local_inc_return(&cs->open_count) == 1) {
27460 spin_lock_irqsave(&cs->lock, flags);
27461 cs->tty = tty;
27462 spin_unlock_irqrestore(&cs->lock, flags);
27463@@ -192,10 +190,10 @@ static void if_close(struct tty_struct *
27464
27465 if (!cs->connected)
27466 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
27467- else if (!cs->open_count)
27468+ else if (!local_read(&cs->open_count))
27469 dev_warn(cs->dev, "%s: device not opened\n", __func__);
27470 else {
27471- if (!--cs->open_count) {
27472+ if (!local_dec_return(&cs->open_count)) {
27473 spin_lock_irqsave(&cs->lock, flags);
27474 cs->tty = NULL;
27475 spin_unlock_irqrestore(&cs->lock, flags);
27476@@ -230,7 +228,7 @@ static int if_ioctl(struct tty_struct *t
27477 if (!cs->connected) {
27478 gig_dbg(DEBUG_IF, "not connected");
27479 retval = -ENODEV;
27480- } else if (!cs->open_count)
27481+ } else if (!local_read(&cs->open_count))
27482 dev_warn(cs->dev, "%s: device not opened\n", __func__);
27483 else {
27484 retval = 0;
27485@@ -360,7 +358,7 @@ static int if_write(struct tty_struct *t
27486 retval = -ENODEV;
27487 goto done;
27488 }
27489- if (!cs->open_count) {
27490+ if (!local_read(&cs->open_count)) {
27491 dev_warn(cs->dev, "%s: device not opened\n", __func__);
27492 retval = -ENODEV;
27493 goto done;
27494@@ -413,7 +411,7 @@ static int if_write_room(struct tty_stru
27495 if (!cs->connected) {
27496 gig_dbg(DEBUG_IF, "not connected");
27497 retval = -ENODEV;
27498- } else if (!cs->open_count)
27499+ } else if (!local_read(&cs->open_count))
27500 dev_warn(cs->dev, "%s: device not opened\n", __func__);
27501 else if (cs->mstate != MS_LOCKED) {
27502 dev_warn(cs->dev, "can't write to unlocked device\n");
27503@@ -443,7 +441,7 @@ static int if_chars_in_buffer(struct tty
27504
27505 if (!cs->connected)
27506 gig_dbg(DEBUG_IF, "not connected");
27507- else if (!cs->open_count)
27508+ else if (!local_read(&cs->open_count))
27509 dev_warn(cs->dev, "%s: device not opened\n", __func__);
27510 else if (cs->mstate != MS_LOCKED)
27511 dev_warn(cs->dev, "can't write to unlocked device\n");
27512@@ -471,7 +469,7 @@ static void if_throttle(struct tty_struc
27513
27514 if (!cs->connected)
27515 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
27516- else if (!cs->open_count)
27517+ else if (!local_read(&cs->open_count))
27518 dev_warn(cs->dev, "%s: device not opened\n", __func__);
27519 else
27520 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
27521@@ -495,7 +493,7 @@ static void if_unthrottle(struct tty_str
27522
27523 if (!cs->connected)
27524 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
27525- else if (!cs->open_count)
27526+ else if (!local_read(&cs->open_count))
27527 dev_warn(cs->dev, "%s: device not opened\n", __func__);
27528 else
27529 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
27530@@ -526,7 +524,7 @@ static void if_set_termios(struct tty_st
27531 goto out;
27532 }
27533
27534- if (!cs->open_count) {
27535+ if (!local_read(&cs->open_count)) {
27536 dev_warn(cs->dev, "%s: device not opened\n", __func__);
27537 goto out;
27538 }
27539diff -urNp linux-3.0.4/drivers/isdn/hardware/avm/b1.c linux-3.0.4/drivers/isdn/hardware/avm/b1.c
27540--- linux-3.0.4/drivers/isdn/hardware/avm/b1.c 2011-07-21 22:17:23.000000000 -0400
27541+++ linux-3.0.4/drivers/isdn/hardware/avm/b1.c 2011-08-23 21:47:55.000000000 -0400
27542@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capilo
27543 }
27544 if (left) {
27545 if (t4file->user) {
27546- if (copy_from_user(buf, dp, left))
27547+ if (left > sizeof buf || copy_from_user(buf, dp, left))
27548 return -EFAULT;
27549 } else {
27550 memcpy(buf, dp, left);
27551@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capilo
27552 }
27553 if (left) {
27554 if (config->user) {
27555- if (copy_from_user(buf, dp, left))
27556+ if (left > sizeof buf || copy_from_user(buf, dp, left))
27557 return -EFAULT;
27558 } else {
27559 memcpy(buf, dp, left);
27560diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/capidtmf.c linux-3.0.4/drivers/isdn/hardware/eicon/capidtmf.c
27561--- linux-3.0.4/drivers/isdn/hardware/eicon/capidtmf.c 2011-07-21 22:17:23.000000000 -0400
27562+++ linux-3.0.4/drivers/isdn/hardware/eicon/capidtmf.c 2011-08-23 21:48:14.000000000 -0400
27563@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
27564 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
27565 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
27566
27567+ pax_track_stack();
27568
27569 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
27570 {
27571diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/capifunc.c linux-3.0.4/drivers/isdn/hardware/eicon/capifunc.c
27572--- linux-3.0.4/drivers/isdn/hardware/eicon/capifunc.c 2011-07-21 22:17:23.000000000 -0400
27573+++ linux-3.0.4/drivers/isdn/hardware/eicon/capifunc.c 2011-08-23 21:48:14.000000000 -0400
27574@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
27575 IDI_SYNC_REQ req;
27576 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
27577
27578+ pax_track_stack();
27579+
27580 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
27581
27582 for (x = 0; x < MAX_DESCRIPTORS; x++) {
27583diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/diddfunc.c linux-3.0.4/drivers/isdn/hardware/eicon/diddfunc.c
27584--- linux-3.0.4/drivers/isdn/hardware/eicon/diddfunc.c 2011-07-21 22:17:23.000000000 -0400
27585+++ linux-3.0.4/drivers/isdn/hardware/eicon/diddfunc.c 2011-08-23 21:48:14.000000000 -0400
27586@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
27587 IDI_SYNC_REQ req;
27588 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
27589
27590+ pax_track_stack();
27591+
27592 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
27593
27594 for (x = 0; x < MAX_DESCRIPTORS; x++) {
27595diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/divasfunc.c linux-3.0.4/drivers/isdn/hardware/eicon/divasfunc.c
27596--- linux-3.0.4/drivers/isdn/hardware/eicon/divasfunc.c 2011-07-21 22:17:23.000000000 -0400
27597+++ linux-3.0.4/drivers/isdn/hardware/eicon/divasfunc.c 2011-08-23 21:48:14.000000000 -0400
27598@@ -160,6 +160,8 @@ static int DIVA_INIT_FUNCTION connect_di
27599 IDI_SYNC_REQ req;
27600 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
27601
27602+ pax_track_stack();
27603+
27604 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
27605
27606 for (x = 0; x < MAX_DESCRIPTORS; x++) {
27607diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/divasync.h linux-3.0.4/drivers/isdn/hardware/eicon/divasync.h
27608--- linux-3.0.4/drivers/isdn/hardware/eicon/divasync.h 2011-07-21 22:17:23.000000000 -0400
27609+++ linux-3.0.4/drivers/isdn/hardware/eicon/divasync.h 2011-08-23 21:47:55.000000000 -0400
27610@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
27611 } diva_didd_add_adapter_t;
27612 typedef struct _diva_didd_remove_adapter {
27613 IDI_CALL p_request;
27614-} diva_didd_remove_adapter_t;
27615+} __no_const diva_didd_remove_adapter_t;
27616 typedef struct _diva_didd_read_adapter_array {
27617 void * buffer;
27618 dword length;
27619diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/idifunc.c linux-3.0.4/drivers/isdn/hardware/eicon/idifunc.c
27620--- linux-3.0.4/drivers/isdn/hardware/eicon/idifunc.c 2011-07-21 22:17:23.000000000 -0400
27621+++ linux-3.0.4/drivers/isdn/hardware/eicon/idifunc.c 2011-08-23 21:48:14.000000000 -0400
27622@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
27623 IDI_SYNC_REQ req;
27624 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
27625
27626+ pax_track_stack();
27627+
27628 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
27629
27630 for (x = 0; x < MAX_DESCRIPTORS; x++) {
27631diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/message.c linux-3.0.4/drivers/isdn/hardware/eicon/message.c
27632--- linux-3.0.4/drivers/isdn/hardware/eicon/message.c 2011-07-21 22:17:23.000000000 -0400
27633+++ linux-3.0.4/drivers/isdn/hardware/eicon/message.c 2011-08-23 21:48:14.000000000 -0400
27634@@ -4886,6 +4886,8 @@ static void sig_ind(PLCI *plci)
27635 dword d;
27636 word w;
27637
27638+ pax_track_stack();
27639+
27640 a = plci->adapter;
27641 Id = ((word)plci->Id<<8)|a->Id;
27642 PUT_WORD(&SS_Ind[4],0x0000);
27643@@ -7480,6 +7482,8 @@ static word add_b1(PLCI *plci, API_PARSE
27644 word j, n, w;
27645 dword d;
27646
27647+ pax_track_stack();
27648+
27649
27650 for(i=0;i<8;i++) bp_parms[i].length = 0;
27651 for(i=0;i<2;i++) global_config[i].length = 0;
27652@@ -7954,6 +7958,8 @@ static word add_b23(PLCI *plci, API_PARS
27653 const byte llc3[] = {4,3,2,2,6,6,0};
27654 const byte header[] = {0,2,3,3,0,0,0};
27655
27656+ pax_track_stack();
27657+
27658 for(i=0;i<8;i++) bp_parms[i].length = 0;
27659 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
27660 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
27661@@ -14741,6 +14747,8 @@ static void group_optimization(DIVA_CAPI
27662 word appl_number_group_type[MAX_APPL];
27663 PLCI *auxplci;
27664
27665+ pax_track_stack();
27666+
27667 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
27668
27669 if(!a->group_optimization_enabled)
27670diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/mntfunc.c linux-3.0.4/drivers/isdn/hardware/eicon/mntfunc.c
27671--- linux-3.0.4/drivers/isdn/hardware/eicon/mntfunc.c 2011-07-21 22:17:23.000000000 -0400
27672+++ linux-3.0.4/drivers/isdn/hardware/eicon/mntfunc.c 2011-08-23 21:48:14.000000000 -0400
27673@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
27674 IDI_SYNC_REQ req;
27675 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
27676
27677+ pax_track_stack();
27678+
27679 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
27680
27681 for (x = 0; x < MAX_DESCRIPTORS; x++) {
27682diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/xdi_adapter.h linux-3.0.4/drivers/isdn/hardware/eicon/xdi_adapter.h
27683--- linux-3.0.4/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-07-21 22:17:23.000000000 -0400
27684+++ linux-3.0.4/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-08-23 21:47:55.000000000 -0400
27685@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
27686 typedef struct _diva_os_idi_adapter_interface {
27687 diva_init_card_proc_t cleanup_adapter_proc;
27688 diva_cmd_card_proc_t cmd_proc;
27689-} diva_os_idi_adapter_interface_t;
27690+} __no_const diva_os_idi_adapter_interface_t;
27691
27692 typedef struct _diva_os_xdi_adapter {
27693 struct list_head link;
27694diff -urNp linux-3.0.4/drivers/isdn/i4l/isdn_common.c linux-3.0.4/drivers/isdn/i4l/isdn_common.c
27695--- linux-3.0.4/drivers/isdn/i4l/isdn_common.c 2011-07-21 22:17:23.000000000 -0400
27696+++ linux-3.0.4/drivers/isdn/i4l/isdn_common.c 2011-08-23 21:48:14.000000000 -0400
27697@@ -1286,6 +1286,8 @@ isdn_ioctl(struct file *file, uint cmd,
27698 } iocpar;
27699 void __user *argp = (void __user *)arg;
27700
27701+ pax_track_stack();
27702+
27703 #define name iocpar.name
27704 #define bname iocpar.bname
27705 #define iocts iocpar.iocts
27706diff -urNp linux-3.0.4/drivers/isdn/icn/icn.c linux-3.0.4/drivers/isdn/icn/icn.c
27707--- linux-3.0.4/drivers/isdn/icn/icn.c 2011-07-21 22:17:23.000000000 -0400
27708+++ linux-3.0.4/drivers/isdn/icn/icn.c 2011-08-23 21:47:55.000000000 -0400
27709@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len
27710 if (count > len)
27711 count = len;
27712 if (user) {
27713- if (copy_from_user(msg, buf, count))
27714+ if (count > sizeof msg || copy_from_user(msg, buf, count))
27715 return -EFAULT;
27716 } else
27717 memcpy(msg, buf, count);
27718diff -urNp linux-3.0.4/drivers/lguest/core.c linux-3.0.4/drivers/lguest/core.c
27719--- linux-3.0.4/drivers/lguest/core.c 2011-07-21 22:17:23.000000000 -0400
27720+++ linux-3.0.4/drivers/lguest/core.c 2011-08-23 21:47:55.000000000 -0400
27721@@ -92,9 +92,17 @@ static __init int map_switcher(void)
27722 * it's worked so far. The end address needs +1 because __get_vm_area
27723 * allocates an extra guard page, so we need space for that.
27724 */
27725+
27726+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27727+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
27728+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
27729+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
27730+#else
27731 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
27732 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
27733 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
27734+#endif
27735+
27736 if (!switcher_vma) {
27737 err = -ENOMEM;
27738 printk("lguest: could not map switcher pages high\n");
27739@@ -119,7 +127,7 @@ static __init int map_switcher(void)
27740 * Now the Switcher is mapped at the right address, we can't fail!
27741 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
27742 */
27743- memcpy(switcher_vma->addr, start_switcher_text,
27744+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
27745 end_switcher_text - start_switcher_text);
27746
27747 printk(KERN_INFO "lguest: mapped switcher at %p\n",
27748diff -urNp linux-3.0.4/drivers/lguest/x86/core.c linux-3.0.4/drivers/lguest/x86/core.c
27749--- linux-3.0.4/drivers/lguest/x86/core.c 2011-07-21 22:17:23.000000000 -0400
27750+++ linux-3.0.4/drivers/lguest/x86/core.c 2011-08-23 21:47:55.000000000 -0400
27751@@ -59,7 +59,7 @@ static struct {
27752 /* Offset from where switcher.S was compiled to where we've copied it */
27753 static unsigned long switcher_offset(void)
27754 {
27755- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
27756+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
27757 }
27758
27759 /* This cpu's struct lguest_pages. */
27760@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
27761 * These copies are pretty cheap, so we do them unconditionally: */
27762 /* Save the current Host top-level page directory.
27763 */
27764+
27765+#ifdef CONFIG_PAX_PER_CPU_PGD
27766+ pages->state.host_cr3 = read_cr3();
27767+#else
27768 pages->state.host_cr3 = __pa(current->mm->pgd);
27769+#endif
27770+
27771 /*
27772 * Set up the Guest's page tables to see this CPU's pages (and no
27773 * other CPU's pages).
27774@@ -547,7 +553,7 @@ void __init lguest_arch_host_init(void)
27775 * compiled-in switcher code and the high-mapped copy we just made.
27776 */
27777 for (i = 0; i < IDT_ENTRIES; i++)
27778- default_idt_entries[i] += switcher_offset();
27779+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
27780
27781 /*
27782 * Set up the Switcher's per-cpu areas.
27783@@ -630,7 +636,7 @@ void __init lguest_arch_host_init(void)
27784 * it will be undisturbed when we switch. To change %cs and jump we
27785 * need this structure to feed to Intel's "lcall" instruction.
27786 */
27787- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
27788+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
27789 lguest_entry.segment = LGUEST_CS;
27790
27791 /*
27792diff -urNp linux-3.0.4/drivers/lguest/x86/switcher_32.S linux-3.0.4/drivers/lguest/x86/switcher_32.S
27793--- linux-3.0.4/drivers/lguest/x86/switcher_32.S 2011-07-21 22:17:23.000000000 -0400
27794+++ linux-3.0.4/drivers/lguest/x86/switcher_32.S 2011-08-23 21:47:55.000000000 -0400
27795@@ -87,6 +87,7 @@
27796 #include <asm/page.h>
27797 #include <asm/segment.h>
27798 #include <asm/lguest.h>
27799+#include <asm/processor-flags.h>
27800
27801 // We mark the start of the code to copy
27802 // It's placed in .text tho it's never run here
27803@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
27804 // Changes type when we load it: damn Intel!
27805 // For after we switch over our page tables
27806 // That entry will be read-only: we'd crash.
27807+
27808+#ifdef CONFIG_PAX_KERNEXEC
27809+ mov %cr0, %edx
27810+ xor $X86_CR0_WP, %edx
27811+ mov %edx, %cr0
27812+#endif
27813+
27814 movl $(GDT_ENTRY_TSS*8), %edx
27815 ltr %dx
27816
27817@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
27818 // Let's clear it again for our return.
27819 // The GDT descriptor of the Host
27820 // Points to the table after two "size" bytes
27821- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
27822+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
27823 // Clear "used" from type field (byte 5, bit 2)
27824- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
27825+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
27826+
27827+#ifdef CONFIG_PAX_KERNEXEC
27828+ mov %cr0, %eax
27829+ xor $X86_CR0_WP, %eax
27830+ mov %eax, %cr0
27831+#endif
27832
27833 // Once our page table's switched, the Guest is live!
27834 // The Host fades as we run this final step.
27835@@ -295,13 +309,12 @@ deliver_to_host:
27836 // I consulted gcc, and it gave
27837 // These instructions, which I gladly credit:
27838 leal (%edx,%ebx,8), %eax
27839- movzwl (%eax),%edx
27840- movl 4(%eax), %eax
27841- xorw %ax, %ax
27842- orl %eax, %edx
27843+ movl 4(%eax), %edx
27844+ movw (%eax), %dx
27845 // Now the address of the handler's in %edx
27846 // We call it now: its "iret" drops us home.
27847- jmp *%edx
27848+ ljmp $__KERNEL_CS, $1f
27849+1: jmp *%edx
27850
27851 // Every interrupt can come to us here
27852 // But we must truly tell each apart.
27853diff -urNp linux-3.0.4/drivers/md/dm.c linux-3.0.4/drivers/md/dm.c
27854--- linux-3.0.4/drivers/md/dm.c 2011-09-02 18:11:21.000000000 -0400
27855+++ linux-3.0.4/drivers/md/dm.c 2011-08-23 21:47:55.000000000 -0400
27856@@ -164,9 +164,9 @@ struct mapped_device {
27857 /*
27858 * Event handling.
27859 */
27860- atomic_t event_nr;
27861+ atomic_unchecked_t event_nr;
27862 wait_queue_head_t eventq;
27863- atomic_t uevent_seq;
27864+ atomic_unchecked_t uevent_seq;
27865 struct list_head uevent_list;
27866 spinlock_t uevent_lock; /* Protect access to uevent_list */
27867
27868@@ -1842,8 +1842,8 @@ static struct mapped_device *alloc_dev(i
27869 rwlock_init(&md->map_lock);
27870 atomic_set(&md->holders, 1);
27871 atomic_set(&md->open_count, 0);
27872- atomic_set(&md->event_nr, 0);
27873- atomic_set(&md->uevent_seq, 0);
27874+ atomic_set_unchecked(&md->event_nr, 0);
27875+ atomic_set_unchecked(&md->uevent_seq, 0);
27876 INIT_LIST_HEAD(&md->uevent_list);
27877 spin_lock_init(&md->uevent_lock);
27878
27879@@ -1977,7 +1977,7 @@ static void event_callback(void *context
27880
27881 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
27882
27883- atomic_inc(&md->event_nr);
27884+ atomic_inc_unchecked(&md->event_nr);
27885 wake_up(&md->eventq);
27886 }
27887
27888@@ -2553,18 +2553,18 @@ int dm_kobject_uevent(struct mapped_devi
27889
27890 uint32_t dm_next_uevent_seq(struct mapped_device *md)
27891 {
27892- return atomic_add_return(1, &md->uevent_seq);
27893+ return atomic_add_return_unchecked(1, &md->uevent_seq);
27894 }
27895
27896 uint32_t dm_get_event_nr(struct mapped_device *md)
27897 {
27898- return atomic_read(&md->event_nr);
27899+ return atomic_read_unchecked(&md->event_nr);
27900 }
27901
27902 int dm_wait_event(struct mapped_device *md, int event_nr)
27903 {
27904 return wait_event_interruptible(md->eventq,
27905- (event_nr != atomic_read(&md->event_nr)));
27906+ (event_nr != atomic_read_unchecked(&md->event_nr)));
27907 }
27908
27909 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
27910diff -urNp linux-3.0.4/drivers/md/dm-ioctl.c linux-3.0.4/drivers/md/dm-ioctl.c
27911--- linux-3.0.4/drivers/md/dm-ioctl.c 2011-07-21 22:17:23.000000000 -0400
27912+++ linux-3.0.4/drivers/md/dm-ioctl.c 2011-08-23 21:47:55.000000000 -0400
27913@@ -1551,7 +1551,7 @@ static int validate_params(uint cmd, str
27914 cmd == DM_LIST_VERSIONS_CMD)
27915 return 0;
27916
27917- if ((cmd == DM_DEV_CREATE_CMD)) {
27918+ if (cmd == DM_DEV_CREATE_CMD) {
27919 if (!*param->name) {
27920 DMWARN("name not supplied when creating device");
27921 return -EINVAL;
27922diff -urNp linux-3.0.4/drivers/md/dm-raid1.c linux-3.0.4/drivers/md/dm-raid1.c
27923--- linux-3.0.4/drivers/md/dm-raid1.c 2011-07-21 22:17:23.000000000 -0400
27924+++ linux-3.0.4/drivers/md/dm-raid1.c 2011-08-23 21:47:55.000000000 -0400
27925@@ -40,7 +40,7 @@ enum dm_raid1_error {
27926
27927 struct mirror {
27928 struct mirror_set *ms;
27929- atomic_t error_count;
27930+ atomic_unchecked_t error_count;
27931 unsigned long error_type;
27932 struct dm_dev *dev;
27933 sector_t offset;
27934@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(s
27935 struct mirror *m;
27936
27937 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
27938- if (!atomic_read(&m->error_count))
27939+ if (!atomic_read_unchecked(&m->error_count))
27940 return m;
27941
27942 return NULL;
27943@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m
27944 * simple way to tell if a device has encountered
27945 * errors.
27946 */
27947- atomic_inc(&m->error_count);
27948+ atomic_inc_unchecked(&m->error_count);
27949
27950 if (test_and_set_bit(error_type, &m->error_type))
27951 return;
27952@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(stru
27953 struct mirror *m = get_default_mirror(ms);
27954
27955 do {
27956- if (likely(!atomic_read(&m->error_count)))
27957+ if (likely(!atomic_read_unchecked(&m->error_count)))
27958 return m;
27959
27960 if (m-- == ms->mirror)
27961@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
27962 {
27963 struct mirror *default_mirror = get_default_mirror(m->ms);
27964
27965- return !atomic_read(&default_mirror->error_count);
27966+ return !atomic_read_unchecked(&default_mirror->error_count);
27967 }
27968
27969 static int mirror_available(struct mirror_set *ms, struct bio *bio)
27970@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *
27971 */
27972 if (likely(region_in_sync(ms, region, 1)))
27973 m = choose_mirror(ms, bio->bi_sector);
27974- else if (m && atomic_read(&m->error_count))
27975+ else if (m && atomic_read_unchecked(&m->error_count))
27976 m = NULL;
27977
27978 if (likely(m))
27979@@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set
27980 }
27981
27982 ms->mirror[mirror].ms = ms;
27983- atomic_set(&(ms->mirror[mirror].error_count), 0);
27984+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
27985 ms->mirror[mirror].error_type = 0;
27986 ms->mirror[mirror].offset = offset;
27987
27988@@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_targ
27989 */
27990 static char device_status_char(struct mirror *m)
27991 {
27992- if (!atomic_read(&(m->error_count)))
27993+ if (!atomic_read_unchecked(&(m->error_count)))
27994 return 'A';
27995
27996 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
27997diff -urNp linux-3.0.4/drivers/md/dm-stripe.c linux-3.0.4/drivers/md/dm-stripe.c
27998--- linux-3.0.4/drivers/md/dm-stripe.c 2011-07-21 22:17:23.000000000 -0400
27999+++ linux-3.0.4/drivers/md/dm-stripe.c 2011-08-23 21:47:55.000000000 -0400
28000@@ -20,7 +20,7 @@ struct stripe {
28001 struct dm_dev *dev;
28002 sector_t physical_start;
28003
28004- atomic_t error_count;
28005+ atomic_unchecked_t error_count;
28006 };
28007
28008 struct stripe_c {
28009@@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *
28010 kfree(sc);
28011 return r;
28012 }
28013- atomic_set(&(sc->stripe[i].error_count), 0);
28014+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
28015 }
28016
28017 ti->private = sc;
28018@@ -314,7 +314,7 @@ static int stripe_status(struct dm_targe
28019 DMEMIT("%d ", sc->stripes);
28020 for (i = 0; i < sc->stripes; i++) {
28021 DMEMIT("%s ", sc->stripe[i].dev->name);
28022- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
28023+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
28024 'D' : 'A';
28025 }
28026 buffer[i] = '\0';
28027@@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_targe
28028 */
28029 for (i = 0; i < sc->stripes; i++)
28030 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
28031- atomic_inc(&(sc->stripe[i].error_count));
28032- if (atomic_read(&(sc->stripe[i].error_count)) <
28033+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
28034+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
28035 DM_IO_ERROR_THRESHOLD)
28036 schedule_work(&sc->trigger_event);
28037 }
28038diff -urNp linux-3.0.4/drivers/md/dm-table.c linux-3.0.4/drivers/md/dm-table.c
28039--- linux-3.0.4/drivers/md/dm-table.c 2011-07-21 22:17:23.000000000 -0400
28040+++ linux-3.0.4/drivers/md/dm-table.c 2011-08-23 21:47:55.000000000 -0400
28041@@ -390,7 +390,7 @@ static int device_area_is_invalid(struct
28042 if (!dev_size)
28043 return 0;
28044
28045- if ((start >= dev_size) || (start + len > dev_size)) {
28046+ if ((start >= dev_size) || (len > dev_size - start)) {
28047 DMWARN("%s: %s too small for target: "
28048 "start=%llu, len=%llu, dev_size=%llu",
28049 dm_device_name(ti->table->md), bdevname(bdev, b),
28050diff -urNp linux-3.0.4/drivers/md/md.c linux-3.0.4/drivers/md/md.c
28051--- linux-3.0.4/drivers/md/md.c 2011-07-21 22:17:23.000000000 -0400
28052+++ linux-3.0.4/drivers/md/md.c 2011-08-23 21:47:55.000000000 -0400
28053@@ -226,10 +226,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
28054 * start build, activate spare
28055 */
28056 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
28057-static atomic_t md_event_count;
28058+static atomic_unchecked_t md_event_count;
28059 void md_new_event(mddev_t *mddev)
28060 {
28061- atomic_inc(&md_event_count);
28062+ atomic_inc_unchecked(&md_event_count);
28063 wake_up(&md_event_waiters);
28064 }
28065 EXPORT_SYMBOL_GPL(md_new_event);
28066@@ -239,7 +239,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
28067 */
28068 static void md_new_event_inintr(mddev_t *mddev)
28069 {
28070- atomic_inc(&md_event_count);
28071+ atomic_inc_unchecked(&md_event_count);
28072 wake_up(&md_event_waiters);
28073 }
28074
28075@@ -1457,7 +1457,7 @@ static int super_1_load(mdk_rdev_t *rdev
28076
28077 rdev->preferred_minor = 0xffff;
28078 rdev->data_offset = le64_to_cpu(sb->data_offset);
28079- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
28080+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
28081
28082 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
28083 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
28084@@ -1635,7 +1635,7 @@ static void super_1_sync(mddev_t *mddev,
28085 else
28086 sb->resync_offset = cpu_to_le64(0);
28087
28088- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
28089+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
28090
28091 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
28092 sb->size = cpu_to_le64(mddev->dev_sectors);
28093@@ -2428,7 +2428,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
28094 static ssize_t
28095 errors_show(mdk_rdev_t *rdev, char *page)
28096 {
28097- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
28098+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
28099 }
28100
28101 static ssize_t
28102@@ -2437,7 +2437,7 @@ errors_store(mdk_rdev_t *rdev, const cha
28103 char *e;
28104 unsigned long n = simple_strtoul(buf, &e, 10);
28105 if (*buf && (*e == 0 || *e == '\n')) {
28106- atomic_set(&rdev->corrected_errors, n);
28107+ atomic_set_unchecked(&rdev->corrected_errors, n);
28108 return len;
28109 }
28110 return -EINVAL;
28111@@ -2793,8 +2793,8 @@ void md_rdev_init(mdk_rdev_t *rdev)
28112 rdev->last_read_error.tv_sec = 0;
28113 rdev->last_read_error.tv_nsec = 0;
28114 atomic_set(&rdev->nr_pending, 0);
28115- atomic_set(&rdev->read_errors, 0);
28116- atomic_set(&rdev->corrected_errors, 0);
28117+ atomic_set_unchecked(&rdev->read_errors, 0);
28118+ atomic_set_unchecked(&rdev->corrected_errors, 0);
28119
28120 INIT_LIST_HEAD(&rdev->same_set);
28121 init_waitqueue_head(&rdev->blocked_wait);
28122@@ -6415,7 +6415,7 @@ static int md_seq_show(struct seq_file *
28123
28124 spin_unlock(&pers_lock);
28125 seq_printf(seq, "\n");
28126- mi->event = atomic_read(&md_event_count);
28127+ mi->event = atomic_read_unchecked(&md_event_count);
28128 return 0;
28129 }
28130 if (v == (void*)2) {
28131@@ -6504,7 +6504,7 @@ static int md_seq_show(struct seq_file *
28132 chunk_kb ? "KB" : "B");
28133 if (bitmap->file) {
28134 seq_printf(seq, ", file: ");
28135- seq_path(seq, &bitmap->file->f_path, " \t\n");
28136+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
28137 }
28138
28139 seq_printf(seq, "\n");
28140@@ -6538,7 +6538,7 @@ static int md_seq_open(struct inode *ino
28141 else {
28142 struct seq_file *p = file->private_data;
28143 p->private = mi;
28144- mi->event = atomic_read(&md_event_count);
28145+ mi->event = atomic_read_unchecked(&md_event_count);
28146 }
28147 return error;
28148 }
28149@@ -6554,7 +6554,7 @@ static unsigned int mdstat_poll(struct f
28150 /* always allow read */
28151 mask = POLLIN | POLLRDNORM;
28152
28153- if (mi->event != atomic_read(&md_event_count))
28154+ if (mi->event != atomic_read_unchecked(&md_event_count))
28155 mask |= POLLERR | POLLPRI;
28156 return mask;
28157 }
28158@@ -6598,7 +6598,7 @@ static int is_mddev_idle(mddev_t *mddev,
28159 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
28160 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
28161 (int)part_stat_read(&disk->part0, sectors[1]) -
28162- atomic_read(&disk->sync_io);
28163+ atomic_read_unchecked(&disk->sync_io);
28164 /* sync IO will cause sync_io to increase before the disk_stats
28165 * as sync_io is counted when a request starts, and
28166 * disk_stats is counted when it completes.
28167diff -urNp linux-3.0.4/drivers/md/md.h linux-3.0.4/drivers/md/md.h
28168--- linux-3.0.4/drivers/md/md.h 2011-07-21 22:17:23.000000000 -0400
28169+++ linux-3.0.4/drivers/md/md.h 2011-08-23 21:47:55.000000000 -0400
28170@@ -97,13 +97,13 @@ struct mdk_rdev_s
28171 * only maintained for arrays that
28172 * support hot removal
28173 */
28174- atomic_t read_errors; /* number of consecutive read errors that
28175+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
28176 * we have tried to ignore.
28177 */
28178 struct timespec last_read_error; /* monotonic time since our
28179 * last read error
28180 */
28181- atomic_t corrected_errors; /* number of corrected read errors,
28182+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
28183 * for reporting to userspace and storing
28184 * in superblock.
28185 */
28186@@ -344,7 +344,7 @@ static inline void rdev_dec_pending(mdk_
28187
28188 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
28189 {
28190- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
28191+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
28192 }
28193
28194 struct mdk_personality
28195diff -urNp linux-3.0.4/drivers/md/raid10.c linux-3.0.4/drivers/md/raid10.c
28196--- linux-3.0.4/drivers/md/raid10.c 2011-07-21 22:17:23.000000000 -0400
28197+++ linux-3.0.4/drivers/md/raid10.c 2011-08-23 21:47:55.000000000 -0400
28198@@ -1186,7 +1186,7 @@ static void end_sync_read(struct bio *bi
28199 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
28200 set_bit(R10BIO_Uptodate, &r10_bio->state);
28201 else {
28202- atomic_add(r10_bio->sectors,
28203+ atomic_add_unchecked(r10_bio->sectors,
28204 &conf->mirrors[d].rdev->corrected_errors);
28205 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
28206 md_error(r10_bio->mddev,
28207@@ -1394,7 +1394,7 @@ static void check_decay_read_errors(mdde
28208 {
28209 struct timespec cur_time_mon;
28210 unsigned long hours_since_last;
28211- unsigned int read_errors = atomic_read(&rdev->read_errors);
28212+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
28213
28214 ktime_get_ts(&cur_time_mon);
28215
28216@@ -1416,9 +1416,9 @@ static void check_decay_read_errors(mdde
28217 * overflowing the shift of read_errors by hours_since_last.
28218 */
28219 if (hours_since_last >= 8 * sizeof(read_errors))
28220- atomic_set(&rdev->read_errors, 0);
28221+ atomic_set_unchecked(&rdev->read_errors, 0);
28222 else
28223- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
28224+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
28225 }
28226
28227 /*
28228@@ -1448,8 +1448,8 @@ static void fix_read_error(conf_t *conf,
28229 return;
28230
28231 check_decay_read_errors(mddev, rdev);
28232- atomic_inc(&rdev->read_errors);
28233- if (atomic_read(&rdev->read_errors) > max_read_errors) {
28234+ atomic_inc_unchecked(&rdev->read_errors);
28235+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
28236 char b[BDEVNAME_SIZE];
28237 bdevname(rdev->bdev, b);
28238
28239@@ -1457,7 +1457,7 @@ static void fix_read_error(conf_t *conf,
28240 "md/raid10:%s: %s: Raid device exceeded "
28241 "read_error threshold [cur %d:max %d]\n",
28242 mdname(mddev), b,
28243- atomic_read(&rdev->read_errors), max_read_errors);
28244+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
28245 printk(KERN_NOTICE
28246 "md/raid10:%s: %s: Failing raid device\n",
28247 mdname(mddev), b);
28248@@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
28249 test_bit(In_sync, &rdev->flags)) {
28250 atomic_inc(&rdev->nr_pending);
28251 rcu_read_unlock();
28252- atomic_add(s, &rdev->corrected_errors);
28253+ atomic_add_unchecked(s, &rdev->corrected_errors);
28254 if (sync_page_io(rdev,
28255 r10_bio->devs[sl].addr +
28256 sect,
28257diff -urNp linux-3.0.4/drivers/md/raid1.c linux-3.0.4/drivers/md/raid1.c
28258--- linux-3.0.4/drivers/md/raid1.c 2011-07-21 22:17:23.000000000 -0400
28259+++ linux-3.0.4/drivers/md/raid1.c 2011-08-23 21:47:55.000000000 -0400
28260@@ -1263,7 +1263,7 @@ static int fix_sync_read_error(r1bio_t *
28261 rdev_dec_pending(rdev, mddev);
28262 md_error(mddev, rdev);
28263 } else
28264- atomic_add(s, &rdev->corrected_errors);
28265+ atomic_add_unchecked(s, &rdev->corrected_errors);
28266 }
28267 d = start;
28268 while (d != r1_bio->read_disk) {
28269@@ -1492,7 +1492,7 @@ static void fix_read_error(conf_t *conf,
28270 /* Well, this device is dead */
28271 md_error(mddev, rdev);
28272 else {
28273- atomic_add(s, &rdev->corrected_errors);
28274+ atomic_add_unchecked(s, &rdev->corrected_errors);
28275 printk(KERN_INFO
28276 "md/raid1:%s: read error corrected "
28277 "(%d sectors at %llu on %s)\n",
28278diff -urNp linux-3.0.4/drivers/md/raid5.c linux-3.0.4/drivers/md/raid5.c
28279--- linux-3.0.4/drivers/md/raid5.c 2011-07-21 22:17:23.000000000 -0400
28280+++ linux-3.0.4/drivers/md/raid5.c 2011-08-23 21:48:14.000000000 -0400
28281@@ -550,7 +550,7 @@ static void ops_run_io(struct stripe_hea
28282 bi->bi_next = NULL;
28283 if ((rw & WRITE) &&
28284 test_bit(R5_ReWrite, &sh->dev[i].flags))
28285- atomic_add(STRIPE_SECTORS,
28286+ atomic_add_unchecked(STRIPE_SECTORS,
28287 &rdev->corrected_errors);
28288 generic_make_request(bi);
28289 } else {
28290@@ -1596,15 +1596,15 @@ static void raid5_end_read_request(struc
28291 clear_bit(R5_ReadError, &sh->dev[i].flags);
28292 clear_bit(R5_ReWrite, &sh->dev[i].flags);
28293 }
28294- if (atomic_read(&conf->disks[i].rdev->read_errors))
28295- atomic_set(&conf->disks[i].rdev->read_errors, 0);
28296+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
28297+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
28298 } else {
28299 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
28300 int retry = 0;
28301 rdev = conf->disks[i].rdev;
28302
28303 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
28304- atomic_inc(&rdev->read_errors);
28305+ atomic_inc_unchecked(&rdev->read_errors);
28306 if (conf->mddev->degraded >= conf->max_degraded)
28307 printk_rl(KERN_WARNING
28308 "md/raid:%s: read error not correctable "
28309@@ -1622,7 +1622,7 @@ static void raid5_end_read_request(struc
28310 (unsigned long long)(sh->sector
28311 + rdev->data_offset),
28312 bdn);
28313- else if (atomic_read(&rdev->read_errors)
28314+ else if (atomic_read_unchecked(&rdev->read_errors)
28315 > conf->max_nr_stripes)
28316 printk(KERN_WARNING
28317 "md/raid:%s: Too many read errors, failing device %s.\n",
28318@@ -1945,6 +1945,7 @@ static sector_t compute_blocknr(struct s
28319 sector_t r_sector;
28320 struct stripe_head sh2;
28321
28322+ pax_track_stack();
28323
28324 chunk_offset = sector_div(new_sector, sectors_per_chunk);
28325 stripe = new_sector;
28326diff -urNp linux-3.0.4/drivers/media/common/saa7146_hlp.c linux-3.0.4/drivers/media/common/saa7146_hlp.c
28327--- linux-3.0.4/drivers/media/common/saa7146_hlp.c 2011-07-21 22:17:23.000000000 -0400
28328+++ linux-3.0.4/drivers/media/common/saa7146_hlp.c 2011-08-23 21:48:14.000000000 -0400
28329@@ -353,6 +353,8 @@ static void calculate_clipping_registers
28330
28331 int x[32], y[32], w[32], h[32];
28332
28333+ pax_track_stack();
28334+
28335 /* clear out memory */
28336 memset(&line_list[0], 0x00, sizeof(u32)*32);
28337 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
28338diff -urNp linux-3.0.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-3.0.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
28339--- linux-3.0.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-07-21 22:17:23.000000000 -0400
28340+++ linux-3.0.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-08-23 21:48:14.000000000 -0400
28341@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
28342 u8 buf[HOST_LINK_BUF_SIZE];
28343 int i;
28344
28345+ pax_track_stack();
28346+
28347 dprintk("%s\n", __func__);
28348
28349 /* check if we have space for a link buf in the rx_buffer */
28350@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
28351 unsigned long timeout;
28352 int written;
28353
28354+ pax_track_stack();
28355+
28356 dprintk("%s\n", __func__);
28357
28358 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
28359diff -urNp linux-3.0.4/drivers/media/dvb/dvb-core/dvb_demux.h linux-3.0.4/drivers/media/dvb/dvb-core/dvb_demux.h
28360--- linux-3.0.4/drivers/media/dvb/dvb-core/dvb_demux.h 2011-07-21 22:17:23.000000000 -0400
28361+++ linux-3.0.4/drivers/media/dvb/dvb-core/dvb_demux.h 2011-08-24 18:24:40.000000000 -0400
28362@@ -68,12 +68,12 @@ struct dvb_demux_feed {
28363 union {
28364 struct dmx_ts_feed ts;
28365 struct dmx_section_feed sec;
28366- } feed;
28367+ } __no_const feed;
28368
28369 union {
28370 dmx_ts_cb ts;
28371 dmx_section_cb sec;
28372- } cb;
28373+ } __no_const cb;
28374
28375 struct dvb_demux *demux;
28376 void *priv;
28377diff -urNp linux-3.0.4/drivers/media/dvb/dvb-core/dvbdev.c linux-3.0.4/drivers/media/dvb/dvb-core/dvbdev.c
28378--- linux-3.0.4/drivers/media/dvb/dvb-core/dvbdev.c 2011-07-21 22:17:23.000000000 -0400
28379+++ linux-3.0.4/drivers/media/dvb/dvb-core/dvbdev.c 2011-08-24 18:24:19.000000000 -0400
28380@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapt
28381 const struct dvb_device *template, void *priv, int type)
28382 {
28383 struct dvb_device *dvbdev;
28384- struct file_operations *dvbdevfops;
28385+ file_operations_no_const *dvbdevfops;
28386 struct device *clsdev;
28387 int minor;
28388 int id;
28389diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/cxusb.c linux-3.0.4/drivers/media/dvb/dvb-usb/cxusb.c
28390--- linux-3.0.4/drivers/media/dvb/dvb-usb/cxusb.c 2011-07-21 22:17:23.000000000 -0400
28391+++ linux-3.0.4/drivers/media/dvb/dvb-usb/cxusb.c 2011-08-24 18:26:33.000000000 -0400
28392@@ -1059,7 +1059,7 @@ static struct dib0070_config dib7070p_di
28393 struct dib0700_adapter_state {
28394 int (*set_param_save) (struct dvb_frontend *,
28395 struct dvb_frontend_parameters *);
28396-};
28397+} __no_const;
28398
28399 static int dib7070_set_param_override(struct dvb_frontend *fe,
28400 struct dvb_frontend_parameters *fep)
28401diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/dib0700_core.c linux-3.0.4/drivers/media/dvb/dvb-usb/dib0700_core.c
28402--- linux-3.0.4/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-07-21 22:17:23.000000000 -0400
28403+++ linux-3.0.4/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-08-23 21:48:14.000000000 -0400
28404@@ -434,6 +434,8 @@ int dib0700_download_firmware(struct usb
28405 if (!buf)
28406 return -ENOMEM;
28407
28408+ pax_track_stack();
28409+
28410 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
28411 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",
28412 hx.addr, hx.len, hx.chk);
28413diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/dibusb.h linux-3.0.4/drivers/media/dvb/dvb-usb/dibusb.h
28414--- linux-3.0.4/drivers/media/dvb/dvb-usb/dibusb.h 2011-07-21 22:17:23.000000000 -0400
28415+++ linux-3.0.4/drivers/media/dvb/dvb-usb/dibusb.h 2011-08-24 18:27:27.000000000 -0400
28416@@ -97,7 +97,7 @@
28417 #define DIBUSB_IOCTL_CMD_DISABLE_STREAM 0x02
28418
28419 struct dibusb_state {
28420- struct dib_fe_xfer_ops ops;
28421+ dib_fe_xfer_ops_no_const ops;
28422 int mt2060_present;
28423 u8 tuner_addr;
28424 };
28425diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/dw2102.c linux-3.0.4/drivers/media/dvb/dvb-usb/dw2102.c
28426--- linux-3.0.4/drivers/media/dvb/dvb-usb/dw2102.c 2011-07-21 22:17:23.000000000 -0400
28427+++ linux-3.0.4/drivers/media/dvb/dvb-usb/dw2102.c 2011-08-24 18:27:45.000000000 -0400
28428@@ -95,7 +95,7 @@ struct su3000_state {
28429
28430 struct s6x0_state {
28431 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
28432-};
28433+} __no_const;
28434
28435 /* debug */
28436 static int dvb_usb_dw2102_debug;
28437diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/lmedm04.c linux-3.0.4/drivers/media/dvb/dvb-usb/lmedm04.c
28438--- linux-3.0.4/drivers/media/dvb/dvb-usb/lmedm04.c 2011-07-21 22:17:23.000000000 -0400
28439+++ linux-3.0.4/drivers/media/dvb/dvb-usb/lmedm04.c 2011-08-23 21:48:14.000000000 -0400
28440@@ -742,6 +742,7 @@ static int lme2510_download_firmware(str
28441 usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
28442 0x06, 0x80, 0x0200, 0x00, data, 0x0109, 1000);
28443
28444+ pax_track_stack();
28445
28446 data[0] = 0x8a;
28447 len_in = 1;
28448@@ -764,6 +765,8 @@ static void lme_coldreset(struct usb_dev
28449 int ret = 0, len_in;
28450 u8 data[512] = {0};
28451
28452+ pax_track_stack();
28453+
28454 data[0] = 0x0a;
28455 len_in = 1;
28456 info("FRM Firmware Cold Reset");
28457diff -urNp linux-3.0.4/drivers/media/dvb/frontends/dib3000.h linux-3.0.4/drivers/media/dvb/frontends/dib3000.h
28458--- linux-3.0.4/drivers/media/dvb/frontends/dib3000.h 2011-07-21 22:17:23.000000000 -0400
28459+++ linux-3.0.4/drivers/media/dvb/frontends/dib3000.h 2011-08-24 18:28:18.000000000 -0400
28460@@ -40,10 +40,11 @@ struct dib_fe_xfer_ops
28461 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
28462 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
28463 };
28464+typedef struct dib_fe_xfer_ops __no_const dib_fe_xfer_ops_no_const;
28465
28466 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
28467 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
28468- struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops);
28469+ struct i2c_adapter* i2c, dib_fe_xfer_ops_no_const *xfer_ops);
28470 #else
28471 static inline struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
28472 struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops)
28473diff -urNp linux-3.0.4/drivers/media/dvb/frontends/dib3000mb.c linux-3.0.4/drivers/media/dvb/frontends/dib3000mb.c
28474--- linux-3.0.4/drivers/media/dvb/frontends/dib3000mb.c 2011-07-21 22:17:23.000000000 -0400
28475+++ linux-3.0.4/drivers/media/dvb/frontends/dib3000mb.c 2011-08-24 18:28:42.000000000 -0400
28476@@ -756,7 +756,7 @@ static int dib3000mb_tuner_pass_ctrl(str
28477 static struct dvb_frontend_ops dib3000mb_ops;
28478
28479 struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
28480- struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops)
28481+ struct i2c_adapter* i2c, dib_fe_xfer_ops_no_const *xfer_ops)
28482 {
28483 struct dib3000_state* state = NULL;
28484
28485diff -urNp linux-3.0.4/drivers/media/dvb/frontends/mb86a16.c linux-3.0.4/drivers/media/dvb/frontends/mb86a16.c
28486--- linux-3.0.4/drivers/media/dvb/frontends/mb86a16.c 2011-07-21 22:17:23.000000000 -0400
28487+++ linux-3.0.4/drivers/media/dvb/frontends/mb86a16.c 2011-08-23 21:48:14.000000000 -0400
28488@@ -1060,6 +1060,8 @@ static int mb86a16_set_fe(struct mb86a16
28489 int ret = -1;
28490 int sync;
28491
28492+ pax_track_stack();
28493+
28494 dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate);
28495
28496 fcp = 3000;
28497diff -urNp linux-3.0.4/drivers/media/dvb/frontends/or51211.c linux-3.0.4/drivers/media/dvb/frontends/or51211.c
28498--- linux-3.0.4/drivers/media/dvb/frontends/or51211.c 2011-07-21 22:17:23.000000000 -0400
28499+++ linux-3.0.4/drivers/media/dvb/frontends/or51211.c 2011-08-23 21:48:14.000000000 -0400
28500@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
28501 u8 tudata[585];
28502 int i;
28503
28504+ pax_track_stack();
28505+
28506 dprintk("Firmware is %zd bytes\n",fw->size);
28507
28508 /* Get eprom data */
28509diff -urNp linux-3.0.4/drivers/media/video/cx18/cx18-driver.c linux-3.0.4/drivers/media/video/cx18/cx18-driver.c
28510--- linux-3.0.4/drivers/media/video/cx18/cx18-driver.c 2011-07-21 22:17:23.000000000 -0400
28511+++ linux-3.0.4/drivers/media/video/cx18/cx18-driver.c 2011-08-23 21:48:14.000000000 -0400
28512@@ -327,6 +327,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
28513 struct i2c_client c;
28514 u8 eedata[256];
28515
28516+ pax_track_stack();
28517+
28518 memset(&c, 0, sizeof(c));
28519 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
28520 c.adapter = &cx->i2c_adap[0];
28521diff -urNp linux-3.0.4/drivers/media/video/cx23885/cx23885-input.c linux-3.0.4/drivers/media/video/cx23885/cx23885-input.c
28522--- linux-3.0.4/drivers/media/video/cx23885/cx23885-input.c 2011-07-21 22:17:23.000000000 -0400
28523+++ linux-3.0.4/drivers/media/video/cx23885/cx23885-input.c 2011-08-23 21:48:14.000000000 -0400
28524@@ -53,6 +53,8 @@ static void cx23885_input_process_measur
28525 bool handle = false;
28526 struct ir_raw_event ir_core_event[64];
28527
28528+ pax_track_stack();
28529+
28530 do {
28531 num = 0;
28532 v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) ir_core_event,
28533diff -urNp linux-3.0.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-3.0.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
28534--- linux-3.0.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-07-21 22:17:23.000000000 -0400
28535+++ linux-3.0.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-08-23 21:48:14.000000000 -0400
28536@@ -120,6 +120,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
28537 u8 *eeprom;
28538 struct tveeprom tvdata;
28539
28540+ pax_track_stack();
28541+
28542 memset(&tvdata,0,sizeof(tvdata));
28543
28544 eeprom = pvr2_eeprom_fetch(hdw);
28545diff -urNp linux-3.0.4/drivers/media/video/saa7134/saa6752hs.c linux-3.0.4/drivers/media/video/saa7134/saa6752hs.c
28546--- linux-3.0.4/drivers/media/video/saa7134/saa6752hs.c 2011-07-21 22:17:23.000000000 -0400
28547+++ linux-3.0.4/drivers/media/video/saa7134/saa6752hs.c 2011-08-23 21:48:14.000000000 -0400
28548@@ -682,6 +682,8 @@ static int saa6752hs_init(struct v4l2_su
28549 unsigned char localPAT[256];
28550 unsigned char localPMT[256];
28551
28552+ pax_track_stack();
28553+
28554 /* Set video format - must be done first as it resets other settings */
28555 set_reg8(client, 0x41, h->video_format);
28556
28557diff -urNp linux-3.0.4/drivers/media/video/saa7164/saa7164-cmd.c linux-3.0.4/drivers/media/video/saa7164/saa7164-cmd.c
28558--- linux-3.0.4/drivers/media/video/saa7164/saa7164-cmd.c 2011-07-21 22:17:23.000000000 -0400
28559+++ linux-3.0.4/drivers/media/video/saa7164/saa7164-cmd.c 2011-08-23 21:48:14.000000000 -0400
28560@@ -88,6 +88,8 @@ int saa7164_irq_dequeue(struct saa7164_d
28561 u8 tmp[512];
28562 dprintk(DBGLVL_CMD, "%s()\n", __func__);
28563
28564+ pax_track_stack();
28565+
28566 /* While any outstand message on the bus exists... */
28567 do {
28568
28569@@ -141,6 +143,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
28570 u8 tmp[512];
28571 dprintk(DBGLVL_CMD, "%s()\n", __func__);
28572
28573+ pax_track_stack();
28574+
28575 while (loop) {
28576
28577 struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
28578diff -urNp linux-3.0.4/drivers/media/video/timblogiw.c linux-3.0.4/drivers/media/video/timblogiw.c
28579--- linux-3.0.4/drivers/media/video/timblogiw.c 2011-07-21 22:17:23.000000000 -0400
28580+++ linux-3.0.4/drivers/media/video/timblogiw.c 2011-08-24 18:29:20.000000000 -0400
28581@@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *f
28582
28583 /* Platform device functions */
28584
28585-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
28586+static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
28587 .vidioc_querycap = timblogiw_querycap,
28588 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
28589 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
28590diff -urNp linux-3.0.4/drivers/media/video/usbvision/usbvision-core.c linux-3.0.4/drivers/media/video/usbvision/usbvision-core.c
28591--- linux-3.0.4/drivers/media/video/usbvision/usbvision-core.c 2011-07-21 22:17:23.000000000 -0400
28592+++ linux-3.0.4/drivers/media/video/usbvision/usbvision-core.c 2011-08-23 21:48:14.000000000 -0400
28593@@ -707,6 +707,8 @@ static enum parse_state usbvision_parse_
28594 unsigned char rv, gv, bv;
28595 static unsigned char *Y, *U, *V;
28596
28597+ pax_track_stack();
28598+
28599 frame = usbvision->cur_frame;
28600 image_size = frame->frmwidth * frame->frmheight;
28601 if ((frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
28602diff -urNp linux-3.0.4/drivers/media/video/videobuf-dma-sg.c linux-3.0.4/drivers/media/video/videobuf-dma-sg.c
28603--- linux-3.0.4/drivers/media/video/videobuf-dma-sg.c 2011-07-21 22:17:23.000000000 -0400
28604+++ linux-3.0.4/drivers/media/video/videobuf-dma-sg.c 2011-08-23 21:48:14.000000000 -0400
28605@@ -606,6 +606,8 @@ void *videobuf_sg_alloc(size_t size)
28606 {
28607 struct videobuf_queue q;
28608
28609+ pax_track_stack();
28610+
28611 /* Required to make generic handler to call __videobuf_alloc */
28612 q.int_ops = &sg_ops;
28613
28614diff -urNp linux-3.0.4/drivers/message/fusion/mptbase.c linux-3.0.4/drivers/message/fusion/mptbase.c
28615--- linux-3.0.4/drivers/message/fusion/mptbase.c 2011-07-21 22:17:23.000000000 -0400
28616+++ linux-3.0.4/drivers/message/fusion/mptbase.c 2011-08-23 21:48:14.000000000 -0400
28617@@ -6681,8 +6681,13 @@ static int mpt_iocinfo_proc_show(struct
28618 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
28619 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
28620
28621+#ifdef CONFIG_GRKERNSEC_HIDESYM
28622+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
28623+#else
28624 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
28625 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
28626+#endif
28627+
28628 /*
28629 * Rounding UP to nearest 4-kB boundary here...
28630 */
28631diff -urNp linux-3.0.4/drivers/message/fusion/mptsas.c linux-3.0.4/drivers/message/fusion/mptsas.c
28632--- linux-3.0.4/drivers/message/fusion/mptsas.c 2011-07-21 22:17:23.000000000 -0400
28633+++ linux-3.0.4/drivers/message/fusion/mptsas.c 2011-08-23 21:47:55.000000000 -0400
28634@@ -439,6 +439,23 @@ mptsas_is_end_device(struct mptsas_devin
28635 return 0;
28636 }
28637
28638+static inline void
28639+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
28640+{
28641+ if (phy_info->port_details) {
28642+ phy_info->port_details->rphy = rphy;
28643+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
28644+ ioc->name, rphy));
28645+ }
28646+
28647+ if (rphy) {
28648+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
28649+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
28650+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
28651+ ioc->name, rphy, rphy->dev.release));
28652+ }
28653+}
28654+
28655 /* no mutex */
28656 static void
28657 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
28658@@ -477,23 +494,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
28659 return NULL;
28660 }
28661
28662-static inline void
28663-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
28664-{
28665- if (phy_info->port_details) {
28666- phy_info->port_details->rphy = rphy;
28667- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
28668- ioc->name, rphy));
28669- }
28670-
28671- if (rphy) {
28672- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
28673- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
28674- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
28675- ioc->name, rphy, rphy->dev.release));
28676- }
28677-}
28678-
28679 static inline struct sas_port *
28680 mptsas_get_port(struct mptsas_phyinfo *phy_info)
28681 {
28682diff -urNp linux-3.0.4/drivers/message/fusion/mptscsih.c linux-3.0.4/drivers/message/fusion/mptscsih.c
28683--- linux-3.0.4/drivers/message/fusion/mptscsih.c 2011-07-21 22:17:23.000000000 -0400
28684+++ linux-3.0.4/drivers/message/fusion/mptscsih.c 2011-08-23 21:47:55.000000000 -0400
28685@@ -1268,15 +1268,16 @@ mptscsih_info(struct Scsi_Host *SChost)
28686
28687 h = shost_priv(SChost);
28688
28689- if (h) {
28690- if (h->info_kbuf == NULL)
28691- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
28692- return h->info_kbuf;
28693- h->info_kbuf[0] = '\0';
28694+ if (!h)
28695+ return NULL;
28696
28697- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
28698- h->info_kbuf[size-1] = '\0';
28699- }
28700+ if (h->info_kbuf == NULL)
28701+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
28702+ return h->info_kbuf;
28703+ h->info_kbuf[0] = '\0';
28704+
28705+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
28706+ h->info_kbuf[size-1] = '\0';
28707
28708 return h->info_kbuf;
28709 }
28710diff -urNp linux-3.0.4/drivers/message/i2o/i2o_config.c linux-3.0.4/drivers/message/i2o/i2o_config.c
28711--- linux-3.0.4/drivers/message/i2o/i2o_config.c 2011-07-21 22:17:23.000000000 -0400
28712+++ linux-3.0.4/drivers/message/i2o/i2o_config.c 2011-08-23 21:48:14.000000000 -0400
28713@@ -781,6 +781,8 @@ static int i2o_cfg_passthru(unsigned lon
28714 struct i2o_message *msg;
28715 unsigned int iop;
28716
28717+ pax_track_stack();
28718+
28719 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
28720 return -EFAULT;
28721
28722diff -urNp linux-3.0.4/drivers/message/i2o/i2o_proc.c linux-3.0.4/drivers/message/i2o/i2o_proc.c
28723--- linux-3.0.4/drivers/message/i2o/i2o_proc.c 2011-07-21 22:17:23.000000000 -0400
28724+++ linux-3.0.4/drivers/message/i2o/i2o_proc.c 2011-08-23 21:47:55.000000000 -0400
28725@@ -255,13 +255,6 @@ static char *scsi_devices[] = {
28726 "Array Controller Device"
28727 };
28728
28729-static char *chtostr(u8 * chars, int n)
28730-{
28731- char tmp[256];
28732- tmp[0] = 0;
28733- return strncat(tmp, (char *)chars, n);
28734-}
28735-
28736 static int i2o_report_query_status(struct seq_file *seq, int block_status,
28737 char *group)
28738 {
28739@@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct
28740
28741 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
28742 seq_printf(seq, "%-#8x", ddm_table.module_id);
28743- seq_printf(seq, "%-29s",
28744- chtostr(ddm_table.module_name_version, 28));
28745+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
28746 seq_printf(seq, "%9d ", ddm_table.data_size);
28747 seq_printf(seq, "%8d", ddm_table.code_size);
28748
28749@@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(s
28750
28751 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
28752 seq_printf(seq, "%-#8x", dst->module_id);
28753- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
28754- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
28755+ seq_printf(seq, "%-.28s", dst->module_name_version);
28756+ seq_printf(seq, "%-.8s", dst->date);
28757 seq_printf(seq, "%8d ", dst->module_size);
28758 seq_printf(seq, "%8d ", dst->mpb_size);
28759 seq_printf(seq, "0x%04x", dst->module_flags);
28760@@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(str
28761 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
28762 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
28763 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
28764- seq_printf(seq, "Vendor info : %s\n",
28765- chtostr((u8 *) (work32 + 2), 16));
28766- seq_printf(seq, "Product info : %s\n",
28767- chtostr((u8 *) (work32 + 6), 16));
28768- seq_printf(seq, "Description : %s\n",
28769- chtostr((u8 *) (work32 + 10), 16));
28770- seq_printf(seq, "Product rev. : %s\n",
28771- chtostr((u8 *) (work32 + 14), 8));
28772+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
28773+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
28774+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
28775+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
28776
28777 seq_printf(seq, "Serial number : ");
28778 print_serial_number(seq, (u8 *) (work32 + 16),
28779@@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(str
28780 }
28781
28782 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
28783- seq_printf(seq, "Module name : %s\n",
28784- chtostr(result.module_name, 24));
28785- seq_printf(seq, "Module revision : %s\n",
28786- chtostr(result.module_rev, 8));
28787+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
28788+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
28789
28790 seq_printf(seq, "Serial number : ");
28791 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
28792@@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq
28793 return 0;
28794 }
28795
28796- seq_printf(seq, "Device name : %s\n",
28797- chtostr(result.device_name, 64));
28798- seq_printf(seq, "Service name : %s\n",
28799- chtostr(result.service_name, 64));
28800- seq_printf(seq, "Physical name : %s\n",
28801- chtostr(result.physical_location, 64));
28802- seq_printf(seq, "Instance number : %s\n",
28803- chtostr(result.instance_number, 4));
28804+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
28805+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
28806+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
28807+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
28808
28809 return 0;
28810 }
28811diff -urNp linux-3.0.4/drivers/message/i2o/iop.c linux-3.0.4/drivers/message/i2o/iop.c
28812--- linux-3.0.4/drivers/message/i2o/iop.c 2011-07-21 22:17:23.000000000 -0400
28813+++ linux-3.0.4/drivers/message/i2o/iop.c 2011-08-23 21:47:55.000000000 -0400
28814@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
28815
28816 spin_lock_irqsave(&c->context_list_lock, flags);
28817
28818- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
28819- atomic_inc(&c->context_list_counter);
28820+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
28821+ atomic_inc_unchecked(&c->context_list_counter);
28822
28823- entry->context = atomic_read(&c->context_list_counter);
28824+ entry->context = atomic_read_unchecked(&c->context_list_counter);
28825
28826 list_add(&entry->list, &c->context_list);
28827
28828@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(voi
28829
28830 #if BITS_PER_LONG == 64
28831 spin_lock_init(&c->context_list_lock);
28832- atomic_set(&c->context_list_counter, 0);
28833+ atomic_set_unchecked(&c->context_list_counter, 0);
28834 INIT_LIST_HEAD(&c->context_list);
28835 #endif
28836
28837diff -urNp linux-3.0.4/drivers/mfd/abx500-core.c linux-3.0.4/drivers/mfd/abx500-core.c
28838--- linux-3.0.4/drivers/mfd/abx500-core.c 2011-07-21 22:17:23.000000000 -0400
28839+++ linux-3.0.4/drivers/mfd/abx500-core.c 2011-08-23 21:47:55.000000000 -0400
28840@@ -14,7 +14,7 @@ static LIST_HEAD(abx500_list);
28841
28842 struct abx500_device_entry {
28843 struct list_head list;
28844- struct abx500_ops ops;
28845+ abx500_ops_no_const ops;
28846 struct device *dev;
28847 };
28848
28849diff -urNp linux-3.0.4/drivers/mfd/janz-cmodio.c linux-3.0.4/drivers/mfd/janz-cmodio.c
28850--- linux-3.0.4/drivers/mfd/janz-cmodio.c 2011-07-21 22:17:23.000000000 -0400
28851+++ linux-3.0.4/drivers/mfd/janz-cmodio.c 2011-08-23 21:47:55.000000000 -0400
28852@@ -13,6 +13,7 @@
28853
28854 #include <linux/kernel.h>
28855 #include <linux/module.h>
28856+#include <linux/slab.h>
28857 #include <linux/init.h>
28858 #include <linux/pci.h>
28859 #include <linux/interrupt.h>
28860diff -urNp linux-3.0.4/drivers/mfd/wm8350-i2c.c linux-3.0.4/drivers/mfd/wm8350-i2c.c
28861--- linux-3.0.4/drivers/mfd/wm8350-i2c.c 2011-07-21 22:17:23.000000000 -0400
28862+++ linux-3.0.4/drivers/mfd/wm8350-i2c.c 2011-08-23 21:48:14.000000000 -0400
28863@@ -44,6 +44,8 @@ static int wm8350_i2c_write_device(struc
28864 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
28865 int ret;
28866
28867+ pax_track_stack();
28868+
28869 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
28870 return -EINVAL;
28871
28872diff -urNp linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.c linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.c
28873--- linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.c 2011-07-21 22:17:23.000000000 -0400
28874+++ linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.c 2011-08-23 21:47:55.000000000 -0400
28875@@ -435,7 +435,7 @@ static irqreturn_t lis302dl_interrupt(in
28876 * the lid is closed. This leads to interrupts as soon as a little move
28877 * is done.
28878 */
28879- atomic_inc(&lis3_dev.count);
28880+ atomic_inc_unchecked(&lis3_dev.count);
28881
28882 wake_up_interruptible(&lis3_dev.misc_wait);
28883 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
28884@@ -518,7 +518,7 @@ static int lis3lv02d_misc_open(struct in
28885 if (lis3_dev.pm_dev)
28886 pm_runtime_get_sync(lis3_dev.pm_dev);
28887
28888- atomic_set(&lis3_dev.count, 0);
28889+ atomic_set_unchecked(&lis3_dev.count, 0);
28890 return 0;
28891 }
28892
28893@@ -545,7 +545,7 @@ static ssize_t lis3lv02d_misc_read(struc
28894 add_wait_queue(&lis3_dev.misc_wait, &wait);
28895 while (true) {
28896 set_current_state(TASK_INTERRUPTIBLE);
28897- data = atomic_xchg(&lis3_dev.count, 0);
28898+ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
28899 if (data)
28900 break;
28901
28902@@ -583,7 +583,7 @@ out:
28903 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
28904 {
28905 poll_wait(file, &lis3_dev.misc_wait, wait);
28906- if (atomic_read(&lis3_dev.count))
28907+ if (atomic_read_unchecked(&lis3_dev.count))
28908 return POLLIN | POLLRDNORM;
28909 return 0;
28910 }
28911diff -urNp linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.h linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.h
28912--- linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.h 2011-07-21 22:17:23.000000000 -0400
28913+++ linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.h 2011-08-23 21:47:55.000000000 -0400
28914@@ -265,7 +265,7 @@ struct lis3lv02d {
28915 struct input_polled_dev *idev; /* input device */
28916 struct platform_device *pdev; /* platform device */
28917 struct regulator_bulk_data regulators[2];
28918- atomic_t count; /* interrupt count after last read */
28919+ atomic_unchecked_t count; /* interrupt count after last read */
28920 union axis_conversion ac; /* hw -> logical axis */
28921 int mapped_btns[3];
28922
28923diff -urNp linux-3.0.4/drivers/misc/sgi-gru/gruhandles.c linux-3.0.4/drivers/misc/sgi-gru/gruhandles.c
28924--- linux-3.0.4/drivers/misc/sgi-gru/gruhandles.c 2011-07-21 22:17:23.000000000 -0400
28925+++ linux-3.0.4/drivers/misc/sgi-gru/gruhandles.c 2011-08-23 21:47:55.000000000 -0400
28926@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op
28927 unsigned long nsec;
28928
28929 nsec = CLKS2NSEC(clks);
28930- atomic_long_inc(&mcs_op_statistics[op].count);
28931- atomic_long_add(nsec, &mcs_op_statistics[op].total);
28932+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
28933+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
28934 if (mcs_op_statistics[op].max < nsec)
28935 mcs_op_statistics[op].max = nsec;
28936 }
28937diff -urNp linux-3.0.4/drivers/misc/sgi-gru/gruprocfs.c linux-3.0.4/drivers/misc/sgi-gru/gruprocfs.c
28938--- linux-3.0.4/drivers/misc/sgi-gru/gruprocfs.c 2011-07-21 22:17:23.000000000 -0400
28939+++ linux-3.0.4/drivers/misc/sgi-gru/gruprocfs.c 2011-08-23 21:47:55.000000000 -0400
28940@@ -32,9 +32,9 @@
28941
28942 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
28943
28944-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
28945+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
28946 {
28947- unsigned long val = atomic_long_read(v);
28948+ unsigned long val = atomic_long_read_unchecked(v);
28949
28950 seq_printf(s, "%16lu %s\n", val, id);
28951 }
28952@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct se
28953
28954 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
28955 for (op = 0; op < mcsop_last; op++) {
28956- count = atomic_long_read(&mcs_op_statistics[op].count);
28957- total = atomic_long_read(&mcs_op_statistics[op].total);
28958+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
28959+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
28960 max = mcs_op_statistics[op].max;
28961 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
28962 count ? total / count : 0, max);
28963diff -urNp linux-3.0.4/drivers/misc/sgi-gru/grutables.h linux-3.0.4/drivers/misc/sgi-gru/grutables.h
28964--- linux-3.0.4/drivers/misc/sgi-gru/grutables.h 2011-07-21 22:17:23.000000000 -0400
28965+++ linux-3.0.4/drivers/misc/sgi-gru/grutables.h 2011-08-23 21:47:55.000000000 -0400
28966@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
28967 * GRU statistics.
28968 */
28969 struct gru_stats_s {
28970- atomic_long_t vdata_alloc;
28971- atomic_long_t vdata_free;
28972- atomic_long_t gts_alloc;
28973- atomic_long_t gts_free;
28974- atomic_long_t gms_alloc;
28975- atomic_long_t gms_free;
28976- atomic_long_t gts_double_allocate;
28977- atomic_long_t assign_context;
28978- atomic_long_t assign_context_failed;
28979- atomic_long_t free_context;
28980- atomic_long_t load_user_context;
28981- atomic_long_t load_kernel_context;
28982- atomic_long_t lock_kernel_context;
28983- atomic_long_t unlock_kernel_context;
28984- atomic_long_t steal_user_context;
28985- atomic_long_t steal_kernel_context;
28986- atomic_long_t steal_context_failed;
28987- atomic_long_t nopfn;
28988- atomic_long_t asid_new;
28989- atomic_long_t asid_next;
28990- atomic_long_t asid_wrap;
28991- atomic_long_t asid_reuse;
28992- atomic_long_t intr;
28993- atomic_long_t intr_cbr;
28994- atomic_long_t intr_tfh;
28995- atomic_long_t intr_spurious;
28996- atomic_long_t intr_mm_lock_failed;
28997- atomic_long_t call_os;
28998- atomic_long_t call_os_wait_queue;
28999- atomic_long_t user_flush_tlb;
29000- atomic_long_t user_unload_context;
29001- atomic_long_t user_exception;
29002- atomic_long_t set_context_option;
29003- atomic_long_t check_context_retarget_intr;
29004- atomic_long_t check_context_unload;
29005- atomic_long_t tlb_dropin;
29006- atomic_long_t tlb_preload_page;
29007- atomic_long_t tlb_dropin_fail_no_asid;
29008- atomic_long_t tlb_dropin_fail_upm;
29009- atomic_long_t tlb_dropin_fail_invalid;
29010- atomic_long_t tlb_dropin_fail_range_active;
29011- atomic_long_t tlb_dropin_fail_idle;
29012- atomic_long_t tlb_dropin_fail_fmm;
29013- atomic_long_t tlb_dropin_fail_no_exception;
29014- atomic_long_t tfh_stale_on_fault;
29015- atomic_long_t mmu_invalidate_range;
29016- atomic_long_t mmu_invalidate_page;
29017- atomic_long_t flush_tlb;
29018- atomic_long_t flush_tlb_gru;
29019- atomic_long_t flush_tlb_gru_tgh;
29020- atomic_long_t flush_tlb_gru_zero_asid;
29021-
29022- atomic_long_t copy_gpa;
29023- atomic_long_t read_gpa;
29024-
29025- atomic_long_t mesq_receive;
29026- atomic_long_t mesq_receive_none;
29027- atomic_long_t mesq_send;
29028- atomic_long_t mesq_send_failed;
29029- atomic_long_t mesq_noop;
29030- atomic_long_t mesq_send_unexpected_error;
29031- atomic_long_t mesq_send_lb_overflow;
29032- atomic_long_t mesq_send_qlimit_reached;
29033- atomic_long_t mesq_send_amo_nacked;
29034- atomic_long_t mesq_send_put_nacked;
29035- atomic_long_t mesq_page_overflow;
29036- atomic_long_t mesq_qf_locked;
29037- atomic_long_t mesq_qf_noop_not_full;
29038- atomic_long_t mesq_qf_switch_head_failed;
29039- atomic_long_t mesq_qf_unexpected_error;
29040- atomic_long_t mesq_noop_unexpected_error;
29041- atomic_long_t mesq_noop_lb_overflow;
29042- atomic_long_t mesq_noop_qlimit_reached;
29043- atomic_long_t mesq_noop_amo_nacked;
29044- atomic_long_t mesq_noop_put_nacked;
29045- atomic_long_t mesq_noop_page_overflow;
29046+ atomic_long_unchecked_t vdata_alloc;
29047+ atomic_long_unchecked_t vdata_free;
29048+ atomic_long_unchecked_t gts_alloc;
29049+ atomic_long_unchecked_t gts_free;
29050+ atomic_long_unchecked_t gms_alloc;
29051+ atomic_long_unchecked_t gms_free;
29052+ atomic_long_unchecked_t gts_double_allocate;
29053+ atomic_long_unchecked_t assign_context;
29054+ atomic_long_unchecked_t assign_context_failed;
29055+ atomic_long_unchecked_t free_context;
29056+ atomic_long_unchecked_t load_user_context;
29057+ atomic_long_unchecked_t load_kernel_context;
29058+ atomic_long_unchecked_t lock_kernel_context;
29059+ atomic_long_unchecked_t unlock_kernel_context;
29060+ atomic_long_unchecked_t steal_user_context;
29061+ atomic_long_unchecked_t steal_kernel_context;
29062+ atomic_long_unchecked_t steal_context_failed;
29063+ atomic_long_unchecked_t nopfn;
29064+ atomic_long_unchecked_t asid_new;
29065+ atomic_long_unchecked_t asid_next;
29066+ atomic_long_unchecked_t asid_wrap;
29067+ atomic_long_unchecked_t asid_reuse;
29068+ atomic_long_unchecked_t intr;
29069+ atomic_long_unchecked_t intr_cbr;
29070+ atomic_long_unchecked_t intr_tfh;
29071+ atomic_long_unchecked_t intr_spurious;
29072+ atomic_long_unchecked_t intr_mm_lock_failed;
29073+ atomic_long_unchecked_t call_os;
29074+ atomic_long_unchecked_t call_os_wait_queue;
29075+ atomic_long_unchecked_t user_flush_tlb;
29076+ atomic_long_unchecked_t user_unload_context;
29077+ atomic_long_unchecked_t user_exception;
29078+ atomic_long_unchecked_t set_context_option;
29079+ atomic_long_unchecked_t check_context_retarget_intr;
29080+ atomic_long_unchecked_t check_context_unload;
29081+ atomic_long_unchecked_t tlb_dropin;
29082+ atomic_long_unchecked_t tlb_preload_page;
29083+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
29084+ atomic_long_unchecked_t tlb_dropin_fail_upm;
29085+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
29086+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
29087+ atomic_long_unchecked_t tlb_dropin_fail_idle;
29088+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
29089+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
29090+ atomic_long_unchecked_t tfh_stale_on_fault;
29091+ atomic_long_unchecked_t mmu_invalidate_range;
29092+ atomic_long_unchecked_t mmu_invalidate_page;
29093+ atomic_long_unchecked_t flush_tlb;
29094+ atomic_long_unchecked_t flush_tlb_gru;
29095+ atomic_long_unchecked_t flush_tlb_gru_tgh;
29096+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
29097+
29098+ atomic_long_unchecked_t copy_gpa;
29099+ atomic_long_unchecked_t read_gpa;
29100+
29101+ atomic_long_unchecked_t mesq_receive;
29102+ atomic_long_unchecked_t mesq_receive_none;
29103+ atomic_long_unchecked_t mesq_send;
29104+ atomic_long_unchecked_t mesq_send_failed;
29105+ atomic_long_unchecked_t mesq_noop;
29106+ atomic_long_unchecked_t mesq_send_unexpected_error;
29107+ atomic_long_unchecked_t mesq_send_lb_overflow;
29108+ atomic_long_unchecked_t mesq_send_qlimit_reached;
29109+ atomic_long_unchecked_t mesq_send_amo_nacked;
29110+ atomic_long_unchecked_t mesq_send_put_nacked;
29111+ atomic_long_unchecked_t mesq_page_overflow;
29112+ atomic_long_unchecked_t mesq_qf_locked;
29113+ atomic_long_unchecked_t mesq_qf_noop_not_full;
29114+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
29115+ atomic_long_unchecked_t mesq_qf_unexpected_error;
29116+ atomic_long_unchecked_t mesq_noop_unexpected_error;
29117+ atomic_long_unchecked_t mesq_noop_lb_overflow;
29118+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
29119+ atomic_long_unchecked_t mesq_noop_amo_nacked;
29120+ atomic_long_unchecked_t mesq_noop_put_nacked;
29121+ atomic_long_unchecked_t mesq_noop_page_overflow;
29122
29123 };
29124
29125@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start
29126 tghop_invalidate, mcsop_last};
29127
29128 struct mcs_op_statistic {
29129- atomic_long_t count;
29130- atomic_long_t total;
29131+ atomic_long_unchecked_t count;
29132+ atomic_long_unchecked_t total;
29133 unsigned long max;
29134 };
29135
29136@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_st
29137
29138 #define STAT(id) do { \
29139 if (gru_options & OPT_STATS) \
29140- atomic_long_inc(&gru_stats.id); \
29141+ atomic_long_inc_unchecked(&gru_stats.id); \
29142 } while (0)
29143
29144 #ifdef CONFIG_SGI_GRU_DEBUG
29145diff -urNp linux-3.0.4/drivers/misc/sgi-xp/xp.h linux-3.0.4/drivers/misc/sgi-xp/xp.h
29146--- linux-3.0.4/drivers/misc/sgi-xp/xp.h 2011-07-21 22:17:23.000000000 -0400
29147+++ linux-3.0.4/drivers/misc/sgi-xp/xp.h 2011-08-23 21:47:55.000000000 -0400
29148@@ -289,7 +289,7 @@ struct xpc_interface {
29149 xpc_notify_func, void *);
29150 void (*received) (short, int, void *);
29151 enum xp_retval (*partid_to_nasids) (short, void *);
29152-};
29153+} __no_const;
29154
29155 extern struct xpc_interface xpc_interface;
29156
29157diff -urNp linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0001.c linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0001.c
29158--- linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0001.c 2011-07-21 22:17:23.000000000 -0400
29159+++ linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0001.c 2011-08-23 21:48:14.000000000 -0400
29160@@ -757,6 +757,8 @@ static int chip_ready (struct map_info *
29161 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
29162 unsigned long timeo = jiffies + HZ;
29163
29164+ pax_track_stack();
29165+
29166 /* Prevent setting state FL_SYNCING for chip in suspended state. */
29167 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
29168 goto sleep;
29169@@ -1653,6 +1655,8 @@ static int __xipram do_write_buffer(stru
29170 unsigned long initial_adr;
29171 int initial_len = len;
29172
29173+ pax_track_stack();
29174+
29175 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
29176 adr += chip->start;
29177 initial_adr = adr;
29178@@ -1871,6 +1875,8 @@ static int __xipram do_erase_oneblock(st
29179 int retries = 3;
29180 int ret;
29181
29182+ pax_track_stack();
29183+
29184 adr += chip->start;
29185
29186 retry:
29187diff -urNp linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0020.c linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0020.c
29188--- linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0020.c 2011-07-21 22:17:23.000000000 -0400
29189+++ linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0020.c 2011-08-23 21:48:14.000000000 -0400
29190@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
29191 unsigned long cmd_addr;
29192 struct cfi_private *cfi = map->fldrv_priv;
29193
29194+ pax_track_stack();
29195+
29196 adr += chip->start;
29197
29198 /* Ensure cmd read/writes are aligned. */
29199@@ -429,6 +431,8 @@ static inline int do_write_buffer(struct
29200 DECLARE_WAITQUEUE(wait, current);
29201 int wbufsize, z;
29202
29203+ pax_track_stack();
29204+
29205 /* M58LW064A requires bus alignment for buffer wriets -- saw */
29206 if (adr & (map_bankwidth(map)-1))
29207 return -EINVAL;
29208@@ -743,6 +747,8 @@ static inline int do_erase_oneblock(stru
29209 DECLARE_WAITQUEUE(wait, current);
29210 int ret = 0;
29211
29212+ pax_track_stack();
29213+
29214 adr += chip->start;
29215
29216 /* Let's determine this according to the interleave only once */
29217@@ -1048,6 +1054,8 @@ static inline int do_lock_oneblock(struc
29218 unsigned long timeo = jiffies + HZ;
29219 DECLARE_WAITQUEUE(wait, current);
29220
29221+ pax_track_stack();
29222+
29223 adr += chip->start;
29224
29225 /* Let's determine this according to the interleave only once */
29226@@ -1197,6 +1205,8 @@ static inline int do_unlock_oneblock(str
29227 unsigned long timeo = jiffies + HZ;
29228 DECLARE_WAITQUEUE(wait, current);
29229
29230+ pax_track_stack();
29231+
29232 adr += chip->start;
29233
29234 /* Let's determine this according to the interleave only once */
29235diff -urNp linux-3.0.4/drivers/mtd/devices/doc2000.c linux-3.0.4/drivers/mtd/devices/doc2000.c
29236--- linux-3.0.4/drivers/mtd/devices/doc2000.c 2011-07-21 22:17:23.000000000 -0400
29237+++ linux-3.0.4/drivers/mtd/devices/doc2000.c 2011-08-23 21:47:55.000000000 -0400
29238@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
29239
29240 /* The ECC will not be calculated correctly if less than 512 is written */
29241 /* DBB-
29242- if (len != 0x200 && eccbuf)
29243+ if (len != 0x200)
29244 printk(KERN_WARNING
29245 "ECC needs a full sector write (adr: %lx size %lx)\n",
29246 (long) to, (long) len);
29247diff -urNp linux-3.0.4/drivers/mtd/devices/doc2001.c linux-3.0.4/drivers/mtd/devices/doc2001.c
29248--- linux-3.0.4/drivers/mtd/devices/doc2001.c 2011-07-21 22:17:23.000000000 -0400
29249+++ linux-3.0.4/drivers/mtd/devices/doc2001.c 2011-08-23 21:47:55.000000000 -0400
29250@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
29251 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
29252
29253 /* Don't allow read past end of device */
29254- if (from >= this->totlen)
29255+ if (from >= this->totlen || !len)
29256 return -EINVAL;
29257
29258 /* Don't allow a single read to cross a 512-byte block boundary */
29259diff -urNp linux-3.0.4/drivers/mtd/ftl.c linux-3.0.4/drivers/mtd/ftl.c
29260--- linux-3.0.4/drivers/mtd/ftl.c 2011-07-21 22:17:23.000000000 -0400
29261+++ linux-3.0.4/drivers/mtd/ftl.c 2011-08-23 21:48:14.000000000 -0400
29262@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
29263 loff_t offset;
29264 uint16_t srcunitswap = cpu_to_le16(srcunit);
29265
29266+ pax_track_stack();
29267+
29268 eun = &part->EUNInfo[srcunit];
29269 xfer = &part->XferInfo[xferunit];
29270 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
29271diff -urNp linux-3.0.4/drivers/mtd/inftlcore.c linux-3.0.4/drivers/mtd/inftlcore.c
29272--- linux-3.0.4/drivers/mtd/inftlcore.c 2011-07-21 22:17:23.000000000 -0400
29273+++ linux-3.0.4/drivers/mtd/inftlcore.c 2011-08-23 21:48:14.000000000 -0400
29274@@ -259,6 +259,8 @@ static u16 INFTL_foldchain(struct INFTLr
29275 struct inftl_oob oob;
29276 size_t retlen;
29277
29278+ pax_track_stack();
29279+
29280 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
29281 "pending=%d)\n", inftl, thisVUC, pendingblock);
29282
29283diff -urNp linux-3.0.4/drivers/mtd/inftlmount.c linux-3.0.4/drivers/mtd/inftlmount.c
29284--- linux-3.0.4/drivers/mtd/inftlmount.c 2011-07-21 22:17:23.000000000 -0400
29285+++ linux-3.0.4/drivers/mtd/inftlmount.c 2011-08-23 21:48:14.000000000 -0400
29286@@ -53,6 +53,8 @@ static int find_boot_record(struct INFTL
29287 struct INFTLPartition *ip;
29288 size_t retlen;
29289
29290+ pax_track_stack();
29291+
29292 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
29293
29294 /*
29295diff -urNp linux-3.0.4/drivers/mtd/lpddr/qinfo_probe.c linux-3.0.4/drivers/mtd/lpddr/qinfo_probe.c
29296--- linux-3.0.4/drivers/mtd/lpddr/qinfo_probe.c 2011-07-21 22:17:23.000000000 -0400
29297+++ linux-3.0.4/drivers/mtd/lpddr/qinfo_probe.c 2011-08-23 21:48:14.000000000 -0400
29298@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
29299 {
29300 map_word pfow_val[4];
29301
29302+ pax_track_stack();
29303+
29304 /* Check identification string */
29305 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
29306 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
29307diff -urNp linux-3.0.4/drivers/mtd/mtdchar.c linux-3.0.4/drivers/mtd/mtdchar.c
29308--- linux-3.0.4/drivers/mtd/mtdchar.c 2011-07-21 22:17:23.000000000 -0400
29309+++ linux-3.0.4/drivers/mtd/mtdchar.c 2011-08-23 21:48:14.000000000 -0400
29310@@ -553,6 +553,8 @@ static int mtd_ioctl(struct file *file,
29311 u_long size;
29312 struct mtd_info_user info;
29313
29314+ pax_track_stack();
29315+
29316 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
29317
29318 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
29319diff -urNp linux-3.0.4/drivers/mtd/nand/denali.c linux-3.0.4/drivers/mtd/nand/denali.c
29320--- linux-3.0.4/drivers/mtd/nand/denali.c 2011-07-21 22:17:23.000000000 -0400
29321+++ linux-3.0.4/drivers/mtd/nand/denali.c 2011-08-23 21:47:55.000000000 -0400
29322@@ -26,6 +26,7 @@
29323 #include <linux/pci.h>
29324 #include <linux/mtd/mtd.h>
29325 #include <linux/module.h>
29326+#include <linux/slab.h>
29327
29328 #include "denali.h"
29329
29330diff -urNp linux-3.0.4/drivers/mtd/nftlcore.c linux-3.0.4/drivers/mtd/nftlcore.c
29331--- linux-3.0.4/drivers/mtd/nftlcore.c 2011-07-21 22:17:23.000000000 -0400
29332+++ linux-3.0.4/drivers/mtd/nftlcore.c 2011-08-23 21:48:14.000000000 -0400
29333@@ -264,6 +264,8 @@ static u16 NFTL_foldchain (struct NFTLre
29334 int inplace = 1;
29335 size_t retlen;
29336
29337+ pax_track_stack();
29338+
29339 memset(BlockMap, 0xff, sizeof(BlockMap));
29340 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
29341
29342diff -urNp linux-3.0.4/drivers/mtd/nftlmount.c linux-3.0.4/drivers/mtd/nftlmount.c
29343--- linux-3.0.4/drivers/mtd/nftlmount.c 2011-07-21 22:17:23.000000000 -0400
29344+++ linux-3.0.4/drivers/mtd/nftlmount.c 2011-08-23 21:48:14.000000000 -0400
29345@@ -24,6 +24,7 @@
29346 #include <asm/errno.h>
29347 #include <linux/delay.h>
29348 #include <linux/slab.h>
29349+#include <linux/sched.h>
29350 #include <linux/mtd/mtd.h>
29351 #include <linux/mtd/nand.h>
29352 #include <linux/mtd/nftl.h>
29353@@ -45,6 +46,8 @@ static int find_boot_record(struct NFTLr
29354 struct mtd_info *mtd = nftl->mbd.mtd;
29355 unsigned int i;
29356
29357+ pax_track_stack();
29358+
29359 /* Assume logical EraseSize == physical erasesize for starting the scan.
29360 We'll sort it out later if we find a MediaHeader which says otherwise */
29361 /* Actually, we won't. The new DiskOnChip driver has already scanned
29362diff -urNp linux-3.0.4/drivers/mtd/ubi/build.c linux-3.0.4/drivers/mtd/ubi/build.c
29363--- linux-3.0.4/drivers/mtd/ubi/build.c 2011-07-21 22:17:23.000000000 -0400
29364+++ linux-3.0.4/drivers/mtd/ubi/build.c 2011-08-23 21:47:55.000000000 -0400
29365@@ -1287,7 +1287,7 @@ module_exit(ubi_exit);
29366 static int __init bytes_str_to_int(const char *str)
29367 {
29368 char *endp;
29369- unsigned long result;
29370+ unsigned long result, scale = 1;
29371
29372 result = simple_strtoul(str, &endp, 0);
29373 if (str == endp || result >= INT_MAX) {
29374@@ -1298,11 +1298,11 @@ static int __init bytes_str_to_int(const
29375
29376 switch (*endp) {
29377 case 'G':
29378- result *= 1024;
29379+ scale *= 1024;
29380 case 'M':
29381- result *= 1024;
29382+ scale *= 1024;
29383 case 'K':
29384- result *= 1024;
29385+ scale *= 1024;
29386 if (endp[1] == 'i' && endp[2] == 'B')
29387 endp += 2;
29388 case '\0':
29389@@ -1313,7 +1313,13 @@ static int __init bytes_str_to_int(const
29390 return -EINVAL;
29391 }
29392
29393- return result;
29394+ if ((intoverflow_t)result*scale >= INT_MAX) {
29395+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
29396+ str);
29397+ return -EINVAL;
29398+ }
29399+
29400+ return result*scale;
29401 }
29402
29403 /**
29404diff -urNp linux-3.0.4/drivers/net/bna/bfa_ioc_ct.c linux-3.0.4/drivers/net/bna/bfa_ioc_ct.c
29405--- linux-3.0.4/drivers/net/bna/bfa_ioc_ct.c 2011-07-21 22:17:23.000000000 -0400
29406+++ linux-3.0.4/drivers/net/bna/bfa_ioc_ct.c 2011-08-23 21:47:55.000000000 -0400
29407@@ -48,7 +48,21 @@ static void bfa_ioc_ct_sync_ack(struct b
29408 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
29409 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
29410
29411-static struct bfa_ioc_hwif nw_hwif_ct;
29412+static struct bfa_ioc_hwif nw_hwif_ct = {
29413+ .ioc_pll_init = bfa_ioc_ct_pll_init,
29414+ .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
29415+ .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
29416+ .ioc_reg_init = bfa_ioc_ct_reg_init,
29417+ .ioc_map_port = bfa_ioc_ct_map_port,
29418+ .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
29419+ .ioc_notify_fail = bfa_ioc_ct_notify_fail,
29420+ .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
29421+ .ioc_sync_start = bfa_ioc_ct_sync_start,
29422+ .ioc_sync_join = bfa_ioc_ct_sync_join,
29423+ .ioc_sync_leave = bfa_ioc_ct_sync_leave,
29424+ .ioc_sync_ack = bfa_ioc_ct_sync_ack,
29425+ .ioc_sync_complete = bfa_ioc_ct_sync_complete
29426+};
29427
29428 /**
29429 * Called from bfa_ioc_attach() to map asic specific calls.
29430@@ -56,20 +70,6 @@ static struct bfa_ioc_hwif nw_hwif_ct;
29431 void
29432 bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
29433 {
29434- nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
29435- nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
29436- nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
29437- nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
29438- nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
29439- nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
29440- nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
29441- nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
29442- nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
29443- nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
29444- nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
29445- nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
29446- nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
29447-
29448 ioc->ioc_hwif = &nw_hwif_ct;
29449 }
29450
29451diff -urNp linux-3.0.4/drivers/net/bna/bnad.c linux-3.0.4/drivers/net/bna/bnad.c
29452--- linux-3.0.4/drivers/net/bna/bnad.c 2011-07-21 22:17:23.000000000 -0400
29453+++ linux-3.0.4/drivers/net/bna/bnad.c 2011-08-23 21:47:55.000000000 -0400
29454@@ -1681,7 +1681,14 @@ bnad_setup_tx(struct bnad *bnad, uint tx
29455 struct bna_intr_info *intr_info =
29456 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
29457 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
29458- struct bna_tx_event_cbfn tx_cbfn;
29459+ static struct bna_tx_event_cbfn tx_cbfn = {
29460+ /* Initialize the tx event handlers */
29461+ .tcb_setup_cbfn = bnad_cb_tcb_setup,
29462+ .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
29463+ .tx_stall_cbfn = bnad_cb_tx_stall,
29464+ .tx_resume_cbfn = bnad_cb_tx_resume,
29465+ .tx_cleanup_cbfn = bnad_cb_tx_cleanup
29466+ };
29467 struct bna_tx *tx;
29468 unsigned long flags;
29469
29470@@ -1690,13 +1697,6 @@ bnad_setup_tx(struct bnad *bnad, uint tx
29471 tx_config->txq_depth = bnad->txq_depth;
29472 tx_config->tx_type = BNA_TX_T_REGULAR;
29473
29474- /* Initialize the tx event handlers */
29475- tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
29476- tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
29477- tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
29478- tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
29479- tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
29480-
29481 /* Get BNA's resource requirement for one tx object */
29482 spin_lock_irqsave(&bnad->bna_lock, flags);
29483 bna_tx_res_req(bnad->num_txq_per_tx,
29484@@ -1827,21 +1827,21 @@ bnad_setup_rx(struct bnad *bnad, uint rx
29485 struct bna_intr_info *intr_info =
29486 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
29487 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
29488- struct bna_rx_event_cbfn rx_cbfn;
29489+ static struct bna_rx_event_cbfn rx_cbfn = {
29490+ /* Initialize the Rx event handlers */
29491+ .rcb_setup_cbfn = bnad_cb_rcb_setup,
29492+ .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
29493+ .ccb_setup_cbfn = bnad_cb_ccb_setup,
29494+ .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
29495+ .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
29496+ .rx_post_cbfn = bnad_cb_rx_post
29497+ };
29498 struct bna_rx *rx;
29499 unsigned long flags;
29500
29501 /* Initialize the Rx object configuration */
29502 bnad_init_rx_config(bnad, rx_config);
29503
29504- /* Initialize the Rx event handlers */
29505- rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
29506- rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
29507- rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
29508- rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
29509- rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
29510- rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
29511-
29512 /* Get BNA's resource requirement for one Rx object */
29513 spin_lock_irqsave(&bnad->bna_lock, flags);
29514 bna_rx_res_req(rx_config, res_info);
29515diff -urNp linux-3.0.4/drivers/net/bnx2.c linux-3.0.4/drivers/net/bnx2.c
29516--- linux-3.0.4/drivers/net/bnx2.c 2011-07-21 22:17:23.000000000 -0400
29517+++ linux-3.0.4/drivers/net/bnx2.c 2011-08-23 21:48:14.000000000 -0400
29518@@ -5828,6 +5828,8 @@ bnx2_test_nvram(struct bnx2 *bp)
29519 int rc = 0;
29520 u32 magic, csum;
29521
29522+ pax_track_stack();
29523+
29524 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
29525 goto test_nvram_done;
29526
29527diff -urNp linux-3.0.4/drivers/net/bnx2x/bnx2x_ethtool.c linux-3.0.4/drivers/net/bnx2x/bnx2x_ethtool.c
29528--- linux-3.0.4/drivers/net/bnx2x/bnx2x_ethtool.c 2011-07-21 22:17:23.000000000 -0400
29529+++ linux-3.0.4/drivers/net/bnx2x/bnx2x_ethtool.c 2011-08-23 21:48:14.000000000 -0400
29530@@ -1705,6 +1705,8 @@ static int bnx2x_test_nvram(struct bnx2x
29531 int i, rc;
29532 u32 magic, crc;
29533
29534+ pax_track_stack();
29535+
29536 if (BP_NOMCP(bp))
29537 return 0;
29538
29539diff -urNp linux-3.0.4/drivers/net/cxgb3/l2t.h linux-3.0.4/drivers/net/cxgb3/l2t.h
29540--- linux-3.0.4/drivers/net/cxgb3/l2t.h 2011-07-21 22:17:23.000000000 -0400
29541+++ linux-3.0.4/drivers/net/cxgb3/l2t.h 2011-08-23 21:47:55.000000000 -0400
29542@@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)
29543 */
29544 struct l2t_skb_cb {
29545 arp_failure_handler_func arp_failure_handler;
29546-};
29547+} __no_const;
29548
29549 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
29550
29551diff -urNp linux-3.0.4/drivers/net/cxgb4/cxgb4_main.c linux-3.0.4/drivers/net/cxgb4/cxgb4_main.c
29552--- linux-3.0.4/drivers/net/cxgb4/cxgb4_main.c 2011-07-21 22:17:23.000000000 -0400
29553+++ linux-3.0.4/drivers/net/cxgb4/cxgb4_main.c 2011-08-23 21:48:14.000000000 -0400
29554@@ -3396,6 +3396,8 @@ static int __devinit enable_msix(struct
29555 unsigned int nchan = adap->params.nports;
29556 struct msix_entry entries[MAX_INGQ + 1];
29557
29558+ pax_track_stack();
29559+
29560 for (i = 0; i < ARRAY_SIZE(entries); ++i)
29561 entries[i].entry = i;
29562
29563diff -urNp linux-3.0.4/drivers/net/cxgb4/t4_hw.c linux-3.0.4/drivers/net/cxgb4/t4_hw.c
29564--- linux-3.0.4/drivers/net/cxgb4/t4_hw.c 2011-07-21 22:17:23.000000000 -0400
29565+++ linux-3.0.4/drivers/net/cxgb4/t4_hw.c 2011-08-23 21:48:14.000000000 -0400
29566@@ -362,6 +362,8 @@ static int get_vpd_params(struct adapter
29567 u8 vpd[VPD_LEN], csum;
29568 unsigned int vpdr_len, kw_offset, id_len;
29569
29570+ pax_track_stack();
29571+
29572 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
29573 if (ret < 0)
29574 return ret;
29575diff -urNp linux-3.0.4/drivers/net/e1000e/82571.c linux-3.0.4/drivers/net/e1000e/82571.c
29576--- linux-3.0.4/drivers/net/e1000e/82571.c 2011-07-21 22:17:23.000000000 -0400
29577+++ linux-3.0.4/drivers/net/e1000e/82571.c 2011-08-23 21:47:55.000000000 -0400
29578@@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(s
29579 {
29580 struct e1000_hw *hw = &adapter->hw;
29581 struct e1000_mac_info *mac = &hw->mac;
29582- struct e1000_mac_operations *func = &mac->ops;
29583+ e1000_mac_operations_no_const *func = &mac->ops;
29584 u32 swsm = 0;
29585 u32 swsm2 = 0;
29586 bool force_clear_smbi = false;
29587diff -urNp linux-3.0.4/drivers/net/e1000e/es2lan.c linux-3.0.4/drivers/net/e1000e/es2lan.c
29588--- linux-3.0.4/drivers/net/e1000e/es2lan.c 2011-07-21 22:17:23.000000000 -0400
29589+++ linux-3.0.4/drivers/net/e1000e/es2lan.c 2011-08-23 21:47:55.000000000 -0400
29590@@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es
29591 {
29592 struct e1000_hw *hw = &adapter->hw;
29593 struct e1000_mac_info *mac = &hw->mac;
29594- struct e1000_mac_operations *func = &mac->ops;
29595+ e1000_mac_operations_no_const *func = &mac->ops;
29596
29597 /* Set media type */
29598 switch (adapter->pdev->device) {
29599diff -urNp linux-3.0.4/drivers/net/e1000e/hw.h linux-3.0.4/drivers/net/e1000e/hw.h
29600--- linux-3.0.4/drivers/net/e1000e/hw.h 2011-07-21 22:17:23.000000000 -0400
29601+++ linux-3.0.4/drivers/net/e1000e/hw.h 2011-08-23 21:47:55.000000000 -0400
29602@@ -776,6 +776,7 @@ struct e1000_mac_operations {
29603 void (*write_vfta)(struct e1000_hw *, u32, u32);
29604 s32 (*read_mac_addr)(struct e1000_hw *);
29605 };
29606+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
29607
29608 /* Function pointers for the PHY. */
29609 struct e1000_phy_operations {
29610@@ -799,6 +800,7 @@ struct e1000_phy_operations {
29611 void (*power_up)(struct e1000_hw *);
29612 void (*power_down)(struct e1000_hw *);
29613 };
29614+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
29615
29616 /* Function pointers for the NVM. */
29617 struct e1000_nvm_operations {
29618@@ -810,9 +812,10 @@ struct e1000_nvm_operations {
29619 s32 (*validate)(struct e1000_hw *);
29620 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
29621 };
29622+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
29623
29624 struct e1000_mac_info {
29625- struct e1000_mac_operations ops;
29626+ e1000_mac_operations_no_const ops;
29627 u8 addr[ETH_ALEN];
29628 u8 perm_addr[ETH_ALEN];
29629
29630@@ -853,7 +856,7 @@ struct e1000_mac_info {
29631 };
29632
29633 struct e1000_phy_info {
29634- struct e1000_phy_operations ops;
29635+ e1000_phy_operations_no_const ops;
29636
29637 enum e1000_phy_type type;
29638
29639@@ -887,7 +890,7 @@ struct e1000_phy_info {
29640 };
29641
29642 struct e1000_nvm_info {
29643- struct e1000_nvm_operations ops;
29644+ e1000_nvm_operations_no_const ops;
29645
29646 enum e1000_nvm_type type;
29647 enum e1000_nvm_override override;
29648diff -urNp linux-3.0.4/drivers/net/hamradio/6pack.c linux-3.0.4/drivers/net/hamradio/6pack.c
29649--- linux-3.0.4/drivers/net/hamradio/6pack.c 2011-07-21 22:17:23.000000000 -0400
29650+++ linux-3.0.4/drivers/net/hamradio/6pack.c 2011-08-23 21:48:14.000000000 -0400
29651@@ -463,6 +463,8 @@ static void sixpack_receive_buf(struct t
29652 unsigned char buf[512];
29653 int count1;
29654
29655+ pax_track_stack();
29656+
29657 if (!count)
29658 return;
29659
29660diff -urNp linux-3.0.4/drivers/net/igb/e1000_hw.h linux-3.0.4/drivers/net/igb/e1000_hw.h
29661--- linux-3.0.4/drivers/net/igb/e1000_hw.h 2011-07-21 22:17:23.000000000 -0400
29662+++ linux-3.0.4/drivers/net/igb/e1000_hw.h 2011-08-23 21:47:55.000000000 -0400
29663@@ -314,6 +314,7 @@ struct e1000_mac_operations {
29664 s32 (*read_mac_addr)(struct e1000_hw *);
29665 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
29666 };
29667+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
29668
29669 struct e1000_phy_operations {
29670 s32 (*acquire)(struct e1000_hw *);
29671@@ -330,6 +331,7 @@ struct e1000_phy_operations {
29672 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
29673 s32 (*write_reg)(struct e1000_hw *, u32, u16);
29674 };
29675+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
29676
29677 struct e1000_nvm_operations {
29678 s32 (*acquire)(struct e1000_hw *);
29679@@ -339,6 +341,7 @@ struct e1000_nvm_operations {
29680 s32 (*update)(struct e1000_hw *);
29681 s32 (*validate)(struct e1000_hw *);
29682 };
29683+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
29684
29685 struct e1000_info {
29686 s32 (*get_invariants)(struct e1000_hw *);
29687@@ -350,7 +353,7 @@ struct e1000_info {
29688 extern const struct e1000_info e1000_82575_info;
29689
29690 struct e1000_mac_info {
29691- struct e1000_mac_operations ops;
29692+ e1000_mac_operations_no_const ops;
29693
29694 u8 addr[6];
29695 u8 perm_addr[6];
29696@@ -388,7 +391,7 @@ struct e1000_mac_info {
29697 };
29698
29699 struct e1000_phy_info {
29700- struct e1000_phy_operations ops;
29701+ e1000_phy_operations_no_const ops;
29702
29703 enum e1000_phy_type type;
29704
29705@@ -423,7 +426,7 @@ struct e1000_phy_info {
29706 };
29707
29708 struct e1000_nvm_info {
29709- struct e1000_nvm_operations ops;
29710+ e1000_nvm_operations_no_const ops;
29711 enum e1000_nvm_type type;
29712 enum e1000_nvm_override override;
29713
29714@@ -468,6 +471,7 @@ struct e1000_mbx_operations {
29715 s32 (*check_for_ack)(struct e1000_hw *, u16);
29716 s32 (*check_for_rst)(struct e1000_hw *, u16);
29717 };
29718+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
29719
29720 struct e1000_mbx_stats {
29721 u32 msgs_tx;
29722@@ -479,7 +483,7 @@ struct e1000_mbx_stats {
29723 };
29724
29725 struct e1000_mbx_info {
29726- struct e1000_mbx_operations ops;
29727+ e1000_mbx_operations_no_const ops;
29728 struct e1000_mbx_stats stats;
29729 u32 timeout;
29730 u32 usec_delay;
29731diff -urNp linux-3.0.4/drivers/net/igbvf/vf.h linux-3.0.4/drivers/net/igbvf/vf.h
29732--- linux-3.0.4/drivers/net/igbvf/vf.h 2011-07-21 22:17:23.000000000 -0400
29733+++ linux-3.0.4/drivers/net/igbvf/vf.h 2011-08-23 21:47:55.000000000 -0400
29734@@ -189,9 +189,10 @@ struct e1000_mac_operations {
29735 s32 (*read_mac_addr)(struct e1000_hw *);
29736 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
29737 };
29738+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
29739
29740 struct e1000_mac_info {
29741- struct e1000_mac_operations ops;
29742+ e1000_mac_operations_no_const ops;
29743 u8 addr[6];
29744 u8 perm_addr[6];
29745
29746@@ -213,6 +214,7 @@ struct e1000_mbx_operations {
29747 s32 (*check_for_ack)(struct e1000_hw *);
29748 s32 (*check_for_rst)(struct e1000_hw *);
29749 };
29750+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
29751
29752 struct e1000_mbx_stats {
29753 u32 msgs_tx;
29754@@ -224,7 +226,7 @@ struct e1000_mbx_stats {
29755 };
29756
29757 struct e1000_mbx_info {
29758- struct e1000_mbx_operations ops;
29759+ e1000_mbx_operations_no_const ops;
29760 struct e1000_mbx_stats stats;
29761 u32 timeout;
29762 u32 usec_delay;
29763diff -urNp linux-3.0.4/drivers/net/ixgb/ixgb_main.c linux-3.0.4/drivers/net/ixgb/ixgb_main.c
29764--- linux-3.0.4/drivers/net/ixgb/ixgb_main.c 2011-07-21 22:17:23.000000000 -0400
29765+++ linux-3.0.4/drivers/net/ixgb/ixgb_main.c 2011-08-23 21:48:14.000000000 -0400
29766@@ -1070,6 +1070,8 @@ ixgb_set_multi(struct net_device *netdev
29767 u32 rctl;
29768 int i;
29769
29770+ pax_track_stack();
29771+
29772 /* Check for Promiscuous and All Multicast modes */
29773
29774 rctl = IXGB_READ_REG(hw, RCTL);
29775diff -urNp linux-3.0.4/drivers/net/ixgb/ixgb_param.c linux-3.0.4/drivers/net/ixgb/ixgb_param.c
29776--- linux-3.0.4/drivers/net/ixgb/ixgb_param.c 2011-07-21 22:17:23.000000000 -0400
29777+++ linux-3.0.4/drivers/net/ixgb/ixgb_param.c 2011-08-23 21:48:14.000000000 -0400
29778@@ -261,6 +261,9 @@ void __devinit
29779 ixgb_check_options(struct ixgb_adapter *adapter)
29780 {
29781 int bd = adapter->bd_number;
29782+
29783+ pax_track_stack();
29784+
29785 if (bd >= IXGB_MAX_NIC) {
29786 pr_notice("Warning: no configuration for board #%i\n", bd);
29787 pr_notice("Using defaults for all values\n");
29788diff -urNp linux-3.0.4/drivers/net/ixgbe/ixgbe_type.h linux-3.0.4/drivers/net/ixgbe/ixgbe_type.h
29789--- linux-3.0.4/drivers/net/ixgbe/ixgbe_type.h 2011-07-21 22:17:23.000000000 -0400
29790+++ linux-3.0.4/drivers/net/ixgbe/ixgbe_type.h 2011-08-23 21:47:55.000000000 -0400
29791@@ -2584,6 +2584,7 @@ struct ixgbe_eeprom_operations {
29792 s32 (*update_checksum)(struct ixgbe_hw *);
29793 u16 (*calc_checksum)(struct ixgbe_hw *);
29794 };
29795+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
29796
29797 struct ixgbe_mac_operations {
29798 s32 (*init_hw)(struct ixgbe_hw *);
29799@@ -2639,6 +2640,7 @@ struct ixgbe_mac_operations {
29800 /* Flow Control */
29801 s32 (*fc_enable)(struct ixgbe_hw *, s32);
29802 };
29803+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
29804
29805 struct ixgbe_phy_operations {
29806 s32 (*identify)(struct ixgbe_hw *);
29807@@ -2658,9 +2660,10 @@ struct ixgbe_phy_operations {
29808 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
29809 s32 (*check_overtemp)(struct ixgbe_hw *);
29810 };
29811+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
29812
29813 struct ixgbe_eeprom_info {
29814- struct ixgbe_eeprom_operations ops;
29815+ ixgbe_eeprom_operations_no_const ops;
29816 enum ixgbe_eeprom_type type;
29817 u32 semaphore_delay;
29818 u16 word_size;
29819@@ -2670,7 +2673,7 @@ struct ixgbe_eeprom_info {
29820
29821 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
29822 struct ixgbe_mac_info {
29823- struct ixgbe_mac_operations ops;
29824+ ixgbe_mac_operations_no_const ops;
29825 enum ixgbe_mac_type type;
29826 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
29827 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
29828@@ -2698,7 +2701,7 @@ struct ixgbe_mac_info {
29829 };
29830
29831 struct ixgbe_phy_info {
29832- struct ixgbe_phy_operations ops;
29833+ ixgbe_phy_operations_no_const ops;
29834 struct mdio_if_info mdio;
29835 enum ixgbe_phy_type type;
29836 u32 id;
29837@@ -2726,6 +2729,7 @@ struct ixgbe_mbx_operations {
29838 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
29839 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
29840 };
29841+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
29842
29843 struct ixgbe_mbx_stats {
29844 u32 msgs_tx;
29845@@ -2737,7 +2741,7 @@ struct ixgbe_mbx_stats {
29846 };
29847
29848 struct ixgbe_mbx_info {
29849- struct ixgbe_mbx_operations ops;
29850+ ixgbe_mbx_operations_no_const ops;
29851 struct ixgbe_mbx_stats stats;
29852 u32 timeout;
29853 u32 usec_delay;
29854diff -urNp linux-3.0.4/drivers/net/ixgbevf/vf.h linux-3.0.4/drivers/net/ixgbevf/vf.h
29855--- linux-3.0.4/drivers/net/ixgbevf/vf.h 2011-07-21 22:17:23.000000000 -0400
29856+++ linux-3.0.4/drivers/net/ixgbevf/vf.h 2011-08-23 21:47:55.000000000 -0400
29857@@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
29858 s32 (*clear_vfta)(struct ixgbe_hw *);
29859 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
29860 };
29861+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
29862
29863 enum ixgbe_mac_type {
29864 ixgbe_mac_unknown = 0,
29865@@ -79,7 +80,7 @@ enum ixgbe_mac_type {
29866 };
29867
29868 struct ixgbe_mac_info {
29869- struct ixgbe_mac_operations ops;
29870+ ixgbe_mac_operations_no_const ops;
29871 u8 addr[6];
29872 u8 perm_addr[6];
29873
29874@@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
29875 s32 (*check_for_ack)(struct ixgbe_hw *);
29876 s32 (*check_for_rst)(struct ixgbe_hw *);
29877 };
29878+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
29879
29880 struct ixgbe_mbx_stats {
29881 u32 msgs_tx;
29882@@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
29883 };
29884
29885 struct ixgbe_mbx_info {
29886- struct ixgbe_mbx_operations ops;
29887+ ixgbe_mbx_operations_no_const ops;
29888 struct ixgbe_mbx_stats stats;
29889 u32 timeout;
29890 u32 udelay;
29891diff -urNp linux-3.0.4/drivers/net/ksz884x.c linux-3.0.4/drivers/net/ksz884x.c
29892--- linux-3.0.4/drivers/net/ksz884x.c 2011-07-21 22:17:23.000000000 -0400
29893+++ linux-3.0.4/drivers/net/ksz884x.c 2011-08-23 21:48:14.000000000 -0400
29894@@ -6534,6 +6534,8 @@ static void netdev_get_ethtool_stats(str
29895 int rc;
29896 u64 counter[TOTAL_PORT_COUNTER_NUM];
29897
29898+ pax_track_stack();
29899+
29900 mutex_lock(&hw_priv->lock);
29901 n = SWITCH_PORT_NUM;
29902 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
29903diff -urNp linux-3.0.4/drivers/net/mlx4/main.c linux-3.0.4/drivers/net/mlx4/main.c
29904--- linux-3.0.4/drivers/net/mlx4/main.c 2011-07-21 22:17:23.000000000 -0400
29905+++ linux-3.0.4/drivers/net/mlx4/main.c 2011-08-23 21:48:14.000000000 -0400
29906@@ -40,6 +40,7 @@
29907 #include <linux/dma-mapping.h>
29908 #include <linux/slab.h>
29909 #include <linux/io-mapping.h>
29910+#include <linux/sched.h>
29911
29912 #include <linux/mlx4/device.h>
29913 #include <linux/mlx4/doorbell.h>
29914@@ -764,6 +765,8 @@ static int mlx4_init_hca(struct mlx4_dev
29915 u64 icm_size;
29916 int err;
29917
29918+ pax_track_stack();
29919+
29920 err = mlx4_QUERY_FW(dev);
29921 if (err) {
29922 if (err == -EACCES)
29923diff -urNp linux-3.0.4/drivers/net/niu.c linux-3.0.4/drivers/net/niu.c
29924--- linux-3.0.4/drivers/net/niu.c 2011-09-02 18:11:21.000000000 -0400
29925+++ linux-3.0.4/drivers/net/niu.c 2011-08-23 21:48:14.000000000 -0400
29926@@ -9056,6 +9056,8 @@ static void __devinit niu_try_msix(struc
29927 int i, num_irqs, err;
29928 u8 first_ldg;
29929
29930+ pax_track_stack();
29931+
29932 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
29933 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
29934 ldg_num_map[i] = first_ldg + i;
29935diff -urNp linux-3.0.4/drivers/net/pcnet32.c linux-3.0.4/drivers/net/pcnet32.c
29936--- linux-3.0.4/drivers/net/pcnet32.c 2011-07-21 22:17:23.000000000 -0400
29937+++ linux-3.0.4/drivers/net/pcnet32.c 2011-08-23 21:47:55.000000000 -0400
29938@@ -82,7 +82,7 @@ static int cards_found;
29939 /*
29940 * VLB I/O addresses
29941 */
29942-static unsigned int pcnet32_portlist[] __initdata =
29943+static unsigned int pcnet32_portlist[] __devinitdata =
29944 { 0x300, 0x320, 0x340, 0x360, 0 };
29945
29946 static int pcnet32_debug;
29947@@ -270,7 +270,7 @@ struct pcnet32_private {
29948 struct sk_buff **rx_skbuff;
29949 dma_addr_t *tx_dma_addr;
29950 dma_addr_t *rx_dma_addr;
29951- struct pcnet32_access a;
29952+ struct pcnet32_access *a;
29953 spinlock_t lock; /* Guard lock */
29954 unsigned int cur_rx, cur_tx; /* The next free ring entry */
29955 unsigned int rx_ring_size; /* current rx ring size */
29956@@ -460,9 +460,9 @@ static void pcnet32_netif_start(struct n
29957 u16 val;
29958
29959 netif_wake_queue(dev);
29960- val = lp->a.read_csr(ioaddr, CSR3);
29961+ val = lp->a->read_csr(ioaddr, CSR3);
29962 val &= 0x00ff;
29963- lp->a.write_csr(ioaddr, CSR3, val);
29964+ lp->a->write_csr(ioaddr, CSR3, val);
29965 napi_enable(&lp->napi);
29966 }
29967
29968@@ -730,7 +730,7 @@ static u32 pcnet32_get_link(struct net_d
29969 r = mii_link_ok(&lp->mii_if);
29970 } else if (lp->chip_version >= PCNET32_79C970A) {
29971 ulong ioaddr = dev->base_addr; /* card base I/O address */
29972- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
29973+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
29974 } else { /* can not detect link on really old chips */
29975 r = 1;
29976 }
29977@@ -792,7 +792,7 @@ static int pcnet32_set_ringparam(struct
29978 pcnet32_netif_stop(dev);
29979
29980 spin_lock_irqsave(&lp->lock, flags);
29981- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
29982+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
29983
29984 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
29985
29986@@ -868,7 +868,7 @@ static void pcnet32_ethtool_test(struct
29987 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
29988 {
29989 struct pcnet32_private *lp = netdev_priv(dev);
29990- struct pcnet32_access *a = &lp->a; /* access to registers */
29991+ struct pcnet32_access *a = lp->a; /* access to registers */
29992 ulong ioaddr = dev->base_addr; /* card base I/O address */
29993 struct sk_buff *skb; /* sk buff */
29994 int x, i; /* counters */
29995@@ -888,21 +888,21 @@ static int pcnet32_loopback_test(struct
29996 pcnet32_netif_stop(dev);
29997
29998 spin_lock_irqsave(&lp->lock, flags);
29999- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
30000+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
30001
30002 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
30003
30004 /* Reset the PCNET32 */
30005- lp->a.reset(ioaddr);
30006- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
30007+ lp->a->reset(ioaddr);
30008+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
30009
30010 /* switch pcnet32 to 32bit mode */
30011- lp->a.write_bcr(ioaddr, 20, 2);
30012+ lp->a->write_bcr(ioaddr, 20, 2);
30013
30014 /* purge & init rings but don't actually restart */
30015 pcnet32_restart(dev, 0x0000);
30016
30017- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
30018+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
30019
30020 /* Initialize Transmit buffers. */
30021 size = data_len + 15;
30022@@ -947,10 +947,10 @@ static int pcnet32_loopback_test(struct
30023
30024 /* set int loopback in CSR15 */
30025 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
30026- lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
30027+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
30028
30029 teststatus = cpu_to_le16(0x8000);
30030- lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
30031+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
30032
30033 /* Check status of descriptors */
30034 for (x = 0; x < numbuffs; x++) {
30035@@ -969,7 +969,7 @@ static int pcnet32_loopback_test(struct
30036 }
30037 }
30038
30039- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
30040+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
30041 wmb();
30042 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
30043 netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
30044@@ -1015,7 +1015,7 @@ clean_up:
30045 pcnet32_restart(dev, CSR0_NORMAL);
30046 } else {
30047 pcnet32_purge_rx_ring(dev);
30048- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
30049+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
30050 }
30051 spin_unlock_irqrestore(&lp->lock, flags);
30052
30053@@ -1026,7 +1026,7 @@ static int pcnet32_set_phys_id(struct ne
30054 enum ethtool_phys_id_state state)
30055 {
30056 struct pcnet32_private *lp = netdev_priv(dev);
30057- struct pcnet32_access *a = &lp->a;
30058+ struct pcnet32_access *a = lp->a;
30059 ulong ioaddr = dev->base_addr;
30060 unsigned long flags;
30061 int i;
30062@@ -1067,7 +1067,7 @@ static int pcnet32_suspend(struct net_de
30063 {
30064 int csr5;
30065 struct pcnet32_private *lp = netdev_priv(dev);
30066- struct pcnet32_access *a = &lp->a;
30067+ struct pcnet32_access *a = lp->a;
30068 ulong ioaddr = dev->base_addr;
30069 int ticks;
30070
30071@@ -1324,8 +1324,8 @@ static int pcnet32_poll(struct napi_stru
30072 spin_lock_irqsave(&lp->lock, flags);
30073 if (pcnet32_tx(dev)) {
30074 /* reset the chip to clear the error condition, then restart */
30075- lp->a.reset(ioaddr);
30076- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
30077+ lp->a->reset(ioaddr);
30078+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
30079 pcnet32_restart(dev, CSR0_START);
30080 netif_wake_queue(dev);
30081 }
30082@@ -1337,12 +1337,12 @@ static int pcnet32_poll(struct napi_stru
30083 __napi_complete(napi);
30084
30085 /* clear interrupt masks */
30086- val = lp->a.read_csr(ioaddr, CSR3);
30087+ val = lp->a->read_csr(ioaddr, CSR3);
30088 val &= 0x00ff;
30089- lp->a.write_csr(ioaddr, CSR3, val);
30090+ lp->a->write_csr(ioaddr, CSR3, val);
30091
30092 /* Set interrupt enable. */
30093- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
30094+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
30095
30096 spin_unlock_irqrestore(&lp->lock, flags);
30097 }
30098@@ -1365,7 +1365,7 @@ static void pcnet32_get_regs(struct net_
30099 int i, csr0;
30100 u16 *buff = ptr;
30101 struct pcnet32_private *lp = netdev_priv(dev);
30102- struct pcnet32_access *a = &lp->a;
30103+ struct pcnet32_access *a = lp->a;
30104 ulong ioaddr = dev->base_addr;
30105 unsigned long flags;
30106
30107@@ -1401,9 +1401,9 @@ static void pcnet32_get_regs(struct net_
30108 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
30109 if (lp->phymask & (1 << j)) {
30110 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
30111- lp->a.write_bcr(ioaddr, 33,
30112+ lp->a->write_bcr(ioaddr, 33,
30113 (j << 5) | i);
30114- *buff++ = lp->a.read_bcr(ioaddr, 34);
30115+ *buff++ = lp->a->read_bcr(ioaddr, 34);
30116 }
30117 }
30118 }
30119@@ -1785,7 +1785,7 @@ pcnet32_probe1(unsigned long ioaddr, int
30120 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
30121 lp->options |= PCNET32_PORT_FD;
30122
30123- lp->a = *a;
30124+ lp->a = a;
30125
30126 /* prior to register_netdev, dev->name is not yet correct */
30127 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
30128@@ -1844,7 +1844,7 @@ pcnet32_probe1(unsigned long ioaddr, int
30129 if (lp->mii) {
30130 /* lp->phycount and lp->phymask are set to 0 by memset above */
30131
30132- lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
30133+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
30134 /* scan for PHYs */
30135 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
30136 unsigned short id1, id2;
30137@@ -1864,7 +1864,7 @@ pcnet32_probe1(unsigned long ioaddr, int
30138 pr_info("Found PHY %04x:%04x at address %d\n",
30139 id1, id2, i);
30140 }
30141- lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
30142+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
30143 if (lp->phycount > 1)
30144 lp->options |= PCNET32_PORT_MII;
30145 }
30146@@ -2020,10 +2020,10 @@ static int pcnet32_open(struct net_devic
30147 }
30148
30149 /* Reset the PCNET32 */
30150- lp->a.reset(ioaddr);
30151+ lp->a->reset(ioaddr);
30152
30153 /* switch pcnet32 to 32bit mode */
30154- lp->a.write_bcr(ioaddr, 20, 2);
30155+ lp->a->write_bcr(ioaddr, 20, 2);
30156
30157 netif_printk(lp, ifup, KERN_DEBUG, dev,
30158 "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
30159@@ -2032,14 +2032,14 @@ static int pcnet32_open(struct net_devic
30160 (u32) (lp->init_dma_addr));
30161
30162 /* set/reset autoselect bit */
30163- val = lp->a.read_bcr(ioaddr, 2) & ~2;
30164+ val = lp->a->read_bcr(ioaddr, 2) & ~2;
30165 if (lp->options & PCNET32_PORT_ASEL)
30166 val |= 2;
30167- lp->a.write_bcr(ioaddr, 2, val);
30168+ lp->a->write_bcr(ioaddr, 2, val);
30169
30170 /* handle full duplex setting */
30171 if (lp->mii_if.full_duplex) {
30172- val = lp->a.read_bcr(ioaddr, 9) & ~3;
30173+ val = lp->a->read_bcr(ioaddr, 9) & ~3;
30174 if (lp->options & PCNET32_PORT_FD) {
30175 val |= 1;
30176 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
30177@@ -2049,14 +2049,14 @@ static int pcnet32_open(struct net_devic
30178 if (lp->chip_version == 0x2627)
30179 val |= 3;
30180 }
30181- lp->a.write_bcr(ioaddr, 9, val);
30182+ lp->a->write_bcr(ioaddr, 9, val);
30183 }
30184
30185 /* set/reset GPSI bit in test register */
30186- val = lp->a.read_csr(ioaddr, 124) & ~0x10;
30187+ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
30188 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
30189 val |= 0x10;
30190- lp->a.write_csr(ioaddr, 124, val);
30191+ lp->a->write_csr(ioaddr, 124, val);
30192
30193 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
30194 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
30195@@ -2075,24 +2075,24 @@ static int pcnet32_open(struct net_devic
30196 * duplex, and/or enable auto negotiation, and clear DANAS
30197 */
30198 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
30199- lp->a.write_bcr(ioaddr, 32,
30200- lp->a.read_bcr(ioaddr, 32) | 0x0080);
30201+ lp->a->write_bcr(ioaddr, 32,
30202+ lp->a->read_bcr(ioaddr, 32) | 0x0080);
30203 /* disable Auto Negotiation, set 10Mpbs, HD */
30204- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
30205+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
30206 if (lp->options & PCNET32_PORT_FD)
30207 val |= 0x10;
30208 if (lp->options & PCNET32_PORT_100)
30209 val |= 0x08;
30210- lp->a.write_bcr(ioaddr, 32, val);
30211+ lp->a->write_bcr(ioaddr, 32, val);
30212 } else {
30213 if (lp->options & PCNET32_PORT_ASEL) {
30214- lp->a.write_bcr(ioaddr, 32,
30215- lp->a.read_bcr(ioaddr,
30216+ lp->a->write_bcr(ioaddr, 32,
30217+ lp->a->read_bcr(ioaddr,
30218 32) | 0x0080);
30219 /* enable auto negotiate, setup, disable fd */
30220- val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
30221+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
30222 val |= 0x20;
30223- lp->a.write_bcr(ioaddr, 32, val);
30224+ lp->a->write_bcr(ioaddr, 32, val);
30225 }
30226 }
30227 } else {
30228@@ -2105,10 +2105,10 @@ static int pcnet32_open(struct net_devic
30229 * There is really no good other way to handle multiple PHYs
30230 * other than turning off all automatics
30231 */
30232- val = lp->a.read_bcr(ioaddr, 2);
30233- lp->a.write_bcr(ioaddr, 2, val & ~2);
30234- val = lp->a.read_bcr(ioaddr, 32);
30235- lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
30236+ val = lp->a->read_bcr(ioaddr, 2);
30237+ lp->a->write_bcr(ioaddr, 2, val & ~2);
30238+ val = lp->a->read_bcr(ioaddr, 32);
30239+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
30240
30241 if (!(lp->options & PCNET32_PORT_ASEL)) {
30242 /* setup ecmd */
30243@@ -2118,7 +2118,7 @@ static int pcnet32_open(struct net_devic
30244 ethtool_cmd_speed_set(&ecmd,
30245 (lp->options & PCNET32_PORT_100) ?
30246 SPEED_100 : SPEED_10);
30247- bcr9 = lp->a.read_bcr(ioaddr, 9);
30248+ bcr9 = lp->a->read_bcr(ioaddr, 9);
30249
30250 if (lp->options & PCNET32_PORT_FD) {
30251 ecmd.duplex = DUPLEX_FULL;
30252@@ -2127,7 +2127,7 @@ static int pcnet32_open(struct net_devic
30253 ecmd.duplex = DUPLEX_HALF;
30254 bcr9 |= ~(1 << 0);
30255 }
30256- lp->a.write_bcr(ioaddr, 9, bcr9);
30257+ lp->a->write_bcr(ioaddr, 9, bcr9);
30258 }
30259
30260 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
30261@@ -2158,9 +2158,9 @@ static int pcnet32_open(struct net_devic
30262
30263 #ifdef DO_DXSUFLO
30264 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
30265- val = lp->a.read_csr(ioaddr, CSR3);
30266+ val = lp->a->read_csr(ioaddr, CSR3);
30267 val |= 0x40;
30268- lp->a.write_csr(ioaddr, CSR3, val);
30269+ lp->a->write_csr(ioaddr, CSR3, val);
30270 }
30271 #endif
30272
30273@@ -2176,11 +2176,11 @@ static int pcnet32_open(struct net_devic
30274 napi_enable(&lp->napi);
30275
30276 /* Re-initialize the PCNET32, and start it when done. */
30277- lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
30278- lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
30279+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
30280+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
30281
30282- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
30283- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
30284+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
30285+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
30286
30287 netif_start_queue(dev);
30288
30289@@ -2192,19 +2192,19 @@ static int pcnet32_open(struct net_devic
30290
30291 i = 0;
30292 while (i++ < 100)
30293- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
30294+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
30295 break;
30296 /*
30297 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
30298 * reports that doing so triggers a bug in the '974.
30299 */
30300- lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
30301+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
30302
30303 netif_printk(lp, ifup, KERN_DEBUG, dev,
30304 "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
30305 i,
30306 (u32) (lp->init_dma_addr),
30307- lp->a.read_csr(ioaddr, CSR0));
30308+ lp->a->read_csr(ioaddr, CSR0));
30309
30310 spin_unlock_irqrestore(&lp->lock, flags);
30311
30312@@ -2218,7 +2218,7 @@ err_free_ring:
30313 * Switch back to 16bit mode to avoid problems with dumb
30314 * DOS packet driver after a warm reboot
30315 */
30316- lp->a.write_bcr(ioaddr, 20, 4);
30317+ lp->a->write_bcr(ioaddr, 20, 4);
30318
30319 err_free_irq:
30320 spin_unlock_irqrestore(&lp->lock, flags);
30321@@ -2323,7 +2323,7 @@ static void pcnet32_restart(struct net_d
30322
30323 /* wait for stop */
30324 for (i = 0; i < 100; i++)
30325- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
30326+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
30327 break;
30328
30329 if (i >= 100)
30330@@ -2335,13 +2335,13 @@ static void pcnet32_restart(struct net_d
30331 return;
30332
30333 /* ReInit Ring */
30334- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
30335+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
30336 i = 0;
30337 while (i++ < 1000)
30338- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
30339+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
30340 break;
30341
30342- lp->a.write_csr(ioaddr, CSR0, csr0_bits);
30343+ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
30344 }
30345
30346 static void pcnet32_tx_timeout(struct net_device *dev)
30347@@ -2353,8 +2353,8 @@ static void pcnet32_tx_timeout(struct ne
30348 /* Transmitter timeout, serious problems. */
30349 if (pcnet32_debug & NETIF_MSG_DRV)
30350 pr_err("%s: transmit timed out, status %4.4x, resetting\n",
30351- dev->name, lp->a.read_csr(ioaddr, CSR0));
30352- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
30353+ dev->name, lp->a->read_csr(ioaddr, CSR0));
30354+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
30355 dev->stats.tx_errors++;
30356 if (netif_msg_tx_err(lp)) {
30357 int i;
30358@@ -2397,7 +2397,7 @@ static netdev_tx_t pcnet32_start_xmit(st
30359
30360 netif_printk(lp, tx_queued, KERN_DEBUG, dev,
30361 "%s() called, csr0 %4.4x\n",
30362- __func__, lp->a.read_csr(ioaddr, CSR0));
30363+ __func__, lp->a->read_csr(ioaddr, CSR0));
30364
30365 /* Default status -- will not enable Successful-TxDone
30366 * interrupt when that option is available to us.
30367@@ -2427,7 +2427,7 @@ static netdev_tx_t pcnet32_start_xmit(st
30368 dev->stats.tx_bytes += skb->len;
30369
30370 /* Trigger an immediate send poll. */
30371- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
30372+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
30373
30374 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
30375 lp->tx_full = 1;
30376@@ -2452,16 +2452,16 @@ pcnet32_interrupt(int irq, void *dev_id)
30377
30378 spin_lock(&lp->lock);
30379
30380- csr0 = lp->a.read_csr(ioaddr, CSR0);
30381+ csr0 = lp->a->read_csr(ioaddr, CSR0);
30382 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
30383 if (csr0 == 0xffff)
30384 break; /* PCMCIA remove happened */
30385 /* Acknowledge all of the current interrupt sources ASAP. */
30386- lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
30387+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
30388
30389 netif_printk(lp, intr, KERN_DEBUG, dev,
30390 "interrupt csr0=%#2.2x new csr=%#2.2x\n",
30391- csr0, lp->a.read_csr(ioaddr, CSR0));
30392+ csr0, lp->a->read_csr(ioaddr, CSR0));
30393
30394 /* Log misc errors. */
30395 if (csr0 & 0x4000)
30396@@ -2488,19 +2488,19 @@ pcnet32_interrupt(int irq, void *dev_id)
30397 if (napi_schedule_prep(&lp->napi)) {
30398 u16 val;
30399 /* set interrupt masks */
30400- val = lp->a.read_csr(ioaddr, CSR3);
30401+ val = lp->a->read_csr(ioaddr, CSR3);
30402 val |= 0x5f00;
30403- lp->a.write_csr(ioaddr, CSR3, val);
30404+ lp->a->write_csr(ioaddr, CSR3, val);
30405
30406 __napi_schedule(&lp->napi);
30407 break;
30408 }
30409- csr0 = lp->a.read_csr(ioaddr, CSR0);
30410+ csr0 = lp->a->read_csr(ioaddr, CSR0);
30411 }
30412
30413 netif_printk(lp, intr, KERN_DEBUG, dev,
30414 "exiting interrupt, csr0=%#4.4x\n",
30415- lp->a.read_csr(ioaddr, CSR0));
30416+ lp->a->read_csr(ioaddr, CSR0));
30417
30418 spin_unlock(&lp->lock);
30419
30420@@ -2520,20 +2520,20 @@ static int pcnet32_close(struct net_devi
30421
30422 spin_lock_irqsave(&lp->lock, flags);
30423
30424- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
30425+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
30426
30427 netif_printk(lp, ifdown, KERN_DEBUG, dev,
30428 "Shutting down ethercard, status was %2.2x\n",
30429- lp->a.read_csr(ioaddr, CSR0));
30430+ lp->a->read_csr(ioaddr, CSR0));
30431
30432 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
30433- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
30434+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
30435
30436 /*
30437 * Switch back to 16bit mode to avoid problems with dumb
30438 * DOS packet driver after a warm reboot
30439 */
30440- lp->a.write_bcr(ioaddr, 20, 4);
30441+ lp->a->write_bcr(ioaddr, 20, 4);
30442
30443 spin_unlock_irqrestore(&lp->lock, flags);
30444
30445@@ -2556,7 +2556,7 @@ static struct net_device_stats *pcnet32_
30446 unsigned long flags;
30447
30448 spin_lock_irqsave(&lp->lock, flags);
30449- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
30450+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
30451 spin_unlock_irqrestore(&lp->lock, flags);
30452
30453 return &dev->stats;
30454@@ -2578,10 +2578,10 @@ static void pcnet32_load_multicast(struc
30455 if (dev->flags & IFF_ALLMULTI) {
30456 ib->filter[0] = cpu_to_le32(~0U);
30457 ib->filter[1] = cpu_to_le32(~0U);
30458- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
30459- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
30460- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
30461- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
30462+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
30463+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
30464+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
30465+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
30466 return;
30467 }
30468 /* clear the multicast filter */
30469@@ -2601,7 +2601,7 @@ static void pcnet32_load_multicast(struc
30470 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
30471 }
30472 for (i = 0; i < 4; i++)
30473- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
30474+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
30475 le16_to_cpu(mcast_table[i]));
30476 }
30477
30478@@ -2616,28 +2616,28 @@ static void pcnet32_set_multicast_list(s
30479
30480 spin_lock_irqsave(&lp->lock, flags);
30481 suspended = pcnet32_suspend(dev, &flags, 0);
30482- csr15 = lp->a.read_csr(ioaddr, CSR15);
30483+ csr15 = lp->a->read_csr(ioaddr, CSR15);
30484 if (dev->flags & IFF_PROMISC) {
30485 /* Log any net taps. */
30486 netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
30487 lp->init_block->mode =
30488 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
30489 7);
30490- lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
30491+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
30492 } else {
30493 lp->init_block->mode =
30494 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
30495- lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
30496+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
30497 pcnet32_load_multicast(dev);
30498 }
30499
30500 if (suspended) {
30501 int csr5;
30502 /* clear SUSPEND (SPND) - CSR5 bit 0 */
30503- csr5 = lp->a.read_csr(ioaddr, CSR5);
30504- lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
30505+ csr5 = lp->a->read_csr(ioaddr, CSR5);
30506+ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
30507 } else {
30508- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
30509+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
30510 pcnet32_restart(dev, CSR0_NORMAL);
30511 netif_wake_queue(dev);
30512 }
30513@@ -2655,8 +2655,8 @@ static int mdio_read(struct net_device *
30514 if (!lp->mii)
30515 return 0;
30516
30517- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
30518- val_out = lp->a.read_bcr(ioaddr, 34);
30519+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
30520+ val_out = lp->a->read_bcr(ioaddr, 34);
30521
30522 return val_out;
30523 }
30524@@ -2670,8 +2670,8 @@ static void mdio_write(struct net_device
30525 if (!lp->mii)
30526 return;
30527
30528- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
30529- lp->a.write_bcr(ioaddr, 34, val);
30530+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
30531+ lp->a->write_bcr(ioaddr, 34, val);
30532 }
30533
30534 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
30535@@ -2748,7 +2748,7 @@ static void pcnet32_check_media(struct n
30536 curr_link = mii_link_ok(&lp->mii_if);
30537 } else {
30538 ulong ioaddr = dev->base_addr; /* card base I/O address */
30539- curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
30540+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
30541 }
30542 if (!curr_link) {
30543 if (prev_link || verbose) {
30544@@ -2771,13 +2771,13 @@ static void pcnet32_check_media(struct n
30545 (ecmd.duplex == DUPLEX_FULL)
30546 ? "full" : "half");
30547 }
30548- bcr9 = lp->a.read_bcr(dev->base_addr, 9);
30549+ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
30550 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
30551 if (lp->mii_if.full_duplex)
30552 bcr9 |= (1 << 0);
30553 else
30554 bcr9 &= ~(1 << 0);
30555- lp->a.write_bcr(dev->base_addr, 9, bcr9);
30556+ lp->a->write_bcr(dev->base_addr, 9, bcr9);
30557 }
30558 } else {
30559 netif_info(lp, link, dev, "link up\n");
30560diff -urNp linux-3.0.4/drivers/net/ppp_generic.c linux-3.0.4/drivers/net/ppp_generic.c
30561--- linux-3.0.4/drivers/net/ppp_generic.c 2011-07-21 22:17:23.000000000 -0400
30562+++ linux-3.0.4/drivers/net/ppp_generic.c 2011-08-23 21:47:55.000000000 -0400
30563@@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, st
30564 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
30565 struct ppp_stats stats;
30566 struct ppp_comp_stats cstats;
30567- char *vers;
30568
30569 switch (cmd) {
30570 case SIOCGPPPSTATS:
30571@@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, st
30572 break;
30573
30574 case SIOCGPPPVER:
30575- vers = PPP_VERSION;
30576- if (copy_to_user(addr, vers, strlen(vers) + 1))
30577+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
30578 break;
30579 err = 0;
30580 break;
30581diff -urNp linux-3.0.4/drivers/net/r8169.c linux-3.0.4/drivers/net/r8169.c
30582--- linux-3.0.4/drivers/net/r8169.c 2011-09-02 18:11:21.000000000 -0400
30583+++ linux-3.0.4/drivers/net/r8169.c 2011-08-23 21:47:55.000000000 -0400
30584@@ -645,12 +645,12 @@ struct rtl8169_private {
30585 struct mdio_ops {
30586 void (*write)(void __iomem *, int, int);
30587 int (*read)(void __iomem *, int);
30588- } mdio_ops;
30589+ } __no_const mdio_ops;
30590
30591 struct pll_power_ops {
30592 void (*down)(struct rtl8169_private *);
30593 void (*up)(struct rtl8169_private *);
30594- } pll_power_ops;
30595+ } __no_const pll_power_ops;
30596
30597 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
30598 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
30599diff -urNp linux-3.0.4/drivers/net/tg3.h linux-3.0.4/drivers/net/tg3.h
30600--- linux-3.0.4/drivers/net/tg3.h 2011-07-21 22:17:23.000000000 -0400
30601+++ linux-3.0.4/drivers/net/tg3.h 2011-08-23 21:47:55.000000000 -0400
30602@@ -134,6 +134,7 @@
30603 #define CHIPREV_ID_5750_A0 0x4000
30604 #define CHIPREV_ID_5750_A1 0x4001
30605 #define CHIPREV_ID_5750_A3 0x4003
30606+#define CHIPREV_ID_5750_C1 0x4201
30607 #define CHIPREV_ID_5750_C2 0x4202
30608 #define CHIPREV_ID_5752_A0_HW 0x5000
30609 #define CHIPREV_ID_5752_A0 0x6000
30610diff -urNp linux-3.0.4/drivers/net/tokenring/abyss.c linux-3.0.4/drivers/net/tokenring/abyss.c
30611--- linux-3.0.4/drivers/net/tokenring/abyss.c 2011-07-21 22:17:23.000000000 -0400
30612+++ linux-3.0.4/drivers/net/tokenring/abyss.c 2011-08-23 21:47:55.000000000 -0400
30613@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
30614
30615 static int __init abyss_init (void)
30616 {
30617- abyss_netdev_ops = tms380tr_netdev_ops;
30618+ pax_open_kernel();
30619+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
30620
30621- abyss_netdev_ops.ndo_open = abyss_open;
30622- abyss_netdev_ops.ndo_stop = abyss_close;
30623+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
30624+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
30625+ pax_close_kernel();
30626
30627 return pci_register_driver(&abyss_driver);
30628 }
30629diff -urNp linux-3.0.4/drivers/net/tokenring/madgemc.c linux-3.0.4/drivers/net/tokenring/madgemc.c
30630--- linux-3.0.4/drivers/net/tokenring/madgemc.c 2011-07-21 22:17:23.000000000 -0400
30631+++ linux-3.0.4/drivers/net/tokenring/madgemc.c 2011-08-23 21:47:55.000000000 -0400
30632@@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver
30633
30634 static int __init madgemc_init (void)
30635 {
30636- madgemc_netdev_ops = tms380tr_netdev_ops;
30637- madgemc_netdev_ops.ndo_open = madgemc_open;
30638- madgemc_netdev_ops.ndo_stop = madgemc_close;
30639+ pax_open_kernel();
30640+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
30641+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
30642+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
30643+ pax_close_kernel();
30644
30645 return mca_register_driver (&madgemc_driver);
30646 }
30647diff -urNp linux-3.0.4/drivers/net/tokenring/proteon.c linux-3.0.4/drivers/net/tokenring/proteon.c
30648--- linux-3.0.4/drivers/net/tokenring/proteon.c 2011-07-21 22:17:23.000000000 -0400
30649+++ linux-3.0.4/drivers/net/tokenring/proteon.c 2011-08-23 21:47:55.000000000 -0400
30650@@ -353,9 +353,11 @@ static int __init proteon_init(void)
30651 struct platform_device *pdev;
30652 int i, num = 0, err = 0;
30653
30654- proteon_netdev_ops = tms380tr_netdev_ops;
30655- proteon_netdev_ops.ndo_open = proteon_open;
30656- proteon_netdev_ops.ndo_stop = tms380tr_close;
30657+ pax_open_kernel();
30658+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
30659+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
30660+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
30661+ pax_close_kernel();
30662
30663 err = platform_driver_register(&proteon_driver);
30664 if (err)
30665diff -urNp linux-3.0.4/drivers/net/tokenring/skisa.c linux-3.0.4/drivers/net/tokenring/skisa.c
30666--- linux-3.0.4/drivers/net/tokenring/skisa.c 2011-07-21 22:17:23.000000000 -0400
30667+++ linux-3.0.4/drivers/net/tokenring/skisa.c 2011-08-23 21:47:55.000000000 -0400
30668@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
30669 struct platform_device *pdev;
30670 int i, num = 0, err = 0;
30671
30672- sk_isa_netdev_ops = tms380tr_netdev_ops;
30673- sk_isa_netdev_ops.ndo_open = sk_isa_open;
30674- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
30675+ pax_open_kernel();
30676+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
30677+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
30678+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
30679+ pax_close_kernel();
30680
30681 err = platform_driver_register(&sk_isa_driver);
30682 if (err)
30683diff -urNp linux-3.0.4/drivers/net/tulip/de2104x.c linux-3.0.4/drivers/net/tulip/de2104x.c
30684--- linux-3.0.4/drivers/net/tulip/de2104x.c 2011-07-21 22:17:23.000000000 -0400
30685+++ linux-3.0.4/drivers/net/tulip/de2104x.c 2011-08-23 21:48:14.000000000 -0400
30686@@ -1794,6 +1794,8 @@ static void __devinit de21041_get_srom_i
30687 struct de_srom_info_leaf *il;
30688 void *bufp;
30689
30690+ pax_track_stack();
30691+
30692 /* download entire eeprom */
30693 for (i = 0; i < DE_EEPROM_WORDS; i++)
30694 ((__le16 *)ee_data)[i] =
30695diff -urNp linux-3.0.4/drivers/net/tulip/de4x5.c linux-3.0.4/drivers/net/tulip/de4x5.c
30696--- linux-3.0.4/drivers/net/tulip/de4x5.c 2011-07-21 22:17:23.000000000 -0400
30697+++ linux-3.0.4/drivers/net/tulip/de4x5.c 2011-08-23 21:47:55.000000000 -0400
30698@@ -5401,7 +5401,7 @@ de4x5_ioctl(struct net_device *dev, stru
30699 for (i=0; i<ETH_ALEN; i++) {
30700 tmp.addr[i] = dev->dev_addr[i];
30701 }
30702- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
30703+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
30704 break;
30705
30706 case DE4X5_SET_HWADDR: /* Set the hardware address */
30707@@ -5441,7 +5441,7 @@ de4x5_ioctl(struct net_device *dev, stru
30708 spin_lock_irqsave(&lp->lock, flags);
30709 memcpy(&statbuf, &lp->pktStats, ioc->len);
30710 spin_unlock_irqrestore(&lp->lock, flags);
30711- if (copy_to_user(ioc->data, &statbuf, ioc->len))
30712+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
30713 return -EFAULT;
30714 break;
30715 }
30716diff -urNp linux-3.0.4/drivers/net/usb/hso.c linux-3.0.4/drivers/net/usb/hso.c
30717--- linux-3.0.4/drivers/net/usb/hso.c 2011-07-21 22:17:23.000000000 -0400
30718+++ linux-3.0.4/drivers/net/usb/hso.c 2011-08-23 21:47:55.000000000 -0400
30719@@ -71,7 +71,7 @@
30720 #include <asm/byteorder.h>
30721 #include <linux/serial_core.h>
30722 #include <linux/serial.h>
30723-
30724+#include <asm/local.h>
30725
30726 #define MOD_AUTHOR "Option Wireless"
30727 #define MOD_DESCRIPTION "USB High Speed Option driver"
30728@@ -257,7 +257,7 @@ struct hso_serial {
30729
30730 /* from usb_serial_port */
30731 struct tty_struct *tty;
30732- int open_count;
30733+ local_t open_count;
30734 spinlock_t serial_lock;
30735
30736 int (*write_data) (struct hso_serial *serial);
30737@@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_
30738 struct urb *urb;
30739
30740 urb = serial->rx_urb[0];
30741- if (serial->open_count > 0) {
30742+ if (local_read(&serial->open_count) > 0) {
30743 count = put_rxbuf_data(urb, serial);
30744 if (count == -1)
30745 return;
30746@@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_cal
30747 DUMP1(urb->transfer_buffer, urb->actual_length);
30748
30749 /* Anyone listening? */
30750- if (serial->open_count == 0)
30751+ if (local_read(&serial->open_count) == 0)
30752 return;
30753
30754 if (status == 0) {
30755@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
30756 spin_unlock_irq(&serial->serial_lock);
30757
30758 /* check for port already opened, if not set the termios */
30759- serial->open_count++;
30760- if (serial->open_count == 1) {
30761+ if (local_inc_return(&serial->open_count) == 1) {
30762 serial->rx_state = RX_IDLE;
30763 /* Force default termio settings */
30764 _hso_serial_set_termios(tty, NULL);
30765@@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_st
30766 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
30767 if (result) {
30768 hso_stop_serial_device(serial->parent);
30769- serial->open_count--;
30770+ local_dec(&serial->open_count);
30771 kref_put(&serial->parent->ref, hso_serial_ref_free);
30772 }
30773 } else {
30774@@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_
30775
30776 /* reset the rts and dtr */
30777 /* do the actual close */
30778- serial->open_count--;
30779+ local_dec(&serial->open_count);
30780
30781- if (serial->open_count <= 0) {
30782- serial->open_count = 0;
30783+ if (local_read(&serial->open_count) <= 0) {
30784+ local_set(&serial->open_count, 0);
30785 spin_lock_irq(&serial->serial_lock);
30786 if (serial->tty == tty) {
30787 serial->tty->driver_data = NULL;
30788@@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struc
30789
30790 /* the actual setup */
30791 spin_lock_irqsave(&serial->serial_lock, flags);
30792- if (serial->open_count)
30793+ if (local_read(&serial->open_count))
30794 _hso_serial_set_termios(tty, old);
30795 else
30796 tty->termios = old;
30797@@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *ur
30798 D1("Pending read interrupt on port %d\n", i);
30799 spin_lock(&serial->serial_lock);
30800 if (serial->rx_state == RX_IDLE &&
30801- serial->open_count > 0) {
30802+ local_read(&serial->open_count) > 0) {
30803 /* Setup and send a ctrl req read on
30804 * port i */
30805 if (!serial->rx_urb_filled[0]) {
30806@@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interfa
30807 /* Start all serial ports */
30808 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
30809 if (serial_table[i] && (serial_table[i]->interface == iface)) {
30810- if (dev2ser(serial_table[i])->open_count) {
30811+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
30812 result =
30813 hso_start_serial_device(serial_table[i], GFP_NOIO);
30814 hso_kick_transmit(dev2ser(serial_table[i]));
30815diff -urNp linux-3.0.4/drivers/net/vmxnet3/vmxnet3_ethtool.c linux-3.0.4/drivers/net/vmxnet3/vmxnet3_ethtool.c
30816--- linux-3.0.4/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-07-21 22:17:23.000000000 -0400
30817+++ linux-3.0.4/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-08-23 21:47:55.000000000 -0400
30818@@ -594,8 +594,7 @@ vmxnet3_set_rss_indir(struct net_device
30819 * Return with error code if any of the queue indices
30820 * is out of range
30821 */
30822- if (p->ring_index[i] < 0 ||
30823- p->ring_index[i] >= adapter->num_rx_queues)
30824+ if (p->ring_index[i] >= adapter->num_rx_queues)
30825 return -EINVAL;
30826 }
30827
30828diff -urNp linux-3.0.4/drivers/net/vxge/vxge-config.h linux-3.0.4/drivers/net/vxge/vxge-config.h
30829--- linux-3.0.4/drivers/net/vxge/vxge-config.h 2011-07-21 22:17:23.000000000 -0400
30830+++ linux-3.0.4/drivers/net/vxge/vxge-config.h 2011-08-23 21:47:55.000000000 -0400
30831@@ -512,7 +512,7 @@ struct vxge_hw_uld_cbs {
30832 void (*link_down)(struct __vxge_hw_device *devh);
30833 void (*crit_err)(struct __vxge_hw_device *devh,
30834 enum vxge_hw_event type, u64 ext_data);
30835-};
30836+} __no_const;
30837
30838 /*
30839 * struct __vxge_hw_blockpool_entry - Block private data structure
30840diff -urNp linux-3.0.4/drivers/net/vxge/vxge-main.c linux-3.0.4/drivers/net/vxge/vxge-main.c
30841--- linux-3.0.4/drivers/net/vxge/vxge-main.c 2011-07-21 22:17:23.000000000 -0400
30842+++ linux-3.0.4/drivers/net/vxge/vxge-main.c 2011-08-23 21:48:14.000000000 -0400
30843@@ -98,6 +98,8 @@ static inline void VXGE_COMPLETE_VPATH_T
30844 struct sk_buff *completed[NR_SKB_COMPLETED];
30845 int more;
30846
30847+ pax_track_stack();
30848+
30849 do {
30850 more = 0;
30851 skb_ptr = completed;
30852@@ -1920,6 +1922,8 @@ static enum vxge_hw_status vxge_rth_conf
30853 u8 mtable[256] = {0}; /* CPU to vpath mapping */
30854 int index;
30855
30856+ pax_track_stack();
30857+
30858 /*
30859 * Filling
30860 * - itable with bucket numbers
30861diff -urNp linux-3.0.4/drivers/net/vxge/vxge-traffic.h linux-3.0.4/drivers/net/vxge/vxge-traffic.h
30862--- linux-3.0.4/drivers/net/vxge/vxge-traffic.h 2011-07-21 22:17:23.000000000 -0400
30863+++ linux-3.0.4/drivers/net/vxge/vxge-traffic.h 2011-08-23 21:47:55.000000000 -0400
30864@@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
30865 struct vxge_hw_mempool_dma *dma_object,
30866 u32 index,
30867 u32 is_last);
30868-};
30869+} __no_const;
30870
30871 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
30872 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
30873diff -urNp linux-3.0.4/drivers/net/wan/cycx_x25.c linux-3.0.4/drivers/net/wan/cycx_x25.c
30874--- linux-3.0.4/drivers/net/wan/cycx_x25.c 2011-07-21 22:17:23.000000000 -0400
30875+++ linux-3.0.4/drivers/net/wan/cycx_x25.c 2011-08-23 21:48:14.000000000 -0400
30876@@ -1018,6 +1018,8 @@ static void hex_dump(char *msg, unsigned
30877 unsigned char hex[1024],
30878 * phex = hex;
30879
30880+ pax_track_stack();
30881+
30882 if (len >= (sizeof(hex) / 2))
30883 len = (sizeof(hex) / 2) - 1;
30884
30885diff -urNp linux-3.0.4/drivers/net/wan/hdlc_x25.c linux-3.0.4/drivers/net/wan/hdlc_x25.c
30886--- linux-3.0.4/drivers/net/wan/hdlc_x25.c 2011-07-21 22:17:23.000000000 -0400
30887+++ linux-3.0.4/drivers/net/wan/hdlc_x25.c 2011-08-23 21:47:55.000000000 -0400
30888@@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
30889
30890 static int x25_open(struct net_device *dev)
30891 {
30892- struct lapb_register_struct cb;
30893+ static struct lapb_register_struct cb = {
30894+ .connect_confirmation = x25_connected,
30895+ .connect_indication = x25_connected,
30896+ .disconnect_confirmation = x25_disconnected,
30897+ .disconnect_indication = x25_disconnected,
30898+ .data_indication = x25_data_indication,
30899+ .data_transmit = x25_data_transmit
30900+ };
30901 int result;
30902
30903- cb.connect_confirmation = x25_connected;
30904- cb.connect_indication = x25_connected;
30905- cb.disconnect_confirmation = x25_disconnected;
30906- cb.disconnect_indication = x25_disconnected;
30907- cb.data_indication = x25_data_indication;
30908- cb.data_transmit = x25_data_transmit;
30909-
30910 result = lapb_register(dev, &cb);
30911 if (result != LAPB_OK)
30912 return result;
30913diff -urNp linux-3.0.4/drivers/net/wimax/i2400m/usb-fw.c linux-3.0.4/drivers/net/wimax/i2400m/usb-fw.c
30914--- linux-3.0.4/drivers/net/wimax/i2400m/usb-fw.c 2011-07-21 22:17:23.000000000 -0400
30915+++ linux-3.0.4/drivers/net/wimax/i2400m/usb-fw.c 2011-08-23 21:48:14.000000000 -0400
30916@@ -287,6 +287,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
30917 int do_autopm = 1;
30918 DECLARE_COMPLETION_ONSTACK(notif_completion);
30919
30920+ pax_track_stack();
30921+
30922 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
30923 i2400m, ack, ack_size);
30924 BUG_ON(_ack == i2400m->bm_ack_buf);
30925diff -urNp linux-3.0.4/drivers/net/wireless/airo.c linux-3.0.4/drivers/net/wireless/airo.c
30926--- linux-3.0.4/drivers/net/wireless/airo.c 2011-09-02 18:11:21.000000000 -0400
30927+++ linux-3.0.4/drivers/net/wireless/airo.c 2011-08-23 21:48:14.000000000 -0400
30928@@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
30929 BSSListElement * loop_net;
30930 BSSListElement * tmp_net;
30931
30932+ pax_track_stack();
30933+
30934 /* Blow away current list of scan results */
30935 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
30936 list_move_tail (&loop_net->list, &ai->network_free_list);
30937@@ -3794,6 +3796,8 @@ static u16 setup_card(struct airo_info *
30938 WepKeyRid wkr;
30939 int rc;
30940
30941+ pax_track_stack();
30942+
30943 memset( &mySsid, 0, sizeof( mySsid ) );
30944 kfree (ai->flash);
30945 ai->flash = NULL;
30946@@ -4753,6 +4757,8 @@ static int proc_stats_rid_open( struct i
30947 __le32 *vals = stats.vals;
30948 int len;
30949
30950+ pax_track_stack();
30951+
30952 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
30953 return -ENOMEM;
30954 data = file->private_data;
30955@@ -5476,6 +5482,8 @@ static int proc_BSSList_open( struct ino
30956 /* If doLoseSync is not 1, we won't do a Lose Sync */
30957 int doLoseSync = -1;
30958
30959+ pax_track_stack();
30960+
30961 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
30962 return -ENOMEM;
30963 data = file->private_data;
30964@@ -7181,6 +7189,8 @@ static int airo_get_aplist(struct net_de
30965 int i;
30966 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
30967
30968+ pax_track_stack();
30969+
30970 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
30971 if (!qual)
30972 return -ENOMEM;
30973@@ -7741,6 +7751,8 @@ static void airo_read_wireless_stats(str
30974 CapabilityRid cap_rid;
30975 __le32 *vals = stats_rid.vals;
30976
30977+ pax_track_stack();
30978+
30979 /* Get stats out of the card */
30980 clear_bit(JOB_WSTATS, &local->jobs);
30981 if (local->power.event) {
30982diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath5k/debug.c linux-3.0.4/drivers/net/wireless/ath/ath5k/debug.c
30983--- linux-3.0.4/drivers/net/wireless/ath/ath5k/debug.c 2011-07-21 22:17:23.000000000 -0400
30984+++ linux-3.0.4/drivers/net/wireless/ath/ath5k/debug.c 2011-08-23 21:48:14.000000000 -0400
30985@@ -204,6 +204,8 @@ static ssize_t read_file_beacon(struct f
30986 unsigned int v;
30987 u64 tsf;
30988
30989+ pax_track_stack();
30990+
30991 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
30992 len += snprintf(buf+len, sizeof(buf)-len,
30993 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
30994@@ -323,6 +325,8 @@ static ssize_t read_file_debug(struct fi
30995 unsigned int len = 0;
30996 unsigned int i;
30997
30998+ pax_track_stack();
30999+
31000 len += snprintf(buf+len, sizeof(buf)-len,
31001 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
31002
31003@@ -384,6 +388,8 @@ static ssize_t read_file_antenna(struct
31004 unsigned int i;
31005 unsigned int v;
31006
31007+ pax_track_stack();
31008+
31009 len += snprintf(buf+len, sizeof(buf)-len, "antenna mode\t%d\n",
31010 sc->ah->ah_ant_mode);
31011 len += snprintf(buf+len, sizeof(buf)-len, "default antenna\t%d\n",
31012@@ -494,6 +500,8 @@ static ssize_t read_file_misc(struct fil
31013 unsigned int len = 0;
31014 u32 filt = ath5k_hw_get_rx_filter(sc->ah);
31015
31016+ pax_track_stack();
31017+
31018 len += snprintf(buf+len, sizeof(buf)-len, "bssid-mask: %pM\n",
31019 sc->bssidmask);
31020 len += snprintf(buf+len, sizeof(buf)-len, "filter-flags: 0x%x ",
31021@@ -550,6 +558,8 @@ static ssize_t read_file_frameerrors(str
31022 unsigned int len = 0;
31023 int i;
31024
31025+ pax_track_stack();
31026+
31027 len += snprintf(buf+len, sizeof(buf)-len,
31028 "RX\n---------------------\n");
31029 len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%u\t(%u%%)\n",
31030@@ -667,6 +677,8 @@ static ssize_t read_file_ani(struct file
31031 char buf[700];
31032 unsigned int len = 0;
31033
31034+ pax_track_stack();
31035+
31036 len += snprintf(buf+len, sizeof(buf)-len,
31037 "HW has PHY error counters:\t%s\n",
31038 sc->ah->ah_capabilities.cap_has_phyerr_counters ?
31039@@ -827,6 +839,8 @@ static ssize_t read_file_queue(struct fi
31040 struct ath5k_buf *bf, *bf0;
31041 int i, n;
31042
31043+ pax_track_stack();
31044+
31045 len += snprintf(buf+len, sizeof(buf)-len,
31046 "available txbuffers: %d\n", sc->txbuf_len);
31047
31048diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c
31049--- linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-07-21 22:17:23.000000000 -0400
31050+++ linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-08-23 21:48:14.000000000 -0400
31051@@ -757,6 +757,8 @@ static void ar9003_hw_tx_iq_cal_post_pro
31052 int i, im, j;
31053 int nmeasurement;
31054
31055+ pax_track_stack();
31056+
31057 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
31058 if (ah->txchainmask & (1 << i))
31059 num_chains++;
31060diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
31061--- linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-07-21 22:17:23.000000000 -0400
31062+++ linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-08-23 21:48:14.000000000 -0400
31063@@ -356,6 +356,8 @@ static bool create_pa_curve(u32 *data_L,
31064 int theta_low_bin = 0;
31065 int i;
31066
31067+ pax_track_stack();
31068+
31069 /* disregard any bin that contains <= 16 samples */
31070 thresh_accum_cnt = 16;
31071 scale_factor = 5;
31072diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/debug.c linux-3.0.4/drivers/net/wireless/ath/ath9k/debug.c
31073--- linux-3.0.4/drivers/net/wireless/ath/ath9k/debug.c 2011-07-21 22:17:23.000000000 -0400
31074+++ linux-3.0.4/drivers/net/wireless/ath/ath9k/debug.c 2011-08-23 21:48:14.000000000 -0400
31075@@ -337,6 +337,8 @@ static ssize_t read_file_interrupt(struc
31076 char buf[512];
31077 unsigned int len = 0;
31078
31079+ pax_track_stack();
31080+
31081 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
31082 len += snprintf(buf + len, sizeof(buf) - len,
31083 "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
31084@@ -427,6 +429,8 @@ static ssize_t read_file_wiphy(struct fi
31085 u8 addr[ETH_ALEN];
31086 u32 tmp;
31087
31088+ pax_track_stack();
31089+
31090 len += snprintf(buf + len, sizeof(buf) - len,
31091 "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
31092 wiphy_name(sc->hw->wiphy),
31093diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/htc_drv_debug.c linux-3.0.4/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
31094--- linux-3.0.4/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-07-21 22:17:23.000000000 -0400
31095+++ linux-3.0.4/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-08-23 21:48:14.000000000 -0400
31096@@ -31,6 +31,8 @@ static ssize_t read_file_tgt_int_stats(s
31097 unsigned int len = 0;
31098 int ret = 0;
31099
31100+ pax_track_stack();
31101+
31102 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
31103
31104 ath9k_htc_ps_wakeup(priv);
31105@@ -89,6 +91,8 @@ static ssize_t read_file_tgt_tx_stats(st
31106 unsigned int len = 0;
31107 int ret = 0;
31108
31109+ pax_track_stack();
31110+
31111 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
31112
31113 ath9k_htc_ps_wakeup(priv);
31114@@ -159,6 +163,8 @@ static ssize_t read_file_tgt_rx_stats(st
31115 unsigned int len = 0;
31116 int ret = 0;
31117
31118+ pax_track_stack();
31119+
31120 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
31121
31122 ath9k_htc_ps_wakeup(priv);
31123@@ -203,6 +209,8 @@ static ssize_t read_file_xmit(struct fil
31124 char buf[512];
31125 unsigned int len = 0;
31126
31127+ pax_track_stack();
31128+
31129 len += snprintf(buf + len, sizeof(buf) - len,
31130 "%20s : %10u\n", "Buffers queued",
31131 priv->debug.tx_stats.buf_queued);
31132@@ -376,6 +384,8 @@ static ssize_t read_file_slot(struct fil
31133 char buf[512];
31134 unsigned int len = 0;
31135
31136+ pax_track_stack();
31137+
31138 spin_lock_bh(&priv->tx.tx_lock);
31139
31140 len += snprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
31141@@ -411,6 +421,8 @@ static ssize_t read_file_queue(struct fi
31142 char buf[512];
31143 unsigned int len = 0;
31144
31145+ pax_track_stack();
31146+
31147 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
31148 "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
31149
31150diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/hw.h linux-3.0.4/drivers/net/wireless/ath/ath9k/hw.h
31151--- linux-3.0.4/drivers/net/wireless/ath/ath9k/hw.h 2011-09-02 18:11:21.000000000 -0400
31152+++ linux-3.0.4/drivers/net/wireless/ath/ath9k/hw.h 2011-08-23 21:47:55.000000000 -0400
31153@@ -585,7 +585,7 @@ struct ath_hw_private_ops {
31154
31155 /* ANI */
31156 void (*ani_cache_ini_regs)(struct ath_hw *ah);
31157-};
31158+} __no_const;
31159
31160 /**
31161 * struct ath_hw_ops - callbacks used by hardware code and driver code
31162@@ -637,7 +637,7 @@ struct ath_hw_ops {
31163 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
31164 struct ath_hw_antcomb_conf *antconf);
31165
31166-};
31167+} __no_const;
31168
31169 struct ath_nf_limits {
31170 s16 max;
31171@@ -650,7 +650,7 @@ struct ath_nf_limits {
31172 #define AH_UNPLUGGED 0x2 /* The card has been physically removed. */
31173
31174 struct ath_hw {
31175- struct ath_ops reg_ops;
31176+ ath_ops_no_const reg_ops;
31177
31178 struct ieee80211_hw *hw;
31179 struct ath_common common;
31180diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath.h linux-3.0.4/drivers/net/wireless/ath/ath.h
31181--- linux-3.0.4/drivers/net/wireless/ath/ath.h 2011-07-21 22:17:23.000000000 -0400
31182+++ linux-3.0.4/drivers/net/wireless/ath/ath.h 2011-08-23 21:47:55.000000000 -0400
31183@@ -121,6 +121,7 @@ struct ath_ops {
31184 void (*write_flush) (void *);
31185 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
31186 };
31187+typedef struct ath_ops __no_const ath_ops_no_const;
31188
31189 struct ath_common;
31190 struct ath_bus_ops;
31191diff -urNp linux-3.0.4/drivers/net/wireless/ipw2x00/ipw2100.c linux-3.0.4/drivers/net/wireless/ipw2x00/ipw2100.c
31192--- linux-3.0.4/drivers/net/wireless/ipw2x00/ipw2100.c 2011-07-21 22:17:23.000000000 -0400
31193+++ linux-3.0.4/drivers/net/wireless/ipw2x00/ipw2100.c 2011-08-23 21:48:14.000000000 -0400
31194@@ -2100,6 +2100,8 @@ static int ipw2100_set_essid(struct ipw2
31195 int err;
31196 DECLARE_SSID_BUF(ssid);
31197
31198+ pax_track_stack();
31199+
31200 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
31201
31202 if (ssid_len)
31203@@ -5449,6 +5451,8 @@ static int ipw2100_set_key(struct ipw210
31204 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
31205 int err;
31206
31207+ pax_track_stack();
31208+
31209 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
31210 idx, keylen, len);
31211
31212diff -urNp linux-3.0.4/drivers/net/wireless/ipw2x00/libipw_rx.c linux-3.0.4/drivers/net/wireless/ipw2x00/libipw_rx.c
31213--- linux-3.0.4/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-07-21 22:17:23.000000000 -0400
31214+++ linux-3.0.4/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-08-23 21:48:14.000000000 -0400
31215@@ -1565,6 +1565,8 @@ static void libipw_process_probe_respons
31216 unsigned long flags;
31217 DECLARE_SSID_BUF(ssid);
31218
31219+ pax_track_stack();
31220+
31221 LIBIPW_DEBUG_SCAN("'%s' (%pM"
31222 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
31223 print_ssid(ssid, info_element->data, info_element->len),
31224diff -urNp linux-3.0.4/drivers/net/wireless/iwlegacy/iwl3945-base.c linux-3.0.4/drivers/net/wireless/iwlegacy/iwl3945-base.c
31225--- linux-3.0.4/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-07-21 22:17:23.000000000 -0400
31226+++ linux-3.0.4/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-08-23 21:47:55.000000000 -0400
31227@@ -3962,7 +3962,9 @@ static int iwl3945_pci_probe(struct pci_
31228 */
31229 if (iwl3945_mod_params.disable_hw_scan) {
31230 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
31231- iwl3945_hw_ops.hw_scan = NULL;
31232+ pax_open_kernel();
31233+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
31234+ pax_close_kernel();
31235 }
31236
31237 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
31238diff -urNp linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
31239--- linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-07-21 22:17:23.000000000 -0400
31240+++ linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-08-23 21:48:14.000000000 -0400
31241@@ -910,6 +910,8 @@ static void rs_tx_status(void *priv_r, s
31242 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
31243 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
31244
31245+ pax_track_stack();
31246+
31247 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
31248
31249 /* Treat uninitialized rate scaling data same as non-existing. */
31250@@ -2918,6 +2920,8 @@ static void rs_fill_link_cmd(struct iwl_
31251 container_of(lq_sta, struct iwl_station_priv, lq_sta);
31252 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
31253
31254+ pax_track_stack();
31255+
31256 /* Override starting rate (index 0) if needed for debug purposes */
31257 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
31258
31259diff -urNp linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c
31260--- linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-07-21 22:17:23.000000000 -0400
31261+++ linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-08-23 21:48:14.000000000 -0400
31262@@ -548,6 +548,8 @@ static ssize_t iwl_dbgfs_status_read(str
31263 int pos = 0;
31264 const size_t bufsz = sizeof(buf);
31265
31266+ pax_track_stack();
31267+
31268 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
31269 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
31270 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
31271@@ -680,6 +682,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
31272 char buf[256 * NUM_IWL_RXON_CTX];
31273 const size_t bufsz = sizeof(buf);
31274
31275+ pax_track_stack();
31276+
31277 for_each_context(priv, ctx) {
31278 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
31279 ctx->ctxid);
31280diff -urNp linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debug.h linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debug.h
31281--- linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-07-21 22:17:23.000000000 -0400
31282+++ linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-08-23 21:47:55.000000000 -0400
31283@@ -68,8 +68,8 @@ do {
31284 } while (0)
31285
31286 #else
31287-#define IWL_DEBUG(__priv, level, fmt, args...)
31288-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
31289+#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
31290+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
31291 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
31292 const void *p, u32 len)
31293 {}
31294diff -urNp linux-3.0.4/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-3.0.4/drivers/net/wireless/iwmc3200wifi/debugfs.c
31295--- linux-3.0.4/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-07-21 22:17:23.000000000 -0400
31296+++ linux-3.0.4/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-08-23 21:48:14.000000000 -0400
31297@@ -327,6 +327,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
31298 int buf_len = 512;
31299 size_t len = 0;
31300
31301+ pax_track_stack();
31302+
31303 if (*ppos != 0)
31304 return 0;
31305 if (count < sizeof(buf))
31306diff -urNp linux-3.0.4/drivers/net/wireless/mac80211_hwsim.c linux-3.0.4/drivers/net/wireless/mac80211_hwsim.c
31307--- linux-3.0.4/drivers/net/wireless/mac80211_hwsim.c 2011-07-21 22:17:23.000000000 -0400
31308+++ linux-3.0.4/drivers/net/wireless/mac80211_hwsim.c 2011-08-23 21:47:55.000000000 -0400
31309@@ -1260,9 +1260,11 @@ static int __init init_mac80211_hwsim(vo
31310 return -EINVAL;
31311
31312 if (fake_hw_scan) {
31313- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
31314- mac80211_hwsim_ops.sw_scan_start = NULL;
31315- mac80211_hwsim_ops.sw_scan_complete = NULL;
31316+ pax_open_kernel();
31317+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
31318+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
31319+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
31320+ pax_close_kernel();
31321 }
31322
31323 spin_lock_init(&hwsim_radio_lock);
31324diff -urNp linux-3.0.4/drivers/net/wireless/rndis_wlan.c linux-3.0.4/drivers/net/wireless/rndis_wlan.c
31325--- linux-3.0.4/drivers/net/wireless/rndis_wlan.c 2011-07-21 22:17:23.000000000 -0400
31326+++ linux-3.0.4/drivers/net/wireless/rndis_wlan.c 2011-08-23 21:47:55.000000000 -0400
31327@@ -1277,7 +1277,7 @@ static int set_rts_threshold(struct usbn
31328
31329 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
31330
31331- if (rts_threshold < 0 || rts_threshold > 2347)
31332+ if (rts_threshold > 2347)
31333 rts_threshold = 2347;
31334
31335 tmp = cpu_to_le32(rts_threshold);
31336diff -urNp linux-3.0.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c linux-3.0.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
31337--- linux-3.0.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-07-21 22:17:23.000000000 -0400
31338+++ linux-3.0.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-08-23 21:48:14.000000000 -0400
31339@@ -837,6 +837,8 @@ bool _rtl92c_phy_sw_chnl_step_by_step(st
31340 u8 rfpath;
31341 u8 num_total_rfpath = rtlphy->num_total_rfpath;
31342
31343+ pax_track_stack();
31344+
31345 precommoncmdcnt = 0;
31346 _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
31347 MAX_PRECMD_CNT,
31348diff -urNp linux-3.0.4/drivers/net/wireless/wl1251/wl1251.h linux-3.0.4/drivers/net/wireless/wl1251/wl1251.h
31349--- linux-3.0.4/drivers/net/wireless/wl1251/wl1251.h 2011-07-21 22:17:23.000000000 -0400
31350+++ linux-3.0.4/drivers/net/wireless/wl1251/wl1251.h 2011-08-23 21:47:55.000000000 -0400
31351@@ -266,7 +266,7 @@ struct wl1251_if_operations {
31352 void (*reset)(struct wl1251 *wl);
31353 void (*enable_irq)(struct wl1251 *wl);
31354 void (*disable_irq)(struct wl1251 *wl);
31355-};
31356+} __no_const;
31357
31358 struct wl1251 {
31359 struct ieee80211_hw *hw;
31360diff -urNp linux-3.0.4/drivers/net/wireless/wl12xx/spi.c linux-3.0.4/drivers/net/wireless/wl12xx/spi.c
31361--- linux-3.0.4/drivers/net/wireless/wl12xx/spi.c 2011-07-21 22:17:23.000000000 -0400
31362+++ linux-3.0.4/drivers/net/wireless/wl12xx/spi.c 2011-08-23 21:48:14.000000000 -0400
31363@@ -280,6 +280,8 @@ static void wl1271_spi_raw_write(struct
31364 u32 chunk_len;
31365 int i;
31366
31367+ pax_track_stack();
31368+
31369 WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);
31370
31371 spi_message_init(&m);
31372diff -urNp linux-3.0.4/drivers/oprofile/buffer_sync.c linux-3.0.4/drivers/oprofile/buffer_sync.c
31373--- linux-3.0.4/drivers/oprofile/buffer_sync.c 2011-07-21 22:17:23.000000000 -0400
31374+++ linux-3.0.4/drivers/oprofile/buffer_sync.c 2011-08-23 21:47:55.000000000 -0400
31375@@ -343,7 +343,7 @@ static void add_data(struct op_entry *en
31376 if (cookie == NO_COOKIE)
31377 offset = pc;
31378 if (cookie == INVALID_COOKIE) {
31379- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
31380+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
31381 offset = pc;
31382 }
31383 if (cookie != last_cookie) {
31384@@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct
31385 /* add userspace sample */
31386
31387 if (!mm) {
31388- atomic_inc(&oprofile_stats.sample_lost_no_mm);
31389+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
31390 return 0;
31391 }
31392
31393 cookie = lookup_dcookie(mm, s->eip, &offset);
31394
31395 if (cookie == INVALID_COOKIE) {
31396- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
31397+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
31398 return 0;
31399 }
31400
31401@@ -563,7 +563,7 @@ void sync_buffer(int cpu)
31402 /* ignore backtraces if failed to add a sample */
31403 if (state == sb_bt_start) {
31404 state = sb_bt_ignore;
31405- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
31406+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
31407 }
31408 }
31409 release_mm(mm);
31410diff -urNp linux-3.0.4/drivers/oprofile/event_buffer.c linux-3.0.4/drivers/oprofile/event_buffer.c
31411--- linux-3.0.4/drivers/oprofile/event_buffer.c 2011-07-21 22:17:23.000000000 -0400
31412+++ linux-3.0.4/drivers/oprofile/event_buffer.c 2011-08-23 21:47:55.000000000 -0400
31413@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
31414 }
31415
31416 if (buffer_pos == buffer_size) {
31417- atomic_inc(&oprofile_stats.event_lost_overflow);
31418+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
31419 return;
31420 }
31421
31422diff -urNp linux-3.0.4/drivers/oprofile/oprof.c linux-3.0.4/drivers/oprofile/oprof.c
31423--- linux-3.0.4/drivers/oprofile/oprof.c 2011-07-21 22:17:23.000000000 -0400
31424+++ linux-3.0.4/drivers/oprofile/oprof.c 2011-08-23 21:47:55.000000000 -0400
31425@@ -110,7 +110,7 @@ static void switch_worker(struct work_st
31426 if (oprofile_ops.switch_events())
31427 return;
31428
31429- atomic_inc(&oprofile_stats.multiplex_counter);
31430+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
31431 start_switch_worker();
31432 }
31433
31434diff -urNp linux-3.0.4/drivers/oprofile/oprofilefs.c linux-3.0.4/drivers/oprofile/oprofilefs.c
31435--- linux-3.0.4/drivers/oprofile/oprofilefs.c 2011-07-21 22:17:23.000000000 -0400
31436+++ linux-3.0.4/drivers/oprofile/oprofilefs.c 2011-08-23 21:47:55.000000000 -0400
31437@@ -186,7 +186,7 @@ static const struct file_operations atom
31438
31439
31440 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
31441- char const *name, atomic_t *val)
31442+ char const *name, atomic_unchecked_t *val)
31443 {
31444 return __oprofilefs_create_file(sb, root, name,
31445 &atomic_ro_fops, 0444, val);
31446diff -urNp linux-3.0.4/drivers/oprofile/oprofile_stats.c linux-3.0.4/drivers/oprofile/oprofile_stats.c
31447--- linux-3.0.4/drivers/oprofile/oprofile_stats.c 2011-07-21 22:17:23.000000000 -0400
31448+++ linux-3.0.4/drivers/oprofile/oprofile_stats.c 2011-08-23 21:47:55.000000000 -0400
31449@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
31450 cpu_buf->sample_invalid_eip = 0;
31451 }
31452
31453- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
31454- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
31455- atomic_set(&oprofile_stats.event_lost_overflow, 0);
31456- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
31457- atomic_set(&oprofile_stats.multiplex_counter, 0);
31458+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
31459+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
31460+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
31461+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
31462+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
31463 }
31464
31465
31466diff -urNp linux-3.0.4/drivers/oprofile/oprofile_stats.h linux-3.0.4/drivers/oprofile/oprofile_stats.h
31467--- linux-3.0.4/drivers/oprofile/oprofile_stats.h 2011-07-21 22:17:23.000000000 -0400
31468+++ linux-3.0.4/drivers/oprofile/oprofile_stats.h 2011-08-23 21:47:55.000000000 -0400
31469@@ -13,11 +13,11 @@
31470 #include <asm/atomic.h>
31471
31472 struct oprofile_stat_struct {
31473- atomic_t sample_lost_no_mm;
31474- atomic_t sample_lost_no_mapping;
31475- atomic_t bt_lost_no_mapping;
31476- atomic_t event_lost_overflow;
31477- atomic_t multiplex_counter;
31478+ atomic_unchecked_t sample_lost_no_mm;
31479+ atomic_unchecked_t sample_lost_no_mapping;
31480+ atomic_unchecked_t bt_lost_no_mapping;
31481+ atomic_unchecked_t event_lost_overflow;
31482+ atomic_unchecked_t multiplex_counter;
31483 };
31484
31485 extern struct oprofile_stat_struct oprofile_stats;
31486diff -urNp linux-3.0.4/drivers/parport/procfs.c linux-3.0.4/drivers/parport/procfs.c
31487--- linux-3.0.4/drivers/parport/procfs.c 2011-07-21 22:17:23.000000000 -0400
31488+++ linux-3.0.4/drivers/parport/procfs.c 2011-08-23 21:47:55.000000000 -0400
31489@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
31490
31491 *ppos += len;
31492
31493- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
31494+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
31495 }
31496
31497 #ifdef CONFIG_PARPORT_1284
31498@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
31499
31500 *ppos += len;
31501
31502- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
31503+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
31504 }
31505 #endif /* IEEE1284.3 support. */
31506
31507diff -urNp linux-3.0.4/drivers/pci/hotplug/cpci_hotplug.h linux-3.0.4/drivers/pci/hotplug/cpci_hotplug.h
31508--- linux-3.0.4/drivers/pci/hotplug/cpci_hotplug.h 2011-07-21 22:17:23.000000000 -0400
31509+++ linux-3.0.4/drivers/pci/hotplug/cpci_hotplug.h 2011-08-23 21:47:55.000000000 -0400
31510@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
31511 int (*hardware_test) (struct slot* slot, u32 value);
31512 u8 (*get_power) (struct slot* slot);
31513 int (*set_power) (struct slot* slot, int value);
31514-};
31515+} __no_const;
31516
31517 struct cpci_hp_controller {
31518 unsigned int irq;
31519diff -urNp linux-3.0.4/drivers/pci/hotplug/cpqphp_nvram.c linux-3.0.4/drivers/pci/hotplug/cpqphp_nvram.c
31520--- linux-3.0.4/drivers/pci/hotplug/cpqphp_nvram.c 2011-07-21 22:17:23.000000000 -0400
31521+++ linux-3.0.4/drivers/pci/hotplug/cpqphp_nvram.c 2011-08-23 21:47:55.000000000 -0400
31522@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
31523
31524 void compaq_nvram_init (void __iomem *rom_start)
31525 {
31526+
31527+#ifndef CONFIG_PAX_KERNEXEC
31528 if (rom_start) {
31529 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
31530 }
31531+#endif
31532+
31533 dbg("int15 entry = %p\n", compaq_int15_entry_point);
31534
31535 /* initialize our int15 lock */
31536diff -urNp linux-3.0.4/drivers/pci/pcie/aspm.c linux-3.0.4/drivers/pci/pcie/aspm.c
31537--- linux-3.0.4/drivers/pci/pcie/aspm.c 2011-07-21 22:17:23.000000000 -0400
31538+++ linux-3.0.4/drivers/pci/pcie/aspm.c 2011-08-23 21:47:55.000000000 -0400
31539@@ -27,9 +27,9 @@
31540 #define MODULE_PARAM_PREFIX "pcie_aspm."
31541
31542 /* Note: those are not register definitions */
31543-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
31544-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
31545-#define ASPM_STATE_L1 (4) /* L1 state */
31546+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
31547+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
31548+#define ASPM_STATE_L1 (4U) /* L1 state */
31549 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
31550 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
31551
31552diff -urNp linux-3.0.4/drivers/pci/probe.c linux-3.0.4/drivers/pci/probe.c
31553--- linux-3.0.4/drivers/pci/probe.c 2011-07-21 22:17:23.000000000 -0400
31554+++ linux-3.0.4/drivers/pci/probe.c 2011-08-23 21:47:55.000000000 -0400
31555@@ -129,7 +129,7 @@ int __pci_read_base(struct pci_dev *dev,
31556 u32 l, sz, mask;
31557 u16 orig_cmd;
31558
31559- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
31560+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
31561
31562 if (!dev->mmio_always_on) {
31563 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
31564diff -urNp linux-3.0.4/drivers/pci/proc.c linux-3.0.4/drivers/pci/proc.c
31565--- linux-3.0.4/drivers/pci/proc.c 2011-07-21 22:17:23.000000000 -0400
31566+++ linux-3.0.4/drivers/pci/proc.c 2011-08-23 21:48:14.000000000 -0400
31567@@ -476,7 +476,16 @@ static const struct file_operations proc
31568 static int __init pci_proc_init(void)
31569 {
31570 struct pci_dev *dev = NULL;
31571+
31572+#ifdef CONFIG_GRKERNSEC_PROC_ADD
31573+#ifdef CONFIG_GRKERNSEC_PROC_USER
31574+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
31575+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
31576+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
31577+#endif
31578+#else
31579 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
31580+#endif
31581 proc_create("devices", 0, proc_bus_pci_dir,
31582 &proc_bus_pci_dev_operations);
31583 proc_initialized = 1;
31584diff -urNp linux-3.0.4/drivers/pci/xen-pcifront.c linux-3.0.4/drivers/pci/xen-pcifront.c
31585--- linux-3.0.4/drivers/pci/xen-pcifront.c 2011-07-21 22:17:23.000000000 -0400
31586+++ linux-3.0.4/drivers/pci/xen-pcifront.c 2011-08-23 21:48:14.000000000 -0400
31587@@ -187,6 +187,8 @@ static int pcifront_bus_read(struct pci_
31588 struct pcifront_sd *sd = bus->sysdata;
31589 struct pcifront_device *pdev = pcifront_get_pdev(sd);
31590
31591+ pax_track_stack();
31592+
31593 if (verbose_request)
31594 dev_info(&pdev->xdev->dev,
31595 "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
31596@@ -226,6 +228,8 @@ static int pcifront_bus_write(struct pci
31597 struct pcifront_sd *sd = bus->sysdata;
31598 struct pcifront_device *pdev = pcifront_get_pdev(sd);
31599
31600+ pax_track_stack();
31601+
31602 if (verbose_request)
31603 dev_info(&pdev->xdev->dev,
31604 "write dev=%04x:%02x:%02x.%01x - "
31605@@ -258,6 +262,8 @@ static int pci_frontend_enable_msix(stru
31606 struct pcifront_device *pdev = pcifront_get_pdev(sd);
31607 struct msi_desc *entry;
31608
31609+ pax_track_stack();
31610+
31611 if (nvec > SH_INFO_MAX_VEC) {
31612 dev_err(&dev->dev, "too much vector for pci frontend: %x."
31613 " Increase SH_INFO_MAX_VEC.\n", nvec);
31614@@ -309,6 +315,8 @@ static void pci_frontend_disable_msix(st
31615 struct pcifront_sd *sd = dev->bus->sysdata;
31616 struct pcifront_device *pdev = pcifront_get_pdev(sd);
31617
31618+ pax_track_stack();
31619+
31620 err = do_pci_op(pdev, &op);
31621
31622 /* What should do for error ? */
31623@@ -328,6 +336,8 @@ static int pci_frontend_enable_msi(struc
31624 struct pcifront_sd *sd = dev->bus->sysdata;
31625 struct pcifront_device *pdev = pcifront_get_pdev(sd);
31626
31627+ pax_track_stack();
31628+
31629 err = do_pci_op(pdev, &op);
31630 if (likely(!err)) {
31631 vector[0] = op.value;
31632diff -urNp linux-3.0.4/drivers/platform/x86/thinkpad_acpi.c linux-3.0.4/drivers/platform/x86/thinkpad_acpi.c
31633--- linux-3.0.4/drivers/platform/x86/thinkpad_acpi.c 2011-07-21 22:17:23.000000000 -0400
31634+++ linux-3.0.4/drivers/platform/x86/thinkpad_acpi.c 2011-08-23 21:47:55.000000000 -0400
31635@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
31636 return 0;
31637 }
31638
31639-void static hotkey_mask_warn_incomplete_mask(void)
31640+static void hotkey_mask_warn_incomplete_mask(void)
31641 {
31642 /* log only what the user can fix... */
31643 const u32 wantedmask = hotkey_driver_mask &
31644diff -urNp linux-3.0.4/drivers/pnp/pnpbios/bioscalls.c linux-3.0.4/drivers/pnp/pnpbios/bioscalls.c
31645--- linux-3.0.4/drivers/pnp/pnpbios/bioscalls.c 2011-07-21 22:17:23.000000000 -0400
31646+++ linux-3.0.4/drivers/pnp/pnpbios/bioscalls.c 2011-08-23 21:47:55.000000000 -0400
31647@@ -59,7 +59,7 @@ do { \
31648 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
31649 } while(0)
31650
31651-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
31652+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
31653 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
31654
31655 /*
31656@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func
31657
31658 cpu = get_cpu();
31659 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
31660+
31661+ pax_open_kernel();
31662 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
31663+ pax_close_kernel();
31664
31665 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
31666 spin_lock_irqsave(&pnp_bios_lock, flags);
31667@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func
31668 :"memory");
31669 spin_unlock_irqrestore(&pnp_bios_lock, flags);
31670
31671+ pax_open_kernel();
31672 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
31673+ pax_close_kernel();
31674+
31675 put_cpu();
31676
31677 /* If we get here and this is set then the PnP BIOS faulted on us. */
31678@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 n
31679 return status;
31680 }
31681
31682-void pnpbios_calls_init(union pnp_bios_install_struct *header)
31683+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
31684 {
31685 int i;
31686
31687@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_i
31688 pnp_bios_callpoint.offset = header->fields.pm16offset;
31689 pnp_bios_callpoint.segment = PNP_CS16;
31690
31691+ pax_open_kernel();
31692+
31693 for_each_possible_cpu(i) {
31694 struct desc_struct *gdt = get_cpu_gdt_table(i);
31695 if (!gdt)
31696@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_i
31697 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
31698 (unsigned long)__va(header->fields.pm16dseg));
31699 }
31700+
31701+ pax_close_kernel();
31702 }
31703diff -urNp linux-3.0.4/drivers/pnp/resource.c linux-3.0.4/drivers/pnp/resource.c
31704--- linux-3.0.4/drivers/pnp/resource.c 2011-07-21 22:17:23.000000000 -0400
31705+++ linux-3.0.4/drivers/pnp/resource.c 2011-08-23 21:47:55.000000000 -0400
31706@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
31707 return 1;
31708
31709 /* check if the resource is valid */
31710- if (*irq < 0 || *irq > 15)
31711+ if (*irq > 15)
31712 return 0;
31713
31714 /* check if the resource is reserved */
31715@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
31716 return 1;
31717
31718 /* check if the resource is valid */
31719- if (*dma < 0 || *dma == 4 || *dma > 7)
31720+ if (*dma == 4 || *dma > 7)
31721 return 0;
31722
31723 /* check if the resource is reserved */
31724diff -urNp linux-3.0.4/drivers/power/bq27x00_battery.c linux-3.0.4/drivers/power/bq27x00_battery.c
31725--- linux-3.0.4/drivers/power/bq27x00_battery.c 2011-07-21 22:17:23.000000000 -0400
31726+++ linux-3.0.4/drivers/power/bq27x00_battery.c 2011-08-23 21:47:55.000000000 -0400
31727@@ -67,7 +67,7 @@
31728 struct bq27x00_device_info;
31729 struct bq27x00_access_methods {
31730 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
31731-};
31732+} __no_const;
31733
31734 enum bq27x00_chip { BQ27000, BQ27500 };
31735
31736diff -urNp linux-3.0.4/drivers/regulator/max8660.c linux-3.0.4/drivers/regulator/max8660.c
31737--- linux-3.0.4/drivers/regulator/max8660.c 2011-07-21 22:17:23.000000000 -0400
31738+++ linux-3.0.4/drivers/regulator/max8660.c 2011-08-23 21:47:55.000000000 -0400
31739@@ -383,8 +383,10 @@ static int __devinit max8660_probe(struc
31740 max8660->shadow_regs[MAX8660_OVER1] = 5;
31741 } else {
31742 /* Otherwise devices can be toggled via software */
31743- max8660_dcdc_ops.enable = max8660_dcdc_enable;
31744- max8660_dcdc_ops.disable = max8660_dcdc_disable;
31745+ pax_open_kernel();
31746+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
31747+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
31748+ pax_close_kernel();
31749 }
31750
31751 /*
31752diff -urNp linux-3.0.4/drivers/regulator/mc13892-regulator.c linux-3.0.4/drivers/regulator/mc13892-regulator.c
31753--- linux-3.0.4/drivers/regulator/mc13892-regulator.c 2011-07-21 22:17:23.000000000 -0400
31754+++ linux-3.0.4/drivers/regulator/mc13892-regulator.c 2011-08-23 21:47:55.000000000 -0400
31755@@ -564,10 +564,12 @@ static int __devinit mc13892_regulator_p
31756 }
31757 mc13xxx_unlock(mc13892);
31758
31759- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
31760+ pax_open_kernel();
31761+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
31762 = mc13892_vcam_set_mode;
31763- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
31764+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
31765 = mc13892_vcam_get_mode;
31766+ pax_close_kernel();
31767 for (i = 0; i < pdata->num_regulators; i++) {
31768 init_data = &pdata->regulators[i];
31769 priv->regulators[i] = regulator_register(
31770diff -urNp linux-3.0.4/drivers/rtc/rtc-dev.c linux-3.0.4/drivers/rtc/rtc-dev.c
31771--- linux-3.0.4/drivers/rtc/rtc-dev.c 2011-07-21 22:17:23.000000000 -0400
31772+++ linux-3.0.4/drivers/rtc/rtc-dev.c 2011-08-23 21:48:14.000000000 -0400
31773@@ -14,6 +14,7 @@
31774 #include <linux/module.h>
31775 #include <linux/rtc.h>
31776 #include <linux/sched.h>
31777+#include <linux/grsecurity.h>
31778 #include "rtc-core.h"
31779
31780 static dev_t rtc_devt;
31781@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *f
31782 if (copy_from_user(&tm, uarg, sizeof(tm)))
31783 return -EFAULT;
31784
31785+ gr_log_timechange();
31786+
31787 return rtc_set_time(rtc, &tm);
31788
31789 case RTC_PIE_ON:
31790diff -urNp linux-3.0.4/drivers/scsi/aacraid/aacraid.h linux-3.0.4/drivers/scsi/aacraid/aacraid.h
31791--- linux-3.0.4/drivers/scsi/aacraid/aacraid.h 2011-07-21 22:17:23.000000000 -0400
31792+++ linux-3.0.4/drivers/scsi/aacraid/aacraid.h 2011-08-23 21:47:55.000000000 -0400
31793@@ -492,7 +492,7 @@ struct adapter_ops
31794 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
31795 /* Administrative operations */
31796 int (*adapter_comm)(struct aac_dev * dev, int comm);
31797-};
31798+} __no_const;
31799
31800 /*
31801 * Define which interrupt handler needs to be installed
31802diff -urNp linux-3.0.4/drivers/scsi/aacraid/commctrl.c linux-3.0.4/drivers/scsi/aacraid/commctrl.c
31803--- linux-3.0.4/drivers/scsi/aacraid/commctrl.c 2011-07-21 22:17:23.000000000 -0400
31804+++ linux-3.0.4/drivers/scsi/aacraid/commctrl.c 2011-08-23 21:48:14.000000000 -0400
31805@@ -482,6 +482,7 @@ static int aac_send_raw_srb(struct aac_d
31806 u32 actual_fibsize64, actual_fibsize = 0;
31807 int i;
31808
31809+ pax_track_stack();
31810
31811 if (dev->in_reset) {
31812 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
31813diff -urNp linux-3.0.4/drivers/scsi/bfa/bfad.c linux-3.0.4/drivers/scsi/bfa/bfad.c
31814--- linux-3.0.4/drivers/scsi/bfa/bfad.c 2011-07-21 22:17:23.000000000 -0400
31815+++ linux-3.0.4/drivers/scsi/bfa/bfad.c 2011-08-23 21:48:14.000000000 -0400
31816@@ -1032,6 +1032,8 @@ bfad_start_ops(struct bfad_s *bfad) {
31817 struct bfad_vport_s *vport, *vport_new;
31818 struct bfa_fcs_driver_info_s driver_info;
31819
31820+ pax_track_stack();
31821+
31822 /* Fill the driver_info info to fcs*/
31823 memset(&driver_info, 0, sizeof(driver_info));
31824 strncpy(driver_info.version, BFAD_DRIVER_VERSION,
31825diff -urNp linux-3.0.4/drivers/scsi/bfa/bfa_fcs_lport.c linux-3.0.4/drivers/scsi/bfa/bfa_fcs_lport.c
31826--- linux-3.0.4/drivers/scsi/bfa/bfa_fcs_lport.c 2011-07-21 22:17:23.000000000 -0400
31827+++ linux-3.0.4/drivers/scsi/bfa/bfa_fcs_lport.c 2011-08-23 21:48:14.000000000 -0400
31828@@ -1559,6 +1559,8 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struc
31829 u16 len, count;
31830 u16 templen;
31831
31832+ pax_track_stack();
31833+
31834 /*
31835 * get hba attributes
31836 */
31837@@ -1836,6 +1838,8 @@ bfa_fcs_lport_fdmi_build_portattr_block(
31838 u8 count = 0;
31839 u16 templen;
31840
31841+ pax_track_stack();
31842+
31843 /*
31844 * get port attributes
31845 */
31846diff -urNp linux-3.0.4/drivers/scsi/bfa/bfa_fcs_rport.c linux-3.0.4/drivers/scsi/bfa/bfa_fcs_rport.c
31847--- linux-3.0.4/drivers/scsi/bfa/bfa_fcs_rport.c 2011-07-21 22:17:23.000000000 -0400
31848+++ linux-3.0.4/drivers/scsi/bfa/bfa_fcs_rport.c 2011-08-23 21:48:14.000000000 -0400
31849@@ -1844,6 +1844,8 @@ bfa_fcs_rport_process_rpsc(struct bfa_fc
31850 struct fc_rpsc_speed_info_s speeds;
31851 struct bfa_port_attr_s pport_attr;
31852
31853+ pax_track_stack();
31854+
31855 bfa_trc(port->fcs, rx_fchs->s_id);
31856 bfa_trc(port->fcs, rx_fchs->d_id);
31857
31858diff -urNp linux-3.0.4/drivers/scsi/bfa/bfa.h linux-3.0.4/drivers/scsi/bfa/bfa.h
31859--- linux-3.0.4/drivers/scsi/bfa/bfa.h 2011-07-21 22:17:23.000000000 -0400
31860+++ linux-3.0.4/drivers/scsi/bfa/bfa.h 2011-08-23 21:47:55.000000000 -0400
31861@@ -238,7 +238,7 @@ struct bfa_hwif_s {
31862 u32 *nvecs, u32 *maxvec);
31863 void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
31864 u32 *end);
31865-};
31866+} __no_const;
31867 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
31868
31869 struct bfa_iocfc_s {
31870diff -urNp linux-3.0.4/drivers/scsi/bfa/bfa_ioc.h linux-3.0.4/drivers/scsi/bfa/bfa_ioc.h
31871--- linux-3.0.4/drivers/scsi/bfa/bfa_ioc.h 2011-07-21 22:17:23.000000000 -0400
31872+++ linux-3.0.4/drivers/scsi/bfa/bfa_ioc.h 2011-08-23 21:47:55.000000000 -0400
31873@@ -196,7 +196,7 @@ struct bfa_ioc_cbfn_s {
31874 bfa_ioc_disable_cbfn_t disable_cbfn;
31875 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
31876 bfa_ioc_reset_cbfn_t reset_cbfn;
31877-};
31878+} __no_const;
31879
31880 /*
31881 * Heartbeat failure notification queue element.
31882@@ -268,7 +268,7 @@ struct bfa_ioc_hwif_s {
31883 void (*ioc_sync_leave) (struct bfa_ioc_s *ioc);
31884 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
31885 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
31886-};
31887+} __no_const;
31888
31889 #define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
31890 #define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id)
31891diff -urNp linux-3.0.4/drivers/scsi/BusLogic.c linux-3.0.4/drivers/scsi/BusLogic.c
31892--- linux-3.0.4/drivers/scsi/BusLogic.c 2011-07-21 22:17:23.000000000 -0400
31893+++ linux-3.0.4/drivers/scsi/BusLogic.c 2011-08-23 21:48:14.000000000 -0400
31894@@ -962,6 +962,8 @@ static int __init BusLogic_InitializeFla
31895 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
31896 *PrototypeHostAdapter)
31897 {
31898+ pax_track_stack();
31899+
31900 /*
31901 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
31902 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
31903diff -urNp linux-3.0.4/drivers/scsi/dpt_i2o.c linux-3.0.4/drivers/scsi/dpt_i2o.c
31904--- linux-3.0.4/drivers/scsi/dpt_i2o.c 2011-07-21 22:17:23.000000000 -0400
31905+++ linux-3.0.4/drivers/scsi/dpt_i2o.c 2011-08-23 21:48:14.000000000 -0400
31906@@ -1811,6 +1811,8 @@ static int adpt_i2o_passthru(adpt_hba* p
31907 dma_addr_t addr;
31908 ulong flags = 0;
31909
31910+ pax_track_stack();
31911+
31912 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
31913 // get user msg size in u32s
31914 if(get_user(size, &user_msg[0])){
31915@@ -2317,6 +2319,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
31916 s32 rcode;
31917 dma_addr_t addr;
31918
31919+ pax_track_stack();
31920+
31921 memset(msg, 0 , sizeof(msg));
31922 len = scsi_bufflen(cmd);
31923 direction = 0x00000000;
31924diff -urNp linux-3.0.4/drivers/scsi/eata.c linux-3.0.4/drivers/scsi/eata.c
31925--- linux-3.0.4/drivers/scsi/eata.c 2011-07-21 22:17:23.000000000 -0400
31926+++ linux-3.0.4/drivers/scsi/eata.c 2011-08-23 21:48:14.000000000 -0400
31927@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
31928 struct hostdata *ha;
31929 char name[16];
31930
31931+ pax_track_stack();
31932+
31933 sprintf(name, "%s%d", driver_name, j);
31934
31935 if (!request_region(port_base, REGION_SIZE, driver_name)) {
31936diff -urNp linux-3.0.4/drivers/scsi/fcoe/fcoe_ctlr.c linux-3.0.4/drivers/scsi/fcoe/fcoe_ctlr.c
31937--- linux-3.0.4/drivers/scsi/fcoe/fcoe_ctlr.c 2011-07-21 22:17:23.000000000 -0400
31938+++ linux-3.0.4/drivers/scsi/fcoe/fcoe_ctlr.c 2011-08-23 21:48:14.000000000 -0400
31939@@ -2503,6 +2503,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe
31940 } buf;
31941 int rc;
31942
31943+ pax_track_stack();
31944+
31945 fiph = (struct fip_header *)skb->data;
31946 sub = fiph->fip_subcode;
31947
31948diff -urNp linux-3.0.4/drivers/scsi/gdth.c linux-3.0.4/drivers/scsi/gdth.c
31949--- linux-3.0.4/drivers/scsi/gdth.c 2011-07-21 22:17:23.000000000 -0400
31950+++ linux-3.0.4/drivers/scsi/gdth.c 2011-08-23 21:48:14.000000000 -0400
31951@@ -4107,6 +4107,8 @@ static int ioc_lockdrv(void __user *arg)
31952 unsigned long flags;
31953 gdth_ha_str *ha;
31954
31955+ pax_track_stack();
31956+
31957 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
31958 return -EFAULT;
31959 ha = gdth_find_ha(ldrv.ionode);
31960@@ -4139,6 +4141,8 @@ static int ioc_resetdrv(void __user *arg
31961 gdth_ha_str *ha;
31962 int rval;
31963
31964+ pax_track_stack();
31965+
31966 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
31967 res.number >= MAX_HDRIVES)
31968 return -EFAULT;
31969@@ -4174,6 +4178,8 @@ static int ioc_general(void __user *arg,
31970 gdth_ha_str *ha;
31971 int rval;
31972
31973+ pax_track_stack();
31974+
31975 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
31976 return -EFAULT;
31977 ha = gdth_find_ha(gen.ionode);
31978@@ -4642,6 +4648,9 @@ static void gdth_flush(gdth_ha_str *ha)
31979 int i;
31980 gdth_cmd_str gdtcmd;
31981 char cmnd[MAX_COMMAND_SIZE];
31982+
31983+ pax_track_stack();
31984+
31985 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
31986
31987 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
31988diff -urNp linux-3.0.4/drivers/scsi/gdth_proc.c linux-3.0.4/drivers/scsi/gdth_proc.c
31989--- linux-3.0.4/drivers/scsi/gdth_proc.c 2011-07-21 22:17:23.000000000 -0400
31990+++ linux-3.0.4/drivers/scsi/gdth_proc.c 2011-08-23 21:48:14.000000000 -0400
31991@@ -47,6 +47,9 @@ static int gdth_set_asc_info(struct Scsi
31992 u64 paddr;
31993
31994 char cmnd[MAX_COMMAND_SIZE];
31995+
31996+ pax_track_stack();
31997+
31998 memset(cmnd, 0xff, 12);
31999 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
32000
32001@@ -175,6 +178,8 @@ static int gdth_get_info(char *buffer,ch
32002 gdth_hget_str *phg;
32003 char cmnd[MAX_COMMAND_SIZE];
32004
32005+ pax_track_stack();
32006+
32007 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
32008 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
32009 if (!gdtcmd || !estr)
32010diff -urNp linux-3.0.4/drivers/scsi/hosts.c linux-3.0.4/drivers/scsi/hosts.c
32011--- linux-3.0.4/drivers/scsi/hosts.c 2011-07-21 22:17:23.000000000 -0400
32012+++ linux-3.0.4/drivers/scsi/hosts.c 2011-08-23 21:47:55.000000000 -0400
32013@@ -42,7 +42,7 @@
32014 #include "scsi_logging.h"
32015
32016
32017-static atomic_t scsi_host_next_hn; /* host_no for next new host */
32018+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
32019
32020
32021 static void scsi_host_cls_release(struct device *dev)
32022@@ -354,7 +354,7 @@ struct Scsi_Host *scsi_host_alloc(struct
32023 * subtract one because we increment first then return, but we need to
32024 * know what the next host number was before increment
32025 */
32026- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
32027+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
32028 shost->dma_channel = 0xff;
32029
32030 /* These three are default values which can be overridden */
32031diff -urNp linux-3.0.4/drivers/scsi/hpsa.c linux-3.0.4/drivers/scsi/hpsa.c
32032--- linux-3.0.4/drivers/scsi/hpsa.c 2011-07-21 22:17:23.000000000 -0400
32033+++ linux-3.0.4/drivers/scsi/hpsa.c 2011-08-23 21:47:55.000000000 -0400
32034@@ -498,7 +498,7 @@ static inline u32 next_command(struct ct
32035 u32 a;
32036
32037 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
32038- return h->access.command_completed(h);
32039+ return h->access->command_completed(h);
32040
32041 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
32042 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
32043@@ -2938,7 +2938,7 @@ static void start_io(struct ctlr_info *h
32044 while (!list_empty(&h->reqQ)) {
32045 c = list_entry(h->reqQ.next, struct CommandList, list);
32046 /* can't do anything if fifo is full */
32047- if ((h->access.fifo_full(h))) {
32048+ if ((h->access->fifo_full(h))) {
32049 dev_warn(&h->pdev->dev, "fifo full\n");
32050 break;
32051 }
32052@@ -2948,7 +2948,7 @@ static void start_io(struct ctlr_info *h
32053 h->Qdepth--;
32054
32055 /* Tell the controller execute command */
32056- h->access.submit_command(h, c);
32057+ h->access->submit_command(h, c);
32058
32059 /* Put job onto the completed Q */
32060 addQ(&h->cmpQ, c);
32061@@ -2957,17 +2957,17 @@ static void start_io(struct ctlr_info *h
32062
32063 static inline unsigned long get_next_completion(struct ctlr_info *h)
32064 {
32065- return h->access.command_completed(h);
32066+ return h->access->command_completed(h);
32067 }
32068
32069 static inline bool interrupt_pending(struct ctlr_info *h)
32070 {
32071- return h->access.intr_pending(h);
32072+ return h->access->intr_pending(h);
32073 }
32074
32075 static inline long interrupt_not_for_us(struct ctlr_info *h)
32076 {
32077- return (h->access.intr_pending(h) == 0) ||
32078+ return (h->access->intr_pending(h) == 0) ||
32079 (h->interrupts_enabled == 0);
32080 }
32081
32082@@ -3857,7 +3857,7 @@ static int __devinit hpsa_pci_init(struc
32083 if (prod_index < 0)
32084 return -ENODEV;
32085 h->product_name = products[prod_index].product_name;
32086- h->access = *(products[prod_index].access);
32087+ h->access = products[prod_index].access;
32088
32089 if (hpsa_board_disabled(h->pdev)) {
32090 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
32091@@ -4134,7 +4134,7 @@ reinit_after_soft_reset:
32092 }
32093
32094 /* make sure the board interrupts are off */
32095- h->access.set_intr_mask(h, HPSA_INTR_OFF);
32096+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
32097
32098 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
32099 goto clean2;
32100@@ -4168,7 +4168,7 @@ reinit_after_soft_reset:
32101 * fake ones to scoop up any residual completions.
32102 */
32103 spin_lock_irqsave(&h->lock, flags);
32104- h->access.set_intr_mask(h, HPSA_INTR_OFF);
32105+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
32106 spin_unlock_irqrestore(&h->lock, flags);
32107 free_irq(h->intr[h->intr_mode], h);
32108 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
32109@@ -4187,9 +4187,9 @@ reinit_after_soft_reset:
32110 dev_info(&h->pdev->dev, "Board READY.\n");
32111 dev_info(&h->pdev->dev,
32112 "Waiting for stale completions to drain.\n");
32113- h->access.set_intr_mask(h, HPSA_INTR_ON);
32114+ h->access->set_intr_mask(h, HPSA_INTR_ON);
32115 msleep(10000);
32116- h->access.set_intr_mask(h, HPSA_INTR_OFF);
32117+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
32118
32119 rc = controller_reset_failed(h->cfgtable);
32120 if (rc)
32121@@ -4210,7 +4210,7 @@ reinit_after_soft_reset:
32122 }
32123
32124 /* Turn the interrupts on so we can service requests */
32125- h->access.set_intr_mask(h, HPSA_INTR_ON);
32126+ h->access->set_intr_mask(h, HPSA_INTR_ON);
32127
32128 hpsa_hba_inquiry(h);
32129 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
32130@@ -4263,7 +4263,7 @@ static void hpsa_shutdown(struct pci_dev
32131 * To write all data in the battery backed cache to disks
32132 */
32133 hpsa_flush_cache(h);
32134- h->access.set_intr_mask(h, HPSA_INTR_OFF);
32135+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
32136 free_irq(h->intr[h->intr_mode], h);
32137 #ifdef CONFIG_PCI_MSI
32138 if (h->msix_vector)
32139@@ -4426,7 +4426,7 @@ static __devinit void hpsa_enter_perform
32140 return;
32141 }
32142 /* Change the access methods to the performant access methods */
32143- h->access = SA5_performant_access;
32144+ h->access = &SA5_performant_access;
32145 h->transMethod = CFGTBL_Trans_Performant;
32146 }
32147
32148diff -urNp linux-3.0.4/drivers/scsi/hpsa.h linux-3.0.4/drivers/scsi/hpsa.h
32149--- linux-3.0.4/drivers/scsi/hpsa.h 2011-09-02 18:11:21.000000000 -0400
32150+++ linux-3.0.4/drivers/scsi/hpsa.h 2011-08-23 21:47:55.000000000 -0400
32151@@ -73,7 +73,7 @@ struct ctlr_info {
32152 unsigned int msix_vector;
32153 unsigned int msi_vector;
32154 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
32155- struct access_method access;
32156+ struct access_method *access;
32157
32158 /* queue and queue Info */
32159 struct list_head reqQ;
32160diff -urNp linux-3.0.4/drivers/scsi/ips.h linux-3.0.4/drivers/scsi/ips.h
32161--- linux-3.0.4/drivers/scsi/ips.h 2011-07-21 22:17:23.000000000 -0400
32162+++ linux-3.0.4/drivers/scsi/ips.h 2011-08-23 21:47:55.000000000 -0400
32163@@ -1027,7 +1027,7 @@ typedef struct {
32164 int (*intr)(struct ips_ha *);
32165 void (*enableint)(struct ips_ha *);
32166 uint32_t (*statupd)(struct ips_ha *);
32167-} ips_hw_func_t;
32168+} __no_const ips_hw_func_t;
32169
32170 typedef struct ips_ha {
32171 uint8_t ha_id[IPS_MAX_CHANNELS+1];
32172diff -urNp linux-3.0.4/drivers/scsi/libfc/fc_exch.c linux-3.0.4/drivers/scsi/libfc/fc_exch.c
32173--- linux-3.0.4/drivers/scsi/libfc/fc_exch.c 2011-07-21 22:17:23.000000000 -0400
32174+++ linux-3.0.4/drivers/scsi/libfc/fc_exch.c 2011-08-23 21:47:55.000000000 -0400
32175@@ -105,12 +105,12 @@ struct fc_exch_mgr {
32176 * all together if not used XXX
32177 */
32178 struct {
32179- atomic_t no_free_exch;
32180- atomic_t no_free_exch_xid;
32181- atomic_t xid_not_found;
32182- atomic_t xid_busy;
32183- atomic_t seq_not_found;
32184- atomic_t non_bls_resp;
32185+ atomic_unchecked_t no_free_exch;
32186+ atomic_unchecked_t no_free_exch_xid;
32187+ atomic_unchecked_t xid_not_found;
32188+ atomic_unchecked_t xid_busy;
32189+ atomic_unchecked_t seq_not_found;
32190+ atomic_unchecked_t non_bls_resp;
32191 } stats;
32192 };
32193
32194@@ -700,7 +700,7 @@ static struct fc_exch *fc_exch_em_alloc(
32195 /* allocate memory for exchange */
32196 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
32197 if (!ep) {
32198- atomic_inc(&mp->stats.no_free_exch);
32199+ atomic_inc_unchecked(&mp->stats.no_free_exch);
32200 goto out;
32201 }
32202 memset(ep, 0, sizeof(*ep));
32203@@ -761,7 +761,7 @@ out:
32204 return ep;
32205 err:
32206 spin_unlock_bh(&pool->lock);
32207- atomic_inc(&mp->stats.no_free_exch_xid);
32208+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
32209 mempool_free(ep, mp->ep_pool);
32210 return NULL;
32211 }
32212@@ -906,7 +906,7 @@ static enum fc_pf_rjt_reason fc_seq_look
32213 xid = ntohs(fh->fh_ox_id); /* we originated exch */
32214 ep = fc_exch_find(mp, xid);
32215 if (!ep) {
32216- atomic_inc(&mp->stats.xid_not_found);
32217+ atomic_inc_unchecked(&mp->stats.xid_not_found);
32218 reject = FC_RJT_OX_ID;
32219 goto out;
32220 }
32221@@ -936,7 +936,7 @@ static enum fc_pf_rjt_reason fc_seq_look
32222 ep = fc_exch_find(mp, xid);
32223 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
32224 if (ep) {
32225- atomic_inc(&mp->stats.xid_busy);
32226+ atomic_inc_unchecked(&mp->stats.xid_busy);
32227 reject = FC_RJT_RX_ID;
32228 goto rel;
32229 }
32230@@ -947,7 +947,7 @@ static enum fc_pf_rjt_reason fc_seq_look
32231 }
32232 xid = ep->xid; /* get our XID */
32233 } else if (!ep) {
32234- atomic_inc(&mp->stats.xid_not_found);
32235+ atomic_inc_unchecked(&mp->stats.xid_not_found);
32236 reject = FC_RJT_RX_ID; /* XID not found */
32237 goto out;
32238 }
32239@@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_look
32240 } else {
32241 sp = &ep->seq;
32242 if (sp->id != fh->fh_seq_id) {
32243- atomic_inc(&mp->stats.seq_not_found);
32244+ atomic_inc_unchecked(&mp->stats.seq_not_found);
32245 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
32246 goto rel;
32247 }
32248@@ -1392,22 +1392,22 @@ static void fc_exch_recv_seq_resp(struct
32249
32250 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
32251 if (!ep) {
32252- atomic_inc(&mp->stats.xid_not_found);
32253+ atomic_inc_unchecked(&mp->stats.xid_not_found);
32254 goto out;
32255 }
32256 if (ep->esb_stat & ESB_ST_COMPLETE) {
32257- atomic_inc(&mp->stats.xid_not_found);
32258+ atomic_inc_unchecked(&mp->stats.xid_not_found);
32259 goto rel;
32260 }
32261 if (ep->rxid == FC_XID_UNKNOWN)
32262 ep->rxid = ntohs(fh->fh_rx_id);
32263 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
32264- atomic_inc(&mp->stats.xid_not_found);
32265+ atomic_inc_unchecked(&mp->stats.xid_not_found);
32266 goto rel;
32267 }
32268 if (ep->did != ntoh24(fh->fh_s_id) &&
32269 ep->did != FC_FID_FLOGI) {
32270- atomic_inc(&mp->stats.xid_not_found);
32271+ atomic_inc_unchecked(&mp->stats.xid_not_found);
32272 goto rel;
32273 }
32274 sof = fr_sof(fp);
32275@@ -1416,7 +1416,7 @@ static void fc_exch_recv_seq_resp(struct
32276 sp->ssb_stat |= SSB_ST_RESP;
32277 sp->id = fh->fh_seq_id;
32278 } else if (sp->id != fh->fh_seq_id) {
32279- atomic_inc(&mp->stats.seq_not_found);
32280+ atomic_inc_unchecked(&mp->stats.seq_not_found);
32281 goto rel;
32282 }
32283
32284@@ -1480,9 +1480,9 @@ static void fc_exch_recv_resp(struct fc_
32285 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
32286
32287 if (!sp)
32288- atomic_inc(&mp->stats.xid_not_found);
32289+ atomic_inc_unchecked(&mp->stats.xid_not_found);
32290 else
32291- atomic_inc(&mp->stats.non_bls_resp);
32292+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
32293
32294 fc_frame_free(fp);
32295 }
32296diff -urNp linux-3.0.4/drivers/scsi/libsas/sas_ata.c linux-3.0.4/drivers/scsi/libsas/sas_ata.c
32297--- linux-3.0.4/drivers/scsi/libsas/sas_ata.c 2011-07-21 22:17:23.000000000 -0400
32298+++ linux-3.0.4/drivers/scsi/libsas/sas_ata.c 2011-08-23 21:47:55.000000000 -0400
32299@@ -368,7 +368,7 @@ static struct ata_port_operations sas_sa
32300 .postreset = ata_std_postreset,
32301 .error_handler = ata_std_error_handler,
32302 .post_internal_cmd = sas_ata_post_internal,
32303- .qc_defer = ata_std_qc_defer,
32304+ .qc_defer = ata_std_qc_defer,
32305 .qc_prep = ata_noop_qc_prep,
32306 .qc_issue = sas_ata_qc_issue,
32307 .qc_fill_rtf = sas_ata_qc_fill_rtf,
32308diff -urNp linux-3.0.4/drivers/scsi/lpfc/lpfc_debugfs.c linux-3.0.4/drivers/scsi/lpfc/lpfc_debugfs.c
32309--- linux-3.0.4/drivers/scsi/lpfc/lpfc_debugfs.c 2011-07-21 22:17:23.000000000 -0400
32310+++ linux-3.0.4/drivers/scsi/lpfc/lpfc_debugfs.c 2011-08-23 21:48:14.000000000 -0400
32311@@ -104,7 +104,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_
32312
32313 #include <linux/debugfs.h>
32314
32315-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
32316+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
32317 static unsigned long lpfc_debugfs_start_time = 0L;
32318
32319 /* iDiag */
32320@@ -141,7 +141,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
32321 lpfc_debugfs_enable = 0;
32322
32323 len = 0;
32324- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
32325+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
32326 (lpfc_debugfs_max_disc_trc - 1);
32327 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
32328 dtp = vport->disc_trc + i;
32329@@ -202,7 +202,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
32330 lpfc_debugfs_enable = 0;
32331
32332 len = 0;
32333- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
32334+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
32335 (lpfc_debugfs_max_slow_ring_trc - 1);
32336 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
32337 dtp = phba->slow_ring_trc + i;
32338@@ -380,6 +380,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
32339 uint32_t *ptr;
32340 char buffer[1024];
32341
32342+ pax_track_stack();
32343+
32344 off = 0;
32345 spin_lock_irq(&phba->hbalock);
32346
32347@@ -617,14 +619,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
32348 !vport || !vport->disc_trc)
32349 return;
32350
32351- index = atomic_inc_return(&vport->disc_trc_cnt) &
32352+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
32353 (lpfc_debugfs_max_disc_trc - 1);
32354 dtp = vport->disc_trc + index;
32355 dtp->fmt = fmt;
32356 dtp->data1 = data1;
32357 dtp->data2 = data2;
32358 dtp->data3 = data3;
32359- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
32360+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
32361 dtp->jif = jiffies;
32362 #endif
32363 return;
32364@@ -655,14 +657,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
32365 !phba || !phba->slow_ring_trc)
32366 return;
32367
32368- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
32369+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
32370 (lpfc_debugfs_max_slow_ring_trc - 1);
32371 dtp = phba->slow_ring_trc + index;
32372 dtp->fmt = fmt;
32373 dtp->data1 = data1;
32374 dtp->data2 = data2;
32375 dtp->data3 = data3;
32376- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
32377+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
32378 dtp->jif = jiffies;
32379 #endif
32380 return;
32381@@ -2606,7 +2608,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
32382 "slow_ring buffer\n");
32383 goto debug_failed;
32384 }
32385- atomic_set(&phba->slow_ring_trc_cnt, 0);
32386+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
32387 memset(phba->slow_ring_trc, 0,
32388 (sizeof(struct lpfc_debugfs_trc) *
32389 lpfc_debugfs_max_slow_ring_trc));
32390@@ -2652,7 +2654,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
32391 "buffer\n");
32392 goto debug_failed;
32393 }
32394- atomic_set(&vport->disc_trc_cnt, 0);
32395+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
32396
32397 snprintf(name, sizeof(name), "discovery_trace");
32398 vport->debug_disc_trc =
32399diff -urNp linux-3.0.4/drivers/scsi/lpfc/lpfc.h linux-3.0.4/drivers/scsi/lpfc/lpfc.h
32400--- linux-3.0.4/drivers/scsi/lpfc/lpfc.h 2011-07-21 22:17:23.000000000 -0400
32401+++ linux-3.0.4/drivers/scsi/lpfc/lpfc.h 2011-08-23 21:47:55.000000000 -0400
32402@@ -420,7 +420,7 @@ struct lpfc_vport {
32403 struct dentry *debug_nodelist;
32404 struct dentry *vport_debugfs_root;
32405 struct lpfc_debugfs_trc *disc_trc;
32406- atomic_t disc_trc_cnt;
32407+ atomic_unchecked_t disc_trc_cnt;
32408 #endif
32409 uint8_t stat_data_enabled;
32410 uint8_t stat_data_blocked;
32411@@ -826,8 +826,8 @@ struct lpfc_hba {
32412 struct timer_list fabric_block_timer;
32413 unsigned long bit_flags;
32414 #define FABRIC_COMANDS_BLOCKED 0
32415- atomic_t num_rsrc_err;
32416- atomic_t num_cmd_success;
32417+ atomic_unchecked_t num_rsrc_err;
32418+ atomic_unchecked_t num_cmd_success;
32419 unsigned long last_rsrc_error_time;
32420 unsigned long last_ramp_down_time;
32421 unsigned long last_ramp_up_time;
32422@@ -841,7 +841,7 @@ struct lpfc_hba {
32423 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
32424 struct dentry *debug_slow_ring_trc;
32425 struct lpfc_debugfs_trc *slow_ring_trc;
32426- atomic_t slow_ring_trc_cnt;
32427+ atomic_unchecked_t slow_ring_trc_cnt;
32428 /* iDiag debugfs sub-directory */
32429 struct dentry *idiag_root;
32430 struct dentry *idiag_pci_cfg;
32431diff -urNp linux-3.0.4/drivers/scsi/lpfc/lpfc_init.c linux-3.0.4/drivers/scsi/lpfc/lpfc_init.c
32432--- linux-3.0.4/drivers/scsi/lpfc/lpfc_init.c 2011-07-21 22:17:23.000000000 -0400
32433+++ linux-3.0.4/drivers/scsi/lpfc/lpfc_init.c 2011-08-23 21:47:56.000000000 -0400
32434@@ -9923,8 +9923,10 @@ lpfc_init(void)
32435 printk(LPFC_COPYRIGHT "\n");
32436
32437 if (lpfc_enable_npiv) {
32438- lpfc_transport_functions.vport_create = lpfc_vport_create;
32439- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
32440+ pax_open_kernel();
32441+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
32442+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
32443+ pax_close_kernel();
32444 }
32445 lpfc_transport_template =
32446 fc_attach_transport(&lpfc_transport_functions);
32447diff -urNp linux-3.0.4/drivers/scsi/lpfc/lpfc_scsi.c linux-3.0.4/drivers/scsi/lpfc/lpfc_scsi.c
32448--- linux-3.0.4/drivers/scsi/lpfc/lpfc_scsi.c 2011-07-21 22:17:23.000000000 -0400
32449+++ linux-3.0.4/drivers/scsi/lpfc/lpfc_scsi.c 2011-08-23 21:47:56.000000000 -0400
32450@@ -297,7 +297,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
32451 uint32_t evt_posted;
32452
32453 spin_lock_irqsave(&phba->hbalock, flags);
32454- atomic_inc(&phba->num_rsrc_err);
32455+ atomic_inc_unchecked(&phba->num_rsrc_err);
32456 phba->last_rsrc_error_time = jiffies;
32457
32458 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
32459@@ -338,7 +338,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
32460 unsigned long flags;
32461 struct lpfc_hba *phba = vport->phba;
32462 uint32_t evt_posted;
32463- atomic_inc(&phba->num_cmd_success);
32464+ atomic_inc_unchecked(&phba->num_cmd_success);
32465
32466 if (vport->cfg_lun_queue_depth <= queue_depth)
32467 return;
32468@@ -382,8 +382,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
32469 unsigned long num_rsrc_err, num_cmd_success;
32470 int i;
32471
32472- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
32473- num_cmd_success = atomic_read(&phba->num_cmd_success);
32474+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
32475+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
32476
32477 vports = lpfc_create_vport_work_array(phba);
32478 if (vports != NULL)
32479@@ -403,8 +403,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
32480 }
32481 }
32482 lpfc_destroy_vport_work_array(phba, vports);
32483- atomic_set(&phba->num_rsrc_err, 0);
32484- atomic_set(&phba->num_cmd_success, 0);
32485+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
32486+ atomic_set_unchecked(&phba->num_cmd_success, 0);
32487 }
32488
32489 /**
32490@@ -438,8 +438,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
32491 }
32492 }
32493 lpfc_destroy_vport_work_array(phba, vports);
32494- atomic_set(&phba->num_rsrc_err, 0);
32495- atomic_set(&phba->num_cmd_success, 0);
32496+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
32497+ atomic_set_unchecked(&phba->num_cmd_success, 0);
32498 }
32499
32500 /**
32501diff -urNp linux-3.0.4/drivers/scsi/megaraid/megaraid_mbox.c linux-3.0.4/drivers/scsi/megaraid/megaraid_mbox.c
32502--- linux-3.0.4/drivers/scsi/megaraid/megaraid_mbox.c 2011-07-21 22:17:23.000000000 -0400
32503+++ linux-3.0.4/drivers/scsi/megaraid/megaraid_mbox.c 2011-08-23 21:48:14.000000000 -0400
32504@@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
32505 int rval;
32506 int i;
32507
32508+ pax_track_stack();
32509+
32510 // Allocate memory for the base list of scb for management module.
32511 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
32512
32513diff -urNp linux-3.0.4/drivers/scsi/osd/osd_initiator.c linux-3.0.4/drivers/scsi/osd/osd_initiator.c
32514--- linux-3.0.4/drivers/scsi/osd/osd_initiator.c 2011-07-21 22:17:23.000000000 -0400
32515+++ linux-3.0.4/drivers/scsi/osd/osd_initiator.c 2011-08-23 21:48:14.000000000 -0400
32516@@ -97,6 +97,8 @@ static int _osd_get_print_system_info(st
32517 int nelem = ARRAY_SIZE(get_attrs), a = 0;
32518 int ret;
32519
32520+ pax_track_stack();
32521+
32522 or = osd_start_request(od, GFP_KERNEL);
32523 if (!or)
32524 return -ENOMEM;
32525diff -urNp linux-3.0.4/drivers/scsi/pmcraid.c linux-3.0.4/drivers/scsi/pmcraid.c
32526--- linux-3.0.4/drivers/scsi/pmcraid.c 2011-09-02 18:11:21.000000000 -0400
32527+++ linux-3.0.4/drivers/scsi/pmcraid.c 2011-08-23 21:47:56.000000000 -0400
32528@@ -201,8 +201,8 @@ static int pmcraid_slave_alloc(struct sc
32529 res->scsi_dev = scsi_dev;
32530 scsi_dev->hostdata = res;
32531 res->change_detected = 0;
32532- atomic_set(&res->read_failures, 0);
32533- atomic_set(&res->write_failures, 0);
32534+ atomic_set_unchecked(&res->read_failures, 0);
32535+ atomic_set_unchecked(&res->write_failures, 0);
32536 rc = 0;
32537 }
32538 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
32539@@ -2677,9 +2677,9 @@ static int pmcraid_error_handler(struct
32540
32541 /* If this was a SCSI read/write command keep count of errors */
32542 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
32543- atomic_inc(&res->read_failures);
32544+ atomic_inc_unchecked(&res->read_failures);
32545 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
32546- atomic_inc(&res->write_failures);
32547+ atomic_inc_unchecked(&res->write_failures);
32548
32549 if (!RES_IS_GSCSI(res->cfg_entry) &&
32550 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
32551@@ -3535,7 +3535,7 @@ static int pmcraid_queuecommand_lck(
32552 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
32553 * hrrq_id assigned here in queuecommand
32554 */
32555- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
32556+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
32557 pinstance->num_hrrq;
32558 cmd->cmd_done = pmcraid_io_done;
32559
32560@@ -3860,7 +3860,7 @@ static long pmcraid_ioctl_passthrough(
32561 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
32562 * hrrq_id assigned here in queuecommand
32563 */
32564- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
32565+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
32566 pinstance->num_hrrq;
32567
32568 if (request_size) {
32569@@ -4498,7 +4498,7 @@ static void pmcraid_worker_function(stru
32570
32571 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
32572 /* add resources only after host is added into system */
32573- if (!atomic_read(&pinstance->expose_resources))
32574+ if (!atomic_read_unchecked(&pinstance->expose_resources))
32575 return;
32576
32577 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
32578@@ -5332,8 +5332,8 @@ static int __devinit pmcraid_init_instan
32579 init_waitqueue_head(&pinstance->reset_wait_q);
32580
32581 atomic_set(&pinstance->outstanding_cmds, 0);
32582- atomic_set(&pinstance->last_message_id, 0);
32583- atomic_set(&pinstance->expose_resources, 0);
32584+ atomic_set_unchecked(&pinstance->last_message_id, 0);
32585+ atomic_set_unchecked(&pinstance->expose_resources, 0);
32586
32587 INIT_LIST_HEAD(&pinstance->free_res_q);
32588 INIT_LIST_HEAD(&pinstance->used_res_q);
32589@@ -6048,7 +6048,7 @@ static int __devinit pmcraid_probe(
32590 /* Schedule worker thread to handle CCN and take care of adding and
32591 * removing devices to OS
32592 */
32593- atomic_set(&pinstance->expose_resources, 1);
32594+ atomic_set_unchecked(&pinstance->expose_resources, 1);
32595 schedule_work(&pinstance->worker_q);
32596 return rc;
32597
32598diff -urNp linux-3.0.4/drivers/scsi/pmcraid.h linux-3.0.4/drivers/scsi/pmcraid.h
32599--- linux-3.0.4/drivers/scsi/pmcraid.h 2011-07-21 22:17:23.000000000 -0400
32600+++ linux-3.0.4/drivers/scsi/pmcraid.h 2011-08-23 21:47:56.000000000 -0400
32601@@ -749,7 +749,7 @@ struct pmcraid_instance {
32602 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
32603
32604 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
32605- atomic_t last_message_id;
32606+ atomic_unchecked_t last_message_id;
32607
32608 /* configuration table */
32609 struct pmcraid_config_table *cfg_table;
32610@@ -778,7 +778,7 @@ struct pmcraid_instance {
32611 atomic_t outstanding_cmds;
32612
32613 /* should add/delete resources to mid-layer now ?*/
32614- atomic_t expose_resources;
32615+ atomic_unchecked_t expose_resources;
32616
32617
32618
32619@@ -814,8 +814,8 @@ struct pmcraid_resource_entry {
32620 struct pmcraid_config_table_entry_ext cfg_entry_ext;
32621 };
32622 struct scsi_device *scsi_dev; /* Link scsi_device structure */
32623- atomic_t read_failures; /* count of failed READ commands */
32624- atomic_t write_failures; /* count of failed WRITE commands */
32625+ atomic_unchecked_t read_failures; /* count of failed READ commands */
32626+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
32627
32628 /* To indicate add/delete/modify during CCN */
32629 u8 change_detected;
32630diff -urNp linux-3.0.4/drivers/scsi/qla2xxx/qla_def.h linux-3.0.4/drivers/scsi/qla2xxx/qla_def.h
32631--- linux-3.0.4/drivers/scsi/qla2xxx/qla_def.h 2011-07-21 22:17:23.000000000 -0400
32632+++ linux-3.0.4/drivers/scsi/qla2xxx/qla_def.h 2011-08-23 21:47:56.000000000 -0400
32633@@ -2244,7 +2244,7 @@ struct isp_operations {
32634 int (*get_flash_version) (struct scsi_qla_host *, void *);
32635 int (*start_scsi) (srb_t *);
32636 int (*abort_isp) (struct scsi_qla_host *);
32637-};
32638+} __no_const;
32639
32640 /* MSI-X Support *************************************************************/
32641
32642diff -urNp linux-3.0.4/drivers/scsi/qla4xxx/ql4_def.h linux-3.0.4/drivers/scsi/qla4xxx/ql4_def.h
32643--- linux-3.0.4/drivers/scsi/qla4xxx/ql4_def.h 2011-07-21 22:17:23.000000000 -0400
32644+++ linux-3.0.4/drivers/scsi/qla4xxx/ql4_def.h 2011-08-23 21:47:56.000000000 -0400
32645@@ -256,7 +256,7 @@ struct ddb_entry {
32646 atomic_t retry_relogin_timer; /* Min Time between relogins
32647 * (4000 only) */
32648 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
32649- atomic_t relogin_retry_count; /* Num of times relogin has been
32650+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
32651 * retried */
32652
32653 uint16_t port;
32654diff -urNp linux-3.0.4/drivers/scsi/qla4xxx/ql4_init.c linux-3.0.4/drivers/scsi/qla4xxx/ql4_init.c
32655--- linux-3.0.4/drivers/scsi/qla4xxx/ql4_init.c 2011-07-21 22:17:23.000000000 -0400
32656+++ linux-3.0.4/drivers/scsi/qla4xxx/ql4_init.c 2011-08-23 21:47:56.000000000 -0400
32657@@ -680,7 +680,7 @@ static struct ddb_entry * qla4xxx_alloc_
32658 ddb_entry->fw_ddb_index = fw_ddb_index;
32659 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
32660 atomic_set(&ddb_entry->relogin_timer, 0);
32661- atomic_set(&ddb_entry->relogin_retry_count, 0);
32662+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
32663 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
32664 list_add_tail(&ddb_entry->list, &ha->ddb_list);
32665 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
32666@@ -1433,7 +1433,7 @@ int qla4xxx_process_ddb_changed(struct s
32667 if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) &&
32668 (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) {
32669 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
32670- atomic_set(&ddb_entry->relogin_retry_count, 0);
32671+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
32672 atomic_set(&ddb_entry->relogin_timer, 0);
32673 clear_bit(DF_RELOGIN, &ddb_entry->flags);
32674 iscsi_unblock_session(ddb_entry->sess);
32675diff -urNp linux-3.0.4/drivers/scsi/qla4xxx/ql4_os.c linux-3.0.4/drivers/scsi/qla4xxx/ql4_os.c
32676--- linux-3.0.4/drivers/scsi/qla4xxx/ql4_os.c 2011-07-21 22:17:23.000000000 -0400
32677+++ linux-3.0.4/drivers/scsi/qla4xxx/ql4_os.c 2011-08-23 21:47:56.000000000 -0400
32678@@ -811,13 +811,13 @@ static void qla4xxx_timer(struct scsi_ql
32679 ddb_entry->fw_ddb_device_state ==
32680 DDB_DS_SESSION_FAILED) {
32681 /* Reset retry relogin timer */
32682- atomic_inc(&ddb_entry->relogin_retry_count);
32683+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
32684 DEBUG2(printk("scsi%ld: ddb [%d] relogin"
32685 " timed out-retrying"
32686 " relogin (%d)\n",
32687 ha->host_no,
32688 ddb_entry->fw_ddb_index,
32689- atomic_read(&ddb_entry->
32690+ atomic_read_unchecked(&ddb_entry->
32691 relogin_retry_count))
32692 );
32693 start_dpc++;
32694diff -urNp linux-3.0.4/drivers/scsi/scsi.c linux-3.0.4/drivers/scsi/scsi.c
32695--- linux-3.0.4/drivers/scsi/scsi.c 2011-07-21 22:17:23.000000000 -0400
32696+++ linux-3.0.4/drivers/scsi/scsi.c 2011-08-23 21:47:56.000000000 -0400
32697@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
32698 unsigned long timeout;
32699 int rtn = 0;
32700
32701- atomic_inc(&cmd->device->iorequest_cnt);
32702+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
32703
32704 /* check if the device is still usable */
32705 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
32706diff -urNp linux-3.0.4/drivers/scsi/scsi_debug.c linux-3.0.4/drivers/scsi/scsi_debug.c
32707--- linux-3.0.4/drivers/scsi/scsi_debug.c 2011-07-21 22:17:23.000000000 -0400
32708+++ linux-3.0.4/drivers/scsi/scsi_debug.c 2011-08-23 21:48:14.000000000 -0400
32709@@ -1493,6 +1493,8 @@ static int resp_mode_select(struct scsi_
32710 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
32711 unsigned char *cmd = (unsigned char *)scp->cmnd;
32712
32713+ pax_track_stack();
32714+
32715 if ((errsts = check_readiness(scp, 1, devip)))
32716 return errsts;
32717 memset(arr, 0, sizeof(arr));
32718@@ -1590,6 +1592,8 @@ static int resp_log_sense(struct scsi_cm
32719 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
32720 unsigned char *cmd = (unsigned char *)scp->cmnd;
32721
32722+ pax_track_stack();
32723+
32724 if ((errsts = check_readiness(scp, 1, devip)))
32725 return errsts;
32726 memset(arr, 0, sizeof(arr));
32727diff -urNp linux-3.0.4/drivers/scsi/scsi_lib.c linux-3.0.4/drivers/scsi/scsi_lib.c
32728--- linux-3.0.4/drivers/scsi/scsi_lib.c 2011-09-02 18:11:21.000000000 -0400
32729+++ linux-3.0.4/drivers/scsi/scsi_lib.c 2011-08-23 21:47:56.000000000 -0400
32730@@ -1412,7 +1412,7 @@ static void scsi_kill_request(struct req
32731 shost = sdev->host;
32732 scsi_init_cmd_errh(cmd);
32733 cmd->result = DID_NO_CONNECT << 16;
32734- atomic_inc(&cmd->device->iorequest_cnt);
32735+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
32736
32737 /*
32738 * SCSI request completion path will do scsi_device_unbusy(),
32739@@ -1438,9 +1438,9 @@ static void scsi_softirq_done(struct req
32740
32741 INIT_LIST_HEAD(&cmd->eh_entry);
32742
32743- atomic_inc(&cmd->device->iodone_cnt);
32744+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
32745 if (cmd->result)
32746- atomic_inc(&cmd->device->ioerr_cnt);
32747+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
32748
32749 disposition = scsi_decide_disposition(cmd);
32750 if (disposition != SUCCESS &&
32751diff -urNp linux-3.0.4/drivers/scsi/scsi_sysfs.c linux-3.0.4/drivers/scsi/scsi_sysfs.c
32752--- linux-3.0.4/drivers/scsi/scsi_sysfs.c 2011-07-21 22:17:23.000000000 -0400
32753+++ linux-3.0.4/drivers/scsi/scsi_sysfs.c 2011-08-23 21:47:56.000000000 -0400
32754@@ -622,7 +622,7 @@ show_iostat_##field(struct device *dev,
32755 char *buf) \
32756 { \
32757 struct scsi_device *sdev = to_scsi_device(dev); \
32758- unsigned long long count = atomic_read(&sdev->field); \
32759+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
32760 return snprintf(buf, 20, "0x%llx\n", count); \
32761 } \
32762 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
32763diff -urNp linux-3.0.4/drivers/scsi/scsi_tgt_lib.c linux-3.0.4/drivers/scsi/scsi_tgt_lib.c
32764--- linux-3.0.4/drivers/scsi/scsi_tgt_lib.c 2011-07-21 22:17:23.000000000 -0400
32765+++ linux-3.0.4/drivers/scsi/scsi_tgt_lib.c 2011-10-06 04:17:55.000000000 -0400
32766@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct sc
32767 int err;
32768
32769 dprintk("%lx %u\n", uaddr, len);
32770- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
32771+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
32772 if (err) {
32773 /*
32774 * TODO: need to fixup sg_tablesize, max_segment_size,
32775diff -urNp linux-3.0.4/drivers/scsi/scsi_transport_fc.c linux-3.0.4/drivers/scsi/scsi_transport_fc.c
32776--- linux-3.0.4/drivers/scsi/scsi_transport_fc.c 2011-07-21 22:17:23.000000000 -0400
32777+++ linux-3.0.4/drivers/scsi/scsi_transport_fc.c 2011-08-23 21:47:56.000000000 -0400
32778@@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_
32779 * Netlink Infrastructure
32780 */
32781
32782-static atomic_t fc_event_seq;
32783+static atomic_unchecked_t fc_event_seq;
32784
32785 /**
32786 * fc_get_event_number - Obtain the next sequential FC event number
32787@@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
32788 u32
32789 fc_get_event_number(void)
32790 {
32791- return atomic_add_return(1, &fc_event_seq);
32792+ return atomic_add_return_unchecked(1, &fc_event_seq);
32793 }
32794 EXPORT_SYMBOL(fc_get_event_number);
32795
32796@@ -645,7 +645,7 @@ static __init int fc_transport_init(void
32797 {
32798 int error;
32799
32800- atomic_set(&fc_event_seq, 0);
32801+ atomic_set_unchecked(&fc_event_seq, 0);
32802
32803 error = transport_class_register(&fc_host_class);
32804 if (error)
32805@@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char
32806 char *cp;
32807
32808 *val = simple_strtoul(buf, &cp, 0);
32809- if ((*cp && (*cp != '\n')) || (*val < 0))
32810+ if (*cp && (*cp != '\n'))
32811 return -EINVAL;
32812 /*
32813 * Check for overflow; dev_loss_tmo is u32
32814diff -urNp linux-3.0.4/drivers/scsi/scsi_transport_iscsi.c linux-3.0.4/drivers/scsi/scsi_transport_iscsi.c
32815--- linux-3.0.4/drivers/scsi/scsi_transport_iscsi.c 2011-07-21 22:17:23.000000000 -0400
32816+++ linux-3.0.4/drivers/scsi/scsi_transport_iscsi.c 2011-08-23 21:47:56.000000000 -0400
32817@@ -83,7 +83,7 @@ struct iscsi_internal {
32818 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
32819 };
32820
32821-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
32822+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
32823 static struct workqueue_struct *iscsi_eh_timer_workq;
32824
32825 /*
32826@@ -761,7 +761,7 @@ int iscsi_add_session(struct iscsi_cls_s
32827 int err;
32828
32829 ihost = shost->shost_data;
32830- session->sid = atomic_add_return(1, &iscsi_session_nr);
32831+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
32832
32833 if (id == ISCSI_MAX_TARGET) {
32834 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
32835@@ -2200,7 +2200,7 @@ static __init int iscsi_transport_init(v
32836 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
32837 ISCSI_TRANSPORT_VERSION);
32838
32839- atomic_set(&iscsi_session_nr, 0);
32840+ atomic_set_unchecked(&iscsi_session_nr, 0);
32841
32842 err = class_register(&iscsi_transport_class);
32843 if (err)
32844diff -urNp linux-3.0.4/drivers/scsi/scsi_transport_srp.c linux-3.0.4/drivers/scsi/scsi_transport_srp.c
32845--- linux-3.0.4/drivers/scsi/scsi_transport_srp.c 2011-07-21 22:17:23.000000000 -0400
32846+++ linux-3.0.4/drivers/scsi/scsi_transport_srp.c 2011-08-23 21:47:56.000000000 -0400
32847@@ -33,7 +33,7 @@
32848 #include "scsi_transport_srp_internal.h"
32849
32850 struct srp_host_attrs {
32851- atomic_t next_port_id;
32852+ atomic_unchecked_t next_port_id;
32853 };
32854 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
32855
32856@@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
32857 struct Scsi_Host *shost = dev_to_shost(dev);
32858 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
32859
32860- atomic_set(&srp_host->next_port_id, 0);
32861+ atomic_set_unchecked(&srp_host->next_port_id, 0);
32862 return 0;
32863 }
32864
32865@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
32866 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
32867 rport->roles = ids->roles;
32868
32869- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
32870+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
32871 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
32872
32873 transport_setup_device(&rport->dev);
32874diff -urNp linux-3.0.4/drivers/scsi/sg.c linux-3.0.4/drivers/scsi/sg.c
32875--- linux-3.0.4/drivers/scsi/sg.c 2011-07-21 22:17:23.000000000 -0400
32876+++ linux-3.0.4/drivers/scsi/sg.c 2011-10-06 04:17:55.000000000 -0400
32877@@ -1075,7 +1075,7 @@ sg_ioctl(struct file *filp, unsigned int
32878 sdp->disk->disk_name,
32879 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
32880 NULL,
32881- (char *)arg);
32882+ (char __user *)arg);
32883 case BLKTRACESTART:
32884 return blk_trace_startstop(sdp->device->request_queue, 1);
32885 case BLKTRACESTOP:
32886@@ -2310,7 +2310,7 @@ struct sg_proc_leaf {
32887 const struct file_operations * fops;
32888 };
32889
32890-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
32891+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
32892 {"allow_dio", &adio_fops},
32893 {"debug", &debug_fops},
32894 {"def_reserved_size", &dressz_fops},
32895@@ -2325,7 +2325,7 @@ sg_proc_init(void)
32896 {
32897 int k, mask;
32898 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
32899- struct sg_proc_leaf * leaf;
32900+ const struct sg_proc_leaf * leaf;
32901
32902 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
32903 if (!sg_proc_sgp)
32904diff -urNp linux-3.0.4/drivers/scsi/sym53c8xx_2/sym_glue.c linux-3.0.4/drivers/scsi/sym53c8xx_2/sym_glue.c
32905--- linux-3.0.4/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-07-21 22:17:23.000000000 -0400
32906+++ linux-3.0.4/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-08-23 21:48:14.000000000 -0400
32907@@ -1756,6 +1756,8 @@ static int __devinit sym2_probe(struct p
32908 int do_iounmap = 0;
32909 int do_disable_device = 1;
32910
32911+ pax_track_stack();
32912+
32913 memset(&sym_dev, 0, sizeof(sym_dev));
32914 memset(&nvram, 0, sizeof(nvram));
32915 sym_dev.pdev = pdev;
32916diff -urNp linux-3.0.4/drivers/scsi/vmw_pvscsi.c linux-3.0.4/drivers/scsi/vmw_pvscsi.c
32917--- linux-3.0.4/drivers/scsi/vmw_pvscsi.c 2011-07-21 22:17:23.000000000 -0400
32918+++ linux-3.0.4/drivers/scsi/vmw_pvscsi.c 2011-08-23 21:48:14.000000000 -0400
32919@@ -447,6 +447,8 @@ static void pvscsi_setup_all_rings(const
32920 dma_addr_t base;
32921 unsigned i;
32922
32923+ pax_track_stack();
32924+
32925 cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
32926 cmd.reqRingNumPages = adapter->req_pages;
32927 cmd.cmpRingNumPages = adapter->cmp_pages;
32928diff -urNp linux-3.0.4/drivers/spi/spi.c linux-3.0.4/drivers/spi/spi.c
32929--- linux-3.0.4/drivers/spi/spi.c 2011-07-21 22:17:23.000000000 -0400
32930+++ linux-3.0.4/drivers/spi/spi.c 2011-08-23 21:47:56.000000000 -0400
32931@@ -1023,7 +1023,7 @@ int spi_bus_unlock(struct spi_master *ma
32932 EXPORT_SYMBOL_GPL(spi_bus_unlock);
32933
32934 /* portable code must never pass more than 32 bytes */
32935-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
32936+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
32937
32938 static u8 *buf;
32939
32940diff -urNp linux-3.0.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c linux-3.0.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c
32941--- linux-3.0.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-09-02 18:11:21.000000000 -0400
32942+++ linux-3.0.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-08-23 21:48:14.000000000 -0400
32943@@ -362,7 +362,7 @@ static struct ar_cookie s_ar_cookie_mem[
32944 (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0))
32945
32946
32947-static struct net_device_ops ar6000_netdev_ops = {
32948+static net_device_ops_no_const ar6000_netdev_ops = {
32949 .ndo_init = NULL,
32950 .ndo_open = ar6000_open,
32951 .ndo_stop = ar6000_close,
32952diff -urNp linux-3.0.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h linux-3.0.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
32953--- linux-3.0.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-07-21 22:17:23.000000000 -0400
32954+++ linux-3.0.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-08-23 21:47:56.000000000 -0400
32955@@ -30,7 +30,7 @@ typedef bool (*ar6k_pal_recv_pkt_t)(void
32956 typedef struct ar6k_pal_config_s
32957 {
32958 ar6k_pal_recv_pkt_t fpar6k_pal_recv_pkt;
32959-}ar6k_pal_config_t;
32960+} __no_const ar6k_pal_config_t;
32961
32962 void register_pal_cb(ar6k_pal_config_t *palConfig_p);
32963 #endif /* _AR6K_PAL_H_ */
32964diff -urNp linux-3.0.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c linux-3.0.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
32965--- linux-3.0.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-07-21 22:17:23.000000000 -0400
32966+++ linux-3.0.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-08-23 21:47:56.000000000 -0400
32967@@ -853,14 +853,14 @@ static void dhd_op_if(dhd_if_t *ifp)
32968 free_netdev(ifp->net);
32969 }
32970 /* Allocate etherdev, including space for private structure */
32971- ifp->net = alloc_etherdev(sizeof(dhd));
32972+ ifp->net = alloc_etherdev(sizeof(*dhd));
32973 if (!ifp->net) {
32974 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
32975 ret = -ENOMEM;
32976 }
32977 if (ret == 0) {
32978 strcpy(ifp->net->name, ifp->name);
32979- memcpy(netdev_priv(ifp->net), &dhd, sizeof(dhd));
32980+ memcpy(netdev_priv(ifp->net), dhd, sizeof(*dhd));
32981 err = dhd_net_attach(&dhd->pub, ifp->idx);
32982 if (err != 0) {
32983 DHD_ERROR(("%s: dhd_net_attach failed, "
32984@@ -1872,7 +1872,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
32985 strcpy(nv_path, nvram_path);
32986
32987 /* Allocate etherdev, including space for private structure */
32988- net = alloc_etherdev(sizeof(dhd));
32989+ net = alloc_etherdev(sizeof(*dhd));
32990 if (!net) {
32991 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
32992 goto fail;
32993@@ -1888,7 +1888,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
32994 /*
32995 * Save the dhd_info into the priv
32996 */
32997- memcpy(netdev_priv(net), &dhd, sizeof(dhd));
32998+ memcpy(netdev_priv(net), dhd, sizeof(*dhd));
32999
33000 /* Set network interface name if it was provided as module parameter */
33001 if (iface_name[0]) {
33002@@ -2004,7 +2004,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
33003 /*
33004 * Save the dhd_info into the priv
33005 */
33006- memcpy(netdev_priv(net), &dhd, sizeof(dhd));
33007+ memcpy(netdev_priv(net), dhd, sizeof(*dhd));
33008
33009 #if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
33010 g_bus = bus;
33011diff -urNp linux-3.0.4/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h linux-3.0.4/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h
33012--- linux-3.0.4/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h 2011-07-21 22:17:23.000000000 -0400
33013+++ linux-3.0.4/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h 2011-08-23 21:47:56.000000000 -0400
33014@@ -593,7 +593,7 @@ struct phy_func_ptr {
33015 initfn_t carrsuppr;
33016 rxsigpwrfn_t rxsigpwr;
33017 detachfn_t detach;
33018-};
33019+} __no_const;
33020 typedef struct phy_func_ptr phy_func_ptr_t;
33021
33022 struct phy_info {
33023diff -urNp linux-3.0.4/drivers/staging/brcm80211/include/bcmsdh.h linux-3.0.4/drivers/staging/brcm80211/include/bcmsdh.h
33024--- linux-3.0.4/drivers/staging/brcm80211/include/bcmsdh.h 2011-07-21 22:17:23.000000000 -0400
33025+++ linux-3.0.4/drivers/staging/brcm80211/include/bcmsdh.h 2011-08-23 21:47:56.000000000 -0400
33026@@ -185,7 +185,7 @@ typedef struct {
33027 u16 func, uint bustype, void *regsva, void *param);
33028 /* detach from device */
33029 void (*detach) (void *ch);
33030-} bcmsdh_driver_t;
33031+} __no_const bcmsdh_driver_t;
33032
33033 /* platform specific/high level functions */
33034 extern int bcmsdh_register(bcmsdh_driver_t *driver);
33035diff -urNp linux-3.0.4/drivers/staging/et131x/et1310_tx.c linux-3.0.4/drivers/staging/et131x/et1310_tx.c
33036--- linux-3.0.4/drivers/staging/et131x/et1310_tx.c 2011-07-21 22:17:23.000000000 -0400
33037+++ linux-3.0.4/drivers/staging/et131x/et1310_tx.c 2011-08-23 21:47:56.000000000 -0400
33038@@ -635,11 +635,11 @@ inline void et131x_free_send_packet(stru
33039 struct net_device_stats *stats = &etdev->net_stats;
33040
33041 if (tcb->flags & fMP_DEST_BROAD)
33042- atomic_inc(&etdev->Stats.brdcstxmt);
33043+ atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
33044 else if (tcb->flags & fMP_DEST_MULTI)
33045- atomic_inc(&etdev->Stats.multixmt);
33046+ atomic_inc_unchecked(&etdev->Stats.multixmt);
33047 else
33048- atomic_inc(&etdev->Stats.unixmt);
33049+ atomic_inc_unchecked(&etdev->Stats.unixmt);
33050
33051 if (tcb->skb) {
33052 stats->tx_bytes += tcb->skb->len;
33053diff -urNp linux-3.0.4/drivers/staging/et131x/et131x_adapter.h linux-3.0.4/drivers/staging/et131x/et131x_adapter.h
33054--- linux-3.0.4/drivers/staging/et131x/et131x_adapter.h 2011-07-21 22:17:23.000000000 -0400
33055+++ linux-3.0.4/drivers/staging/et131x/et131x_adapter.h 2011-08-23 21:47:56.000000000 -0400
33056@@ -110,11 +110,11 @@ typedef struct _ce_stats_t {
33057 * operations
33058 */
33059 u32 unircv; /* # multicast packets received */
33060- atomic_t unixmt; /* # multicast packets for Tx */
33061+ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
33062 u32 multircv; /* # multicast packets received */
33063- atomic_t multixmt; /* # multicast packets for Tx */
33064+ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
33065 u32 brdcstrcv; /* # broadcast packets received */
33066- atomic_t brdcstxmt; /* # broadcast packets for Tx */
33067+ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
33068 u32 norcvbuf; /* # Rx packets discarded */
33069 u32 noxmtbuf; /* # Tx packets discarded */
33070
33071diff -urNp linux-3.0.4/drivers/staging/hv/channel.c linux-3.0.4/drivers/staging/hv/channel.c
33072--- linux-3.0.4/drivers/staging/hv/channel.c 2011-09-02 18:11:21.000000000 -0400
33073+++ linux-3.0.4/drivers/staging/hv/channel.c 2011-08-23 21:47:56.000000000 -0400
33074@@ -433,8 +433,8 @@ int vmbus_establish_gpadl(struct vmbus_c
33075 int ret = 0;
33076 int t;
33077
33078- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
33079- atomic_inc(&vmbus_connection.next_gpadl_handle);
33080+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
33081+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
33082
33083 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
33084 if (ret)
33085diff -urNp linux-3.0.4/drivers/staging/hv/hv.c linux-3.0.4/drivers/staging/hv/hv.c
33086--- linux-3.0.4/drivers/staging/hv/hv.c 2011-07-21 22:17:23.000000000 -0400
33087+++ linux-3.0.4/drivers/staging/hv/hv.c 2011-08-23 21:47:56.000000000 -0400
33088@@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, voi
33089 u64 output_address = (output) ? virt_to_phys(output) : 0;
33090 u32 output_address_hi = output_address >> 32;
33091 u32 output_address_lo = output_address & 0xFFFFFFFF;
33092- volatile void *hypercall_page = hv_context.hypercall_page;
33093+ volatile void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
33094
33095 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
33096 "=a"(hv_status_lo) : "d" (control_hi),
33097diff -urNp linux-3.0.4/drivers/staging/hv/hv_mouse.c linux-3.0.4/drivers/staging/hv/hv_mouse.c
33098--- linux-3.0.4/drivers/staging/hv/hv_mouse.c 2011-07-21 22:17:23.000000000 -0400
33099+++ linux-3.0.4/drivers/staging/hv/hv_mouse.c 2011-08-23 21:47:56.000000000 -0400
33100@@ -879,8 +879,10 @@ static void reportdesc_callback(struct h
33101 if (hid_dev) {
33102 DPRINT_INFO(INPUTVSC_DRV, "hid_device created");
33103
33104- hid_dev->ll_driver->open = mousevsc_hid_open;
33105- hid_dev->ll_driver->close = mousevsc_hid_close;
33106+ pax_open_kernel();
33107+ *(void **)&hid_dev->ll_driver->open = mousevsc_hid_open;
33108+ *(void **)&hid_dev->ll_driver->close = mousevsc_hid_close;
33109+ pax_close_kernel();
33110
33111 hid_dev->bus = BUS_VIRTUAL;
33112 hid_dev->vendor = input_device_ctx->device_info.vendor;
33113diff -urNp linux-3.0.4/drivers/staging/hv/hyperv_vmbus.h linux-3.0.4/drivers/staging/hv/hyperv_vmbus.h
33114--- linux-3.0.4/drivers/staging/hv/hyperv_vmbus.h 2011-07-21 22:17:23.000000000 -0400
33115+++ linux-3.0.4/drivers/staging/hv/hyperv_vmbus.h 2011-08-23 21:47:56.000000000 -0400
33116@@ -559,7 +559,7 @@ enum vmbus_connect_state {
33117 struct vmbus_connection {
33118 enum vmbus_connect_state conn_state;
33119
33120- atomic_t next_gpadl_handle;
33121+ atomic_unchecked_t next_gpadl_handle;
33122
33123 /*
33124 * Represents channel interrupts. Each bit position represents a
33125diff -urNp linux-3.0.4/drivers/staging/hv/rndis_filter.c linux-3.0.4/drivers/staging/hv/rndis_filter.c
33126--- linux-3.0.4/drivers/staging/hv/rndis_filter.c 2011-09-02 18:11:21.000000000 -0400
33127+++ linux-3.0.4/drivers/staging/hv/rndis_filter.c 2011-08-23 21:47:56.000000000 -0400
33128@@ -43,7 +43,7 @@ struct rndis_device {
33129
33130 enum rndis_device_state state;
33131 u32 link_stat;
33132- atomic_t new_req_id;
33133+ atomic_unchecked_t new_req_id;
33134
33135 spinlock_t request_lock;
33136 struct list_head req_list;
33137@@ -117,7 +117,7 @@ static struct rndis_request *get_rndis_r
33138 * template
33139 */
33140 set = &rndis_msg->msg.set_req;
33141- set->req_id = atomic_inc_return(&dev->new_req_id);
33142+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
33143
33144 /* Add to the request list */
33145 spin_lock_irqsave(&dev->request_lock, flags);
33146@@ -637,7 +637,7 @@ static void rndis_filter_halt_device(str
33147
33148 /* Setup the rndis set */
33149 halt = &request->request_msg.msg.halt_req;
33150- halt->req_id = atomic_inc_return(&dev->new_req_id);
33151+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
33152
33153 /* Ignore return since this msg is optional. */
33154 rndis_filter_send_request(dev, request);
33155diff -urNp linux-3.0.4/drivers/staging/hv/vmbus_drv.c linux-3.0.4/drivers/staging/hv/vmbus_drv.c
33156--- linux-3.0.4/drivers/staging/hv/vmbus_drv.c 2011-07-21 22:17:23.000000000 -0400
33157+++ linux-3.0.4/drivers/staging/hv/vmbus_drv.c 2011-08-23 21:47:56.000000000 -0400
33158@@ -668,11 +668,11 @@ int vmbus_child_device_register(struct h
33159 {
33160 int ret = 0;
33161
33162- static atomic_t device_num = ATOMIC_INIT(0);
33163+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
33164
33165 /* Set the device name. Otherwise, device_register() will fail. */
33166 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
33167- atomic_inc_return(&device_num));
33168+ atomic_inc_return_unchecked(&device_num));
33169
33170 /* The new device belongs to this bus */
33171 child_device_obj->device.bus = &hv_bus; /* device->dev.bus; */
33172diff -urNp linux-3.0.4/drivers/staging/iio/ring_generic.h linux-3.0.4/drivers/staging/iio/ring_generic.h
33173--- linux-3.0.4/drivers/staging/iio/ring_generic.h 2011-07-21 22:17:23.000000000 -0400
33174+++ linux-3.0.4/drivers/staging/iio/ring_generic.h 2011-08-23 21:47:56.000000000 -0400
33175@@ -62,7 +62,7 @@ struct iio_ring_access_funcs {
33176
33177 int (*is_enabled)(struct iio_ring_buffer *ring);
33178 int (*enable)(struct iio_ring_buffer *ring);
33179-};
33180+} __no_const;
33181
33182 struct iio_ring_setup_ops {
33183 int (*preenable)(struct iio_dev *);
33184diff -urNp linux-3.0.4/drivers/staging/octeon/ethernet.c linux-3.0.4/drivers/staging/octeon/ethernet.c
33185--- linux-3.0.4/drivers/staging/octeon/ethernet.c 2011-07-21 22:17:23.000000000 -0400
33186+++ linux-3.0.4/drivers/staging/octeon/ethernet.c 2011-08-23 21:47:56.000000000 -0400
33187@@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_
33188 * since the RX tasklet also increments it.
33189 */
33190 #ifdef CONFIG_64BIT
33191- atomic64_add(rx_status.dropped_packets,
33192- (atomic64_t *)&priv->stats.rx_dropped);
33193+ atomic64_add_unchecked(rx_status.dropped_packets,
33194+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
33195 #else
33196- atomic_add(rx_status.dropped_packets,
33197- (atomic_t *)&priv->stats.rx_dropped);
33198+ atomic_add_unchecked(rx_status.dropped_packets,
33199+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
33200 #endif
33201 }
33202
33203diff -urNp linux-3.0.4/drivers/staging/octeon/ethernet-rx.c linux-3.0.4/drivers/staging/octeon/ethernet-rx.c
33204--- linux-3.0.4/drivers/staging/octeon/ethernet-rx.c 2011-07-21 22:17:23.000000000 -0400
33205+++ linux-3.0.4/drivers/staging/octeon/ethernet-rx.c 2011-08-23 21:47:56.000000000 -0400
33206@@ -417,11 +417,11 @@ static int cvm_oct_napi_poll(struct napi
33207 /* Increment RX stats for virtual ports */
33208 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
33209 #ifdef CONFIG_64BIT
33210- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
33211- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
33212+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
33213+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
33214 #else
33215- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
33216- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
33217+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
33218+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
33219 #endif
33220 }
33221 netif_receive_skb(skb);
33222@@ -433,9 +433,9 @@ static int cvm_oct_napi_poll(struct napi
33223 dev->name);
33224 */
33225 #ifdef CONFIG_64BIT
33226- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
33227+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
33228 #else
33229- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
33230+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
33231 #endif
33232 dev_kfree_skb_irq(skb);
33233 }
33234diff -urNp linux-3.0.4/drivers/staging/pohmelfs/inode.c linux-3.0.4/drivers/staging/pohmelfs/inode.c
33235--- linux-3.0.4/drivers/staging/pohmelfs/inode.c 2011-07-21 22:17:23.000000000 -0400
33236+++ linux-3.0.4/drivers/staging/pohmelfs/inode.c 2011-08-23 21:47:56.000000000 -0400
33237@@ -1856,7 +1856,7 @@ static int pohmelfs_fill_super(struct su
33238 mutex_init(&psb->mcache_lock);
33239 psb->mcache_root = RB_ROOT;
33240 psb->mcache_timeout = msecs_to_jiffies(5000);
33241- atomic_long_set(&psb->mcache_gen, 0);
33242+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
33243
33244 psb->trans_max_pages = 100;
33245
33246@@ -1871,7 +1871,7 @@ static int pohmelfs_fill_super(struct su
33247 INIT_LIST_HEAD(&psb->crypto_ready_list);
33248 INIT_LIST_HEAD(&psb->crypto_active_list);
33249
33250- atomic_set(&psb->trans_gen, 1);
33251+ atomic_set_unchecked(&psb->trans_gen, 1);
33252 atomic_long_set(&psb->total_inodes, 0);
33253
33254 mutex_init(&psb->state_lock);
33255diff -urNp linux-3.0.4/drivers/staging/pohmelfs/mcache.c linux-3.0.4/drivers/staging/pohmelfs/mcache.c
33256--- linux-3.0.4/drivers/staging/pohmelfs/mcache.c 2011-07-21 22:17:23.000000000 -0400
33257+++ linux-3.0.4/drivers/staging/pohmelfs/mcache.c 2011-08-23 21:47:56.000000000 -0400
33258@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
33259 m->data = data;
33260 m->start = start;
33261 m->size = size;
33262- m->gen = atomic_long_inc_return(&psb->mcache_gen);
33263+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
33264
33265 mutex_lock(&psb->mcache_lock);
33266 err = pohmelfs_mcache_insert(psb, m);
33267diff -urNp linux-3.0.4/drivers/staging/pohmelfs/netfs.h linux-3.0.4/drivers/staging/pohmelfs/netfs.h
33268--- linux-3.0.4/drivers/staging/pohmelfs/netfs.h 2011-07-21 22:17:23.000000000 -0400
33269+++ linux-3.0.4/drivers/staging/pohmelfs/netfs.h 2011-08-23 21:47:56.000000000 -0400
33270@@ -571,14 +571,14 @@ struct pohmelfs_config;
33271 struct pohmelfs_sb {
33272 struct rb_root mcache_root;
33273 struct mutex mcache_lock;
33274- atomic_long_t mcache_gen;
33275+ atomic_long_unchecked_t mcache_gen;
33276 unsigned long mcache_timeout;
33277
33278 unsigned int idx;
33279
33280 unsigned int trans_retries;
33281
33282- atomic_t trans_gen;
33283+ atomic_unchecked_t trans_gen;
33284
33285 unsigned int crypto_attached_size;
33286 unsigned int crypto_align_size;
33287diff -urNp linux-3.0.4/drivers/staging/pohmelfs/trans.c linux-3.0.4/drivers/staging/pohmelfs/trans.c
33288--- linux-3.0.4/drivers/staging/pohmelfs/trans.c 2011-07-21 22:17:23.000000000 -0400
33289+++ linux-3.0.4/drivers/staging/pohmelfs/trans.c 2011-08-23 21:47:56.000000000 -0400
33290@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
33291 int err;
33292 struct netfs_cmd *cmd = t->iovec.iov_base;
33293
33294- t->gen = atomic_inc_return(&psb->trans_gen);
33295+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
33296
33297 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
33298 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
33299diff -urNp linux-3.0.4/drivers/staging/rtl8712/rtl871x_io.h linux-3.0.4/drivers/staging/rtl8712/rtl871x_io.h
33300--- linux-3.0.4/drivers/staging/rtl8712/rtl871x_io.h 2011-07-21 22:17:23.000000000 -0400
33301+++ linux-3.0.4/drivers/staging/rtl8712/rtl871x_io.h 2011-08-23 21:47:56.000000000 -0400
33302@@ -83,7 +83,7 @@ struct _io_ops {
33303 u8 *pmem);
33304 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
33305 u8 *pmem);
33306-};
33307+} __no_const;
33308
33309 struct io_req {
33310 struct list_head list;
33311diff -urNp linux-3.0.4/drivers/staging/sbe-2t3e3/netdev.c linux-3.0.4/drivers/staging/sbe-2t3e3/netdev.c
33312--- linux-3.0.4/drivers/staging/sbe-2t3e3/netdev.c 2011-07-21 22:17:23.000000000 -0400
33313+++ linux-3.0.4/drivers/staging/sbe-2t3e3/netdev.c 2011-08-24 18:21:41.000000000 -0400
33314@@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, s
33315 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
33316
33317 if (rlen)
33318- if (copy_to_user(data, &resp, rlen))
33319+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
33320 return -EFAULT;
33321
33322 return 0;
33323diff -urNp linux-3.0.4/drivers/staging/tty/stallion.c linux-3.0.4/drivers/staging/tty/stallion.c
33324--- linux-3.0.4/drivers/staging/tty/stallion.c 2011-07-21 22:17:23.000000000 -0400
33325+++ linux-3.0.4/drivers/staging/tty/stallion.c 2011-08-23 21:48:14.000000000 -0400
33326@@ -2406,6 +2406,8 @@ static int stl_getportstruct(struct stlp
33327 struct stlport stl_dummyport;
33328 struct stlport *portp;
33329
33330+ pax_track_stack();
33331+
33332 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
33333 return -EFAULT;
33334 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
33335diff -urNp linux-3.0.4/drivers/staging/usbip/usbip_common.h linux-3.0.4/drivers/staging/usbip/usbip_common.h
33336--- linux-3.0.4/drivers/staging/usbip/usbip_common.h 2011-07-21 22:17:23.000000000 -0400
33337+++ linux-3.0.4/drivers/staging/usbip/usbip_common.h 2011-08-23 21:47:56.000000000 -0400
33338@@ -315,7 +315,7 @@ struct usbip_device {
33339 void (*shutdown)(struct usbip_device *);
33340 void (*reset)(struct usbip_device *);
33341 void (*unusable)(struct usbip_device *);
33342- } eh_ops;
33343+ } __no_const eh_ops;
33344 };
33345
33346 void usbip_pack_pdu(struct usbip_header *pdu, struct urb *urb, int cmd,
33347diff -urNp linux-3.0.4/drivers/staging/usbip/vhci.h linux-3.0.4/drivers/staging/usbip/vhci.h
33348--- linux-3.0.4/drivers/staging/usbip/vhci.h 2011-07-21 22:17:23.000000000 -0400
33349+++ linux-3.0.4/drivers/staging/usbip/vhci.h 2011-08-23 21:47:56.000000000 -0400
33350@@ -94,7 +94,7 @@ struct vhci_hcd {
33351 unsigned resuming:1;
33352 unsigned long re_timeout;
33353
33354- atomic_t seqnum;
33355+ atomic_unchecked_t seqnum;
33356
33357 /*
33358 * NOTE:
33359diff -urNp linux-3.0.4/drivers/staging/usbip/vhci_hcd.c linux-3.0.4/drivers/staging/usbip/vhci_hcd.c
33360--- linux-3.0.4/drivers/staging/usbip/vhci_hcd.c 2011-09-02 18:11:21.000000000 -0400
33361+++ linux-3.0.4/drivers/staging/usbip/vhci_hcd.c 2011-08-23 21:47:56.000000000 -0400
33362@@ -511,7 +511,7 @@ static void vhci_tx_urb(struct urb *urb)
33363 return;
33364 }
33365
33366- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
33367+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
33368 if (priv->seqnum == 0xffff)
33369 dev_info(&urb->dev->dev, "seqnum max\n");
33370
33371@@ -765,7 +765,7 @@ static int vhci_urb_dequeue(struct usb_h
33372 return -ENOMEM;
33373 }
33374
33375- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
33376+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
33377 if (unlink->seqnum == 0xffff)
33378 pr_info("seqnum max\n");
33379
33380@@ -955,7 +955,7 @@ static int vhci_start(struct usb_hcd *hc
33381 vdev->rhport = rhport;
33382 }
33383
33384- atomic_set(&vhci->seqnum, 0);
33385+ atomic_set_unchecked(&vhci->seqnum, 0);
33386 spin_lock_init(&vhci->lock);
33387
33388 hcd->power_budget = 0; /* no limit */
33389diff -urNp linux-3.0.4/drivers/staging/usbip/vhci_rx.c linux-3.0.4/drivers/staging/usbip/vhci_rx.c
33390--- linux-3.0.4/drivers/staging/usbip/vhci_rx.c 2011-07-21 22:17:23.000000000 -0400
33391+++ linux-3.0.4/drivers/staging/usbip/vhci_rx.c 2011-08-23 21:47:56.000000000 -0400
33392@@ -76,7 +76,7 @@ static void vhci_recv_ret_submit(struct
33393 if (!urb) {
33394 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
33395 pr_info("max seqnum %d\n",
33396- atomic_read(&the_controller->seqnum));
33397+ atomic_read_unchecked(&the_controller->seqnum));
33398 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
33399 return;
33400 }
33401diff -urNp linux-3.0.4/drivers/staging/vt6655/hostap.c linux-3.0.4/drivers/staging/vt6655/hostap.c
33402--- linux-3.0.4/drivers/staging/vt6655/hostap.c 2011-07-21 22:17:23.000000000 -0400
33403+++ linux-3.0.4/drivers/staging/vt6655/hostap.c 2011-08-23 21:47:56.000000000 -0400
33404@@ -79,14 +79,13 @@ static int msglevel
33405 *
33406 */
33407
33408+static net_device_ops_no_const apdev_netdev_ops;
33409+
33410 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
33411 {
33412 PSDevice apdev_priv;
33413 struct net_device *dev = pDevice->dev;
33414 int ret;
33415- const struct net_device_ops apdev_netdev_ops = {
33416- .ndo_start_xmit = pDevice->tx_80211,
33417- };
33418
33419 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
33420
33421@@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevic
33422 *apdev_priv = *pDevice;
33423 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
33424
33425+ /* only half broken now */
33426+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
33427 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
33428
33429 pDevice->apdev->type = ARPHRD_IEEE80211;
33430diff -urNp linux-3.0.4/drivers/staging/vt6656/hostap.c linux-3.0.4/drivers/staging/vt6656/hostap.c
33431--- linux-3.0.4/drivers/staging/vt6656/hostap.c 2011-07-21 22:17:23.000000000 -0400
33432+++ linux-3.0.4/drivers/staging/vt6656/hostap.c 2011-08-23 21:47:56.000000000 -0400
33433@@ -80,14 +80,13 @@ static int msglevel
33434 *
33435 */
33436
33437+static net_device_ops_no_const apdev_netdev_ops;
33438+
33439 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
33440 {
33441 PSDevice apdev_priv;
33442 struct net_device *dev = pDevice->dev;
33443 int ret;
33444- const struct net_device_ops apdev_netdev_ops = {
33445- .ndo_start_xmit = pDevice->tx_80211,
33446- };
33447
33448 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
33449
33450@@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevic
33451 *apdev_priv = *pDevice;
33452 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
33453
33454+ /* only half broken now */
33455+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
33456 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
33457
33458 pDevice->apdev->type = ARPHRD_IEEE80211;
33459diff -urNp linux-3.0.4/drivers/staging/wlan-ng/hfa384x_usb.c linux-3.0.4/drivers/staging/wlan-ng/hfa384x_usb.c
33460--- linux-3.0.4/drivers/staging/wlan-ng/hfa384x_usb.c 2011-07-21 22:17:23.000000000 -0400
33461+++ linux-3.0.4/drivers/staging/wlan-ng/hfa384x_usb.c 2011-08-23 21:47:56.000000000 -0400
33462@@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hf
33463
33464 struct usbctlx_completor {
33465 int (*complete) (struct usbctlx_completor *);
33466-};
33467+} __no_const;
33468
33469 static int
33470 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
33471diff -urNp linux-3.0.4/drivers/staging/zcache/tmem.c linux-3.0.4/drivers/staging/zcache/tmem.c
33472--- linux-3.0.4/drivers/staging/zcache/tmem.c 2011-07-21 22:17:23.000000000 -0400
33473+++ linux-3.0.4/drivers/staging/zcache/tmem.c 2011-08-23 21:47:56.000000000 -0400
33474@@ -39,7 +39,7 @@
33475 * A tmem host implementation must use this function to register callbacks
33476 * for memory allocation.
33477 */
33478-static struct tmem_hostops tmem_hostops;
33479+static tmem_hostops_no_const tmem_hostops;
33480
33481 static void tmem_objnode_tree_init(void);
33482
33483@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_h
33484 * A tmem host implementation must use this function to register
33485 * callbacks for a page-accessible memory (PAM) implementation
33486 */
33487-static struct tmem_pamops tmem_pamops;
33488+static tmem_pamops_no_const tmem_pamops;
33489
33490 void tmem_register_pamops(struct tmem_pamops *m)
33491 {
33492diff -urNp linux-3.0.4/drivers/staging/zcache/tmem.h linux-3.0.4/drivers/staging/zcache/tmem.h
33493--- linux-3.0.4/drivers/staging/zcache/tmem.h 2011-07-21 22:17:23.000000000 -0400
33494+++ linux-3.0.4/drivers/staging/zcache/tmem.h 2011-08-23 21:47:56.000000000 -0400
33495@@ -171,6 +171,7 @@ struct tmem_pamops {
33496 int (*get_data)(struct page *, void *, struct tmem_pool *);
33497 void (*free)(void *, struct tmem_pool *);
33498 };
33499+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
33500 extern void tmem_register_pamops(struct tmem_pamops *m);
33501
33502 /* memory allocation methods provided by the host implementation */
33503@@ -180,6 +181,7 @@ struct tmem_hostops {
33504 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
33505 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
33506 };
33507+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
33508 extern void tmem_register_hostops(struct tmem_hostops *m);
33509
33510 /* core tmem accessor functions */
33511diff -urNp linux-3.0.4/drivers/target/target_core_alua.c linux-3.0.4/drivers/target/target_core_alua.c
33512--- linux-3.0.4/drivers/target/target_core_alua.c 2011-07-21 22:17:23.000000000 -0400
33513+++ linux-3.0.4/drivers/target/target_core_alua.c 2011-08-23 21:48:14.000000000 -0400
33514@@ -675,6 +675,8 @@ static int core_alua_update_tpg_primary_
33515 char path[ALUA_METADATA_PATH_LEN];
33516 int len;
33517
33518+ pax_track_stack();
33519+
33520 memset(path, 0, ALUA_METADATA_PATH_LEN);
33521
33522 len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
33523@@ -938,6 +940,8 @@ static int core_alua_update_tpg_secondar
33524 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
33525 int len;
33526
33527+ pax_track_stack();
33528+
33529 memset(path, 0, ALUA_METADATA_PATH_LEN);
33530 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
33531
33532diff -urNp linux-3.0.4/drivers/target/target_core_cdb.c linux-3.0.4/drivers/target/target_core_cdb.c
33533--- linux-3.0.4/drivers/target/target_core_cdb.c 2011-07-21 22:17:23.000000000 -0400
33534+++ linux-3.0.4/drivers/target/target_core_cdb.c 2011-08-23 21:48:14.000000000 -0400
33535@@ -838,6 +838,8 @@ target_emulate_modesense(struct se_cmd *
33536 int length = 0;
33537 unsigned char buf[SE_MODE_PAGE_BUF];
33538
33539+ pax_track_stack();
33540+
33541 memset(buf, 0, SE_MODE_PAGE_BUF);
33542
33543 switch (cdb[2] & 0x3f) {
33544diff -urNp linux-3.0.4/drivers/target/target_core_configfs.c linux-3.0.4/drivers/target/target_core_configfs.c
33545--- linux-3.0.4/drivers/target/target_core_configfs.c 2011-07-21 22:17:23.000000000 -0400
33546+++ linux-3.0.4/drivers/target/target_core_configfs.c 2011-08-23 21:48:14.000000000 -0400
33547@@ -1276,6 +1276,8 @@ static ssize_t target_core_dev_pr_show_a
33548 ssize_t len = 0;
33549 int reg_count = 0, prf_isid;
33550
33551+ pax_track_stack();
33552+
33553 if (!(su_dev->se_dev_ptr))
33554 return -ENODEV;
33555
33556diff -urNp linux-3.0.4/drivers/target/target_core_pr.c linux-3.0.4/drivers/target/target_core_pr.c
33557--- linux-3.0.4/drivers/target/target_core_pr.c 2011-07-21 22:17:23.000000000 -0400
33558+++ linux-3.0.4/drivers/target/target_core_pr.c 2011-08-23 21:48:14.000000000 -0400
33559@@ -918,6 +918,8 @@ static int __core_scsi3_check_aptpl_regi
33560 unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
33561 u16 tpgt;
33562
33563+ pax_track_stack();
33564+
33565 memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
33566 memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
33567 /*
33568@@ -1861,6 +1863,8 @@ static int __core_scsi3_update_aptpl_buf
33569 ssize_t len = 0;
33570 int reg_count = 0;
33571
33572+ pax_track_stack();
33573+
33574 memset(buf, 0, pr_aptpl_buf_len);
33575 /*
33576 * Called to clear metadata once APTPL has been deactivated.
33577@@ -1983,6 +1987,8 @@ static int __core_scsi3_write_aptpl_to_f
33578 char path[512];
33579 int ret;
33580
33581+ pax_track_stack();
33582+
33583 memset(iov, 0, sizeof(struct iovec));
33584 memset(path, 0, 512);
33585
33586diff -urNp linux-3.0.4/drivers/target/target_core_tmr.c linux-3.0.4/drivers/target/target_core_tmr.c
33587--- linux-3.0.4/drivers/target/target_core_tmr.c 2011-07-21 22:17:23.000000000 -0400
33588+++ linux-3.0.4/drivers/target/target_core_tmr.c 2011-08-23 21:47:56.000000000 -0400
33589@@ -269,7 +269,7 @@ int core_tmr_lun_reset(
33590 CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
33591 T_TASK(cmd)->t_task_cdbs,
33592 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
33593- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
33594+ atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
33595 atomic_read(&T_TASK(cmd)->t_transport_active),
33596 atomic_read(&T_TASK(cmd)->t_transport_stop),
33597 atomic_read(&T_TASK(cmd)->t_transport_sent));
33598@@ -311,7 +311,7 @@ int core_tmr_lun_reset(
33599 DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
33600 " task: %p, t_fe_count: %d dev: %p\n", task,
33601 fe_count, dev);
33602- atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
33603+ atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
33604 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
33605 flags);
33606 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
33607@@ -321,7 +321,7 @@ int core_tmr_lun_reset(
33608 }
33609 DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
33610 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
33611- atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
33612+ atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
33613 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
33614 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
33615
33616diff -urNp linux-3.0.4/drivers/target/target_core_transport.c linux-3.0.4/drivers/target/target_core_transport.c
33617--- linux-3.0.4/drivers/target/target_core_transport.c 2011-07-21 22:17:23.000000000 -0400
33618+++ linux-3.0.4/drivers/target/target_core_transport.c 2011-08-23 21:47:56.000000000 -0400
33619@@ -1681,7 +1681,7 @@ struct se_device *transport_add_device_t
33620
33621 dev->queue_depth = dev_limits->queue_depth;
33622 atomic_set(&dev->depth_left, dev->queue_depth);
33623- atomic_set(&dev->dev_ordered_id, 0);
33624+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
33625
33626 se_dev_set_default_attribs(dev, dev_limits);
33627
33628@@ -1882,7 +1882,7 @@ static int transport_check_alloc_task_at
33629 * Used to determine when ORDERED commands should go from
33630 * Dormant to Active status.
33631 */
33632- cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id);
33633+ cmd->se_ordered_id = atomic_inc_return_unchecked(&SE_DEV(cmd)->dev_ordered_id);
33634 smp_mb__after_atomic_inc();
33635 DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
33636 cmd->se_ordered_id, cmd->sam_task_attr,
33637@@ -2169,7 +2169,7 @@ static void transport_generic_request_fa
33638 " t_transport_active: %d t_transport_stop: %d"
33639 " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs,
33640 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
33641- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
33642+ atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
33643 atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left),
33644 atomic_read(&T_TASK(cmd)->t_transport_active),
33645 atomic_read(&T_TASK(cmd)->t_transport_stop),
33646@@ -2673,9 +2673,9 @@ check_depth:
33647 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
33648 atomic_set(&task->task_active, 1);
33649 atomic_set(&task->task_sent, 1);
33650- atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent);
33651+ atomic_inc_unchecked(&T_TASK(cmd)->t_task_cdbs_sent);
33652
33653- if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) ==
33654+ if (atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent) ==
33655 T_TASK(cmd)->t_task_cdbs)
33656 atomic_set(&cmd->transport_sent, 1);
33657
33658@@ -5568,7 +5568,7 @@ static void transport_generic_wait_for_t
33659 atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
33660 }
33661 if (!atomic_read(&T_TASK(cmd)->t_transport_active) ||
33662- atomic_read(&T_TASK(cmd)->t_transport_aborted))
33663+ atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted))
33664 goto remove;
33665
33666 atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
33667@@ -5797,7 +5797,7 @@ int transport_check_aborted_status(struc
33668 {
33669 int ret = 0;
33670
33671- if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
33672+ if (atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted) != 0) {
33673 if (!(send_status) ||
33674 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
33675 return 1;
33676@@ -5825,7 +5825,7 @@ void transport_send_task_abort(struct se
33677 */
33678 if (cmd->data_direction == DMA_TO_DEVICE) {
33679 if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
33680- atomic_inc(&T_TASK(cmd)->t_transport_aborted);
33681+ atomic_inc_unchecked(&T_TASK(cmd)->t_transport_aborted);
33682 smp_mb__after_atomic_inc();
33683 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
33684 transport_new_cmd_failure(cmd);
33685@@ -5949,7 +5949,7 @@ static void transport_processing_shutdow
33686 CMD_TFO(cmd)->get_task_tag(cmd),
33687 T_TASK(cmd)->t_task_cdbs,
33688 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
33689- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
33690+ atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
33691 atomic_read(&T_TASK(cmd)->t_transport_active),
33692 atomic_read(&T_TASK(cmd)->t_transport_stop),
33693 atomic_read(&T_TASK(cmd)->t_transport_sent));
33694diff -urNp linux-3.0.4/drivers/telephony/ixj.c linux-3.0.4/drivers/telephony/ixj.c
33695--- linux-3.0.4/drivers/telephony/ixj.c 2011-07-21 22:17:23.000000000 -0400
33696+++ linux-3.0.4/drivers/telephony/ixj.c 2011-08-23 21:48:14.000000000 -0400
33697@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
33698 bool mContinue;
33699 char *pIn, *pOut;
33700
33701+ pax_track_stack();
33702+
33703 if (!SCI_Prepare(j))
33704 return 0;
33705
33706diff -urNp linux-3.0.4/drivers/tty/hvc/hvcs.c linux-3.0.4/drivers/tty/hvc/hvcs.c
33707--- linux-3.0.4/drivers/tty/hvc/hvcs.c 2011-07-21 22:17:23.000000000 -0400
33708+++ linux-3.0.4/drivers/tty/hvc/hvcs.c 2011-08-23 21:47:56.000000000 -0400
33709@@ -83,6 +83,7 @@
33710 #include <asm/hvcserver.h>
33711 #include <asm/uaccess.h>
33712 #include <asm/vio.h>
33713+#include <asm/local.h>
33714
33715 /*
33716 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
33717@@ -270,7 +271,7 @@ struct hvcs_struct {
33718 unsigned int index;
33719
33720 struct tty_struct *tty;
33721- int open_count;
33722+ local_t open_count;
33723
33724 /*
33725 * Used to tell the driver kernel_thread what operations need to take
33726@@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(st
33727
33728 spin_lock_irqsave(&hvcsd->lock, flags);
33729
33730- if (hvcsd->open_count > 0) {
33731+ if (local_read(&hvcsd->open_count) > 0) {
33732 spin_unlock_irqrestore(&hvcsd->lock, flags);
33733 printk(KERN_INFO "HVCS: vterm state unchanged. "
33734 "The hvcs device node is still in use.\n");
33735@@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *
33736 if ((retval = hvcs_partner_connect(hvcsd)))
33737 goto error_release;
33738
33739- hvcsd->open_count = 1;
33740+ local_set(&hvcsd->open_count, 1);
33741 hvcsd->tty = tty;
33742 tty->driver_data = hvcsd;
33743
33744@@ -1179,7 +1180,7 @@ fast_open:
33745
33746 spin_lock_irqsave(&hvcsd->lock, flags);
33747 kref_get(&hvcsd->kref);
33748- hvcsd->open_count++;
33749+ local_inc(&hvcsd->open_count);
33750 hvcsd->todo_mask |= HVCS_SCHED_READ;
33751 spin_unlock_irqrestore(&hvcsd->lock, flags);
33752
33753@@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct
33754 hvcsd = tty->driver_data;
33755
33756 spin_lock_irqsave(&hvcsd->lock, flags);
33757- if (--hvcsd->open_count == 0) {
33758+ if (local_dec_and_test(&hvcsd->open_count)) {
33759
33760 vio_disable_interrupts(hvcsd->vdev);
33761
33762@@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct
33763 free_irq(irq, hvcsd);
33764 kref_put(&hvcsd->kref, destroy_hvcs_struct);
33765 return;
33766- } else if (hvcsd->open_count < 0) {
33767+ } else if (local_read(&hvcsd->open_count) < 0) {
33768 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
33769 " is missmanaged.\n",
33770- hvcsd->vdev->unit_address, hvcsd->open_count);
33771+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
33772 }
33773
33774 spin_unlock_irqrestore(&hvcsd->lock, flags);
33775@@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struc
33776
33777 spin_lock_irqsave(&hvcsd->lock, flags);
33778 /* Preserve this so that we know how many kref refs to put */
33779- temp_open_count = hvcsd->open_count;
33780+ temp_open_count = local_read(&hvcsd->open_count);
33781
33782 /*
33783 * Don't kref put inside the spinlock because the destruction
33784@@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struc
33785 hvcsd->tty->driver_data = NULL;
33786 hvcsd->tty = NULL;
33787
33788- hvcsd->open_count = 0;
33789+ local_set(&hvcsd->open_count, 0);
33790
33791 /* This will drop any buffered data on the floor which is OK in a hangup
33792 * scenario. */
33793@@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct
33794 * the middle of a write operation? This is a crummy place to do this
33795 * but we want to keep it all in the spinlock.
33796 */
33797- if (hvcsd->open_count <= 0) {
33798+ if (local_read(&hvcsd->open_count) <= 0) {
33799 spin_unlock_irqrestore(&hvcsd->lock, flags);
33800 return -ENODEV;
33801 }
33802@@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_st
33803 {
33804 struct hvcs_struct *hvcsd = tty->driver_data;
33805
33806- if (!hvcsd || hvcsd->open_count <= 0)
33807+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
33808 return 0;
33809
33810 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
33811diff -urNp linux-3.0.4/drivers/tty/ipwireless/tty.c linux-3.0.4/drivers/tty/ipwireless/tty.c
33812--- linux-3.0.4/drivers/tty/ipwireless/tty.c 2011-07-21 22:17:23.000000000 -0400
33813+++ linux-3.0.4/drivers/tty/ipwireless/tty.c 2011-08-23 21:47:56.000000000 -0400
33814@@ -29,6 +29,7 @@
33815 #include <linux/tty_driver.h>
33816 #include <linux/tty_flip.h>
33817 #include <linux/uaccess.h>
33818+#include <asm/local.h>
33819
33820 #include "tty.h"
33821 #include "network.h"
33822@@ -51,7 +52,7 @@ struct ipw_tty {
33823 int tty_type;
33824 struct ipw_network *network;
33825 struct tty_struct *linux_tty;
33826- int open_count;
33827+ local_t open_count;
33828 unsigned int control_lines;
33829 struct mutex ipw_tty_mutex;
33830 int tx_bytes_queued;
33831@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
33832 mutex_unlock(&tty->ipw_tty_mutex);
33833 return -ENODEV;
33834 }
33835- if (tty->open_count == 0)
33836+ if (local_read(&tty->open_count) == 0)
33837 tty->tx_bytes_queued = 0;
33838
33839- tty->open_count++;
33840+ local_inc(&tty->open_count);
33841
33842 tty->linux_tty = linux_tty;
33843 linux_tty->driver_data = tty;
33844@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
33845
33846 static void do_ipw_close(struct ipw_tty *tty)
33847 {
33848- tty->open_count--;
33849-
33850- if (tty->open_count == 0) {
33851+ if (local_dec_return(&tty->open_count) == 0) {
33852 struct tty_struct *linux_tty = tty->linux_tty;
33853
33854 if (linux_tty != NULL) {
33855@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
33856 return;
33857
33858 mutex_lock(&tty->ipw_tty_mutex);
33859- if (tty->open_count == 0) {
33860+ if (local_read(&tty->open_count) == 0) {
33861 mutex_unlock(&tty->ipw_tty_mutex);
33862 return;
33863 }
33864@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
33865 return;
33866 }
33867
33868- if (!tty->open_count) {
33869+ if (!local_read(&tty->open_count)) {
33870 mutex_unlock(&tty->ipw_tty_mutex);
33871 return;
33872 }
33873@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
33874 return -ENODEV;
33875
33876 mutex_lock(&tty->ipw_tty_mutex);
33877- if (!tty->open_count) {
33878+ if (!local_read(&tty->open_count)) {
33879 mutex_unlock(&tty->ipw_tty_mutex);
33880 return -EINVAL;
33881 }
33882@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
33883 if (!tty)
33884 return -ENODEV;
33885
33886- if (!tty->open_count)
33887+ if (!local_read(&tty->open_count))
33888 return -EINVAL;
33889
33890 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
33891@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
33892 if (!tty)
33893 return 0;
33894
33895- if (!tty->open_count)
33896+ if (!local_read(&tty->open_count))
33897 return 0;
33898
33899 return tty->tx_bytes_queued;
33900@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
33901 if (!tty)
33902 return -ENODEV;
33903
33904- if (!tty->open_count)
33905+ if (!local_read(&tty->open_count))
33906 return -EINVAL;
33907
33908 return get_control_lines(tty);
33909@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
33910 if (!tty)
33911 return -ENODEV;
33912
33913- if (!tty->open_count)
33914+ if (!local_read(&tty->open_count))
33915 return -EINVAL;
33916
33917 return set_control_lines(tty, set, clear);
33918@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
33919 if (!tty)
33920 return -ENODEV;
33921
33922- if (!tty->open_count)
33923+ if (!local_read(&tty->open_count))
33924 return -EINVAL;
33925
33926 /* FIXME: Exactly how is the tty object locked here .. */
33927@@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty
33928 against a parallel ioctl etc */
33929 mutex_lock(&ttyj->ipw_tty_mutex);
33930 }
33931- while (ttyj->open_count)
33932+ while (local_read(&ttyj->open_count))
33933 do_ipw_close(ttyj);
33934 ipwireless_disassociate_network_ttys(network,
33935 ttyj->channel_idx);
33936diff -urNp linux-3.0.4/drivers/tty/n_gsm.c linux-3.0.4/drivers/tty/n_gsm.c
33937--- linux-3.0.4/drivers/tty/n_gsm.c 2011-09-02 18:11:21.000000000 -0400
33938+++ linux-3.0.4/drivers/tty/n_gsm.c 2011-08-23 21:47:56.000000000 -0400
33939@@ -1589,7 +1589,7 @@ static struct gsm_dlci *gsm_dlci_alloc(s
33940 return NULL;
33941 spin_lock_init(&dlci->lock);
33942 dlci->fifo = &dlci->_fifo;
33943- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
33944+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
33945 kfree(dlci);
33946 return NULL;
33947 }
33948diff -urNp linux-3.0.4/drivers/tty/n_tty.c linux-3.0.4/drivers/tty/n_tty.c
33949--- linux-3.0.4/drivers/tty/n_tty.c 2011-07-21 22:17:23.000000000 -0400
33950+++ linux-3.0.4/drivers/tty/n_tty.c 2011-08-23 21:47:56.000000000 -0400
33951@@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_
33952 {
33953 *ops = tty_ldisc_N_TTY;
33954 ops->owner = NULL;
33955- ops->refcount = ops->flags = 0;
33956+ atomic_set(&ops->refcount, 0);
33957+ ops->flags = 0;
33958 }
33959 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
33960diff -urNp linux-3.0.4/drivers/tty/pty.c linux-3.0.4/drivers/tty/pty.c
33961--- linux-3.0.4/drivers/tty/pty.c 2011-07-21 22:17:23.000000000 -0400
33962+++ linux-3.0.4/drivers/tty/pty.c 2011-08-23 21:47:56.000000000 -0400
33963@@ -754,8 +754,10 @@ static void __init unix98_pty_init(void)
33964 register_sysctl_table(pty_root_table);
33965
33966 /* Now create the /dev/ptmx special device */
33967+ pax_open_kernel();
33968 tty_default_fops(&ptmx_fops);
33969- ptmx_fops.open = ptmx_open;
33970+ *(void **)&ptmx_fops.open = ptmx_open;
33971+ pax_close_kernel();
33972
33973 cdev_init(&ptmx_cdev, &ptmx_fops);
33974 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
33975diff -urNp linux-3.0.4/drivers/tty/rocket.c linux-3.0.4/drivers/tty/rocket.c
33976--- linux-3.0.4/drivers/tty/rocket.c 2011-07-21 22:17:23.000000000 -0400
33977+++ linux-3.0.4/drivers/tty/rocket.c 2011-08-23 21:48:14.000000000 -0400
33978@@ -1277,6 +1277,8 @@ static int get_ports(struct r_port *info
33979 struct rocket_ports tmp;
33980 int board;
33981
33982+ pax_track_stack();
33983+
33984 if (!retports)
33985 return -EFAULT;
33986 memset(&tmp, 0, sizeof (tmp));
33987diff -urNp linux-3.0.4/drivers/tty/serial/kgdboc.c linux-3.0.4/drivers/tty/serial/kgdboc.c
33988--- linux-3.0.4/drivers/tty/serial/kgdboc.c 2011-07-21 22:17:23.000000000 -0400
33989+++ linux-3.0.4/drivers/tty/serial/kgdboc.c 2011-08-23 21:47:56.000000000 -0400
33990@@ -23,8 +23,9 @@
33991 #define MAX_CONFIG_LEN 40
33992
33993 static struct kgdb_io kgdboc_io_ops;
33994+static struct kgdb_io kgdboc_io_ops_console;
33995
33996-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
33997+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
33998 static int configured = -1;
33999
34000 static char config[MAX_CONFIG_LEN];
34001@@ -147,6 +148,8 @@ static void cleanup_kgdboc(void)
34002 kgdboc_unregister_kbd();
34003 if (configured == 1)
34004 kgdb_unregister_io_module(&kgdboc_io_ops);
34005+ else if (configured == 2)
34006+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
34007 }
34008
34009 static int configure_kgdboc(void)
34010@@ -156,13 +159,13 @@ static int configure_kgdboc(void)
34011 int err;
34012 char *cptr = config;
34013 struct console *cons;
34014+ int is_console = 0;
34015
34016 err = kgdboc_option_setup(config);
34017 if (err || !strlen(config) || isspace(config[0]))
34018 goto noconfig;
34019
34020 err = -ENODEV;
34021- kgdboc_io_ops.is_console = 0;
34022 kgdb_tty_driver = NULL;
34023
34024 kgdboc_use_kms = 0;
34025@@ -183,7 +186,7 @@ static int configure_kgdboc(void)
34026 int idx;
34027 if (cons->device && cons->device(cons, &idx) == p &&
34028 idx == tty_line) {
34029- kgdboc_io_ops.is_console = 1;
34030+ is_console = 1;
34031 break;
34032 }
34033 cons = cons->next;
34034@@ -193,12 +196,16 @@ static int configure_kgdboc(void)
34035 kgdb_tty_line = tty_line;
34036
34037 do_register:
34038- err = kgdb_register_io_module(&kgdboc_io_ops);
34039+ if (is_console) {
34040+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
34041+ configured = 2;
34042+ } else {
34043+ err = kgdb_register_io_module(&kgdboc_io_ops);
34044+ configured = 1;
34045+ }
34046 if (err)
34047 goto noconfig;
34048
34049- configured = 1;
34050-
34051 return 0;
34052
34053 noconfig:
34054@@ -212,7 +219,7 @@ noconfig:
34055 static int __init init_kgdboc(void)
34056 {
34057 /* Already configured? */
34058- if (configured == 1)
34059+ if (configured >= 1)
34060 return 0;
34061
34062 return configure_kgdboc();
34063@@ -261,7 +268,7 @@ static int param_set_kgdboc_var(const ch
34064 if (config[len - 1] == '\n')
34065 config[len - 1] = '\0';
34066
34067- if (configured == 1)
34068+ if (configured >= 1)
34069 cleanup_kgdboc();
34070
34071 /* Go and configure with the new params. */
34072@@ -301,6 +308,15 @@ static struct kgdb_io kgdboc_io_ops = {
34073 .post_exception = kgdboc_post_exp_handler,
34074 };
34075
34076+static struct kgdb_io kgdboc_io_ops_console = {
34077+ .name = "kgdboc",
34078+ .read_char = kgdboc_get_char,
34079+ .write_char = kgdboc_put_char,
34080+ .pre_exception = kgdboc_pre_exp_handler,
34081+ .post_exception = kgdboc_post_exp_handler,
34082+ .is_console = 1
34083+};
34084+
34085 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
34086 /* This is only available if kgdboc is a built in for early debugging */
34087 static int __init kgdboc_early_init(char *opt)
34088diff -urNp linux-3.0.4/drivers/tty/serial/mrst_max3110.c linux-3.0.4/drivers/tty/serial/mrst_max3110.c
34089--- linux-3.0.4/drivers/tty/serial/mrst_max3110.c 2011-07-21 22:17:23.000000000 -0400
34090+++ linux-3.0.4/drivers/tty/serial/mrst_max3110.c 2011-08-23 21:48:14.000000000 -0400
34091@@ -393,6 +393,8 @@ static void max3110_con_receive(struct u
34092 int loop = 1, num, total = 0;
34093 u8 recv_buf[512], *pbuf;
34094
34095+ pax_track_stack();
34096+
34097 pbuf = recv_buf;
34098 do {
34099 num = max3110_read_multi(max, pbuf);
34100diff -urNp linux-3.0.4/drivers/tty/tty_io.c linux-3.0.4/drivers/tty/tty_io.c
34101--- linux-3.0.4/drivers/tty/tty_io.c 2011-07-21 22:17:23.000000000 -0400
34102+++ linux-3.0.4/drivers/tty/tty_io.c 2011-08-23 21:47:56.000000000 -0400
34103@@ -3215,7 +3215,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
34104
34105 void tty_default_fops(struct file_operations *fops)
34106 {
34107- *fops = tty_fops;
34108+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
34109 }
34110
34111 /*
34112diff -urNp linux-3.0.4/drivers/tty/tty_ldisc.c linux-3.0.4/drivers/tty/tty_ldisc.c
34113--- linux-3.0.4/drivers/tty/tty_ldisc.c 2011-07-21 22:17:23.000000000 -0400
34114+++ linux-3.0.4/drivers/tty/tty_ldisc.c 2011-08-23 21:47:56.000000000 -0400
34115@@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
34116 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
34117 struct tty_ldisc_ops *ldo = ld->ops;
34118
34119- ldo->refcount--;
34120+ atomic_dec(&ldo->refcount);
34121 module_put(ldo->owner);
34122 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
34123
34124@@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
34125 spin_lock_irqsave(&tty_ldisc_lock, flags);
34126 tty_ldiscs[disc] = new_ldisc;
34127 new_ldisc->num = disc;
34128- new_ldisc->refcount = 0;
34129+ atomic_set(&new_ldisc->refcount, 0);
34130 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
34131
34132 return ret;
34133@@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
34134 return -EINVAL;
34135
34136 spin_lock_irqsave(&tty_ldisc_lock, flags);
34137- if (tty_ldiscs[disc]->refcount)
34138+ if (atomic_read(&tty_ldiscs[disc]->refcount))
34139 ret = -EBUSY;
34140 else
34141 tty_ldiscs[disc] = NULL;
34142@@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
34143 if (ldops) {
34144 ret = ERR_PTR(-EAGAIN);
34145 if (try_module_get(ldops->owner)) {
34146- ldops->refcount++;
34147+ atomic_inc(&ldops->refcount);
34148 ret = ldops;
34149 }
34150 }
34151@@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
34152 unsigned long flags;
34153
34154 spin_lock_irqsave(&tty_ldisc_lock, flags);
34155- ldops->refcount--;
34156+ atomic_dec(&ldops->refcount);
34157 module_put(ldops->owner);
34158 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
34159 }
34160diff -urNp linux-3.0.4/drivers/tty/vt/keyboard.c linux-3.0.4/drivers/tty/vt/keyboard.c
34161--- linux-3.0.4/drivers/tty/vt/keyboard.c 2011-07-21 22:17:23.000000000 -0400
34162+++ linux-3.0.4/drivers/tty/vt/keyboard.c 2011-08-23 21:48:14.000000000 -0400
34163@@ -656,6 +656,16 @@ static void k_spec(struct vc_data *vc, u
34164 kbd->kbdmode == VC_OFF) &&
34165 value != KVAL(K_SAK))
34166 return; /* SAK is allowed even in raw mode */
34167+
34168+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
34169+ {
34170+ void *func = fn_handler[value];
34171+ if (func == fn_show_state || func == fn_show_ptregs ||
34172+ func == fn_show_mem)
34173+ return;
34174+ }
34175+#endif
34176+
34177 fn_handler[value](vc);
34178 }
34179
34180diff -urNp linux-3.0.4/drivers/tty/vt/vt.c linux-3.0.4/drivers/tty/vt/vt.c
34181--- linux-3.0.4/drivers/tty/vt/vt.c 2011-07-21 22:17:23.000000000 -0400
34182+++ linux-3.0.4/drivers/tty/vt/vt.c 2011-08-23 21:47:56.000000000 -0400
34183@@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
34184
34185 static void notify_write(struct vc_data *vc, unsigned int unicode)
34186 {
34187- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
34188+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
34189 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
34190 }
34191
34192diff -urNp linux-3.0.4/drivers/tty/vt/vt_ioctl.c linux-3.0.4/drivers/tty/vt/vt_ioctl.c
34193--- linux-3.0.4/drivers/tty/vt/vt_ioctl.c 2011-07-21 22:17:23.000000000 -0400
34194+++ linux-3.0.4/drivers/tty/vt/vt_ioctl.c 2011-08-23 21:48:14.000000000 -0400
34195@@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
34196 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
34197 return -EFAULT;
34198
34199- if (!capable(CAP_SYS_TTY_CONFIG))
34200- perm = 0;
34201-
34202 switch (cmd) {
34203 case KDGKBENT:
34204 key_map = key_maps[s];
34205@@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __
34206 val = (i ? K_HOLE : K_NOSUCHMAP);
34207 return put_user(val, &user_kbe->kb_value);
34208 case KDSKBENT:
34209+ if (!capable(CAP_SYS_TTY_CONFIG))
34210+ perm = 0;
34211+
34212 if (!perm)
34213 return -EPERM;
34214 if (!i && v == K_NOSUCHMAP) {
34215@@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
34216 int i, j, k;
34217 int ret;
34218
34219- if (!capable(CAP_SYS_TTY_CONFIG))
34220- perm = 0;
34221-
34222 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
34223 if (!kbs) {
34224 ret = -ENOMEM;
34225@@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
34226 kfree(kbs);
34227 return ((p && *p) ? -EOVERFLOW : 0);
34228 case KDSKBSENT:
34229+ if (!capable(CAP_SYS_TTY_CONFIG))
34230+ perm = 0;
34231+
34232 if (!perm) {
34233 ret = -EPERM;
34234 goto reterr;
34235diff -urNp linux-3.0.4/drivers/uio/uio.c linux-3.0.4/drivers/uio/uio.c
34236--- linux-3.0.4/drivers/uio/uio.c 2011-07-21 22:17:23.000000000 -0400
34237+++ linux-3.0.4/drivers/uio/uio.c 2011-08-23 21:47:56.000000000 -0400
34238@@ -25,6 +25,7 @@
34239 #include <linux/kobject.h>
34240 #include <linux/cdev.h>
34241 #include <linux/uio_driver.h>
34242+#include <asm/local.h>
34243
34244 #define UIO_MAX_DEVICES (1U << MINORBITS)
34245
34246@@ -32,10 +33,10 @@ struct uio_device {
34247 struct module *owner;
34248 struct device *dev;
34249 int minor;
34250- atomic_t event;
34251+ atomic_unchecked_t event;
34252 struct fasync_struct *async_queue;
34253 wait_queue_head_t wait;
34254- int vma_count;
34255+ local_t vma_count;
34256 struct uio_info *info;
34257 struct kobject *map_dir;
34258 struct kobject *portio_dir;
34259@@ -242,7 +243,7 @@ static ssize_t show_event(struct device
34260 struct device_attribute *attr, char *buf)
34261 {
34262 struct uio_device *idev = dev_get_drvdata(dev);
34263- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
34264+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
34265 }
34266
34267 static struct device_attribute uio_class_attributes[] = {
34268@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *i
34269 {
34270 struct uio_device *idev = info->uio_dev;
34271
34272- atomic_inc(&idev->event);
34273+ atomic_inc_unchecked(&idev->event);
34274 wake_up_interruptible(&idev->wait);
34275 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
34276 }
34277@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode,
34278 }
34279
34280 listener->dev = idev;
34281- listener->event_count = atomic_read(&idev->event);
34282+ listener->event_count = atomic_read_unchecked(&idev->event);
34283 filep->private_data = listener;
34284
34285 if (idev->info->open) {
34286@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file
34287 return -EIO;
34288
34289 poll_wait(filep, &idev->wait, wait);
34290- if (listener->event_count != atomic_read(&idev->event))
34291+ if (listener->event_count != atomic_read_unchecked(&idev->event))
34292 return POLLIN | POLLRDNORM;
34293 return 0;
34294 }
34295@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *fil
34296 do {
34297 set_current_state(TASK_INTERRUPTIBLE);
34298
34299- event_count = atomic_read(&idev->event);
34300+ event_count = atomic_read_unchecked(&idev->event);
34301 if (event_count != listener->event_count) {
34302 if (copy_to_user(buf, &event_count, count))
34303 retval = -EFAULT;
34304@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_
34305 static void uio_vma_open(struct vm_area_struct *vma)
34306 {
34307 struct uio_device *idev = vma->vm_private_data;
34308- idev->vma_count++;
34309+ local_inc(&idev->vma_count);
34310 }
34311
34312 static void uio_vma_close(struct vm_area_struct *vma)
34313 {
34314 struct uio_device *idev = vma->vm_private_data;
34315- idev->vma_count--;
34316+ local_dec(&idev->vma_count);
34317 }
34318
34319 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
34320@@ -823,7 +824,7 @@ int __uio_register_device(struct module
34321 idev->owner = owner;
34322 idev->info = info;
34323 init_waitqueue_head(&idev->wait);
34324- atomic_set(&idev->event, 0);
34325+ atomic_set_unchecked(&idev->event, 0);
34326
34327 ret = uio_get_minor(idev);
34328 if (ret)
34329diff -urNp linux-3.0.4/drivers/usb/atm/cxacru.c linux-3.0.4/drivers/usb/atm/cxacru.c
34330--- linux-3.0.4/drivers/usb/atm/cxacru.c 2011-07-21 22:17:23.000000000 -0400
34331+++ linux-3.0.4/drivers/usb/atm/cxacru.c 2011-08-23 21:47:56.000000000 -0400
34332@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_c
34333 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
34334 if (ret < 2)
34335 return -EINVAL;
34336- if (index < 0 || index > 0x7f)
34337+ if (index > 0x7f)
34338 return -EINVAL;
34339 pos += tmp;
34340
34341diff -urNp linux-3.0.4/drivers/usb/atm/usbatm.c linux-3.0.4/drivers/usb/atm/usbatm.c
34342--- linux-3.0.4/drivers/usb/atm/usbatm.c 2011-07-21 22:17:23.000000000 -0400
34343+++ linux-3.0.4/drivers/usb/atm/usbatm.c 2011-08-23 21:47:56.000000000 -0400
34344@@ -332,7 +332,7 @@ static void usbatm_extract_one_cell(stru
34345 if (printk_ratelimit())
34346 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
34347 __func__, vpi, vci);
34348- atomic_inc(&vcc->stats->rx_err);
34349+ atomic_inc_unchecked(&vcc->stats->rx_err);
34350 return;
34351 }
34352
34353@@ -360,7 +360,7 @@ static void usbatm_extract_one_cell(stru
34354 if (length > ATM_MAX_AAL5_PDU) {
34355 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
34356 __func__, length, vcc);
34357- atomic_inc(&vcc->stats->rx_err);
34358+ atomic_inc_unchecked(&vcc->stats->rx_err);
34359 goto out;
34360 }
34361
34362@@ -369,14 +369,14 @@ static void usbatm_extract_one_cell(stru
34363 if (sarb->len < pdu_length) {
34364 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
34365 __func__, pdu_length, sarb->len, vcc);
34366- atomic_inc(&vcc->stats->rx_err);
34367+ atomic_inc_unchecked(&vcc->stats->rx_err);
34368 goto out;
34369 }
34370
34371 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
34372 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
34373 __func__, vcc);
34374- atomic_inc(&vcc->stats->rx_err);
34375+ atomic_inc_unchecked(&vcc->stats->rx_err);
34376 goto out;
34377 }
34378
34379@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(stru
34380 if (printk_ratelimit())
34381 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
34382 __func__, length);
34383- atomic_inc(&vcc->stats->rx_drop);
34384+ atomic_inc_unchecked(&vcc->stats->rx_drop);
34385 goto out;
34386 }
34387
34388@@ -411,7 +411,7 @@ static void usbatm_extract_one_cell(stru
34389
34390 vcc->push(vcc, skb);
34391
34392- atomic_inc(&vcc->stats->rx);
34393+ atomic_inc_unchecked(&vcc->stats->rx);
34394 out:
34395 skb_trim(sarb, 0);
34396 }
34397@@ -614,7 +614,7 @@ static void usbatm_tx_process(unsigned l
34398 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
34399
34400 usbatm_pop(vcc, skb);
34401- atomic_inc(&vcc->stats->tx);
34402+ atomic_inc_unchecked(&vcc->stats->tx);
34403
34404 skb = skb_dequeue(&instance->sndqueue);
34405 }
34406@@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct a
34407 if (!left--)
34408 return sprintf(page,
34409 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
34410- atomic_read(&atm_dev->stats.aal5.tx),
34411- atomic_read(&atm_dev->stats.aal5.tx_err),
34412- atomic_read(&atm_dev->stats.aal5.rx),
34413- atomic_read(&atm_dev->stats.aal5.rx_err),
34414- atomic_read(&atm_dev->stats.aal5.rx_drop));
34415+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
34416+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
34417+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
34418+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
34419+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
34420
34421 if (!left--) {
34422 if (instance->disconnected)
34423diff -urNp linux-3.0.4/drivers/usb/core/devices.c linux-3.0.4/drivers/usb/core/devices.c
34424--- linux-3.0.4/drivers/usb/core/devices.c 2011-07-21 22:17:23.000000000 -0400
34425+++ linux-3.0.4/drivers/usb/core/devices.c 2011-08-23 21:47:56.000000000 -0400
34426@@ -126,7 +126,7 @@ static const char format_endpt[] =
34427 * time it gets called.
34428 */
34429 static struct device_connect_event {
34430- atomic_t count;
34431+ atomic_unchecked_t count;
34432 wait_queue_head_t wait;
34433 } device_event = {
34434 .count = ATOMIC_INIT(1),
34435@@ -164,7 +164,7 @@ static const struct class_info clas_info
34436
34437 void usbfs_conn_disc_event(void)
34438 {
34439- atomic_add(2, &device_event.count);
34440+ atomic_add_unchecked(2, &device_event.count);
34441 wake_up(&device_event.wait);
34442 }
34443
34444@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(stru
34445
34446 poll_wait(file, &device_event.wait, wait);
34447
34448- event_count = atomic_read(&device_event.count);
34449+ event_count = atomic_read_unchecked(&device_event.count);
34450 if (file->f_version != event_count) {
34451 file->f_version = event_count;
34452 return POLLIN | POLLRDNORM;
34453diff -urNp linux-3.0.4/drivers/usb/core/message.c linux-3.0.4/drivers/usb/core/message.c
34454--- linux-3.0.4/drivers/usb/core/message.c 2011-07-21 22:17:23.000000000 -0400
34455+++ linux-3.0.4/drivers/usb/core/message.c 2011-08-23 21:47:56.000000000 -0400
34456@@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device
34457 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
34458 if (buf) {
34459 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
34460- if (len > 0) {
34461- smallbuf = kmalloc(++len, GFP_NOIO);
34462+ if (len++ > 0) {
34463+ smallbuf = kmalloc(len, GFP_NOIO);
34464 if (!smallbuf)
34465 return buf;
34466 memcpy(smallbuf, buf, len);
34467diff -urNp linux-3.0.4/drivers/usb/early/ehci-dbgp.c linux-3.0.4/drivers/usb/early/ehci-dbgp.c
34468--- linux-3.0.4/drivers/usb/early/ehci-dbgp.c 2011-07-21 22:17:23.000000000 -0400
34469+++ linux-3.0.4/drivers/usb/early/ehci-dbgp.c 2011-08-23 21:47:56.000000000 -0400
34470@@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x,
34471
34472 #ifdef CONFIG_KGDB
34473 static struct kgdb_io kgdbdbgp_io_ops;
34474-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
34475+static struct kgdb_io kgdbdbgp_io_ops_console;
34476+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
34477 #else
34478 #define dbgp_kgdb_mode (0)
34479 #endif
34480@@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops =
34481 .write_char = kgdbdbgp_write_char,
34482 };
34483
34484+static struct kgdb_io kgdbdbgp_io_ops_console = {
34485+ .name = "kgdbdbgp",
34486+ .read_char = kgdbdbgp_read_char,
34487+ .write_char = kgdbdbgp_write_char,
34488+ .is_console = 1
34489+};
34490+
34491 static int kgdbdbgp_wait_time;
34492
34493 static int __init kgdbdbgp_parse_config(char *str)
34494@@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(
34495 ptr++;
34496 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
34497 }
34498- kgdb_register_io_module(&kgdbdbgp_io_ops);
34499- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
34500+ if (early_dbgp_console.index != -1)
34501+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
34502+ else
34503+ kgdb_register_io_module(&kgdbdbgp_io_ops);
34504
34505 return 0;
34506 }
34507diff -urNp linux-3.0.4/drivers/usb/host/xhci-mem.c linux-3.0.4/drivers/usb/host/xhci-mem.c
34508--- linux-3.0.4/drivers/usb/host/xhci-mem.c 2011-07-21 22:17:23.000000000 -0400
34509+++ linux-3.0.4/drivers/usb/host/xhci-mem.c 2011-08-23 21:48:14.000000000 -0400
34510@@ -1685,6 +1685,8 @@ static int xhci_check_trb_in_td_math(str
34511 unsigned int num_tests;
34512 int i, ret;
34513
34514+ pax_track_stack();
34515+
34516 num_tests = ARRAY_SIZE(simple_test_vector);
34517 for (i = 0; i < num_tests; i++) {
34518 ret = xhci_test_trb_in_td(xhci,
34519diff -urNp linux-3.0.4/drivers/usb/wusbcore/wa-hc.h linux-3.0.4/drivers/usb/wusbcore/wa-hc.h
34520--- linux-3.0.4/drivers/usb/wusbcore/wa-hc.h 2011-07-21 22:17:23.000000000 -0400
34521+++ linux-3.0.4/drivers/usb/wusbcore/wa-hc.h 2011-08-23 21:47:56.000000000 -0400
34522@@ -192,7 +192,7 @@ struct wahc {
34523 struct list_head xfer_delayed_list;
34524 spinlock_t xfer_list_lock;
34525 struct work_struct xfer_work;
34526- atomic_t xfer_id_count;
34527+ atomic_unchecked_t xfer_id_count;
34528 };
34529
34530
34531@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
34532 INIT_LIST_HEAD(&wa->xfer_delayed_list);
34533 spin_lock_init(&wa->xfer_list_lock);
34534 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
34535- atomic_set(&wa->xfer_id_count, 1);
34536+ atomic_set_unchecked(&wa->xfer_id_count, 1);
34537 }
34538
34539 /**
34540diff -urNp linux-3.0.4/drivers/usb/wusbcore/wa-xfer.c linux-3.0.4/drivers/usb/wusbcore/wa-xfer.c
34541--- linux-3.0.4/drivers/usb/wusbcore/wa-xfer.c 2011-07-21 22:17:23.000000000 -0400
34542+++ linux-3.0.4/drivers/usb/wusbcore/wa-xfer.c 2011-08-23 21:47:56.000000000 -0400
34543@@ -294,7 +294,7 @@ out:
34544 */
34545 static void wa_xfer_id_init(struct wa_xfer *xfer)
34546 {
34547- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
34548+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
34549 }
34550
34551 /*
34552diff -urNp linux-3.0.4/drivers/vhost/vhost.c linux-3.0.4/drivers/vhost/vhost.c
34553--- linux-3.0.4/drivers/vhost/vhost.c 2011-07-21 22:17:23.000000000 -0400
34554+++ linux-3.0.4/drivers/vhost/vhost.c 2011-08-23 21:47:56.000000000 -0400
34555@@ -589,7 +589,7 @@ static int init_used(struct vhost_virtqu
34556 return get_user(vq->last_used_idx, &used->idx);
34557 }
34558
34559-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
34560+static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
34561 {
34562 struct file *eventfp, *filep = NULL,
34563 *pollstart = NULL, *pollstop = NULL;
34564diff -urNp linux-3.0.4/drivers/video/fbcmap.c linux-3.0.4/drivers/video/fbcmap.c
34565--- linux-3.0.4/drivers/video/fbcmap.c 2011-07-21 22:17:23.000000000 -0400
34566+++ linux-3.0.4/drivers/video/fbcmap.c 2011-08-23 21:47:56.000000000 -0400
34567@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user
34568 rc = -ENODEV;
34569 goto out;
34570 }
34571- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
34572- !info->fbops->fb_setcmap)) {
34573+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
34574 rc = -EINVAL;
34575 goto out1;
34576 }
34577diff -urNp linux-3.0.4/drivers/video/fbmem.c linux-3.0.4/drivers/video/fbmem.c
34578--- linux-3.0.4/drivers/video/fbmem.c 2011-07-21 22:17:23.000000000 -0400
34579+++ linux-3.0.4/drivers/video/fbmem.c 2011-08-23 21:48:14.000000000 -0400
34580@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_in
34581 image->dx += image->width + 8;
34582 }
34583 } else if (rotate == FB_ROTATE_UD) {
34584- for (x = 0; x < num && image->dx >= 0; x++) {
34585+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
34586 info->fbops->fb_imageblit(info, image);
34587 image->dx -= image->width + 8;
34588 }
34589@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_in
34590 image->dy += image->height + 8;
34591 }
34592 } else if (rotate == FB_ROTATE_CCW) {
34593- for (x = 0; x < num && image->dy >= 0; x++) {
34594+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
34595 info->fbops->fb_imageblit(info, image);
34596 image->dy -= image->height + 8;
34597 }
34598@@ -939,6 +939,8 @@ fb_set_var(struct fb_info *info, struct
34599 int flags = info->flags;
34600 int ret = 0;
34601
34602+ pax_track_stack();
34603+
34604 if (var->activate & FB_ACTIVATE_INV_MODE) {
34605 struct fb_videomode mode1, mode2;
34606
34607@@ -1064,6 +1066,8 @@ static long do_fb_ioctl(struct fb_info *
34608 void __user *argp = (void __user *)arg;
34609 long ret = 0;
34610
34611+ pax_track_stack();
34612+
34613 switch (cmd) {
34614 case FBIOGET_VSCREENINFO:
34615 if (!lock_fb_info(info))
34616@@ -1143,7 +1147,7 @@ static long do_fb_ioctl(struct fb_info *
34617 return -EFAULT;
34618 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
34619 return -EINVAL;
34620- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
34621+ if (con2fb.framebuffer >= FB_MAX)
34622 return -EINVAL;
34623 if (!registered_fb[con2fb.framebuffer])
34624 request_module("fb%d", con2fb.framebuffer);
34625diff -urNp linux-3.0.4/drivers/video/i810/i810_accel.c linux-3.0.4/drivers/video/i810/i810_accel.c
34626--- linux-3.0.4/drivers/video/i810/i810_accel.c 2011-07-21 22:17:23.000000000 -0400
34627+++ linux-3.0.4/drivers/video/i810/i810_accel.c 2011-08-23 21:47:56.000000000 -0400
34628@@ -73,6 +73,7 @@ static inline int wait_for_space(struct
34629 }
34630 }
34631 printk("ringbuffer lockup!!!\n");
34632+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
34633 i810_report_error(mmio);
34634 par->dev_flags |= LOCKUP;
34635 info->pixmap.scan_align = 1;
34636diff -urNp linux-3.0.4/drivers/video/logo/logo_linux_clut224.ppm linux-3.0.4/drivers/video/logo/logo_linux_clut224.ppm
34637--- linux-3.0.4/drivers/video/logo/logo_linux_clut224.ppm 2011-07-21 22:17:23.000000000 -0400
34638+++ linux-3.0.4/drivers/video/logo/logo_linux_clut224.ppm 2011-08-29 23:49:40.000000000 -0400
34639@@ -1,1604 +1,1123 @@
34640 P3
34641-# Standard 224-color Linux logo
34642 80 80
34643 255
34644- 0 0 0 0 0 0 0 0 0 0 0 0
34645- 0 0 0 0 0 0 0 0 0 0 0 0
34646- 0 0 0 0 0 0 0 0 0 0 0 0
34647- 0 0 0 0 0 0 0 0 0 0 0 0
34648- 0 0 0 0 0 0 0 0 0 0 0 0
34649- 0 0 0 0 0 0 0 0 0 0 0 0
34650- 0 0 0 0 0 0 0 0 0 0 0 0
34651- 0 0 0 0 0 0 0 0 0 0 0 0
34652- 0 0 0 0 0 0 0 0 0 0 0 0
34653- 6 6 6 6 6 6 10 10 10 10 10 10
34654- 10 10 10 6 6 6 6 6 6 6 6 6
34655- 0 0 0 0 0 0 0 0 0 0 0 0
34656- 0 0 0 0 0 0 0 0 0 0 0 0
34657- 0 0 0 0 0 0 0 0 0 0 0 0
34658- 0 0 0 0 0 0 0 0 0 0 0 0
34659- 0 0 0 0 0 0 0 0 0 0 0 0
34660- 0 0 0 0 0 0 0 0 0 0 0 0
34661- 0 0 0 0 0 0 0 0 0 0 0 0
34662- 0 0 0 0 0 0 0 0 0 0 0 0
34663- 0 0 0 0 0 0 0 0 0 0 0 0
34664- 0 0 0 0 0 0 0 0 0 0 0 0
34665- 0 0 0 0 0 0 0 0 0 0 0 0
34666- 0 0 0 0 0 0 0 0 0 0 0 0
34667- 0 0 0 0 0 0 0 0 0 0 0 0
34668- 0 0 0 0 0 0 0 0 0 0 0 0
34669- 0 0 0 0 0 0 0 0 0 0 0 0
34670- 0 0 0 0 0 0 0 0 0 0 0 0
34671- 0 0 0 0 0 0 0 0 0 0 0 0
34672- 0 0 0 6 6 6 10 10 10 14 14 14
34673- 22 22 22 26 26 26 30 30 30 34 34 34
34674- 30 30 30 30 30 30 26 26 26 18 18 18
34675- 14 14 14 10 10 10 6 6 6 0 0 0
34676- 0 0 0 0 0 0 0 0 0 0 0 0
34677- 0 0 0 0 0 0 0 0 0 0 0 0
34678- 0 0 0 0 0 0 0 0 0 0 0 0
34679- 0 0 0 0 0 0 0 0 0 0 0 0
34680- 0 0 0 0 0 0 0 0 0 0 0 0
34681- 0 0 0 0 0 0 0 0 0 0 0 0
34682- 0 0 0 0 0 0 0 0 0 0 0 0
34683- 0 0 0 0 0 0 0 0 0 0 0 0
34684- 0 0 0 0 0 0 0 0 0 0 0 0
34685- 0 0 0 0 0 1 0 0 1 0 0 0
34686- 0 0 0 0 0 0 0 0 0 0 0 0
34687- 0 0 0 0 0 0 0 0 0 0 0 0
34688- 0 0 0 0 0 0 0 0 0 0 0 0
34689- 0 0 0 0 0 0 0 0 0 0 0 0
34690- 0 0 0 0 0 0 0 0 0 0 0 0
34691- 0 0 0 0 0 0 0 0 0 0 0 0
34692- 6 6 6 14 14 14 26 26 26 42 42 42
34693- 54 54 54 66 66 66 78 78 78 78 78 78
34694- 78 78 78 74 74 74 66 66 66 54 54 54
34695- 42 42 42 26 26 26 18 18 18 10 10 10
34696- 6 6 6 0 0 0 0 0 0 0 0 0
34697- 0 0 0 0 0 0 0 0 0 0 0 0
34698- 0 0 0 0 0 0 0 0 0 0 0 0
34699- 0 0 0 0 0 0 0 0 0 0 0 0
34700- 0 0 0 0 0 0 0 0 0 0 0 0
34701- 0 0 0 0 0 0 0 0 0 0 0 0
34702- 0 0 0 0 0 0 0 0 0 0 0 0
34703- 0 0 0 0 0 0 0 0 0 0 0 0
34704- 0 0 0 0 0 0 0 0 0 0 0 0
34705- 0 0 1 0 0 0 0 0 0 0 0 0
34706- 0 0 0 0 0 0 0 0 0 0 0 0
34707- 0 0 0 0 0 0 0 0 0 0 0 0
34708- 0 0 0 0 0 0 0 0 0 0 0 0
34709- 0 0 0 0 0 0 0 0 0 0 0 0
34710- 0 0 0 0 0 0 0 0 0 0 0 0
34711- 0 0 0 0 0 0 0 0 0 10 10 10
34712- 22 22 22 42 42 42 66 66 66 86 86 86
34713- 66 66 66 38 38 38 38 38 38 22 22 22
34714- 26 26 26 34 34 34 54 54 54 66 66 66
34715- 86 86 86 70 70 70 46 46 46 26 26 26
34716- 14 14 14 6 6 6 0 0 0 0 0 0
34717- 0 0 0 0 0 0 0 0 0 0 0 0
34718- 0 0 0 0 0 0 0 0 0 0 0 0
34719- 0 0 0 0 0 0 0 0 0 0 0 0
34720- 0 0 0 0 0 0 0 0 0 0 0 0
34721- 0 0 0 0 0 0 0 0 0 0 0 0
34722- 0 0 0 0 0 0 0 0 0 0 0 0
34723- 0 0 0 0 0 0 0 0 0 0 0 0
34724- 0 0 0 0 0 0 0 0 0 0 0 0
34725- 0 0 1 0 0 1 0 0 1 0 0 0
34726- 0 0 0 0 0 0 0 0 0 0 0 0
34727- 0 0 0 0 0 0 0 0 0 0 0 0
34728- 0 0 0 0 0 0 0 0 0 0 0 0
34729- 0 0 0 0 0 0 0 0 0 0 0 0
34730- 0 0 0 0 0 0 0 0 0 0 0 0
34731- 0 0 0 0 0 0 10 10 10 26 26 26
34732- 50 50 50 82 82 82 58 58 58 6 6 6
34733- 2 2 6 2 2 6 2 2 6 2 2 6
34734- 2 2 6 2 2 6 2 2 6 2 2 6
34735- 6 6 6 54 54 54 86 86 86 66 66 66
34736- 38 38 38 18 18 18 6 6 6 0 0 0
34737- 0 0 0 0 0 0 0 0 0 0 0 0
34738- 0 0 0 0 0 0 0 0 0 0 0 0
34739- 0 0 0 0 0 0 0 0 0 0 0 0
34740- 0 0 0 0 0 0 0 0 0 0 0 0
34741- 0 0 0 0 0 0 0 0 0 0 0 0
34742- 0 0 0 0 0 0 0 0 0 0 0 0
34743- 0 0 0 0 0 0 0 0 0 0 0 0
34744- 0 0 0 0 0 0 0 0 0 0 0 0
34745- 0 0 0 0 0 0 0 0 0 0 0 0
34746- 0 0 0 0 0 0 0 0 0 0 0 0
34747- 0 0 0 0 0 0 0 0 0 0 0 0
34748- 0 0 0 0 0 0 0 0 0 0 0 0
34749- 0 0 0 0 0 0 0 0 0 0 0 0
34750- 0 0 0 0 0 0 0 0 0 0 0 0
34751- 0 0 0 6 6 6 22 22 22 50 50 50
34752- 78 78 78 34 34 34 2 2 6 2 2 6
34753- 2 2 6 2 2 6 2 2 6 2 2 6
34754- 2 2 6 2 2 6 2 2 6 2 2 6
34755- 2 2 6 2 2 6 6 6 6 70 70 70
34756- 78 78 78 46 46 46 22 22 22 6 6 6
34757- 0 0 0 0 0 0 0 0 0 0 0 0
34758- 0 0 0 0 0 0 0 0 0 0 0 0
34759- 0 0 0 0 0 0 0 0 0 0 0 0
34760- 0 0 0 0 0 0 0 0 0 0 0 0
34761- 0 0 0 0 0 0 0 0 0 0 0 0
34762- 0 0 0 0 0 0 0 0 0 0 0 0
34763- 0 0 0 0 0 0 0 0 0 0 0 0
34764- 0 0 0 0 0 0 0 0 0 0 0 0
34765- 0 0 1 0 0 1 0 0 1 0 0 0
34766- 0 0 0 0 0 0 0 0 0 0 0 0
34767- 0 0 0 0 0 0 0 0 0 0 0 0
34768- 0 0 0 0 0 0 0 0 0 0 0 0
34769- 0 0 0 0 0 0 0 0 0 0 0 0
34770- 0 0 0 0 0 0 0 0 0 0 0 0
34771- 6 6 6 18 18 18 42 42 42 82 82 82
34772- 26 26 26 2 2 6 2 2 6 2 2 6
34773- 2 2 6 2 2 6 2 2 6 2 2 6
34774- 2 2 6 2 2 6 2 2 6 14 14 14
34775- 46 46 46 34 34 34 6 6 6 2 2 6
34776- 42 42 42 78 78 78 42 42 42 18 18 18
34777- 6 6 6 0 0 0 0 0 0 0 0 0
34778- 0 0 0 0 0 0 0 0 0 0 0 0
34779- 0 0 0 0 0 0 0 0 0 0 0 0
34780- 0 0 0 0 0 0 0 0 0 0 0 0
34781- 0 0 0 0 0 0 0 0 0 0 0 0
34782- 0 0 0 0 0 0 0 0 0 0 0 0
34783- 0 0 0 0 0 0 0 0 0 0 0 0
34784- 0 0 0 0 0 0 0 0 0 0 0 0
34785- 0 0 1 0 0 0 0 0 1 0 0 0
34786- 0 0 0 0 0 0 0 0 0 0 0 0
34787- 0 0 0 0 0 0 0 0 0 0 0 0
34788- 0 0 0 0 0 0 0 0 0 0 0 0
34789- 0 0 0 0 0 0 0 0 0 0 0 0
34790- 0 0 0 0 0 0 0 0 0 0 0 0
34791- 10 10 10 30 30 30 66 66 66 58 58 58
34792- 2 2 6 2 2 6 2 2 6 2 2 6
34793- 2 2 6 2 2 6 2 2 6 2 2 6
34794- 2 2 6 2 2 6 2 2 6 26 26 26
34795- 86 86 86 101 101 101 46 46 46 10 10 10
34796- 2 2 6 58 58 58 70 70 70 34 34 34
34797- 10 10 10 0 0 0 0 0 0 0 0 0
34798- 0 0 0 0 0 0 0 0 0 0 0 0
34799- 0 0 0 0 0 0 0 0 0 0 0 0
34800- 0 0 0 0 0 0 0 0 0 0 0 0
34801- 0 0 0 0 0 0 0 0 0 0 0 0
34802- 0 0 0 0 0 0 0 0 0 0 0 0
34803- 0 0 0 0 0 0 0 0 0 0 0 0
34804- 0 0 0 0 0 0 0 0 0 0 0 0
34805- 0 0 1 0 0 1 0 0 1 0 0 0
34806- 0 0 0 0 0 0 0 0 0 0 0 0
34807- 0 0 0 0 0 0 0 0 0 0 0 0
34808- 0 0 0 0 0 0 0 0 0 0 0 0
34809- 0 0 0 0 0 0 0 0 0 0 0 0
34810- 0 0 0 0 0 0 0 0 0 0 0 0
34811- 14 14 14 42 42 42 86 86 86 10 10 10
34812- 2 2 6 2 2 6 2 2 6 2 2 6
34813- 2 2 6 2 2 6 2 2 6 2 2 6
34814- 2 2 6 2 2 6 2 2 6 30 30 30
34815- 94 94 94 94 94 94 58 58 58 26 26 26
34816- 2 2 6 6 6 6 78 78 78 54 54 54
34817- 22 22 22 6 6 6 0 0 0 0 0 0
34818- 0 0 0 0 0 0 0 0 0 0 0 0
34819- 0 0 0 0 0 0 0 0 0 0 0 0
34820- 0 0 0 0 0 0 0 0 0 0 0 0
34821- 0 0 0 0 0 0 0 0 0 0 0 0
34822- 0 0 0 0 0 0 0 0 0 0 0 0
34823- 0 0 0 0 0 0 0 0 0 0 0 0
34824- 0 0 0 0 0 0 0 0 0 0 0 0
34825- 0 0 0 0 0 0 0 0 0 0 0 0
34826- 0 0 0 0 0 0 0 0 0 0 0 0
34827- 0 0 0 0 0 0 0 0 0 0 0 0
34828- 0 0 0 0 0 0 0 0 0 0 0 0
34829- 0 0 0 0 0 0 0 0 0 0 0 0
34830- 0 0 0 0 0 0 0 0 0 6 6 6
34831- 22 22 22 62 62 62 62 62 62 2 2 6
34832- 2 2 6 2 2 6 2 2 6 2 2 6
34833- 2 2 6 2 2 6 2 2 6 2 2 6
34834- 2 2 6 2 2 6 2 2 6 26 26 26
34835- 54 54 54 38 38 38 18 18 18 10 10 10
34836- 2 2 6 2 2 6 34 34 34 82 82 82
34837- 38 38 38 14 14 14 0 0 0 0 0 0
34838- 0 0 0 0 0 0 0 0 0 0 0 0
34839- 0 0 0 0 0 0 0 0 0 0 0 0
34840- 0 0 0 0 0 0 0 0 0 0 0 0
34841- 0 0 0 0 0 0 0 0 0 0 0 0
34842- 0 0 0 0 0 0 0 0 0 0 0 0
34843- 0 0 0 0 0 0 0 0 0 0 0 0
34844- 0 0 0 0 0 0 0 0 0 0 0 0
34845- 0 0 0 0 0 1 0 0 1 0 0 0
34846- 0 0 0 0 0 0 0 0 0 0 0 0
34847- 0 0 0 0 0 0 0 0 0 0 0 0
34848- 0 0 0 0 0 0 0 0 0 0 0 0
34849- 0 0 0 0 0 0 0 0 0 0 0 0
34850- 0 0 0 0 0 0 0 0 0 6 6 6
34851- 30 30 30 78 78 78 30 30 30 2 2 6
34852- 2 2 6 2 2 6 2 2 6 2 2 6
34853- 2 2 6 2 2 6 2 2 6 2 2 6
34854- 2 2 6 2 2 6 2 2 6 10 10 10
34855- 10 10 10 2 2 6 2 2 6 2 2 6
34856- 2 2 6 2 2 6 2 2 6 78 78 78
34857- 50 50 50 18 18 18 6 6 6 0 0 0
34858- 0 0 0 0 0 0 0 0 0 0 0 0
34859- 0 0 0 0 0 0 0 0 0 0 0 0
34860- 0 0 0 0 0 0 0 0 0 0 0 0
34861- 0 0 0 0 0 0 0 0 0 0 0 0
34862- 0 0 0 0 0 0 0 0 0 0 0 0
34863- 0 0 0 0 0 0 0 0 0 0 0 0
34864- 0 0 0 0 0 0 0 0 0 0 0 0
34865- 0 0 1 0 0 0 0 0 0 0 0 0
34866- 0 0 0 0 0 0 0 0 0 0 0 0
34867- 0 0 0 0 0 0 0 0 0 0 0 0
34868- 0 0 0 0 0 0 0 0 0 0 0 0
34869- 0 0 0 0 0 0 0 0 0 0 0 0
34870- 0 0 0 0 0 0 0 0 0 10 10 10
34871- 38 38 38 86 86 86 14 14 14 2 2 6
34872- 2 2 6 2 2 6 2 2 6 2 2 6
34873- 2 2 6 2 2 6 2 2 6 2 2 6
34874- 2 2 6 2 2 6 2 2 6 2 2 6
34875- 2 2 6 2 2 6 2 2 6 2 2 6
34876- 2 2 6 2 2 6 2 2 6 54 54 54
34877- 66 66 66 26 26 26 6 6 6 0 0 0
34878- 0 0 0 0 0 0 0 0 0 0 0 0
34879- 0 0 0 0 0 0 0 0 0 0 0 0
34880- 0 0 0 0 0 0 0 0 0 0 0 0
34881- 0 0 0 0 0 0 0 0 0 0 0 0
34882- 0 0 0 0 0 0 0 0 0 0 0 0
34883- 0 0 0 0 0 0 0 0 0 0 0 0
34884- 0 0 0 0 0 0 0 0 0 0 0 0
34885- 0 0 0 0 0 1 0 0 1 0 0 0
34886- 0 0 0 0 0 0 0 0 0 0 0 0
34887- 0 0 0 0 0 0 0 0 0 0 0 0
34888- 0 0 0 0 0 0 0 0 0 0 0 0
34889- 0 0 0 0 0 0 0 0 0 0 0 0
34890- 0 0 0 0 0 0 0 0 0 14 14 14
34891- 42 42 42 82 82 82 2 2 6 2 2 6
34892- 2 2 6 6 6 6 10 10 10 2 2 6
34893- 2 2 6 2 2 6 2 2 6 2 2 6
34894- 2 2 6 2 2 6 2 2 6 6 6 6
34895- 14 14 14 10 10 10 2 2 6 2 2 6
34896- 2 2 6 2 2 6 2 2 6 18 18 18
34897- 82 82 82 34 34 34 10 10 10 0 0 0
34898- 0 0 0 0 0 0 0 0 0 0 0 0
34899- 0 0 0 0 0 0 0 0 0 0 0 0
34900- 0 0 0 0 0 0 0 0 0 0 0 0
34901- 0 0 0 0 0 0 0 0 0 0 0 0
34902- 0 0 0 0 0 0 0 0 0 0 0 0
34903- 0 0 0 0 0 0 0 0 0 0 0 0
34904- 0 0 0 0 0 0 0 0 0 0 0 0
34905- 0 0 1 0 0 0 0 0 0 0 0 0
34906- 0 0 0 0 0 0 0 0 0 0 0 0
34907- 0 0 0 0 0 0 0 0 0 0 0 0
34908- 0 0 0 0 0 0 0 0 0 0 0 0
34909- 0 0 0 0 0 0 0 0 0 0 0 0
34910- 0 0 0 0 0 0 0 0 0 14 14 14
34911- 46 46 46 86 86 86 2 2 6 2 2 6
34912- 6 6 6 6 6 6 22 22 22 34 34 34
34913- 6 6 6 2 2 6 2 2 6 2 2 6
34914- 2 2 6 2 2 6 18 18 18 34 34 34
34915- 10 10 10 50 50 50 22 22 22 2 2 6
34916- 2 2 6 2 2 6 2 2 6 10 10 10
34917- 86 86 86 42 42 42 14 14 14 0 0 0
34918- 0 0 0 0 0 0 0 0 0 0 0 0
34919- 0 0 0 0 0 0 0 0 0 0 0 0
34920- 0 0 0 0 0 0 0 0 0 0 0 0
34921- 0 0 0 0 0 0 0 0 0 0 0 0
34922- 0 0 0 0 0 0 0 0 0 0 0 0
34923- 0 0 0 0 0 0 0 0 0 0 0 0
34924- 0 0 0 0 0 0 0 0 0 0 0 0
34925- 0 0 1 0 0 1 0 0 1 0 0 0
34926- 0 0 0 0 0 0 0 0 0 0 0 0
34927- 0 0 0 0 0 0 0 0 0 0 0 0
34928- 0 0 0 0 0 0 0 0 0 0 0 0
34929- 0 0 0 0 0 0 0 0 0 0 0 0
34930- 0 0 0 0 0 0 0 0 0 14 14 14
34931- 46 46 46 86 86 86 2 2 6 2 2 6
34932- 38 38 38 116 116 116 94 94 94 22 22 22
34933- 22 22 22 2 2 6 2 2 6 2 2 6
34934- 14 14 14 86 86 86 138 138 138 162 162 162
34935-154 154 154 38 38 38 26 26 26 6 6 6
34936- 2 2 6 2 2 6 2 2 6 2 2 6
34937- 86 86 86 46 46 46 14 14 14 0 0 0
34938- 0 0 0 0 0 0 0 0 0 0 0 0
34939- 0 0 0 0 0 0 0 0 0 0 0 0
34940- 0 0 0 0 0 0 0 0 0 0 0 0
34941- 0 0 0 0 0 0 0 0 0 0 0 0
34942- 0 0 0 0 0 0 0 0 0 0 0 0
34943- 0 0 0 0 0 0 0 0 0 0 0 0
34944- 0 0 0 0 0 0 0 0 0 0 0 0
34945- 0 0 0 0 0 0 0 0 0 0 0 0
34946- 0 0 0 0 0 0 0 0 0 0 0 0
34947- 0 0 0 0 0 0 0 0 0 0 0 0
34948- 0 0 0 0 0 0 0 0 0 0 0 0
34949- 0 0 0 0 0 0 0 0 0 0 0 0
34950- 0 0 0 0 0 0 0 0 0 14 14 14
34951- 46 46 46 86 86 86 2 2 6 14 14 14
34952-134 134 134 198 198 198 195 195 195 116 116 116
34953- 10 10 10 2 2 6 2 2 6 6 6 6
34954-101 98 89 187 187 187 210 210 210 218 218 218
34955-214 214 214 134 134 134 14 14 14 6 6 6
34956- 2 2 6 2 2 6 2 2 6 2 2 6
34957- 86 86 86 50 50 50 18 18 18 6 6 6
34958- 0 0 0 0 0 0 0 0 0 0 0 0
34959- 0 0 0 0 0 0 0 0 0 0 0 0
34960- 0 0 0 0 0 0 0 0 0 0 0 0
34961- 0 0 0 0 0 0 0 0 0 0 0 0
34962- 0 0 0 0 0 0 0 0 0 0 0 0
34963- 0 0 0 0 0 0 0 0 0 0 0 0
34964- 0 0 0 0 0 0 0 0 1 0 0 0
34965- 0 0 1 0 0 1 0 0 1 0 0 0
34966- 0 0 0 0 0 0 0 0 0 0 0 0
34967- 0 0 0 0 0 0 0 0 0 0 0 0
34968- 0 0 0 0 0 0 0 0 0 0 0 0
34969- 0 0 0 0 0 0 0 0 0 0 0 0
34970- 0 0 0 0 0 0 0 0 0 14 14 14
34971- 46 46 46 86 86 86 2 2 6 54 54 54
34972-218 218 218 195 195 195 226 226 226 246 246 246
34973- 58 58 58 2 2 6 2 2 6 30 30 30
34974-210 210 210 253 253 253 174 174 174 123 123 123
34975-221 221 221 234 234 234 74 74 74 2 2 6
34976- 2 2 6 2 2 6 2 2 6 2 2 6
34977- 70 70 70 58 58 58 22 22 22 6 6 6
34978- 0 0 0 0 0 0 0 0 0 0 0 0
34979- 0 0 0 0 0 0 0 0 0 0 0 0
34980- 0 0 0 0 0 0 0 0 0 0 0 0
34981- 0 0 0 0 0 0 0 0 0 0 0 0
34982- 0 0 0 0 0 0 0 0 0 0 0 0
34983- 0 0 0 0 0 0 0 0 0 0 0 0
34984- 0 0 0 0 0 0 0 0 0 0 0 0
34985- 0 0 0 0 0 0 0 0 0 0 0 0
34986- 0 0 0 0 0 0 0 0 0 0 0 0
34987- 0 0 0 0 0 0 0 0 0 0 0 0
34988- 0 0 0 0 0 0 0 0 0 0 0 0
34989- 0 0 0 0 0 0 0 0 0 0 0 0
34990- 0 0 0 0 0 0 0 0 0 14 14 14
34991- 46 46 46 82 82 82 2 2 6 106 106 106
34992-170 170 170 26 26 26 86 86 86 226 226 226
34993-123 123 123 10 10 10 14 14 14 46 46 46
34994-231 231 231 190 190 190 6 6 6 70 70 70
34995- 90 90 90 238 238 238 158 158 158 2 2 6
34996- 2 2 6 2 2 6 2 2 6 2 2 6
34997- 70 70 70 58 58 58 22 22 22 6 6 6
34998- 0 0 0 0 0 0 0 0 0 0 0 0
34999- 0 0 0 0 0 0 0 0 0 0 0 0
35000- 0 0 0 0 0 0 0 0 0 0 0 0
35001- 0 0 0 0 0 0 0 0 0 0 0 0
35002- 0 0 0 0 0 0 0 0 0 0 0 0
35003- 0 0 0 0 0 0 0 0 0 0 0 0
35004- 0 0 0 0 0 0 0 0 1 0 0 0
35005- 0 0 1 0 0 1 0 0 1 0 0 0
35006- 0 0 0 0 0 0 0 0 0 0 0 0
35007- 0 0 0 0 0 0 0 0 0 0 0 0
35008- 0 0 0 0 0 0 0 0 0 0 0 0
35009- 0 0 0 0 0 0 0 0 0 0 0 0
35010- 0 0 0 0 0 0 0 0 0 14 14 14
35011- 42 42 42 86 86 86 6 6 6 116 116 116
35012-106 106 106 6 6 6 70 70 70 149 149 149
35013-128 128 128 18 18 18 38 38 38 54 54 54
35014-221 221 221 106 106 106 2 2 6 14 14 14
35015- 46 46 46 190 190 190 198 198 198 2 2 6
35016- 2 2 6 2 2 6 2 2 6 2 2 6
35017- 74 74 74 62 62 62 22 22 22 6 6 6
35018- 0 0 0 0 0 0 0 0 0 0 0 0
35019- 0 0 0 0 0 0 0 0 0 0 0 0
35020- 0 0 0 0 0 0 0 0 0 0 0 0
35021- 0 0 0 0 0 0 0 0 0 0 0 0
35022- 0 0 0 0 0 0 0 0 0 0 0 0
35023- 0 0 0 0 0 0 0 0 0 0 0 0
35024- 0 0 0 0 0 0 0 0 1 0 0 0
35025- 0 0 1 0 0 0 0 0 1 0 0 0
35026- 0 0 0 0 0 0 0 0 0 0 0 0
35027- 0 0 0 0 0 0 0 0 0 0 0 0
35028- 0 0 0 0 0 0 0 0 0 0 0 0
35029- 0 0 0 0 0 0 0 0 0 0 0 0
35030- 0 0 0 0 0 0 0 0 0 14 14 14
35031- 42 42 42 94 94 94 14 14 14 101 101 101
35032-128 128 128 2 2 6 18 18 18 116 116 116
35033-118 98 46 121 92 8 121 92 8 98 78 10
35034-162 162 162 106 106 106 2 2 6 2 2 6
35035- 2 2 6 195 195 195 195 195 195 6 6 6
35036- 2 2 6 2 2 6 2 2 6 2 2 6
35037- 74 74 74 62 62 62 22 22 22 6 6 6
35038- 0 0 0 0 0 0 0 0 0 0 0 0
35039- 0 0 0 0 0 0 0 0 0 0 0 0
35040- 0 0 0 0 0 0 0 0 0 0 0 0
35041- 0 0 0 0 0 0 0 0 0 0 0 0
35042- 0 0 0 0 0 0 0 0 0 0 0 0
35043- 0 0 0 0 0 0 0 0 0 0 0 0
35044- 0 0 0 0 0 0 0 0 1 0 0 1
35045- 0 0 1 0 0 0 0 0 1 0 0 0
35046- 0 0 0 0 0 0 0 0 0 0 0 0
35047- 0 0 0 0 0 0 0 0 0 0 0 0
35048- 0 0 0 0 0 0 0 0 0 0 0 0
35049- 0 0 0 0 0 0 0 0 0 0 0 0
35050- 0 0 0 0 0 0 0 0 0 10 10 10
35051- 38 38 38 90 90 90 14 14 14 58 58 58
35052-210 210 210 26 26 26 54 38 6 154 114 10
35053-226 170 11 236 186 11 225 175 15 184 144 12
35054-215 174 15 175 146 61 37 26 9 2 2 6
35055- 70 70 70 246 246 246 138 138 138 2 2 6
35056- 2 2 6 2 2 6 2 2 6 2 2 6
35057- 70 70 70 66 66 66 26 26 26 6 6 6
35058- 0 0 0 0 0 0 0 0 0 0 0 0
35059- 0 0 0 0 0 0 0 0 0 0 0 0
35060- 0 0 0 0 0 0 0 0 0 0 0 0
35061- 0 0 0 0 0 0 0 0 0 0 0 0
35062- 0 0 0 0 0 0 0 0 0 0 0 0
35063- 0 0 0 0 0 0 0 0 0 0 0 0
35064- 0 0 0 0 0 0 0 0 0 0 0 0
35065- 0 0 0 0 0 0 0 0 0 0 0 0
35066- 0 0 0 0 0 0 0 0 0 0 0 0
35067- 0 0 0 0 0 0 0 0 0 0 0 0
35068- 0 0 0 0 0 0 0 0 0 0 0 0
35069- 0 0 0 0 0 0 0 0 0 0 0 0
35070- 0 0 0 0 0 0 0 0 0 10 10 10
35071- 38 38 38 86 86 86 14 14 14 10 10 10
35072-195 195 195 188 164 115 192 133 9 225 175 15
35073-239 182 13 234 190 10 232 195 16 232 200 30
35074-245 207 45 241 208 19 232 195 16 184 144 12
35075-218 194 134 211 206 186 42 42 42 2 2 6
35076- 2 2 6 2 2 6 2 2 6 2 2 6
35077- 50 50 50 74 74 74 30 30 30 6 6 6
35078- 0 0 0 0 0 0 0 0 0 0 0 0
35079- 0 0 0 0 0 0 0 0 0 0 0 0
35080- 0 0 0 0 0 0 0 0 0 0 0 0
35081- 0 0 0 0 0 0 0 0 0 0 0 0
35082- 0 0 0 0 0 0 0 0 0 0 0 0
35083- 0 0 0 0 0 0 0 0 0 0 0 0
35084- 0 0 0 0 0 0 0 0 0 0 0 0
35085- 0 0 0 0 0 0 0 0 0 0 0 0
35086- 0 0 0 0 0 0 0 0 0 0 0 0
35087- 0 0 0 0 0 0 0 0 0 0 0 0
35088- 0 0 0 0 0 0 0 0 0 0 0 0
35089- 0 0 0 0 0 0 0 0 0 0 0 0
35090- 0 0 0 0 0 0 0 0 0 10 10 10
35091- 34 34 34 86 86 86 14 14 14 2 2 6
35092-121 87 25 192 133 9 219 162 10 239 182 13
35093-236 186 11 232 195 16 241 208 19 244 214 54
35094-246 218 60 246 218 38 246 215 20 241 208 19
35095-241 208 19 226 184 13 121 87 25 2 2 6
35096- 2 2 6 2 2 6 2 2 6 2 2 6
35097- 50 50 50 82 82 82 34 34 34 10 10 10
35098- 0 0 0 0 0 0 0 0 0 0 0 0
35099- 0 0 0 0 0 0 0 0 0 0 0 0
35100- 0 0 0 0 0 0 0 0 0 0 0 0
35101- 0 0 0 0 0 0 0 0 0 0 0 0
35102- 0 0 0 0 0 0 0 0 0 0 0 0
35103- 0 0 0 0 0 0 0 0 0 0 0 0
35104- 0 0 0 0 0 0 0 0 0 0 0 0
35105- 0 0 0 0 0 0 0 0 0 0 0 0
35106- 0 0 0 0 0 0 0 0 0 0 0 0
35107- 0 0 0 0 0 0 0 0 0 0 0 0
35108- 0 0 0 0 0 0 0 0 0 0 0 0
35109- 0 0 0 0 0 0 0 0 0 0 0 0
35110- 0 0 0 0 0 0 0 0 0 10 10 10
35111- 34 34 34 82 82 82 30 30 30 61 42 6
35112-180 123 7 206 145 10 230 174 11 239 182 13
35113-234 190 10 238 202 15 241 208 19 246 218 74
35114-246 218 38 246 215 20 246 215 20 246 215 20
35115-226 184 13 215 174 15 184 144 12 6 6 6
35116- 2 2 6 2 2 6 2 2 6 2 2 6
35117- 26 26 26 94 94 94 42 42 42 14 14 14
35118- 0 0 0 0 0 0 0 0 0 0 0 0
35119- 0 0 0 0 0 0 0 0 0 0 0 0
35120- 0 0 0 0 0 0 0 0 0 0 0 0
35121- 0 0 0 0 0 0 0 0 0 0 0 0
35122- 0 0 0 0 0 0 0 0 0 0 0 0
35123- 0 0 0 0 0 0 0 0 0 0 0 0
35124- 0 0 0 0 0 0 0 0 0 0 0 0
35125- 0 0 0 0 0 0 0 0 0 0 0 0
35126- 0 0 0 0 0 0 0 0 0 0 0 0
35127- 0 0 0 0 0 0 0 0 0 0 0 0
35128- 0 0 0 0 0 0 0 0 0 0 0 0
35129- 0 0 0 0 0 0 0 0 0 0 0 0
35130- 0 0 0 0 0 0 0 0 0 10 10 10
35131- 30 30 30 78 78 78 50 50 50 104 69 6
35132-192 133 9 216 158 10 236 178 12 236 186 11
35133-232 195 16 241 208 19 244 214 54 245 215 43
35134-246 215 20 246 215 20 241 208 19 198 155 10
35135-200 144 11 216 158 10 156 118 10 2 2 6
35136- 2 2 6 2 2 6 2 2 6 2 2 6
35137- 6 6 6 90 90 90 54 54 54 18 18 18
35138- 6 6 6 0 0 0 0 0 0 0 0 0
35139- 0 0 0 0 0 0 0 0 0 0 0 0
35140- 0 0 0 0 0 0 0 0 0 0 0 0
35141- 0 0 0 0 0 0 0 0 0 0 0 0
35142- 0 0 0 0 0 0 0 0 0 0 0 0
35143- 0 0 0 0 0 0 0 0 0 0 0 0
35144- 0 0 0 0 0 0 0 0 0 0 0 0
35145- 0 0 0 0 0 0 0 0 0 0 0 0
35146- 0 0 0 0 0 0 0 0 0 0 0 0
35147- 0 0 0 0 0 0 0 0 0 0 0 0
35148- 0 0 0 0 0 0 0 0 0 0 0 0
35149- 0 0 0 0 0 0 0 0 0 0 0 0
35150- 0 0 0 0 0 0 0 0 0 10 10 10
35151- 30 30 30 78 78 78 46 46 46 22 22 22
35152-137 92 6 210 162 10 239 182 13 238 190 10
35153-238 202 15 241 208 19 246 215 20 246 215 20
35154-241 208 19 203 166 17 185 133 11 210 150 10
35155-216 158 10 210 150 10 102 78 10 2 2 6
35156- 6 6 6 54 54 54 14 14 14 2 2 6
35157- 2 2 6 62 62 62 74 74 74 30 30 30
35158- 10 10 10 0 0 0 0 0 0 0 0 0
35159- 0 0 0 0 0 0 0 0 0 0 0 0
35160- 0 0 0 0 0 0 0 0 0 0 0 0
35161- 0 0 0 0 0 0 0 0 0 0 0 0
35162- 0 0 0 0 0 0 0 0 0 0 0 0
35163- 0 0 0 0 0 0 0 0 0 0 0 0
35164- 0 0 0 0 0 0 0 0 0 0 0 0
35165- 0 0 0 0 0 0 0 0 0 0 0 0
35166- 0 0 0 0 0 0 0 0 0 0 0 0
35167- 0 0 0 0 0 0 0 0 0 0 0 0
35168- 0 0 0 0 0 0 0 0 0 0 0 0
35169- 0 0 0 0 0 0 0 0 0 0 0 0
35170- 0 0 0 0 0 0 0 0 0 10 10 10
35171- 34 34 34 78 78 78 50 50 50 6 6 6
35172- 94 70 30 139 102 15 190 146 13 226 184 13
35173-232 200 30 232 195 16 215 174 15 190 146 13
35174-168 122 10 192 133 9 210 150 10 213 154 11
35175-202 150 34 182 157 106 101 98 89 2 2 6
35176- 2 2 6 78 78 78 116 116 116 58 58 58
35177- 2 2 6 22 22 22 90 90 90 46 46 46
35178- 18 18 18 6 6 6 0 0 0 0 0 0
35179- 0 0 0 0 0 0 0 0 0 0 0 0
35180- 0 0 0 0 0 0 0 0 0 0 0 0
35181- 0 0 0 0 0 0 0 0 0 0 0 0
35182- 0 0 0 0 0 0 0 0 0 0 0 0
35183- 0 0 0 0 0 0 0 0 0 0 0 0
35184- 0 0 0 0 0 0 0 0 0 0 0 0
35185- 0 0 0 0 0 0 0 0 0 0 0 0
35186- 0 0 0 0 0 0 0 0 0 0 0 0
35187- 0 0 0 0 0 0 0 0 0 0 0 0
35188- 0 0 0 0 0 0 0 0 0 0 0 0
35189- 0 0 0 0 0 0 0 0 0 0 0 0
35190- 0 0 0 0 0 0 0 0 0 10 10 10
35191- 38 38 38 86 86 86 50 50 50 6 6 6
35192-128 128 128 174 154 114 156 107 11 168 122 10
35193-198 155 10 184 144 12 197 138 11 200 144 11
35194-206 145 10 206 145 10 197 138 11 188 164 115
35195-195 195 195 198 198 198 174 174 174 14 14 14
35196- 2 2 6 22 22 22 116 116 116 116 116 116
35197- 22 22 22 2 2 6 74 74 74 70 70 70
35198- 30 30 30 10 10 10 0 0 0 0 0 0
35199- 0 0 0 0 0 0 0 0 0 0 0 0
35200- 0 0 0 0 0 0 0 0 0 0 0 0
35201- 0 0 0 0 0 0 0 0 0 0 0 0
35202- 0 0 0 0 0 0 0 0 0 0 0 0
35203- 0 0 0 0 0 0 0 0 0 0 0 0
35204- 0 0 0 0 0 0 0 0 0 0 0 0
35205- 0 0 0 0 0 0 0 0 0 0 0 0
35206- 0 0 0 0 0 0 0 0 0 0 0 0
35207- 0 0 0 0 0 0 0 0 0 0 0 0
35208- 0 0 0 0 0 0 0 0 0 0 0 0
35209- 0 0 0 0 0 0 0 0 0 0 0 0
35210- 0 0 0 0 0 0 6 6 6 18 18 18
35211- 50 50 50 101 101 101 26 26 26 10 10 10
35212-138 138 138 190 190 190 174 154 114 156 107 11
35213-197 138 11 200 144 11 197 138 11 192 133 9
35214-180 123 7 190 142 34 190 178 144 187 187 187
35215-202 202 202 221 221 221 214 214 214 66 66 66
35216- 2 2 6 2 2 6 50 50 50 62 62 62
35217- 6 6 6 2 2 6 10 10 10 90 90 90
35218- 50 50 50 18 18 18 6 6 6 0 0 0
35219- 0 0 0 0 0 0 0 0 0 0 0 0
35220- 0 0 0 0 0 0 0 0 0 0 0 0
35221- 0 0 0 0 0 0 0 0 0 0 0 0
35222- 0 0 0 0 0 0 0 0 0 0 0 0
35223- 0 0 0 0 0 0 0 0 0 0 0 0
35224- 0 0 0 0 0 0 0 0 0 0 0 0
35225- 0 0 0 0 0 0 0 0 0 0 0 0
35226- 0 0 0 0 0 0 0 0 0 0 0 0
35227- 0 0 0 0 0 0 0 0 0 0 0 0
35228- 0 0 0 0 0 0 0 0 0 0 0 0
35229- 0 0 0 0 0 0 0 0 0 0 0 0
35230- 0 0 0 0 0 0 10 10 10 34 34 34
35231- 74 74 74 74 74 74 2 2 6 6 6 6
35232-144 144 144 198 198 198 190 190 190 178 166 146
35233-154 121 60 156 107 11 156 107 11 168 124 44
35234-174 154 114 187 187 187 190 190 190 210 210 210
35235-246 246 246 253 253 253 253 253 253 182 182 182
35236- 6 6 6 2 2 6 2 2 6 2 2 6
35237- 2 2 6 2 2 6 2 2 6 62 62 62
35238- 74 74 74 34 34 34 14 14 14 0 0 0
35239- 0 0 0 0 0 0 0 0 0 0 0 0
35240- 0 0 0 0 0 0 0 0 0 0 0 0
35241- 0 0 0 0 0 0 0 0 0 0 0 0
35242- 0 0 0 0 0 0 0 0 0 0 0 0
35243- 0 0 0 0 0 0 0 0 0 0 0 0
35244- 0 0 0 0 0 0 0 0 0 0 0 0
35245- 0 0 0 0 0 0 0 0 0 0 0 0
35246- 0 0 0 0 0 0 0 0 0 0 0 0
35247- 0 0 0 0 0 0 0 0 0 0 0 0
35248- 0 0 0 0 0 0 0 0 0 0 0 0
35249- 0 0 0 0 0 0 0 0 0 0 0 0
35250- 0 0 0 10 10 10 22 22 22 54 54 54
35251- 94 94 94 18 18 18 2 2 6 46 46 46
35252-234 234 234 221 221 221 190 190 190 190 190 190
35253-190 190 190 187 187 187 187 187 187 190 190 190
35254-190 190 190 195 195 195 214 214 214 242 242 242
35255-253 253 253 253 253 253 253 253 253 253 253 253
35256- 82 82 82 2 2 6 2 2 6 2 2 6
35257- 2 2 6 2 2 6 2 2 6 14 14 14
35258- 86 86 86 54 54 54 22 22 22 6 6 6
35259- 0 0 0 0 0 0 0 0 0 0 0 0
35260- 0 0 0 0 0 0 0 0 0 0 0 0
35261- 0 0 0 0 0 0 0 0 0 0 0 0
35262- 0 0 0 0 0 0 0 0 0 0 0 0
35263- 0 0 0 0 0 0 0 0 0 0 0 0
35264- 0 0 0 0 0 0 0 0 0 0 0 0
35265- 0 0 0 0 0 0 0 0 0 0 0 0
35266- 0 0 0 0 0 0 0 0 0 0 0 0
35267- 0 0 0 0 0 0 0 0 0 0 0 0
35268- 0 0 0 0 0 0 0 0 0 0 0 0
35269- 0 0 0 0 0 0 0 0 0 0 0 0
35270- 6 6 6 18 18 18 46 46 46 90 90 90
35271- 46 46 46 18 18 18 6 6 6 182 182 182
35272-253 253 253 246 246 246 206 206 206 190 190 190
35273-190 190 190 190 190 190 190 190 190 190 190 190
35274-206 206 206 231 231 231 250 250 250 253 253 253
35275-253 253 253 253 253 253 253 253 253 253 253 253
35276-202 202 202 14 14 14 2 2 6 2 2 6
35277- 2 2 6 2 2 6 2 2 6 2 2 6
35278- 42 42 42 86 86 86 42 42 42 18 18 18
35279- 6 6 6 0 0 0 0 0 0 0 0 0
35280- 0 0 0 0 0 0 0 0 0 0 0 0
35281- 0 0 0 0 0 0 0 0 0 0 0 0
35282- 0 0 0 0 0 0 0 0 0 0 0 0
35283- 0 0 0 0 0 0 0 0 0 0 0 0
35284- 0 0 0 0 0 0 0 0 0 0 0 0
35285- 0 0 0 0 0 0 0 0 0 0 0 0
35286- 0 0 0 0 0 0 0 0 0 0 0 0
35287- 0 0 0 0 0 0 0 0 0 0 0 0
35288- 0 0 0 0 0 0 0 0 0 0 0 0
35289- 0 0 0 0 0 0 0 0 0 6 6 6
35290- 14 14 14 38 38 38 74 74 74 66 66 66
35291- 2 2 6 6 6 6 90 90 90 250 250 250
35292-253 253 253 253 253 253 238 238 238 198 198 198
35293-190 190 190 190 190 190 195 195 195 221 221 221
35294-246 246 246 253 253 253 253 253 253 253 253 253
35295-253 253 253 253 253 253 253 253 253 253 253 253
35296-253 253 253 82 82 82 2 2 6 2 2 6
35297- 2 2 6 2 2 6 2 2 6 2 2 6
35298- 2 2 6 78 78 78 70 70 70 34 34 34
35299- 14 14 14 6 6 6 0 0 0 0 0 0
35300- 0 0 0 0 0 0 0 0 0 0 0 0
35301- 0 0 0 0 0 0 0 0 0 0 0 0
35302- 0 0 0 0 0 0 0 0 0 0 0 0
35303- 0 0 0 0 0 0 0 0 0 0 0 0
35304- 0 0 0 0 0 0 0 0 0 0 0 0
35305- 0 0 0 0 0 0 0 0 0 0 0 0
35306- 0 0 0 0 0 0 0 0 0 0 0 0
35307- 0 0 0 0 0 0 0 0 0 0 0 0
35308- 0 0 0 0 0 0 0 0 0 0 0 0
35309- 0 0 0 0 0 0 0 0 0 14 14 14
35310- 34 34 34 66 66 66 78 78 78 6 6 6
35311- 2 2 6 18 18 18 218 218 218 253 253 253
35312-253 253 253 253 253 253 253 253 253 246 246 246
35313-226 226 226 231 231 231 246 246 246 253 253 253
35314-253 253 253 253 253 253 253 253 253 253 253 253
35315-253 253 253 253 253 253 253 253 253 253 253 253
35316-253 253 253 178 178 178 2 2 6 2 2 6
35317- 2 2 6 2 2 6 2 2 6 2 2 6
35318- 2 2 6 18 18 18 90 90 90 62 62 62
35319- 30 30 30 10 10 10 0 0 0 0 0 0
35320- 0 0 0 0 0 0 0 0 0 0 0 0
35321- 0 0 0 0 0 0 0 0 0 0 0 0
35322- 0 0 0 0 0 0 0 0 0 0 0 0
35323- 0 0 0 0 0 0 0 0 0 0 0 0
35324- 0 0 0 0 0 0 0 0 0 0 0 0
35325- 0 0 0 0 0 0 0 0 0 0 0 0
35326- 0 0 0 0 0 0 0 0 0 0 0 0
35327- 0 0 0 0 0 0 0 0 0 0 0 0
35328- 0 0 0 0 0 0 0 0 0 0 0 0
35329- 0 0 0 0 0 0 10 10 10 26 26 26
35330- 58 58 58 90 90 90 18 18 18 2 2 6
35331- 2 2 6 110 110 110 253 253 253 253 253 253
35332-253 253 253 253 253 253 253 253 253 253 253 253
35333-250 250 250 253 253 253 253 253 253 253 253 253
35334-253 253 253 253 253 253 253 253 253 253 253 253
35335-253 253 253 253 253 253 253 253 253 253 253 253
35336-253 253 253 231 231 231 18 18 18 2 2 6
35337- 2 2 6 2 2 6 2 2 6 2 2 6
35338- 2 2 6 2 2 6 18 18 18 94 94 94
35339- 54 54 54 26 26 26 10 10 10 0 0 0
35340- 0 0 0 0 0 0 0 0 0 0 0 0
35341- 0 0 0 0 0 0 0 0 0 0 0 0
35342- 0 0 0 0 0 0 0 0 0 0 0 0
35343- 0 0 0 0 0 0 0 0 0 0 0 0
35344- 0 0 0 0 0 0 0 0 0 0 0 0
35345- 0 0 0 0 0 0 0 0 0 0 0 0
35346- 0 0 0 0 0 0 0 0 0 0 0 0
35347- 0 0 0 0 0 0 0 0 0 0 0 0
35348- 0 0 0 0 0 0 0 0 0 0 0 0
35349- 0 0 0 6 6 6 22 22 22 50 50 50
35350- 90 90 90 26 26 26 2 2 6 2 2 6
35351- 14 14 14 195 195 195 250 250 250 253 253 253
35352-253 253 253 253 253 253 253 253 253 253 253 253
35353-253 253 253 253 253 253 253 253 253 253 253 253
35354-253 253 253 253 253 253 253 253 253 253 253 253
35355-253 253 253 253 253 253 253 253 253 253 253 253
35356-250 250 250 242 242 242 54 54 54 2 2 6
35357- 2 2 6 2 2 6 2 2 6 2 2 6
35358- 2 2 6 2 2 6 2 2 6 38 38 38
35359- 86 86 86 50 50 50 22 22 22 6 6 6
35360- 0 0 0 0 0 0 0 0 0 0 0 0
35361- 0 0 0 0 0 0 0 0 0 0 0 0
35362- 0 0 0 0 0 0 0 0 0 0 0 0
35363- 0 0 0 0 0 0 0 0 0 0 0 0
35364- 0 0 0 0 0 0 0 0 0 0 0 0
35365- 0 0 0 0 0 0 0 0 0 0 0 0
35366- 0 0 0 0 0 0 0 0 0 0 0 0
35367- 0 0 0 0 0 0 0 0 0 0 0 0
35368- 0 0 0 0 0 0 0 0 0 0 0 0
35369- 6 6 6 14 14 14 38 38 38 82 82 82
35370- 34 34 34 2 2 6 2 2 6 2 2 6
35371- 42 42 42 195 195 195 246 246 246 253 253 253
35372-253 253 253 253 253 253 253 253 253 250 250 250
35373-242 242 242 242 242 242 250 250 250 253 253 253
35374-253 253 253 253 253 253 253 253 253 253 253 253
35375-253 253 253 250 250 250 246 246 246 238 238 238
35376-226 226 226 231 231 231 101 101 101 6 6 6
35377- 2 2 6 2 2 6 2 2 6 2 2 6
35378- 2 2 6 2 2 6 2 2 6 2 2 6
35379- 38 38 38 82 82 82 42 42 42 14 14 14
35380- 6 6 6 0 0 0 0 0 0 0 0 0
35381- 0 0 0 0 0 0 0 0 0 0 0 0
35382- 0 0 0 0 0 0 0 0 0 0 0 0
35383- 0 0 0 0 0 0 0 0 0 0 0 0
35384- 0 0 0 0 0 0 0 0 0 0 0 0
35385- 0 0 0 0 0 0 0 0 0 0 0 0
35386- 0 0 0 0 0 0 0 0 0 0 0 0
35387- 0 0 0 0 0 0 0 0 0 0 0 0
35388- 0 0 0 0 0 0 0 0 0 0 0 0
35389- 10 10 10 26 26 26 62 62 62 66 66 66
35390- 2 2 6 2 2 6 2 2 6 6 6 6
35391- 70 70 70 170 170 170 206 206 206 234 234 234
35392-246 246 246 250 250 250 250 250 250 238 238 238
35393-226 226 226 231 231 231 238 238 238 250 250 250
35394-250 250 250 250 250 250 246 246 246 231 231 231
35395-214 214 214 206 206 206 202 202 202 202 202 202
35396-198 198 198 202 202 202 182 182 182 18 18 18
35397- 2 2 6 2 2 6 2 2 6 2 2 6
35398- 2 2 6 2 2 6 2 2 6 2 2 6
35399- 2 2 6 62 62 62 66 66 66 30 30 30
35400- 10 10 10 0 0 0 0 0 0 0 0 0
35401- 0 0 0 0 0 0 0 0 0 0 0 0
35402- 0 0 0 0 0 0 0 0 0 0 0 0
35403- 0 0 0 0 0 0 0 0 0 0 0 0
35404- 0 0 0 0 0 0 0 0 0 0 0 0
35405- 0 0 0 0 0 0 0 0 0 0 0 0
35406- 0 0 0 0 0 0 0 0 0 0 0 0
35407- 0 0 0 0 0 0 0 0 0 0 0 0
35408- 0 0 0 0 0 0 0 0 0 0 0 0
35409- 14 14 14 42 42 42 82 82 82 18 18 18
35410- 2 2 6 2 2 6 2 2 6 10 10 10
35411- 94 94 94 182 182 182 218 218 218 242 242 242
35412-250 250 250 253 253 253 253 253 253 250 250 250
35413-234 234 234 253 253 253 253 253 253 253 253 253
35414-253 253 253 253 253 253 253 253 253 246 246 246
35415-238 238 238 226 226 226 210 210 210 202 202 202
35416-195 195 195 195 195 195 210 210 210 158 158 158
35417- 6 6 6 14 14 14 50 50 50 14 14 14
35418- 2 2 6 2 2 6 2 2 6 2 2 6
35419- 2 2 6 6 6 6 86 86 86 46 46 46
35420- 18 18 18 6 6 6 0 0 0 0 0 0
35421- 0 0 0 0 0 0 0 0 0 0 0 0
35422- 0 0 0 0 0 0 0 0 0 0 0 0
35423- 0 0 0 0 0 0 0 0 0 0 0 0
35424- 0 0 0 0 0 0 0 0 0 0 0 0
35425- 0 0 0 0 0 0 0 0 0 0 0 0
35426- 0 0 0 0 0 0 0 0 0 0 0 0
35427- 0 0 0 0 0 0 0 0 0 0 0 0
35428- 0 0 0 0 0 0 0 0 0 6 6 6
35429- 22 22 22 54 54 54 70 70 70 2 2 6
35430- 2 2 6 10 10 10 2 2 6 22 22 22
35431-166 166 166 231 231 231 250 250 250 253 253 253
35432-253 253 253 253 253 253 253 253 253 250 250 250
35433-242 242 242 253 253 253 253 253 253 253 253 253
35434-253 253 253 253 253 253 253 253 253 253 253 253
35435-253 253 253 253 253 253 253 253 253 246 246 246
35436-231 231 231 206 206 206 198 198 198 226 226 226
35437- 94 94 94 2 2 6 6 6 6 38 38 38
35438- 30 30 30 2 2 6 2 2 6 2 2 6
35439- 2 2 6 2 2 6 62 62 62 66 66 66
35440- 26 26 26 10 10 10 0 0 0 0 0 0
35441- 0 0 0 0 0 0 0 0 0 0 0 0
35442- 0 0 0 0 0 0 0 0 0 0 0 0
35443- 0 0 0 0 0 0 0 0 0 0 0 0
35444- 0 0 0 0 0 0 0 0 0 0 0 0
35445- 0 0 0 0 0 0 0 0 0 0 0 0
35446- 0 0 0 0 0 0 0 0 0 0 0 0
35447- 0 0 0 0 0 0 0 0 0 0 0 0
35448- 0 0 0 0 0 0 0 0 0 10 10 10
35449- 30 30 30 74 74 74 50 50 50 2 2 6
35450- 26 26 26 26 26 26 2 2 6 106 106 106
35451-238 238 238 253 253 253 253 253 253 253 253 253
35452-253 253 253 253 253 253 253 253 253 253 253 253
35453-253 253 253 253 253 253 253 253 253 253 253 253
35454-253 253 253 253 253 253 253 253 253 253 253 253
35455-253 253 253 253 253 253 253 253 253 253 253 253
35456-253 253 253 246 246 246 218 218 218 202 202 202
35457-210 210 210 14 14 14 2 2 6 2 2 6
35458- 30 30 30 22 22 22 2 2 6 2 2 6
35459- 2 2 6 2 2 6 18 18 18 86 86 86
35460- 42 42 42 14 14 14 0 0 0 0 0 0
35461- 0 0 0 0 0 0 0 0 0 0 0 0
35462- 0 0 0 0 0 0 0 0 0 0 0 0
35463- 0 0 0 0 0 0 0 0 0 0 0 0
35464- 0 0 0 0 0 0 0 0 0 0 0 0
35465- 0 0 0 0 0 0 0 0 0 0 0 0
35466- 0 0 0 0 0 0 0 0 0 0 0 0
35467- 0 0 0 0 0 0 0 0 0 0 0 0
35468- 0 0 0 0 0 0 0 0 0 14 14 14
35469- 42 42 42 90 90 90 22 22 22 2 2 6
35470- 42 42 42 2 2 6 18 18 18 218 218 218
35471-253 253 253 253 253 253 253 253 253 253 253 253
35472-253 253 253 253 253 253 253 253 253 253 253 253
35473-253 253 253 253 253 253 253 253 253 253 253 253
35474-253 253 253 253 253 253 253 253 253 253 253 253
35475-253 253 253 253 253 253 253 253 253 253 253 253
35476-253 253 253 253 253 253 250 250 250 221 221 221
35477-218 218 218 101 101 101 2 2 6 14 14 14
35478- 18 18 18 38 38 38 10 10 10 2 2 6
35479- 2 2 6 2 2 6 2 2 6 78 78 78
35480- 58 58 58 22 22 22 6 6 6 0 0 0
35481- 0 0 0 0 0 0 0 0 0 0 0 0
35482- 0 0 0 0 0 0 0 0 0 0 0 0
35483- 0 0 0 0 0 0 0 0 0 0 0 0
35484- 0 0 0 0 0 0 0 0 0 0 0 0
35485- 0 0 0 0 0 0 0 0 0 0 0 0
35486- 0 0 0 0 0 0 0 0 0 0 0 0
35487- 0 0 0 0 0 0 0 0 0 0 0 0
35488- 0 0 0 0 0 0 6 6 6 18 18 18
35489- 54 54 54 82 82 82 2 2 6 26 26 26
35490- 22 22 22 2 2 6 123 123 123 253 253 253
35491-253 253 253 253 253 253 253 253 253 253 253 253
35492-253 253 253 253 253 253 253 253 253 253 253 253
35493-253 253 253 253 253 253 253 253 253 253 253 253
35494-253 253 253 253 253 253 253 253 253 253 253 253
35495-253 253 253 253 253 253 253 253 253 253 253 253
35496-253 253 253 253 253 253 253 253 253 250 250 250
35497-238 238 238 198 198 198 6 6 6 38 38 38
35498- 58 58 58 26 26 26 38 38 38 2 2 6
35499- 2 2 6 2 2 6 2 2 6 46 46 46
35500- 78 78 78 30 30 30 10 10 10 0 0 0
35501- 0 0 0 0 0 0 0 0 0 0 0 0
35502- 0 0 0 0 0 0 0 0 0 0 0 0
35503- 0 0 0 0 0 0 0 0 0 0 0 0
35504- 0 0 0 0 0 0 0 0 0 0 0 0
35505- 0 0 0 0 0 0 0 0 0 0 0 0
35506- 0 0 0 0 0 0 0 0 0 0 0 0
35507- 0 0 0 0 0 0 0 0 0 0 0 0
35508- 0 0 0 0 0 0 10 10 10 30 30 30
35509- 74 74 74 58 58 58 2 2 6 42 42 42
35510- 2 2 6 22 22 22 231 231 231 253 253 253
35511-253 253 253 253 253 253 253 253 253 253 253 253
35512-253 253 253 253 253 253 253 253 253 250 250 250
35513-253 253 253 253 253 253 253 253 253 253 253 253
35514-253 253 253 253 253 253 253 253 253 253 253 253
35515-253 253 253 253 253 253 253 253 253 253 253 253
35516-253 253 253 253 253 253 253 253 253 253 253 253
35517-253 253 253 246 246 246 46 46 46 38 38 38
35518- 42 42 42 14 14 14 38 38 38 14 14 14
35519- 2 2 6 2 2 6 2 2 6 6 6 6
35520- 86 86 86 46 46 46 14 14 14 0 0 0
35521- 0 0 0 0 0 0 0 0 0 0 0 0
35522- 0 0 0 0 0 0 0 0 0 0 0 0
35523- 0 0 0 0 0 0 0 0 0 0 0 0
35524- 0 0 0 0 0 0 0 0 0 0 0 0
35525- 0 0 0 0 0 0 0 0 0 0 0 0
35526- 0 0 0 0 0 0 0 0 0 0 0 0
35527- 0 0 0 0 0 0 0 0 0 0 0 0
35528- 0 0 0 6 6 6 14 14 14 42 42 42
35529- 90 90 90 18 18 18 18 18 18 26 26 26
35530- 2 2 6 116 116 116 253 253 253 253 253 253
35531-253 253 253 253 253 253 253 253 253 253 253 253
35532-253 253 253 253 253 253 250 250 250 238 238 238
35533-253 253 253 253 253 253 253 253 253 253 253 253
35534-253 253 253 253 253 253 253 253 253 253 253 253
35535-253 253 253 253 253 253 253 253 253 253 253 253
35536-253 253 253 253 253 253 253 253 253 253 253 253
35537-253 253 253 253 253 253 94 94 94 6 6 6
35538- 2 2 6 2 2 6 10 10 10 34 34 34
35539- 2 2 6 2 2 6 2 2 6 2 2 6
35540- 74 74 74 58 58 58 22 22 22 6 6 6
35541- 0 0 0 0 0 0 0 0 0 0 0 0
35542- 0 0 0 0 0 0 0 0 0 0 0 0
35543- 0 0 0 0 0 0 0 0 0 0 0 0
35544- 0 0 0 0 0 0 0 0 0 0 0 0
35545- 0 0 0 0 0 0 0 0 0 0 0 0
35546- 0 0 0 0 0 0 0 0 0 0 0 0
35547- 0 0 0 0 0 0 0 0 0 0 0 0
35548- 0 0 0 10 10 10 26 26 26 66 66 66
35549- 82 82 82 2 2 6 38 38 38 6 6 6
35550- 14 14 14 210 210 210 253 253 253 253 253 253
35551-253 253 253 253 253 253 253 253 253 253 253 253
35552-253 253 253 253 253 253 246 246 246 242 242 242
35553-253 253 253 253 253 253 253 253 253 253 253 253
35554-253 253 253 253 253 253 253 253 253 253 253 253
35555-253 253 253 253 253 253 253 253 253 253 253 253
35556-253 253 253 253 253 253 253 253 253 253 253 253
35557-253 253 253 253 253 253 144 144 144 2 2 6
35558- 2 2 6 2 2 6 2 2 6 46 46 46
35559- 2 2 6 2 2 6 2 2 6 2 2 6
35560- 42 42 42 74 74 74 30 30 30 10 10 10
35561- 0 0 0 0 0 0 0 0 0 0 0 0
35562- 0 0 0 0 0 0 0 0 0 0 0 0
35563- 0 0 0 0 0 0 0 0 0 0 0 0
35564- 0 0 0 0 0 0 0 0 0 0 0 0
35565- 0 0 0 0 0 0 0 0 0 0 0 0
35566- 0 0 0 0 0 0 0 0 0 0 0 0
35567- 0 0 0 0 0 0 0 0 0 0 0 0
35568- 6 6 6 14 14 14 42 42 42 90 90 90
35569- 26 26 26 6 6 6 42 42 42 2 2 6
35570- 74 74 74 250 250 250 253 253 253 253 253 253
35571-253 253 253 253 253 253 253 253 253 253 253 253
35572-253 253 253 253 253 253 242 242 242 242 242 242
35573-253 253 253 253 253 253 253 253 253 253 253 253
35574-253 253 253 253 253 253 253 253 253 253 253 253
35575-253 253 253 253 253 253 253 253 253 253 253 253
35576-253 253 253 253 253 253 253 253 253 253 253 253
35577-253 253 253 253 253 253 182 182 182 2 2 6
35578- 2 2 6 2 2 6 2 2 6 46 46 46
35579- 2 2 6 2 2 6 2 2 6 2 2 6
35580- 10 10 10 86 86 86 38 38 38 10 10 10
35581- 0 0 0 0 0 0 0 0 0 0 0 0
35582- 0 0 0 0 0 0 0 0 0 0 0 0
35583- 0 0 0 0 0 0 0 0 0 0 0 0
35584- 0 0 0 0 0 0 0 0 0 0 0 0
35585- 0 0 0 0 0 0 0 0 0 0 0 0
35586- 0 0 0 0 0 0 0 0 0 0 0 0
35587- 0 0 0 0 0 0 0 0 0 0 0 0
35588- 10 10 10 26 26 26 66 66 66 82 82 82
35589- 2 2 6 22 22 22 18 18 18 2 2 6
35590-149 149 149 253 253 253 253 253 253 253 253 253
35591-253 253 253 253 253 253 253 253 253 253 253 253
35592-253 253 253 253 253 253 234 234 234 242 242 242
35593-253 253 253 253 253 253 253 253 253 253 253 253
35594-253 253 253 253 253 253 253 253 253 253 253 253
35595-253 253 253 253 253 253 253 253 253 253 253 253
35596-253 253 253 253 253 253 253 253 253 253 253 253
35597-253 253 253 253 253 253 206 206 206 2 2 6
35598- 2 2 6 2 2 6 2 2 6 38 38 38
35599- 2 2 6 2 2 6 2 2 6 2 2 6
35600- 6 6 6 86 86 86 46 46 46 14 14 14
35601- 0 0 0 0 0 0 0 0 0 0 0 0
35602- 0 0 0 0 0 0 0 0 0 0 0 0
35603- 0 0 0 0 0 0 0 0 0 0 0 0
35604- 0 0 0 0 0 0 0 0 0 0 0 0
35605- 0 0 0 0 0 0 0 0 0 0 0 0
35606- 0 0 0 0 0 0 0 0 0 0 0 0
35607- 0 0 0 0 0 0 0 0 0 6 6 6
35608- 18 18 18 46 46 46 86 86 86 18 18 18
35609- 2 2 6 34 34 34 10 10 10 6 6 6
35610-210 210 210 253 253 253 253 253 253 253 253 253
35611-253 253 253 253 253 253 253 253 253 253 253 253
35612-253 253 253 253 253 253 234 234 234 242 242 242
35613-253 253 253 253 253 253 253 253 253 253 253 253
35614-253 253 253 253 253 253 253 253 253 253 253 253
35615-253 253 253 253 253 253 253 253 253 253 253 253
35616-253 253 253 253 253 253 253 253 253 253 253 253
35617-253 253 253 253 253 253 221 221 221 6 6 6
35618- 2 2 6 2 2 6 6 6 6 30 30 30
35619- 2 2 6 2 2 6 2 2 6 2 2 6
35620- 2 2 6 82 82 82 54 54 54 18 18 18
35621- 6 6 6 0 0 0 0 0 0 0 0 0
35622- 0 0 0 0 0 0 0 0 0 0 0 0
35623- 0 0 0 0 0 0 0 0 0 0 0 0
35624- 0 0 0 0 0 0 0 0 0 0 0 0
35625- 0 0 0 0 0 0 0 0 0 0 0 0
35626- 0 0 0 0 0 0 0 0 0 0 0 0
35627- 0 0 0 0 0 0 0 0 0 10 10 10
35628- 26 26 26 66 66 66 62 62 62 2 2 6
35629- 2 2 6 38 38 38 10 10 10 26 26 26
35630-238 238 238 253 253 253 253 253 253 253 253 253
35631-253 253 253 253 253 253 253 253 253 253 253 253
35632-253 253 253 253 253 253 231 231 231 238 238 238
35633-253 253 253 253 253 253 253 253 253 253 253 253
35634-253 253 253 253 253 253 253 253 253 253 253 253
35635-253 253 253 253 253 253 253 253 253 253 253 253
35636-253 253 253 253 253 253 253 253 253 253 253 253
35637-253 253 253 253 253 253 231 231 231 6 6 6
35638- 2 2 6 2 2 6 10 10 10 30 30 30
35639- 2 2 6 2 2 6 2 2 6 2 2 6
35640- 2 2 6 66 66 66 58 58 58 22 22 22
35641- 6 6 6 0 0 0 0 0 0 0 0 0
35642- 0 0 0 0 0 0 0 0 0 0 0 0
35643- 0 0 0 0 0 0 0 0 0 0 0 0
35644- 0 0 0 0 0 0 0 0 0 0 0 0
35645- 0 0 0 0 0 0 0 0 0 0 0 0
35646- 0 0 0 0 0 0 0 0 0 0 0 0
35647- 0 0 0 0 0 0 0 0 0 10 10 10
35648- 38 38 38 78 78 78 6 6 6 2 2 6
35649- 2 2 6 46 46 46 14 14 14 42 42 42
35650-246 246 246 253 253 253 253 253 253 253 253 253
35651-253 253 253 253 253 253 253 253 253 253 253 253
35652-253 253 253 253 253 253 231 231 231 242 242 242
35653-253 253 253 253 253 253 253 253 253 253 253 253
35654-253 253 253 253 253 253 253 253 253 253 253 253
35655-253 253 253 253 253 253 253 253 253 253 253 253
35656-253 253 253 253 253 253 253 253 253 253 253 253
35657-253 253 253 253 253 253 234 234 234 10 10 10
35658- 2 2 6 2 2 6 22 22 22 14 14 14
35659- 2 2 6 2 2 6 2 2 6 2 2 6
35660- 2 2 6 66 66 66 62 62 62 22 22 22
35661- 6 6 6 0 0 0 0 0 0 0 0 0
35662- 0 0 0 0 0 0 0 0 0 0 0 0
35663- 0 0 0 0 0 0 0 0 0 0 0 0
35664- 0 0 0 0 0 0 0 0 0 0 0 0
35665- 0 0 0 0 0 0 0 0 0 0 0 0
35666- 0 0 0 0 0 0 0 0 0 0 0 0
35667- 0 0 0 0 0 0 6 6 6 18 18 18
35668- 50 50 50 74 74 74 2 2 6 2 2 6
35669- 14 14 14 70 70 70 34 34 34 62 62 62
35670-250 250 250 253 253 253 253 253 253 253 253 253
35671-253 253 253 253 253 253 253 253 253 253 253 253
35672-253 253 253 253 253 253 231 231 231 246 246 246
35673-253 253 253 253 253 253 253 253 253 253 253 253
35674-253 253 253 253 253 253 253 253 253 253 253 253
35675-253 253 253 253 253 253 253 253 253 253 253 253
35676-253 253 253 253 253 253 253 253 253 253 253 253
35677-253 253 253 253 253 253 234 234 234 14 14 14
35678- 2 2 6 2 2 6 30 30 30 2 2 6
35679- 2 2 6 2 2 6 2 2 6 2 2 6
35680- 2 2 6 66 66 66 62 62 62 22 22 22
35681- 6 6 6 0 0 0 0 0 0 0 0 0
35682- 0 0 0 0 0 0 0 0 0 0 0 0
35683- 0 0 0 0 0 0 0 0 0 0 0 0
35684- 0 0 0 0 0 0 0 0 0 0 0 0
35685- 0 0 0 0 0 0 0 0 0 0 0 0
35686- 0 0 0 0 0 0 0 0 0 0 0 0
35687- 0 0 0 0 0 0 6 6 6 18 18 18
35688- 54 54 54 62 62 62 2 2 6 2 2 6
35689- 2 2 6 30 30 30 46 46 46 70 70 70
35690-250 250 250 253 253 253 253 253 253 253 253 253
35691-253 253 253 253 253 253 253 253 253 253 253 253
35692-253 253 253 253 253 253 231 231 231 246 246 246
35693-253 253 253 253 253 253 253 253 253 253 253 253
35694-253 253 253 253 253 253 253 253 253 253 253 253
35695-253 253 253 253 253 253 253 253 253 253 253 253
35696-253 253 253 253 253 253 253 253 253 253 253 253
35697-253 253 253 253 253 253 226 226 226 10 10 10
35698- 2 2 6 6 6 6 30 30 30 2 2 6
35699- 2 2 6 2 2 6 2 2 6 2 2 6
35700- 2 2 6 66 66 66 58 58 58 22 22 22
35701- 6 6 6 0 0 0 0 0 0 0 0 0
35702- 0 0 0 0 0 0 0 0 0 0 0 0
35703- 0 0 0 0 0 0 0 0 0 0 0 0
35704- 0 0 0 0 0 0 0 0 0 0 0 0
35705- 0 0 0 0 0 0 0 0 0 0 0 0
35706- 0 0 0 0 0 0 0 0 0 0 0 0
35707- 0 0 0 0 0 0 6 6 6 22 22 22
35708- 58 58 58 62 62 62 2 2 6 2 2 6
35709- 2 2 6 2 2 6 30 30 30 78 78 78
35710-250 250 250 253 253 253 253 253 253 253 253 253
35711-253 253 253 253 253 253 253 253 253 253 253 253
35712-253 253 253 253 253 253 231 231 231 246 246 246
35713-253 253 253 253 253 253 253 253 253 253 253 253
35714-253 253 253 253 253 253 253 253 253 253 253 253
35715-253 253 253 253 253 253 253 253 253 253 253 253
35716-253 253 253 253 253 253 253 253 253 253 253 253
35717-253 253 253 253 253 253 206 206 206 2 2 6
35718- 22 22 22 34 34 34 18 14 6 22 22 22
35719- 26 26 26 18 18 18 6 6 6 2 2 6
35720- 2 2 6 82 82 82 54 54 54 18 18 18
35721- 6 6 6 0 0 0 0 0 0 0 0 0
35722- 0 0 0 0 0 0 0 0 0 0 0 0
35723- 0 0 0 0 0 0 0 0 0 0 0 0
35724- 0 0 0 0 0 0 0 0 0 0 0 0
35725- 0 0 0 0 0 0 0 0 0 0 0 0
35726- 0 0 0 0 0 0 0 0 0 0 0 0
35727- 0 0 0 0 0 0 6 6 6 26 26 26
35728- 62 62 62 106 106 106 74 54 14 185 133 11
35729-210 162 10 121 92 8 6 6 6 62 62 62
35730-238 238 238 253 253 253 253 253 253 253 253 253
35731-253 253 253 253 253 253 253 253 253 253 253 253
35732-253 253 253 253 253 253 231 231 231 246 246 246
35733-253 253 253 253 253 253 253 253 253 253 253 253
35734-253 253 253 253 253 253 253 253 253 253 253 253
35735-253 253 253 253 253 253 253 253 253 253 253 253
35736-253 253 253 253 253 253 253 253 253 253 253 253
35737-253 253 253 253 253 253 158 158 158 18 18 18
35738- 14 14 14 2 2 6 2 2 6 2 2 6
35739- 6 6 6 18 18 18 66 66 66 38 38 38
35740- 6 6 6 94 94 94 50 50 50 18 18 18
35741- 6 6 6 0 0 0 0 0 0 0 0 0
35742- 0 0 0 0 0 0 0 0 0 0 0 0
35743- 0 0 0 0 0 0 0 0 0 0 0 0
35744- 0 0 0 0 0 0 0 0 0 0 0 0
35745- 0 0 0 0 0 0 0 0 0 0 0 0
35746- 0 0 0 0 0 0 0 0 0 6 6 6
35747- 10 10 10 10 10 10 18 18 18 38 38 38
35748- 78 78 78 142 134 106 216 158 10 242 186 14
35749-246 190 14 246 190 14 156 118 10 10 10 10
35750- 90 90 90 238 238 238 253 253 253 253 253 253
35751-253 253 253 253 253 253 253 253 253 253 253 253
35752-253 253 253 253 253 253 231 231 231 250 250 250
35753-253 253 253 253 253 253 253 253 253 253 253 253
35754-253 253 253 253 253 253 253 253 253 253 253 253
35755-253 253 253 253 253 253 253 253 253 253 253 253
35756-253 253 253 253 253 253 253 253 253 246 230 190
35757-238 204 91 238 204 91 181 142 44 37 26 9
35758- 2 2 6 2 2 6 2 2 6 2 2 6
35759- 2 2 6 2 2 6 38 38 38 46 46 46
35760- 26 26 26 106 106 106 54 54 54 18 18 18
35761- 6 6 6 0 0 0 0 0 0 0 0 0
35762- 0 0 0 0 0 0 0 0 0 0 0 0
35763- 0 0 0 0 0 0 0 0 0 0 0 0
35764- 0 0 0 0 0 0 0 0 0 0 0 0
35765- 0 0 0 0 0 0 0 0 0 0 0 0
35766- 0 0 0 6 6 6 14 14 14 22 22 22
35767- 30 30 30 38 38 38 50 50 50 70 70 70
35768-106 106 106 190 142 34 226 170 11 242 186 14
35769-246 190 14 246 190 14 246 190 14 154 114 10
35770- 6 6 6 74 74 74 226 226 226 253 253 253
35771-253 253 253 253 253 253 253 253 253 253 253 253
35772-253 253 253 253 253 253 231 231 231 250 250 250
35773-253 253 253 253 253 253 253 253 253 253 253 253
35774-253 253 253 253 253 253 253 253 253 253 253 253
35775-253 253 253 253 253 253 253 253 253 253 253 253
35776-253 253 253 253 253 253 253 253 253 228 184 62
35777-241 196 14 241 208 19 232 195 16 38 30 10
35778- 2 2 6 2 2 6 2 2 6 2 2 6
35779- 2 2 6 6 6 6 30 30 30 26 26 26
35780-203 166 17 154 142 90 66 66 66 26 26 26
35781- 6 6 6 0 0 0 0 0 0 0 0 0
35782- 0 0 0 0 0 0 0 0 0 0 0 0
35783- 0 0 0 0 0 0 0 0 0 0 0 0
35784- 0 0 0 0 0 0 0 0 0 0 0 0
35785- 0 0 0 0 0 0 0 0 0 0 0 0
35786- 6 6 6 18 18 18 38 38 38 58 58 58
35787- 78 78 78 86 86 86 101 101 101 123 123 123
35788-175 146 61 210 150 10 234 174 13 246 186 14
35789-246 190 14 246 190 14 246 190 14 238 190 10
35790-102 78 10 2 2 6 46 46 46 198 198 198
35791-253 253 253 253 253 253 253 253 253 253 253 253
35792-253 253 253 253 253 253 234 234 234 242 242 242
35793-253 253 253 253 253 253 253 253 253 253 253 253
35794-253 253 253 253 253 253 253 253 253 253 253 253
35795-253 253 253 253 253 253 253 253 253 253 253 253
35796-253 253 253 253 253 253 253 253 253 224 178 62
35797-242 186 14 241 196 14 210 166 10 22 18 6
35798- 2 2 6 2 2 6 2 2 6 2 2 6
35799- 2 2 6 2 2 6 6 6 6 121 92 8
35800-238 202 15 232 195 16 82 82 82 34 34 34
35801- 10 10 10 0 0 0 0 0 0 0 0 0
35802- 0 0 0 0 0 0 0 0 0 0 0 0
35803- 0 0 0 0 0 0 0 0 0 0 0 0
35804- 0 0 0 0 0 0 0 0 0 0 0 0
35805- 0 0 0 0 0 0 0 0 0 0 0 0
35806- 14 14 14 38 38 38 70 70 70 154 122 46
35807-190 142 34 200 144 11 197 138 11 197 138 11
35808-213 154 11 226 170 11 242 186 14 246 190 14
35809-246 190 14 246 190 14 246 190 14 246 190 14
35810-225 175 15 46 32 6 2 2 6 22 22 22
35811-158 158 158 250 250 250 253 253 253 253 253 253
35812-253 253 253 253 253 253 253 253 253 253 253 253
35813-253 253 253 253 253 253 253 253 253 253 253 253
35814-253 253 253 253 253 253 253 253 253 253 253 253
35815-253 253 253 253 253 253 253 253 253 253 253 253
35816-253 253 253 250 250 250 242 242 242 224 178 62
35817-239 182 13 236 186 11 213 154 11 46 32 6
35818- 2 2 6 2 2 6 2 2 6 2 2 6
35819- 2 2 6 2 2 6 61 42 6 225 175 15
35820-238 190 10 236 186 11 112 100 78 42 42 42
35821- 14 14 14 0 0 0 0 0 0 0 0 0
35822- 0 0 0 0 0 0 0 0 0 0 0 0
35823- 0 0 0 0 0 0 0 0 0 0 0 0
35824- 0 0 0 0 0 0 0 0 0 0 0 0
35825- 0 0 0 0 0 0 0 0 0 6 6 6
35826- 22 22 22 54 54 54 154 122 46 213 154 11
35827-226 170 11 230 174 11 226 170 11 226 170 11
35828-236 178 12 242 186 14 246 190 14 246 190 14
35829-246 190 14 246 190 14 246 190 14 246 190 14
35830-241 196 14 184 144 12 10 10 10 2 2 6
35831- 6 6 6 116 116 116 242 242 242 253 253 253
35832-253 253 253 253 253 253 253 253 253 253 253 253
35833-253 253 253 253 253 253 253 253 253 253 253 253
35834-253 253 253 253 253 253 253 253 253 253 253 253
35835-253 253 253 253 253 253 253 253 253 253 253 253
35836-253 253 253 231 231 231 198 198 198 214 170 54
35837-236 178 12 236 178 12 210 150 10 137 92 6
35838- 18 14 6 2 2 6 2 2 6 2 2 6
35839- 6 6 6 70 47 6 200 144 11 236 178 12
35840-239 182 13 239 182 13 124 112 88 58 58 58
35841- 22 22 22 6 6 6 0 0 0 0 0 0
35842- 0 0 0 0 0 0 0 0 0 0 0 0
35843- 0 0 0 0 0 0 0 0 0 0 0 0
35844- 0 0 0 0 0 0 0 0 0 0 0 0
35845- 0 0 0 0 0 0 0 0 0 10 10 10
35846- 30 30 30 70 70 70 180 133 36 226 170 11
35847-239 182 13 242 186 14 242 186 14 246 186 14
35848-246 190 14 246 190 14 246 190 14 246 190 14
35849-246 190 14 246 190 14 246 190 14 246 190 14
35850-246 190 14 232 195 16 98 70 6 2 2 6
35851- 2 2 6 2 2 6 66 66 66 221 221 221
35852-253 253 253 253 253 253 253 253 253 253 253 253
35853-253 253 253 253 253 253 253 253 253 253 253 253
35854-253 253 253 253 253 253 253 253 253 253 253 253
35855-253 253 253 253 253 253 253 253 253 253 253 253
35856-253 253 253 206 206 206 198 198 198 214 166 58
35857-230 174 11 230 174 11 216 158 10 192 133 9
35858-163 110 8 116 81 8 102 78 10 116 81 8
35859-167 114 7 197 138 11 226 170 11 239 182 13
35860-242 186 14 242 186 14 162 146 94 78 78 78
35861- 34 34 34 14 14 14 6 6 6 0 0 0
35862- 0 0 0 0 0 0 0 0 0 0 0 0
35863- 0 0 0 0 0 0 0 0 0 0 0 0
35864- 0 0 0 0 0 0 0 0 0 0 0 0
35865- 0 0 0 0 0 0 0 0 0 6 6 6
35866- 30 30 30 78 78 78 190 142 34 226 170 11
35867-239 182 13 246 190 14 246 190 14 246 190 14
35868-246 190 14 246 190 14 246 190 14 246 190 14
35869-246 190 14 246 190 14 246 190 14 246 190 14
35870-246 190 14 241 196 14 203 166 17 22 18 6
35871- 2 2 6 2 2 6 2 2 6 38 38 38
35872-218 218 218 253 253 253 253 253 253 253 253 253
35873-253 253 253 253 253 253 253 253 253 253 253 253
35874-253 253 253 253 253 253 253 253 253 253 253 253
35875-253 253 253 253 253 253 253 253 253 253 253 253
35876-250 250 250 206 206 206 198 198 198 202 162 69
35877-226 170 11 236 178 12 224 166 10 210 150 10
35878-200 144 11 197 138 11 192 133 9 197 138 11
35879-210 150 10 226 170 11 242 186 14 246 190 14
35880-246 190 14 246 186 14 225 175 15 124 112 88
35881- 62 62 62 30 30 30 14 14 14 6 6 6
35882- 0 0 0 0 0 0 0 0 0 0 0 0
35883- 0 0 0 0 0 0 0 0 0 0 0 0
35884- 0 0 0 0 0 0 0 0 0 0 0 0
35885- 0 0 0 0 0 0 0 0 0 10 10 10
35886- 30 30 30 78 78 78 174 135 50 224 166 10
35887-239 182 13 246 190 14 246 190 14 246 190 14
35888-246 190 14 246 190 14 246 190 14 246 190 14
35889-246 190 14 246 190 14 246 190 14 246 190 14
35890-246 190 14 246 190 14 241 196 14 139 102 15
35891- 2 2 6 2 2 6 2 2 6 2 2 6
35892- 78 78 78 250 250 250 253 253 253 253 253 253
35893-253 253 253 253 253 253 253 253 253 253 253 253
35894-253 253 253 253 253 253 253 253 253 253 253 253
35895-253 253 253 253 253 253 253 253 253 253 253 253
35896-250 250 250 214 214 214 198 198 198 190 150 46
35897-219 162 10 236 178 12 234 174 13 224 166 10
35898-216 158 10 213 154 11 213 154 11 216 158 10
35899-226 170 11 239 182 13 246 190 14 246 190 14
35900-246 190 14 246 190 14 242 186 14 206 162 42
35901-101 101 101 58 58 58 30 30 30 14 14 14
35902- 6 6 6 0 0 0 0 0 0 0 0 0
35903- 0 0 0 0 0 0 0 0 0 0 0 0
35904- 0 0 0 0 0 0 0 0 0 0 0 0
35905- 0 0 0 0 0 0 0 0 0 10 10 10
35906- 30 30 30 74 74 74 174 135 50 216 158 10
35907-236 178 12 246 190 14 246 190 14 246 190 14
35908-246 190 14 246 190 14 246 190 14 246 190 14
35909-246 190 14 246 190 14 246 190 14 246 190 14
35910-246 190 14 246 190 14 241 196 14 226 184 13
35911- 61 42 6 2 2 6 2 2 6 2 2 6
35912- 22 22 22 238 238 238 253 253 253 253 253 253
35913-253 253 253 253 253 253 253 253 253 253 253 253
35914-253 253 253 253 253 253 253 253 253 253 253 253
35915-253 253 253 253 253 253 253 253 253 253 253 253
35916-253 253 253 226 226 226 187 187 187 180 133 36
35917-216 158 10 236 178 12 239 182 13 236 178 12
35918-230 174 11 226 170 11 226 170 11 230 174 11
35919-236 178 12 242 186 14 246 190 14 246 190 14
35920-246 190 14 246 190 14 246 186 14 239 182 13
35921-206 162 42 106 106 106 66 66 66 34 34 34
35922- 14 14 14 6 6 6 0 0 0 0 0 0
35923- 0 0 0 0 0 0 0 0 0 0 0 0
35924- 0 0 0 0 0 0 0 0 0 0 0 0
35925- 0 0 0 0 0 0 0 0 0 6 6 6
35926- 26 26 26 70 70 70 163 133 67 213 154 11
35927-236 178 12 246 190 14 246 190 14 246 190 14
35928-246 190 14 246 190 14 246 190 14 246 190 14
35929-246 190 14 246 190 14 246 190 14 246 190 14
35930-246 190 14 246 190 14 246 190 14 241 196 14
35931-190 146 13 18 14 6 2 2 6 2 2 6
35932- 46 46 46 246 246 246 253 253 253 253 253 253
35933-253 253 253 253 253 253 253 253 253 253 253 253
35934-253 253 253 253 253 253 253 253 253 253 253 253
35935-253 253 253 253 253 253 253 253 253 253 253 253
35936-253 253 253 221 221 221 86 86 86 156 107 11
35937-216 158 10 236 178 12 242 186 14 246 186 14
35938-242 186 14 239 182 13 239 182 13 242 186 14
35939-242 186 14 246 186 14 246 190 14 246 190 14
35940-246 190 14 246 190 14 246 190 14 246 190 14
35941-242 186 14 225 175 15 142 122 72 66 66 66
35942- 30 30 30 10 10 10 0 0 0 0 0 0
35943- 0 0 0 0 0 0 0 0 0 0 0 0
35944- 0 0 0 0 0 0 0 0 0 0 0 0
35945- 0 0 0 0 0 0 0 0 0 6 6 6
35946- 26 26 26 70 70 70 163 133 67 210 150 10
35947-236 178 12 246 190 14 246 190 14 246 190 14
35948-246 190 14 246 190 14 246 190 14 246 190 14
35949-246 190 14 246 190 14 246 190 14 246 190 14
35950-246 190 14 246 190 14 246 190 14 246 190 14
35951-232 195 16 121 92 8 34 34 34 106 106 106
35952-221 221 221 253 253 253 253 253 253 253 253 253
35953-253 253 253 253 253 253 253 253 253 253 253 253
35954-253 253 253 253 253 253 253 253 253 253 253 253
35955-253 253 253 253 253 253 253 253 253 253 253 253
35956-242 242 242 82 82 82 18 14 6 163 110 8
35957-216 158 10 236 178 12 242 186 14 246 190 14
35958-246 190 14 246 190 14 246 190 14 246 190 14
35959-246 190 14 246 190 14 246 190 14 246 190 14
35960-246 190 14 246 190 14 246 190 14 246 190 14
35961-246 190 14 246 190 14 242 186 14 163 133 67
35962- 46 46 46 18 18 18 6 6 6 0 0 0
35963- 0 0 0 0 0 0 0 0 0 0 0 0
35964- 0 0 0 0 0 0 0 0 0 0 0 0
35965- 0 0 0 0 0 0 0 0 0 10 10 10
35966- 30 30 30 78 78 78 163 133 67 210 150 10
35967-236 178 12 246 186 14 246 190 14 246 190 14
35968-246 190 14 246 190 14 246 190 14 246 190 14
35969-246 190 14 246 190 14 246 190 14 246 190 14
35970-246 190 14 246 190 14 246 190 14 246 190 14
35971-241 196 14 215 174 15 190 178 144 253 253 253
35972-253 253 253 253 253 253 253 253 253 253 253 253
35973-253 253 253 253 253 253 253 253 253 253 253 253
35974-253 253 253 253 253 253 253 253 253 253 253 253
35975-253 253 253 253 253 253 253 253 253 218 218 218
35976- 58 58 58 2 2 6 22 18 6 167 114 7
35977-216 158 10 236 178 12 246 186 14 246 190 14
35978-246 190 14 246 190 14 246 190 14 246 190 14
35979-246 190 14 246 190 14 246 190 14 246 190 14
35980-246 190 14 246 190 14 246 190 14 246 190 14
35981-246 190 14 246 186 14 242 186 14 190 150 46
35982- 54 54 54 22 22 22 6 6 6 0 0 0
35983- 0 0 0 0 0 0 0 0 0 0 0 0
35984- 0 0 0 0 0 0 0 0 0 0 0 0
35985- 0 0 0 0 0 0 0 0 0 14 14 14
35986- 38 38 38 86 86 86 180 133 36 213 154 11
35987-236 178 12 246 186 14 246 190 14 246 190 14
35988-246 190 14 246 190 14 246 190 14 246 190 14
35989-246 190 14 246 190 14 246 190 14 246 190 14
35990-246 190 14 246 190 14 246 190 14 246 190 14
35991-246 190 14 232 195 16 190 146 13 214 214 214
35992-253 253 253 253 253 253 253 253 253 253 253 253
35993-253 253 253 253 253 253 253 253 253 253 253 253
35994-253 253 253 253 253 253 253 253 253 253 253 253
35995-253 253 253 250 250 250 170 170 170 26 26 26
35996- 2 2 6 2 2 6 37 26 9 163 110 8
35997-219 162 10 239 182 13 246 186 14 246 190 14
35998-246 190 14 246 190 14 246 190 14 246 190 14
35999-246 190 14 246 190 14 246 190 14 246 190 14
36000-246 190 14 246 190 14 246 190 14 246 190 14
36001-246 186 14 236 178 12 224 166 10 142 122 72
36002- 46 46 46 18 18 18 6 6 6 0 0 0
36003- 0 0 0 0 0 0 0 0 0 0 0 0
36004- 0 0 0 0 0 0 0 0 0 0 0 0
36005- 0 0 0 0 0 0 6 6 6 18 18 18
36006- 50 50 50 109 106 95 192 133 9 224 166 10
36007-242 186 14 246 190 14 246 190 14 246 190 14
36008-246 190 14 246 190 14 246 190 14 246 190 14
36009-246 190 14 246 190 14 246 190 14 246 190 14
36010-246 190 14 246 190 14 246 190 14 246 190 14
36011-242 186 14 226 184 13 210 162 10 142 110 46
36012-226 226 226 253 253 253 253 253 253 253 253 253
36013-253 253 253 253 253 253 253 253 253 253 253 253
36014-253 253 253 253 253 253 253 253 253 253 253 253
36015-198 198 198 66 66 66 2 2 6 2 2 6
36016- 2 2 6 2 2 6 50 34 6 156 107 11
36017-219 162 10 239 182 13 246 186 14 246 190 14
36018-246 190 14 246 190 14 246 190 14 246 190 14
36019-246 190 14 246 190 14 246 190 14 246 190 14
36020-246 190 14 246 190 14 246 190 14 242 186 14
36021-234 174 13 213 154 11 154 122 46 66 66 66
36022- 30 30 30 10 10 10 0 0 0 0 0 0
36023- 0 0 0 0 0 0 0 0 0 0 0 0
36024- 0 0 0 0 0 0 0 0 0 0 0 0
36025- 0 0 0 0 0 0 6 6 6 22 22 22
36026- 58 58 58 154 121 60 206 145 10 234 174 13
36027-242 186 14 246 186 14 246 190 14 246 190 14
36028-246 190 14 246 190 14 246 190 14 246 190 14
36029-246 190 14 246 190 14 246 190 14 246 190 14
36030-246 190 14 246 190 14 246 190 14 246 190 14
36031-246 186 14 236 178 12 210 162 10 163 110 8
36032- 61 42 6 138 138 138 218 218 218 250 250 250
36033-253 253 253 253 253 253 253 253 253 250 250 250
36034-242 242 242 210 210 210 144 144 144 66 66 66
36035- 6 6 6 2 2 6 2 2 6 2 2 6
36036- 2 2 6 2 2 6 61 42 6 163 110 8
36037-216 158 10 236 178 12 246 190 14 246 190 14
36038-246 190 14 246 190 14 246 190 14 246 190 14
36039-246 190 14 246 190 14 246 190 14 246 190 14
36040-246 190 14 239 182 13 230 174 11 216 158 10
36041-190 142 34 124 112 88 70 70 70 38 38 38
36042- 18 18 18 6 6 6 0 0 0 0 0 0
36043- 0 0 0 0 0 0 0 0 0 0 0 0
36044- 0 0 0 0 0 0 0 0 0 0 0 0
36045- 0 0 0 0 0 0 6 6 6 22 22 22
36046- 62 62 62 168 124 44 206 145 10 224 166 10
36047-236 178 12 239 182 13 242 186 14 242 186 14
36048-246 186 14 246 190 14 246 190 14 246 190 14
36049-246 190 14 246 190 14 246 190 14 246 190 14
36050-246 190 14 246 190 14 246 190 14 246 190 14
36051-246 190 14 236 178 12 216 158 10 175 118 6
36052- 80 54 7 2 2 6 6 6 6 30 30 30
36053- 54 54 54 62 62 62 50 50 50 38 38 38
36054- 14 14 14 2 2 6 2 2 6 2 2 6
36055- 2 2 6 2 2 6 2 2 6 2 2 6
36056- 2 2 6 6 6 6 80 54 7 167 114 7
36057-213 154 11 236 178 12 246 190 14 246 190 14
36058-246 190 14 246 190 14 246 190 14 246 190 14
36059-246 190 14 242 186 14 239 182 13 239 182 13
36060-230 174 11 210 150 10 174 135 50 124 112 88
36061- 82 82 82 54 54 54 34 34 34 18 18 18
36062- 6 6 6 0 0 0 0 0 0 0 0 0
36063- 0 0 0 0 0 0 0 0 0 0 0 0
36064- 0 0 0 0 0 0 0 0 0 0 0 0
36065- 0 0 0 0 0 0 6 6 6 18 18 18
36066- 50 50 50 158 118 36 192 133 9 200 144 11
36067-216 158 10 219 162 10 224 166 10 226 170 11
36068-230 174 11 236 178 12 239 182 13 239 182 13
36069-242 186 14 246 186 14 246 190 14 246 190 14
36070-246 190 14 246 190 14 246 190 14 246 190 14
36071-246 186 14 230 174 11 210 150 10 163 110 8
36072-104 69 6 10 10 10 2 2 6 2 2 6
36073- 2 2 6 2 2 6 2 2 6 2 2 6
36074- 2 2 6 2 2 6 2 2 6 2 2 6
36075- 2 2 6 2 2 6 2 2 6 2 2 6
36076- 2 2 6 6 6 6 91 60 6 167 114 7
36077-206 145 10 230 174 11 242 186 14 246 190 14
36078-246 190 14 246 190 14 246 186 14 242 186 14
36079-239 182 13 230 174 11 224 166 10 213 154 11
36080-180 133 36 124 112 88 86 86 86 58 58 58
36081- 38 38 38 22 22 22 10 10 10 6 6 6
36082- 0 0 0 0 0 0 0 0 0 0 0 0
36083- 0 0 0 0 0 0 0 0 0 0 0 0
36084- 0 0 0 0 0 0 0 0 0 0 0 0
36085- 0 0 0 0 0 0 0 0 0 14 14 14
36086- 34 34 34 70 70 70 138 110 50 158 118 36
36087-167 114 7 180 123 7 192 133 9 197 138 11
36088-200 144 11 206 145 10 213 154 11 219 162 10
36089-224 166 10 230 174 11 239 182 13 242 186 14
36090-246 186 14 246 186 14 246 186 14 246 186 14
36091-239 182 13 216 158 10 185 133 11 152 99 6
36092-104 69 6 18 14 6 2 2 6 2 2 6
36093- 2 2 6 2 2 6 2 2 6 2 2 6
36094- 2 2 6 2 2 6 2 2 6 2 2 6
36095- 2 2 6 2 2 6 2 2 6 2 2 6
36096- 2 2 6 6 6 6 80 54 7 152 99 6
36097-192 133 9 219 162 10 236 178 12 239 182 13
36098-246 186 14 242 186 14 239 182 13 236 178 12
36099-224 166 10 206 145 10 192 133 9 154 121 60
36100- 94 94 94 62 62 62 42 42 42 22 22 22
36101- 14 14 14 6 6 6 0 0 0 0 0 0
36102- 0 0 0 0 0 0 0 0 0 0 0 0
36103- 0 0 0 0 0 0 0 0 0 0 0 0
36104- 0 0 0 0 0 0 0 0 0 0 0 0
36105- 0 0 0 0 0 0 0 0 0 6 6 6
36106- 18 18 18 34 34 34 58 58 58 78 78 78
36107-101 98 89 124 112 88 142 110 46 156 107 11
36108-163 110 8 167 114 7 175 118 6 180 123 7
36109-185 133 11 197 138 11 210 150 10 219 162 10
36110-226 170 11 236 178 12 236 178 12 234 174 13
36111-219 162 10 197 138 11 163 110 8 130 83 6
36112- 91 60 6 10 10 10 2 2 6 2 2 6
36113- 18 18 18 38 38 38 38 38 38 38 38 38
36114- 38 38 38 38 38 38 38 38 38 38 38 38
36115- 38 38 38 38 38 38 26 26 26 2 2 6
36116- 2 2 6 6 6 6 70 47 6 137 92 6
36117-175 118 6 200 144 11 219 162 10 230 174 11
36118-234 174 13 230 174 11 219 162 10 210 150 10
36119-192 133 9 163 110 8 124 112 88 82 82 82
36120- 50 50 50 30 30 30 14 14 14 6 6 6
36121- 0 0 0 0 0 0 0 0 0 0 0 0
36122- 0 0 0 0 0 0 0 0 0 0 0 0
36123- 0 0 0 0 0 0 0 0 0 0 0 0
36124- 0 0 0 0 0 0 0 0 0 0 0 0
36125- 0 0 0 0 0 0 0 0 0 0 0 0
36126- 6 6 6 14 14 14 22 22 22 34 34 34
36127- 42 42 42 58 58 58 74 74 74 86 86 86
36128-101 98 89 122 102 70 130 98 46 121 87 25
36129-137 92 6 152 99 6 163 110 8 180 123 7
36130-185 133 11 197 138 11 206 145 10 200 144 11
36131-180 123 7 156 107 11 130 83 6 104 69 6
36132- 50 34 6 54 54 54 110 110 110 101 98 89
36133- 86 86 86 82 82 82 78 78 78 78 78 78
36134- 78 78 78 78 78 78 78 78 78 78 78 78
36135- 78 78 78 82 82 82 86 86 86 94 94 94
36136-106 106 106 101 101 101 86 66 34 124 80 6
36137-156 107 11 180 123 7 192 133 9 200 144 11
36138-206 145 10 200 144 11 192 133 9 175 118 6
36139-139 102 15 109 106 95 70 70 70 42 42 42
36140- 22 22 22 10 10 10 0 0 0 0 0 0
36141- 0 0 0 0 0 0 0 0 0 0 0 0
36142- 0 0 0 0 0 0 0 0 0 0 0 0
36143- 0 0 0 0 0 0 0 0 0 0 0 0
36144- 0 0 0 0 0 0 0 0 0 0 0 0
36145- 0 0 0 0 0 0 0 0 0 0 0 0
36146- 0 0 0 0 0 0 6 6 6 10 10 10
36147- 14 14 14 22 22 22 30 30 30 38 38 38
36148- 50 50 50 62 62 62 74 74 74 90 90 90
36149-101 98 89 112 100 78 121 87 25 124 80 6
36150-137 92 6 152 99 6 152 99 6 152 99 6
36151-138 86 6 124 80 6 98 70 6 86 66 30
36152-101 98 89 82 82 82 58 58 58 46 46 46
36153- 38 38 38 34 34 34 34 34 34 34 34 34
36154- 34 34 34 34 34 34 34 34 34 34 34 34
36155- 34 34 34 34 34 34 38 38 38 42 42 42
36156- 54 54 54 82 82 82 94 86 76 91 60 6
36157-134 86 6 156 107 11 167 114 7 175 118 6
36158-175 118 6 167 114 7 152 99 6 121 87 25
36159-101 98 89 62 62 62 34 34 34 18 18 18
36160- 6 6 6 0 0 0 0 0 0 0 0 0
36161- 0 0 0 0 0 0 0 0 0 0 0 0
36162- 0 0 0 0 0 0 0 0 0 0 0 0
36163- 0 0 0 0 0 0 0 0 0 0 0 0
36164- 0 0 0 0 0 0 0 0 0 0 0 0
36165- 0 0 0 0 0 0 0 0 0 0 0 0
36166- 0 0 0 0 0 0 0 0 0 0 0 0
36167- 0 0 0 6 6 6 6 6 6 10 10 10
36168- 18 18 18 22 22 22 30 30 30 42 42 42
36169- 50 50 50 66 66 66 86 86 86 101 98 89
36170-106 86 58 98 70 6 104 69 6 104 69 6
36171-104 69 6 91 60 6 82 62 34 90 90 90
36172- 62 62 62 38 38 38 22 22 22 14 14 14
36173- 10 10 10 10 10 10 10 10 10 10 10 10
36174- 10 10 10 10 10 10 6 6 6 10 10 10
36175- 10 10 10 10 10 10 10 10 10 14 14 14
36176- 22 22 22 42 42 42 70 70 70 89 81 66
36177- 80 54 7 104 69 6 124 80 6 137 92 6
36178-134 86 6 116 81 8 100 82 52 86 86 86
36179- 58 58 58 30 30 30 14 14 14 6 6 6
36180- 0 0 0 0 0 0 0 0 0 0 0 0
36181- 0 0 0 0 0 0 0 0 0 0 0 0
36182- 0 0 0 0 0 0 0 0 0 0 0 0
36183- 0 0 0 0 0 0 0 0 0 0 0 0
36184- 0 0 0 0 0 0 0 0 0 0 0 0
36185- 0 0 0 0 0 0 0 0 0 0 0 0
36186- 0 0 0 0 0 0 0 0 0 0 0 0
36187- 0 0 0 0 0 0 0 0 0 0 0 0
36188- 0 0 0 6 6 6 10 10 10 14 14 14
36189- 18 18 18 26 26 26 38 38 38 54 54 54
36190- 70 70 70 86 86 86 94 86 76 89 81 66
36191- 89 81 66 86 86 86 74 74 74 50 50 50
36192- 30 30 30 14 14 14 6 6 6 0 0 0
36193- 0 0 0 0 0 0 0 0 0 0 0 0
36194- 0 0 0 0 0 0 0 0 0 0 0 0
36195- 0 0 0 0 0 0 0 0 0 0 0 0
36196- 6 6 6 18 18 18 34 34 34 58 58 58
36197- 82 82 82 89 81 66 89 81 66 89 81 66
36198- 94 86 66 94 86 76 74 74 74 50 50 50
36199- 26 26 26 14 14 14 6 6 6 0 0 0
36200- 0 0 0 0 0 0 0 0 0 0 0 0
36201- 0 0 0 0 0 0 0 0 0 0 0 0
36202- 0 0 0 0 0 0 0 0 0 0 0 0
36203- 0 0 0 0 0 0 0 0 0 0 0 0
36204- 0 0 0 0 0 0 0 0 0 0 0 0
36205- 0 0 0 0 0 0 0 0 0 0 0 0
36206- 0 0 0 0 0 0 0 0 0 0 0 0
36207- 0 0 0 0 0 0 0 0 0 0 0 0
36208- 0 0 0 0 0 0 0 0 0 0 0 0
36209- 6 6 6 6 6 6 14 14 14 18 18 18
36210- 30 30 30 38 38 38 46 46 46 54 54 54
36211- 50 50 50 42 42 42 30 30 30 18 18 18
36212- 10 10 10 0 0 0 0 0 0 0 0 0
36213- 0 0 0 0 0 0 0 0 0 0 0 0
36214- 0 0 0 0 0 0 0 0 0 0 0 0
36215- 0 0 0 0 0 0 0 0 0 0 0 0
36216- 0 0 0 6 6 6 14 14 14 26 26 26
36217- 38 38 38 50 50 50 58 58 58 58 58 58
36218- 54 54 54 42 42 42 30 30 30 18 18 18
36219- 10 10 10 0 0 0 0 0 0 0 0 0
36220- 0 0 0 0 0 0 0 0 0 0 0 0
36221- 0 0 0 0 0 0 0 0 0 0 0 0
36222- 0 0 0 0 0 0 0 0 0 0 0 0
36223- 0 0 0 0 0 0 0 0 0 0 0 0
36224- 0 0 0 0 0 0 0 0 0 0 0 0
36225- 0 0 0 0 0 0 0 0 0 0 0 0
36226- 0 0 0 0 0 0 0 0 0 0 0 0
36227- 0 0 0 0 0 0 0 0 0 0 0 0
36228- 0 0 0 0 0 0 0 0 0 0 0 0
36229- 0 0 0 0 0 0 0 0 0 6 6 6
36230- 6 6 6 10 10 10 14 14 14 18 18 18
36231- 18 18 18 14 14 14 10 10 10 6 6 6
36232- 0 0 0 0 0 0 0 0 0 0 0 0
36233- 0 0 0 0 0 0 0 0 0 0 0 0
36234- 0 0 0 0 0 0 0 0 0 0 0 0
36235- 0 0 0 0 0 0 0 0 0 0 0 0
36236- 0 0 0 0 0 0 0 0 0 6 6 6
36237- 14 14 14 18 18 18 22 22 22 22 22 22
36238- 18 18 18 14 14 14 10 10 10 6 6 6
36239- 0 0 0 0 0 0 0 0 0 0 0 0
36240- 0 0 0 0 0 0 0 0 0 0 0 0
36241- 0 0 0 0 0 0 0 0 0 0 0 0
36242- 0 0 0 0 0 0 0 0 0 0 0 0
36243- 0 0 0 0 0 0 0 0 0 0 0 0
36244+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36245+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36246+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36247+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36248+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36249+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36250+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36251+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36252+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36253+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36254+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36255+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36256+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36257+4 4 4 4 4 4
36258+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36259+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36260+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36261+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36262+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36263+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36264+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36265+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36266+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36267+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36268+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36269+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36270+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36271+4 4 4 4 4 4
36272+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36273+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36274+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36275+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36276+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36277+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36278+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36279+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36280+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36281+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36282+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36283+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36284+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36285+4 4 4 4 4 4
36286+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36287+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36288+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36289+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36290+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36291+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36292+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36293+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36294+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36295+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36296+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36297+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36298+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36299+4 4 4 4 4 4
36300+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36301+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36302+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36303+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36304+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36305+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36306+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36307+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36308+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36309+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36310+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36311+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36312+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36313+4 4 4 4 4 4
36314+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36315+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36316+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36317+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36318+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36319+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36320+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36321+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36322+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36323+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36324+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36325+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36326+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36327+4 4 4 4 4 4
36328+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36329+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36330+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36331+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36332+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
36333+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
36334+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36335+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36336+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36337+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
36338+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
36339+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
36340+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36341+4 4 4 4 4 4
36342+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36343+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36344+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36345+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36346+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
36347+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
36348+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36349+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36350+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36351+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
36352+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
36353+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
36354+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36355+4 4 4 4 4 4
36356+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36357+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36358+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36359+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36360+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
36361+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
36362+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
36363+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36364+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36365+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
36366+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
36367+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
36368+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
36369+4 4 4 4 4 4
36370+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36371+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36372+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36373+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
36374+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
36375+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
36376+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
36377+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36378+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
36379+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
36380+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
36381+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
36382+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
36383+4 4 4 4 4 4
36384+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36385+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36386+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36387+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
36388+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
36389+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
36390+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
36391+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
36392+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
36393+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
36394+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
36395+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
36396+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
36397+4 4 4 4 4 4
36398+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36399+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36400+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
36401+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
36402+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
36403+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
36404+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
36405+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
36406+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
36407+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
36408+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
36409+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
36410+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
36411+4 4 4 4 4 4
36412+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36413+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36414+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
36415+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
36416+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
36417+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
36418+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
36419+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
36420+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
36421+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
36422+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
36423+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
36424+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
36425+4 4 4 4 4 4
36426+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36427+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36428+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
36429+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
36430+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
36431+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
36432+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
36433+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
36434+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
36435+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
36436+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
36437+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
36438+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
36439+4 4 4 4 4 4
36440+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36441+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36442+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
36443+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
36444+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
36445+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
36446+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
36447+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
36448+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
36449+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
36450+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
36451+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
36452+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
36453+4 4 4 4 4 4
36454+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36455+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36456+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
36457+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
36458+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
36459+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
36460+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
36461+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
36462+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
36463+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
36464+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
36465+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
36466+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
36467+4 4 4 4 4 4
36468+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36469+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
36470+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
36471+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
36472+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
36473+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
36474+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
36475+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
36476+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
36477+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
36478+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
36479+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
36480+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
36481+4 4 4 4 4 4
36482+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36483+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
36484+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
36485+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
36486+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
36487+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
36488+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
36489+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
36490+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
36491+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
36492+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
36493+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
36494+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
36495+0 0 0 4 4 4
36496+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
36497+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
36498+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
36499+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
36500+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
36501+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
36502+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
36503+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
36504+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
36505+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
36506+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
36507+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
36508+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
36509+2 0 0 0 0 0
36510+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
36511+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
36512+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
36513+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
36514+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
36515+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
36516+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
36517+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
36518+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
36519+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
36520+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
36521+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
36522+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
36523+37 38 37 0 0 0
36524+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
36525+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
36526+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
36527+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
36528+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
36529+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
36530+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
36531+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
36532+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
36533+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
36534+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
36535+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
36536+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
36537+85 115 134 4 0 0
36538+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
36539+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
36540+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
36541+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
36542+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
36543+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
36544+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
36545+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
36546+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
36547+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
36548+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
36549+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
36550+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
36551+60 73 81 4 0 0
36552+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
36553+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
36554+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
36555+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
36556+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
36557+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
36558+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
36559+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
36560+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
36561+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
36562+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
36563+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
36564+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
36565+16 19 21 4 0 0
36566+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
36567+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
36568+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
36569+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
36570+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
36571+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
36572+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
36573+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
36574+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
36575+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
36576+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
36577+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
36578+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
36579+4 0 0 4 3 3
36580+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
36581+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
36582+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
36583+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
36584+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
36585+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
36586+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
36587+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
36588+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
36589+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
36590+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
36591+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
36592+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
36593+3 2 2 4 4 4
36594+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
36595+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
36596+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
36597+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
36598+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
36599+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
36600+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
36601+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
36602+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
36603+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
36604+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
36605+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
36606+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
36607+4 4 4 4 4 4
36608+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
36609+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
36610+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
36611+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
36612+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
36613+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
36614+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
36615+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
36616+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
36617+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
36618+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
36619+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
36620+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
36621+4 4 4 4 4 4
36622+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
36623+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
36624+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
36625+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
36626+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
36627+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
36628+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
36629+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
36630+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
36631+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
36632+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
36633+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
36634+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
36635+5 5 5 5 5 5
36636+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
36637+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
36638+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
36639+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
36640+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
36641+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
36642+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
36643+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
36644+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
36645+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
36646+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
36647+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
36648+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
36649+5 5 5 4 4 4
36650+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
36651+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
36652+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
36653+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
36654+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
36655+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
36656+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
36657+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
36658+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
36659+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
36660+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
36661+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
36662+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36663+4 4 4 4 4 4
36664+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
36665+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
36666+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
36667+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
36668+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
36669+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
36670+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
36671+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
36672+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
36673+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
36674+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
36675+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
36676+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36677+4 4 4 4 4 4
36678+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
36679+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
36680+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
36681+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
36682+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
36683+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
36684+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
36685+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
36686+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
36687+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
36688+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
36689+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36690+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36691+4 4 4 4 4 4
36692+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
36693+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
36694+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
36695+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
36696+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
36697+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
36698+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
36699+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
36700+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
36701+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
36702+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
36703+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36704+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36705+4 4 4 4 4 4
36706+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
36707+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
36708+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
36709+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
36710+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
36711+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
36712+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
36713+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
36714+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
36715+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
36716+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36717+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36718+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36719+4 4 4 4 4 4
36720+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
36721+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
36722+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
36723+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
36724+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
36725+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
36726+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
36727+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
36728+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
36729+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
36730+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
36731+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36732+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36733+4 4 4 4 4 4
36734+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
36735+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
36736+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
36737+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
36738+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
36739+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
36740+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
36741+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
36742+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
36743+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
36744+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
36745+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36746+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36747+4 4 4 4 4 4
36748+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
36749+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
36750+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
36751+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
36752+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
36753+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
36754+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
36755+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
36756+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
36757+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
36758+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36759+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36760+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36761+4 4 4 4 4 4
36762+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
36763+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
36764+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
36765+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
36766+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
36767+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
36768+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
36769+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
36770+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
36771+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
36772+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36773+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36774+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36775+4 4 4 4 4 4
36776+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
36777+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
36778+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
36779+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
36780+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
36781+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
36782+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
36783+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
36784+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
36785+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
36786+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36787+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36788+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36789+4 4 4 4 4 4
36790+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
36791+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
36792+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
36793+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
36794+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
36795+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
36796+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
36797+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
36798+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
36799+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36800+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36801+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36802+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36803+4 4 4 4 4 4
36804+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
36805+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
36806+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
36807+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
36808+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
36809+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
36810+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
36811+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
36812+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
36813+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36814+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36815+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36816+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36817+4 4 4 4 4 4
36818+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
36819+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
36820+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
36821+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
36822+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
36823+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
36824+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
36825+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
36826+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
36827+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36828+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36829+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36830+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36831+4 4 4 4 4 4
36832+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
36833+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
36834+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
36835+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
36836+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
36837+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
36838+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
36839+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
36840+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
36841+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36842+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36843+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36844+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36845+4 4 4 4 4 4
36846+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
36847+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
36848+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
36849+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
36850+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
36851+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
36852+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
36853+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
36854+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
36855+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36856+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36857+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36858+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36859+4 4 4 4 4 4
36860+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
36861+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
36862+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
36863+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
36864+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
36865+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
36866+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
36867+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
36868+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
36869+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36870+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36871+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36872+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36873+4 4 4 4 4 4
36874+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
36875+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
36876+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
36877+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
36878+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
36879+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
36880+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
36881+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
36882+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
36883+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36884+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36885+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36886+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36887+4 4 4 4 4 4
36888+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
36889+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
36890+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
36891+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
36892+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
36893+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
36894+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
36895+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
36896+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
36897+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36898+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36899+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36900+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36901+4 4 4 4 4 4
36902+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
36903+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
36904+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
36905+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
36906+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
36907+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
36908+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
36909+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
36910+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
36911+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36912+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36913+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36914+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36915+4 4 4 4 4 4
36916+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
36917+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
36918+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
36919+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
36920+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
36921+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
36922+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
36923+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
36924+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
36925+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36926+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36927+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36928+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36929+4 4 4 4 4 4
36930+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
36931+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
36932+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
36933+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
36934+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
36935+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
36936+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
36937+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
36938+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
36939+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36940+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36941+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36942+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36943+4 4 4 4 4 4
36944+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
36945+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
36946+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
36947+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
36948+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
36949+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
36950+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
36951+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
36952+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
36953+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36954+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36955+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36956+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36957+4 4 4 4 4 4
36958+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
36959+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
36960+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
36961+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
36962+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
36963+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
36964+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
36965+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
36966+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
36967+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36968+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36969+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36970+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36971+4 4 4 4 4 4
36972+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
36973+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
36974+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
36975+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
36976+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
36977+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
36978+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
36979+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
36980+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
36981+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36982+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36983+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36984+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36985+4 4 4 4 4 4
36986+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
36987+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
36988+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
36989+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
36990+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
36991+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
36992+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
36993+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
36994+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
36995+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36996+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36997+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36998+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36999+4 4 4 4 4 4
37000+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
37001+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
37002+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
37003+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
37004+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
37005+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
37006+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
37007+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
37008+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
37009+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
37010+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
37011+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37012+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37013+4 4 4 4 4 4
37014+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
37015+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
37016+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
37017+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
37018+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
37019+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
37020+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
37021+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
37022+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
37023+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
37024+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37025+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37026+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37027+4 4 4 4 4 4
37028+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
37029+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
37030+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
37031+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
37032+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
37033+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
37034+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37035+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
37036+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
37037+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
37038+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
37039+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37040+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37041+4 4 4 4 4 4
37042+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
37043+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
37044+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
37045+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
37046+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
37047+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
37048+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
37049+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
37050+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
37051+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
37052+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37053+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37054+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37055+4 4 4 4 4 4
37056+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
37057+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
37058+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
37059+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
37060+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
37061+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
37062+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
37063+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
37064+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
37065+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
37066+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37067+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37068+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37069+4 4 4 4 4 4
37070+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
37071+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
37072+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
37073+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
37074+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
37075+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
37076+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
37077+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
37078+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
37079+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
37080+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37081+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37082+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37083+4 4 4 4 4 4
37084+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
37085+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
37086+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
37087+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
37088+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
37089+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
37090+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
37091+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
37092+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
37093+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
37094+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37095+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37096+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37097+4 4 4 4 4 4
37098+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
37099+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
37100+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
37101+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
37102+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
37103+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
37104+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
37105+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
37106+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
37107+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
37108+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37109+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37110+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37111+4 4 4 4 4 4
37112+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
37113+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
37114+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
37115+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
37116+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
37117+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
37118+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
37119+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
37120+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
37121+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37122+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37123+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37124+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37125+4 4 4 4 4 4
37126+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
37127+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
37128+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
37129+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
37130+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
37131+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
37132+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
37133+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
37134+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
37135+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37136+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37137+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37138+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37139+4 4 4 4 4 4
37140+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
37141+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
37142+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
37143+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
37144+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
37145+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
37146+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
37147+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
37148+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37149+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37150+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37151+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37152+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37153+4 4 4 4 4 4
37154+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
37155+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
37156+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
37157+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
37158+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
37159+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
37160+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
37161+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
37162+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37163+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37164+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37165+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37166+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37167+4 4 4 4 4 4
37168+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
37169+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
37170+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
37171+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
37172+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
37173+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
37174+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
37175+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
37176+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37177+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37178+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37179+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37180+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37181+4 4 4 4 4 4
37182+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
37183+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
37184+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
37185+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
37186+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
37187+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
37188+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
37189+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
37190+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37191+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37192+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37193+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37194+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37195+4 4 4 4 4 4
37196+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37197+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
37198+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
37199+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
37200+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
37201+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
37202+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
37203+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
37204+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37205+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37206+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37207+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37208+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37209+4 4 4 4 4 4
37210+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37211+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
37212+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
37213+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
37214+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
37215+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
37216+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
37217+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
37218+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37219+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37220+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37221+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37222+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37223+4 4 4 4 4 4
37224+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37225+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37226+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
37227+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
37228+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
37229+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
37230+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
37231+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
37232+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37233+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37234+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37235+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37236+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37237+4 4 4 4 4 4
37238+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37239+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37240+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
37241+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
37242+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
37243+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
37244+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
37245+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37246+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37247+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37248+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37249+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37250+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37251+4 4 4 4 4 4
37252+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37253+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37254+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37255+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
37256+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
37257+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
37258+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
37259+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37260+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37261+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37262+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37263+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37264+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37265+4 4 4 4 4 4
37266+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37267+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37268+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37269+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
37270+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
37271+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
37272+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
37273+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37274+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37275+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37276+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37277+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37278+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37279+4 4 4 4 4 4
37280+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37281+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37282+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37283+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
37284+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
37285+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
37286+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
37287+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37288+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37289+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37290+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37291+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37292+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37293+4 4 4 4 4 4
37294+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37295+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37296+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37297+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
37298+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
37299+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
37300+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
37301+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37302+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37303+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37304+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37305+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37306+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37307+4 4 4 4 4 4
37308+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37309+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37310+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37311+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37312+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
37313+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
37314+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
37315+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37316+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37317+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37318+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37319+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37320+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37321+4 4 4 4 4 4
37322+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37323+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37324+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37325+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37326+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
37327+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
37328+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37329+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37330+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37331+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37332+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37333+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37334+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37335+4 4 4 4 4 4
37336+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37337+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37338+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37339+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37340+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
37341+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
37342+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37343+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37344+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37345+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37346+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37347+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37348+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37349+4 4 4 4 4 4
37350+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37351+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37352+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37353+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37354+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
37355+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
37356+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37357+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37358+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37359+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37360+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37361+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37362+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37363+4 4 4 4 4 4
37364diff -urNp linux-3.0.4/drivers/video/udlfb.c linux-3.0.4/drivers/video/udlfb.c
37365--- linux-3.0.4/drivers/video/udlfb.c 2011-07-21 22:17:23.000000000 -0400
37366+++ linux-3.0.4/drivers/video/udlfb.c 2011-08-23 21:47:56.000000000 -0400
37367@@ -586,11 +586,11 @@ int dlfb_handle_damage(struct dlfb_data
37368 dlfb_urb_completion(urb);
37369
37370 error:
37371- atomic_add(bytes_sent, &dev->bytes_sent);
37372- atomic_add(bytes_identical, &dev->bytes_identical);
37373- atomic_add(width*height*2, &dev->bytes_rendered);
37374+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
37375+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
37376+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
37377 end_cycles = get_cycles();
37378- atomic_add(((unsigned int) ((end_cycles - start_cycles)
37379+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
37380 >> 10)), /* Kcycles */
37381 &dev->cpu_kcycles_used);
37382
37383@@ -711,11 +711,11 @@ static void dlfb_dpy_deferred_io(struct
37384 dlfb_urb_completion(urb);
37385
37386 error:
37387- atomic_add(bytes_sent, &dev->bytes_sent);
37388- atomic_add(bytes_identical, &dev->bytes_identical);
37389- atomic_add(bytes_rendered, &dev->bytes_rendered);
37390+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
37391+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
37392+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
37393 end_cycles = get_cycles();
37394- atomic_add(((unsigned int) ((end_cycles - start_cycles)
37395+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
37396 >> 10)), /* Kcycles */
37397 &dev->cpu_kcycles_used);
37398 }
37399@@ -1307,7 +1307,7 @@ static ssize_t metrics_bytes_rendered_sh
37400 struct fb_info *fb_info = dev_get_drvdata(fbdev);
37401 struct dlfb_data *dev = fb_info->par;
37402 return snprintf(buf, PAGE_SIZE, "%u\n",
37403- atomic_read(&dev->bytes_rendered));
37404+ atomic_read_unchecked(&dev->bytes_rendered));
37405 }
37406
37407 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
37408@@ -1315,7 +1315,7 @@ static ssize_t metrics_bytes_identical_s
37409 struct fb_info *fb_info = dev_get_drvdata(fbdev);
37410 struct dlfb_data *dev = fb_info->par;
37411 return snprintf(buf, PAGE_SIZE, "%u\n",
37412- atomic_read(&dev->bytes_identical));
37413+ atomic_read_unchecked(&dev->bytes_identical));
37414 }
37415
37416 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
37417@@ -1323,7 +1323,7 @@ static ssize_t metrics_bytes_sent_show(s
37418 struct fb_info *fb_info = dev_get_drvdata(fbdev);
37419 struct dlfb_data *dev = fb_info->par;
37420 return snprintf(buf, PAGE_SIZE, "%u\n",
37421- atomic_read(&dev->bytes_sent));
37422+ atomic_read_unchecked(&dev->bytes_sent));
37423 }
37424
37425 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
37426@@ -1331,7 +1331,7 @@ static ssize_t metrics_cpu_kcycles_used_
37427 struct fb_info *fb_info = dev_get_drvdata(fbdev);
37428 struct dlfb_data *dev = fb_info->par;
37429 return snprintf(buf, PAGE_SIZE, "%u\n",
37430- atomic_read(&dev->cpu_kcycles_used));
37431+ atomic_read_unchecked(&dev->cpu_kcycles_used));
37432 }
37433
37434 static ssize_t edid_show(
37435@@ -1388,10 +1388,10 @@ static ssize_t metrics_reset_store(struc
37436 struct fb_info *fb_info = dev_get_drvdata(fbdev);
37437 struct dlfb_data *dev = fb_info->par;
37438
37439- atomic_set(&dev->bytes_rendered, 0);
37440- atomic_set(&dev->bytes_identical, 0);
37441- atomic_set(&dev->bytes_sent, 0);
37442- atomic_set(&dev->cpu_kcycles_used, 0);
37443+ atomic_set_unchecked(&dev->bytes_rendered, 0);
37444+ atomic_set_unchecked(&dev->bytes_identical, 0);
37445+ atomic_set_unchecked(&dev->bytes_sent, 0);
37446+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
37447
37448 return count;
37449 }
37450diff -urNp linux-3.0.4/drivers/video/uvesafb.c linux-3.0.4/drivers/video/uvesafb.c
37451--- linux-3.0.4/drivers/video/uvesafb.c 2011-07-21 22:17:23.000000000 -0400
37452+++ linux-3.0.4/drivers/video/uvesafb.c 2011-08-23 21:47:56.000000000 -0400
37453@@ -19,6 +19,7 @@
37454 #include <linux/io.h>
37455 #include <linux/mutex.h>
37456 #include <linux/slab.h>
37457+#include <linux/moduleloader.h>
37458 #include <video/edid.h>
37459 #include <video/uvesafb.h>
37460 #ifdef CONFIG_X86
37461@@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
37462 NULL,
37463 };
37464
37465- return call_usermodehelper(v86d_path, argv, envp, 1);
37466+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
37467 }
37468
37469 /*
37470@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(
37471 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
37472 par->pmi_setpal = par->ypan = 0;
37473 } else {
37474+
37475+#ifdef CONFIG_PAX_KERNEXEC
37476+#ifdef CONFIG_MODULES
37477+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
37478+#endif
37479+ if (!par->pmi_code) {
37480+ par->pmi_setpal = par->ypan = 0;
37481+ return 0;
37482+ }
37483+#endif
37484+
37485 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
37486 + task->t.regs.edi);
37487+
37488+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
37489+ pax_open_kernel();
37490+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
37491+ pax_close_kernel();
37492+
37493+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
37494+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
37495+#else
37496 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
37497 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
37498+#endif
37499+
37500 printk(KERN_INFO "uvesafb: protected mode interface info at "
37501 "%04x:%04x\n",
37502 (u16)task->t.regs.es, (u16)task->t.regs.edi);
37503@@ -1821,6 +1844,11 @@ out:
37504 if (par->vbe_modes)
37505 kfree(par->vbe_modes);
37506
37507+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
37508+ if (par->pmi_code)
37509+ module_free_exec(NULL, par->pmi_code);
37510+#endif
37511+
37512 framebuffer_release(info);
37513 return err;
37514 }
37515@@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platfor
37516 kfree(par->vbe_state_orig);
37517 if (par->vbe_state_saved)
37518 kfree(par->vbe_state_saved);
37519+
37520+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
37521+ if (par->pmi_code)
37522+ module_free_exec(NULL, par->pmi_code);
37523+#endif
37524+
37525 }
37526
37527 framebuffer_release(info);
37528diff -urNp linux-3.0.4/drivers/video/vesafb.c linux-3.0.4/drivers/video/vesafb.c
37529--- linux-3.0.4/drivers/video/vesafb.c 2011-07-21 22:17:23.000000000 -0400
37530+++ linux-3.0.4/drivers/video/vesafb.c 2011-08-23 21:47:56.000000000 -0400
37531@@ -9,6 +9,7 @@
37532 */
37533
37534 #include <linux/module.h>
37535+#include <linux/moduleloader.h>
37536 #include <linux/kernel.h>
37537 #include <linux/errno.h>
37538 #include <linux/string.h>
37539@@ -52,8 +53,8 @@ static int vram_remap __initdata; /*
37540 static int vram_total __initdata; /* Set total amount of memory */
37541 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
37542 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
37543-static void (*pmi_start)(void) __read_mostly;
37544-static void (*pmi_pal) (void) __read_mostly;
37545+static void (*pmi_start)(void) __read_only;
37546+static void (*pmi_pal) (void) __read_only;
37547 static int depth __read_mostly;
37548 static int vga_compat __read_mostly;
37549 /* --------------------------------------------------------------------- */
37550@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
37551 unsigned int size_vmode;
37552 unsigned int size_remap;
37553 unsigned int size_total;
37554+ void *pmi_code = NULL;
37555
37556 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
37557 return -ENODEV;
37558@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
37559 size_remap = size_total;
37560 vesafb_fix.smem_len = size_remap;
37561
37562-#ifndef __i386__
37563- screen_info.vesapm_seg = 0;
37564-#endif
37565-
37566 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
37567 printk(KERN_WARNING
37568 "vesafb: cannot reserve video memory at 0x%lx\n",
37569@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct pl
37570 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
37571 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
37572
37573+#ifdef __i386__
37574+
37575+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
37576+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
37577+ if (!pmi_code)
37578+#elif !defined(CONFIG_PAX_KERNEXEC)
37579+ if (0)
37580+#endif
37581+
37582+#endif
37583+ screen_info.vesapm_seg = 0;
37584+
37585 if (screen_info.vesapm_seg) {
37586- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
37587- screen_info.vesapm_seg,screen_info.vesapm_off);
37588+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
37589+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
37590 }
37591
37592 if (screen_info.vesapm_seg < 0xc000)
37593@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct pl
37594
37595 if (ypan || pmi_setpal) {
37596 unsigned short *pmi_base;
37597+
37598 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
37599- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
37600- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
37601+
37602+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
37603+ pax_open_kernel();
37604+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
37605+#else
37606+ pmi_code = pmi_base;
37607+#endif
37608+
37609+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
37610+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
37611+
37612+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
37613+ pmi_start = ktva_ktla(pmi_start);
37614+ pmi_pal = ktva_ktla(pmi_pal);
37615+ pax_close_kernel();
37616+#endif
37617+
37618 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
37619 if (pmi_base[3]) {
37620 printk(KERN_INFO "vesafb: pmi: ports = ");
37621@@ -488,6 +514,11 @@ static int __init vesafb_probe(struct pl
37622 info->node, info->fix.id);
37623 return 0;
37624 err:
37625+
37626+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
37627+ module_free_exec(NULL, pmi_code);
37628+#endif
37629+
37630 if (info->screen_base)
37631 iounmap(info->screen_base);
37632 framebuffer_release(info);
37633diff -urNp linux-3.0.4/drivers/video/via/via_clock.h linux-3.0.4/drivers/video/via/via_clock.h
37634--- linux-3.0.4/drivers/video/via/via_clock.h 2011-07-21 22:17:23.000000000 -0400
37635+++ linux-3.0.4/drivers/video/via/via_clock.h 2011-08-23 21:47:56.000000000 -0400
37636@@ -56,7 +56,7 @@ struct via_clock {
37637
37638 void (*set_engine_pll_state)(u8 state);
37639 void (*set_engine_pll)(struct via_pll_config config);
37640-};
37641+} __no_const;
37642
37643
37644 static inline u32 get_pll_internal_frequency(u32 ref_freq,
37645diff -urNp linux-3.0.4/drivers/virtio/virtio_balloon.c linux-3.0.4/drivers/virtio/virtio_balloon.c
37646--- linux-3.0.4/drivers/virtio/virtio_balloon.c 2011-07-21 22:17:23.000000000 -0400
37647+++ linux-3.0.4/drivers/virtio/virtio_balloon.c 2011-08-23 21:48:14.000000000 -0400
37648@@ -174,6 +174,8 @@ static void update_balloon_stats(struct
37649 struct sysinfo i;
37650 int idx = 0;
37651
37652+ pax_track_stack();
37653+
37654 all_vm_events(events);
37655 si_meminfo(&i);
37656
37657diff -urNp linux-3.0.4/fs/9p/vfs_inode.c linux-3.0.4/fs/9p/vfs_inode.c
37658--- linux-3.0.4/fs/9p/vfs_inode.c 2011-07-21 22:17:23.000000000 -0400
37659+++ linux-3.0.4/fs/9p/vfs_inode.c 2011-08-23 21:47:56.000000000 -0400
37660@@ -1210,7 +1210,7 @@ static void *v9fs_vfs_follow_link(struct
37661 void
37662 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
37663 {
37664- char *s = nd_get_link(nd);
37665+ const char *s = nd_get_link(nd);
37666
37667 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
37668 IS_ERR(s) ? "<error>" : s);
37669diff -urNp linux-3.0.4/fs/aio.c linux-3.0.4/fs/aio.c
37670--- linux-3.0.4/fs/aio.c 2011-07-21 22:17:23.000000000 -0400
37671+++ linux-3.0.4/fs/aio.c 2011-08-23 21:48:14.000000000 -0400
37672@@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx
37673 size += sizeof(struct io_event) * nr_events;
37674 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
37675
37676- if (nr_pages < 0)
37677+ if (nr_pages <= 0)
37678 return -EINVAL;
37679
37680 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
37681@@ -1088,6 +1088,8 @@ static int read_events(struct kioctx *ct
37682 struct aio_timeout to;
37683 int retry = 0;
37684
37685+ pax_track_stack();
37686+
37687 /* needed to zero any padding within an entry (there shouldn't be
37688 * any, but C is fun!
37689 */
37690@@ -1381,22 +1383,27 @@ static ssize_t aio_fsync(struct kiocb *i
37691 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
37692 {
37693 ssize_t ret;
37694+ struct iovec iovstack;
37695
37696 #ifdef CONFIG_COMPAT
37697 if (compat)
37698 ret = compat_rw_copy_check_uvector(type,
37699 (struct compat_iovec __user *)kiocb->ki_buf,
37700- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
37701+ kiocb->ki_nbytes, 1, &iovstack,
37702 &kiocb->ki_iovec);
37703 else
37704 #endif
37705 ret = rw_copy_check_uvector(type,
37706 (struct iovec __user *)kiocb->ki_buf,
37707- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
37708+ kiocb->ki_nbytes, 1, &iovstack,
37709 &kiocb->ki_iovec);
37710 if (ret < 0)
37711 goto out;
37712
37713+ if (kiocb->ki_iovec == &iovstack) {
37714+ kiocb->ki_inline_vec = iovstack;
37715+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
37716+ }
37717 kiocb->ki_nr_segs = kiocb->ki_nbytes;
37718 kiocb->ki_cur_seg = 0;
37719 /* ki_nbytes/left now reflect bytes instead of segs */
37720diff -urNp linux-3.0.4/fs/attr.c linux-3.0.4/fs/attr.c
37721--- linux-3.0.4/fs/attr.c 2011-07-21 22:17:23.000000000 -0400
37722+++ linux-3.0.4/fs/attr.c 2011-08-23 21:48:14.000000000 -0400
37723@@ -98,6 +98,7 @@ int inode_newsize_ok(const struct inode
37724 unsigned long limit;
37725
37726 limit = rlimit(RLIMIT_FSIZE);
37727+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
37728 if (limit != RLIM_INFINITY && offset > limit)
37729 goto out_sig;
37730 if (offset > inode->i_sb->s_maxbytes)
37731diff -urNp linux-3.0.4/fs/autofs4/waitq.c linux-3.0.4/fs/autofs4/waitq.c
37732--- linux-3.0.4/fs/autofs4/waitq.c 2011-07-21 22:17:23.000000000 -0400
37733+++ linux-3.0.4/fs/autofs4/waitq.c 2011-10-06 04:17:55.000000000 -0400
37734@@ -60,7 +60,7 @@ static int autofs4_write(struct file *fi
37735 {
37736 unsigned long sigpipe, flags;
37737 mm_segment_t fs;
37738- const char *data = (const char *)addr;
37739+ const char __user *data = (const char __force_user *)addr;
37740 ssize_t wr = 0;
37741
37742 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
37743diff -urNp linux-3.0.4/fs/befs/linuxvfs.c linux-3.0.4/fs/befs/linuxvfs.c
37744--- linux-3.0.4/fs/befs/linuxvfs.c 2011-09-02 18:11:26.000000000 -0400
37745+++ linux-3.0.4/fs/befs/linuxvfs.c 2011-08-29 23:26:27.000000000 -0400
37746@@ -503,7 +503,7 @@ static void befs_put_link(struct dentry
37747 {
37748 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
37749 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
37750- char *link = nd_get_link(nd);
37751+ const char *link = nd_get_link(nd);
37752 if (!IS_ERR(link))
37753 kfree(link);
37754 }
37755diff -urNp linux-3.0.4/fs/binfmt_aout.c linux-3.0.4/fs/binfmt_aout.c
37756--- linux-3.0.4/fs/binfmt_aout.c 2011-07-21 22:17:23.000000000 -0400
37757+++ linux-3.0.4/fs/binfmt_aout.c 2011-08-23 21:48:14.000000000 -0400
37758@@ -16,6 +16,7 @@
37759 #include <linux/string.h>
37760 #include <linux/fs.h>
37761 #include <linux/file.h>
37762+#include <linux/security.h>
37763 #include <linux/stat.h>
37764 #include <linux/fcntl.h>
37765 #include <linux/ptrace.h>
37766@@ -86,6 +87,8 @@ static int aout_core_dump(struct coredum
37767 #endif
37768 # define START_STACK(u) ((void __user *)u.start_stack)
37769
37770+ memset(&dump, 0, sizeof(dump));
37771+
37772 fs = get_fs();
37773 set_fs(KERNEL_DS);
37774 has_dumped = 1;
37775@@ -97,10 +100,12 @@ static int aout_core_dump(struct coredum
37776
37777 /* If the size of the dump file exceeds the rlimit, then see what would happen
37778 if we wrote the stack, but not the data area. */
37779+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
37780 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
37781 dump.u_dsize = 0;
37782
37783 /* Make sure we have enough room to write the stack and data areas. */
37784+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
37785 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
37786 dump.u_ssize = 0;
37787
37788@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux
37789 rlim = rlimit(RLIMIT_DATA);
37790 if (rlim >= RLIM_INFINITY)
37791 rlim = ~0;
37792+
37793+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
37794 if (ex.a_data + ex.a_bss > rlim)
37795 return -ENOMEM;
37796
37797@@ -262,6 +269,27 @@ static int load_aout_binary(struct linux
37798 install_exec_creds(bprm);
37799 current->flags &= ~PF_FORKNOEXEC;
37800
37801+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
37802+ current->mm->pax_flags = 0UL;
37803+#endif
37804+
37805+#ifdef CONFIG_PAX_PAGEEXEC
37806+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
37807+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
37808+
37809+#ifdef CONFIG_PAX_EMUTRAMP
37810+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
37811+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
37812+#endif
37813+
37814+#ifdef CONFIG_PAX_MPROTECT
37815+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
37816+ current->mm->pax_flags |= MF_PAX_MPROTECT;
37817+#endif
37818+
37819+ }
37820+#endif
37821+
37822 if (N_MAGIC(ex) == OMAGIC) {
37823 unsigned long text_addr, map_size;
37824 loff_t pos;
37825@@ -334,7 +362,7 @@ static int load_aout_binary(struct linux
37826
37827 down_write(&current->mm->mmap_sem);
37828 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
37829- PROT_READ | PROT_WRITE | PROT_EXEC,
37830+ PROT_READ | PROT_WRITE,
37831 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
37832 fd_offset + ex.a_text);
37833 up_write(&current->mm->mmap_sem);
37834diff -urNp linux-3.0.4/fs/binfmt_elf.c linux-3.0.4/fs/binfmt_elf.c
37835--- linux-3.0.4/fs/binfmt_elf.c 2011-07-21 22:17:23.000000000 -0400
37836+++ linux-3.0.4/fs/binfmt_elf.c 2011-08-23 21:48:14.000000000 -0400
37837@@ -51,6 +51,10 @@ static int elf_core_dump(struct coredump
37838 #define elf_core_dump NULL
37839 #endif
37840
37841+#ifdef CONFIG_PAX_MPROTECT
37842+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
37843+#endif
37844+
37845 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
37846 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
37847 #else
37848@@ -70,6 +74,11 @@ static struct linux_binfmt elf_format =
37849 .load_binary = load_elf_binary,
37850 .load_shlib = load_elf_library,
37851 .core_dump = elf_core_dump,
37852+
37853+#ifdef CONFIG_PAX_MPROTECT
37854+ .handle_mprotect= elf_handle_mprotect,
37855+#endif
37856+
37857 .min_coredump = ELF_EXEC_PAGESIZE,
37858 };
37859
37860@@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
37861
37862 static int set_brk(unsigned long start, unsigned long end)
37863 {
37864+ unsigned long e = end;
37865+
37866 start = ELF_PAGEALIGN(start);
37867 end = ELF_PAGEALIGN(end);
37868 if (end > start) {
37869@@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
37870 if (BAD_ADDR(addr))
37871 return addr;
37872 }
37873- current->mm->start_brk = current->mm->brk = end;
37874+ current->mm->start_brk = current->mm->brk = e;
37875 return 0;
37876 }
37877
37878@@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
37879 elf_addr_t __user *u_rand_bytes;
37880 const char *k_platform = ELF_PLATFORM;
37881 const char *k_base_platform = ELF_BASE_PLATFORM;
37882- unsigned char k_rand_bytes[16];
37883+ u32 k_rand_bytes[4];
37884 int items;
37885 elf_addr_t *elf_info;
37886 int ei_index = 0;
37887 const struct cred *cred = current_cred();
37888 struct vm_area_struct *vma;
37889+ unsigned long saved_auxv[AT_VECTOR_SIZE];
37890+
37891+ pax_track_stack();
37892
37893 /*
37894 * In some cases (e.g. Hyper-Threading), we want to avoid L1
37895@@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
37896 * Generate 16 random bytes for userspace PRNG seeding.
37897 */
37898 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
37899- u_rand_bytes = (elf_addr_t __user *)
37900- STACK_ALLOC(p, sizeof(k_rand_bytes));
37901+ srandom32(k_rand_bytes[0] ^ random32());
37902+ srandom32(k_rand_bytes[1] ^ random32());
37903+ srandom32(k_rand_bytes[2] ^ random32());
37904+ srandom32(k_rand_bytes[3] ^ random32());
37905+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
37906+ u_rand_bytes = (elf_addr_t __user *) p;
37907 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
37908 return -EFAULT;
37909
37910@@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
37911 return -EFAULT;
37912 current->mm->env_end = p;
37913
37914+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
37915+
37916 /* Put the elf_info on the stack in the right place. */
37917 sp = (elf_addr_t __user *)envp + 1;
37918- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
37919+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
37920 return -EFAULT;
37921 return 0;
37922 }
37923@@ -381,10 +401,10 @@ static unsigned long load_elf_interp(str
37924 {
37925 struct elf_phdr *elf_phdata;
37926 struct elf_phdr *eppnt;
37927- unsigned long load_addr = 0;
37928+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
37929 int load_addr_set = 0;
37930 unsigned long last_bss = 0, elf_bss = 0;
37931- unsigned long error = ~0UL;
37932+ unsigned long error = -EINVAL;
37933 unsigned long total_size;
37934 int retval, i, size;
37935
37936@@ -430,6 +450,11 @@ static unsigned long load_elf_interp(str
37937 goto out_close;
37938 }
37939
37940+#ifdef CONFIG_PAX_SEGMEXEC
37941+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
37942+ pax_task_size = SEGMEXEC_TASK_SIZE;
37943+#endif
37944+
37945 eppnt = elf_phdata;
37946 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
37947 if (eppnt->p_type == PT_LOAD) {
37948@@ -473,8 +498,8 @@ static unsigned long load_elf_interp(str
37949 k = load_addr + eppnt->p_vaddr;
37950 if (BAD_ADDR(k) ||
37951 eppnt->p_filesz > eppnt->p_memsz ||
37952- eppnt->p_memsz > TASK_SIZE ||
37953- TASK_SIZE - eppnt->p_memsz < k) {
37954+ eppnt->p_memsz > pax_task_size ||
37955+ pax_task_size - eppnt->p_memsz < k) {
37956 error = -ENOMEM;
37957 goto out_close;
37958 }
37959@@ -528,6 +553,193 @@ out:
37960 return error;
37961 }
37962
37963+#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
37964+static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
37965+{
37966+ unsigned long pax_flags = 0UL;
37967+
37968+#ifdef CONFIG_PAX_PAGEEXEC
37969+ if (elf_phdata->p_flags & PF_PAGEEXEC)
37970+ pax_flags |= MF_PAX_PAGEEXEC;
37971+#endif
37972+
37973+#ifdef CONFIG_PAX_SEGMEXEC
37974+ if (elf_phdata->p_flags & PF_SEGMEXEC)
37975+ pax_flags |= MF_PAX_SEGMEXEC;
37976+#endif
37977+
37978+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
37979+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
37980+ if ((__supported_pte_mask & _PAGE_NX))
37981+ pax_flags &= ~MF_PAX_SEGMEXEC;
37982+ else
37983+ pax_flags &= ~MF_PAX_PAGEEXEC;
37984+ }
37985+#endif
37986+
37987+#ifdef CONFIG_PAX_EMUTRAMP
37988+ if (elf_phdata->p_flags & PF_EMUTRAMP)
37989+ pax_flags |= MF_PAX_EMUTRAMP;
37990+#endif
37991+
37992+#ifdef CONFIG_PAX_MPROTECT
37993+ if (elf_phdata->p_flags & PF_MPROTECT)
37994+ pax_flags |= MF_PAX_MPROTECT;
37995+#endif
37996+
37997+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
37998+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
37999+ pax_flags |= MF_PAX_RANDMMAP;
38000+#endif
38001+
38002+ return pax_flags;
38003+}
38004+#endif
38005+
38006+#ifdef CONFIG_PAX_PT_PAX_FLAGS
38007+static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
38008+{
38009+ unsigned long pax_flags = 0UL;
38010+
38011+#ifdef CONFIG_PAX_PAGEEXEC
38012+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
38013+ pax_flags |= MF_PAX_PAGEEXEC;
38014+#endif
38015+
38016+#ifdef CONFIG_PAX_SEGMEXEC
38017+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
38018+ pax_flags |= MF_PAX_SEGMEXEC;
38019+#endif
38020+
38021+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
38022+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
38023+ if ((__supported_pte_mask & _PAGE_NX))
38024+ pax_flags &= ~MF_PAX_SEGMEXEC;
38025+ else
38026+ pax_flags &= ~MF_PAX_PAGEEXEC;
38027+ }
38028+#endif
38029+
38030+#ifdef CONFIG_PAX_EMUTRAMP
38031+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
38032+ pax_flags |= MF_PAX_EMUTRAMP;
38033+#endif
38034+
38035+#ifdef CONFIG_PAX_MPROTECT
38036+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
38037+ pax_flags |= MF_PAX_MPROTECT;
38038+#endif
38039+
38040+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
38041+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
38042+ pax_flags |= MF_PAX_RANDMMAP;
38043+#endif
38044+
38045+ return pax_flags;
38046+}
38047+#endif
38048+
38049+#ifdef CONFIG_PAX_EI_PAX
38050+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
38051+{
38052+ unsigned long pax_flags = 0UL;
38053+
38054+#ifdef CONFIG_PAX_PAGEEXEC
38055+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
38056+ pax_flags |= MF_PAX_PAGEEXEC;
38057+#endif
38058+
38059+#ifdef CONFIG_PAX_SEGMEXEC
38060+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
38061+ pax_flags |= MF_PAX_SEGMEXEC;
38062+#endif
38063+
38064+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
38065+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
38066+ if ((__supported_pte_mask & _PAGE_NX))
38067+ pax_flags &= ~MF_PAX_SEGMEXEC;
38068+ else
38069+ pax_flags &= ~MF_PAX_PAGEEXEC;
38070+ }
38071+#endif
38072+
38073+#ifdef CONFIG_PAX_EMUTRAMP
38074+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
38075+ pax_flags |= MF_PAX_EMUTRAMP;
38076+#endif
38077+
38078+#ifdef CONFIG_PAX_MPROTECT
38079+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
38080+ pax_flags |= MF_PAX_MPROTECT;
38081+#endif
38082+
38083+#ifdef CONFIG_PAX_ASLR
38084+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
38085+ pax_flags |= MF_PAX_RANDMMAP;
38086+#endif
38087+
38088+ return pax_flags;
38089+}
38090+#endif
38091+
38092+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
38093+static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
38094+{
38095+ unsigned long pax_flags = 0UL;
38096+
38097+#ifdef CONFIG_PAX_PT_PAX_FLAGS
38098+ unsigned long i;
38099+ int found_flags = 0;
38100+#endif
38101+
38102+#ifdef CONFIG_PAX_EI_PAX
38103+ pax_flags = pax_parse_ei_pax(elf_ex);
38104+#endif
38105+
38106+#ifdef CONFIG_PAX_PT_PAX_FLAGS
38107+ for (i = 0UL; i < elf_ex->e_phnum; i++)
38108+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
38109+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
38110+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
38111+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
38112+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
38113+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
38114+ return -EINVAL;
38115+
38116+#ifdef CONFIG_PAX_SOFTMODE
38117+ if (pax_softmode)
38118+ pax_flags = pax_parse_softmode(&elf_phdata[i]);
38119+ else
38120+#endif
38121+
38122+ pax_flags = pax_parse_hardmode(&elf_phdata[i]);
38123+ found_flags = 1;
38124+ break;
38125+ }
38126+#endif
38127+
38128+#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
38129+ if (found_flags == 0) {
38130+ struct elf_phdr phdr;
38131+ memset(&phdr, 0, sizeof(phdr));
38132+ phdr.p_flags = PF_NOEMUTRAMP;
38133+#ifdef CONFIG_PAX_SOFTMODE
38134+ if (pax_softmode)
38135+ pax_flags = pax_parse_softmode(&phdr);
38136+ else
38137+#endif
38138+ pax_flags = pax_parse_hardmode(&phdr);
38139+ }
38140+#endif
38141+
38142+ if (0 > pax_check_flags(&pax_flags))
38143+ return -EINVAL;
38144+
38145+ current->mm->pax_flags = pax_flags;
38146+ return 0;
38147+}
38148+#endif
38149+
38150 /*
38151 * These are the functions used to load ELF style executables and shared
38152 * libraries. There is no binary dependent code anywhere else.
38153@@ -544,6 +756,11 @@ static unsigned long randomize_stack_top
38154 {
38155 unsigned int random_variable = 0;
38156
38157+#ifdef CONFIG_PAX_RANDUSTACK
38158+ if (randomize_va_space)
38159+ return stack_top - current->mm->delta_stack;
38160+#endif
38161+
38162 if ((current->flags & PF_RANDOMIZE) &&
38163 !(current->personality & ADDR_NO_RANDOMIZE)) {
38164 random_variable = get_random_int() & STACK_RND_MASK;
38165@@ -562,7 +779,7 @@ static int load_elf_binary(struct linux_
38166 unsigned long load_addr = 0, load_bias = 0;
38167 int load_addr_set = 0;
38168 char * elf_interpreter = NULL;
38169- unsigned long error;
38170+ unsigned long error = 0;
38171 struct elf_phdr *elf_ppnt, *elf_phdata;
38172 unsigned long elf_bss, elf_brk;
38173 int retval, i;
38174@@ -572,11 +789,11 @@ static int load_elf_binary(struct linux_
38175 unsigned long start_code, end_code, start_data, end_data;
38176 unsigned long reloc_func_desc __maybe_unused = 0;
38177 int executable_stack = EXSTACK_DEFAULT;
38178- unsigned long def_flags = 0;
38179 struct {
38180 struct elfhdr elf_ex;
38181 struct elfhdr interp_elf_ex;
38182 } *loc;
38183+ unsigned long pax_task_size = TASK_SIZE;
38184
38185 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
38186 if (!loc) {
38187@@ -714,11 +931,81 @@ static int load_elf_binary(struct linux_
38188
38189 /* OK, This is the point of no return */
38190 current->flags &= ~PF_FORKNOEXEC;
38191- current->mm->def_flags = def_flags;
38192+
38193+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
38194+ current->mm->pax_flags = 0UL;
38195+#endif
38196+
38197+#ifdef CONFIG_PAX_DLRESOLVE
38198+ current->mm->call_dl_resolve = 0UL;
38199+#endif
38200+
38201+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
38202+ current->mm->call_syscall = 0UL;
38203+#endif
38204+
38205+#ifdef CONFIG_PAX_ASLR
38206+ current->mm->delta_mmap = 0UL;
38207+ current->mm->delta_stack = 0UL;
38208+#endif
38209+
38210+ current->mm->def_flags = 0;
38211+
38212+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
38213+ if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
38214+ send_sig(SIGKILL, current, 0);
38215+ goto out_free_dentry;
38216+ }
38217+#endif
38218+
38219+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
38220+ pax_set_initial_flags(bprm);
38221+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
38222+ if (pax_set_initial_flags_func)
38223+ (pax_set_initial_flags_func)(bprm);
38224+#endif
38225+
38226+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
38227+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
38228+ current->mm->context.user_cs_limit = PAGE_SIZE;
38229+ current->mm->def_flags |= VM_PAGEEXEC;
38230+ }
38231+#endif
38232+
38233+#ifdef CONFIG_PAX_SEGMEXEC
38234+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
38235+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
38236+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
38237+ pax_task_size = SEGMEXEC_TASK_SIZE;
38238+ current->mm->def_flags |= VM_NOHUGEPAGE;
38239+ }
38240+#endif
38241+
38242+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
38243+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
38244+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
38245+ put_cpu();
38246+ }
38247+#endif
38248
38249 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
38250 may depend on the personality. */
38251 SET_PERSONALITY(loc->elf_ex);
38252+
38253+#ifdef CONFIG_PAX_ASLR
38254+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
38255+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
38256+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
38257+ }
38258+#endif
38259+
38260+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
38261+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
38262+ executable_stack = EXSTACK_DISABLE_X;
38263+ current->personality &= ~READ_IMPLIES_EXEC;
38264+ } else
38265+#endif
38266+
38267 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
38268 current->personality |= READ_IMPLIES_EXEC;
38269
38270@@ -800,6 +1087,20 @@ static int load_elf_binary(struct linux_
38271 #else
38272 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
38273 #endif
38274+
38275+#ifdef CONFIG_PAX_RANDMMAP
38276+ /* PaX: randomize base address at the default exe base if requested */
38277+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
38278+#ifdef CONFIG_SPARC64
38279+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
38280+#else
38281+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
38282+#endif
38283+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
38284+ elf_flags |= MAP_FIXED;
38285+ }
38286+#endif
38287+
38288 }
38289
38290 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
38291@@ -832,9 +1133,9 @@ static int load_elf_binary(struct linux_
38292 * allowed task size. Note that p_filesz must always be
38293 * <= p_memsz so it is only necessary to check p_memsz.
38294 */
38295- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
38296- elf_ppnt->p_memsz > TASK_SIZE ||
38297- TASK_SIZE - elf_ppnt->p_memsz < k) {
38298+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
38299+ elf_ppnt->p_memsz > pax_task_size ||
38300+ pax_task_size - elf_ppnt->p_memsz < k) {
38301 /* set_brk can never work. Avoid overflows. */
38302 send_sig(SIGKILL, current, 0);
38303 retval = -EINVAL;
38304@@ -862,6 +1163,11 @@ static int load_elf_binary(struct linux_
38305 start_data += load_bias;
38306 end_data += load_bias;
38307
38308+#ifdef CONFIG_PAX_RANDMMAP
38309+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
38310+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
38311+#endif
38312+
38313 /* Calling set_brk effectively mmaps the pages that we need
38314 * for the bss and break sections. We must do this before
38315 * mapping in the interpreter, to make sure it doesn't wind
38316@@ -873,9 +1179,11 @@ static int load_elf_binary(struct linux_
38317 goto out_free_dentry;
38318 }
38319 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
38320- send_sig(SIGSEGV, current, 0);
38321- retval = -EFAULT; /* Nobody gets to see this, but.. */
38322- goto out_free_dentry;
38323+ /*
38324+ * This bss-zeroing can fail if the ELF
38325+ * file specifies odd protections. So
38326+ * we don't check the return value
38327+ */
38328 }
38329
38330 if (elf_interpreter) {
38331@@ -1090,7 +1398,7 @@ out:
38332 * Decide what to dump of a segment, part, all or none.
38333 */
38334 static unsigned long vma_dump_size(struct vm_area_struct *vma,
38335- unsigned long mm_flags)
38336+ unsigned long mm_flags, long signr)
38337 {
38338 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
38339
38340@@ -1124,7 +1432,7 @@ static unsigned long vma_dump_size(struc
38341 if (vma->vm_file == NULL)
38342 return 0;
38343
38344- if (FILTER(MAPPED_PRIVATE))
38345+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
38346 goto whole;
38347
38348 /*
38349@@ -1346,9 +1654,9 @@ static void fill_auxv_note(struct memelf
38350 {
38351 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
38352 int i = 0;
38353- do
38354+ do {
38355 i += 2;
38356- while (auxv[i - 2] != AT_NULL);
38357+ } while (auxv[i - 2] != AT_NULL);
38358 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
38359 }
38360
38361@@ -1854,14 +2162,14 @@ static void fill_extnum_info(struct elfh
38362 }
38363
38364 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
38365- unsigned long mm_flags)
38366+ struct coredump_params *cprm)
38367 {
38368 struct vm_area_struct *vma;
38369 size_t size = 0;
38370
38371 for (vma = first_vma(current, gate_vma); vma != NULL;
38372 vma = next_vma(vma, gate_vma))
38373- size += vma_dump_size(vma, mm_flags);
38374+ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
38375 return size;
38376 }
38377
38378@@ -1955,7 +2263,7 @@ static int elf_core_dump(struct coredump
38379
38380 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
38381
38382- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
38383+ offset += elf_core_vma_data_size(gate_vma, cprm);
38384 offset += elf_core_extra_data_size();
38385 e_shoff = offset;
38386
38387@@ -1969,10 +2277,12 @@ static int elf_core_dump(struct coredump
38388 offset = dataoff;
38389
38390 size += sizeof(*elf);
38391+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
38392 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
38393 goto end_coredump;
38394
38395 size += sizeof(*phdr4note);
38396+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
38397 if (size > cprm->limit
38398 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
38399 goto end_coredump;
38400@@ -1986,7 +2296,7 @@ static int elf_core_dump(struct coredump
38401 phdr.p_offset = offset;
38402 phdr.p_vaddr = vma->vm_start;
38403 phdr.p_paddr = 0;
38404- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
38405+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
38406 phdr.p_memsz = vma->vm_end - vma->vm_start;
38407 offset += phdr.p_filesz;
38408 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
38409@@ -1997,6 +2307,7 @@ static int elf_core_dump(struct coredump
38410 phdr.p_align = ELF_EXEC_PAGESIZE;
38411
38412 size += sizeof(phdr);
38413+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
38414 if (size > cprm->limit
38415 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
38416 goto end_coredump;
38417@@ -2021,7 +2332,7 @@ static int elf_core_dump(struct coredump
38418 unsigned long addr;
38419 unsigned long end;
38420
38421- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
38422+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
38423
38424 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
38425 struct page *page;
38426@@ -2030,6 +2341,7 @@ static int elf_core_dump(struct coredump
38427 page = get_dump_page(addr);
38428 if (page) {
38429 void *kaddr = kmap(page);
38430+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
38431 stop = ((size += PAGE_SIZE) > cprm->limit) ||
38432 !dump_write(cprm->file, kaddr,
38433 PAGE_SIZE);
38434@@ -2047,6 +2359,7 @@ static int elf_core_dump(struct coredump
38435
38436 if (e_phnum == PN_XNUM) {
38437 size += sizeof(*shdr4extnum);
38438+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
38439 if (size > cprm->limit
38440 || !dump_write(cprm->file, shdr4extnum,
38441 sizeof(*shdr4extnum)))
38442@@ -2067,6 +2380,97 @@ out:
38443
38444 #endif /* CONFIG_ELF_CORE */
38445
38446+#ifdef CONFIG_PAX_MPROTECT
38447+/* PaX: non-PIC ELF libraries need relocations on their executable segments
38448+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
38449+ * we'll remove VM_MAYWRITE for good on RELRO segments.
38450+ *
38451+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
38452+ * basis because we want to allow the common case and not the special ones.
38453+ */
38454+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
38455+{
38456+ struct elfhdr elf_h;
38457+ struct elf_phdr elf_p;
38458+ unsigned long i;
38459+ unsigned long oldflags;
38460+ bool is_textrel_rw, is_textrel_rx, is_relro;
38461+
38462+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
38463+ return;
38464+
38465+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
38466+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
38467+
38468+#ifdef CONFIG_PAX_ELFRELOCS
38469+ /* possible TEXTREL */
38470+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
38471+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
38472+#else
38473+ is_textrel_rw = false;
38474+ is_textrel_rx = false;
38475+#endif
38476+
38477+ /* possible RELRO */
38478+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
38479+
38480+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
38481+ return;
38482+
38483+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
38484+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
38485+
38486+#ifdef CONFIG_PAX_ETEXECRELOCS
38487+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
38488+#else
38489+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
38490+#endif
38491+
38492+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
38493+ !elf_check_arch(&elf_h) ||
38494+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
38495+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
38496+ return;
38497+
38498+ for (i = 0UL; i < elf_h.e_phnum; i++) {
38499+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
38500+ return;
38501+ switch (elf_p.p_type) {
38502+ case PT_DYNAMIC:
38503+ if (!is_textrel_rw && !is_textrel_rx)
38504+ continue;
38505+ i = 0UL;
38506+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
38507+ elf_dyn dyn;
38508+
38509+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
38510+ return;
38511+ if (dyn.d_tag == DT_NULL)
38512+ return;
38513+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
38514+ gr_log_textrel(vma);
38515+ if (is_textrel_rw)
38516+ vma->vm_flags |= VM_MAYWRITE;
38517+ else
38518+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
38519+ vma->vm_flags &= ~VM_MAYWRITE;
38520+ return;
38521+ }
38522+ i++;
38523+ }
38524+ return;
38525+
38526+ case PT_GNU_RELRO:
38527+ if (!is_relro)
38528+ continue;
38529+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
38530+ vma->vm_flags &= ~VM_MAYWRITE;
38531+ return;
38532+ }
38533+ }
38534+}
38535+#endif
38536+
38537 static int __init init_elf_binfmt(void)
38538 {
38539 return register_binfmt(&elf_format);
38540diff -urNp linux-3.0.4/fs/binfmt_flat.c linux-3.0.4/fs/binfmt_flat.c
38541--- linux-3.0.4/fs/binfmt_flat.c 2011-07-21 22:17:23.000000000 -0400
38542+++ linux-3.0.4/fs/binfmt_flat.c 2011-08-23 21:47:56.000000000 -0400
38543@@ -567,7 +567,9 @@ static int load_flat_file(struct linux_b
38544 realdatastart = (unsigned long) -ENOMEM;
38545 printk("Unable to allocate RAM for process data, errno %d\n",
38546 (int)-realdatastart);
38547+ down_write(&current->mm->mmap_sem);
38548 do_munmap(current->mm, textpos, text_len);
38549+ up_write(&current->mm->mmap_sem);
38550 ret = realdatastart;
38551 goto err;
38552 }
38553@@ -591,8 +593,10 @@ static int load_flat_file(struct linux_b
38554 }
38555 if (IS_ERR_VALUE(result)) {
38556 printk("Unable to read data+bss, errno %d\n", (int)-result);
38557+ down_write(&current->mm->mmap_sem);
38558 do_munmap(current->mm, textpos, text_len);
38559 do_munmap(current->mm, realdatastart, len);
38560+ up_write(&current->mm->mmap_sem);
38561 ret = result;
38562 goto err;
38563 }
38564@@ -661,8 +665,10 @@ static int load_flat_file(struct linux_b
38565 }
38566 if (IS_ERR_VALUE(result)) {
38567 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
38568+ down_write(&current->mm->mmap_sem);
38569 do_munmap(current->mm, textpos, text_len + data_len + extra +
38570 MAX_SHARED_LIBS * sizeof(unsigned long));
38571+ up_write(&current->mm->mmap_sem);
38572 ret = result;
38573 goto err;
38574 }
38575diff -urNp linux-3.0.4/fs/bio.c linux-3.0.4/fs/bio.c
38576--- linux-3.0.4/fs/bio.c 2011-07-21 22:17:23.000000000 -0400
38577+++ linux-3.0.4/fs/bio.c 2011-10-06 04:17:55.000000000 -0400
38578@@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct b
38579 const int read = bio_data_dir(bio) == READ;
38580 struct bio_map_data *bmd = bio->bi_private;
38581 int i;
38582- char *p = bmd->sgvecs[0].iov_base;
38583+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
38584
38585 __bio_for_each_segment(bvec, bio, i, 0) {
38586 char *addr = page_address(bvec->bv_page);
38587diff -urNp linux-3.0.4/fs/block_dev.c linux-3.0.4/fs/block_dev.c
38588--- linux-3.0.4/fs/block_dev.c 2011-07-21 22:17:23.000000000 -0400
38589+++ linux-3.0.4/fs/block_dev.c 2011-08-23 21:47:56.000000000 -0400
38590@@ -671,7 +671,7 @@ static bool bd_may_claim(struct block_de
38591 else if (bdev->bd_contains == bdev)
38592 return true; /* is a whole device which isn't held */
38593
38594- else if (whole->bd_holder == bd_may_claim)
38595+ else if (whole->bd_holder == (void *)bd_may_claim)
38596 return true; /* is a partition of a device that is being partitioned */
38597 else if (whole->bd_holder != NULL)
38598 return false; /* is a partition of a held device */
38599diff -urNp linux-3.0.4/fs/btrfs/ctree.c linux-3.0.4/fs/btrfs/ctree.c
38600--- linux-3.0.4/fs/btrfs/ctree.c 2011-07-21 22:17:23.000000000 -0400
38601+++ linux-3.0.4/fs/btrfs/ctree.c 2011-08-23 21:47:56.000000000 -0400
38602@@ -454,9 +454,12 @@ static noinline int __btrfs_cow_block(st
38603 free_extent_buffer(buf);
38604 add_root_to_dirty_list(root);
38605 } else {
38606- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
38607- parent_start = parent->start;
38608- else
38609+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
38610+ if (parent)
38611+ parent_start = parent->start;
38612+ else
38613+ parent_start = 0;
38614+ } else
38615 parent_start = 0;
38616
38617 WARN_ON(trans->transid != btrfs_header_generation(parent));
38618diff -urNp linux-3.0.4/fs/btrfs/inode.c linux-3.0.4/fs/btrfs/inode.c
38619--- linux-3.0.4/fs/btrfs/inode.c 2011-07-21 22:17:23.000000000 -0400
38620+++ linux-3.0.4/fs/btrfs/inode.c 2011-08-23 21:48:14.000000000 -0400
38621@@ -6895,7 +6895,7 @@ fail:
38622 return -ENOMEM;
38623 }
38624
38625-static int btrfs_getattr(struct vfsmount *mnt,
38626+int btrfs_getattr(struct vfsmount *mnt,
38627 struct dentry *dentry, struct kstat *stat)
38628 {
38629 struct inode *inode = dentry->d_inode;
38630@@ -6907,6 +6907,14 @@ static int btrfs_getattr(struct vfsmount
38631 return 0;
38632 }
38633
38634+EXPORT_SYMBOL(btrfs_getattr);
38635+
38636+dev_t get_btrfs_dev_from_inode(struct inode *inode)
38637+{
38638+ return BTRFS_I(inode)->root->anon_super.s_dev;
38639+}
38640+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
38641+
38642 /*
38643 * If a file is moved, it will inherit the cow and compression flags of the new
38644 * directory.
38645diff -urNp linux-3.0.4/fs/btrfs/ioctl.c linux-3.0.4/fs/btrfs/ioctl.c
38646--- linux-3.0.4/fs/btrfs/ioctl.c 2011-07-21 22:17:23.000000000 -0400
38647+++ linux-3.0.4/fs/btrfs/ioctl.c 2011-10-06 04:17:55.000000000 -0400
38648@@ -2676,9 +2676,12 @@ long btrfs_ioctl_space_info(struct btrfs
38649 for (i = 0; i < num_types; i++) {
38650 struct btrfs_space_info *tmp;
38651
38652+ /* Don't copy in more than we allocated */
38653 if (!slot_count)
38654 break;
38655
38656+ slot_count--;
38657+
38658 info = NULL;
38659 rcu_read_lock();
38660 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
38661@@ -2700,15 +2703,12 @@ long btrfs_ioctl_space_info(struct btrfs
38662 memcpy(dest, &space, sizeof(space));
38663 dest++;
38664 space_args.total_spaces++;
38665- slot_count--;
38666 }
38667- if (!slot_count)
38668- break;
38669 }
38670 up_read(&info->groups_sem);
38671 }
38672
38673- user_dest = (struct btrfs_ioctl_space_info *)
38674+ user_dest = (struct btrfs_ioctl_space_info __user *)
38675 (arg + sizeof(struct btrfs_ioctl_space_args));
38676
38677 if (copy_to_user(user_dest, dest_orig, alloc_size))
38678diff -urNp linux-3.0.4/fs/btrfs/relocation.c linux-3.0.4/fs/btrfs/relocation.c
38679--- linux-3.0.4/fs/btrfs/relocation.c 2011-07-21 22:17:23.000000000 -0400
38680+++ linux-3.0.4/fs/btrfs/relocation.c 2011-08-23 21:47:56.000000000 -0400
38681@@ -1242,7 +1242,7 @@ static int __update_reloc_root(struct bt
38682 }
38683 spin_unlock(&rc->reloc_root_tree.lock);
38684
38685- BUG_ON((struct btrfs_root *)node->data != root);
38686+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
38687
38688 if (!del) {
38689 spin_lock(&rc->reloc_root_tree.lock);
38690diff -urNp linux-3.0.4/fs/cachefiles/bind.c linux-3.0.4/fs/cachefiles/bind.c
38691--- linux-3.0.4/fs/cachefiles/bind.c 2011-07-21 22:17:23.000000000 -0400
38692+++ linux-3.0.4/fs/cachefiles/bind.c 2011-08-23 21:47:56.000000000 -0400
38693@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
38694 args);
38695
38696 /* start by checking things over */
38697- ASSERT(cache->fstop_percent >= 0 &&
38698- cache->fstop_percent < cache->fcull_percent &&
38699+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
38700 cache->fcull_percent < cache->frun_percent &&
38701 cache->frun_percent < 100);
38702
38703- ASSERT(cache->bstop_percent >= 0 &&
38704- cache->bstop_percent < cache->bcull_percent &&
38705+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
38706 cache->bcull_percent < cache->brun_percent &&
38707 cache->brun_percent < 100);
38708
38709diff -urNp linux-3.0.4/fs/cachefiles/daemon.c linux-3.0.4/fs/cachefiles/daemon.c
38710--- linux-3.0.4/fs/cachefiles/daemon.c 2011-07-21 22:17:23.000000000 -0400
38711+++ linux-3.0.4/fs/cachefiles/daemon.c 2011-08-23 21:47:56.000000000 -0400
38712@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(st
38713 if (n > buflen)
38714 return -EMSGSIZE;
38715
38716- if (copy_to_user(_buffer, buffer, n) != 0)
38717+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
38718 return -EFAULT;
38719
38720 return n;
38721@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(s
38722 if (test_bit(CACHEFILES_DEAD, &cache->flags))
38723 return -EIO;
38724
38725- if (datalen < 0 || datalen > PAGE_SIZE - 1)
38726+ if (datalen > PAGE_SIZE - 1)
38727 return -EOPNOTSUPP;
38728
38729 /* drag the command string into the kernel so we can parse it */
38730@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struc
38731 if (args[0] != '%' || args[1] != '\0')
38732 return -EINVAL;
38733
38734- if (fstop < 0 || fstop >= cache->fcull_percent)
38735+ if (fstop >= cache->fcull_percent)
38736 return cachefiles_daemon_range_error(cache, args);
38737
38738 cache->fstop_percent = fstop;
38739@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struc
38740 if (args[0] != '%' || args[1] != '\0')
38741 return -EINVAL;
38742
38743- if (bstop < 0 || bstop >= cache->bcull_percent)
38744+ if (bstop >= cache->bcull_percent)
38745 return cachefiles_daemon_range_error(cache, args);
38746
38747 cache->bstop_percent = bstop;
38748diff -urNp linux-3.0.4/fs/cachefiles/internal.h linux-3.0.4/fs/cachefiles/internal.h
38749--- linux-3.0.4/fs/cachefiles/internal.h 2011-07-21 22:17:23.000000000 -0400
38750+++ linux-3.0.4/fs/cachefiles/internal.h 2011-08-23 21:47:56.000000000 -0400
38751@@ -57,7 +57,7 @@ struct cachefiles_cache {
38752 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
38753 struct rb_root active_nodes; /* active nodes (can't be culled) */
38754 rwlock_t active_lock; /* lock for active_nodes */
38755- atomic_t gravecounter; /* graveyard uniquifier */
38756+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
38757 unsigned frun_percent; /* when to stop culling (% files) */
38758 unsigned fcull_percent; /* when to start culling (% files) */
38759 unsigned fstop_percent; /* when to stop allocating (% files) */
38760@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struc
38761 * proc.c
38762 */
38763 #ifdef CONFIG_CACHEFILES_HISTOGRAM
38764-extern atomic_t cachefiles_lookup_histogram[HZ];
38765-extern atomic_t cachefiles_mkdir_histogram[HZ];
38766-extern atomic_t cachefiles_create_histogram[HZ];
38767+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
38768+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
38769+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
38770
38771 extern int __init cachefiles_proc_init(void);
38772 extern void cachefiles_proc_cleanup(void);
38773 static inline
38774-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
38775+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
38776 {
38777 unsigned long jif = jiffies - start_jif;
38778 if (jif >= HZ)
38779 jif = HZ - 1;
38780- atomic_inc(&histogram[jif]);
38781+ atomic_inc_unchecked(&histogram[jif]);
38782 }
38783
38784 #else
38785diff -urNp linux-3.0.4/fs/cachefiles/namei.c linux-3.0.4/fs/cachefiles/namei.c
38786--- linux-3.0.4/fs/cachefiles/namei.c 2011-07-21 22:17:23.000000000 -0400
38787+++ linux-3.0.4/fs/cachefiles/namei.c 2011-08-23 21:47:56.000000000 -0400
38788@@ -318,7 +318,7 @@ try_again:
38789 /* first step is to make up a grave dentry in the graveyard */
38790 sprintf(nbuffer, "%08x%08x",
38791 (uint32_t) get_seconds(),
38792- (uint32_t) atomic_inc_return(&cache->gravecounter));
38793+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
38794
38795 /* do the multiway lock magic */
38796 trap = lock_rename(cache->graveyard, dir);
38797diff -urNp linux-3.0.4/fs/cachefiles/proc.c linux-3.0.4/fs/cachefiles/proc.c
38798--- linux-3.0.4/fs/cachefiles/proc.c 2011-07-21 22:17:23.000000000 -0400
38799+++ linux-3.0.4/fs/cachefiles/proc.c 2011-08-23 21:47:56.000000000 -0400
38800@@ -14,9 +14,9 @@
38801 #include <linux/seq_file.h>
38802 #include "internal.h"
38803
38804-atomic_t cachefiles_lookup_histogram[HZ];
38805-atomic_t cachefiles_mkdir_histogram[HZ];
38806-atomic_t cachefiles_create_histogram[HZ];
38807+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
38808+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
38809+atomic_unchecked_t cachefiles_create_histogram[HZ];
38810
38811 /*
38812 * display the latency histogram
38813@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
38814 return 0;
38815 default:
38816 index = (unsigned long) v - 3;
38817- x = atomic_read(&cachefiles_lookup_histogram[index]);
38818- y = atomic_read(&cachefiles_mkdir_histogram[index]);
38819- z = atomic_read(&cachefiles_create_histogram[index]);
38820+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
38821+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
38822+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
38823 if (x == 0 && y == 0 && z == 0)
38824 return 0;
38825
38826diff -urNp linux-3.0.4/fs/cachefiles/rdwr.c linux-3.0.4/fs/cachefiles/rdwr.c
38827--- linux-3.0.4/fs/cachefiles/rdwr.c 2011-07-21 22:17:23.000000000 -0400
38828+++ linux-3.0.4/fs/cachefiles/rdwr.c 2011-10-06 04:17:55.000000000 -0400
38829@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache
38830 old_fs = get_fs();
38831 set_fs(KERNEL_DS);
38832 ret = file->f_op->write(
38833- file, (const void __user *) data, len, &pos);
38834+ file, (const void __force_user *) data, len, &pos);
38835 set_fs(old_fs);
38836 kunmap(page);
38837 if (ret != len)
38838diff -urNp linux-3.0.4/fs/ceph/dir.c linux-3.0.4/fs/ceph/dir.c
38839--- linux-3.0.4/fs/ceph/dir.c 2011-07-21 22:17:23.000000000 -0400
38840+++ linux-3.0.4/fs/ceph/dir.c 2011-08-23 21:47:56.000000000 -0400
38841@@ -226,7 +226,7 @@ static int ceph_readdir(struct file *fil
38842 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
38843 struct ceph_mds_client *mdsc = fsc->mdsc;
38844 unsigned frag = fpos_frag(filp->f_pos);
38845- int off = fpos_off(filp->f_pos);
38846+ unsigned int off = fpos_off(filp->f_pos);
38847 int err;
38848 u32 ftype;
38849 struct ceph_mds_reply_info_parsed *rinfo;
38850diff -urNp linux-3.0.4/fs/cifs/cifs_debug.c linux-3.0.4/fs/cifs/cifs_debug.c
38851--- linux-3.0.4/fs/cifs/cifs_debug.c 2011-07-21 22:17:23.000000000 -0400
38852+++ linux-3.0.4/fs/cifs/cifs_debug.c 2011-08-25 17:18:05.000000000 -0400
38853@@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(str
38854
38855 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
38856 #ifdef CONFIG_CIFS_STATS2
38857- atomic_set(&totBufAllocCount, 0);
38858- atomic_set(&totSmBufAllocCount, 0);
38859+ atomic_set_unchecked(&totBufAllocCount, 0);
38860+ atomic_set_unchecked(&totSmBufAllocCount, 0);
38861 #endif /* CONFIG_CIFS_STATS2 */
38862 spin_lock(&cifs_tcp_ses_lock);
38863 list_for_each(tmp1, &cifs_tcp_ses_list) {
38864@@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(str
38865 tcon = list_entry(tmp3,
38866 struct cifs_tcon,
38867 tcon_list);
38868- atomic_set(&tcon->num_smbs_sent, 0);
38869- atomic_set(&tcon->num_writes, 0);
38870- atomic_set(&tcon->num_reads, 0);
38871- atomic_set(&tcon->num_oplock_brks, 0);
38872- atomic_set(&tcon->num_opens, 0);
38873- atomic_set(&tcon->num_posixopens, 0);
38874- atomic_set(&tcon->num_posixmkdirs, 0);
38875- atomic_set(&tcon->num_closes, 0);
38876- atomic_set(&tcon->num_deletes, 0);
38877- atomic_set(&tcon->num_mkdirs, 0);
38878- atomic_set(&tcon->num_rmdirs, 0);
38879- atomic_set(&tcon->num_renames, 0);
38880- atomic_set(&tcon->num_t2renames, 0);
38881- atomic_set(&tcon->num_ffirst, 0);
38882- atomic_set(&tcon->num_fnext, 0);
38883- atomic_set(&tcon->num_fclose, 0);
38884- atomic_set(&tcon->num_hardlinks, 0);
38885- atomic_set(&tcon->num_symlinks, 0);
38886- atomic_set(&tcon->num_locks, 0);
38887+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
38888+ atomic_set_unchecked(&tcon->num_writes, 0);
38889+ atomic_set_unchecked(&tcon->num_reads, 0);
38890+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
38891+ atomic_set_unchecked(&tcon->num_opens, 0);
38892+ atomic_set_unchecked(&tcon->num_posixopens, 0);
38893+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
38894+ atomic_set_unchecked(&tcon->num_closes, 0);
38895+ atomic_set_unchecked(&tcon->num_deletes, 0);
38896+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
38897+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
38898+ atomic_set_unchecked(&tcon->num_renames, 0);
38899+ atomic_set_unchecked(&tcon->num_t2renames, 0);
38900+ atomic_set_unchecked(&tcon->num_ffirst, 0);
38901+ atomic_set_unchecked(&tcon->num_fnext, 0);
38902+ atomic_set_unchecked(&tcon->num_fclose, 0);
38903+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
38904+ atomic_set_unchecked(&tcon->num_symlinks, 0);
38905+ atomic_set_unchecked(&tcon->num_locks, 0);
38906 }
38907 }
38908 }
38909@@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct s
38910 smBufAllocCount.counter, cifs_min_small);
38911 #ifdef CONFIG_CIFS_STATS2
38912 seq_printf(m, "Total Large %d Small %d Allocations\n",
38913- atomic_read(&totBufAllocCount),
38914- atomic_read(&totSmBufAllocCount));
38915+ atomic_read_unchecked(&totBufAllocCount),
38916+ atomic_read_unchecked(&totSmBufAllocCount));
38917 #endif /* CONFIG_CIFS_STATS2 */
38918
38919 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
38920@@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct s
38921 if (tcon->need_reconnect)
38922 seq_puts(m, "\tDISCONNECTED ");
38923 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
38924- atomic_read(&tcon->num_smbs_sent),
38925- atomic_read(&tcon->num_oplock_brks));
38926+ atomic_read_unchecked(&tcon->num_smbs_sent),
38927+ atomic_read_unchecked(&tcon->num_oplock_brks));
38928 seq_printf(m, "\nReads: %d Bytes: %lld",
38929- atomic_read(&tcon->num_reads),
38930+ atomic_read_unchecked(&tcon->num_reads),
38931 (long long)(tcon->bytes_read));
38932 seq_printf(m, "\nWrites: %d Bytes: %lld",
38933- atomic_read(&tcon->num_writes),
38934+ atomic_read_unchecked(&tcon->num_writes),
38935 (long long)(tcon->bytes_written));
38936 seq_printf(m, "\nFlushes: %d",
38937- atomic_read(&tcon->num_flushes));
38938+ atomic_read_unchecked(&tcon->num_flushes));
38939 seq_printf(m, "\nLocks: %d HardLinks: %d "
38940 "Symlinks: %d",
38941- atomic_read(&tcon->num_locks),
38942- atomic_read(&tcon->num_hardlinks),
38943- atomic_read(&tcon->num_symlinks));
38944+ atomic_read_unchecked(&tcon->num_locks),
38945+ atomic_read_unchecked(&tcon->num_hardlinks),
38946+ atomic_read_unchecked(&tcon->num_symlinks));
38947 seq_printf(m, "\nOpens: %d Closes: %d "
38948 "Deletes: %d",
38949- atomic_read(&tcon->num_opens),
38950- atomic_read(&tcon->num_closes),
38951- atomic_read(&tcon->num_deletes));
38952+ atomic_read_unchecked(&tcon->num_opens),
38953+ atomic_read_unchecked(&tcon->num_closes),
38954+ atomic_read_unchecked(&tcon->num_deletes));
38955 seq_printf(m, "\nPosix Opens: %d "
38956 "Posix Mkdirs: %d",
38957- atomic_read(&tcon->num_posixopens),
38958- atomic_read(&tcon->num_posixmkdirs));
38959+ atomic_read_unchecked(&tcon->num_posixopens),
38960+ atomic_read_unchecked(&tcon->num_posixmkdirs));
38961 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
38962- atomic_read(&tcon->num_mkdirs),
38963- atomic_read(&tcon->num_rmdirs));
38964+ atomic_read_unchecked(&tcon->num_mkdirs),
38965+ atomic_read_unchecked(&tcon->num_rmdirs));
38966 seq_printf(m, "\nRenames: %d T2 Renames %d",
38967- atomic_read(&tcon->num_renames),
38968- atomic_read(&tcon->num_t2renames));
38969+ atomic_read_unchecked(&tcon->num_renames),
38970+ atomic_read_unchecked(&tcon->num_t2renames));
38971 seq_printf(m, "\nFindFirst: %d FNext %d "
38972 "FClose %d",
38973- atomic_read(&tcon->num_ffirst),
38974- atomic_read(&tcon->num_fnext),
38975- atomic_read(&tcon->num_fclose));
38976+ atomic_read_unchecked(&tcon->num_ffirst),
38977+ atomic_read_unchecked(&tcon->num_fnext),
38978+ atomic_read_unchecked(&tcon->num_fclose));
38979 }
38980 }
38981 }
38982diff -urNp linux-3.0.4/fs/cifs/cifsfs.c linux-3.0.4/fs/cifs/cifsfs.c
38983--- linux-3.0.4/fs/cifs/cifsfs.c 2011-09-02 18:11:21.000000000 -0400
38984+++ linux-3.0.4/fs/cifs/cifsfs.c 2011-08-25 17:18:05.000000000 -0400
38985@@ -994,7 +994,7 @@ cifs_init_request_bufs(void)
38986 cifs_req_cachep = kmem_cache_create("cifs_request",
38987 CIFSMaxBufSize +
38988 MAX_CIFS_HDR_SIZE, 0,
38989- SLAB_HWCACHE_ALIGN, NULL);
38990+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
38991 if (cifs_req_cachep == NULL)
38992 return -ENOMEM;
38993
38994@@ -1021,7 +1021,7 @@ cifs_init_request_bufs(void)
38995 efficient to alloc 1 per page off the slab compared to 17K (5page)
38996 alloc of large cifs buffers even when page debugging is on */
38997 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
38998- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
38999+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
39000 NULL);
39001 if (cifs_sm_req_cachep == NULL) {
39002 mempool_destroy(cifs_req_poolp);
39003@@ -1106,8 +1106,8 @@ init_cifs(void)
39004 atomic_set(&bufAllocCount, 0);
39005 atomic_set(&smBufAllocCount, 0);
39006 #ifdef CONFIG_CIFS_STATS2
39007- atomic_set(&totBufAllocCount, 0);
39008- atomic_set(&totSmBufAllocCount, 0);
39009+ atomic_set_unchecked(&totBufAllocCount, 0);
39010+ atomic_set_unchecked(&totSmBufAllocCount, 0);
39011 #endif /* CONFIG_CIFS_STATS2 */
39012
39013 atomic_set(&midCount, 0);
39014diff -urNp linux-3.0.4/fs/cifs/cifsglob.h linux-3.0.4/fs/cifs/cifsglob.h
39015--- linux-3.0.4/fs/cifs/cifsglob.h 2011-07-21 22:17:23.000000000 -0400
39016+++ linux-3.0.4/fs/cifs/cifsglob.h 2011-08-25 17:18:05.000000000 -0400
39017@@ -381,28 +381,28 @@ struct cifs_tcon {
39018 __u16 Flags; /* optional support bits */
39019 enum statusEnum tidStatus;
39020 #ifdef CONFIG_CIFS_STATS
39021- atomic_t num_smbs_sent;
39022- atomic_t num_writes;
39023- atomic_t num_reads;
39024- atomic_t num_flushes;
39025- atomic_t num_oplock_brks;
39026- atomic_t num_opens;
39027- atomic_t num_closes;
39028- atomic_t num_deletes;
39029- atomic_t num_mkdirs;
39030- atomic_t num_posixopens;
39031- atomic_t num_posixmkdirs;
39032- atomic_t num_rmdirs;
39033- atomic_t num_renames;
39034- atomic_t num_t2renames;
39035- atomic_t num_ffirst;
39036- atomic_t num_fnext;
39037- atomic_t num_fclose;
39038- atomic_t num_hardlinks;
39039- atomic_t num_symlinks;
39040- atomic_t num_locks;
39041- atomic_t num_acl_get;
39042- atomic_t num_acl_set;
39043+ atomic_unchecked_t num_smbs_sent;
39044+ atomic_unchecked_t num_writes;
39045+ atomic_unchecked_t num_reads;
39046+ atomic_unchecked_t num_flushes;
39047+ atomic_unchecked_t num_oplock_brks;
39048+ atomic_unchecked_t num_opens;
39049+ atomic_unchecked_t num_closes;
39050+ atomic_unchecked_t num_deletes;
39051+ atomic_unchecked_t num_mkdirs;
39052+ atomic_unchecked_t num_posixopens;
39053+ atomic_unchecked_t num_posixmkdirs;
39054+ atomic_unchecked_t num_rmdirs;
39055+ atomic_unchecked_t num_renames;
39056+ atomic_unchecked_t num_t2renames;
39057+ atomic_unchecked_t num_ffirst;
39058+ atomic_unchecked_t num_fnext;
39059+ atomic_unchecked_t num_fclose;
39060+ atomic_unchecked_t num_hardlinks;
39061+ atomic_unchecked_t num_symlinks;
39062+ atomic_unchecked_t num_locks;
39063+ atomic_unchecked_t num_acl_get;
39064+ atomic_unchecked_t num_acl_set;
39065 #ifdef CONFIG_CIFS_STATS2
39066 unsigned long long time_writes;
39067 unsigned long long time_reads;
39068@@ -613,7 +613,7 @@ convert_delimiter(char *path, char delim
39069 }
39070
39071 #ifdef CONFIG_CIFS_STATS
39072-#define cifs_stats_inc atomic_inc
39073+#define cifs_stats_inc atomic_inc_unchecked
39074
39075 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
39076 unsigned int bytes)
39077@@ -911,8 +911,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnect
39078 /* Various Debug counters */
39079 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
39080 #ifdef CONFIG_CIFS_STATS2
39081-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
39082-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
39083+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
39084+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
39085 #endif
39086 GLOBAL_EXTERN atomic_t smBufAllocCount;
39087 GLOBAL_EXTERN atomic_t midCount;
39088diff -urNp linux-3.0.4/fs/cifs/link.c linux-3.0.4/fs/cifs/link.c
39089--- linux-3.0.4/fs/cifs/link.c 2011-07-21 22:17:23.000000000 -0400
39090+++ linux-3.0.4/fs/cifs/link.c 2011-08-23 21:47:56.000000000 -0400
39091@@ -587,7 +587,7 @@ symlink_exit:
39092
39093 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
39094 {
39095- char *p = nd_get_link(nd);
39096+ const char *p = nd_get_link(nd);
39097 if (!IS_ERR(p))
39098 kfree(p);
39099 }
39100diff -urNp linux-3.0.4/fs/cifs/misc.c linux-3.0.4/fs/cifs/misc.c
39101--- linux-3.0.4/fs/cifs/misc.c 2011-07-21 22:17:23.000000000 -0400
39102+++ linux-3.0.4/fs/cifs/misc.c 2011-08-25 17:18:05.000000000 -0400
39103@@ -156,7 +156,7 @@ cifs_buf_get(void)
39104 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
39105 atomic_inc(&bufAllocCount);
39106 #ifdef CONFIG_CIFS_STATS2
39107- atomic_inc(&totBufAllocCount);
39108+ atomic_inc_unchecked(&totBufAllocCount);
39109 #endif /* CONFIG_CIFS_STATS2 */
39110 }
39111
39112@@ -191,7 +191,7 @@ cifs_small_buf_get(void)
39113 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
39114 atomic_inc(&smBufAllocCount);
39115 #ifdef CONFIG_CIFS_STATS2
39116- atomic_inc(&totSmBufAllocCount);
39117+ atomic_inc_unchecked(&totSmBufAllocCount);
39118 #endif /* CONFIG_CIFS_STATS2 */
39119
39120 }
39121diff -urNp linux-3.0.4/fs/coda/cache.c linux-3.0.4/fs/coda/cache.c
39122--- linux-3.0.4/fs/coda/cache.c 2011-07-21 22:17:23.000000000 -0400
39123+++ linux-3.0.4/fs/coda/cache.c 2011-08-23 21:47:56.000000000 -0400
39124@@ -24,7 +24,7 @@
39125 #include "coda_linux.h"
39126 #include "coda_cache.h"
39127
39128-static atomic_t permission_epoch = ATOMIC_INIT(0);
39129+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
39130
39131 /* replace or extend an acl cache hit */
39132 void coda_cache_enter(struct inode *inode, int mask)
39133@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inod
39134 struct coda_inode_info *cii = ITOC(inode);
39135
39136 spin_lock(&cii->c_lock);
39137- cii->c_cached_epoch = atomic_read(&permission_epoch);
39138+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
39139 if (cii->c_uid != current_fsuid()) {
39140 cii->c_uid = current_fsuid();
39141 cii->c_cached_perm = mask;
39142@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode
39143 {
39144 struct coda_inode_info *cii = ITOC(inode);
39145 spin_lock(&cii->c_lock);
39146- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
39147+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
39148 spin_unlock(&cii->c_lock);
39149 }
39150
39151 /* remove all acl caches */
39152 void coda_cache_clear_all(struct super_block *sb)
39153 {
39154- atomic_inc(&permission_epoch);
39155+ atomic_inc_unchecked(&permission_epoch);
39156 }
39157
39158
39159@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode
39160 spin_lock(&cii->c_lock);
39161 hit = (mask & cii->c_cached_perm) == mask &&
39162 cii->c_uid == current_fsuid() &&
39163- cii->c_cached_epoch == atomic_read(&permission_epoch);
39164+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
39165 spin_unlock(&cii->c_lock);
39166
39167 return hit;
39168diff -urNp linux-3.0.4/fs/compat_binfmt_elf.c linux-3.0.4/fs/compat_binfmt_elf.c
39169--- linux-3.0.4/fs/compat_binfmt_elf.c 2011-07-21 22:17:23.000000000 -0400
39170+++ linux-3.0.4/fs/compat_binfmt_elf.c 2011-08-23 21:47:56.000000000 -0400
39171@@ -30,11 +30,13 @@
39172 #undef elf_phdr
39173 #undef elf_shdr
39174 #undef elf_note
39175+#undef elf_dyn
39176 #undef elf_addr_t
39177 #define elfhdr elf32_hdr
39178 #define elf_phdr elf32_phdr
39179 #define elf_shdr elf32_shdr
39180 #define elf_note elf32_note
39181+#define elf_dyn Elf32_Dyn
39182 #define elf_addr_t Elf32_Addr
39183
39184 /*
39185diff -urNp linux-3.0.4/fs/compat.c linux-3.0.4/fs/compat.c
39186--- linux-3.0.4/fs/compat.c 2011-07-21 22:17:23.000000000 -0400
39187+++ linux-3.0.4/fs/compat.c 2011-10-06 04:17:55.000000000 -0400
39188@@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(const
39189 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
39190 {
39191 compat_ino_t ino = stat->ino;
39192- typeof(ubuf->st_uid) uid = 0;
39193- typeof(ubuf->st_gid) gid = 0;
39194+ typeof(((struct compat_stat *)0)->st_uid) uid = 0;
39195+ typeof(((struct compat_stat *)0)->st_gid) gid = 0;
39196 int err;
39197
39198 SET_UID(uid, stat->uid);
39199@@ -508,7 +508,7 @@ compat_sys_io_setup(unsigned nr_reqs, u3
39200
39201 set_fs(KERNEL_DS);
39202 /* The __user pointer cast is valid because of the set_fs() */
39203- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
39204+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
39205 set_fs(oldfs);
39206 /* truncating is ok because it's a user address */
39207 if (!ret)
39208@@ -566,7 +566,7 @@ ssize_t compat_rw_copy_check_uvector(int
39209 goto out;
39210
39211 ret = -EINVAL;
39212- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
39213+ if (nr_segs > UIO_MAXIOV)
39214 goto out;
39215 if (nr_segs > fast_segs) {
39216 ret = -ENOMEM;
39217@@ -848,6 +848,7 @@ struct compat_old_linux_dirent {
39218
39219 struct compat_readdir_callback {
39220 struct compat_old_linux_dirent __user *dirent;
39221+ struct file * file;
39222 int result;
39223 };
39224
39225@@ -865,6 +866,10 @@ static int compat_fillonedir(void *__buf
39226 buf->result = -EOVERFLOW;
39227 return -EOVERFLOW;
39228 }
39229+
39230+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
39231+ return 0;
39232+
39233 buf->result++;
39234 dirent = buf->dirent;
39235 if (!access_ok(VERIFY_WRITE, dirent,
39236@@ -897,6 +902,7 @@ asmlinkage long compat_sys_old_readdir(u
39237
39238 buf.result = 0;
39239 buf.dirent = dirent;
39240+ buf.file = file;
39241
39242 error = vfs_readdir(file, compat_fillonedir, &buf);
39243 if (buf.result)
39244@@ -917,6 +923,7 @@ struct compat_linux_dirent {
39245 struct compat_getdents_callback {
39246 struct compat_linux_dirent __user *current_dir;
39247 struct compat_linux_dirent __user *previous;
39248+ struct file * file;
39249 int count;
39250 int error;
39251 };
39252@@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, c
39253 buf->error = -EOVERFLOW;
39254 return -EOVERFLOW;
39255 }
39256+
39257+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
39258+ return 0;
39259+
39260 dirent = buf->previous;
39261 if (dirent) {
39262 if (__put_user(offset, &dirent->d_off))
39263@@ -985,6 +996,7 @@ asmlinkage long compat_sys_getdents(unsi
39264 buf.previous = NULL;
39265 buf.count = count;
39266 buf.error = 0;
39267+ buf.file = file;
39268
39269 error = vfs_readdir(file, compat_filldir, &buf);
39270 if (error >= 0)
39271@@ -1006,6 +1018,7 @@ out:
39272 struct compat_getdents_callback64 {
39273 struct linux_dirent64 __user *current_dir;
39274 struct linux_dirent64 __user *previous;
39275+ struct file * file;
39276 int count;
39277 int error;
39278 };
39279@@ -1022,6 +1035,10 @@ static int compat_filldir64(void * __buf
39280 buf->error = -EINVAL; /* only used if we fail.. */
39281 if (reclen > buf->count)
39282 return -EINVAL;
39283+
39284+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
39285+ return 0;
39286+
39287 dirent = buf->previous;
39288
39289 if (dirent) {
39290@@ -1073,13 +1090,14 @@ asmlinkage long compat_sys_getdents64(un
39291 buf.previous = NULL;
39292 buf.count = count;
39293 buf.error = 0;
39294+ buf.file = file;
39295
39296 error = vfs_readdir(file, compat_filldir64, &buf);
39297 if (error >= 0)
39298 error = buf.error;
39299 lastdirent = buf.previous;
39300 if (lastdirent) {
39301- typeof(lastdirent->d_off) d_off = file->f_pos;
39302+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
39303 if (__put_user_unaligned(d_off, &lastdirent->d_off))
39304 error = -EFAULT;
39305 else
39306@@ -1446,6 +1464,8 @@ int compat_core_sys_select(int n, compat
39307 struct fdtable *fdt;
39308 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
39309
39310+ pax_track_stack();
39311+
39312 if (n < 0)
39313 goto out_nofds;
39314
39315@@ -1904,7 +1924,7 @@ asmlinkage long compat_sys_nfsservctl(in
39316 oldfs = get_fs();
39317 set_fs(KERNEL_DS);
39318 /* The __user pointer casts are valid because of the set_fs() */
39319- err = sys_nfsservctl(cmd, (void __user *) karg, (void __user *) kres);
39320+ err = sys_nfsservctl(cmd, (void __force_user *) karg, (void __force_user *) kres);
39321 set_fs(oldfs);
39322
39323 if (err)
39324diff -urNp linux-3.0.4/fs/compat_ioctl.c linux-3.0.4/fs/compat_ioctl.c
39325--- linux-3.0.4/fs/compat_ioctl.c 2011-07-21 22:17:23.000000000 -0400
39326+++ linux-3.0.4/fs/compat_ioctl.c 2011-10-06 04:17:55.000000000 -0400
39327@@ -208,6 +208,8 @@ static int do_video_set_spu_palette(unsi
39328
39329 err = get_user(palp, &up->palette);
39330 err |= get_user(length, &up->length);
39331+ if (err)
39332+ return -EFAULT;
39333
39334 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
39335 err = put_user(compat_ptr(palp), &up_native->palette);
39336@@ -619,7 +621,7 @@ static int serial_struct_ioctl(unsigned
39337 return -EFAULT;
39338 if (__get_user(udata, &ss32->iomem_base))
39339 return -EFAULT;
39340- ss.iomem_base = compat_ptr(udata);
39341+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
39342 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
39343 __get_user(ss.port_high, &ss32->port_high))
39344 return -EFAULT;
39345@@ -794,7 +796,7 @@ static int compat_ioctl_preallocate(stru
39346 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
39347 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
39348 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
39349- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
39350+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
39351 return -EFAULT;
39352
39353 return ioctl_preallocate(file, p);
39354@@ -1638,8 +1640,8 @@ asmlinkage long compat_sys_ioctl(unsigne
39355 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
39356 {
39357 unsigned int a, b;
39358- a = *(unsigned int *)p;
39359- b = *(unsigned int *)q;
39360+ a = *(const unsigned int *)p;
39361+ b = *(const unsigned int *)q;
39362 if (a > b)
39363 return 1;
39364 if (a < b)
39365diff -urNp linux-3.0.4/fs/configfs/dir.c linux-3.0.4/fs/configfs/dir.c
39366--- linux-3.0.4/fs/configfs/dir.c 2011-07-21 22:17:23.000000000 -0400
39367+++ linux-3.0.4/fs/configfs/dir.c 2011-08-23 21:47:56.000000000 -0400
39368@@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file
39369 }
39370 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
39371 struct configfs_dirent *next;
39372- const char * name;
39373+ const unsigned char * name;
39374+ char d_name[sizeof(next->s_dentry->d_iname)];
39375 int len;
39376 struct inode *inode = NULL;
39377
39378@@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file
39379 continue;
39380
39381 name = configfs_get_name(next);
39382- len = strlen(name);
39383+ if (next->s_dentry && name == next->s_dentry->d_iname) {
39384+ len = next->s_dentry->d_name.len;
39385+ memcpy(d_name, name, len);
39386+ name = d_name;
39387+ } else
39388+ len = strlen(name);
39389
39390 /*
39391 * We'll have a dentry and an inode for
39392diff -urNp linux-3.0.4/fs/dcache.c linux-3.0.4/fs/dcache.c
39393--- linux-3.0.4/fs/dcache.c 2011-07-21 22:17:23.000000000 -0400
39394+++ linux-3.0.4/fs/dcache.c 2011-08-23 21:47:56.000000000 -0400
39395@@ -3089,7 +3089,7 @@ void __init vfs_caches_init(unsigned lon
39396 mempages -= reserve;
39397
39398 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
39399- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
39400+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
39401
39402 dcache_init();
39403 inode_init();
39404diff -urNp linux-3.0.4/fs/ecryptfs/inode.c linux-3.0.4/fs/ecryptfs/inode.c
39405--- linux-3.0.4/fs/ecryptfs/inode.c 2011-09-02 18:11:21.000000000 -0400
39406+++ linux-3.0.4/fs/ecryptfs/inode.c 2011-10-06 04:17:55.000000000 -0400
39407@@ -704,7 +704,7 @@ static int ecryptfs_readlink_lower(struc
39408 old_fs = get_fs();
39409 set_fs(get_ds());
39410 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
39411- (char __user *)lower_buf,
39412+ (char __force_user *)lower_buf,
39413 lower_bufsiz);
39414 set_fs(old_fs);
39415 if (rc < 0)
39416@@ -750,7 +750,7 @@ static void *ecryptfs_follow_link(struct
39417 }
39418 old_fs = get_fs();
39419 set_fs(get_ds());
39420- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
39421+ rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
39422 set_fs(old_fs);
39423 if (rc < 0) {
39424 kfree(buf);
39425@@ -765,7 +765,7 @@ out:
39426 static void
39427 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
39428 {
39429- char *buf = nd_get_link(nd);
39430+ const char *buf = nd_get_link(nd);
39431 if (!IS_ERR(buf)) {
39432 /* Free the char* */
39433 kfree(buf);
39434diff -urNp linux-3.0.4/fs/ecryptfs/miscdev.c linux-3.0.4/fs/ecryptfs/miscdev.c
39435--- linux-3.0.4/fs/ecryptfs/miscdev.c 2011-07-21 22:17:23.000000000 -0400
39436+++ linux-3.0.4/fs/ecryptfs/miscdev.c 2011-08-23 21:47:56.000000000 -0400
39437@@ -328,7 +328,7 @@ check_list:
39438 goto out_unlock_msg_ctx;
39439 i = 5;
39440 if (msg_ctx->msg) {
39441- if (copy_to_user(&buf[i], packet_length, packet_length_size))
39442+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
39443 goto out_unlock_msg_ctx;
39444 i += packet_length_size;
39445 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
39446diff -urNp linux-3.0.4/fs/ecryptfs/read_write.c linux-3.0.4/fs/ecryptfs/read_write.c
39447--- linux-3.0.4/fs/ecryptfs/read_write.c 2011-09-02 18:11:21.000000000 -0400
39448+++ linux-3.0.4/fs/ecryptfs/read_write.c 2011-10-06 04:17:55.000000000 -0400
39449@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *e
39450 return -EIO;
39451 fs_save = get_fs();
39452 set_fs(get_ds());
39453- rc = vfs_write(lower_file, data, size, &offset);
39454+ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
39455 set_fs(fs_save);
39456 mark_inode_dirty_sync(ecryptfs_inode);
39457 return rc;
39458@@ -235,7 +235,7 @@ int ecryptfs_read_lower(char *data, loff
39459 return -EIO;
39460 fs_save = get_fs();
39461 set_fs(get_ds());
39462- rc = vfs_read(lower_file, data, size, &offset);
39463+ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
39464 set_fs(fs_save);
39465 return rc;
39466 }
39467diff -urNp linux-3.0.4/fs/exec.c linux-3.0.4/fs/exec.c
39468--- linux-3.0.4/fs/exec.c 2011-07-21 22:17:23.000000000 -0400
39469+++ linux-3.0.4/fs/exec.c 2011-10-06 04:17:55.000000000 -0400
39470@@ -55,12 +55,24 @@
39471 #include <linux/pipe_fs_i.h>
39472 #include <linux/oom.h>
39473 #include <linux/compat.h>
39474+#include <linux/random.h>
39475+#include <linux/seq_file.h>
39476+
39477+#ifdef CONFIG_PAX_REFCOUNT
39478+#include <linux/kallsyms.h>
39479+#include <linux/kdebug.h>
39480+#endif
39481
39482 #include <asm/uaccess.h>
39483 #include <asm/mmu_context.h>
39484 #include <asm/tlb.h>
39485 #include "internal.h"
39486
39487+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
39488+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
39489+EXPORT_SYMBOL(pax_set_initial_flags_func);
39490+#endif
39491+
39492 int core_uses_pid;
39493 char core_pattern[CORENAME_MAX_SIZE] = "core";
39494 unsigned int core_pipe_limit;
39495@@ -70,7 +82,7 @@ struct core_name {
39496 char *corename;
39497 int used, size;
39498 };
39499-static atomic_t call_count = ATOMIC_INIT(1);
39500+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
39501
39502 /* The maximal length of core_pattern is also specified in sysctl.c */
39503
39504@@ -116,7 +128,7 @@ SYSCALL_DEFINE1(uselib, const char __use
39505 char *tmp = getname(library);
39506 int error = PTR_ERR(tmp);
39507 static const struct open_flags uselib_flags = {
39508- .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
39509+ .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
39510 .acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN,
39511 .intent = LOOKUP_OPEN
39512 };
39513@@ -195,18 +207,10 @@ static struct page *get_arg_page(struct
39514 int write)
39515 {
39516 struct page *page;
39517- int ret;
39518
39519-#ifdef CONFIG_STACK_GROWSUP
39520- if (write) {
39521- ret = expand_downwards(bprm->vma, pos);
39522- if (ret < 0)
39523- return NULL;
39524- }
39525-#endif
39526- ret = get_user_pages(current, bprm->mm, pos,
39527- 1, write, 1, &page, NULL);
39528- if (ret <= 0)
39529+ if (0 > expand_downwards(bprm->vma, pos))
39530+ return NULL;
39531+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
39532 return NULL;
39533
39534 if (write) {
39535@@ -281,6 +285,11 @@ static int __bprm_mm_init(struct linux_b
39536 vma->vm_end = STACK_TOP_MAX;
39537 vma->vm_start = vma->vm_end - PAGE_SIZE;
39538 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
39539+
39540+#ifdef CONFIG_PAX_SEGMEXEC
39541+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
39542+#endif
39543+
39544 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
39545 INIT_LIST_HEAD(&vma->anon_vma_chain);
39546
39547@@ -295,6 +304,12 @@ static int __bprm_mm_init(struct linux_b
39548 mm->stack_vm = mm->total_vm = 1;
39549 up_write(&mm->mmap_sem);
39550 bprm->p = vma->vm_end - sizeof(void *);
39551+
39552+#ifdef CONFIG_PAX_RANDUSTACK
39553+ if (randomize_va_space)
39554+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
39555+#endif
39556+
39557 return 0;
39558 err:
39559 up_write(&mm->mmap_sem);
39560@@ -403,19 +418,7 @@ err:
39561 return err;
39562 }
39563
39564-struct user_arg_ptr {
39565-#ifdef CONFIG_COMPAT
39566- bool is_compat;
39567-#endif
39568- union {
39569- const char __user *const __user *native;
39570-#ifdef CONFIG_COMPAT
39571- compat_uptr_t __user *compat;
39572-#endif
39573- } ptr;
39574-};
39575-
39576-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
39577+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
39578 {
39579 const char __user *native;
39580
39581@@ -424,14 +427,14 @@ static const char __user *get_user_arg_p
39582 compat_uptr_t compat;
39583
39584 if (get_user(compat, argv.ptr.compat + nr))
39585- return ERR_PTR(-EFAULT);
39586+ return (const char __force_user *)ERR_PTR(-EFAULT);
39587
39588 return compat_ptr(compat);
39589 }
39590 #endif
39591
39592 if (get_user(native, argv.ptr.native + nr))
39593- return ERR_PTR(-EFAULT);
39594+ return (const char __force_user *)ERR_PTR(-EFAULT);
39595
39596 return native;
39597 }
39598@@ -450,7 +453,7 @@ static int count(struct user_arg_ptr arg
39599 if (!p)
39600 break;
39601
39602- if (IS_ERR(p))
39603+ if (IS_ERR((const char __force_kernel *)p))
39604 return -EFAULT;
39605
39606 if (i++ >= max)
39607@@ -484,7 +487,7 @@ static int copy_strings(int argc, struct
39608
39609 ret = -EFAULT;
39610 str = get_user_arg_ptr(argv, argc);
39611- if (IS_ERR(str))
39612+ if (IS_ERR((const char __force_kernel *)str))
39613 goto out;
39614
39615 len = strnlen_user(str, MAX_ARG_STRLEN);
39616@@ -566,7 +569,7 @@ int copy_strings_kernel(int argc, const
39617 int r;
39618 mm_segment_t oldfs = get_fs();
39619 struct user_arg_ptr argv = {
39620- .ptr.native = (const char __user *const __user *)__argv,
39621+ .ptr.native = (const char __force_user *const __force_user *)__argv,
39622 };
39623
39624 set_fs(KERNEL_DS);
39625@@ -601,7 +604,8 @@ static int shift_arg_pages(struct vm_are
39626 unsigned long new_end = old_end - shift;
39627 struct mmu_gather tlb;
39628
39629- BUG_ON(new_start > new_end);
39630+ if (new_start >= new_end || new_start < mmap_min_addr)
39631+ return -ENOMEM;
39632
39633 /*
39634 * ensure there are no vmas between where we want to go
39635@@ -610,6 +614,10 @@ static int shift_arg_pages(struct vm_are
39636 if (vma != find_vma(mm, new_start))
39637 return -EFAULT;
39638
39639+#ifdef CONFIG_PAX_SEGMEXEC
39640+ BUG_ON(pax_find_mirror_vma(vma));
39641+#endif
39642+
39643 /*
39644 * cover the whole range: [new_start, old_end)
39645 */
39646@@ -690,10 +698,6 @@ int setup_arg_pages(struct linux_binprm
39647 stack_top = arch_align_stack(stack_top);
39648 stack_top = PAGE_ALIGN(stack_top);
39649
39650- if (unlikely(stack_top < mmap_min_addr) ||
39651- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
39652- return -ENOMEM;
39653-
39654 stack_shift = vma->vm_end - stack_top;
39655
39656 bprm->p -= stack_shift;
39657@@ -705,8 +709,28 @@ int setup_arg_pages(struct linux_binprm
39658 bprm->exec -= stack_shift;
39659
39660 down_write(&mm->mmap_sem);
39661+
39662+ /* Move stack pages down in memory. */
39663+ if (stack_shift) {
39664+ ret = shift_arg_pages(vma, stack_shift);
39665+ if (ret)
39666+ goto out_unlock;
39667+ }
39668+
39669 vm_flags = VM_STACK_FLAGS;
39670
39671+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
39672+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39673+ vm_flags &= ~VM_EXEC;
39674+
39675+#ifdef CONFIG_PAX_MPROTECT
39676+ if (mm->pax_flags & MF_PAX_MPROTECT)
39677+ vm_flags &= ~VM_MAYEXEC;
39678+#endif
39679+
39680+ }
39681+#endif
39682+
39683 /*
39684 * Adjust stack execute permissions; explicitly enable for
39685 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
39686@@ -725,13 +749,6 @@ int setup_arg_pages(struct linux_binprm
39687 goto out_unlock;
39688 BUG_ON(prev != vma);
39689
39690- /* Move stack pages down in memory. */
39691- if (stack_shift) {
39692- ret = shift_arg_pages(vma, stack_shift);
39693- if (ret)
39694- goto out_unlock;
39695- }
39696-
39697 /* mprotect_fixup is overkill to remove the temporary stack flags */
39698 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
39699
39700@@ -771,7 +788,7 @@ struct file *open_exec(const char *name)
39701 struct file *file;
39702 int err;
39703 static const struct open_flags open_exec_flags = {
39704- .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
39705+ .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
39706 .acc_mode = MAY_EXEC | MAY_OPEN,
39707 .intent = LOOKUP_OPEN
39708 };
39709@@ -812,7 +829,7 @@ int kernel_read(struct file *file, loff_
39710 old_fs = get_fs();
39711 set_fs(get_ds());
39712 /* The cast to a user pointer is valid due to the set_fs() */
39713- result = vfs_read(file, (void __user *)addr, count, &pos);
39714+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
39715 set_fs(old_fs);
39716 return result;
39717 }
39718@@ -1236,7 +1253,7 @@ int check_unsafe_exec(struct linux_binpr
39719 }
39720 rcu_read_unlock();
39721
39722- if (p->fs->users > n_fs) {
39723+ if (atomic_read(&p->fs->users) > n_fs) {
39724 bprm->unsafe |= LSM_UNSAFE_SHARE;
39725 } else {
39726 res = -EAGAIN;
39727@@ -1428,11 +1445,35 @@ static int do_execve_common(const char *
39728 struct user_arg_ptr envp,
39729 struct pt_regs *regs)
39730 {
39731+#ifdef CONFIG_GRKERNSEC
39732+ struct file *old_exec_file;
39733+ struct acl_subject_label *old_acl;
39734+ struct rlimit old_rlim[RLIM_NLIMITS];
39735+#endif
39736 struct linux_binprm *bprm;
39737 struct file *file;
39738 struct files_struct *displaced;
39739 bool clear_in_exec;
39740 int retval;
39741+ const struct cred *cred = current_cred();
39742+
39743+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
39744+
39745+ /*
39746+ * We move the actual failure in case of RLIMIT_NPROC excess from
39747+ * set*uid() to execve() because too many poorly written programs
39748+ * don't check setuid() return code. Here we additionally recheck
39749+ * whether NPROC limit is still exceeded.
39750+ */
39751+ if ((current->flags & PF_NPROC_EXCEEDED) &&
39752+ atomic_read(&cred->user->processes) > rlimit(RLIMIT_NPROC)) {
39753+ retval = -EAGAIN;
39754+ goto out_ret;
39755+ }
39756+
39757+ /* We're below the limit (still or again), so we don't want to make
39758+ * further execve() calls fail. */
39759+ current->flags &= ~PF_NPROC_EXCEEDED;
39760
39761 retval = unshare_files(&displaced);
39762 if (retval)
39763@@ -1464,6 +1505,16 @@ static int do_execve_common(const char *
39764 bprm->filename = filename;
39765 bprm->interp = filename;
39766
39767+ if (gr_process_user_ban()) {
39768+ retval = -EPERM;
39769+ goto out_file;
39770+ }
39771+
39772+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
39773+ retval = -EACCES;
39774+ goto out_file;
39775+ }
39776+
39777 retval = bprm_mm_init(bprm);
39778 if (retval)
39779 goto out_file;
39780@@ -1493,9 +1544,40 @@ static int do_execve_common(const char *
39781 if (retval < 0)
39782 goto out;
39783
39784+ if (!gr_tpe_allow(file)) {
39785+ retval = -EACCES;
39786+ goto out;
39787+ }
39788+
39789+ if (gr_check_crash_exec(file)) {
39790+ retval = -EACCES;
39791+ goto out;
39792+ }
39793+
39794+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
39795+
39796+ gr_handle_exec_args(bprm, argv);
39797+
39798+#ifdef CONFIG_GRKERNSEC
39799+ old_acl = current->acl;
39800+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
39801+ old_exec_file = current->exec_file;
39802+ get_file(file);
39803+ current->exec_file = file;
39804+#endif
39805+
39806+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
39807+ bprm->unsafe & LSM_UNSAFE_SHARE);
39808+ if (retval < 0)
39809+ goto out_fail;
39810+
39811 retval = search_binary_handler(bprm,regs);
39812 if (retval < 0)
39813- goto out;
39814+ goto out_fail;
39815+#ifdef CONFIG_GRKERNSEC
39816+ if (old_exec_file)
39817+ fput(old_exec_file);
39818+#endif
39819
39820 /* execve succeeded */
39821 current->fs->in_exec = 0;
39822@@ -1506,6 +1588,14 @@ static int do_execve_common(const char *
39823 put_files_struct(displaced);
39824 return retval;
39825
39826+out_fail:
39827+#ifdef CONFIG_GRKERNSEC
39828+ current->acl = old_acl;
39829+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
39830+ fput(current->exec_file);
39831+ current->exec_file = old_exec_file;
39832+#endif
39833+
39834 out:
39835 if (bprm->mm) {
39836 acct_arg_size(bprm, 0);
39837@@ -1579,7 +1669,7 @@ static int expand_corename(struct core_n
39838 {
39839 char *old_corename = cn->corename;
39840
39841- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
39842+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
39843 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
39844
39845 if (!cn->corename) {
39846@@ -1667,7 +1757,7 @@ static int format_corename(struct core_n
39847 int pid_in_pattern = 0;
39848 int err = 0;
39849
39850- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
39851+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
39852 cn->corename = kmalloc(cn->size, GFP_KERNEL);
39853 cn->used = 0;
39854
39855@@ -1758,6 +1848,219 @@ out:
39856 return ispipe;
39857 }
39858
39859+int pax_check_flags(unsigned long *flags)
39860+{
39861+ int retval = 0;
39862+
39863+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
39864+ if (*flags & MF_PAX_SEGMEXEC)
39865+ {
39866+ *flags &= ~MF_PAX_SEGMEXEC;
39867+ retval = -EINVAL;
39868+ }
39869+#endif
39870+
39871+ if ((*flags & MF_PAX_PAGEEXEC)
39872+
39873+#ifdef CONFIG_PAX_PAGEEXEC
39874+ && (*flags & MF_PAX_SEGMEXEC)
39875+#endif
39876+
39877+ )
39878+ {
39879+ *flags &= ~MF_PAX_PAGEEXEC;
39880+ retval = -EINVAL;
39881+ }
39882+
39883+ if ((*flags & MF_PAX_MPROTECT)
39884+
39885+#ifdef CONFIG_PAX_MPROTECT
39886+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
39887+#endif
39888+
39889+ )
39890+ {
39891+ *flags &= ~MF_PAX_MPROTECT;
39892+ retval = -EINVAL;
39893+ }
39894+
39895+ if ((*flags & MF_PAX_EMUTRAMP)
39896+
39897+#ifdef CONFIG_PAX_EMUTRAMP
39898+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
39899+#endif
39900+
39901+ )
39902+ {
39903+ *flags &= ~MF_PAX_EMUTRAMP;
39904+ retval = -EINVAL;
39905+ }
39906+
39907+ return retval;
39908+}
39909+
39910+EXPORT_SYMBOL(pax_check_flags);
39911+
39912+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
39913+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
39914+{
39915+ struct task_struct *tsk = current;
39916+ struct mm_struct *mm = current->mm;
39917+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
39918+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
39919+ char *path_exec = NULL;
39920+ char *path_fault = NULL;
39921+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
39922+
39923+ if (buffer_exec && buffer_fault) {
39924+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
39925+
39926+ down_read(&mm->mmap_sem);
39927+ vma = mm->mmap;
39928+ while (vma && (!vma_exec || !vma_fault)) {
39929+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
39930+ vma_exec = vma;
39931+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
39932+ vma_fault = vma;
39933+ vma = vma->vm_next;
39934+ }
39935+ if (vma_exec) {
39936+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
39937+ if (IS_ERR(path_exec))
39938+ path_exec = "<path too long>";
39939+ else {
39940+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
39941+ if (path_exec) {
39942+ *path_exec = 0;
39943+ path_exec = buffer_exec;
39944+ } else
39945+ path_exec = "<path too long>";
39946+ }
39947+ }
39948+ if (vma_fault) {
39949+ start = vma_fault->vm_start;
39950+ end = vma_fault->vm_end;
39951+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
39952+ if (vma_fault->vm_file) {
39953+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
39954+ if (IS_ERR(path_fault))
39955+ path_fault = "<path too long>";
39956+ else {
39957+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
39958+ if (path_fault) {
39959+ *path_fault = 0;
39960+ path_fault = buffer_fault;
39961+ } else
39962+ path_fault = "<path too long>";
39963+ }
39964+ } else
39965+ path_fault = "<anonymous mapping>";
39966+ }
39967+ up_read(&mm->mmap_sem);
39968+ }
39969+ if (tsk->signal->curr_ip)
39970+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
39971+ else
39972+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
39973+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
39974+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
39975+ task_uid(tsk), task_euid(tsk), pc, sp);
39976+ free_page((unsigned long)buffer_exec);
39977+ free_page((unsigned long)buffer_fault);
39978+ pax_report_insns(pc, sp);
39979+ do_coredump(SIGKILL, SIGKILL, regs);
39980+}
39981+#endif
39982+
39983+#ifdef CONFIG_PAX_REFCOUNT
39984+void pax_report_refcount_overflow(struct pt_regs *regs)
39985+{
39986+ if (current->signal->curr_ip)
39987+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
39988+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
39989+ else
39990+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
39991+ current->comm, task_pid_nr(current), current_uid(), current_euid());
39992+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
39993+ show_regs(regs);
39994+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
39995+}
39996+#endif
39997+
39998+#ifdef CONFIG_PAX_USERCOPY
39999+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
40000+int object_is_on_stack(const void *obj, unsigned long len)
40001+{
40002+ const void * const stack = task_stack_page(current);
40003+ const void * const stackend = stack + THREAD_SIZE;
40004+
40005+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
40006+ const void *frame = NULL;
40007+ const void *oldframe;
40008+#endif
40009+
40010+ if (obj + len < obj)
40011+ return -1;
40012+
40013+ if (obj + len <= stack || stackend <= obj)
40014+ return 0;
40015+
40016+ if (obj < stack || stackend < obj + len)
40017+ return -1;
40018+
40019+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
40020+ oldframe = __builtin_frame_address(1);
40021+ if (oldframe)
40022+ frame = __builtin_frame_address(2);
40023+ /*
40024+ low ----------------------------------------------> high
40025+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
40026+ ^----------------^
40027+ allow copies only within here
40028+ */
40029+ while (stack <= frame && frame < stackend) {
40030+ /* if obj + len extends past the last frame, this
40031+ check won't pass and the next frame will be 0,
40032+ causing us to bail out and correctly report
40033+ the copy as invalid
40034+ */
40035+ if (obj + len <= frame)
40036+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
40037+ oldframe = frame;
40038+ frame = *(const void * const *)frame;
40039+ }
40040+ return -1;
40041+#else
40042+ return 1;
40043+#endif
40044+}
40045+
40046+
40047+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
40048+{
40049+ if (current->signal->curr_ip)
40050+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
40051+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
40052+ else
40053+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
40054+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
40055+ dump_stack();
40056+ gr_handle_kernel_exploit();
40057+ do_group_exit(SIGKILL);
40058+}
40059+#endif
40060+
40061+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
40062+void pax_track_stack(void)
40063+{
40064+ unsigned long sp = (unsigned long)&sp;
40065+ if (sp < current_thread_info()->lowest_stack &&
40066+ sp > (unsigned long)task_stack_page(current))
40067+ current_thread_info()->lowest_stack = sp;
40068+}
40069+EXPORT_SYMBOL(pax_track_stack);
40070+#endif
40071+
40072 static int zap_process(struct task_struct *start, int exit_code)
40073 {
40074 struct task_struct *t;
40075@@ -1969,17 +2272,17 @@ static void wait_for_dump_helpers(struct
40076 pipe = file->f_path.dentry->d_inode->i_pipe;
40077
40078 pipe_lock(pipe);
40079- pipe->readers++;
40080- pipe->writers--;
40081+ atomic_inc(&pipe->readers);
40082+ atomic_dec(&pipe->writers);
40083
40084- while ((pipe->readers > 1) && (!signal_pending(current))) {
40085+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
40086 wake_up_interruptible_sync(&pipe->wait);
40087 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
40088 pipe_wait(pipe);
40089 }
40090
40091- pipe->readers--;
40092- pipe->writers++;
40093+ atomic_dec(&pipe->readers);
40094+ atomic_inc(&pipe->writers);
40095 pipe_unlock(pipe);
40096
40097 }
40098@@ -2040,7 +2343,7 @@ void do_coredump(long signr, int exit_co
40099 int retval = 0;
40100 int flag = 0;
40101 int ispipe;
40102- static atomic_t core_dump_count = ATOMIC_INIT(0);
40103+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
40104 struct coredump_params cprm = {
40105 .signr = signr,
40106 .regs = regs,
40107@@ -2055,6 +2358,9 @@ void do_coredump(long signr, int exit_co
40108
40109 audit_core_dumps(signr);
40110
40111+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
40112+ gr_handle_brute_attach(current, cprm.mm_flags);
40113+
40114 binfmt = mm->binfmt;
40115 if (!binfmt || !binfmt->core_dump)
40116 goto fail;
40117@@ -2095,6 +2401,8 @@ void do_coredump(long signr, int exit_co
40118 goto fail_corename;
40119 }
40120
40121+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
40122+
40123 if (ispipe) {
40124 int dump_count;
40125 char **helper_argv;
40126@@ -2122,7 +2430,7 @@ void do_coredump(long signr, int exit_co
40127 }
40128 cprm.limit = RLIM_INFINITY;
40129
40130- dump_count = atomic_inc_return(&core_dump_count);
40131+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
40132 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
40133 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
40134 task_tgid_vnr(current), current->comm);
40135@@ -2192,7 +2500,7 @@ close_fail:
40136 filp_close(cprm.file, NULL);
40137 fail_dropcount:
40138 if (ispipe)
40139- atomic_dec(&core_dump_count);
40140+ atomic_dec_unchecked(&core_dump_count);
40141 fail_unlock:
40142 kfree(cn.corename);
40143 fail_corename:
40144@@ -2211,7 +2519,7 @@ fail:
40145 */
40146 int dump_write(struct file *file, const void *addr, int nr)
40147 {
40148- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
40149+ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
40150 }
40151 EXPORT_SYMBOL(dump_write);
40152
40153diff -urNp linux-3.0.4/fs/ext2/balloc.c linux-3.0.4/fs/ext2/balloc.c
40154--- linux-3.0.4/fs/ext2/balloc.c 2011-07-21 22:17:23.000000000 -0400
40155+++ linux-3.0.4/fs/ext2/balloc.c 2011-08-23 21:48:14.000000000 -0400
40156@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
40157
40158 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
40159 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
40160- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
40161+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
40162 sbi->s_resuid != current_fsuid() &&
40163 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
40164 return 0;
40165diff -urNp linux-3.0.4/fs/ext3/balloc.c linux-3.0.4/fs/ext3/balloc.c
40166--- linux-3.0.4/fs/ext3/balloc.c 2011-07-21 22:17:23.000000000 -0400
40167+++ linux-3.0.4/fs/ext3/balloc.c 2011-08-23 21:48:14.000000000 -0400
40168@@ -1441,7 +1441,7 @@ static int ext3_has_free_blocks(struct e
40169
40170 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
40171 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
40172- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
40173+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
40174 sbi->s_resuid != current_fsuid() &&
40175 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
40176 return 0;
40177diff -urNp linux-3.0.4/fs/ext3/ioctl.c linux-3.0.4/fs/ext3/ioctl.c
40178--- linux-3.0.4/fs/ext3/ioctl.c 2011-07-21 22:17:23.000000000 -0400
40179+++ linux-3.0.4/fs/ext3/ioctl.c 2011-10-06 04:17:55.000000000 -0400
40180@@ -285,7 +285,7 @@ group_add_out:
40181 if (!capable(CAP_SYS_ADMIN))
40182 return -EPERM;
40183
40184- if (copy_from_user(&range, (struct fstrim_range *)arg,
40185+ if (copy_from_user(&range, (struct fstrim_range __user *)arg,
40186 sizeof(range)))
40187 return -EFAULT;
40188
40189@@ -293,7 +293,7 @@ group_add_out:
40190 if (ret < 0)
40191 return ret;
40192
40193- if (copy_to_user((struct fstrim_range *)arg, &range,
40194+ if (copy_to_user((struct fstrim_range __user *)arg, &range,
40195 sizeof(range)))
40196 return -EFAULT;
40197
40198diff -urNp linux-3.0.4/fs/ext4/balloc.c linux-3.0.4/fs/ext4/balloc.c
40199--- linux-3.0.4/fs/ext4/balloc.c 2011-07-21 22:17:23.000000000 -0400
40200+++ linux-3.0.4/fs/ext4/balloc.c 2011-08-23 21:48:14.000000000 -0400
40201@@ -394,8 +394,8 @@ static int ext4_has_free_blocks(struct e
40202 /* Hm, nope. Are (enough) root reserved blocks available? */
40203 if (sbi->s_resuid == current_fsuid() ||
40204 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
40205- capable(CAP_SYS_RESOURCE) ||
40206- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
40207+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
40208+ capable_nolog(CAP_SYS_RESOURCE)) {
40209
40210 if (free_blocks >= (nblocks + dirty_blocks))
40211 return 1;
40212diff -urNp linux-3.0.4/fs/ext4/ext4.h linux-3.0.4/fs/ext4/ext4.h
40213--- linux-3.0.4/fs/ext4/ext4.h 2011-09-02 18:11:21.000000000 -0400
40214+++ linux-3.0.4/fs/ext4/ext4.h 2011-08-23 21:47:56.000000000 -0400
40215@@ -1177,19 +1177,19 @@ struct ext4_sb_info {
40216 unsigned long s_mb_last_start;
40217
40218 /* stats for buddy allocator */
40219- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
40220- atomic_t s_bal_success; /* we found long enough chunks */
40221- atomic_t s_bal_allocated; /* in blocks */
40222- atomic_t s_bal_ex_scanned; /* total extents scanned */
40223- atomic_t s_bal_goals; /* goal hits */
40224- atomic_t s_bal_breaks; /* too long searches */
40225- atomic_t s_bal_2orders; /* 2^order hits */
40226+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
40227+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
40228+ atomic_unchecked_t s_bal_allocated; /* in blocks */
40229+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
40230+ atomic_unchecked_t s_bal_goals; /* goal hits */
40231+ atomic_unchecked_t s_bal_breaks; /* too long searches */
40232+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
40233 spinlock_t s_bal_lock;
40234 unsigned long s_mb_buddies_generated;
40235 unsigned long long s_mb_generation_time;
40236- atomic_t s_mb_lost_chunks;
40237- atomic_t s_mb_preallocated;
40238- atomic_t s_mb_discarded;
40239+ atomic_unchecked_t s_mb_lost_chunks;
40240+ atomic_unchecked_t s_mb_preallocated;
40241+ atomic_unchecked_t s_mb_discarded;
40242 atomic_t s_lock_busy;
40243
40244 /* locality groups */
40245diff -urNp linux-3.0.4/fs/ext4/ioctl.c linux-3.0.4/fs/ext4/ioctl.c
40246--- linux-3.0.4/fs/ext4/ioctl.c 2011-07-21 22:17:23.000000000 -0400
40247+++ linux-3.0.4/fs/ext4/ioctl.c 2011-10-06 04:17:55.000000000 -0400
40248@@ -344,7 +344,7 @@ mext_out:
40249 if (!blk_queue_discard(q))
40250 return -EOPNOTSUPP;
40251
40252- if (copy_from_user(&range, (struct fstrim_range *)arg,
40253+ if (copy_from_user(&range, (struct fstrim_range __user *)arg,
40254 sizeof(range)))
40255 return -EFAULT;
40256
40257@@ -354,7 +354,7 @@ mext_out:
40258 if (ret < 0)
40259 return ret;
40260
40261- if (copy_to_user((struct fstrim_range *)arg, &range,
40262+ if (copy_to_user((struct fstrim_range __user *)arg, &range,
40263 sizeof(range)))
40264 return -EFAULT;
40265
40266diff -urNp linux-3.0.4/fs/ext4/mballoc.c linux-3.0.4/fs/ext4/mballoc.c
40267--- linux-3.0.4/fs/ext4/mballoc.c 2011-09-02 18:11:21.000000000 -0400
40268+++ linux-3.0.4/fs/ext4/mballoc.c 2011-08-23 21:48:14.000000000 -0400
40269@@ -1793,7 +1793,7 @@ void ext4_mb_simple_scan_group(struct ex
40270 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
40271
40272 if (EXT4_SB(sb)->s_mb_stats)
40273- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
40274+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
40275
40276 break;
40277 }
40278@@ -2087,7 +2087,7 @@ repeat:
40279 ac->ac_status = AC_STATUS_CONTINUE;
40280 ac->ac_flags |= EXT4_MB_HINT_FIRST;
40281 cr = 3;
40282- atomic_inc(&sbi->s_mb_lost_chunks);
40283+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
40284 goto repeat;
40285 }
40286 }
40287@@ -2130,6 +2130,8 @@ static int ext4_mb_seq_groups_show(struc
40288 ext4_grpblk_t counters[16];
40289 } sg;
40290
40291+ pax_track_stack();
40292+
40293 group--;
40294 if (group == 0)
40295 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
40296@@ -2553,25 +2555,25 @@ int ext4_mb_release(struct super_block *
40297 if (sbi->s_mb_stats) {
40298 printk(KERN_INFO
40299 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
40300- atomic_read(&sbi->s_bal_allocated),
40301- atomic_read(&sbi->s_bal_reqs),
40302- atomic_read(&sbi->s_bal_success));
40303+ atomic_read_unchecked(&sbi->s_bal_allocated),
40304+ atomic_read_unchecked(&sbi->s_bal_reqs),
40305+ atomic_read_unchecked(&sbi->s_bal_success));
40306 printk(KERN_INFO
40307 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
40308 "%u 2^N hits, %u breaks, %u lost\n",
40309- atomic_read(&sbi->s_bal_ex_scanned),
40310- atomic_read(&sbi->s_bal_goals),
40311- atomic_read(&sbi->s_bal_2orders),
40312- atomic_read(&sbi->s_bal_breaks),
40313- atomic_read(&sbi->s_mb_lost_chunks));
40314+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
40315+ atomic_read_unchecked(&sbi->s_bal_goals),
40316+ atomic_read_unchecked(&sbi->s_bal_2orders),
40317+ atomic_read_unchecked(&sbi->s_bal_breaks),
40318+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
40319 printk(KERN_INFO
40320 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
40321 sbi->s_mb_buddies_generated++,
40322 sbi->s_mb_generation_time);
40323 printk(KERN_INFO
40324 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
40325- atomic_read(&sbi->s_mb_preallocated),
40326- atomic_read(&sbi->s_mb_discarded));
40327+ atomic_read_unchecked(&sbi->s_mb_preallocated),
40328+ atomic_read_unchecked(&sbi->s_mb_discarded));
40329 }
40330
40331 free_percpu(sbi->s_locality_groups);
40332@@ -3041,16 +3043,16 @@ static void ext4_mb_collect_stats(struct
40333 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
40334
40335 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
40336- atomic_inc(&sbi->s_bal_reqs);
40337- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
40338+ atomic_inc_unchecked(&sbi->s_bal_reqs);
40339+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
40340 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
40341- atomic_inc(&sbi->s_bal_success);
40342- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
40343+ atomic_inc_unchecked(&sbi->s_bal_success);
40344+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
40345 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
40346 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
40347- atomic_inc(&sbi->s_bal_goals);
40348+ atomic_inc_unchecked(&sbi->s_bal_goals);
40349 if (ac->ac_found > sbi->s_mb_max_to_scan)
40350- atomic_inc(&sbi->s_bal_breaks);
40351+ atomic_inc_unchecked(&sbi->s_bal_breaks);
40352 }
40353
40354 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
40355@@ -3448,7 +3450,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
40356 trace_ext4_mb_new_inode_pa(ac, pa);
40357
40358 ext4_mb_use_inode_pa(ac, pa);
40359- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
40360+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
40361
40362 ei = EXT4_I(ac->ac_inode);
40363 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
40364@@ -3508,7 +3510,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
40365 trace_ext4_mb_new_group_pa(ac, pa);
40366
40367 ext4_mb_use_group_pa(ac, pa);
40368- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
40369+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
40370
40371 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
40372 lg = ac->ac_lg;
40373@@ -3595,7 +3597,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
40374 * from the bitmap and continue.
40375 */
40376 }
40377- atomic_add(free, &sbi->s_mb_discarded);
40378+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
40379
40380 return err;
40381 }
40382@@ -3613,7 +3615,7 @@ ext4_mb_release_group_pa(struct ext4_bud
40383 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
40384 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
40385 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
40386- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
40387+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
40388 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
40389
40390 return 0;
40391diff -urNp linux-3.0.4/fs/fcntl.c linux-3.0.4/fs/fcntl.c
40392--- linux-3.0.4/fs/fcntl.c 2011-07-21 22:17:23.000000000 -0400
40393+++ linux-3.0.4/fs/fcntl.c 2011-10-06 04:17:55.000000000 -0400
40394@@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct
40395 if (err)
40396 return err;
40397
40398+ if (gr_handle_chroot_fowner(pid, type))
40399+ return -ENOENT;
40400+ if (gr_check_protected_task_fowner(pid, type))
40401+ return -EACCES;
40402+
40403 f_modown(filp, pid, type, force);
40404 return 0;
40405 }
40406@@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
40407
40408 static int f_setown_ex(struct file *filp, unsigned long arg)
40409 {
40410- struct f_owner_ex * __user owner_p = (void * __user)arg;
40411+ struct f_owner_ex __user *owner_p = (void __user *)arg;
40412 struct f_owner_ex owner;
40413 struct pid *pid;
40414 int type;
40415@@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp
40416
40417 static int f_getown_ex(struct file *filp, unsigned long arg)
40418 {
40419- struct f_owner_ex * __user owner_p = (void * __user)arg;
40420+ struct f_owner_ex __user *owner_p = (void __user *)arg;
40421 struct f_owner_ex owner;
40422 int ret = 0;
40423
40424@@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned in
40425 switch (cmd) {
40426 case F_DUPFD:
40427 case F_DUPFD_CLOEXEC:
40428+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
40429 if (arg >= rlimit(RLIMIT_NOFILE))
40430 break;
40431 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
40432@@ -835,14 +841,14 @@ static int __init fcntl_init(void)
40433 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
40434 * is defined as O_NONBLOCK on some platforms and not on others.
40435 */
40436- BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
40437+ BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
40438 O_RDONLY | O_WRONLY | O_RDWR |
40439 O_CREAT | O_EXCL | O_NOCTTY |
40440 O_TRUNC | O_APPEND | /* O_NONBLOCK | */
40441 __O_SYNC | O_DSYNC | FASYNC |
40442 O_DIRECT | O_LARGEFILE | O_DIRECTORY |
40443 O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
40444- __FMODE_EXEC | O_PATH
40445+ __FMODE_EXEC | O_PATH | FMODE_GREXEC
40446 ));
40447
40448 fasync_cache = kmem_cache_create("fasync_cache",
40449diff -urNp linux-3.0.4/fs/fifo.c linux-3.0.4/fs/fifo.c
40450--- linux-3.0.4/fs/fifo.c 2011-07-21 22:17:23.000000000 -0400
40451+++ linux-3.0.4/fs/fifo.c 2011-08-23 21:47:56.000000000 -0400
40452@@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode
40453 */
40454 filp->f_op = &read_pipefifo_fops;
40455 pipe->r_counter++;
40456- if (pipe->readers++ == 0)
40457+ if (atomic_inc_return(&pipe->readers) == 1)
40458 wake_up_partner(inode);
40459
40460- if (!pipe->writers) {
40461+ if (!atomic_read(&pipe->writers)) {
40462 if ((filp->f_flags & O_NONBLOCK)) {
40463 /* suppress POLLHUP until we have
40464 * seen a writer */
40465@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode
40466 * errno=ENXIO when there is no process reading the FIFO.
40467 */
40468 ret = -ENXIO;
40469- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
40470+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
40471 goto err;
40472
40473 filp->f_op = &write_pipefifo_fops;
40474 pipe->w_counter++;
40475- if (!pipe->writers++)
40476+ if (atomic_inc_return(&pipe->writers) == 1)
40477 wake_up_partner(inode);
40478
40479- if (!pipe->readers) {
40480+ if (!atomic_read(&pipe->readers)) {
40481 wait_for_partner(inode, &pipe->r_counter);
40482 if (signal_pending(current))
40483 goto err_wr;
40484@@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode
40485 */
40486 filp->f_op = &rdwr_pipefifo_fops;
40487
40488- pipe->readers++;
40489- pipe->writers++;
40490+ atomic_inc(&pipe->readers);
40491+ atomic_inc(&pipe->writers);
40492 pipe->r_counter++;
40493 pipe->w_counter++;
40494- if (pipe->readers == 1 || pipe->writers == 1)
40495+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
40496 wake_up_partner(inode);
40497 break;
40498
40499@@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode
40500 return 0;
40501
40502 err_rd:
40503- if (!--pipe->readers)
40504+ if (atomic_dec_and_test(&pipe->readers))
40505 wake_up_interruptible(&pipe->wait);
40506 ret = -ERESTARTSYS;
40507 goto err;
40508
40509 err_wr:
40510- if (!--pipe->writers)
40511+ if (atomic_dec_and_test(&pipe->writers))
40512 wake_up_interruptible(&pipe->wait);
40513 ret = -ERESTARTSYS;
40514 goto err;
40515
40516 err:
40517- if (!pipe->readers && !pipe->writers)
40518+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
40519 free_pipe_info(inode);
40520
40521 err_nocleanup:
40522diff -urNp linux-3.0.4/fs/file.c linux-3.0.4/fs/file.c
40523--- linux-3.0.4/fs/file.c 2011-07-21 22:17:23.000000000 -0400
40524+++ linux-3.0.4/fs/file.c 2011-08-23 21:48:14.000000000 -0400
40525@@ -15,6 +15,7 @@
40526 #include <linux/slab.h>
40527 #include <linux/vmalloc.h>
40528 #include <linux/file.h>
40529+#include <linux/security.h>
40530 #include <linux/fdtable.h>
40531 #include <linux/bitops.h>
40532 #include <linux/interrupt.h>
40533@@ -254,6 +255,7 @@ int expand_files(struct files_struct *fi
40534 * N.B. For clone tasks sharing a files structure, this test
40535 * will limit the total number of files that can be opened.
40536 */
40537+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
40538 if (nr >= rlimit(RLIMIT_NOFILE))
40539 return -EMFILE;
40540
40541diff -urNp linux-3.0.4/fs/filesystems.c linux-3.0.4/fs/filesystems.c
40542--- linux-3.0.4/fs/filesystems.c 2011-07-21 22:17:23.000000000 -0400
40543+++ linux-3.0.4/fs/filesystems.c 2011-08-23 21:48:14.000000000 -0400
40544@@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(con
40545 int len = dot ? dot - name : strlen(name);
40546
40547 fs = __get_fs_type(name, len);
40548+
40549+#ifdef CONFIG_GRKERNSEC_MODHARDEN
40550+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
40551+#else
40552 if (!fs && (request_module("%.*s", len, name) == 0))
40553+#endif
40554 fs = __get_fs_type(name, len);
40555
40556 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
40557diff -urNp linux-3.0.4/fs/fscache/cookie.c linux-3.0.4/fs/fscache/cookie.c
40558--- linux-3.0.4/fs/fscache/cookie.c 2011-07-21 22:17:23.000000000 -0400
40559+++ linux-3.0.4/fs/fscache/cookie.c 2011-08-23 21:47:56.000000000 -0400
40560@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
40561 parent ? (char *) parent->def->name : "<no-parent>",
40562 def->name, netfs_data);
40563
40564- fscache_stat(&fscache_n_acquires);
40565+ fscache_stat_unchecked(&fscache_n_acquires);
40566
40567 /* if there's no parent cookie, then we don't create one here either */
40568 if (!parent) {
40569- fscache_stat(&fscache_n_acquires_null);
40570+ fscache_stat_unchecked(&fscache_n_acquires_null);
40571 _leave(" [no parent]");
40572 return NULL;
40573 }
40574@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
40575 /* allocate and initialise a cookie */
40576 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
40577 if (!cookie) {
40578- fscache_stat(&fscache_n_acquires_oom);
40579+ fscache_stat_unchecked(&fscache_n_acquires_oom);
40580 _leave(" [ENOMEM]");
40581 return NULL;
40582 }
40583@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
40584
40585 switch (cookie->def->type) {
40586 case FSCACHE_COOKIE_TYPE_INDEX:
40587- fscache_stat(&fscache_n_cookie_index);
40588+ fscache_stat_unchecked(&fscache_n_cookie_index);
40589 break;
40590 case FSCACHE_COOKIE_TYPE_DATAFILE:
40591- fscache_stat(&fscache_n_cookie_data);
40592+ fscache_stat_unchecked(&fscache_n_cookie_data);
40593 break;
40594 default:
40595- fscache_stat(&fscache_n_cookie_special);
40596+ fscache_stat_unchecked(&fscache_n_cookie_special);
40597 break;
40598 }
40599
40600@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
40601 if (fscache_acquire_non_index_cookie(cookie) < 0) {
40602 atomic_dec(&parent->n_children);
40603 __fscache_cookie_put(cookie);
40604- fscache_stat(&fscache_n_acquires_nobufs);
40605+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
40606 _leave(" = NULL");
40607 return NULL;
40608 }
40609 }
40610
40611- fscache_stat(&fscache_n_acquires_ok);
40612+ fscache_stat_unchecked(&fscache_n_acquires_ok);
40613 _leave(" = %p", cookie);
40614 return cookie;
40615 }
40616@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
40617 cache = fscache_select_cache_for_object(cookie->parent);
40618 if (!cache) {
40619 up_read(&fscache_addremove_sem);
40620- fscache_stat(&fscache_n_acquires_no_cache);
40621+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
40622 _leave(" = -ENOMEDIUM [no cache]");
40623 return -ENOMEDIUM;
40624 }
40625@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
40626 object = cache->ops->alloc_object(cache, cookie);
40627 fscache_stat_d(&fscache_n_cop_alloc_object);
40628 if (IS_ERR(object)) {
40629- fscache_stat(&fscache_n_object_no_alloc);
40630+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
40631 ret = PTR_ERR(object);
40632 goto error;
40633 }
40634
40635- fscache_stat(&fscache_n_object_alloc);
40636+ fscache_stat_unchecked(&fscache_n_object_alloc);
40637
40638 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
40639
40640@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
40641 struct fscache_object *object;
40642 struct hlist_node *_p;
40643
40644- fscache_stat(&fscache_n_updates);
40645+ fscache_stat_unchecked(&fscache_n_updates);
40646
40647 if (!cookie) {
40648- fscache_stat(&fscache_n_updates_null);
40649+ fscache_stat_unchecked(&fscache_n_updates_null);
40650 _leave(" [no cookie]");
40651 return;
40652 }
40653@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
40654 struct fscache_object *object;
40655 unsigned long event;
40656
40657- fscache_stat(&fscache_n_relinquishes);
40658+ fscache_stat_unchecked(&fscache_n_relinquishes);
40659 if (retire)
40660- fscache_stat(&fscache_n_relinquishes_retire);
40661+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
40662
40663 if (!cookie) {
40664- fscache_stat(&fscache_n_relinquishes_null);
40665+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
40666 _leave(" [no cookie]");
40667 return;
40668 }
40669@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
40670
40671 /* wait for the cookie to finish being instantiated (or to fail) */
40672 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
40673- fscache_stat(&fscache_n_relinquishes_waitcrt);
40674+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
40675 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
40676 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
40677 }
40678diff -urNp linux-3.0.4/fs/fscache/internal.h linux-3.0.4/fs/fscache/internal.h
40679--- linux-3.0.4/fs/fscache/internal.h 2011-07-21 22:17:23.000000000 -0400
40680+++ linux-3.0.4/fs/fscache/internal.h 2011-08-23 21:47:56.000000000 -0400
40681@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
40682 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
40683 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
40684
40685-extern atomic_t fscache_n_op_pend;
40686-extern atomic_t fscache_n_op_run;
40687-extern atomic_t fscache_n_op_enqueue;
40688-extern atomic_t fscache_n_op_deferred_release;
40689-extern atomic_t fscache_n_op_release;
40690-extern atomic_t fscache_n_op_gc;
40691-extern atomic_t fscache_n_op_cancelled;
40692-extern atomic_t fscache_n_op_rejected;
40693-
40694-extern atomic_t fscache_n_attr_changed;
40695-extern atomic_t fscache_n_attr_changed_ok;
40696-extern atomic_t fscache_n_attr_changed_nobufs;
40697-extern atomic_t fscache_n_attr_changed_nomem;
40698-extern atomic_t fscache_n_attr_changed_calls;
40699-
40700-extern atomic_t fscache_n_allocs;
40701-extern atomic_t fscache_n_allocs_ok;
40702-extern atomic_t fscache_n_allocs_wait;
40703-extern atomic_t fscache_n_allocs_nobufs;
40704-extern atomic_t fscache_n_allocs_intr;
40705-extern atomic_t fscache_n_allocs_object_dead;
40706-extern atomic_t fscache_n_alloc_ops;
40707-extern atomic_t fscache_n_alloc_op_waits;
40708-
40709-extern atomic_t fscache_n_retrievals;
40710-extern atomic_t fscache_n_retrievals_ok;
40711-extern atomic_t fscache_n_retrievals_wait;
40712-extern atomic_t fscache_n_retrievals_nodata;
40713-extern atomic_t fscache_n_retrievals_nobufs;
40714-extern atomic_t fscache_n_retrievals_intr;
40715-extern atomic_t fscache_n_retrievals_nomem;
40716-extern atomic_t fscache_n_retrievals_object_dead;
40717-extern atomic_t fscache_n_retrieval_ops;
40718-extern atomic_t fscache_n_retrieval_op_waits;
40719-
40720-extern atomic_t fscache_n_stores;
40721-extern atomic_t fscache_n_stores_ok;
40722-extern atomic_t fscache_n_stores_again;
40723-extern atomic_t fscache_n_stores_nobufs;
40724-extern atomic_t fscache_n_stores_oom;
40725-extern atomic_t fscache_n_store_ops;
40726-extern atomic_t fscache_n_store_calls;
40727-extern atomic_t fscache_n_store_pages;
40728-extern atomic_t fscache_n_store_radix_deletes;
40729-extern atomic_t fscache_n_store_pages_over_limit;
40730-
40731-extern atomic_t fscache_n_store_vmscan_not_storing;
40732-extern atomic_t fscache_n_store_vmscan_gone;
40733-extern atomic_t fscache_n_store_vmscan_busy;
40734-extern atomic_t fscache_n_store_vmscan_cancelled;
40735-
40736-extern atomic_t fscache_n_marks;
40737-extern atomic_t fscache_n_uncaches;
40738-
40739-extern atomic_t fscache_n_acquires;
40740-extern atomic_t fscache_n_acquires_null;
40741-extern atomic_t fscache_n_acquires_no_cache;
40742-extern atomic_t fscache_n_acquires_ok;
40743-extern atomic_t fscache_n_acquires_nobufs;
40744-extern atomic_t fscache_n_acquires_oom;
40745-
40746-extern atomic_t fscache_n_updates;
40747-extern atomic_t fscache_n_updates_null;
40748-extern atomic_t fscache_n_updates_run;
40749-
40750-extern atomic_t fscache_n_relinquishes;
40751-extern atomic_t fscache_n_relinquishes_null;
40752-extern atomic_t fscache_n_relinquishes_waitcrt;
40753-extern atomic_t fscache_n_relinquishes_retire;
40754-
40755-extern atomic_t fscache_n_cookie_index;
40756-extern atomic_t fscache_n_cookie_data;
40757-extern atomic_t fscache_n_cookie_special;
40758-
40759-extern atomic_t fscache_n_object_alloc;
40760-extern atomic_t fscache_n_object_no_alloc;
40761-extern atomic_t fscache_n_object_lookups;
40762-extern atomic_t fscache_n_object_lookups_negative;
40763-extern atomic_t fscache_n_object_lookups_positive;
40764-extern atomic_t fscache_n_object_lookups_timed_out;
40765-extern atomic_t fscache_n_object_created;
40766-extern atomic_t fscache_n_object_avail;
40767-extern atomic_t fscache_n_object_dead;
40768-
40769-extern atomic_t fscache_n_checkaux_none;
40770-extern atomic_t fscache_n_checkaux_okay;
40771-extern atomic_t fscache_n_checkaux_update;
40772-extern atomic_t fscache_n_checkaux_obsolete;
40773+extern atomic_unchecked_t fscache_n_op_pend;
40774+extern atomic_unchecked_t fscache_n_op_run;
40775+extern atomic_unchecked_t fscache_n_op_enqueue;
40776+extern atomic_unchecked_t fscache_n_op_deferred_release;
40777+extern atomic_unchecked_t fscache_n_op_release;
40778+extern atomic_unchecked_t fscache_n_op_gc;
40779+extern atomic_unchecked_t fscache_n_op_cancelled;
40780+extern atomic_unchecked_t fscache_n_op_rejected;
40781+
40782+extern atomic_unchecked_t fscache_n_attr_changed;
40783+extern atomic_unchecked_t fscache_n_attr_changed_ok;
40784+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
40785+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
40786+extern atomic_unchecked_t fscache_n_attr_changed_calls;
40787+
40788+extern atomic_unchecked_t fscache_n_allocs;
40789+extern atomic_unchecked_t fscache_n_allocs_ok;
40790+extern atomic_unchecked_t fscache_n_allocs_wait;
40791+extern atomic_unchecked_t fscache_n_allocs_nobufs;
40792+extern atomic_unchecked_t fscache_n_allocs_intr;
40793+extern atomic_unchecked_t fscache_n_allocs_object_dead;
40794+extern atomic_unchecked_t fscache_n_alloc_ops;
40795+extern atomic_unchecked_t fscache_n_alloc_op_waits;
40796+
40797+extern atomic_unchecked_t fscache_n_retrievals;
40798+extern atomic_unchecked_t fscache_n_retrievals_ok;
40799+extern atomic_unchecked_t fscache_n_retrievals_wait;
40800+extern atomic_unchecked_t fscache_n_retrievals_nodata;
40801+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
40802+extern atomic_unchecked_t fscache_n_retrievals_intr;
40803+extern atomic_unchecked_t fscache_n_retrievals_nomem;
40804+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
40805+extern atomic_unchecked_t fscache_n_retrieval_ops;
40806+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
40807+
40808+extern atomic_unchecked_t fscache_n_stores;
40809+extern atomic_unchecked_t fscache_n_stores_ok;
40810+extern atomic_unchecked_t fscache_n_stores_again;
40811+extern atomic_unchecked_t fscache_n_stores_nobufs;
40812+extern atomic_unchecked_t fscache_n_stores_oom;
40813+extern atomic_unchecked_t fscache_n_store_ops;
40814+extern atomic_unchecked_t fscache_n_store_calls;
40815+extern atomic_unchecked_t fscache_n_store_pages;
40816+extern atomic_unchecked_t fscache_n_store_radix_deletes;
40817+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
40818+
40819+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
40820+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
40821+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
40822+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
40823+
40824+extern atomic_unchecked_t fscache_n_marks;
40825+extern atomic_unchecked_t fscache_n_uncaches;
40826+
40827+extern atomic_unchecked_t fscache_n_acquires;
40828+extern atomic_unchecked_t fscache_n_acquires_null;
40829+extern atomic_unchecked_t fscache_n_acquires_no_cache;
40830+extern atomic_unchecked_t fscache_n_acquires_ok;
40831+extern atomic_unchecked_t fscache_n_acquires_nobufs;
40832+extern atomic_unchecked_t fscache_n_acquires_oom;
40833+
40834+extern atomic_unchecked_t fscache_n_updates;
40835+extern atomic_unchecked_t fscache_n_updates_null;
40836+extern atomic_unchecked_t fscache_n_updates_run;
40837+
40838+extern atomic_unchecked_t fscache_n_relinquishes;
40839+extern atomic_unchecked_t fscache_n_relinquishes_null;
40840+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
40841+extern atomic_unchecked_t fscache_n_relinquishes_retire;
40842+
40843+extern atomic_unchecked_t fscache_n_cookie_index;
40844+extern atomic_unchecked_t fscache_n_cookie_data;
40845+extern atomic_unchecked_t fscache_n_cookie_special;
40846+
40847+extern atomic_unchecked_t fscache_n_object_alloc;
40848+extern atomic_unchecked_t fscache_n_object_no_alloc;
40849+extern atomic_unchecked_t fscache_n_object_lookups;
40850+extern atomic_unchecked_t fscache_n_object_lookups_negative;
40851+extern atomic_unchecked_t fscache_n_object_lookups_positive;
40852+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
40853+extern atomic_unchecked_t fscache_n_object_created;
40854+extern atomic_unchecked_t fscache_n_object_avail;
40855+extern atomic_unchecked_t fscache_n_object_dead;
40856+
40857+extern atomic_unchecked_t fscache_n_checkaux_none;
40858+extern atomic_unchecked_t fscache_n_checkaux_okay;
40859+extern atomic_unchecked_t fscache_n_checkaux_update;
40860+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
40861
40862 extern atomic_t fscache_n_cop_alloc_object;
40863 extern atomic_t fscache_n_cop_lookup_object;
40864@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t
40865 atomic_inc(stat);
40866 }
40867
40868+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
40869+{
40870+ atomic_inc_unchecked(stat);
40871+}
40872+
40873 static inline void fscache_stat_d(atomic_t *stat)
40874 {
40875 atomic_dec(stat);
40876@@ -267,6 +272,7 @@ extern const struct file_operations fsca
40877
40878 #define __fscache_stat(stat) (NULL)
40879 #define fscache_stat(stat) do {} while (0)
40880+#define fscache_stat_unchecked(stat) do {} while (0)
40881 #define fscache_stat_d(stat) do {} while (0)
40882 #endif
40883
40884diff -urNp linux-3.0.4/fs/fscache/object.c linux-3.0.4/fs/fscache/object.c
40885--- linux-3.0.4/fs/fscache/object.c 2011-07-21 22:17:23.000000000 -0400
40886+++ linux-3.0.4/fs/fscache/object.c 2011-08-23 21:47:56.000000000 -0400
40887@@ -128,7 +128,7 @@ static void fscache_object_state_machine
40888 /* update the object metadata on disk */
40889 case FSCACHE_OBJECT_UPDATING:
40890 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
40891- fscache_stat(&fscache_n_updates_run);
40892+ fscache_stat_unchecked(&fscache_n_updates_run);
40893 fscache_stat(&fscache_n_cop_update_object);
40894 object->cache->ops->update_object(object);
40895 fscache_stat_d(&fscache_n_cop_update_object);
40896@@ -217,7 +217,7 @@ static void fscache_object_state_machine
40897 spin_lock(&object->lock);
40898 object->state = FSCACHE_OBJECT_DEAD;
40899 spin_unlock(&object->lock);
40900- fscache_stat(&fscache_n_object_dead);
40901+ fscache_stat_unchecked(&fscache_n_object_dead);
40902 goto terminal_transit;
40903
40904 /* handle the parent cache of this object being withdrawn from
40905@@ -232,7 +232,7 @@ static void fscache_object_state_machine
40906 spin_lock(&object->lock);
40907 object->state = FSCACHE_OBJECT_DEAD;
40908 spin_unlock(&object->lock);
40909- fscache_stat(&fscache_n_object_dead);
40910+ fscache_stat_unchecked(&fscache_n_object_dead);
40911 goto terminal_transit;
40912
40913 /* complain about the object being woken up once it is
40914@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct
40915 parent->cookie->def->name, cookie->def->name,
40916 object->cache->tag->name);
40917
40918- fscache_stat(&fscache_n_object_lookups);
40919+ fscache_stat_unchecked(&fscache_n_object_lookups);
40920 fscache_stat(&fscache_n_cop_lookup_object);
40921 ret = object->cache->ops->lookup_object(object);
40922 fscache_stat_d(&fscache_n_cop_lookup_object);
40923@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct
40924 if (ret == -ETIMEDOUT) {
40925 /* probably stuck behind another object, so move this one to
40926 * the back of the queue */
40927- fscache_stat(&fscache_n_object_lookups_timed_out);
40928+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
40929 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
40930 }
40931
40932@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(stru
40933
40934 spin_lock(&object->lock);
40935 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
40936- fscache_stat(&fscache_n_object_lookups_negative);
40937+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
40938
40939 /* transit here to allow write requests to begin stacking up
40940 * and read requests to begin returning ENODATA */
40941@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fsca
40942 * result, in which case there may be data available */
40943 spin_lock(&object->lock);
40944 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
40945- fscache_stat(&fscache_n_object_lookups_positive);
40946+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
40947
40948 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
40949
40950@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fsca
40951 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
40952 } else {
40953 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
40954- fscache_stat(&fscache_n_object_created);
40955+ fscache_stat_unchecked(&fscache_n_object_created);
40956
40957 object->state = FSCACHE_OBJECT_AVAILABLE;
40958 spin_unlock(&object->lock);
40959@@ -602,7 +602,7 @@ static void fscache_object_available(str
40960 fscache_enqueue_dependents(object);
40961
40962 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
40963- fscache_stat(&fscache_n_object_avail);
40964+ fscache_stat_unchecked(&fscache_n_object_avail);
40965
40966 _leave("");
40967 }
40968@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
40969 enum fscache_checkaux result;
40970
40971 if (!object->cookie->def->check_aux) {
40972- fscache_stat(&fscache_n_checkaux_none);
40973+ fscache_stat_unchecked(&fscache_n_checkaux_none);
40974 return FSCACHE_CHECKAUX_OKAY;
40975 }
40976
40977@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
40978 switch (result) {
40979 /* entry okay as is */
40980 case FSCACHE_CHECKAUX_OKAY:
40981- fscache_stat(&fscache_n_checkaux_okay);
40982+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
40983 break;
40984
40985 /* entry requires update */
40986 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
40987- fscache_stat(&fscache_n_checkaux_update);
40988+ fscache_stat_unchecked(&fscache_n_checkaux_update);
40989 break;
40990
40991 /* entry requires deletion */
40992 case FSCACHE_CHECKAUX_OBSOLETE:
40993- fscache_stat(&fscache_n_checkaux_obsolete);
40994+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
40995 break;
40996
40997 default:
40998diff -urNp linux-3.0.4/fs/fscache/operation.c linux-3.0.4/fs/fscache/operation.c
40999--- linux-3.0.4/fs/fscache/operation.c 2011-07-21 22:17:23.000000000 -0400
41000+++ linux-3.0.4/fs/fscache/operation.c 2011-08-23 21:47:56.000000000 -0400
41001@@ -17,7 +17,7 @@
41002 #include <linux/slab.h>
41003 #include "internal.h"
41004
41005-atomic_t fscache_op_debug_id;
41006+atomic_unchecked_t fscache_op_debug_id;
41007 EXPORT_SYMBOL(fscache_op_debug_id);
41008
41009 /**
41010@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fs
41011 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
41012 ASSERTCMP(atomic_read(&op->usage), >, 0);
41013
41014- fscache_stat(&fscache_n_op_enqueue);
41015+ fscache_stat_unchecked(&fscache_n_op_enqueue);
41016 switch (op->flags & FSCACHE_OP_TYPE) {
41017 case FSCACHE_OP_ASYNC:
41018 _debug("queue async");
41019@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscach
41020 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
41021 if (op->processor)
41022 fscache_enqueue_operation(op);
41023- fscache_stat(&fscache_n_op_run);
41024+ fscache_stat_unchecked(&fscache_n_op_run);
41025 }
41026
41027 /*
41028@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct f
41029 if (object->n_ops > 1) {
41030 atomic_inc(&op->usage);
41031 list_add_tail(&op->pend_link, &object->pending_ops);
41032- fscache_stat(&fscache_n_op_pend);
41033+ fscache_stat_unchecked(&fscache_n_op_pend);
41034 } else if (!list_empty(&object->pending_ops)) {
41035 atomic_inc(&op->usage);
41036 list_add_tail(&op->pend_link, &object->pending_ops);
41037- fscache_stat(&fscache_n_op_pend);
41038+ fscache_stat_unchecked(&fscache_n_op_pend);
41039 fscache_start_operations(object);
41040 } else {
41041 ASSERTCMP(object->n_in_progress, ==, 0);
41042@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct f
41043 object->n_exclusive++; /* reads and writes must wait */
41044 atomic_inc(&op->usage);
41045 list_add_tail(&op->pend_link, &object->pending_ops);
41046- fscache_stat(&fscache_n_op_pend);
41047+ fscache_stat_unchecked(&fscache_n_op_pend);
41048 ret = 0;
41049 } else {
41050 /* not allowed to submit ops in any other state */
41051@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_obj
41052 if (object->n_exclusive > 0) {
41053 atomic_inc(&op->usage);
41054 list_add_tail(&op->pend_link, &object->pending_ops);
41055- fscache_stat(&fscache_n_op_pend);
41056+ fscache_stat_unchecked(&fscache_n_op_pend);
41057 } else if (!list_empty(&object->pending_ops)) {
41058 atomic_inc(&op->usage);
41059 list_add_tail(&op->pend_link, &object->pending_ops);
41060- fscache_stat(&fscache_n_op_pend);
41061+ fscache_stat_unchecked(&fscache_n_op_pend);
41062 fscache_start_operations(object);
41063 } else {
41064 ASSERTCMP(object->n_exclusive, ==, 0);
41065@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_obj
41066 object->n_ops++;
41067 atomic_inc(&op->usage);
41068 list_add_tail(&op->pend_link, &object->pending_ops);
41069- fscache_stat(&fscache_n_op_pend);
41070+ fscache_stat_unchecked(&fscache_n_op_pend);
41071 ret = 0;
41072 } else if (object->state == FSCACHE_OBJECT_DYING ||
41073 object->state == FSCACHE_OBJECT_LC_DYING ||
41074 object->state == FSCACHE_OBJECT_WITHDRAWING) {
41075- fscache_stat(&fscache_n_op_rejected);
41076+ fscache_stat_unchecked(&fscache_n_op_rejected);
41077 ret = -ENOBUFS;
41078 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
41079 fscache_report_unexpected_submission(object, op, ostate);
41080@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_ope
41081
41082 ret = -EBUSY;
41083 if (!list_empty(&op->pend_link)) {
41084- fscache_stat(&fscache_n_op_cancelled);
41085+ fscache_stat_unchecked(&fscache_n_op_cancelled);
41086 list_del_init(&op->pend_link);
41087 object->n_ops--;
41088 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
41089@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscach
41090 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
41091 BUG();
41092
41093- fscache_stat(&fscache_n_op_release);
41094+ fscache_stat_unchecked(&fscache_n_op_release);
41095
41096 if (op->release) {
41097 op->release(op);
41098@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscach
41099 * lock, and defer it otherwise */
41100 if (!spin_trylock(&object->lock)) {
41101 _debug("defer put");
41102- fscache_stat(&fscache_n_op_deferred_release);
41103+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
41104
41105 cache = object->cache;
41106 spin_lock(&cache->op_gc_list_lock);
41107@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_st
41108
41109 _debug("GC DEFERRED REL OBJ%x OP%x",
41110 object->debug_id, op->debug_id);
41111- fscache_stat(&fscache_n_op_gc);
41112+ fscache_stat_unchecked(&fscache_n_op_gc);
41113
41114 ASSERTCMP(atomic_read(&op->usage), ==, 0);
41115
41116diff -urNp linux-3.0.4/fs/fscache/page.c linux-3.0.4/fs/fscache/page.c
41117--- linux-3.0.4/fs/fscache/page.c 2011-07-21 22:17:23.000000000 -0400
41118+++ linux-3.0.4/fs/fscache/page.c 2011-08-23 21:47:56.000000000 -0400
41119@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct
41120 val = radix_tree_lookup(&cookie->stores, page->index);
41121 if (!val) {
41122 rcu_read_unlock();
41123- fscache_stat(&fscache_n_store_vmscan_not_storing);
41124+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
41125 __fscache_uncache_page(cookie, page);
41126 return true;
41127 }
41128@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct
41129 spin_unlock(&cookie->stores_lock);
41130
41131 if (xpage) {
41132- fscache_stat(&fscache_n_store_vmscan_cancelled);
41133- fscache_stat(&fscache_n_store_radix_deletes);
41134+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
41135+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
41136 ASSERTCMP(xpage, ==, page);
41137 } else {
41138- fscache_stat(&fscache_n_store_vmscan_gone);
41139+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
41140 }
41141
41142 wake_up_bit(&cookie->flags, 0);
41143@@ -107,7 +107,7 @@ page_busy:
41144 /* we might want to wait here, but that could deadlock the allocator as
41145 * the work threads writing to the cache may all end up sleeping
41146 * on memory allocation */
41147- fscache_stat(&fscache_n_store_vmscan_busy);
41148+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
41149 return false;
41150 }
41151 EXPORT_SYMBOL(__fscache_maybe_release_page);
41152@@ -131,7 +131,7 @@ static void fscache_end_page_write(struc
41153 FSCACHE_COOKIE_STORING_TAG);
41154 if (!radix_tree_tag_get(&cookie->stores, page->index,
41155 FSCACHE_COOKIE_PENDING_TAG)) {
41156- fscache_stat(&fscache_n_store_radix_deletes);
41157+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
41158 xpage = radix_tree_delete(&cookie->stores, page->index);
41159 }
41160 spin_unlock(&cookie->stores_lock);
41161@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(stru
41162
41163 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
41164
41165- fscache_stat(&fscache_n_attr_changed_calls);
41166+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
41167
41168 if (fscache_object_is_active(object)) {
41169 fscache_stat(&fscache_n_cop_attr_changed);
41170@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscach
41171
41172 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
41173
41174- fscache_stat(&fscache_n_attr_changed);
41175+ fscache_stat_unchecked(&fscache_n_attr_changed);
41176
41177 op = kzalloc(sizeof(*op), GFP_KERNEL);
41178 if (!op) {
41179- fscache_stat(&fscache_n_attr_changed_nomem);
41180+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
41181 _leave(" = -ENOMEM");
41182 return -ENOMEM;
41183 }
41184@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscach
41185 if (fscache_submit_exclusive_op(object, op) < 0)
41186 goto nobufs;
41187 spin_unlock(&cookie->lock);
41188- fscache_stat(&fscache_n_attr_changed_ok);
41189+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
41190 fscache_put_operation(op);
41191 _leave(" = 0");
41192 return 0;
41193@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscach
41194 nobufs:
41195 spin_unlock(&cookie->lock);
41196 kfree(op);
41197- fscache_stat(&fscache_n_attr_changed_nobufs);
41198+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
41199 _leave(" = %d", -ENOBUFS);
41200 return -ENOBUFS;
41201 }
41202@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache
41203 /* allocate a retrieval operation and attempt to submit it */
41204 op = kzalloc(sizeof(*op), GFP_NOIO);
41205 if (!op) {
41206- fscache_stat(&fscache_n_retrievals_nomem);
41207+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
41208 return NULL;
41209 }
41210
41211@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_loo
41212 return 0;
41213 }
41214
41215- fscache_stat(&fscache_n_retrievals_wait);
41216+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
41217
41218 jif = jiffies;
41219 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
41220 fscache_wait_bit_interruptible,
41221 TASK_INTERRUPTIBLE) != 0) {
41222- fscache_stat(&fscache_n_retrievals_intr);
41223+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
41224 _leave(" = -ERESTARTSYS");
41225 return -ERESTARTSYS;
41226 }
41227@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_loo
41228 */
41229 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
41230 struct fscache_retrieval *op,
41231- atomic_t *stat_op_waits,
41232- atomic_t *stat_object_dead)
41233+ atomic_unchecked_t *stat_op_waits,
41234+ atomic_unchecked_t *stat_object_dead)
41235 {
41236 int ret;
41237
41238@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_ac
41239 goto check_if_dead;
41240
41241 _debug(">>> WT");
41242- fscache_stat(stat_op_waits);
41243+ fscache_stat_unchecked(stat_op_waits);
41244 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
41245 fscache_wait_bit_interruptible,
41246 TASK_INTERRUPTIBLE) < 0) {
41247@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_ac
41248
41249 check_if_dead:
41250 if (unlikely(fscache_object_is_dead(object))) {
41251- fscache_stat(stat_object_dead);
41252+ fscache_stat_unchecked(stat_object_dead);
41253 return -ENOBUFS;
41254 }
41255 return 0;
41256@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct
41257
41258 _enter("%p,%p,,,", cookie, page);
41259
41260- fscache_stat(&fscache_n_retrievals);
41261+ fscache_stat_unchecked(&fscache_n_retrievals);
41262
41263 if (hlist_empty(&cookie->backing_objects))
41264 goto nobufs;
41265@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct
41266 goto nobufs_unlock;
41267 spin_unlock(&cookie->lock);
41268
41269- fscache_stat(&fscache_n_retrieval_ops);
41270+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
41271
41272 /* pin the netfs read context in case we need to do the actual netfs
41273 * read because we've encountered a cache read failure */
41274@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct
41275
41276 error:
41277 if (ret == -ENOMEM)
41278- fscache_stat(&fscache_n_retrievals_nomem);
41279+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
41280 else if (ret == -ERESTARTSYS)
41281- fscache_stat(&fscache_n_retrievals_intr);
41282+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
41283 else if (ret == -ENODATA)
41284- fscache_stat(&fscache_n_retrievals_nodata);
41285+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
41286 else if (ret < 0)
41287- fscache_stat(&fscache_n_retrievals_nobufs);
41288+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
41289 else
41290- fscache_stat(&fscache_n_retrievals_ok);
41291+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
41292
41293 fscache_put_retrieval(op);
41294 _leave(" = %d", ret);
41295@@ -429,7 +429,7 @@ nobufs_unlock:
41296 spin_unlock(&cookie->lock);
41297 kfree(op);
41298 nobufs:
41299- fscache_stat(&fscache_n_retrievals_nobufs);
41300+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
41301 _leave(" = -ENOBUFS");
41302 return -ENOBUFS;
41303 }
41304@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct
41305
41306 _enter("%p,,%d,,,", cookie, *nr_pages);
41307
41308- fscache_stat(&fscache_n_retrievals);
41309+ fscache_stat_unchecked(&fscache_n_retrievals);
41310
41311 if (hlist_empty(&cookie->backing_objects))
41312 goto nobufs;
41313@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct
41314 goto nobufs_unlock;
41315 spin_unlock(&cookie->lock);
41316
41317- fscache_stat(&fscache_n_retrieval_ops);
41318+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
41319
41320 /* pin the netfs read context in case we need to do the actual netfs
41321 * read because we've encountered a cache read failure */
41322@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct
41323
41324 error:
41325 if (ret == -ENOMEM)
41326- fscache_stat(&fscache_n_retrievals_nomem);
41327+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
41328 else if (ret == -ERESTARTSYS)
41329- fscache_stat(&fscache_n_retrievals_intr);
41330+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
41331 else if (ret == -ENODATA)
41332- fscache_stat(&fscache_n_retrievals_nodata);
41333+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
41334 else if (ret < 0)
41335- fscache_stat(&fscache_n_retrievals_nobufs);
41336+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
41337 else
41338- fscache_stat(&fscache_n_retrievals_ok);
41339+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
41340
41341 fscache_put_retrieval(op);
41342 _leave(" = %d", ret);
41343@@ -545,7 +545,7 @@ nobufs_unlock:
41344 spin_unlock(&cookie->lock);
41345 kfree(op);
41346 nobufs:
41347- fscache_stat(&fscache_n_retrievals_nobufs);
41348+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
41349 _leave(" = -ENOBUFS");
41350 return -ENOBUFS;
41351 }
41352@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_
41353
41354 _enter("%p,%p,,,", cookie, page);
41355
41356- fscache_stat(&fscache_n_allocs);
41357+ fscache_stat_unchecked(&fscache_n_allocs);
41358
41359 if (hlist_empty(&cookie->backing_objects))
41360 goto nobufs;
41361@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_
41362 goto nobufs_unlock;
41363 spin_unlock(&cookie->lock);
41364
41365- fscache_stat(&fscache_n_alloc_ops);
41366+ fscache_stat_unchecked(&fscache_n_alloc_ops);
41367
41368 ret = fscache_wait_for_retrieval_activation(
41369 object, op,
41370@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_
41371
41372 error:
41373 if (ret == -ERESTARTSYS)
41374- fscache_stat(&fscache_n_allocs_intr);
41375+ fscache_stat_unchecked(&fscache_n_allocs_intr);
41376 else if (ret < 0)
41377- fscache_stat(&fscache_n_allocs_nobufs);
41378+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
41379 else
41380- fscache_stat(&fscache_n_allocs_ok);
41381+ fscache_stat_unchecked(&fscache_n_allocs_ok);
41382
41383 fscache_put_retrieval(op);
41384 _leave(" = %d", ret);
41385@@ -625,7 +625,7 @@ nobufs_unlock:
41386 spin_unlock(&cookie->lock);
41387 kfree(op);
41388 nobufs:
41389- fscache_stat(&fscache_n_allocs_nobufs);
41390+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
41391 _leave(" = -ENOBUFS");
41392 return -ENOBUFS;
41393 }
41394@@ -666,7 +666,7 @@ static void fscache_write_op(struct fsca
41395
41396 spin_lock(&cookie->stores_lock);
41397
41398- fscache_stat(&fscache_n_store_calls);
41399+ fscache_stat_unchecked(&fscache_n_store_calls);
41400
41401 /* find a page to store */
41402 page = NULL;
41403@@ -677,7 +677,7 @@ static void fscache_write_op(struct fsca
41404 page = results[0];
41405 _debug("gang %d [%lx]", n, page->index);
41406 if (page->index > op->store_limit) {
41407- fscache_stat(&fscache_n_store_pages_over_limit);
41408+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
41409 goto superseded;
41410 }
41411
41412@@ -689,7 +689,7 @@ static void fscache_write_op(struct fsca
41413 spin_unlock(&cookie->stores_lock);
41414 spin_unlock(&object->lock);
41415
41416- fscache_stat(&fscache_n_store_pages);
41417+ fscache_stat_unchecked(&fscache_n_store_pages);
41418 fscache_stat(&fscache_n_cop_write_page);
41419 ret = object->cache->ops->write_page(op, page);
41420 fscache_stat_d(&fscache_n_cop_write_page);
41421@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_
41422 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
41423 ASSERT(PageFsCache(page));
41424
41425- fscache_stat(&fscache_n_stores);
41426+ fscache_stat_unchecked(&fscache_n_stores);
41427
41428 op = kzalloc(sizeof(*op), GFP_NOIO);
41429 if (!op)
41430@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_
41431 spin_unlock(&cookie->stores_lock);
41432 spin_unlock(&object->lock);
41433
41434- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
41435+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
41436 op->store_limit = object->store_limit;
41437
41438 if (fscache_submit_op(object, &op->op) < 0)
41439@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_
41440
41441 spin_unlock(&cookie->lock);
41442 radix_tree_preload_end();
41443- fscache_stat(&fscache_n_store_ops);
41444- fscache_stat(&fscache_n_stores_ok);
41445+ fscache_stat_unchecked(&fscache_n_store_ops);
41446+ fscache_stat_unchecked(&fscache_n_stores_ok);
41447
41448 /* the work queue now carries its own ref on the object */
41449 fscache_put_operation(&op->op);
41450@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_
41451 return 0;
41452
41453 already_queued:
41454- fscache_stat(&fscache_n_stores_again);
41455+ fscache_stat_unchecked(&fscache_n_stores_again);
41456 already_pending:
41457 spin_unlock(&cookie->stores_lock);
41458 spin_unlock(&object->lock);
41459 spin_unlock(&cookie->lock);
41460 radix_tree_preload_end();
41461 kfree(op);
41462- fscache_stat(&fscache_n_stores_ok);
41463+ fscache_stat_unchecked(&fscache_n_stores_ok);
41464 _leave(" = 0");
41465 return 0;
41466
41467@@ -851,14 +851,14 @@ nobufs:
41468 spin_unlock(&cookie->lock);
41469 radix_tree_preload_end();
41470 kfree(op);
41471- fscache_stat(&fscache_n_stores_nobufs);
41472+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
41473 _leave(" = -ENOBUFS");
41474 return -ENOBUFS;
41475
41476 nomem_free:
41477 kfree(op);
41478 nomem:
41479- fscache_stat(&fscache_n_stores_oom);
41480+ fscache_stat_unchecked(&fscache_n_stores_oom);
41481 _leave(" = -ENOMEM");
41482 return -ENOMEM;
41483 }
41484@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscac
41485 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
41486 ASSERTCMP(page, !=, NULL);
41487
41488- fscache_stat(&fscache_n_uncaches);
41489+ fscache_stat_unchecked(&fscache_n_uncaches);
41490
41491 /* cache withdrawal may beat us to it */
41492 if (!PageFsCache(page))
41493@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fs
41494 unsigned long loop;
41495
41496 #ifdef CONFIG_FSCACHE_STATS
41497- atomic_add(pagevec->nr, &fscache_n_marks);
41498+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
41499 #endif
41500
41501 for (loop = 0; loop < pagevec->nr; loop++) {
41502diff -urNp linux-3.0.4/fs/fscache/stats.c linux-3.0.4/fs/fscache/stats.c
41503--- linux-3.0.4/fs/fscache/stats.c 2011-07-21 22:17:23.000000000 -0400
41504+++ linux-3.0.4/fs/fscache/stats.c 2011-08-23 21:47:56.000000000 -0400
41505@@ -18,95 +18,95 @@
41506 /*
41507 * operation counters
41508 */
41509-atomic_t fscache_n_op_pend;
41510-atomic_t fscache_n_op_run;
41511-atomic_t fscache_n_op_enqueue;
41512-atomic_t fscache_n_op_requeue;
41513-atomic_t fscache_n_op_deferred_release;
41514-atomic_t fscache_n_op_release;
41515-atomic_t fscache_n_op_gc;
41516-atomic_t fscache_n_op_cancelled;
41517-atomic_t fscache_n_op_rejected;
41518-
41519-atomic_t fscache_n_attr_changed;
41520-atomic_t fscache_n_attr_changed_ok;
41521-atomic_t fscache_n_attr_changed_nobufs;
41522-atomic_t fscache_n_attr_changed_nomem;
41523-atomic_t fscache_n_attr_changed_calls;
41524-
41525-atomic_t fscache_n_allocs;
41526-atomic_t fscache_n_allocs_ok;
41527-atomic_t fscache_n_allocs_wait;
41528-atomic_t fscache_n_allocs_nobufs;
41529-atomic_t fscache_n_allocs_intr;
41530-atomic_t fscache_n_allocs_object_dead;
41531-atomic_t fscache_n_alloc_ops;
41532-atomic_t fscache_n_alloc_op_waits;
41533-
41534-atomic_t fscache_n_retrievals;
41535-atomic_t fscache_n_retrievals_ok;
41536-atomic_t fscache_n_retrievals_wait;
41537-atomic_t fscache_n_retrievals_nodata;
41538-atomic_t fscache_n_retrievals_nobufs;
41539-atomic_t fscache_n_retrievals_intr;
41540-atomic_t fscache_n_retrievals_nomem;
41541-atomic_t fscache_n_retrievals_object_dead;
41542-atomic_t fscache_n_retrieval_ops;
41543-atomic_t fscache_n_retrieval_op_waits;
41544-
41545-atomic_t fscache_n_stores;
41546-atomic_t fscache_n_stores_ok;
41547-atomic_t fscache_n_stores_again;
41548-atomic_t fscache_n_stores_nobufs;
41549-atomic_t fscache_n_stores_oom;
41550-atomic_t fscache_n_store_ops;
41551-atomic_t fscache_n_store_calls;
41552-atomic_t fscache_n_store_pages;
41553-atomic_t fscache_n_store_radix_deletes;
41554-atomic_t fscache_n_store_pages_over_limit;
41555-
41556-atomic_t fscache_n_store_vmscan_not_storing;
41557-atomic_t fscache_n_store_vmscan_gone;
41558-atomic_t fscache_n_store_vmscan_busy;
41559-atomic_t fscache_n_store_vmscan_cancelled;
41560-
41561-atomic_t fscache_n_marks;
41562-atomic_t fscache_n_uncaches;
41563-
41564-atomic_t fscache_n_acquires;
41565-atomic_t fscache_n_acquires_null;
41566-atomic_t fscache_n_acquires_no_cache;
41567-atomic_t fscache_n_acquires_ok;
41568-atomic_t fscache_n_acquires_nobufs;
41569-atomic_t fscache_n_acquires_oom;
41570-
41571-atomic_t fscache_n_updates;
41572-atomic_t fscache_n_updates_null;
41573-atomic_t fscache_n_updates_run;
41574-
41575-atomic_t fscache_n_relinquishes;
41576-atomic_t fscache_n_relinquishes_null;
41577-atomic_t fscache_n_relinquishes_waitcrt;
41578-atomic_t fscache_n_relinquishes_retire;
41579-
41580-atomic_t fscache_n_cookie_index;
41581-atomic_t fscache_n_cookie_data;
41582-atomic_t fscache_n_cookie_special;
41583-
41584-atomic_t fscache_n_object_alloc;
41585-atomic_t fscache_n_object_no_alloc;
41586-atomic_t fscache_n_object_lookups;
41587-atomic_t fscache_n_object_lookups_negative;
41588-atomic_t fscache_n_object_lookups_positive;
41589-atomic_t fscache_n_object_lookups_timed_out;
41590-atomic_t fscache_n_object_created;
41591-atomic_t fscache_n_object_avail;
41592-atomic_t fscache_n_object_dead;
41593-
41594-atomic_t fscache_n_checkaux_none;
41595-atomic_t fscache_n_checkaux_okay;
41596-atomic_t fscache_n_checkaux_update;
41597-atomic_t fscache_n_checkaux_obsolete;
41598+atomic_unchecked_t fscache_n_op_pend;
41599+atomic_unchecked_t fscache_n_op_run;
41600+atomic_unchecked_t fscache_n_op_enqueue;
41601+atomic_unchecked_t fscache_n_op_requeue;
41602+atomic_unchecked_t fscache_n_op_deferred_release;
41603+atomic_unchecked_t fscache_n_op_release;
41604+atomic_unchecked_t fscache_n_op_gc;
41605+atomic_unchecked_t fscache_n_op_cancelled;
41606+atomic_unchecked_t fscache_n_op_rejected;
41607+
41608+atomic_unchecked_t fscache_n_attr_changed;
41609+atomic_unchecked_t fscache_n_attr_changed_ok;
41610+atomic_unchecked_t fscache_n_attr_changed_nobufs;
41611+atomic_unchecked_t fscache_n_attr_changed_nomem;
41612+atomic_unchecked_t fscache_n_attr_changed_calls;
41613+
41614+atomic_unchecked_t fscache_n_allocs;
41615+atomic_unchecked_t fscache_n_allocs_ok;
41616+atomic_unchecked_t fscache_n_allocs_wait;
41617+atomic_unchecked_t fscache_n_allocs_nobufs;
41618+atomic_unchecked_t fscache_n_allocs_intr;
41619+atomic_unchecked_t fscache_n_allocs_object_dead;
41620+atomic_unchecked_t fscache_n_alloc_ops;
41621+atomic_unchecked_t fscache_n_alloc_op_waits;
41622+
41623+atomic_unchecked_t fscache_n_retrievals;
41624+atomic_unchecked_t fscache_n_retrievals_ok;
41625+atomic_unchecked_t fscache_n_retrievals_wait;
41626+atomic_unchecked_t fscache_n_retrievals_nodata;
41627+atomic_unchecked_t fscache_n_retrievals_nobufs;
41628+atomic_unchecked_t fscache_n_retrievals_intr;
41629+atomic_unchecked_t fscache_n_retrievals_nomem;
41630+atomic_unchecked_t fscache_n_retrievals_object_dead;
41631+atomic_unchecked_t fscache_n_retrieval_ops;
41632+atomic_unchecked_t fscache_n_retrieval_op_waits;
41633+
41634+atomic_unchecked_t fscache_n_stores;
41635+atomic_unchecked_t fscache_n_stores_ok;
41636+atomic_unchecked_t fscache_n_stores_again;
41637+atomic_unchecked_t fscache_n_stores_nobufs;
41638+atomic_unchecked_t fscache_n_stores_oom;
41639+atomic_unchecked_t fscache_n_store_ops;
41640+atomic_unchecked_t fscache_n_store_calls;
41641+atomic_unchecked_t fscache_n_store_pages;
41642+atomic_unchecked_t fscache_n_store_radix_deletes;
41643+atomic_unchecked_t fscache_n_store_pages_over_limit;
41644+
41645+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
41646+atomic_unchecked_t fscache_n_store_vmscan_gone;
41647+atomic_unchecked_t fscache_n_store_vmscan_busy;
41648+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
41649+
41650+atomic_unchecked_t fscache_n_marks;
41651+atomic_unchecked_t fscache_n_uncaches;
41652+
41653+atomic_unchecked_t fscache_n_acquires;
41654+atomic_unchecked_t fscache_n_acquires_null;
41655+atomic_unchecked_t fscache_n_acquires_no_cache;
41656+atomic_unchecked_t fscache_n_acquires_ok;
41657+atomic_unchecked_t fscache_n_acquires_nobufs;
41658+atomic_unchecked_t fscache_n_acquires_oom;
41659+
41660+atomic_unchecked_t fscache_n_updates;
41661+atomic_unchecked_t fscache_n_updates_null;
41662+atomic_unchecked_t fscache_n_updates_run;
41663+
41664+atomic_unchecked_t fscache_n_relinquishes;
41665+atomic_unchecked_t fscache_n_relinquishes_null;
41666+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
41667+atomic_unchecked_t fscache_n_relinquishes_retire;
41668+
41669+atomic_unchecked_t fscache_n_cookie_index;
41670+atomic_unchecked_t fscache_n_cookie_data;
41671+atomic_unchecked_t fscache_n_cookie_special;
41672+
41673+atomic_unchecked_t fscache_n_object_alloc;
41674+atomic_unchecked_t fscache_n_object_no_alloc;
41675+atomic_unchecked_t fscache_n_object_lookups;
41676+atomic_unchecked_t fscache_n_object_lookups_negative;
41677+atomic_unchecked_t fscache_n_object_lookups_positive;
41678+atomic_unchecked_t fscache_n_object_lookups_timed_out;
41679+atomic_unchecked_t fscache_n_object_created;
41680+atomic_unchecked_t fscache_n_object_avail;
41681+atomic_unchecked_t fscache_n_object_dead;
41682+
41683+atomic_unchecked_t fscache_n_checkaux_none;
41684+atomic_unchecked_t fscache_n_checkaux_okay;
41685+atomic_unchecked_t fscache_n_checkaux_update;
41686+atomic_unchecked_t fscache_n_checkaux_obsolete;
41687
41688 atomic_t fscache_n_cop_alloc_object;
41689 atomic_t fscache_n_cop_lookup_object;
41690@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
41691 seq_puts(m, "FS-Cache statistics\n");
41692
41693 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
41694- atomic_read(&fscache_n_cookie_index),
41695- atomic_read(&fscache_n_cookie_data),
41696- atomic_read(&fscache_n_cookie_special));
41697+ atomic_read_unchecked(&fscache_n_cookie_index),
41698+ atomic_read_unchecked(&fscache_n_cookie_data),
41699+ atomic_read_unchecked(&fscache_n_cookie_special));
41700
41701 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
41702- atomic_read(&fscache_n_object_alloc),
41703- atomic_read(&fscache_n_object_no_alloc),
41704- atomic_read(&fscache_n_object_avail),
41705- atomic_read(&fscache_n_object_dead));
41706+ atomic_read_unchecked(&fscache_n_object_alloc),
41707+ atomic_read_unchecked(&fscache_n_object_no_alloc),
41708+ atomic_read_unchecked(&fscache_n_object_avail),
41709+ atomic_read_unchecked(&fscache_n_object_dead));
41710 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
41711- atomic_read(&fscache_n_checkaux_none),
41712- atomic_read(&fscache_n_checkaux_okay),
41713- atomic_read(&fscache_n_checkaux_update),
41714- atomic_read(&fscache_n_checkaux_obsolete));
41715+ atomic_read_unchecked(&fscache_n_checkaux_none),
41716+ atomic_read_unchecked(&fscache_n_checkaux_okay),
41717+ atomic_read_unchecked(&fscache_n_checkaux_update),
41718+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
41719
41720 seq_printf(m, "Pages : mrk=%u unc=%u\n",
41721- atomic_read(&fscache_n_marks),
41722- atomic_read(&fscache_n_uncaches));
41723+ atomic_read_unchecked(&fscache_n_marks),
41724+ atomic_read_unchecked(&fscache_n_uncaches));
41725
41726 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
41727 " oom=%u\n",
41728- atomic_read(&fscache_n_acquires),
41729- atomic_read(&fscache_n_acquires_null),
41730- atomic_read(&fscache_n_acquires_no_cache),
41731- atomic_read(&fscache_n_acquires_ok),
41732- atomic_read(&fscache_n_acquires_nobufs),
41733- atomic_read(&fscache_n_acquires_oom));
41734+ atomic_read_unchecked(&fscache_n_acquires),
41735+ atomic_read_unchecked(&fscache_n_acquires_null),
41736+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
41737+ atomic_read_unchecked(&fscache_n_acquires_ok),
41738+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
41739+ atomic_read_unchecked(&fscache_n_acquires_oom));
41740
41741 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
41742- atomic_read(&fscache_n_object_lookups),
41743- atomic_read(&fscache_n_object_lookups_negative),
41744- atomic_read(&fscache_n_object_lookups_positive),
41745- atomic_read(&fscache_n_object_created),
41746- atomic_read(&fscache_n_object_lookups_timed_out));
41747+ atomic_read_unchecked(&fscache_n_object_lookups),
41748+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
41749+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
41750+ atomic_read_unchecked(&fscache_n_object_created),
41751+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
41752
41753 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
41754- atomic_read(&fscache_n_updates),
41755- atomic_read(&fscache_n_updates_null),
41756- atomic_read(&fscache_n_updates_run));
41757+ atomic_read_unchecked(&fscache_n_updates),
41758+ atomic_read_unchecked(&fscache_n_updates_null),
41759+ atomic_read_unchecked(&fscache_n_updates_run));
41760
41761 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
41762- atomic_read(&fscache_n_relinquishes),
41763- atomic_read(&fscache_n_relinquishes_null),
41764- atomic_read(&fscache_n_relinquishes_waitcrt),
41765- atomic_read(&fscache_n_relinquishes_retire));
41766+ atomic_read_unchecked(&fscache_n_relinquishes),
41767+ atomic_read_unchecked(&fscache_n_relinquishes_null),
41768+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
41769+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
41770
41771 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
41772- atomic_read(&fscache_n_attr_changed),
41773- atomic_read(&fscache_n_attr_changed_ok),
41774- atomic_read(&fscache_n_attr_changed_nobufs),
41775- atomic_read(&fscache_n_attr_changed_nomem),
41776- atomic_read(&fscache_n_attr_changed_calls));
41777+ atomic_read_unchecked(&fscache_n_attr_changed),
41778+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
41779+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
41780+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
41781+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
41782
41783 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
41784- atomic_read(&fscache_n_allocs),
41785- atomic_read(&fscache_n_allocs_ok),
41786- atomic_read(&fscache_n_allocs_wait),
41787- atomic_read(&fscache_n_allocs_nobufs),
41788- atomic_read(&fscache_n_allocs_intr));
41789+ atomic_read_unchecked(&fscache_n_allocs),
41790+ atomic_read_unchecked(&fscache_n_allocs_ok),
41791+ atomic_read_unchecked(&fscache_n_allocs_wait),
41792+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
41793+ atomic_read_unchecked(&fscache_n_allocs_intr));
41794 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
41795- atomic_read(&fscache_n_alloc_ops),
41796- atomic_read(&fscache_n_alloc_op_waits),
41797- atomic_read(&fscache_n_allocs_object_dead));
41798+ atomic_read_unchecked(&fscache_n_alloc_ops),
41799+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
41800+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
41801
41802 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
41803 " int=%u oom=%u\n",
41804- atomic_read(&fscache_n_retrievals),
41805- atomic_read(&fscache_n_retrievals_ok),
41806- atomic_read(&fscache_n_retrievals_wait),
41807- atomic_read(&fscache_n_retrievals_nodata),
41808- atomic_read(&fscache_n_retrievals_nobufs),
41809- atomic_read(&fscache_n_retrievals_intr),
41810- atomic_read(&fscache_n_retrievals_nomem));
41811+ atomic_read_unchecked(&fscache_n_retrievals),
41812+ atomic_read_unchecked(&fscache_n_retrievals_ok),
41813+ atomic_read_unchecked(&fscache_n_retrievals_wait),
41814+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
41815+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
41816+ atomic_read_unchecked(&fscache_n_retrievals_intr),
41817+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
41818 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
41819- atomic_read(&fscache_n_retrieval_ops),
41820- atomic_read(&fscache_n_retrieval_op_waits),
41821- atomic_read(&fscache_n_retrievals_object_dead));
41822+ atomic_read_unchecked(&fscache_n_retrieval_ops),
41823+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
41824+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
41825
41826 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
41827- atomic_read(&fscache_n_stores),
41828- atomic_read(&fscache_n_stores_ok),
41829- atomic_read(&fscache_n_stores_again),
41830- atomic_read(&fscache_n_stores_nobufs),
41831- atomic_read(&fscache_n_stores_oom));
41832+ atomic_read_unchecked(&fscache_n_stores),
41833+ atomic_read_unchecked(&fscache_n_stores_ok),
41834+ atomic_read_unchecked(&fscache_n_stores_again),
41835+ atomic_read_unchecked(&fscache_n_stores_nobufs),
41836+ atomic_read_unchecked(&fscache_n_stores_oom));
41837 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
41838- atomic_read(&fscache_n_store_ops),
41839- atomic_read(&fscache_n_store_calls),
41840- atomic_read(&fscache_n_store_pages),
41841- atomic_read(&fscache_n_store_radix_deletes),
41842- atomic_read(&fscache_n_store_pages_over_limit));
41843+ atomic_read_unchecked(&fscache_n_store_ops),
41844+ atomic_read_unchecked(&fscache_n_store_calls),
41845+ atomic_read_unchecked(&fscache_n_store_pages),
41846+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
41847+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
41848
41849 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
41850- atomic_read(&fscache_n_store_vmscan_not_storing),
41851- atomic_read(&fscache_n_store_vmscan_gone),
41852- atomic_read(&fscache_n_store_vmscan_busy),
41853- atomic_read(&fscache_n_store_vmscan_cancelled));
41854+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
41855+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
41856+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
41857+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
41858
41859 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
41860- atomic_read(&fscache_n_op_pend),
41861- atomic_read(&fscache_n_op_run),
41862- atomic_read(&fscache_n_op_enqueue),
41863- atomic_read(&fscache_n_op_cancelled),
41864- atomic_read(&fscache_n_op_rejected));
41865+ atomic_read_unchecked(&fscache_n_op_pend),
41866+ atomic_read_unchecked(&fscache_n_op_run),
41867+ atomic_read_unchecked(&fscache_n_op_enqueue),
41868+ atomic_read_unchecked(&fscache_n_op_cancelled),
41869+ atomic_read_unchecked(&fscache_n_op_rejected));
41870 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
41871- atomic_read(&fscache_n_op_deferred_release),
41872- atomic_read(&fscache_n_op_release),
41873- atomic_read(&fscache_n_op_gc));
41874+ atomic_read_unchecked(&fscache_n_op_deferred_release),
41875+ atomic_read_unchecked(&fscache_n_op_release),
41876+ atomic_read_unchecked(&fscache_n_op_gc));
41877
41878 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
41879 atomic_read(&fscache_n_cop_alloc_object),
41880diff -urNp linux-3.0.4/fs/fs_struct.c linux-3.0.4/fs/fs_struct.c
41881--- linux-3.0.4/fs/fs_struct.c 2011-07-21 22:17:23.000000000 -0400
41882+++ linux-3.0.4/fs/fs_struct.c 2011-08-23 21:48:14.000000000 -0400
41883@@ -4,6 +4,7 @@
41884 #include <linux/path.h>
41885 #include <linux/slab.h>
41886 #include <linux/fs_struct.h>
41887+#include <linux/grsecurity.h>
41888 #include "internal.h"
41889
41890 static inline void path_get_longterm(struct path *path)
41891@@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, s
41892 old_root = fs->root;
41893 fs->root = *path;
41894 path_get_longterm(path);
41895+ gr_set_chroot_entries(current, path);
41896 write_seqcount_end(&fs->seq);
41897 spin_unlock(&fs->lock);
41898 if (old_root.dentry)
41899@@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_roo
41900 && fs->root.mnt == old_root->mnt) {
41901 path_get_longterm(new_root);
41902 fs->root = *new_root;
41903+ gr_set_chroot_entries(p, new_root);
41904 count++;
41905 }
41906 if (fs->pwd.dentry == old_root->dentry
41907@@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
41908 spin_lock(&fs->lock);
41909 write_seqcount_begin(&fs->seq);
41910 tsk->fs = NULL;
41911- kill = !--fs->users;
41912+ gr_clear_chroot_entries(tsk);
41913+ kill = !atomic_dec_return(&fs->users);
41914 write_seqcount_end(&fs->seq);
41915 spin_unlock(&fs->lock);
41916 task_unlock(tsk);
41917@@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct
41918 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
41919 /* We don't need to lock fs - think why ;-) */
41920 if (fs) {
41921- fs->users = 1;
41922+ atomic_set(&fs->users, 1);
41923 fs->in_exec = 0;
41924 spin_lock_init(&fs->lock);
41925 seqcount_init(&fs->seq);
41926@@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct
41927 spin_lock(&old->lock);
41928 fs->root = old->root;
41929 path_get_longterm(&fs->root);
41930+ /* instead of calling gr_set_chroot_entries here,
41931+ we call it from every caller of this function
41932+ */
41933 fs->pwd = old->pwd;
41934 path_get_longterm(&fs->pwd);
41935 spin_unlock(&old->lock);
41936@@ -150,8 +157,9 @@ int unshare_fs_struct(void)
41937
41938 task_lock(current);
41939 spin_lock(&fs->lock);
41940- kill = !--fs->users;
41941+ kill = !atomic_dec_return(&fs->users);
41942 current->fs = new_fs;
41943+ gr_set_chroot_entries(current, &new_fs->root);
41944 spin_unlock(&fs->lock);
41945 task_unlock(current);
41946
41947@@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
41948
41949 /* to be mentioned only in INIT_TASK */
41950 struct fs_struct init_fs = {
41951- .users = 1,
41952+ .users = ATOMIC_INIT(1),
41953 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
41954 .seq = SEQCNT_ZERO,
41955 .umask = 0022,
41956@@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
41957 task_lock(current);
41958
41959 spin_lock(&init_fs.lock);
41960- init_fs.users++;
41961+ atomic_inc(&init_fs.users);
41962 spin_unlock(&init_fs.lock);
41963
41964 spin_lock(&fs->lock);
41965 current->fs = &init_fs;
41966- kill = !--fs->users;
41967+ gr_set_chroot_entries(current, &current->fs->root);
41968+ kill = !atomic_dec_return(&fs->users);
41969 spin_unlock(&fs->lock);
41970
41971 task_unlock(current);
41972diff -urNp linux-3.0.4/fs/fuse/cuse.c linux-3.0.4/fs/fuse/cuse.c
41973--- linux-3.0.4/fs/fuse/cuse.c 2011-07-21 22:17:23.000000000 -0400
41974+++ linux-3.0.4/fs/fuse/cuse.c 2011-08-23 21:47:56.000000000 -0400
41975@@ -586,10 +586,12 @@ static int __init cuse_init(void)
41976 INIT_LIST_HEAD(&cuse_conntbl[i]);
41977
41978 /* inherit and extend fuse_dev_operations */
41979- cuse_channel_fops = fuse_dev_operations;
41980- cuse_channel_fops.owner = THIS_MODULE;
41981- cuse_channel_fops.open = cuse_channel_open;
41982- cuse_channel_fops.release = cuse_channel_release;
41983+ pax_open_kernel();
41984+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
41985+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
41986+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
41987+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
41988+ pax_close_kernel();
41989
41990 cuse_class = class_create(THIS_MODULE, "cuse");
41991 if (IS_ERR(cuse_class))
41992diff -urNp linux-3.0.4/fs/fuse/dev.c linux-3.0.4/fs/fuse/dev.c
41993--- linux-3.0.4/fs/fuse/dev.c 2011-09-02 18:11:26.000000000 -0400
41994+++ linux-3.0.4/fs/fuse/dev.c 2011-08-29 23:26:27.000000000 -0400
41995@@ -1238,7 +1238,7 @@ static ssize_t fuse_dev_splice_read(stru
41996 ret = 0;
41997 pipe_lock(pipe);
41998
41999- if (!pipe->readers) {
42000+ if (!atomic_read(&pipe->readers)) {
42001 send_sig(SIGPIPE, current, 0);
42002 if (!ret)
42003 ret = -EPIPE;
42004diff -urNp linux-3.0.4/fs/fuse/dir.c linux-3.0.4/fs/fuse/dir.c
42005--- linux-3.0.4/fs/fuse/dir.c 2011-07-21 22:17:23.000000000 -0400
42006+++ linux-3.0.4/fs/fuse/dir.c 2011-08-23 21:47:56.000000000 -0400
42007@@ -1148,7 +1148,7 @@ static char *read_link(struct dentry *de
42008 return link;
42009 }
42010
42011-static void free_link(char *link)
42012+static void free_link(const char *link)
42013 {
42014 if (!IS_ERR(link))
42015 free_page((unsigned long) link);
42016diff -urNp linux-3.0.4/fs/gfs2/inode.c linux-3.0.4/fs/gfs2/inode.c
42017--- linux-3.0.4/fs/gfs2/inode.c 2011-07-21 22:17:23.000000000 -0400
42018+++ linux-3.0.4/fs/gfs2/inode.c 2011-08-23 21:47:56.000000000 -0400
42019@@ -1525,7 +1525,7 @@ out:
42020
42021 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
42022 {
42023- char *s = nd_get_link(nd);
42024+ const char *s = nd_get_link(nd);
42025 if (!IS_ERR(s))
42026 kfree(s);
42027 }
42028diff -urNp linux-3.0.4/fs/hfsplus/catalog.c linux-3.0.4/fs/hfsplus/catalog.c
42029--- linux-3.0.4/fs/hfsplus/catalog.c 2011-07-21 22:17:23.000000000 -0400
42030+++ linux-3.0.4/fs/hfsplus/catalog.c 2011-08-23 21:48:14.000000000 -0400
42031@@ -179,6 +179,8 @@ int hfsplus_find_cat(struct super_block
42032 int err;
42033 u16 type;
42034
42035+ pax_track_stack();
42036+
42037 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
42038 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
42039 if (err)
42040@@ -210,6 +212,8 @@ int hfsplus_create_cat(u32 cnid, struct
42041 int entry_size;
42042 int err;
42043
42044+ pax_track_stack();
42045+
42046 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n",
42047 str->name, cnid, inode->i_nlink);
42048 hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
42049@@ -349,6 +353,8 @@ int hfsplus_rename_cat(u32 cnid,
42050 int entry_size, type;
42051 int err = 0;
42052
42053+ pax_track_stack();
42054+
42055 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
42056 cnid, src_dir->i_ino, src_name->name,
42057 dst_dir->i_ino, dst_name->name);
42058diff -urNp linux-3.0.4/fs/hfsplus/dir.c linux-3.0.4/fs/hfsplus/dir.c
42059--- linux-3.0.4/fs/hfsplus/dir.c 2011-07-21 22:17:23.000000000 -0400
42060+++ linux-3.0.4/fs/hfsplus/dir.c 2011-08-23 21:48:14.000000000 -0400
42061@@ -129,6 +129,8 @@ static int hfsplus_readdir(struct file *
42062 struct hfsplus_readdir_data *rd;
42063 u16 type;
42064
42065+ pax_track_stack();
42066+
42067 if (filp->f_pos >= inode->i_size)
42068 return 0;
42069
42070diff -urNp linux-3.0.4/fs/hfsplus/inode.c linux-3.0.4/fs/hfsplus/inode.c
42071--- linux-3.0.4/fs/hfsplus/inode.c 2011-07-21 22:17:23.000000000 -0400
42072+++ linux-3.0.4/fs/hfsplus/inode.c 2011-08-23 21:48:14.000000000 -0400
42073@@ -489,6 +489,8 @@ int hfsplus_cat_read_inode(struct inode
42074 int res = 0;
42075 u16 type;
42076
42077+ pax_track_stack();
42078+
42079 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
42080
42081 HFSPLUS_I(inode)->linkid = 0;
42082@@ -552,6 +554,8 @@ int hfsplus_cat_write_inode(struct inode
42083 struct hfs_find_data fd;
42084 hfsplus_cat_entry entry;
42085
42086+ pax_track_stack();
42087+
42088 if (HFSPLUS_IS_RSRC(inode))
42089 main_inode = HFSPLUS_I(inode)->rsrc_inode;
42090
42091diff -urNp linux-3.0.4/fs/hfsplus/ioctl.c linux-3.0.4/fs/hfsplus/ioctl.c
42092--- linux-3.0.4/fs/hfsplus/ioctl.c 2011-07-21 22:17:23.000000000 -0400
42093+++ linux-3.0.4/fs/hfsplus/ioctl.c 2011-08-23 21:48:14.000000000 -0400
42094@@ -122,6 +122,8 @@ int hfsplus_setxattr(struct dentry *dent
42095 struct hfsplus_cat_file *file;
42096 int res;
42097
42098+ pax_track_stack();
42099+
42100 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
42101 return -EOPNOTSUPP;
42102
42103@@ -166,6 +168,8 @@ ssize_t hfsplus_getxattr(struct dentry *
42104 struct hfsplus_cat_file *file;
42105 ssize_t res = 0;
42106
42107+ pax_track_stack();
42108+
42109 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
42110 return -EOPNOTSUPP;
42111
42112diff -urNp linux-3.0.4/fs/hfsplus/super.c linux-3.0.4/fs/hfsplus/super.c
42113--- linux-3.0.4/fs/hfsplus/super.c 2011-07-21 22:17:23.000000000 -0400
42114+++ linux-3.0.4/fs/hfsplus/super.c 2011-08-23 21:48:14.000000000 -0400
42115@@ -340,6 +340,8 @@ static int hfsplus_fill_super(struct sup
42116 struct nls_table *nls = NULL;
42117 int err;
42118
42119+ pax_track_stack();
42120+
42121 err = -EINVAL;
42122 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
42123 if (!sbi)
42124diff -urNp linux-3.0.4/fs/hugetlbfs/inode.c linux-3.0.4/fs/hugetlbfs/inode.c
42125--- linux-3.0.4/fs/hugetlbfs/inode.c 2011-07-21 22:17:23.000000000 -0400
42126+++ linux-3.0.4/fs/hugetlbfs/inode.c 2011-08-23 21:48:14.000000000 -0400
42127@@ -914,7 +914,7 @@ static struct file_system_type hugetlbfs
42128 .kill_sb = kill_litter_super,
42129 };
42130
42131-static struct vfsmount *hugetlbfs_vfsmount;
42132+struct vfsmount *hugetlbfs_vfsmount;
42133
42134 static int can_do_hugetlb_shm(void)
42135 {
42136diff -urNp linux-3.0.4/fs/inode.c linux-3.0.4/fs/inode.c
42137--- linux-3.0.4/fs/inode.c 2011-07-21 22:17:23.000000000 -0400
42138+++ linux-3.0.4/fs/inode.c 2011-08-23 21:47:56.000000000 -0400
42139@@ -829,8 +829,8 @@ unsigned int get_next_ino(void)
42140
42141 #ifdef CONFIG_SMP
42142 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
42143- static atomic_t shared_last_ino;
42144- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
42145+ static atomic_unchecked_t shared_last_ino;
42146+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
42147
42148 res = next - LAST_INO_BATCH;
42149 }
42150diff -urNp linux-3.0.4/fs/jbd/checkpoint.c linux-3.0.4/fs/jbd/checkpoint.c
42151--- linux-3.0.4/fs/jbd/checkpoint.c 2011-07-21 22:17:23.000000000 -0400
42152+++ linux-3.0.4/fs/jbd/checkpoint.c 2011-08-23 21:48:14.000000000 -0400
42153@@ -350,6 +350,8 @@ int log_do_checkpoint(journal_t *journal
42154 tid_t this_tid;
42155 int result;
42156
42157+ pax_track_stack();
42158+
42159 jbd_debug(1, "Start checkpoint\n");
42160
42161 /*
42162diff -urNp linux-3.0.4/fs/jffs2/compr_rtime.c linux-3.0.4/fs/jffs2/compr_rtime.c
42163--- linux-3.0.4/fs/jffs2/compr_rtime.c 2011-07-21 22:17:23.000000000 -0400
42164+++ linux-3.0.4/fs/jffs2/compr_rtime.c 2011-08-23 21:48:14.000000000 -0400
42165@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
42166 int outpos = 0;
42167 int pos=0;
42168
42169+ pax_track_stack();
42170+
42171 memset(positions,0,sizeof(positions));
42172
42173 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
42174@@ -78,6 +80,8 @@ static int jffs2_rtime_decompress(unsign
42175 int outpos = 0;
42176 int pos=0;
42177
42178+ pax_track_stack();
42179+
42180 memset(positions,0,sizeof(positions));
42181
42182 while (outpos<destlen) {
42183diff -urNp linux-3.0.4/fs/jffs2/compr_rubin.c linux-3.0.4/fs/jffs2/compr_rubin.c
42184--- linux-3.0.4/fs/jffs2/compr_rubin.c 2011-07-21 22:17:23.000000000 -0400
42185+++ linux-3.0.4/fs/jffs2/compr_rubin.c 2011-08-23 21:48:14.000000000 -0400
42186@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
42187 int ret;
42188 uint32_t mysrclen, mydstlen;
42189
42190+ pax_track_stack();
42191+
42192 mysrclen = *sourcelen;
42193 mydstlen = *dstlen - 8;
42194
42195diff -urNp linux-3.0.4/fs/jffs2/erase.c linux-3.0.4/fs/jffs2/erase.c
42196--- linux-3.0.4/fs/jffs2/erase.c 2011-07-21 22:17:23.000000000 -0400
42197+++ linux-3.0.4/fs/jffs2/erase.c 2011-08-23 21:47:56.000000000 -0400
42198@@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(stru
42199 struct jffs2_unknown_node marker = {
42200 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
42201 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
42202- .totlen = cpu_to_je32(c->cleanmarker_size)
42203+ .totlen = cpu_to_je32(c->cleanmarker_size),
42204+ .hdr_crc = cpu_to_je32(0)
42205 };
42206
42207 jffs2_prealloc_raw_node_refs(c, jeb, 1);
42208diff -urNp linux-3.0.4/fs/jffs2/wbuf.c linux-3.0.4/fs/jffs2/wbuf.c
42209--- linux-3.0.4/fs/jffs2/wbuf.c 2011-07-21 22:17:23.000000000 -0400
42210+++ linux-3.0.4/fs/jffs2/wbuf.c 2011-08-23 21:47:56.000000000 -0400
42211@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
42212 {
42213 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
42214 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
42215- .totlen = constant_cpu_to_je32(8)
42216+ .totlen = constant_cpu_to_je32(8),
42217+ .hdr_crc = constant_cpu_to_je32(0)
42218 };
42219
42220 /*
42221diff -urNp linux-3.0.4/fs/jffs2/xattr.c linux-3.0.4/fs/jffs2/xattr.c
42222--- linux-3.0.4/fs/jffs2/xattr.c 2011-07-21 22:17:23.000000000 -0400
42223+++ linux-3.0.4/fs/jffs2/xattr.c 2011-08-23 21:48:14.000000000 -0400
42224@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
42225
42226 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
42227
42228+ pax_track_stack();
42229+
42230 /* Phase.1 : Merge same xref */
42231 for (i=0; i < XREF_TMPHASH_SIZE; i++)
42232 xref_tmphash[i] = NULL;
42233diff -urNp linux-3.0.4/fs/jfs/super.c linux-3.0.4/fs/jfs/super.c
42234--- linux-3.0.4/fs/jfs/super.c 2011-07-21 22:17:23.000000000 -0400
42235+++ linux-3.0.4/fs/jfs/super.c 2011-08-23 21:47:56.000000000 -0400
42236@@ -803,7 +803,7 @@ static int __init init_jfs_fs(void)
42237
42238 jfs_inode_cachep =
42239 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
42240- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
42241+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
42242 init_once);
42243 if (jfs_inode_cachep == NULL)
42244 return -ENOMEM;
42245diff -urNp linux-3.0.4/fs/Kconfig.binfmt linux-3.0.4/fs/Kconfig.binfmt
42246--- linux-3.0.4/fs/Kconfig.binfmt 2011-07-21 22:17:23.000000000 -0400
42247+++ linux-3.0.4/fs/Kconfig.binfmt 2011-08-23 21:47:56.000000000 -0400
42248@@ -86,7 +86,7 @@ config HAVE_AOUT
42249
42250 config BINFMT_AOUT
42251 tristate "Kernel support for a.out and ECOFF binaries"
42252- depends on HAVE_AOUT
42253+ depends on HAVE_AOUT && BROKEN
42254 ---help---
42255 A.out (Assembler.OUTput) is a set of formats for libraries and
42256 executables used in the earliest versions of UNIX. Linux used
42257diff -urNp linux-3.0.4/fs/libfs.c linux-3.0.4/fs/libfs.c
42258--- linux-3.0.4/fs/libfs.c 2011-07-21 22:17:23.000000000 -0400
42259+++ linux-3.0.4/fs/libfs.c 2011-08-23 21:47:56.000000000 -0400
42260@@ -163,6 +163,9 @@ int dcache_readdir(struct file * filp, v
42261
42262 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
42263 struct dentry *next;
42264+ char d_name[sizeof(next->d_iname)];
42265+ const unsigned char *name;
42266+
42267 next = list_entry(p, struct dentry, d_u.d_child);
42268 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
42269 if (!simple_positive(next)) {
42270@@ -172,7 +175,12 @@ int dcache_readdir(struct file * filp, v
42271
42272 spin_unlock(&next->d_lock);
42273 spin_unlock(&dentry->d_lock);
42274- if (filldir(dirent, next->d_name.name,
42275+ name = next->d_name.name;
42276+ if (name == next->d_iname) {
42277+ memcpy(d_name, name, next->d_name.len);
42278+ name = d_name;
42279+ }
42280+ if (filldir(dirent, name,
42281 next->d_name.len, filp->f_pos,
42282 next->d_inode->i_ino,
42283 dt_type(next->d_inode)) < 0)
42284diff -urNp linux-3.0.4/fs/lockd/clntproc.c linux-3.0.4/fs/lockd/clntproc.c
42285--- linux-3.0.4/fs/lockd/clntproc.c 2011-07-21 22:17:23.000000000 -0400
42286+++ linux-3.0.4/fs/lockd/clntproc.c 2011-08-23 21:48:14.000000000 -0400
42287@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
42288 /*
42289 * Cookie counter for NLM requests
42290 */
42291-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
42292+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
42293
42294 void nlmclnt_next_cookie(struct nlm_cookie *c)
42295 {
42296- u32 cookie = atomic_inc_return(&nlm_cookie);
42297+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
42298
42299 memcpy(c->data, &cookie, 4);
42300 c->len=4;
42301@@ -620,6 +620,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
42302 struct nlm_rqst reqst, *req;
42303 int status;
42304
42305+ pax_track_stack();
42306+
42307 req = &reqst;
42308 memset(req, 0, sizeof(*req));
42309 locks_init_lock(&req->a_args.lock.fl);
42310diff -urNp linux-3.0.4/fs/locks.c linux-3.0.4/fs/locks.c
42311--- linux-3.0.4/fs/locks.c 2011-07-21 22:17:23.000000000 -0400
42312+++ linux-3.0.4/fs/locks.c 2011-08-23 21:47:56.000000000 -0400
42313@@ -2043,16 +2043,16 @@ void locks_remove_flock(struct file *fil
42314 return;
42315
42316 if (filp->f_op && filp->f_op->flock) {
42317- struct file_lock fl = {
42318+ struct file_lock flock = {
42319 .fl_pid = current->tgid,
42320 .fl_file = filp,
42321 .fl_flags = FL_FLOCK,
42322 .fl_type = F_UNLCK,
42323 .fl_end = OFFSET_MAX,
42324 };
42325- filp->f_op->flock(filp, F_SETLKW, &fl);
42326- if (fl.fl_ops && fl.fl_ops->fl_release_private)
42327- fl.fl_ops->fl_release_private(&fl);
42328+ filp->f_op->flock(filp, F_SETLKW, &flock);
42329+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
42330+ flock.fl_ops->fl_release_private(&flock);
42331 }
42332
42333 lock_flocks();
42334diff -urNp linux-3.0.4/fs/logfs/super.c linux-3.0.4/fs/logfs/super.c
42335--- linux-3.0.4/fs/logfs/super.c 2011-07-21 22:17:23.000000000 -0400
42336+++ linux-3.0.4/fs/logfs/super.c 2011-08-23 21:48:14.000000000 -0400
42337@@ -266,6 +266,8 @@ static int logfs_recover_sb(struct super
42338 struct logfs_disk_super _ds1, *ds1 = &_ds1;
42339 int err, valid0, valid1;
42340
42341+ pax_track_stack();
42342+
42343 /* read first superblock */
42344 err = wbuf_read(sb, super->s_sb_ofs[0], sizeof(*ds0), ds0);
42345 if (err)
42346diff -urNp linux-3.0.4/fs/namei.c linux-3.0.4/fs/namei.c
42347--- linux-3.0.4/fs/namei.c 2011-07-21 22:17:23.000000000 -0400
42348+++ linux-3.0.4/fs/namei.c 2011-10-06 03:40:11.000000000 -0400
42349@@ -237,21 +237,31 @@ int generic_permission(struct inode *ino
42350 return ret;
42351
42352 /*
42353- * Read/write DACs are always overridable.
42354- * Executable DACs are overridable for all directories and
42355- * for non-directories that have least one exec bit set.
42356+ * Searching includes executable on directories, else just read.
42357 */
42358- if (!(mask & MAY_EXEC) || execute_ok(inode))
42359- if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
42360+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
42361+ if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE))) {
42362+#ifdef CONFIG_GRKERNSEC
42363+ if (flags & IPERM_FLAG_RCU)
42364+ return -ECHILD;
42365+#endif
42366+ if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
42367 return 0;
42368+ }
42369
42370 /*
42371- * Searching includes executable on directories, else just read.
42372+ * Read/write DACs are always overridable.
42373+ * Executable DACs are overridable for all directories and
42374+ * for non-directories that have least one exec bit set.
42375 */
42376- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
42377- if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
42378- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
42379+ if (!(mask & MAY_EXEC) || execute_ok(inode)) {
42380+#ifdef CONFIG_GRKERNSEC
42381+ if (flags & IPERM_FLAG_RCU)
42382+ return -ECHILD;
42383+#endif
42384+ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
42385 return 0;
42386+ }
42387
42388 return -EACCES;
42389 }
42390@@ -547,6 +557,9 @@ static int complete_walk(struct nameidat
42391 br_read_unlock(vfsmount_lock);
42392 }
42393
42394+ if (!(nd->flags & LOOKUP_PARENT) && !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
42395+ return -ENOENT;
42396+
42397 if (likely(!(nd->flags & LOOKUP_JUMPED)))
42398 return 0;
42399
42400@@ -593,9 +606,16 @@ static inline int exec_permission(struct
42401 if (ret == -ECHILD)
42402 return ret;
42403
42404- if (ns_capable(ns, CAP_DAC_OVERRIDE) ||
42405- ns_capable(ns, CAP_DAC_READ_SEARCH))
42406+ if (ns_capable_nolog(ns, CAP_DAC_OVERRIDE))
42407 goto ok;
42408+ else {
42409+#ifdef CONFIG_GRKERNSEC
42410+ if (flags & IPERM_FLAG_RCU)
42411+ return -ECHILD;
42412+#endif
42413+ if (ns_capable(ns, CAP_DAC_READ_SEARCH) || ns_capable(ns, CAP_DAC_OVERRIDE))
42414+ goto ok;
42415+ }
42416
42417 return ret;
42418 ok:
42419@@ -703,11 +723,26 @@ follow_link(struct path *link, struct na
42420 return error;
42421 }
42422
42423+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
42424+ dentry->d_inode, dentry, nd->path.mnt)) {
42425+ error = -EACCES;
42426+ *p = ERR_PTR(error); /* no ->put_link(), please */
42427+ path_put(&nd->path);
42428+ return error;
42429+ }
42430+
42431+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
42432+ error = -ENOENT;
42433+ *p = ERR_PTR(error); /* no ->put_link(), please */
42434+ path_put(&nd->path);
42435+ return error;
42436+ }
42437+
42438 nd->last_type = LAST_BIND;
42439 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
42440 error = PTR_ERR(*p);
42441 if (!IS_ERR(*p)) {
42442- char *s = nd_get_link(nd);
42443+ const char *s = nd_get_link(nd);
42444 error = 0;
42445 if (s)
42446 error = __vfs_follow_link(nd, s);
42447@@ -1625,6 +1660,9 @@ static int do_path_lookup(int dfd, const
42448 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
42449
42450 if (likely(!retval)) {
42451+ if (*name != '/' && nd->path.dentry && nd->inode && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
42452+ return -ENOENT;
42453+
42454 if (unlikely(!audit_dummy_context())) {
42455 if (nd->path.dentry && nd->inode)
42456 audit_inode(name, nd->path.dentry);
42457@@ -1935,6 +1973,30 @@ int vfs_create(struct inode *dir, struct
42458 return error;
42459 }
42460
42461+/*
42462+ * Note that while the flag value (low two bits) for sys_open means:
42463+ * 00 - read-only
42464+ * 01 - write-only
42465+ * 10 - read-write
42466+ * 11 - special
42467+ * it is changed into
42468+ * 00 - no permissions needed
42469+ * 01 - read-permission
42470+ * 10 - write-permission
42471+ * 11 - read-write
42472+ * for the internal routines (ie open_namei()/follow_link() etc)
42473+ * This is more logical, and also allows the 00 "no perm needed"
42474+ * to be used for symlinks (where the permissions are checked
42475+ * later).
42476+ *
42477+*/
42478+static inline int open_to_namei_flags(int flag)
42479+{
42480+ if ((flag+1) & O_ACCMODE)
42481+ flag++;
42482+ return flag;
42483+}
42484+
42485 static int may_open(struct path *path, int acc_mode, int flag)
42486 {
42487 struct dentry *dentry = path->dentry;
42488@@ -1987,7 +2049,27 @@ static int may_open(struct path *path, i
42489 /*
42490 * Ensure there are no outstanding leases on the file.
42491 */
42492- return break_lease(inode, flag);
42493+ error = break_lease(inode, flag);
42494+
42495+ if (error)
42496+ return error;
42497+
42498+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
42499+ error = -EPERM;
42500+ goto exit;
42501+ }
42502+
42503+ if (gr_handle_rawio(inode)) {
42504+ error = -EPERM;
42505+ goto exit;
42506+ }
42507+
42508+ if (!gr_acl_handle_open(dentry, path->mnt, open_to_namei_flags(flag))) {
42509+ error = -EACCES;
42510+ goto exit;
42511+ }
42512+exit:
42513+ return error;
42514 }
42515
42516 static int handle_truncate(struct file *filp)
42517@@ -2013,30 +2095,6 @@ static int handle_truncate(struct file *
42518 }
42519
42520 /*
42521- * Note that while the flag value (low two bits) for sys_open means:
42522- * 00 - read-only
42523- * 01 - write-only
42524- * 10 - read-write
42525- * 11 - special
42526- * it is changed into
42527- * 00 - no permissions needed
42528- * 01 - read-permission
42529- * 10 - write-permission
42530- * 11 - read-write
42531- * for the internal routines (ie open_namei()/follow_link() etc)
42532- * This is more logical, and also allows the 00 "no perm needed"
42533- * to be used for symlinks (where the permissions are checked
42534- * later).
42535- *
42536-*/
42537-static inline int open_to_namei_flags(int flag)
42538-{
42539- if ((flag+1) & O_ACCMODE)
42540- flag++;
42541- return flag;
42542-}
42543-
42544-/*
42545 * Handle the last step of open()
42546 */
42547 static struct file *do_last(struct nameidata *nd, struct path *path,
42548@@ -2045,6 +2103,7 @@ static struct file *do_last(struct namei
42549 struct dentry *dir = nd->path.dentry;
42550 struct dentry *dentry;
42551 int open_flag = op->open_flag;
42552+ int flag = open_to_namei_flags(open_flag);
42553 int will_truncate = open_flag & O_TRUNC;
42554 int want_write = 0;
42555 int acc_mode = op->acc_mode;
42556@@ -2132,6 +2191,12 @@ static struct file *do_last(struct namei
42557 /* Negative dentry, just create the file */
42558 if (!dentry->d_inode) {
42559 int mode = op->mode;
42560+
42561+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, flag, mode)) {
42562+ error = -EACCES;
42563+ goto exit_mutex_unlock;
42564+ }
42565+
42566 if (!IS_POSIXACL(dir->d_inode))
42567 mode &= ~current_umask();
42568 /*
42569@@ -2155,6 +2220,8 @@ static struct file *do_last(struct namei
42570 error = vfs_create(dir->d_inode, dentry, mode, nd);
42571 if (error)
42572 goto exit_mutex_unlock;
42573+ else
42574+ gr_handle_create(path->dentry, path->mnt);
42575 mutex_unlock(&dir->d_inode->i_mutex);
42576 dput(nd->path.dentry);
42577 nd->path.dentry = dentry;
42578@@ -2164,6 +2231,14 @@ static struct file *do_last(struct namei
42579 /*
42580 * It already exists.
42581 */
42582+
42583+ /* only check if O_CREAT is specified, all other checks need to go
42584+ into may_open */
42585+ if (gr_handle_fifo(path->dentry, path->mnt, dir, flag, acc_mode)) {
42586+ error = -EACCES;
42587+ goto exit_mutex_unlock;
42588+ }
42589+
42590 mutex_unlock(&dir->d_inode->i_mutex);
42591 audit_inode(pathname, path->dentry);
42592
42593@@ -2450,6 +2525,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
42594 error = may_mknod(mode);
42595 if (error)
42596 goto out_dput;
42597+
42598+ if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
42599+ error = -EPERM;
42600+ goto out_dput;
42601+ }
42602+
42603+ if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
42604+ error = -EACCES;
42605+ goto out_dput;
42606+ }
42607+
42608 error = mnt_want_write(nd.path.mnt);
42609 if (error)
42610 goto out_dput;
42611@@ -2470,6 +2556,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
42612 }
42613 out_drop_write:
42614 mnt_drop_write(nd.path.mnt);
42615+
42616+ if (!error)
42617+ gr_handle_create(dentry, nd.path.mnt);
42618 out_dput:
42619 dput(dentry);
42620 out_unlock:
42621@@ -2522,6 +2611,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
42622 if (IS_ERR(dentry))
42623 goto out_unlock;
42624
42625+ if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
42626+ error = -EACCES;
42627+ goto out_dput;
42628+ }
42629+
42630 if (!IS_POSIXACL(nd.path.dentry->d_inode))
42631 mode &= ~current_umask();
42632 error = mnt_want_write(nd.path.mnt);
42633@@ -2533,6 +2627,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
42634 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
42635 out_drop_write:
42636 mnt_drop_write(nd.path.mnt);
42637+
42638+ if (!error)
42639+ gr_handle_create(dentry, nd.path.mnt);
42640+
42641 out_dput:
42642 dput(dentry);
42643 out_unlock:
42644@@ -2613,6 +2711,8 @@ static long do_rmdir(int dfd, const char
42645 char * name;
42646 struct dentry *dentry;
42647 struct nameidata nd;
42648+ ino_t saved_ino = 0;
42649+ dev_t saved_dev = 0;
42650
42651 error = user_path_parent(dfd, pathname, &nd, &name);
42652 if (error)
42653@@ -2641,6 +2741,17 @@ static long do_rmdir(int dfd, const char
42654 error = -ENOENT;
42655 goto exit3;
42656 }
42657+
42658+ if (dentry->d_inode->i_nlink <= 1) {
42659+ saved_ino = dentry->d_inode->i_ino;
42660+ saved_dev = gr_get_dev_from_dentry(dentry);
42661+ }
42662+
42663+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
42664+ error = -EACCES;
42665+ goto exit3;
42666+ }
42667+
42668 error = mnt_want_write(nd.path.mnt);
42669 if (error)
42670 goto exit3;
42671@@ -2648,6 +2759,8 @@ static long do_rmdir(int dfd, const char
42672 if (error)
42673 goto exit4;
42674 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
42675+ if (!error && (saved_dev || saved_ino))
42676+ gr_handle_delete(saved_ino, saved_dev);
42677 exit4:
42678 mnt_drop_write(nd.path.mnt);
42679 exit3:
42680@@ -2710,6 +2823,8 @@ static long do_unlinkat(int dfd, const c
42681 struct dentry *dentry;
42682 struct nameidata nd;
42683 struct inode *inode = NULL;
42684+ ino_t saved_ino = 0;
42685+ dev_t saved_dev = 0;
42686
42687 error = user_path_parent(dfd, pathname, &nd, &name);
42688 if (error)
42689@@ -2732,6 +2847,16 @@ static long do_unlinkat(int dfd, const c
42690 if (!inode)
42691 goto slashes;
42692 ihold(inode);
42693+
42694+ if (inode->i_nlink <= 1) {
42695+ saved_ino = inode->i_ino;
42696+ saved_dev = gr_get_dev_from_dentry(dentry);
42697+ }
42698+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
42699+ error = -EACCES;
42700+ goto exit2;
42701+ }
42702+
42703 error = mnt_want_write(nd.path.mnt);
42704 if (error)
42705 goto exit2;
42706@@ -2739,6 +2864,8 @@ static long do_unlinkat(int dfd, const c
42707 if (error)
42708 goto exit3;
42709 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
42710+ if (!error && (saved_ino || saved_dev))
42711+ gr_handle_delete(saved_ino, saved_dev);
42712 exit3:
42713 mnt_drop_write(nd.path.mnt);
42714 exit2:
42715@@ -2816,6 +2943,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
42716 if (IS_ERR(dentry))
42717 goto out_unlock;
42718
42719+ if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
42720+ error = -EACCES;
42721+ goto out_dput;
42722+ }
42723+
42724 error = mnt_want_write(nd.path.mnt);
42725 if (error)
42726 goto out_dput;
42727@@ -2823,6 +2955,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
42728 if (error)
42729 goto out_drop_write;
42730 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
42731+ if (!error)
42732+ gr_handle_create(dentry, nd.path.mnt);
42733 out_drop_write:
42734 mnt_drop_write(nd.path.mnt);
42735 out_dput:
42736@@ -2931,6 +3065,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
42737 error = PTR_ERR(new_dentry);
42738 if (IS_ERR(new_dentry))
42739 goto out_unlock;
42740+
42741+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
42742+ old_path.dentry->d_inode,
42743+ old_path.dentry->d_inode->i_mode, to)) {
42744+ error = -EACCES;
42745+ goto out_dput;
42746+ }
42747+
42748+ if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
42749+ old_path.dentry, old_path.mnt, to)) {
42750+ error = -EACCES;
42751+ goto out_dput;
42752+ }
42753+
42754 error = mnt_want_write(nd.path.mnt);
42755 if (error)
42756 goto out_dput;
42757@@ -2938,6 +3086,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
42758 if (error)
42759 goto out_drop_write;
42760 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
42761+ if (!error)
42762+ gr_handle_create(new_dentry, nd.path.mnt);
42763 out_drop_write:
42764 mnt_drop_write(nd.path.mnt);
42765 out_dput:
42766@@ -3113,6 +3263,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
42767 char *to;
42768 int error;
42769
42770+ pax_track_stack();
42771+
42772 error = user_path_parent(olddfd, oldname, &oldnd, &from);
42773 if (error)
42774 goto exit;
42775@@ -3169,6 +3321,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
42776 if (new_dentry == trap)
42777 goto exit5;
42778
42779+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
42780+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
42781+ to);
42782+ if (error)
42783+ goto exit5;
42784+
42785 error = mnt_want_write(oldnd.path.mnt);
42786 if (error)
42787 goto exit5;
42788@@ -3178,6 +3336,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
42789 goto exit6;
42790 error = vfs_rename(old_dir->d_inode, old_dentry,
42791 new_dir->d_inode, new_dentry);
42792+ if (!error)
42793+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
42794+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
42795 exit6:
42796 mnt_drop_write(oldnd.path.mnt);
42797 exit5:
42798@@ -3203,6 +3364,8 @@ SYSCALL_DEFINE2(rename, const char __use
42799
42800 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
42801 {
42802+ char tmpbuf[64];
42803+ const char *newlink;
42804 int len;
42805
42806 len = PTR_ERR(link);
42807@@ -3212,7 +3375,14 @@ int vfs_readlink(struct dentry *dentry,
42808 len = strlen(link);
42809 if (len > (unsigned) buflen)
42810 len = buflen;
42811- if (copy_to_user(buffer, link, len))
42812+
42813+ if (len < sizeof(tmpbuf)) {
42814+ memcpy(tmpbuf, link, len);
42815+ newlink = tmpbuf;
42816+ } else
42817+ newlink = link;
42818+
42819+ if (copy_to_user(buffer, newlink, len))
42820 len = -EFAULT;
42821 out:
42822 return len;
42823diff -urNp linux-3.0.4/fs/namespace.c linux-3.0.4/fs/namespace.c
42824--- linux-3.0.4/fs/namespace.c 2011-07-21 22:17:23.000000000 -0400
42825+++ linux-3.0.4/fs/namespace.c 2011-08-23 21:48:14.000000000 -0400
42826@@ -1328,6 +1328,9 @@ static int do_umount(struct vfsmount *mn
42827 if (!(sb->s_flags & MS_RDONLY))
42828 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
42829 up_write(&sb->s_umount);
42830+
42831+ gr_log_remount(mnt->mnt_devname, retval);
42832+
42833 return retval;
42834 }
42835
42836@@ -1347,6 +1350,9 @@ static int do_umount(struct vfsmount *mn
42837 br_write_unlock(vfsmount_lock);
42838 up_write(&namespace_sem);
42839 release_mounts(&umount_list);
42840+
42841+ gr_log_unmount(mnt->mnt_devname, retval);
42842+
42843 return retval;
42844 }
42845
42846@@ -2338,6 +2344,16 @@ long do_mount(char *dev_name, char *dir_
42847 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
42848 MS_STRICTATIME);
42849
42850+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
42851+ retval = -EPERM;
42852+ goto dput_out;
42853+ }
42854+
42855+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
42856+ retval = -EPERM;
42857+ goto dput_out;
42858+ }
42859+
42860 if (flags & MS_REMOUNT)
42861 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
42862 data_page);
42863@@ -2352,6 +2368,9 @@ long do_mount(char *dev_name, char *dir_
42864 dev_name, data_page);
42865 dput_out:
42866 path_put(&path);
42867+
42868+ gr_log_mount(dev_name, dir_name, retval);
42869+
42870 return retval;
42871 }
42872
42873@@ -2575,6 +2594,11 @@ SYSCALL_DEFINE2(pivot_root, const char _
42874 if (error)
42875 goto out2;
42876
42877+ if (gr_handle_chroot_pivot()) {
42878+ error = -EPERM;
42879+ goto out2;
42880+ }
42881+
42882 get_fs_root(current->fs, &root);
42883 error = lock_mount(&old);
42884 if (error)
42885diff -urNp linux-3.0.4/fs/ncpfs/dir.c linux-3.0.4/fs/ncpfs/dir.c
42886--- linux-3.0.4/fs/ncpfs/dir.c 2011-07-21 22:17:23.000000000 -0400
42887+++ linux-3.0.4/fs/ncpfs/dir.c 2011-08-23 21:48:14.000000000 -0400
42888@@ -299,6 +299,8 @@ ncp_lookup_validate(struct dentry *dentr
42889 int res, val = 0, len;
42890 __u8 __name[NCP_MAXPATHLEN + 1];
42891
42892+ pax_track_stack();
42893+
42894 if (dentry == dentry->d_sb->s_root)
42895 return 1;
42896
42897@@ -844,6 +846,8 @@ static struct dentry *ncp_lookup(struct
42898 int error, res, len;
42899 __u8 __name[NCP_MAXPATHLEN + 1];
42900
42901+ pax_track_stack();
42902+
42903 error = -EIO;
42904 if (!ncp_conn_valid(server))
42905 goto finished;
42906@@ -931,6 +935,8 @@ int ncp_create_new(struct inode *dir, st
42907 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
42908 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
42909
42910+ pax_track_stack();
42911+
42912 ncp_age_dentry(server, dentry);
42913 len = sizeof(__name);
42914 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
42915@@ -992,6 +998,8 @@ static int ncp_mkdir(struct inode *dir,
42916 int error, len;
42917 __u8 __name[NCP_MAXPATHLEN + 1];
42918
42919+ pax_track_stack();
42920+
42921 DPRINTK("ncp_mkdir: making %s/%s\n",
42922 dentry->d_parent->d_name.name, dentry->d_name.name);
42923
42924@@ -1140,6 +1148,8 @@ static int ncp_rename(struct inode *old_
42925 int old_len, new_len;
42926 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
42927
42928+ pax_track_stack();
42929+
42930 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
42931 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
42932 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
42933diff -urNp linux-3.0.4/fs/ncpfs/inode.c linux-3.0.4/fs/ncpfs/inode.c
42934--- linux-3.0.4/fs/ncpfs/inode.c 2011-07-21 22:17:23.000000000 -0400
42935+++ linux-3.0.4/fs/ncpfs/inode.c 2011-08-23 21:48:14.000000000 -0400
42936@@ -461,6 +461,8 @@ static int ncp_fill_super(struct super_b
42937 #endif
42938 struct ncp_entry_info finfo;
42939
42940+ pax_track_stack();
42941+
42942 memset(&data, 0, sizeof(data));
42943 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
42944 if (!server)
42945diff -urNp linux-3.0.4/fs/nfs/inode.c linux-3.0.4/fs/nfs/inode.c
42946--- linux-3.0.4/fs/nfs/inode.c 2011-07-21 22:17:23.000000000 -0400
42947+++ linux-3.0.4/fs/nfs/inode.c 2011-08-23 21:47:56.000000000 -0400
42948@@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct
42949 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
42950 nfsi->attrtimeo_timestamp = jiffies;
42951
42952- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
42953+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
42954 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
42955 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
42956 else
42957@@ -1000,16 +1000,16 @@ static int nfs_size_need_update(const st
42958 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
42959 }
42960
42961-static atomic_long_t nfs_attr_generation_counter;
42962+static atomic_long_unchecked_t nfs_attr_generation_counter;
42963
42964 static unsigned long nfs_read_attr_generation_counter(void)
42965 {
42966- return atomic_long_read(&nfs_attr_generation_counter);
42967+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
42968 }
42969
42970 unsigned long nfs_inc_attr_generation_counter(void)
42971 {
42972- return atomic_long_inc_return(&nfs_attr_generation_counter);
42973+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
42974 }
42975
42976 void nfs_fattr_init(struct nfs_fattr *fattr)
42977diff -urNp linux-3.0.4/fs/nfsd/nfs4state.c linux-3.0.4/fs/nfsd/nfs4state.c
42978--- linux-3.0.4/fs/nfsd/nfs4state.c 2011-09-02 18:11:21.000000000 -0400
42979+++ linux-3.0.4/fs/nfsd/nfs4state.c 2011-08-23 21:48:14.000000000 -0400
42980@@ -3794,6 +3794,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
42981 unsigned int strhashval;
42982 int err;
42983
42984+ pax_track_stack();
42985+
42986 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
42987 (long long) lock->lk_offset,
42988 (long long) lock->lk_length);
42989diff -urNp linux-3.0.4/fs/nfsd/nfs4xdr.c linux-3.0.4/fs/nfsd/nfs4xdr.c
42990--- linux-3.0.4/fs/nfsd/nfs4xdr.c 2011-07-21 22:17:23.000000000 -0400
42991+++ linux-3.0.4/fs/nfsd/nfs4xdr.c 2011-08-23 21:48:14.000000000 -0400
42992@@ -1788,6 +1788,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
42993 .dentry = dentry,
42994 };
42995
42996+ pax_track_stack();
42997+
42998 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
42999 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
43000 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
43001diff -urNp linux-3.0.4/fs/nfsd/vfs.c linux-3.0.4/fs/nfsd/vfs.c
43002--- linux-3.0.4/fs/nfsd/vfs.c 2011-07-21 22:17:23.000000000 -0400
43003+++ linux-3.0.4/fs/nfsd/vfs.c 2011-10-06 04:17:55.000000000 -0400
43004@@ -896,7 +896,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
43005 } else {
43006 oldfs = get_fs();
43007 set_fs(KERNEL_DS);
43008- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
43009+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
43010 set_fs(oldfs);
43011 }
43012
43013@@ -1000,7 +1000,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
43014
43015 /* Write the data. */
43016 oldfs = get_fs(); set_fs(KERNEL_DS);
43017- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
43018+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
43019 set_fs(oldfs);
43020 if (host_err < 0)
43021 goto out_nfserr;
43022@@ -1535,7 +1535,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
43023 */
43024
43025 oldfs = get_fs(); set_fs(KERNEL_DS);
43026- host_err = inode->i_op->readlink(dentry, buf, *lenp);
43027+ host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
43028 set_fs(oldfs);
43029
43030 if (host_err < 0)
43031diff -urNp linux-3.0.4/fs/notify/fanotify/fanotify_user.c linux-3.0.4/fs/notify/fanotify/fanotify_user.c
43032--- linux-3.0.4/fs/notify/fanotify/fanotify_user.c 2011-07-21 22:17:23.000000000 -0400
43033+++ linux-3.0.4/fs/notify/fanotify/fanotify_user.c 2011-08-23 21:48:14.000000000 -0400
43034@@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct
43035 goto out_close_fd;
43036
43037 ret = -EFAULT;
43038- if (copy_to_user(buf, &fanotify_event_metadata,
43039+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
43040+ copy_to_user(buf, &fanotify_event_metadata,
43041 fanotify_event_metadata.event_len))
43042 goto out_kill_access_response;
43043
43044diff -urNp linux-3.0.4/fs/notify/notification.c linux-3.0.4/fs/notify/notification.c
43045--- linux-3.0.4/fs/notify/notification.c 2011-07-21 22:17:23.000000000 -0400
43046+++ linux-3.0.4/fs/notify/notification.c 2011-08-23 21:47:56.000000000 -0400
43047@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
43048 * get set to 0 so it will never get 'freed'
43049 */
43050 static struct fsnotify_event *q_overflow_event;
43051-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
43052+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
43053
43054 /**
43055 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
43056@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
43057 */
43058 u32 fsnotify_get_cookie(void)
43059 {
43060- return atomic_inc_return(&fsnotify_sync_cookie);
43061+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
43062 }
43063 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
43064
43065diff -urNp linux-3.0.4/fs/ntfs/dir.c linux-3.0.4/fs/ntfs/dir.c
43066--- linux-3.0.4/fs/ntfs/dir.c 2011-07-21 22:17:23.000000000 -0400
43067+++ linux-3.0.4/fs/ntfs/dir.c 2011-08-23 21:47:56.000000000 -0400
43068@@ -1329,7 +1329,7 @@ find_next_index_buffer:
43069 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
43070 ~(s64)(ndir->itype.index.block_size - 1)));
43071 /* Bounds checks. */
43072- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
43073+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
43074 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
43075 "inode 0x%lx or driver bug.", vdir->i_ino);
43076 goto err_out;
43077diff -urNp linux-3.0.4/fs/ntfs/file.c linux-3.0.4/fs/ntfs/file.c
43078--- linux-3.0.4/fs/ntfs/file.c 2011-07-21 22:17:23.000000000 -0400
43079+++ linux-3.0.4/fs/ntfs/file.c 2011-08-23 21:47:56.000000000 -0400
43080@@ -2222,6 +2222,6 @@ const struct inode_operations ntfs_file_
43081 #endif /* NTFS_RW */
43082 };
43083
43084-const struct file_operations ntfs_empty_file_ops = {};
43085+const struct file_operations ntfs_empty_file_ops __read_only;
43086
43087-const struct inode_operations ntfs_empty_inode_ops = {};
43088+const struct inode_operations ntfs_empty_inode_ops __read_only;
43089diff -urNp linux-3.0.4/fs/ocfs2/localalloc.c linux-3.0.4/fs/ocfs2/localalloc.c
43090--- linux-3.0.4/fs/ocfs2/localalloc.c 2011-07-21 22:17:23.000000000 -0400
43091+++ linux-3.0.4/fs/ocfs2/localalloc.c 2011-08-23 21:47:56.000000000 -0400
43092@@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_windo
43093 goto bail;
43094 }
43095
43096- atomic_inc(&osb->alloc_stats.moves);
43097+ atomic_inc_unchecked(&osb->alloc_stats.moves);
43098
43099 bail:
43100 if (handle)
43101diff -urNp linux-3.0.4/fs/ocfs2/namei.c linux-3.0.4/fs/ocfs2/namei.c
43102--- linux-3.0.4/fs/ocfs2/namei.c 2011-07-21 22:17:23.000000000 -0400
43103+++ linux-3.0.4/fs/ocfs2/namei.c 2011-08-23 21:48:14.000000000 -0400
43104@@ -1063,6 +1063,8 @@ static int ocfs2_rename(struct inode *ol
43105 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
43106 struct ocfs2_dir_lookup_result target_insert = { NULL, };
43107
43108+ pax_track_stack();
43109+
43110 /* At some point it might be nice to break this function up a
43111 * bit. */
43112
43113diff -urNp linux-3.0.4/fs/ocfs2/ocfs2.h linux-3.0.4/fs/ocfs2/ocfs2.h
43114--- linux-3.0.4/fs/ocfs2/ocfs2.h 2011-07-21 22:17:23.000000000 -0400
43115+++ linux-3.0.4/fs/ocfs2/ocfs2.h 2011-08-23 21:47:56.000000000 -0400
43116@@ -235,11 +235,11 @@ enum ocfs2_vol_state
43117
43118 struct ocfs2_alloc_stats
43119 {
43120- atomic_t moves;
43121- atomic_t local_data;
43122- atomic_t bitmap_data;
43123- atomic_t bg_allocs;
43124- atomic_t bg_extends;
43125+ atomic_unchecked_t moves;
43126+ atomic_unchecked_t local_data;
43127+ atomic_unchecked_t bitmap_data;
43128+ atomic_unchecked_t bg_allocs;
43129+ atomic_unchecked_t bg_extends;
43130 };
43131
43132 enum ocfs2_local_alloc_state
43133diff -urNp linux-3.0.4/fs/ocfs2/suballoc.c linux-3.0.4/fs/ocfs2/suballoc.c
43134--- linux-3.0.4/fs/ocfs2/suballoc.c 2011-07-21 22:17:23.000000000 -0400
43135+++ linux-3.0.4/fs/ocfs2/suballoc.c 2011-08-23 21:47:56.000000000 -0400
43136@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(s
43137 mlog_errno(status);
43138 goto bail;
43139 }
43140- atomic_inc(&osb->alloc_stats.bg_extends);
43141+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
43142
43143 /* You should never ask for this much metadata */
43144 BUG_ON(bits_wanted >
43145@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handl
43146 mlog_errno(status);
43147 goto bail;
43148 }
43149- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
43150+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
43151
43152 *suballoc_loc = res.sr_bg_blkno;
43153 *suballoc_bit_start = res.sr_bit_offset;
43154@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_
43155 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
43156 res->sr_bits);
43157
43158- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
43159+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
43160
43161 BUG_ON(res->sr_bits != 1);
43162
43163@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *hand
43164 mlog_errno(status);
43165 goto bail;
43166 }
43167- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
43168+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
43169
43170 BUG_ON(res.sr_bits != 1);
43171
43172@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *han
43173 cluster_start,
43174 num_clusters);
43175 if (!status)
43176- atomic_inc(&osb->alloc_stats.local_data);
43177+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
43178 } else {
43179 if (min_clusters > (osb->bitmap_cpg - 1)) {
43180 /* The only paths asking for contiguousness
43181@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *han
43182 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
43183 res.sr_bg_blkno,
43184 res.sr_bit_offset);
43185- atomic_inc(&osb->alloc_stats.bitmap_data);
43186+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
43187 *num_clusters = res.sr_bits;
43188 }
43189 }
43190diff -urNp linux-3.0.4/fs/ocfs2/super.c linux-3.0.4/fs/ocfs2/super.c
43191--- linux-3.0.4/fs/ocfs2/super.c 2011-07-21 22:17:23.000000000 -0400
43192+++ linux-3.0.4/fs/ocfs2/super.c 2011-08-23 21:47:56.000000000 -0400
43193@@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
43194 "%10s => GlobalAllocs: %d LocalAllocs: %d "
43195 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
43196 "Stats",
43197- atomic_read(&osb->alloc_stats.bitmap_data),
43198- atomic_read(&osb->alloc_stats.local_data),
43199- atomic_read(&osb->alloc_stats.bg_allocs),
43200- atomic_read(&osb->alloc_stats.moves),
43201- atomic_read(&osb->alloc_stats.bg_extends));
43202+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
43203+ atomic_read_unchecked(&osb->alloc_stats.local_data),
43204+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
43205+ atomic_read_unchecked(&osb->alloc_stats.moves),
43206+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
43207
43208 out += snprintf(buf + out, len - out,
43209 "%10s => State: %u Descriptor: %llu Size: %u bits "
43210@@ -2112,11 +2112,11 @@ static int ocfs2_initialize_super(struct
43211 spin_lock_init(&osb->osb_xattr_lock);
43212 ocfs2_init_steal_slots(osb);
43213
43214- atomic_set(&osb->alloc_stats.moves, 0);
43215- atomic_set(&osb->alloc_stats.local_data, 0);
43216- atomic_set(&osb->alloc_stats.bitmap_data, 0);
43217- atomic_set(&osb->alloc_stats.bg_allocs, 0);
43218- atomic_set(&osb->alloc_stats.bg_extends, 0);
43219+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
43220+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
43221+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
43222+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
43223+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
43224
43225 /* Copy the blockcheck stats from the superblock probe */
43226 osb->osb_ecc_stats = *stats;
43227diff -urNp linux-3.0.4/fs/ocfs2/symlink.c linux-3.0.4/fs/ocfs2/symlink.c
43228--- linux-3.0.4/fs/ocfs2/symlink.c 2011-07-21 22:17:23.000000000 -0400
43229+++ linux-3.0.4/fs/ocfs2/symlink.c 2011-08-23 21:47:56.000000000 -0400
43230@@ -142,7 +142,7 @@ bail:
43231
43232 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
43233 {
43234- char *link = nd_get_link(nd);
43235+ const char *link = nd_get_link(nd);
43236 if (!IS_ERR(link))
43237 kfree(link);
43238 }
43239diff -urNp linux-3.0.4/fs/open.c linux-3.0.4/fs/open.c
43240--- linux-3.0.4/fs/open.c 2011-07-21 22:17:23.000000000 -0400
43241+++ linux-3.0.4/fs/open.c 2011-09-14 09:16:46.000000000 -0400
43242@@ -112,6 +112,10 @@ static long do_sys_truncate(const char _
43243 error = locks_verify_truncate(inode, NULL, length);
43244 if (!error)
43245 error = security_path_truncate(&path);
43246+
43247+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
43248+ error = -EACCES;
43249+
43250 if (!error)
43251 error = do_truncate(path.dentry, length, 0, NULL);
43252
43253@@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
43254 if (__mnt_is_readonly(path.mnt))
43255 res = -EROFS;
43256
43257+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
43258+ res = -EACCES;
43259+
43260 out_path_release:
43261 path_put(&path);
43262 out:
43263@@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user
43264 if (error)
43265 goto dput_and_out;
43266
43267+ gr_log_chdir(path.dentry, path.mnt);
43268+
43269 set_fs_pwd(current->fs, &path);
43270
43271 dput_and_out:
43272@@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
43273 goto out_putf;
43274
43275 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
43276+
43277+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
43278+ error = -EPERM;
43279+
43280+ if (!error)
43281+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
43282+
43283 if (!error)
43284 set_fs_pwd(current->fs, &file->f_path);
43285 out_putf:
43286@@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __use
43287 if (error)
43288 goto dput_and_out;
43289
43290+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
43291+ goto dput_and_out;
43292+
43293 set_fs_root(current->fs, &path);
43294+
43295+ gr_handle_chroot_chdir(&path);
43296+
43297 error = 0;
43298 dput_and_out:
43299 path_put(&path);
43300@@ -466,12 +488,25 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
43301 err = mnt_want_write_file(file);
43302 if (err)
43303 goto out_putf;
43304+
43305 mutex_lock(&inode->i_mutex);
43306+
43307+ if (!gr_acl_handle_fchmod(dentry, file->f_vfsmnt, mode)) {
43308+ err = -EACCES;
43309+ goto out_unlock;
43310+ }
43311+
43312 err = security_path_chmod(dentry, file->f_vfsmnt, mode);
43313 if (err)
43314 goto out_unlock;
43315 if (mode == (mode_t) -1)
43316 mode = inode->i_mode;
43317+
43318+ if (gr_handle_chroot_chmod(dentry, file->f_vfsmnt, mode)) {
43319+ err = -EACCES;
43320+ goto out_unlock;
43321+ }
43322+
43323 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
43324 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
43325 err = notify_change(dentry, &newattrs);
43326@@ -499,12 +534,25 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
43327 error = mnt_want_write(path.mnt);
43328 if (error)
43329 goto dput_and_out;
43330+
43331 mutex_lock(&inode->i_mutex);
43332+
43333+ if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
43334+ error = -EACCES;
43335+ goto out_unlock;
43336+ }
43337+
43338 error = security_path_chmod(path.dentry, path.mnt, mode);
43339 if (error)
43340 goto out_unlock;
43341 if (mode == (mode_t) -1)
43342 mode = inode->i_mode;
43343+
43344+ if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
43345+ error = -EACCES;
43346+ goto out_unlock;
43347+ }
43348+
43349 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
43350 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
43351 error = notify_change(path.dentry, &newattrs);
43352@@ -528,6 +576,9 @@ static int chown_common(struct path *pat
43353 int error;
43354 struct iattr newattrs;
43355
43356+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
43357+ return -EACCES;
43358+
43359 newattrs.ia_valid = ATTR_CTIME;
43360 if (user != (uid_t) -1) {
43361 newattrs.ia_valid |= ATTR_UID;
43362@@ -998,7 +1049,10 @@ long do_sys_open(int dfd, const char __u
43363 if (!IS_ERR(tmp)) {
43364 fd = get_unused_fd_flags(flags);
43365 if (fd >= 0) {
43366- struct file *f = do_filp_open(dfd, tmp, &op, lookup);
43367+ struct file *f;
43368+ /* don't allow to be set by userland */
43369+ flags &= ~FMODE_GREXEC;
43370+ f = do_filp_open(dfd, tmp, &op, lookup);
43371 if (IS_ERR(f)) {
43372 put_unused_fd(fd);
43373 fd = PTR_ERR(f);
43374diff -urNp linux-3.0.4/fs/partitions/ldm.c linux-3.0.4/fs/partitions/ldm.c
43375--- linux-3.0.4/fs/partitions/ldm.c 2011-07-21 22:17:23.000000000 -0400
43376+++ linux-3.0.4/fs/partitions/ldm.c 2011-08-23 21:48:14.000000000 -0400
43377@@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
43378 ldm_error ("A VBLK claims to have %d parts.", num);
43379 return false;
43380 }
43381+
43382 if (rec >= num) {
43383 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
43384 return false;
43385@@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
43386 goto found;
43387 }
43388
43389- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
43390+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
43391 if (!f) {
43392 ldm_crit ("Out of memory.");
43393 return false;
43394diff -urNp linux-3.0.4/fs/pipe.c linux-3.0.4/fs/pipe.c
43395--- linux-3.0.4/fs/pipe.c 2011-07-21 22:17:23.000000000 -0400
43396+++ linux-3.0.4/fs/pipe.c 2011-08-23 21:48:14.000000000 -0400
43397@@ -420,9 +420,9 @@ redo:
43398 }
43399 if (bufs) /* More to do? */
43400 continue;
43401- if (!pipe->writers)
43402+ if (!atomic_read(&pipe->writers))
43403 break;
43404- if (!pipe->waiting_writers) {
43405+ if (!atomic_read(&pipe->waiting_writers)) {
43406 /* syscall merging: Usually we must not sleep
43407 * if O_NONBLOCK is set, or if we got some data.
43408 * But if a writer sleeps in kernel space, then
43409@@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const str
43410 mutex_lock(&inode->i_mutex);
43411 pipe = inode->i_pipe;
43412
43413- if (!pipe->readers) {
43414+ if (!atomic_read(&pipe->readers)) {
43415 send_sig(SIGPIPE, current, 0);
43416 ret = -EPIPE;
43417 goto out;
43418@@ -530,7 +530,7 @@ redo1:
43419 for (;;) {
43420 int bufs;
43421
43422- if (!pipe->readers) {
43423+ if (!atomic_read(&pipe->readers)) {
43424 send_sig(SIGPIPE, current, 0);
43425 if (!ret)
43426 ret = -EPIPE;
43427@@ -616,9 +616,9 @@ redo2:
43428 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
43429 do_wakeup = 0;
43430 }
43431- pipe->waiting_writers++;
43432+ atomic_inc(&pipe->waiting_writers);
43433 pipe_wait(pipe);
43434- pipe->waiting_writers--;
43435+ atomic_dec(&pipe->waiting_writers);
43436 }
43437 out:
43438 mutex_unlock(&inode->i_mutex);
43439@@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table
43440 mask = 0;
43441 if (filp->f_mode & FMODE_READ) {
43442 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
43443- if (!pipe->writers && filp->f_version != pipe->w_counter)
43444+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
43445 mask |= POLLHUP;
43446 }
43447
43448@@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table
43449 * Most Unices do not set POLLERR for FIFOs but on Linux they
43450 * behave exactly like pipes for poll().
43451 */
43452- if (!pipe->readers)
43453+ if (!atomic_read(&pipe->readers))
43454 mask |= POLLERR;
43455 }
43456
43457@@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int de
43458
43459 mutex_lock(&inode->i_mutex);
43460 pipe = inode->i_pipe;
43461- pipe->readers -= decr;
43462- pipe->writers -= decw;
43463+ atomic_sub(decr, &pipe->readers);
43464+ atomic_sub(decw, &pipe->writers);
43465
43466- if (!pipe->readers && !pipe->writers) {
43467+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
43468 free_pipe_info(inode);
43469 } else {
43470 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
43471@@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, stru
43472
43473 if (inode->i_pipe) {
43474 ret = 0;
43475- inode->i_pipe->readers++;
43476+ atomic_inc(&inode->i_pipe->readers);
43477 }
43478
43479 mutex_unlock(&inode->i_mutex);
43480@@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, str
43481
43482 if (inode->i_pipe) {
43483 ret = 0;
43484- inode->i_pipe->writers++;
43485+ atomic_inc(&inode->i_pipe->writers);
43486 }
43487
43488 mutex_unlock(&inode->i_mutex);
43489@@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, stru
43490 if (inode->i_pipe) {
43491 ret = 0;
43492 if (filp->f_mode & FMODE_READ)
43493- inode->i_pipe->readers++;
43494+ atomic_inc(&inode->i_pipe->readers);
43495 if (filp->f_mode & FMODE_WRITE)
43496- inode->i_pipe->writers++;
43497+ atomic_inc(&inode->i_pipe->writers);
43498 }
43499
43500 mutex_unlock(&inode->i_mutex);
43501@@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
43502 inode->i_pipe = NULL;
43503 }
43504
43505-static struct vfsmount *pipe_mnt __read_mostly;
43506+struct vfsmount *pipe_mnt __read_mostly;
43507
43508 /*
43509 * pipefs_dname() is called from d_path().
43510@@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(voi
43511 goto fail_iput;
43512 inode->i_pipe = pipe;
43513
43514- pipe->readers = pipe->writers = 1;
43515+ atomic_set(&pipe->readers, 1);
43516+ atomic_set(&pipe->writers, 1);
43517 inode->i_fop = &rdwr_pipefifo_fops;
43518
43519 /*
43520diff -urNp linux-3.0.4/fs/proc/array.c linux-3.0.4/fs/proc/array.c
43521--- linux-3.0.4/fs/proc/array.c 2011-07-21 22:17:23.000000000 -0400
43522+++ linux-3.0.4/fs/proc/array.c 2011-08-23 21:48:14.000000000 -0400
43523@@ -60,6 +60,7 @@
43524 #include <linux/tty.h>
43525 #include <linux/string.h>
43526 #include <linux/mman.h>
43527+#include <linux/grsecurity.h>
43528 #include <linux/proc_fs.h>
43529 #include <linux/ioport.h>
43530 #include <linux/uaccess.h>
43531@@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq
43532 seq_putc(m, '\n');
43533 }
43534
43535+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
43536+static inline void task_pax(struct seq_file *m, struct task_struct *p)
43537+{
43538+ if (p->mm)
43539+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
43540+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
43541+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
43542+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
43543+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
43544+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
43545+ else
43546+ seq_printf(m, "PaX:\t-----\n");
43547+}
43548+#endif
43549+
43550 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
43551 struct pid *pid, struct task_struct *task)
43552 {
43553@@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m,
43554 task_cpus_allowed(m, task);
43555 cpuset_task_status_allowed(m, task);
43556 task_context_switch_counts(m, task);
43557+
43558+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
43559+ task_pax(m, task);
43560+#endif
43561+
43562+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
43563+ task_grsec_rbac(m, task);
43564+#endif
43565+
43566 return 0;
43567 }
43568
43569+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43570+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
43571+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
43572+ _mm->pax_flags & MF_PAX_SEGMEXEC))
43573+#endif
43574+
43575 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
43576 struct pid *pid, struct task_struct *task, int whole)
43577 {
43578@@ -375,9 +406,11 @@ static int do_task_stat(struct seq_file
43579 cputime_t cutime, cstime, utime, stime;
43580 cputime_t cgtime, gtime;
43581 unsigned long rsslim = 0;
43582- char tcomm[sizeof(task->comm)];
43583+ char tcomm[sizeof(task->comm)] = { 0 };
43584 unsigned long flags;
43585
43586+ pax_track_stack();
43587+
43588 state = *get_task_state(task);
43589 vsize = eip = esp = 0;
43590 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
43591@@ -449,6 +482,19 @@ static int do_task_stat(struct seq_file
43592 gtime = task->gtime;
43593 }
43594
43595+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43596+ if (PAX_RAND_FLAGS(mm)) {
43597+ eip = 0;
43598+ esp = 0;
43599+ wchan = 0;
43600+ }
43601+#endif
43602+#ifdef CONFIG_GRKERNSEC_HIDESYM
43603+ wchan = 0;
43604+ eip =0;
43605+ esp =0;
43606+#endif
43607+
43608 /* scale priority and nice values from timeslices to -20..20 */
43609 /* to make it look like a "normal" Unix priority/nice value */
43610 priority = task_prio(task);
43611@@ -489,9 +535,15 @@ static int do_task_stat(struct seq_file
43612 vsize,
43613 mm ? get_mm_rss(mm) : 0,
43614 rsslim,
43615+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43616+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
43617+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
43618+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
43619+#else
43620 mm ? (permitted ? mm->start_code : 1) : 0,
43621 mm ? (permitted ? mm->end_code : 1) : 0,
43622 (permitted && mm) ? mm->start_stack : 0,
43623+#endif
43624 esp,
43625 eip,
43626 /* The signal information here is obsolete.
43627@@ -544,3 +596,18 @@ int proc_pid_statm(struct seq_file *m, s
43628
43629 return 0;
43630 }
43631+
43632+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
43633+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
43634+{
43635+ u32 curr_ip = 0;
43636+ unsigned long flags;
43637+
43638+ if (lock_task_sighand(task, &flags)) {
43639+ curr_ip = task->signal->curr_ip;
43640+ unlock_task_sighand(task, &flags);
43641+ }
43642+
43643+ return sprintf(buffer, "%pI4\n", &curr_ip);
43644+}
43645+#endif
43646diff -urNp linux-3.0.4/fs/proc/base.c linux-3.0.4/fs/proc/base.c
43647--- linux-3.0.4/fs/proc/base.c 2011-09-02 18:11:21.000000000 -0400
43648+++ linux-3.0.4/fs/proc/base.c 2011-09-13 14:50:28.000000000 -0400
43649@@ -107,6 +107,22 @@ struct pid_entry {
43650 union proc_op op;
43651 };
43652
43653+struct getdents_callback {
43654+ struct linux_dirent __user * current_dir;
43655+ struct linux_dirent __user * previous;
43656+ struct file * file;
43657+ int count;
43658+ int error;
43659+};
43660+
43661+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
43662+ loff_t offset, u64 ino, unsigned int d_type)
43663+{
43664+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
43665+ buf->error = -EINVAL;
43666+ return 0;
43667+}
43668+
43669 #define NOD(NAME, MODE, IOP, FOP, OP) { \
43670 .name = (NAME), \
43671 .len = sizeof(NAME) - 1, \
43672@@ -209,6 +225,9 @@ static struct mm_struct *__check_mem_per
43673 if (task == current)
43674 return mm;
43675
43676+ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
43677+ return ERR_PTR(-EPERM);
43678+
43679 /*
43680 * If current is actively ptrace'ing, and would also be
43681 * permitted to freshly attach with ptrace now, permit it.
43682@@ -282,6 +301,9 @@ static int proc_pid_cmdline(struct task_
43683 if (!mm->arg_end)
43684 goto out_mm; /* Shh! No looking before we're done */
43685
43686+ if (gr_acl_handle_procpidmem(task))
43687+ goto out_mm;
43688+
43689 len = mm->arg_end - mm->arg_start;
43690
43691 if (len > PAGE_SIZE)
43692@@ -309,12 +331,28 @@ out:
43693 return res;
43694 }
43695
43696+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43697+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
43698+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
43699+ _mm->pax_flags & MF_PAX_SEGMEXEC))
43700+#endif
43701+
43702 static int proc_pid_auxv(struct task_struct *task, char *buffer)
43703 {
43704 struct mm_struct *mm = mm_for_maps(task);
43705 int res = PTR_ERR(mm);
43706 if (mm && !IS_ERR(mm)) {
43707 unsigned int nwords = 0;
43708+
43709+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43710+ /* allow if we're currently ptracing this task */
43711+ if (PAX_RAND_FLAGS(mm) &&
43712+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
43713+ mmput(mm);
43714+ return 0;
43715+ }
43716+#endif
43717+
43718 do {
43719 nwords += 2;
43720 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
43721@@ -328,7 +366,7 @@ static int proc_pid_auxv(struct task_str
43722 }
43723
43724
43725-#ifdef CONFIG_KALLSYMS
43726+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
43727 /*
43728 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
43729 * Returns the resolved symbol. If that fails, simply return the address.
43730@@ -367,7 +405,7 @@ static void unlock_trace(struct task_str
43731 mutex_unlock(&task->signal->cred_guard_mutex);
43732 }
43733
43734-#ifdef CONFIG_STACKTRACE
43735+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
43736
43737 #define MAX_STACK_TRACE_DEPTH 64
43738
43739@@ -558,7 +596,7 @@ static int proc_pid_limits(struct task_s
43740 return count;
43741 }
43742
43743-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
43744+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
43745 static int proc_pid_syscall(struct task_struct *task, char *buffer)
43746 {
43747 long nr;
43748@@ -587,7 +625,7 @@ static int proc_pid_syscall(struct task_
43749 /************************************************************************/
43750
43751 /* permission checks */
43752-static int proc_fd_access_allowed(struct inode *inode)
43753+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
43754 {
43755 struct task_struct *task;
43756 int allowed = 0;
43757@@ -597,7 +635,10 @@ static int proc_fd_access_allowed(struct
43758 */
43759 task = get_proc_task(inode);
43760 if (task) {
43761- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
43762+ if (log)
43763+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
43764+ else
43765+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
43766 put_task_struct(task);
43767 }
43768 return allowed;
43769@@ -978,6 +1019,9 @@ static ssize_t environ_read(struct file
43770 if (!task)
43771 goto out_no_task;
43772
43773+ if (gr_acl_handle_procpidmem(task))
43774+ goto out;
43775+
43776 ret = -ENOMEM;
43777 page = (char *)__get_free_page(GFP_TEMPORARY);
43778 if (!page)
43779@@ -1614,7 +1658,7 @@ static void *proc_pid_follow_link(struct
43780 path_put(&nd->path);
43781
43782 /* Are we allowed to snoop on the tasks file descriptors? */
43783- if (!proc_fd_access_allowed(inode))
43784+ if (!proc_fd_access_allowed(inode,0))
43785 goto out;
43786
43787 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
43788@@ -1653,8 +1697,18 @@ static int proc_pid_readlink(struct dent
43789 struct path path;
43790
43791 /* Are we allowed to snoop on the tasks file descriptors? */
43792- if (!proc_fd_access_allowed(inode))
43793- goto out;
43794+ /* logging this is needed for learning on chromium to work properly,
43795+ but we don't want to flood the logs from 'ps' which does a readlink
43796+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
43797+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
43798+ */
43799+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
43800+ if (!proc_fd_access_allowed(inode,0))
43801+ goto out;
43802+ } else {
43803+ if (!proc_fd_access_allowed(inode,1))
43804+ goto out;
43805+ }
43806
43807 error = PROC_I(inode)->op.proc_get_link(inode, &path);
43808 if (error)
43809@@ -1719,7 +1773,11 @@ struct inode *proc_pid_make_inode(struct
43810 rcu_read_lock();
43811 cred = __task_cred(task);
43812 inode->i_uid = cred->euid;
43813+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
43814+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
43815+#else
43816 inode->i_gid = cred->egid;
43817+#endif
43818 rcu_read_unlock();
43819 }
43820 security_task_to_inode(task, inode);
43821@@ -1737,6 +1795,9 @@ int pid_getattr(struct vfsmount *mnt, st
43822 struct inode *inode = dentry->d_inode;
43823 struct task_struct *task;
43824 const struct cred *cred;
43825+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43826+ const struct cred *tmpcred = current_cred();
43827+#endif
43828
43829 generic_fillattr(inode, stat);
43830
43831@@ -1744,13 +1805,41 @@ int pid_getattr(struct vfsmount *mnt, st
43832 stat->uid = 0;
43833 stat->gid = 0;
43834 task = pid_task(proc_pid(inode), PIDTYPE_PID);
43835+
43836+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
43837+ rcu_read_unlock();
43838+ return -ENOENT;
43839+ }
43840+
43841 if (task) {
43842+ cred = __task_cred(task);
43843+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43844+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
43845+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
43846+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
43847+#endif
43848+ ) {
43849+#endif
43850 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
43851+#ifdef CONFIG_GRKERNSEC_PROC_USER
43852+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
43853+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43854+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
43855+#endif
43856 task_dumpable(task)) {
43857- cred = __task_cred(task);
43858 stat->uid = cred->euid;
43859+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
43860+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
43861+#else
43862 stat->gid = cred->egid;
43863+#endif
43864 }
43865+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43866+ } else {
43867+ rcu_read_unlock();
43868+ return -ENOENT;
43869+ }
43870+#endif
43871 }
43872 rcu_read_unlock();
43873 return 0;
43874@@ -1787,11 +1876,20 @@ int pid_revalidate(struct dentry *dentry
43875
43876 if (task) {
43877 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
43878+#ifdef CONFIG_GRKERNSEC_PROC_USER
43879+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
43880+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43881+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
43882+#endif
43883 task_dumpable(task)) {
43884 rcu_read_lock();
43885 cred = __task_cred(task);
43886 inode->i_uid = cred->euid;
43887+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
43888+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
43889+#else
43890 inode->i_gid = cred->egid;
43891+#endif
43892 rcu_read_unlock();
43893 } else {
43894 inode->i_uid = 0;
43895@@ -1909,7 +2007,8 @@ static int proc_fd_info(struct inode *in
43896 int fd = proc_fd(inode);
43897
43898 if (task) {
43899- files = get_files_struct(task);
43900+ if (!gr_acl_handle_procpidmem(task))
43901+ files = get_files_struct(task);
43902 put_task_struct(task);
43903 }
43904 if (files) {
43905@@ -2169,11 +2268,21 @@ static const struct file_operations proc
43906 */
43907 static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags)
43908 {
43909+ struct task_struct *task;
43910 int rv = generic_permission(inode, mask, flags, NULL);
43911- if (rv == 0)
43912- return 0;
43913+
43914 if (task_pid(current) == proc_pid(inode))
43915 rv = 0;
43916+
43917+ task = get_proc_task(inode);
43918+ if (task == NULL)
43919+ return rv;
43920+
43921+ if (gr_acl_handle_procpidmem(task))
43922+ rv = -EACCES;
43923+
43924+ put_task_struct(task);
43925+
43926 return rv;
43927 }
43928
43929@@ -2283,6 +2392,9 @@ static struct dentry *proc_pident_lookup
43930 if (!task)
43931 goto out_no_task;
43932
43933+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
43934+ goto out;
43935+
43936 /*
43937 * Yes, it does not scale. And it should not. Don't add
43938 * new entries into /proc/<tgid>/ without very good reasons.
43939@@ -2327,6 +2439,9 @@ static int proc_pident_readdir(struct fi
43940 if (!task)
43941 goto out_no_task;
43942
43943+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
43944+ goto out;
43945+
43946 ret = 0;
43947 i = filp->f_pos;
43948 switch (i) {
43949@@ -2597,7 +2712,7 @@ static void *proc_self_follow_link(struc
43950 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
43951 void *cookie)
43952 {
43953- char *s = nd_get_link(nd);
43954+ const char *s = nd_get_link(nd);
43955 if (!IS_ERR(s))
43956 __putname(s);
43957 }
43958@@ -2795,7 +2910,7 @@ static const struct pid_entry tgid_base_
43959 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
43960 #endif
43961 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
43962-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
43963+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
43964 INF("syscall", S_IRUGO, proc_pid_syscall),
43965 #endif
43966 INF("cmdline", S_IRUGO, proc_pid_cmdline),
43967@@ -2820,10 +2935,10 @@ static const struct pid_entry tgid_base_
43968 #ifdef CONFIG_SECURITY
43969 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
43970 #endif
43971-#ifdef CONFIG_KALLSYMS
43972+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
43973 INF("wchan", S_IRUGO, proc_pid_wchan),
43974 #endif
43975-#ifdef CONFIG_STACKTRACE
43976+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
43977 ONE("stack", S_IRUGO, proc_pid_stack),
43978 #endif
43979 #ifdef CONFIG_SCHEDSTATS
43980@@ -2857,6 +2972,9 @@ static const struct pid_entry tgid_base_
43981 #ifdef CONFIG_HARDWALL
43982 INF("hardwall", S_IRUGO, proc_pid_hardwall),
43983 #endif
43984+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
43985+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
43986+#endif
43987 };
43988
43989 static int proc_tgid_base_readdir(struct file * filp,
43990@@ -2982,7 +3100,14 @@ static struct dentry *proc_pid_instantia
43991 if (!inode)
43992 goto out;
43993
43994+#ifdef CONFIG_GRKERNSEC_PROC_USER
43995+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
43996+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43997+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
43998+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
43999+#else
44000 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
44001+#endif
44002 inode->i_op = &proc_tgid_base_inode_operations;
44003 inode->i_fop = &proc_tgid_base_operations;
44004 inode->i_flags|=S_IMMUTABLE;
44005@@ -3024,7 +3149,11 @@ struct dentry *proc_pid_lookup(struct in
44006 if (!task)
44007 goto out;
44008
44009+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
44010+ goto out_put_task;
44011+
44012 result = proc_pid_instantiate(dir, dentry, task, NULL);
44013+out_put_task:
44014 put_task_struct(task);
44015 out:
44016 return result;
44017@@ -3089,6 +3218,11 @@ int proc_pid_readdir(struct file * filp,
44018 {
44019 unsigned int nr;
44020 struct task_struct *reaper;
44021+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44022+ const struct cred *tmpcred = current_cred();
44023+ const struct cred *itercred;
44024+#endif
44025+ filldir_t __filldir = filldir;
44026 struct tgid_iter iter;
44027 struct pid_namespace *ns;
44028
44029@@ -3112,8 +3246,27 @@ int proc_pid_readdir(struct file * filp,
44030 for (iter = next_tgid(ns, iter);
44031 iter.task;
44032 iter.tgid += 1, iter = next_tgid(ns, iter)) {
44033+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44034+ rcu_read_lock();
44035+ itercred = __task_cred(iter.task);
44036+#endif
44037+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
44038+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44039+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
44040+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
44041+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
44042+#endif
44043+ )
44044+#endif
44045+ )
44046+ __filldir = &gr_fake_filldir;
44047+ else
44048+ __filldir = filldir;
44049+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44050+ rcu_read_unlock();
44051+#endif
44052 filp->f_pos = iter.tgid + TGID_OFFSET;
44053- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
44054+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
44055 put_task_struct(iter.task);
44056 goto out;
44057 }
44058@@ -3141,7 +3294,7 @@ static const struct pid_entry tid_base_s
44059 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
44060 #endif
44061 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
44062-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
44063+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
44064 INF("syscall", S_IRUGO, proc_pid_syscall),
44065 #endif
44066 INF("cmdline", S_IRUGO, proc_pid_cmdline),
44067@@ -3165,10 +3318,10 @@ static const struct pid_entry tid_base_s
44068 #ifdef CONFIG_SECURITY
44069 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
44070 #endif
44071-#ifdef CONFIG_KALLSYMS
44072+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
44073 INF("wchan", S_IRUGO, proc_pid_wchan),
44074 #endif
44075-#ifdef CONFIG_STACKTRACE
44076+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
44077 ONE("stack", S_IRUGO, proc_pid_stack),
44078 #endif
44079 #ifdef CONFIG_SCHEDSTATS
44080diff -urNp linux-3.0.4/fs/proc/cmdline.c linux-3.0.4/fs/proc/cmdline.c
44081--- linux-3.0.4/fs/proc/cmdline.c 2011-07-21 22:17:23.000000000 -0400
44082+++ linux-3.0.4/fs/proc/cmdline.c 2011-08-23 21:48:14.000000000 -0400
44083@@ -23,7 +23,11 @@ static const struct file_operations cmdl
44084
44085 static int __init proc_cmdline_init(void)
44086 {
44087+#ifdef CONFIG_GRKERNSEC_PROC_ADD
44088+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
44089+#else
44090 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
44091+#endif
44092 return 0;
44093 }
44094 module_init(proc_cmdline_init);
44095diff -urNp linux-3.0.4/fs/proc/devices.c linux-3.0.4/fs/proc/devices.c
44096--- linux-3.0.4/fs/proc/devices.c 2011-07-21 22:17:23.000000000 -0400
44097+++ linux-3.0.4/fs/proc/devices.c 2011-08-23 21:48:14.000000000 -0400
44098@@ -64,7 +64,11 @@ static const struct file_operations proc
44099
44100 static int __init proc_devices_init(void)
44101 {
44102+#ifdef CONFIG_GRKERNSEC_PROC_ADD
44103+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
44104+#else
44105 proc_create("devices", 0, NULL, &proc_devinfo_operations);
44106+#endif
44107 return 0;
44108 }
44109 module_init(proc_devices_init);
44110diff -urNp linux-3.0.4/fs/proc/inode.c linux-3.0.4/fs/proc/inode.c
44111--- linux-3.0.4/fs/proc/inode.c 2011-07-21 22:17:23.000000000 -0400
44112+++ linux-3.0.4/fs/proc/inode.c 2011-08-23 21:48:14.000000000 -0400
44113@@ -440,7 +440,11 @@ struct inode *proc_get_inode(struct supe
44114 if (de->mode) {
44115 inode->i_mode = de->mode;
44116 inode->i_uid = de->uid;
44117+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
44118+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
44119+#else
44120 inode->i_gid = de->gid;
44121+#endif
44122 }
44123 if (de->size)
44124 inode->i_size = de->size;
44125diff -urNp linux-3.0.4/fs/proc/internal.h linux-3.0.4/fs/proc/internal.h
44126--- linux-3.0.4/fs/proc/internal.h 2011-07-21 22:17:23.000000000 -0400
44127+++ linux-3.0.4/fs/proc/internal.h 2011-08-23 21:48:14.000000000 -0400
44128@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
44129 struct pid *pid, struct task_struct *task);
44130 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
44131 struct pid *pid, struct task_struct *task);
44132+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
44133+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
44134+#endif
44135 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
44136
44137 extern const struct file_operations proc_maps_operations;
44138diff -urNp linux-3.0.4/fs/proc/Kconfig linux-3.0.4/fs/proc/Kconfig
44139--- linux-3.0.4/fs/proc/Kconfig 2011-07-21 22:17:23.000000000 -0400
44140+++ linux-3.0.4/fs/proc/Kconfig 2011-08-23 21:48:14.000000000 -0400
44141@@ -30,12 +30,12 @@ config PROC_FS
44142
44143 config PROC_KCORE
44144 bool "/proc/kcore support" if !ARM
44145- depends on PROC_FS && MMU
44146+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
44147
44148 config PROC_VMCORE
44149 bool "/proc/vmcore support"
44150- depends on PROC_FS && CRASH_DUMP
44151- default y
44152+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
44153+ default n
44154 help
44155 Exports the dump image of crashed kernel in ELF format.
44156
44157@@ -59,8 +59,8 @@ config PROC_SYSCTL
44158 limited in memory.
44159
44160 config PROC_PAGE_MONITOR
44161- default y
44162- depends on PROC_FS && MMU
44163+ default n
44164+ depends on PROC_FS && MMU && !GRKERNSEC
44165 bool "Enable /proc page monitoring" if EXPERT
44166 help
44167 Various /proc files exist to monitor process memory utilization:
44168diff -urNp linux-3.0.4/fs/proc/kcore.c linux-3.0.4/fs/proc/kcore.c
44169--- linux-3.0.4/fs/proc/kcore.c 2011-07-21 22:17:23.000000000 -0400
44170+++ linux-3.0.4/fs/proc/kcore.c 2011-08-23 21:48:14.000000000 -0400
44171@@ -321,6 +321,8 @@ static void elf_kcore_store_hdr(char *bu
44172 off_t offset = 0;
44173 struct kcore_list *m;
44174
44175+ pax_track_stack();
44176+
44177 /* setup ELF header */
44178 elf = (struct elfhdr *) bufp;
44179 bufp += sizeof(struct elfhdr);
44180@@ -478,9 +480,10 @@ read_kcore(struct file *file, char __use
44181 * the addresses in the elf_phdr on our list.
44182 */
44183 start = kc_offset_to_vaddr(*fpos - elf_buflen);
44184- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
44185+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
44186+ if (tsz > buflen)
44187 tsz = buflen;
44188-
44189+
44190 while (buflen) {
44191 struct kcore_list *m;
44192
44193@@ -509,20 +512,23 @@ read_kcore(struct file *file, char __use
44194 kfree(elf_buf);
44195 } else {
44196 if (kern_addr_valid(start)) {
44197- unsigned long n;
44198+ char *elf_buf;
44199+ mm_segment_t oldfs;
44200
44201- n = copy_to_user(buffer, (char *)start, tsz);
44202- /*
44203- * We cannot distingush between fault on source
44204- * and fault on destination. When this happens
44205- * we clear too and hope it will trigger the
44206- * EFAULT again.
44207- */
44208- if (n) {
44209- if (clear_user(buffer + tsz - n,
44210- n))
44211+ elf_buf = kmalloc(tsz, GFP_KERNEL);
44212+ if (!elf_buf)
44213+ return -ENOMEM;
44214+ oldfs = get_fs();
44215+ set_fs(KERNEL_DS);
44216+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
44217+ set_fs(oldfs);
44218+ if (copy_to_user(buffer, elf_buf, tsz)) {
44219+ kfree(elf_buf);
44220 return -EFAULT;
44221+ }
44222 }
44223+ set_fs(oldfs);
44224+ kfree(elf_buf);
44225 } else {
44226 if (clear_user(buffer, tsz))
44227 return -EFAULT;
44228@@ -542,6 +548,9 @@ read_kcore(struct file *file, char __use
44229
44230 static int open_kcore(struct inode *inode, struct file *filp)
44231 {
44232+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
44233+ return -EPERM;
44234+#endif
44235 if (!capable(CAP_SYS_RAWIO))
44236 return -EPERM;
44237 if (kcore_need_update)
44238diff -urNp linux-3.0.4/fs/proc/meminfo.c linux-3.0.4/fs/proc/meminfo.c
44239--- linux-3.0.4/fs/proc/meminfo.c 2011-07-21 22:17:23.000000000 -0400
44240+++ linux-3.0.4/fs/proc/meminfo.c 2011-08-23 21:48:14.000000000 -0400
44241@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
44242 unsigned long pages[NR_LRU_LISTS];
44243 int lru;
44244
44245+ pax_track_stack();
44246+
44247 /*
44248 * display in kilobytes.
44249 */
44250@@ -157,7 +159,7 @@ static int meminfo_proc_show(struct seq_
44251 vmi.used >> 10,
44252 vmi.largest_chunk >> 10
44253 #ifdef CONFIG_MEMORY_FAILURE
44254- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
44255+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
44256 #endif
44257 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
44258 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
44259diff -urNp linux-3.0.4/fs/proc/nommu.c linux-3.0.4/fs/proc/nommu.c
44260--- linux-3.0.4/fs/proc/nommu.c 2011-07-21 22:17:23.000000000 -0400
44261+++ linux-3.0.4/fs/proc/nommu.c 2011-08-23 21:47:56.000000000 -0400
44262@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_
44263 if (len < 1)
44264 len = 1;
44265 seq_printf(m, "%*c", len, ' ');
44266- seq_path(m, &file->f_path, "");
44267+ seq_path(m, &file->f_path, "\n\\");
44268 }
44269
44270 seq_putc(m, '\n');
44271diff -urNp linux-3.0.4/fs/proc/proc_net.c linux-3.0.4/fs/proc/proc_net.c
44272--- linux-3.0.4/fs/proc/proc_net.c 2011-07-21 22:17:23.000000000 -0400
44273+++ linux-3.0.4/fs/proc/proc_net.c 2011-08-23 21:48:14.000000000 -0400
44274@@ -105,6 +105,17 @@ static struct net *get_proc_task_net(str
44275 struct task_struct *task;
44276 struct nsproxy *ns;
44277 struct net *net = NULL;
44278+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44279+ const struct cred *cred = current_cred();
44280+#endif
44281+
44282+#ifdef CONFIG_GRKERNSEC_PROC_USER
44283+ if (cred->fsuid)
44284+ return net;
44285+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44286+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
44287+ return net;
44288+#endif
44289
44290 rcu_read_lock();
44291 task = pid_task(proc_pid(dir), PIDTYPE_PID);
44292diff -urNp linux-3.0.4/fs/proc/proc_sysctl.c linux-3.0.4/fs/proc/proc_sysctl.c
44293--- linux-3.0.4/fs/proc/proc_sysctl.c 2011-07-21 22:17:23.000000000 -0400
44294+++ linux-3.0.4/fs/proc/proc_sysctl.c 2011-08-23 21:48:14.000000000 -0400
44295@@ -8,6 +8,8 @@
44296 #include <linux/namei.h>
44297 #include "internal.h"
44298
44299+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
44300+
44301 static const struct dentry_operations proc_sys_dentry_operations;
44302 static const struct file_operations proc_sys_file_operations;
44303 static const struct inode_operations proc_sys_inode_operations;
44304@@ -111,6 +113,9 @@ static struct dentry *proc_sys_lookup(st
44305 if (!p)
44306 goto out;
44307
44308+ if (gr_handle_sysctl(p, MAY_EXEC))
44309+ goto out;
44310+
44311 err = ERR_PTR(-ENOMEM);
44312 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
44313 if (h)
44314@@ -230,6 +235,9 @@ static int scan(struct ctl_table_header
44315 if (*pos < file->f_pos)
44316 continue;
44317
44318+ if (gr_handle_sysctl(table, 0))
44319+ continue;
44320+
44321 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
44322 if (res)
44323 return res;
44324@@ -355,6 +363,9 @@ static int proc_sys_getattr(struct vfsmo
44325 if (IS_ERR(head))
44326 return PTR_ERR(head);
44327
44328+ if (table && gr_handle_sysctl(table, MAY_EXEC))
44329+ return -ENOENT;
44330+
44331 generic_fillattr(inode, stat);
44332 if (table)
44333 stat->mode = (stat->mode & S_IFMT) | table->mode;
44334diff -urNp linux-3.0.4/fs/proc/root.c linux-3.0.4/fs/proc/root.c
44335--- linux-3.0.4/fs/proc/root.c 2011-07-21 22:17:23.000000000 -0400
44336+++ linux-3.0.4/fs/proc/root.c 2011-08-23 21:48:14.000000000 -0400
44337@@ -123,7 +123,15 @@ void __init proc_root_init(void)
44338 #ifdef CONFIG_PROC_DEVICETREE
44339 proc_device_tree_init();
44340 #endif
44341+#ifdef CONFIG_GRKERNSEC_PROC_ADD
44342+#ifdef CONFIG_GRKERNSEC_PROC_USER
44343+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
44344+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44345+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
44346+#endif
44347+#else
44348 proc_mkdir("bus", NULL);
44349+#endif
44350 proc_sys_init();
44351 }
44352
44353diff -urNp linux-3.0.4/fs/proc/task_mmu.c linux-3.0.4/fs/proc/task_mmu.c
44354--- linux-3.0.4/fs/proc/task_mmu.c 2011-07-21 22:17:23.000000000 -0400
44355+++ linux-3.0.4/fs/proc/task_mmu.c 2011-08-23 21:48:14.000000000 -0400
44356@@ -51,8 +51,13 @@ void task_mem(struct seq_file *m, struct
44357 "VmExe:\t%8lu kB\n"
44358 "VmLib:\t%8lu kB\n"
44359 "VmPTE:\t%8lu kB\n"
44360- "VmSwap:\t%8lu kB\n",
44361- hiwater_vm << (PAGE_SHIFT-10),
44362+ "VmSwap:\t%8lu kB\n"
44363+
44364+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
44365+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
44366+#endif
44367+
44368+ ,hiwater_vm << (PAGE_SHIFT-10),
44369 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
44370 mm->locked_vm << (PAGE_SHIFT-10),
44371 hiwater_rss << (PAGE_SHIFT-10),
44372@@ -60,7 +65,13 @@ void task_mem(struct seq_file *m, struct
44373 data << (PAGE_SHIFT-10),
44374 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
44375 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
44376- swap << (PAGE_SHIFT-10));
44377+ swap << (PAGE_SHIFT-10)
44378+
44379+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
44380+ , mm->context.user_cs_base, mm->context.user_cs_limit
44381+#endif
44382+
44383+ );
44384 }
44385
44386 unsigned long task_vsize(struct mm_struct *mm)
44387@@ -207,6 +218,12 @@ static int do_maps_open(struct inode *in
44388 return ret;
44389 }
44390
44391+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44392+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
44393+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
44394+ _mm->pax_flags & MF_PAX_SEGMEXEC))
44395+#endif
44396+
44397 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
44398 {
44399 struct mm_struct *mm = vma->vm_mm;
44400@@ -225,13 +242,13 @@ static void show_map_vma(struct seq_file
44401 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
44402 }
44403
44404- /* We don't show the stack guard page in /proc/maps */
44405+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44406+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
44407+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
44408+#else
44409 start = vma->vm_start;
44410- if (stack_guard_page_start(vma, start))
44411- start += PAGE_SIZE;
44412 end = vma->vm_end;
44413- if (stack_guard_page_end(vma, end))
44414- end -= PAGE_SIZE;
44415+#endif
44416
44417 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
44418 start,
44419@@ -240,7 +257,11 @@ static void show_map_vma(struct seq_file
44420 flags & VM_WRITE ? 'w' : '-',
44421 flags & VM_EXEC ? 'x' : '-',
44422 flags & VM_MAYSHARE ? 's' : 'p',
44423+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44424+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
44425+#else
44426 pgoff,
44427+#endif
44428 MAJOR(dev), MINOR(dev), ino, &len);
44429
44430 /*
44431@@ -249,7 +270,7 @@ static void show_map_vma(struct seq_file
44432 */
44433 if (file) {
44434 pad_len_spaces(m, len);
44435- seq_path(m, &file->f_path, "\n");
44436+ seq_path(m, &file->f_path, "\n\\");
44437 } else {
44438 const char *name = arch_vma_name(vma);
44439 if (!name) {
44440@@ -257,8 +278,9 @@ static void show_map_vma(struct seq_file
44441 if (vma->vm_start <= mm->brk &&
44442 vma->vm_end >= mm->start_brk) {
44443 name = "[heap]";
44444- } else if (vma->vm_start <= mm->start_stack &&
44445- vma->vm_end >= mm->start_stack) {
44446+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
44447+ (vma->vm_start <= mm->start_stack &&
44448+ vma->vm_end >= mm->start_stack)) {
44449 name = "[stack]";
44450 }
44451 } else {
44452@@ -433,11 +455,16 @@ static int show_smap(struct seq_file *m,
44453 };
44454
44455 memset(&mss, 0, sizeof mss);
44456- mss.vma = vma;
44457- /* mmap_sem is held in m_start */
44458- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
44459- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
44460-
44461+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44462+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
44463+#endif
44464+ mss.vma = vma;
44465+ /* mmap_sem is held in m_start */
44466+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
44467+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
44468+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44469+ }
44470+#endif
44471 show_map_vma(m, vma);
44472
44473 seq_printf(m,
44474@@ -455,7 +482,11 @@ static int show_smap(struct seq_file *m,
44475 "KernelPageSize: %8lu kB\n"
44476 "MMUPageSize: %8lu kB\n"
44477 "Locked: %8lu kB\n",
44478+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44479+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
44480+#else
44481 (vma->vm_end - vma->vm_start) >> 10,
44482+#endif
44483 mss.resident >> 10,
44484 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
44485 mss.shared_clean >> 10,
44486@@ -1001,7 +1032,7 @@ static int show_numa_map(struct seq_file
44487
44488 if (file) {
44489 seq_printf(m, " file=");
44490- seq_path(m, &file->f_path, "\n\t= ");
44491+ seq_path(m, &file->f_path, "\n\t\\= ");
44492 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
44493 seq_printf(m, " heap");
44494 } else if (vma->vm_start <= mm->start_stack &&
44495diff -urNp linux-3.0.4/fs/proc/task_nommu.c linux-3.0.4/fs/proc/task_nommu.c
44496--- linux-3.0.4/fs/proc/task_nommu.c 2011-07-21 22:17:23.000000000 -0400
44497+++ linux-3.0.4/fs/proc/task_nommu.c 2011-08-23 21:47:56.000000000 -0400
44498@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct
44499 else
44500 bytes += kobjsize(mm);
44501
44502- if (current->fs && current->fs->users > 1)
44503+ if (current->fs && atomic_read(&current->fs->users) > 1)
44504 sbytes += kobjsize(current->fs);
44505 else
44506 bytes += kobjsize(current->fs);
44507@@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_fil
44508
44509 if (file) {
44510 pad_len_spaces(m, len);
44511- seq_path(m, &file->f_path, "");
44512+ seq_path(m, &file->f_path, "\n\\");
44513 } else if (mm) {
44514 if (vma->vm_start <= mm->start_stack &&
44515 vma->vm_end >= mm->start_stack) {
44516diff -urNp linux-3.0.4/fs/quota/netlink.c linux-3.0.4/fs/quota/netlink.c
44517--- linux-3.0.4/fs/quota/netlink.c 2011-07-21 22:17:23.000000000 -0400
44518+++ linux-3.0.4/fs/quota/netlink.c 2011-08-23 21:47:56.000000000 -0400
44519@@ -33,7 +33,7 @@ static struct genl_family quota_genl_fam
44520 void quota_send_warning(short type, unsigned int id, dev_t dev,
44521 const char warntype)
44522 {
44523- static atomic_t seq;
44524+ static atomic_unchecked_t seq;
44525 struct sk_buff *skb;
44526 void *msg_head;
44527 int ret;
44528@@ -49,7 +49,7 @@ void quota_send_warning(short type, unsi
44529 "VFS: Not enough memory to send quota warning.\n");
44530 return;
44531 }
44532- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
44533+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
44534 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
44535 if (!msg_head) {
44536 printk(KERN_ERR
44537diff -urNp linux-3.0.4/fs/readdir.c linux-3.0.4/fs/readdir.c
44538--- linux-3.0.4/fs/readdir.c 2011-07-21 22:17:23.000000000 -0400
44539+++ linux-3.0.4/fs/readdir.c 2011-10-06 04:17:55.000000000 -0400
44540@@ -17,6 +17,7 @@
44541 #include <linux/security.h>
44542 #include <linux/syscalls.h>
44543 #include <linux/unistd.h>
44544+#include <linux/namei.h>
44545
44546 #include <asm/uaccess.h>
44547
44548@@ -67,6 +68,7 @@ struct old_linux_dirent {
44549
44550 struct readdir_callback {
44551 struct old_linux_dirent __user * dirent;
44552+ struct file * file;
44553 int result;
44554 };
44555
44556@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
44557 buf->result = -EOVERFLOW;
44558 return -EOVERFLOW;
44559 }
44560+
44561+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
44562+ return 0;
44563+
44564 buf->result++;
44565 dirent = buf->dirent;
44566 if (!access_ok(VERIFY_WRITE, dirent,
44567@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
44568
44569 buf.result = 0;
44570 buf.dirent = dirent;
44571+ buf.file = file;
44572
44573 error = vfs_readdir(file, fillonedir, &buf);
44574 if (buf.result)
44575@@ -142,6 +149,7 @@ struct linux_dirent {
44576 struct getdents_callback {
44577 struct linux_dirent __user * current_dir;
44578 struct linux_dirent __user * previous;
44579+ struct file * file;
44580 int count;
44581 int error;
44582 };
44583@@ -163,6 +171,10 @@ static int filldir(void * __buf, const c
44584 buf->error = -EOVERFLOW;
44585 return -EOVERFLOW;
44586 }
44587+
44588+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
44589+ return 0;
44590+
44591 dirent = buf->previous;
44592 if (dirent) {
44593 if (__put_user(offset, &dirent->d_off))
44594@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
44595 buf.previous = NULL;
44596 buf.count = count;
44597 buf.error = 0;
44598+ buf.file = file;
44599
44600 error = vfs_readdir(file, filldir, &buf);
44601 if (error >= 0)
44602@@ -229,6 +242,7 @@ out:
44603 struct getdents_callback64 {
44604 struct linux_dirent64 __user * current_dir;
44605 struct linux_dirent64 __user * previous;
44606+ struct file *file;
44607 int count;
44608 int error;
44609 };
44610@@ -244,6 +258,10 @@ static int filldir64(void * __buf, const
44611 buf->error = -EINVAL; /* only used if we fail.. */
44612 if (reclen > buf->count)
44613 return -EINVAL;
44614+
44615+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
44616+ return 0;
44617+
44618 dirent = buf->previous;
44619 if (dirent) {
44620 if (__put_user(offset, &dirent->d_off))
44621@@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
44622
44623 buf.current_dir = dirent;
44624 buf.previous = NULL;
44625+ buf.file = file;
44626 buf.count = count;
44627 buf.error = 0;
44628
44629@@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
44630 error = buf.error;
44631 lastdirent = buf.previous;
44632 if (lastdirent) {
44633- typeof(lastdirent->d_off) d_off = file->f_pos;
44634+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
44635 if (__put_user(d_off, &lastdirent->d_off))
44636 error = -EFAULT;
44637 else
44638diff -urNp linux-3.0.4/fs/reiserfs/dir.c linux-3.0.4/fs/reiserfs/dir.c
44639--- linux-3.0.4/fs/reiserfs/dir.c 2011-07-21 22:17:23.000000000 -0400
44640+++ linux-3.0.4/fs/reiserfs/dir.c 2011-08-23 21:48:14.000000000 -0400
44641@@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
44642 struct reiserfs_dir_entry de;
44643 int ret = 0;
44644
44645+ pax_track_stack();
44646+
44647 reiserfs_write_lock(inode->i_sb);
44648
44649 reiserfs_check_lock_depth(inode->i_sb, "readdir");
44650diff -urNp linux-3.0.4/fs/reiserfs/do_balan.c linux-3.0.4/fs/reiserfs/do_balan.c
44651--- linux-3.0.4/fs/reiserfs/do_balan.c 2011-07-21 22:17:23.000000000 -0400
44652+++ linux-3.0.4/fs/reiserfs/do_balan.c 2011-08-23 21:47:56.000000000 -0400
44653@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb,
44654 return;
44655 }
44656
44657- atomic_inc(&(fs_generation(tb->tb_sb)));
44658+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
44659 do_balance_starts(tb);
44660
44661 /* balance leaf returns 0 except if combining L R and S into
44662diff -urNp linux-3.0.4/fs/reiserfs/journal.c linux-3.0.4/fs/reiserfs/journal.c
44663--- linux-3.0.4/fs/reiserfs/journal.c 2011-07-21 22:17:23.000000000 -0400
44664+++ linux-3.0.4/fs/reiserfs/journal.c 2011-08-23 21:48:14.000000000 -0400
44665@@ -2299,6 +2299,8 @@ static struct buffer_head *reiserfs_brea
44666 struct buffer_head *bh;
44667 int i, j;
44668
44669+ pax_track_stack();
44670+
44671 bh = __getblk(dev, block, bufsize);
44672 if (buffer_uptodate(bh))
44673 return (bh);
44674diff -urNp linux-3.0.4/fs/reiserfs/namei.c linux-3.0.4/fs/reiserfs/namei.c
44675--- linux-3.0.4/fs/reiserfs/namei.c 2011-07-21 22:17:23.000000000 -0400
44676+++ linux-3.0.4/fs/reiserfs/namei.c 2011-08-23 21:48:14.000000000 -0400
44677@@ -1225,6 +1225,8 @@ static int reiserfs_rename(struct inode
44678 unsigned long savelink = 1;
44679 struct timespec ctime;
44680
44681+ pax_track_stack();
44682+
44683 /* three balancings: (1) old name removal, (2) new name insertion
44684 and (3) maybe "save" link insertion
44685 stat data updates: (1) old directory,
44686diff -urNp linux-3.0.4/fs/reiserfs/procfs.c linux-3.0.4/fs/reiserfs/procfs.c
44687--- linux-3.0.4/fs/reiserfs/procfs.c 2011-07-21 22:17:23.000000000 -0400
44688+++ linux-3.0.4/fs/reiserfs/procfs.c 2011-08-23 21:48:14.000000000 -0400
44689@@ -113,7 +113,7 @@ static int show_super(struct seq_file *m
44690 "SMALL_TAILS " : "NO_TAILS ",
44691 replay_only(sb) ? "REPLAY_ONLY " : "",
44692 convert_reiserfs(sb) ? "CONV " : "",
44693- atomic_read(&r->s_generation_counter),
44694+ atomic_read_unchecked(&r->s_generation_counter),
44695 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
44696 SF(s_do_balance), SF(s_unneeded_left_neighbor),
44697 SF(s_good_search_by_key_reada), SF(s_bmaps),
44698@@ -299,6 +299,8 @@ static int show_journal(struct seq_file
44699 struct journal_params *jp = &rs->s_v1.s_journal;
44700 char b[BDEVNAME_SIZE];
44701
44702+ pax_track_stack();
44703+
44704 seq_printf(m, /* on-disk fields */
44705 "jp_journal_1st_block: \t%i\n"
44706 "jp_journal_dev: \t%s[%x]\n"
44707diff -urNp linux-3.0.4/fs/reiserfs/stree.c linux-3.0.4/fs/reiserfs/stree.c
44708--- linux-3.0.4/fs/reiserfs/stree.c 2011-07-21 22:17:23.000000000 -0400
44709+++ linux-3.0.4/fs/reiserfs/stree.c 2011-08-23 21:48:14.000000000 -0400
44710@@ -1196,6 +1196,8 @@ int reiserfs_delete_item(struct reiserfs
44711 int iter = 0;
44712 #endif
44713
44714+ pax_track_stack();
44715+
44716 BUG_ON(!th->t_trans_id);
44717
44718 init_tb_struct(th, &s_del_balance, sb, path,
44719@@ -1333,6 +1335,8 @@ void reiserfs_delete_solid_item(struct r
44720 int retval;
44721 int quota_cut_bytes = 0;
44722
44723+ pax_track_stack();
44724+
44725 BUG_ON(!th->t_trans_id);
44726
44727 le_key2cpu_key(&cpu_key, key);
44728@@ -1562,6 +1566,8 @@ int reiserfs_cut_from_item(struct reiser
44729 int quota_cut_bytes;
44730 loff_t tail_pos = 0;
44731
44732+ pax_track_stack();
44733+
44734 BUG_ON(!th->t_trans_id);
44735
44736 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
44737@@ -1957,6 +1963,8 @@ int reiserfs_paste_into_item(struct reis
44738 int retval;
44739 int fs_gen;
44740
44741+ pax_track_stack();
44742+
44743 BUG_ON(!th->t_trans_id);
44744
44745 fs_gen = get_generation(inode->i_sb);
44746@@ -2045,6 +2053,8 @@ int reiserfs_insert_item(struct reiserfs
44747 int fs_gen = 0;
44748 int quota_bytes = 0;
44749
44750+ pax_track_stack();
44751+
44752 BUG_ON(!th->t_trans_id);
44753
44754 if (inode) { /* Do we count quotas for item? */
44755diff -urNp linux-3.0.4/fs/reiserfs/super.c linux-3.0.4/fs/reiserfs/super.c
44756--- linux-3.0.4/fs/reiserfs/super.c 2011-07-21 22:17:23.000000000 -0400
44757+++ linux-3.0.4/fs/reiserfs/super.c 2011-08-23 21:48:14.000000000 -0400
44758@@ -927,6 +927,8 @@ static int reiserfs_parse_options(struct
44759 {.option_name = NULL}
44760 };
44761
44762+ pax_track_stack();
44763+
44764 *blocks = 0;
44765 if (!options || !*options)
44766 /* use default configuration: create tails, journaling on, no
44767diff -urNp linux-3.0.4/fs/select.c linux-3.0.4/fs/select.c
44768--- linux-3.0.4/fs/select.c 2011-07-21 22:17:23.000000000 -0400
44769+++ linux-3.0.4/fs/select.c 2011-08-23 21:48:14.000000000 -0400
44770@@ -20,6 +20,7 @@
44771 #include <linux/module.h>
44772 #include <linux/slab.h>
44773 #include <linux/poll.h>
44774+#include <linux/security.h>
44775 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
44776 #include <linux/file.h>
44777 #include <linux/fdtable.h>
44778@@ -403,6 +404,8 @@ int do_select(int n, fd_set_bits *fds, s
44779 int retval, i, timed_out = 0;
44780 unsigned long slack = 0;
44781
44782+ pax_track_stack();
44783+
44784 rcu_read_lock();
44785 retval = max_select_fd(n, fds);
44786 rcu_read_unlock();
44787@@ -528,6 +531,8 @@ int core_sys_select(int n, fd_set __user
44788 /* Allocate small arguments on the stack to save memory and be faster */
44789 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
44790
44791+ pax_track_stack();
44792+
44793 ret = -EINVAL;
44794 if (n < 0)
44795 goto out_nofds;
44796@@ -837,6 +842,9 @@ int do_sys_poll(struct pollfd __user *uf
44797 struct poll_list *walk = head;
44798 unsigned long todo = nfds;
44799
44800+ pax_track_stack();
44801+
44802+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
44803 if (nfds > rlimit(RLIMIT_NOFILE))
44804 return -EINVAL;
44805
44806diff -urNp linux-3.0.4/fs/seq_file.c linux-3.0.4/fs/seq_file.c
44807--- linux-3.0.4/fs/seq_file.c 2011-07-21 22:17:23.000000000 -0400
44808+++ linux-3.0.4/fs/seq_file.c 2011-08-23 21:47:56.000000000 -0400
44809@@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
44810 return 0;
44811 }
44812 if (!m->buf) {
44813- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
44814+ m->size = PAGE_SIZE;
44815+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
44816 if (!m->buf)
44817 return -ENOMEM;
44818 }
44819@@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
44820 Eoverflow:
44821 m->op->stop(m, p);
44822 kfree(m->buf);
44823- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
44824+ m->size <<= 1;
44825+ m->buf = kmalloc(m->size, GFP_KERNEL);
44826 return !m->buf ? -ENOMEM : -EAGAIN;
44827 }
44828
44829@@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
44830 m->version = file->f_version;
44831 /* grab buffer if we didn't have one */
44832 if (!m->buf) {
44833- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
44834+ m->size = PAGE_SIZE;
44835+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
44836 if (!m->buf)
44837 goto Enomem;
44838 }
44839@@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
44840 goto Fill;
44841 m->op->stop(m, p);
44842 kfree(m->buf);
44843- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
44844+ m->size <<= 1;
44845+ m->buf = kmalloc(m->size, GFP_KERNEL);
44846 if (!m->buf)
44847 goto Enomem;
44848 m->count = 0;
44849@@ -549,7 +553,7 @@ static void single_stop(struct seq_file
44850 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
44851 void *data)
44852 {
44853- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
44854+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
44855 int res = -ENOMEM;
44856
44857 if (op) {
44858diff -urNp linux-3.0.4/fs/splice.c linux-3.0.4/fs/splice.c
44859--- linux-3.0.4/fs/splice.c 2011-07-21 22:17:23.000000000 -0400
44860+++ linux-3.0.4/fs/splice.c 2011-10-06 04:17:55.000000000 -0400
44861@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode
44862 pipe_lock(pipe);
44863
44864 for (;;) {
44865- if (!pipe->readers) {
44866+ if (!atomic_read(&pipe->readers)) {
44867 send_sig(SIGPIPE, current, 0);
44868 if (!ret)
44869 ret = -EPIPE;
44870@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode
44871 do_wakeup = 0;
44872 }
44873
44874- pipe->waiting_writers++;
44875+ atomic_inc(&pipe->waiting_writers);
44876 pipe_wait(pipe);
44877- pipe->waiting_writers--;
44878+ atomic_dec(&pipe->waiting_writers);
44879 }
44880
44881 pipe_unlock(pipe);
44882@@ -320,6 +320,8 @@ __generic_file_splice_read(struct file *
44883 .spd_release = spd_release_page,
44884 };
44885
44886+ pax_track_stack();
44887+
44888 if (splice_grow_spd(pipe, &spd))
44889 return -ENOMEM;
44890
44891@@ -560,7 +562,7 @@ static ssize_t kernel_readv(struct file
44892 old_fs = get_fs();
44893 set_fs(get_ds());
44894 /* The cast to a user pointer is valid due to the set_fs() */
44895- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
44896+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
44897 set_fs(old_fs);
44898
44899 return res;
44900@@ -575,7 +577,7 @@ static ssize_t kernel_write(struct file
44901 old_fs = get_fs();
44902 set_fs(get_ds());
44903 /* The cast to a user pointer is valid due to the set_fs() */
44904- res = vfs_write(file, (const char __user *)buf, count, &pos);
44905+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
44906 set_fs(old_fs);
44907
44908 return res;
44909@@ -603,6 +605,8 @@ ssize_t default_file_splice_read(struct
44910 .spd_release = spd_release_page,
44911 };
44912
44913+ pax_track_stack();
44914+
44915 if (splice_grow_spd(pipe, &spd))
44916 return -ENOMEM;
44917
44918@@ -626,7 +630,7 @@ ssize_t default_file_splice_read(struct
44919 goto err;
44920
44921 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
44922- vec[i].iov_base = (void __user *) page_address(page);
44923+ vec[i].iov_base = (void __force_user *) page_address(page);
44924 vec[i].iov_len = this_len;
44925 spd.pages[i] = page;
44926 spd.nr_pages++;
44927@@ -846,10 +850,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
44928 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
44929 {
44930 while (!pipe->nrbufs) {
44931- if (!pipe->writers)
44932+ if (!atomic_read(&pipe->writers))
44933 return 0;
44934
44935- if (!pipe->waiting_writers && sd->num_spliced)
44936+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
44937 return 0;
44938
44939 if (sd->flags & SPLICE_F_NONBLOCK)
44940@@ -1182,7 +1186,7 @@ ssize_t splice_direct_to_actor(struct fi
44941 * out of the pipe right after the splice_to_pipe(). So set
44942 * PIPE_READERS appropriately.
44943 */
44944- pipe->readers = 1;
44945+ atomic_set(&pipe->readers, 1);
44946
44947 current->splice_pipe = pipe;
44948 }
44949@@ -1619,6 +1623,8 @@ static long vmsplice_to_pipe(struct file
44950 };
44951 long ret;
44952
44953+ pax_track_stack();
44954+
44955 pipe = get_pipe_info(file);
44956 if (!pipe)
44957 return -EBADF;
44958@@ -1734,9 +1740,9 @@ static int ipipe_prep(struct pipe_inode_
44959 ret = -ERESTARTSYS;
44960 break;
44961 }
44962- if (!pipe->writers)
44963+ if (!atomic_read(&pipe->writers))
44964 break;
44965- if (!pipe->waiting_writers) {
44966+ if (!atomic_read(&pipe->waiting_writers)) {
44967 if (flags & SPLICE_F_NONBLOCK) {
44968 ret = -EAGAIN;
44969 break;
44970@@ -1768,7 +1774,7 @@ static int opipe_prep(struct pipe_inode_
44971 pipe_lock(pipe);
44972
44973 while (pipe->nrbufs >= pipe->buffers) {
44974- if (!pipe->readers) {
44975+ if (!atomic_read(&pipe->readers)) {
44976 send_sig(SIGPIPE, current, 0);
44977 ret = -EPIPE;
44978 break;
44979@@ -1781,9 +1787,9 @@ static int opipe_prep(struct pipe_inode_
44980 ret = -ERESTARTSYS;
44981 break;
44982 }
44983- pipe->waiting_writers++;
44984+ atomic_inc(&pipe->waiting_writers);
44985 pipe_wait(pipe);
44986- pipe->waiting_writers--;
44987+ atomic_dec(&pipe->waiting_writers);
44988 }
44989
44990 pipe_unlock(pipe);
44991@@ -1819,14 +1825,14 @@ retry:
44992 pipe_double_lock(ipipe, opipe);
44993
44994 do {
44995- if (!opipe->readers) {
44996+ if (!atomic_read(&opipe->readers)) {
44997 send_sig(SIGPIPE, current, 0);
44998 if (!ret)
44999 ret = -EPIPE;
45000 break;
45001 }
45002
45003- if (!ipipe->nrbufs && !ipipe->writers)
45004+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
45005 break;
45006
45007 /*
45008@@ -1923,7 +1929,7 @@ static int link_pipe(struct pipe_inode_i
45009 pipe_double_lock(ipipe, opipe);
45010
45011 do {
45012- if (!opipe->readers) {
45013+ if (!atomic_read(&opipe->readers)) {
45014 send_sig(SIGPIPE, current, 0);
45015 if (!ret)
45016 ret = -EPIPE;
45017@@ -1968,7 +1974,7 @@ static int link_pipe(struct pipe_inode_i
45018 * return EAGAIN if we have the potential of some data in the
45019 * future, otherwise just return 0
45020 */
45021- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
45022+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
45023 ret = -EAGAIN;
45024
45025 pipe_unlock(ipipe);
45026diff -urNp linux-3.0.4/fs/sysfs/file.c linux-3.0.4/fs/sysfs/file.c
45027--- linux-3.0.4/fs/sysfs/file.c 2011-07-21 22:17:23.000000000 -0400
45028+++ linux-3.0.4/fs/sysfs/file.c 2011-08-23 21:47:56.000000000 -0400
45029@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
45030
45031 struct sysfs_open_dirent {
45032 atomic_t refcnt;
45033- atomic_t event;
45034+ atomic_unchecked_t event;
45035 wait_queue_head_t poll;
45036 struct list_head buffers; /* goes through sysfs_buffer.list */
45037 };
45038@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentr
45039 if (!sysfs_get_active(attr_sd))
45040 return -ENODEV;
45041
45042- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
45043+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
45044 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
45045
45046 sysfs_put_active(attr_sd);
45047@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct
45048 return -ENOMEM;
45049
45050 atomic_set(&new_od->refcnt, 0);
45051- atomic_set(&new_od->event, 1);
45052+ atomic_set_unchecked(&new_od->event, 1);
45053 init_waitqueue_head(&new_od->poll);
45054 INIT_LIST_HEAD(&new_od->buffers);
45055 goto retry;
45056@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct fi
45057
45058 sysfs_put_active(attr_sd);
45059
45060- if (buffer->event != atomic_read(&od->event))
45061+ if (buffer->event != atomic_read_unchecked(&od->event))
45062 goto trigger;
45063
45064 return DEFAULT_POLLMASK;
45065@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_di
45066
45067 od = sd->s_attr.open;
45068 if (od) {
45069- atomic_inc(&od->event);
45070+ atomic_inc_unchecked(&od->event);
45071 wake_up_interruptible(&od->poll);
45072 }
45073
45074diff -urNp linux-3.0.4/fs/sysfs/mount.c linux-3.0.4/fs/sysfs/mount.c
45075--- linux-3.0.4/fs/sysfs/mount.c 2011-07-21 22:17:23.000000000 -0400
45076+++ linux-3.0.4/fs/sysfs/mount.c 2011-08-23 21:48:14.000000000 -0400
45077@@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
45078 .s_name = "",
45079 .s_count = ATOMIC_INIT(1),
45080 .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
45081+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
45082+ .s_mode = S_IFDIR | S_IRWXU,
45083+#else
45084 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
45085+#endif
45086 .s_ino = 1,
45087 };
45088
45089diff -urNp linux-3.0.4/fs/sysfs/symlink.c linux-3.0.4/fs/sysfs/symlink.c
45090--- linux-3.0.4/fs/sysfs/symlink.c 2011-07-21 22:17:23.000000000 -0400
45091+++ linux-3.0.4/fs/sysfs/symlink.c 2011-08-23 21:47:56.000000000 -0400
45092@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct de
45093
45094 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
45095 {
45096- char *page = nd_get_link(nd);
45097+ const char *page = nd_get_link(nd);
45098 if (!IS_ERR(page))
45099 free_page((unsigned long)page);
45100 }
45101diff -urNp linux-3.0.4/fs/udf/inode.c linux-3.0.4/fs/udf/inode.c
45102--- linux-3.0.4/fs/udf/inode.c 2011-07-21 22:17:23.000000000 -0400
45103+++ linux-3.0.4/fs/udf/inode.c 2011-08-23 21:48:14.000000000 -0400
45104@@ -560,6 +560,8 @@ static struct buffer_head *inode_getblk(
45105 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
45106 int lastblock = 0;
45107
45108+ pax_track_stack();
45109+
45110 prev_epos.offset = udf_file_entry_alloc_offset(inode);
45111 prev_epos.block = iinfo->i_location;
45112 prev_epos.bh = NULL;
45113diff -urNp linux-3.0.4/fs/udf/misc.c linux-3.0.4/fs/udf/misc.c
45114--- linux-3.0.4/fs/udf/misc.c 2011-07-21 22:17:23.000000000 -0400
45115+++ linux-3.0.4/fs/udf/misc.c 2011-08-23 21:47:56.000000000 -0400
45116@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
45117
45118 u8 udf_tag_checksum(const struct tag *t)
45119 {
45120- u8 *data = (u8 *)t;
45121+ const u8 *data = (const u8 *)t;
45122 u8 checksum = 0;
45123 int i;
45124 for (i = 0; i < sizeof(struct tag); ++i)
45125diff -urNp linux-3.0.4/fs/utimes.c linux-3.0.4/fs/utimes.c
45126--- linux-3.0.4/fs/utimes.c 2011-07-21 22:17:23.000000000 -0400
45127+++ linux-3.0.4/fs/utimes.c 2011-08-23 21:48:14.000000000 -0400
45128@@ -1,6 +1,7 @@
45129 #include <linux/compiler.h>
45130 #include <linux/file.h>
45131 #include <linux/fs.h>
45132+#include <linux/security.h>
45133 #include <linux/linkage.h>
45134 #include <linux/mount.h>
45135 #include <linux/namei.h>
45136@@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
45137 goto mnt_drop_write_and_out;
45138 }
45139 }
45140+
45141+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
45142+ error = -EACCES;
45143+ goto mnt_drop_write_and_out;
45144+ }
45145+
45146 mutex_lock(&inode->i_mutex);
45147 error = notify_change(path->dentry, &newattrs);
45148 mutex_unlock(&inode->i_mutex);
45149diff -urNp linux-3.0.4/fs/xattr_acl.c linux-3.0.4/fs/xattr_acl.c
45150--- linux-3.0.4/fs/xattr_acl.c 2011-07-21 22:17:23.000000000 -0400
45151+++ linux-3.0.4/fs/xattr_acl.c 2011-08-23 21:47:56.000000000 -0400
45152@@ -17,8 +17,8 @@
45153 struct posix_acl *
45154 posix_acl_from_xattr(const void *value, size_t size)
45155 {
45156- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
45157- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
45158+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
45159+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
45160 int count;
45161 struct posix_acl *acl;
45162 struct posix_acl_entry *acl_e;
45163diff -urNp linux-3.0.4/fs/xattr.c linux-3.0.4/fs/xattr.c
45164--- linux-3.0.4/fs/xattr.c 2011-07-21 22:17:23.000000000 -0400
45165+++ linux-3.0.4/fs/xattr.c 2011-08-23 21:48:14.000000000 -0400
45166@@ -254,7 +254,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
45167 * Extended attribute SET operations
45168 */
45169 static long
45170-setxattr(struct dentry *d, const char __user *name, const void __user *value,
45171+setxattr(struct path *path, const char __user *name, const void __user *value,
45172 size_t size, int flags)
45173 {
45174 int error;
45175@@ -278,7 +278,13 @@ setxattr(struct dentry *d, const char __
45176 return PTR_ERR(kvalue);
45177 }
45178
45179- error = vfs_setxattr(d, kname, kvalue, size, flags);
45180+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
45181+ error = -EACCES;
45182+ goto out;
45183+ }
45184+
45185+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
45186+out:
45187 kfree(kvalue);
45188 return error;
45189 }
45190@@ -295,7 +301,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
45191 return error;
45192 error = mnt_want_write(path.mnt);
45193 if (!error) {
45194- error = setxattr(path.dentry, name, value, size, flags);
45195+ error = setxattr(&path, name, value, size, flags);
45196 mnt_drop_write(path.mnt);
45197 }
45198 path_put(&path);
45199@@ -314,7 +320,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
45200 return error;
45201 error = mnt_want_write(path.mnt);
45202 if (!error) {
45203- error = setxattr(path.dentry, name, value, size, flags);
45204+ error = setxattr(&path, name, value, size, flags);
45205 mnt_drop_write(path.mnt);
45206 }
45207 path_put(&path);
45208@@ -325,17 +331,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
45209 const void __user *,value, size_t, size, int, flags)
45210 {
45211 struct file *f;
45212- struct dentry *dentry;
45213 int error = -EBADF;
45214
45215 f = fget(fd);
45216 if (!f)
45217 return error;
45218- dentry = f->f_path.dentry;
45219- audit_inode(NULL, dentry);
45220+ audit_inode(NULL, f->f_path.dentry);
45221 error = mnt_want_write_file(f);
45222 if (!error) {
45223- error = setxattr(dentry, name, value, size, flags);
45224+ error = setxattr(&f->f_path, name, value, size, flags);
45225 mnt_drop_write(f->f_path.mnt);
45226 }
45227 fput(f);
45228diff -urNp linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl32.c linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl32.c
45229--- linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-07-21 22:17:23.000000000 -0400
45230+++ linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-08-23 21:48:14.000000000 -0400
45231@@ -73,6 +73,7 @@ xfs_compat_ioc_fsgeometry_v1(
45232 xfs_fsop_geom_t fsgeo;
45233 int error;
45234
45235+ memset(&fsgeo, 0, sizeof(fsgeo));
45236 error = xfs_fs_geometry(mp, &fsgeo, 3);
45237 if (error)
45238 return -error;
45239diff -urNp linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl.c linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl.c
45240--- linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl.c 2011-07-21 22:17:23.000000000 -0400
45241+++ linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl.c 2011-08-23 21:47:56.000000000 -0400
45242@@ -128,7 +128,7 @@ xfs_find_handle(
45243 }
45244
45245 error = -EFAULT;
45246- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
45247+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
45248 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
45249 goto out_put;
45250
45251diff -urNp linux-3.0.4/fs/xfs/linux-2.6/xfs_iops.c linux-3.0.4/fs/xfs/linux-2.6/xfs_iops.c
45252--- linux-3.0.4/fs/xfs/linux-2.6/xfs_iops.c 2011-07-21 22:17:23.000000000 -0400
45253+++ linux-3.0.4/fs/xfs/linux-2.6/xfs_iops.c 2011-08-23 21:47:56.000000000 -0400
45254@@ -437,7 +437,7 @@ xfs_vn_put_link(
45255 struct nameidata *nd,
45256 void *p)
45257 {
45258- char *s = nd_get_link(nd);
45259+ const char *s = nd_get_link(nd);
45260
45261 if (!IS_ERR(s))
45262 kfree(s);
45263diff -urNp linux-3.0.4/fs/xfs/xfs_bmap.c linux-3.0.4/fs/xfs/xfs_bmap.c
45264--- linux-3.0.4/fs/xfs/xfs_bmap.c 2011-07-21 22:17:23.000000000 -0400
45265+++ linux-3.0.4/fs/xfs/xfs_bmap.c 2011-08-23 21:47:56.000000000 -0400
45266@@ -253,7 +253,7 @@ xfs_bmap_validate_ret(
45267 int nmap,
45268 int ret_nmap);
45269 #else
45270-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
45271+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
45272 #endif /* DEBUG */
45273
45274 STATIC int
45275diff -urNp linux-3.0.4/fs/xfs/xfs_dir2_sf.c linux-3.0.4/fs/xfs/xfs_dir2_sf.c
45276--- linux-3.0.4/fs/xfs/xfs_dir2_sf.c 2011-07-21 22:17:23.000000000 -0400
45277+++ linux-3.0.4/fs/xfs/xfs_dir2_sf.c 2011-08-23 21:47:56.000000000 -0400
45278@@ -780,7 +780,15 @@ xfs_dir2_sf_getdents(
45279 }
45280
45281 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
45282- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
45283+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
45284+ char name[sfep->namelen];
45285+ memcpy(name, sfep->name, sfep->namelen);
45286+ if (filldir(dirent, name, sfep->namelen,
45287+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
45288+ *offset = off & 0x7fffffff;
45289+ return 0;
45290+ }
45291+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
45292 off & 0x7fffffff, ino, DT_UNKNOWN)) {
45293 *offset = off & 0x7fffffff;
45294 return 0;
45295diff -urNp linux-3.0.4/grsecurity/gracl_alloc.c linux-3.0.4/grsecurity/gracl_alloc.c
45296--- linux-3.0.4/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
45297+++ linux-3.0.4/grsecurity/gracl_alloc.c 2011-08-23 21:48:14.000000000 -0400
45298@@ -0,0 +1,105 @@
45299+#include <linux/kernel.h>
45300+#include <linux/mm.h>
45301+#include <linux/slab.h>
45302+#include <linux/vmalloc.h>
45303+#include <linux/gracl.h>
45304+#include <linux/grsecurity.h>
45305+
45306+static unsigned long alloc_stack_next = 1;
45307+static unsigned long alloc_stack_size = 1;
45308+static void **alloc_stack;
45309+
45310+static __inline__ int
45311+alloc_pop(void)
45312+{
45313+ if (alloc_stack_next == 1)
45314+ return 0;
45315+
45316+ kfree(alloc_stack[alloc_stack_next - 2]);
45317+
45318+ alloc_stack_next--;
45319+
45320+ return 1;
45321+}
45322+
45323+static __inline__ int
45324+alloc_push(void *buf)
45325+{
45326+ if (alloc_stack_next >= alloc_stack_size)
45327+ return 1;
45328+
45329+ alloc_stack[alloc_stack_next - 1] = buf;
45330+
45331+ alloc_stack_next++;
45332+
45333+ return 0;
45334+}
45335+
45336+void *
45337+acl_alloc(unsigned long len)
45338+{
45339+ void *ret = NULL;
45340+
45341+ if (!len || len > PAGE_SIZE)
45342+ goto out;
45343+
45344+ ret = kmalloc(len, GFP_KERNEL);
45345+
45346+ if (ret) {
45347+ if (alloc_push(ret)) {
45348+ kfree(ret);
45349+ ret = NULL;
45350+ }
45351+ }
45352+
45353+out:
45354+ return ret;
45355+}
45356+
45357+void *
45358+acl_alloc_num(unsigned long num, unsigned long len)
45359+{
45360+ if (!len || (num > (PAGE_SIZE / len)))
45361+ return NULL;
45362+
45363+ return acl_alloc(num * len);
45364+}
45365+
45366+void
45367+acl_free_all(void)
45368+{
45369+ if (gr_acl_is_enabled() || !alloc_stack)
45370+ return;
45371+
45372+ while (alloc_pop()) ;
45373+
45374+ if (alloc_stack) {
45375+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
45376+ kfree(alloc_stack);
45377+ else
45378+ vfree(alloc_stack);
45379+ }
45380+
45381+ alloc_stack = NULL;
45382+ alloc_stack_size = 1;
45383+ alloc_stack_next = 1;
45384+
45385+ return;
45386+}
45387+
45388+int
45389+acl_alloc_stack_init(unsigned long size)
45390+{
45391+ if ((size * sizeof (void *)) <= PAGE_SIZE)
45392+ alloc_stack =
45393+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
45394+ else
45395+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
45396+
45397+ alloc_stack_size = size;
45398+
45399+ if (!alloc_stack)
45400+ return 0;
45401+ else
45402+ return 1;
45403+}
45404diff -urNp linux-3.0.4/grsecurity/gracl.c linux-3.0.4/grsecurity/gracl.c
45405--- linux-3.0.4/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
45406+++ linux-3.0.4/grsecurity/gracl.c 2011-08-23 21:48:14.000000000 -0400
45407@@ -0,0 +1,4106 @@
45408+#include <linux/kernel.h>
45409+#include <linux/module.h>
45410+#include <linux/sched.h>
45411+#include <linux/mm.h>
45412+#include <linux/file.h>
45413+#include <linux/fs.h>
45414+#include <linux/namei.h>
45415+#include <linux/mount.h>
45416+#include <linux/tty.h>
45417+#include <linux/proc_fs.h>
45418+#include <linux/lglock.h>
45419+#include <linux/slab.h>
45420+#include <linux/vmalloc.h>
45421+#include <linux/types.h>
45422+#include <linux/sysctl.h>
45423+#include <linux/netdevice.h>
45424+#include <linux/ptrace.h>
45425+#include <linux/gracl.h>
45426+#include <linux/gralloc.h>
45427+#include <linux/grsecurity.h>
45428+#include <linux/grinternal.h>
45429+#include <linux/pid_namespace.h>
45430+#include <linux/fdtable.h>
45431+#include <linux/percpu.h>
45432+
45433+#include <asm/uaccess.h>
45434+#include <asm/errno.h>
45435+#include <asm/mman.h>
45436+
45437+static struct acl_role_db acl_role_set;
45438+static struct name_db name_set;
45439+static struct inodev_db inodev_set;
45440+
45441+/* for keeping track of userspace pointers used for subjects, so we
45442+ can share references in the kernel as well
45443+*/
45444+
45445+static struct path real_root;
45446+
45447+static struct acl_subj_map_db subj_map_set;
45448+
45449+static struct acl_role_label *default_role;
45450+
45451+static struct acl_role_label *role_list;
45452+
45453+static u16 acl_sp_role_value;
45454+
45455+extern char *gr_shared_page[4];
45456+static DEFINE_MUTEX(gr_dev_mutex);
45457+DEFINE_RWLOCK(gr_inode_lock);
45458+
45459+struct gr_arg *gr_usermode;
45460+
45461+static unsigned int gr_status __read_only = GR_STATUS_INIT;
45462+
45463+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
45464+extern void gr_clear_learn_entries(void);
45465+
45466+#ifdef CONFIG_GRKERNSEC_RESLOG
45467+extern void gr_log_resource(const struct task_struct *task,
45468+ const int res, const unsigned long wanted, const int gt);
45469+#endif
45470+
45471+unsigned char *gr_system_salt;
45472+unsigned char *gr_system_sum;
45473+
45474+static struct sprole_pw **acl_special_roles = NULL;
45475+static __u16 num_sprole_pws = 0;
45476+
45477+static struct acl_role_label *kernel_role = NULL;
45478+
45479+static unsigned int gr_auth_attempts = 0;
45480+static unsigned long gr_auth_expires = 0UL;
45481+
45482+#ifdef CONFIG_NET
45483+extern struct vfsmount *sock_mnt;
45484+#endif
45485+
45486+extern struct vfsmount *pipe_mnt;
45487+extern struct vfsmount *shm_mnt;
45488+#ifdef CONFIG_HUGETLBFS
45489+extern struct vfsmount *hugetlbfs_vfsmount;
45490+#endif
45491+
45492+static struct acl_object_label *fakefs_obj_rw;
45493+static struct acl_object_label *fakefs_obj_rwx;
45494+
45495+extern int gr_init_uidset(void);
45496+extern void gr_free_uidset(void);
45497+extern void gr_remove_uid(uid_t uid);
45498+extern int gr_find_uid(uid_t uid);
45499+
45500+DECLARE_BRLOCK(vfsmount_lock);
45501+
45502+__inline__ int
45503+gr_acl_is_enabled(void)
45504+{
45505+ return (gr_status & GR_READY);
45506+}
45507+
45508+#ifdef CONFIG_BTRFS_FS
45509+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
45510+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
45511+#endif
45512+
45513+static inline dev_t __get_dev(const struct dentry *dentry)
45514+{
45515+#ifdef CONFIG_BTRFS_FS
45516+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
45517+ return get_btrfs_dev_from_inode(dentry->d_inode);
45518+ else
45519+#endif
45520+ return dentry->d_inode->i_sb->s_dev;
45521+}
45522+
45523+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
45524+{
45525+ return __get_dev(dentry);
45526+}
45527+
45528+static char gr_task_roletype_to_char(struct task_struct *task)
45529+{
45530+ switch (task->role->roletype &
45531+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
45532+ GR_ROLE_SPECIAL)) {
45533+ case GR_ROLE_DEFAULT:
45534+ return 'D';
45535+ case GR_ROLE_USER:
45536+ return 'U';
45537+ case GR_ROLE_GROUP:
45538+ return 'G';
45539+ case GR_ROLE_SPECIAL:
45540+ return 'S';
45541+ }
45542+
45543+ return 'X';
45544+}
45545+
45546+char gr_roletype_to_char(void)
45547+{
45548+ return gr_task_roletype_to_char(current);
45549+}
45550+
45551+__inline__ int
45552+gr_acl_tpe_check(void)
45553+{
45554+ if (unlikely(!(gr_status & GR_READY)))
45555+ return 0;
45556+ if (current->role->roletype & GR_ROLE_TPE)
45557+ return 1;
45558+ else
45559+ return 0;
45560+}
45561+
45562+int
45563+gr_handle_rawio(const struct inode *inode)
45564+{
45565+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
45566+ if (inode && S_ISBLK(inode->i_mode) &&
45567+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
45568+ !capable(CAP_SYS_RAWIO))
45569+ return 1;
45570+#endif
45571+ return 0;
45572+}
45573+
45574+static int
45575+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
45576+{
45577+ if (likely(lena != lenb))
45578+ return 0;
45579+
45580+ return !memcmp(a, b, lena);
45581+}
45582+
45583+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
45584+{
45585+ *buflen -= namelen;
45586+ if (*buflen < 0)
45587+ return -ENAMETOOLONG;
45588+ *buffer -= namelen;
45589+ memcpy(*buffer, str, namelen);
45590+ return 0;
45591+}
45592+
45593+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
45594+{
45595+ return prepend(buffer, buflen, name->name, name->len);
45596+}
45597+
45598+static int prepend_path(const struct path *path, struct path *root,
45599+ char **buffer, int *buflen)
45600+{
45601+ struct dentry *dentry = path->dentry;
45602+ struct vfsmount *vfsmnt = path->mnt;
45603+ bool slash = false;
45604+ int error = 0;
45605+
45606+ while (dentry != root->dentry || vfsmnt != root->mnt) {
45607+ struct dentry * parent;
45608+
45609+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
45610+ /* Global root? */
45611+ if (vfsmnt->mnt_parent == vfsmnt) {
45612+ goto out;
45613+ }
45614+ dentry = vfsmnt->mnt_mountpoint;
45615+ vfsmnt = vfsmnt->mnt_parent;
45616+ continue;
45617+ }
45618+ parent = dentry->d_parent;
45619+ prefetch(parent);
45620+ spin_lock(&dentry->d_lock);
45621+ error = prepend_name(buffer, buflen, &dentry->d_name);
45622+ spin_unlock(&dentry->d_lock);
45623+ if (!error)
45624+ error = prepend(buffer, buflen, "/", 1);
45625+ if (error)
45626+ break;
45627+
45628+ slash = true;
45629+ dentry = parent;
45630+ }
45631+
45632+out:
45633+ if (!error && !slash)
45634+ error = prepend(buffer, buflen, "/", 1);
45635+
45636+ return error;
45637+}
45638+
45639+/* this must be called with vfsmount_lock and rename_lock held */
45640+
45641+static char *__our_d_path(const struct path *path, struct path *root,
45642+ char *buf, int buflen)
45643+{
45644+ char *res = buf + buflen;
45645+ int error;
45646+
45647+ prepend(&res, &buflen, "\0", 1);
45648+ error = prepend_path(path, root, &res, &buflen);
45649+ if (error)
45650+ return ERR_PTR(error);
45651+
45652+ return res;
45653+}
45654+
45655+static char *
45656+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
45657+{
45658+ char *retval;
45659+
45660+ retval = __our_d_path(path, root, buf, buflen);
45661+ if (unlikely(IS_ERR(retval)))
45662+ retval = strcpy(buf, "<path too long>");
45663+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
45664+ retval[1] = '\0';
45665+
45666+ return retval;
45667+}
45668+
45669+static char *
45670+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
45671+ char *buf, int buflen)
45672+{
45673+ struct path path;
45674+ char *res;
45675+
45676+ path.dentry = (struct dentry *)dentry;
45677+ path.mnt = (struct vfsmount *)vfsmnt;
45678+
45679+ /* we can use real_root.dentry, real_root.mnt, because this is only called
45680+ by the RBAC system */
45681+ res = gen_full_path(&path, &real_root, buf, buflen);
45682+
45683+ return res;
45684+}
45685+
45686+static char *
45687+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
45688+ char *buf, int buflen)
45689+{
45690+ char *res;
45691+ struct path path;
45692+ struct path root;
45693+ struct task_struct *reaper = &init_task;
45694+
45695+ path.dentry = (struct dentry *)dentry;
45696+ path.mnt = (struct vfsmount *)vfsmnt;
45697+
45698+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
45699+ get_fs_root(reaper->fs, &root);
45700+
45701+ write_seqlock(&rename_lock);
45702+ br_read_lock(vfsmount_lock);
45703+ res = gen_full_path(&path, &root, buf, buflen);
45704+ br_read_unlock(vfsmount_lock);
45705+ write_sequnlock(&rename_lock);
45706+
45707+ path_put(&root);
45708+ return res;
45709+}
45710+
45711+static char *
45712+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
45713+{
45714+ char *ret;
45715+ write_seqlock(&rename_lock);
45716+ br_read_lock(vfsmount_lock);
45717+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
45718+ PAGE_SIZE);
45719+ br_read_unlock(vfsmount_lock);
45720+ write_sequnlock(&rename_lock);
45721+ return ret;
45722+}
45723+
45724+char *
45725+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
45726+{
45727+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
45728+ PAGE_SIZE);
45729+}
45730+
45731+char *
45732+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
45733+{
45734+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
45735+ PAGE_SIZE);
45736+}
45737+
45738+char *
45739+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
45740+{
45741+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
45742+ PAGE_SIZE);
45743+}
45744+
45745+char *
45746+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
45747+{
45748+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
45749+ PAGE_SIZE);
45750+}
45751+
45752+char *
45753+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
45754+{
45755+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
45756+ PAGE_SIZE);
45757+}
45758+
45759+__inline__ __u32
45760+to_gr_audit(const __u32 reqmode)
45761+{
45762+ /* masks off auditable permission flags, then shifts them to create
45763+ auditing flags, and adds the special case of append auditing if
45764+ we're requesting write */
45765+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
45766+}
45767+
45768+struct acl_subject_label *
45769+lookup_subject_map(const struct acl_subject_label *userp)
45770+{
45771+ unsigned int index = shash(userp, subj_map_set.s_size);
45772+ struct subject_map *match;
45773+
45774+ match = subj_map_set.s_hash[index];
45775+
45776+ while (match && match->user != userp)
45777+ match = match->next;
45778+
45779+ if (match != NULL)
45780+ return match->kernel;
45781+ else
45782+ return NULL;
45783+}
45784+
45785+static void
45786+insert_subj_map_entry(struct subject_map *subjmap)
45787+{
45788+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
45789+ struct subject_map **curr;
45790+
45791+ subjmap->prev = NULL;
45792+
45793+ curr = &subj_map_set.s_hash[index];
45794+ if (*curr != NULL)
45795+ (*curr)->prev = subjmap;
45796+
45797+ subjmap->next = *curr;
45798+ *curr = subjmap;
45799+
45800+ return;
45801+}
45802+
45803+static struct acl_role_label *
45804+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
45805+ const gid_t gid)
45806+{
45807+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
45808+ struct acl_role_label *match;
45809+ struct role_allowed_ip *ipp;
45810+ unsigned int x;
45811+ u32 curr_ip = task->signal->curr_ip;
45812+
45813+ task->signal->saved_ip = curr_ip;
45814+
45815+ match = acl_role_set.r_hash[index];
45816+
45817+ while (match) {
45818+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
45819+ for (x = 0; x < match->domain_child_num; x++) {
45820+ if (match->domain_children[x] == uid)
45821+ goto found;
45822+ }
45823+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
45824+ break;
45825+ match = match->next;
45826+ }
45827+found:
45828+ if (match == NULL) {
45829+ try_group:
45830+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
45831+ match = acl_role_set.r_hash[index];
45832+
45833+ while (match) {
45834+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
45835+ for (x = 0; x < match->domain_child_num; x++) {
45836+ if (match->domain_children[x] == gid)
45837+ goto found2;
45838+ }
45839+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
45840+ break;
45841+ match = match->next;
45842+ }
45843+found2:
45844+ if (match == NULL)
45845+ match = default_role;
45846+ if (match->allowed_ips == NULL)
45847+ return match;
45848+ else {
45849+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
45850+ if (likely
45851+ ((ntohl(curr_ip) & ipp->netmask) ==
45852+ (ntohl(ipp->addr) & ipp->netmask)))
45853+ return match;
45854+ }
45855+ match = default_role;
45856+ }
45857+ } else if (match->allowed_ips == NULL) {
45858+ return match;
45859+ } else {
45860+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
45861+ if (likely
45862+ ((ntohl(curr_ip) & ipp->netmask) ==
45863+ (ntohl(ipp->addr) & ipp->netmask)))
45864+ return match;
45865+ }
45866+ goto try_group;
45867+ }
45868+
45869+ return match;
45870+}
45871+
45872+struct acl_subject_label *
45873+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
45874+ const struct acl_role_label *role)
45875+{
45876+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
45877+ struct acl_subject_label *match;
45878+
45879+ match = role->subj_hash[index];
45880+
45881+ while (match && (match->inode != ino || match->device != dev ||
45882+ (match->mode & GR_DELETED))) {
45883+ match = match->next;
45884+ }
45885+
45886+ if (match && !(match->mode & GR_DELETED))
45887+ return match;
45888+ else
45889+ return NULL;
45890+}
45891+
45892+struct acl_subject_label *
45893+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
45894+ const struct acl_role_label *role)
45895+{
45896+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
45897+ struct acl_subject_label *match;
45898+
45899+ match = role->subj_hash[index];
45900+
45901+ while (match && (match->inode != ino || match->device != dev ||
45902+ !(match->mode & GR_DELETED))) {
45903+ match = match->next;
45904+ }
45905+
45906+ if (match && (match->mode & GR_DELETED))
45907+ return match;
45908+ else
45909+ return NULL;
45910+}
45911+
45912+static struct acl_object_label *
45913+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
45914+ const struct acl_subject_label *subj)
45915+{
45916+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
45917+ struct acl_object_label *match;
45918+
45919+ match = subj->obj_hash[index];
45920+
45921+ while (match && (match->inode != ino || match->device != dev ||
45922+ (match->mode & GR_DELETED))) {
45923+ match = match->next;
45924+ }
45925+
45926+ if (match && !(match->mode & GR_DELETED))
45927+ return match;
45928+ else
45929+ return NULL;
45930+}
45931+
45932+static struct acl_object_label *
45933+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
45934+ const struct acl_subject_label *subj)
45935+{
45936+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
45937+ struct acl_object_label *match;
45938+
45939+ match = subj->obj_hash[index];
45940+
45941+ while (match && (match->inode != ino || match->device != dev ||
45942+ !(match->mode & GR_DELETED))) {
45943+ match = match->next;
45944+ }
45945+
45946+ if (match && (match->mode & GR_DELETED))
45947+ return match;
45948+
45949+ match = subj->obj_hash[index];
45950+
45951+ while (match && (match->inode != ino || match->device != dev ||
45952+ (match->mode & GR_DELETED))) {
45953+ match = match->next;
45954+ }
45955+
45956+ if (match && !(match->mode & GR_DELETED))
45957+ return match;
45958+ else
45959+ return NULL;
45960+}
45961+
45962+static struct name_entry *
45963+lookup_name_entry(const char *name)
45964+{
45965+ unsigned int len = strlen(name);
45966+ unsigned int key = full_name_hash(name, len);
45967+ unsigned int index = key % name_set.n_size;
45968+ struct name_entry *match;
45969+
45970+ match = name_set.n_hash[index];
45971+
45972+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
45973+ match = match->next;
45974+
45975+ return match;
45976+}
45977+
45978+static struct name_entry *
45979+lookup_name_entry_create(const char *name)
45980+{
45981+ unsigned int len = strlen(name);
45982+ unsigned int key = full_name_hash(name, len);
45983+ unsigned int index = key % name_set.n_size;
45984+ struct name_entry *match;
45985+
45986+ match = name_set.n_hash[index];
45987+
45988+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
45989+ !match->deleted))
45990+ match = match->next;
45991+
45992+ if (match && match->deleted)
45993+ return match;
45994+
45995+ match = name_set.n_hash[index];
45996+
45997+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
45998+ match->deleted))
45999+ match = match->next;
46000+
46001+ if (match && !match->deleted)
46002+ return match;
46003+ else
46004+ return NULL;
46005+}
46006+
46007+static struct inodev_entry *
46008+lookup_inodev_entry(const ino_t ino, const dev_t dev)
46009+{
46010+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
46011+ struct inodev_entry *match;
46012+
46013+ match = inodev_set.i_hash[index];
46014+
46015+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
46016+ match = match->next;
46017+
46018+ return match;
46019+}
46020+
46021+static void
46022+insert_inodev_entry(struct inodev_entry *entry)
46023+{
46024+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
46025+ inodev_set.i_size);
46026+ struct inodev_entry **curr;
46027+
46028+ entry->prev = NULL;
46029+
46030+ curr = &inodev_set.i_hash[index];
46031+ if (*curr != NULL)
46032+ (*curr)->prev = entry;
46033+
46034+ entry->next = *curr;
46035+ *curr = entry;
46036+
46037+ return;
46038+}
46039+
46040+static void
46041+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
46042+{
46043+ unsigned int index =
46044+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
46045+ struct acl_role_label **curr;
46046+ struct acl_role_label *tmp;
46047+
46048+ curr = &acl_role_set.r_hash[index];
46049+
46050+ /* if role was already inserted due to domains and already has
46051+ a role in the same bucket as it attached, then we need to
46052+ combine these two buckets
46053+ */
46054+ if (role->next) {
46055+ tmp = role->next;
46056+ while (tmp->next)
46057+ tmp = tmp->next;
46058+ tmp->next = *curr;
46059+ } else
46060+ role->next = *curr;
46061+ *curr = role;
46062+
46063+ return;
46064+}
46065+
46066+static void
46067+insert_acl_role_label(struct acl_role_label *role)
46068+{
46069+ int i;
46070+
46071+ if (role_list == NULL) {
46072+ role_list = role;
46073+ role->prev = NULL;
46074+ } else {
46075+ role->prev = role_list;
46076+ role_list = role;
46077+ }
46078+
46079+ /* used for hash chains */
46080+ role->next = NULL;
46081+
46082+ if (role->roletype & GR_ROLE_DOMAIN) {
46083+ for (i = 0; i < role->domain_child_num; i++)
46084+ __insert_acl_role_label(role, role->domain_children[i]);
46085+ } else
46086+ __insert_acl_role_label(role, role->uidgid);
46087+}
46088+
46089+static int
46090+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
46091+{
46092+ struct name_entry **curr, *nentry;
46093+ struct inodev_entry *ientry;
46094+ unsigned int len = strlen(name);
46095+ unsigned int key = full_name_hash(name, len);
46096+ unsigned int index = key % name_set.n_size;
46097+
46098+ curr = &name_set.n_hash[index];
46099+
46100+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
46101+ curr = &((*curr)->next);
46102+
46103+ if (*curr != NULL)
46104+ return 1;
46105+
46106+ nentry = acl_alloc(sizeof (struct name_entry));
46107+ if (nentry == NULL)
46108+ return 0;
46109+ ientry = acl_alloc(sizeof (struct inodev_entry));
46110+ if (ientry == NULL)
46111+ return 0;
46112+ ientry->nentry = nentry;
46113+
46114+ nentry->key = key;
46115+ nentry->name = name;
46116+ nentry->inode = inode;
46117+ nentry->device = device;
46118+ nentry->len = len;
46119+ nentry->deleted = deleted;
46120+
46121+ nentry->prev = NULL;
46122+ curr = &name_set.n_hash[index];
46123+ if (*curr != NULL)
46124+ (*curr)->prev = nentry;
46125+ nentry->next = *curr;
46126+ *curr = nentry;
46127+
46128+ /* insert us into the table searchable by inode/dev */
46129+ insert_inodev_entry(ientry);
46130+
46131+ return 1;
46132+}
46133+
46134+static void
46135+insert_acl_obj_label(struct acl_object_label *obj,
46136+ struct acl_subject_label *subj)
46137+{
46138+ unsigned int index =
46139+ fhash(obj->inode, obj->device, subj->obj_hash_size);
46140+ struct acl_object_label **curr;
46141+
46142+
46143+ obj->prev = NULL;
46144+
46145+ curr = &subj->obj_hash[index];
46146+ if (*curr != NULL)
46147+ (*curr)->prev = obj;
46148+
46149+ obj->next = *curr;
46150+ *curr = obj;
46151+
46152+ return;
46153+}
46154+
46155+static void
46156+insert_acl_subj_label(struct acl_subject_label *obj,
46157+ struct acl_role_label *role)
46158+{
46159+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
46160+ struct acl_subject_label **curr;
46161+
46162+ obj->prev = NULL;
46163+
46164+ curr = &role->subj_hash[index];
46165+ if (*curr != NULL)
46166+ (*curr)->prev = obj;
46167+
46168+ obj->next = *curr;
46169+ *curr = obj;
46170+
46171+ return;
46172+}
46173+
46174+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
46175+
46176+static void *
46177+create_table(__u32 * len, int elementsize)
46178+{
46179+ unsigned int table_sizes[] = {
46180+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
46181+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
46182+ 4194301, 8388593, 16777213, 33554393, 67108859
46183+ };
46184+ void *newtable = NULL;
46185+ unsigned int pwr = 0;
46186+
46187+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
46188+ table_sizes[pwr] <= *len)
46189+ pwr++;
46190+
46191+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
46192+ return newtable;
46193+
46194+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
46195+ newtable =
46196+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
46197+ else
46198+ newtable = vmalloc(table_sizes[pwr] * elementsize);
46199+
46200+ *len = table_sizes[pwr];
46201+
46202+ return newtable;
46203+}
46204+
46205+static int
46206+init_variables(const struct gr_arg *arg)
46207+{
46208+ struct task_struct *reaper = &init_task;
46209+ unsigned int stacksize;
46210+
46211+ subj_map_set.s_size = arg->role_db.num_subjects;
46212+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
46213+ name_set.n_size = arg->role_db.num_objects;
46214+ inodev_set.i_size = arg->role_db.num_objects;
46215+
46216+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
46217+ !name_set.n_size || !inodev_set.i_size)
46218+ return 1;
46219+
46220+ if (!gr_init_uidset())
46221+ return 1;
46222+
46223+ /* set up the stack that holds allocation info */
46224+
46225+ stacksize = arg->role_db.num_pointers + 5;
46226+
46227+ if (!acl_alloc_stack_init(stacksize))
46228+ return 1;
46229+
46230+ /* grab reference for the real root dentry and vfsmount */
46231+ get_fs_root(reaper->fs, &real_root);
46232+
46233+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46234+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
46235+#endif
46236+
46237+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
46238+ if (fakefs_obj_rw == NULL)
46239+ return 1;
46240+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
46241+
46242+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
46243+ if (fakefs_obj_rwx == NULL)
46244+ return 1;
46245+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
46246+
46247+ subj_map_set.s_hash =
46248+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
46249+ acl_role_set.r_hash =
46250+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
46251+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
46252+ inodev_set.i_hash =
46253+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
46254+
46255+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
46256+ !name_set.n_hash || !inodev_set.i_hash)
46257+ return 1;
46258+
46259+ memset(subj_map_set.s_hash, 0,
46260+ sizeof(struct subject_map *) * subj_map_set.s_size);
46261+ memset(acl_role_set.r_hash, 0,
46262+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
46263+ memset(name_set.n_hash, 0,
46264+ sizeof (struct name_entry *) * name_set.n_size);
46265+ memset(inodev_set.i_hash, 0,
46266+ sizeof (struct inodev_entry *) * inodev_set.i_size);
46267+
46268+ return 0;
46269+}
46270+
46271+/* free information not needed after startup
46272+ currently contains user->kernel pointer mappings for subjects
46273+*/
46274+
46275+static void
46276+free_init_variables(void)
46277+{
46278+ __u32 i;
46279+
46280+ if (subj_map_set.s_hash) {
46281+ for (i = 0; i < subj_map_set.s_size; i++) {
46282+ if (subj_map_set.s_hash[i]) {
46283+ kfree(subj_map_set.s_hash[i]);
46284+ subj_map_set.s_hash[i] = NULL;
46285+ }
46286+ }
46287+
46288+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
46289+ PAGE_SIZE)
46290+ kfree(subj_map_set.s_hash);
46291+ else
46292+ vfree(subj_map_set.s_hash);
46293+ }
46294+
46295+ return;
46296+}
46297+
46298+static void
46299+free_variables(void)
46300+{
46301+ struct acl_subject_label *s;
46302+ struct acl_role_label *r;
46303+ struct task_struct *task, *task2;
46304+ unsigned int x;
46305+
46306+ gr_clear_learn_entries();
46307+
46308+ read_lock(&tasklist_lock);
46309+ do_each_thread(task2, task) {
46310+ task->acl_sp_role = 0;
46311+ task->acl_role_id = 0;
46312+ task->acl = NULL;
46313+ task->role = NULL;
46314+ } while_each_thread(task2, task);
46315+ read_unlock(&tasklist_lock);
46316+
46317+ /* release the reference to the real root dentry and vfsmount */
46318+ path_put(&real_root);
46319+
46320+ /* free all object hash tables */
46321+
46322+ FOR_EACH_ROLE_START(r)
46323+ if (r->subj_hash == NULL)
46324+ goto next_role;
46325+ FOR_EACH_SUBJECT_START(r, s, x)
46326+ if (s->obj_hash == NULL)
46327+ break;
46328+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
46329+ kfree(s->obj_hash);
46330+ else
46331+ vfree(s->obj_hash);
46332+ FOR_EACH_SUBJECT_END(s, x)
46333+ FOR_EACH_NESTED_SUBJECT_START(r, s)
46334+ if (s->obj_hash == NULL)
46335+ break;
46336+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
46337+ kfree(s->obj_hash);
46338+ else
46339+ vfree(s->obj_hash);
46340+ FOR_EACH_NESTED_SUBJECT_END(s)
46341+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
46342+ kfree(r->subj_hash);
46343+ else
46344+ vfree(r->subj_hash);
46345+ r->subj_hash = NULL;
46346+next_role:
46347+ FOR_EACH_ROLE_END(r)
46348+
46349+ acl_free_all();
46350+
46351+ if (acl_role_set.r_hash) {
46352+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
46353+ PAGE_SIZE)
46354+ kfree(acl_role_set.r_hash);
46355+ else
46356+ vfree(acl_role_set.r_hash);
46357+ }
46358+ if (name_set.n_hash) {
46359+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
46360+ PAGE_SIZE)
46361+ kfree(name_set.n_hash);
46362+ else
46363+ vfree(name_set.n_hash);
46364+ }
46365+
46366+ if (inodev_set.i_hash) {
46367+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
46368+ PAGE_SIZE)
46369+ kfree(inodev_set.i_hash);
46370+ else
46371+ vfree(inodev_set.i_hash);
46372+ }
46373+
46374+ gr_free_uidset();
46375+
46376+ memset(&name_set, 0, sizeof (struct name_db));
46377+ memset(&inodev_set, 0, sizeof (struct inodev_db));
46378+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
46379+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
46380+
46381+ default_role = NULL;
46382+ role_list = NULL;
46383+
46384+ return;
46385+}
46386+
46387+static __u32
46388+count_user_objs(struct acl_object_label *userp)
46389+{
46390+ struct acl_object_label o_tmp;
46391+ __u32 num = 0;
46392+
46393+ while (userp) {
46394+ if (copy_from_user(&o_tmp, userp,
46395+ sizeof (struct acl_object_label)))
46396+ break;
46397+
46398+ userp = o_tmp.prev;
46399+ num++;
46400+ }
46401+
46402+ return num;
46403+}
46404+
46405+static struct acl_subject_label *
46406+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
46407+
46408+static int
46409+copy_user_glob(struct acl_object_label *obj)
46410+{
46411+ struct acl_object_label *g_tmp, **guser;
46412+ unsigned int len;
46413+ char *tmp;
46414+
46415+ if (obj->globbed == NULL)
46416+ return 0;
46417+
46418+ guser = &obj->globbed;
46419+ while (*guser) {
46420+ g_tmp = (struct acl_object_label *)
46421+ acl_alloc(sizeof (struct acl_object_label));
46422+ if (g_tmp == NULL)
46423+ return -ENOMEM;
46424+
46425+ if (copy_from_user(g_tmp, *guser,
46426+ sizeof (struct acl_object_label)))
46427+ return -EFAULT;
46428+
46429+ len = strnlen_user(g_tmp->filename, PATH_MAX);
46430+
46431+ if (!len || len >= PATH_MAX)
46432+ return -EINVAL;
46433+
46434+ if ((tmp = (char *) acl_alloc(len)) == NULL)
46435+ return -ENOMEM;
46436+
46437+ if (copy_from_user(tmp, g_tmp->filename, len))
46438+ return -EFAULT;
46439+ tmp[len-1] = '\0';
46440+ g_tmp->filename = tmp;
46441+
46442+ *guser = g_tmp;
46443+ guser = &(g_tmp->next);
46444+ }
46445+
46446+ return 0;
46447+}
46448+
46449+static int
46450+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
46451+ struct acl_role_label *role)
46452+{
46453+ struct acl_object_label *o_tmp;
46454+ unsigned int len;
46455+ int ret;
46456+ char *tmp;
46457+
46458+ while (userp) {
46459+ if ((o_tmp = (struct acl_object_label *)
46460+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
46461+ return -ENOMEM;
46462+
46463+ if (copy_from_user(o_tmp, userp,
46464+ sizeof (struct acl_object_label)))
46465+ return -EFAULT;
46466+
46467+ userp = o_tmp->prev;
46468+
46469+ len = strnlen_user(o_tmp->filename, PATH_MAX);
46470+
46471+ if (!len || len >= PATH_MAX)
46472+ return -EINVAL;
46473+
46474+ if ((tmp = (char *) acl_alloc(len)) == NULL)
46475+ return -ENOMEM;
46476+
46477+ if (copy_from_user(tmp, o_tmp->filename, len))
46478+ return -EFAULT;
46479+ tmp[len-1] = '\0';
46480+ o_tmp->filename = tmp;
46481+
46482+ insert_acl_obj_label(o_tmp, subj);
46483+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
46484+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
46485+ return -ENOMEM;
46486+
46487+ ret = copy_user_glob(o_tmp);
46488+ if (ret)
46489+ return ret;
46490+
46491+ if (o_tmp->nested) {
46492+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
46493+ if (IS_ERR(o_tmp->nested))
46494+ return PTR_ERR(o_tmp->nested);
46495+
46496+ /* insert into nested subject list */
46497+ o_tmp->nested->next = role->hash->first;
46498+ role->hash->first = o_tmp->nested;
46499+ }
46500+ }
46501+
46502+ return 0;
46503+}
46504+
46505+static __u32
46506+count_user_subjs(struct acl_subject_label *userp)
46507+{
46508+ struct acl_subject_label s_tmp;
46509+ __u32 num = 0;
46510+
46511+ while (userp) {
46512+ if (copy_from_user(&s_tmp, userp,
46513+ sizeof (struct acl_subject_label)))
46514+ break;
46515+
46516+ userp = s_tmp.prev;
46517+ /* do not count nested subjects against this count, since
46518+ they are not included in the hash table, but are
46519+ attached to objects. We have already counted
46520+ the subjects in userspace for the allocation
46521+ stack
46522+ */
46523+ if (!(s_tmp.mode & GR_NESTED))
46524+ num++;
46525+ }
46526+
46527+ return num;
46528+}
46529+
46530+static int
46531+copy_user_allowedips(struct acl_role_label *rolep)
46532+{
46533+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
46534+
46535+ ruserip = rolep->allowed_ips;
46536+
46537+ while (ruserip) {
46538+ rlast = rtmp;
46539+
46540+ if ((rtmp = (struct role_allowed_ip *)
46541+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
46542+ return -ENOMEM;
46543+
46544+ if (copy_from_user(rtmp, ruserip,
46545+ sizeof (struct role_allowed_ip)))
46546+ return -EFAULT;
46547+
46548+ ruserip = rtmp->prev;
46549+
46550+ if (!rlast) {
46551+ rtmp->prev = NULL;
46552+ rolep->allowed_ips = rtmp;
46553+ } else {
46554+ rlast->next = rtmp;
46555+ rtmp->prev = rlast;
46556+ }
46557+
46558+ if (!ruserip)
46559+ rtmp->next = NULL;
46560+ }
46561+
46562+ return 0;
46563+}
46564+
46565+static int
46566+copy_user_transitions(struct acl_role_label *rolep)
46567+{
46568+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
46569+
46570+ unsigned int len;
46571+ char *tmp;
46572+
46573+ rusertp = rolep->transitions;
46574+
46575+ while (rusertp) {
46576+ rlast = rtmp;
46577+
46578+ if ((rtmp = (struct role_transition *)
46579+ acl_alloc(sizeof (struct role_transition))) == NULL)
46580+ return -ENOMEM;
46581+
46582+ if (copy_from_user(rtmp, rusertp,
46583+ sizeof (struct role_transition)))
46584+ return -EFAULT;
46585+
46586+ rusertp = rtmp->prev;
46587+
46588+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
46589+
46590+ if (!len || len >= GR_SPROLE_LEN)
46591+ return -EINVAL;
46592+
46593+ if ((tmp = (char *) acl_alloc(len)) == NULL)
46594+ return -ENOMEM;
46595+
46596+ if (copy_from_user(tmp, rtmp->rolename, len))
46597+ return -EFAULT;
46598+ tmp[len-1] = '\0';
46599+ rtmp->rolename = tmp;
46600+
46601+ if (!rlast) {
46602+ rtmp->prev = NULL;
46603+ rolep->transitions = rtmp;
46604+ } else {
46605+ rlast->next = rtmp;
46606+ rtmp->prev = rlast;
46607+ }
46608+
46609+ if (!rusertp)
46610+ rtmp->next = NULL;
46611+ }
46612+
46613+ return 0;
46614+}
46615+
46616+static struct acl_subject_label *
46617+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
46618+{
46619+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
46620+ unsigned int len;
46621+ char *tmp;
46622+ __u32 num_objs;
46623+ struct acl_ip_label **i_tmp, *i_utmp2;
46624+ struct gr_hash_struct ghash;
46625+ struct subject_map *subjmap;
46626+ unsigned int i_num;
46627+ int err;
46628+
46629+ s_tmp = lookup_subject_map(userp);
46630+
46631+ /* we've already copied this subject into the kernel, just return
46632+ the reference to it, and don't copy it over again
46633+ */
46634+ if (s_tmp)
46635+ return(s_tmp);
46636+
46637+ if ((s_tmp = (struct acl_subject_label *)
46638+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
46639+ return ERR_PTR(-ENOMEM);
46640+
46641+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
46642+ if (subjmap == NULL)
46643+ return ERR_PTR(-ENOMEM);
46644+
46645+ subjmap->user = userp;
46646+ subjmap->kernel = s_tmp;
46647+ insert_subj_map_entry(subjmap);
46648+
46649+ if (copy_from_user(s_tmp, userp,
46650+ sizeof (struct acl_subject_label)))
46651+ return ERR_PTR(-EFAULT);
46652+
46653+ len = strnlen_user(s_tmp->filename, PATH_MAX);
46654+
46655+ if (!len || len >= PATH_MAX)
46656+ return ERR_PTR(-EINVAL);
46657+
46658+ if ((tmp = (char *) acl_alloc(len)) == NULL)
46659+ return ERR_PTR(-ENOMEM);
46660+
46661+ if (copy_from_user(tmp, s_tmp->filename, len))
46662+ return ERR_PTR(-EFAULT);
46663+ tmp[len-1] = '\0';
46664+ s_tmp->filename = tmp;
46665+
46666+ if (!strcmp(s_tmp->filename, "/"))
46667+ role->root_label = s_tmp;
46668+
46669+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
46670+ return ERR_PTR(-EFAULT);
46671+
46672+ /* copy user and group transition tables */
46673+
46674+ if (s_tmp->user_trans_num) {
46675+ uid_t *uidlist;
46676+
46677+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
46678+ if (uidlist == NULL)
46679+ return ERR_PTR(-ENOMEM);
46680+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
46681+ return ERR_PTR(-EFAULT);
46682+
46683+ s_tmp->user_transitions = uidlist;
46684+ }
46685+
46686+ if (s_tmp->group_trans_num) {
46687+ gid_t *gidlist;
46688+
46689+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
46690+ if (gidlist == NULL)
46691+ return ERR_PTR(-ENOMEM);
46692+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
46693+ return ERR_PTR(-EFAULT);
46694+
46695+ s_tmp->group_transitions = gidlist;
46696+ }
46697+
46698+ /* set up object hash table */
46699+ num_objs = count_user_objs(ghash.first);
46700+
46701+ s_tmp->obj_hash_size = num_objs;
46702+ s_tmp->obj_hash =
46703+ (struct acl_object_label **)
46704+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
46705+
46706+ if (!s_tmp->obj_hash)
46707+ return ERR_PTR(-ENOMEM);
46708+
46709+ memset(s_tmp->obj_hash, 0,
46710+ s_tmp->obj_hash_size *
46711+ sizeof (struct acl_object_label *));
46712+
46713+ /* add in objects */
46714+ err = copy_user_objs(ghash.first, s_tmp, role);
46715+
46716+ if (err)
46717+ return ERR_PTR(err);
46718+
46719+ /* set pointer for parent subject */
46720+ if (s_tmp->parent_subject) {
46721+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
46722+
46723+ if (IS_ERR(s_tmp2))
46724+ return s_tmp2;
46725+
46726+ s_tmp->parent_subject = s_tmp2;
46727+ }
46728+
46729+ /* add in ip acls */
46730+
46731+ if (!s_tmp->ip_num) {
46732+ s_tmp->ips = NULL;
46733+ goto insert;
46734+ }
46735+
46736+ i_tmp =
46737+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
46738+ sizeof (struct acl_ip_label *));
46739+
46740+ if (!i_tmp)
46741+ return ERR_PTR(-ENOMEM);
46742+
46743+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
46744+ *(i_tmp + i_num) =
46745+ (struct acl_ip_label *)
46746+ acl_alloc(sizeof (struct acl_ip_label));
46747+ if (!*(i_tmp + i_num))
46748+ return ERR_PTR(-ENOMEM);
46749+
46750+ if (copy_from_user
46751+ (&i_utmp2, s_tmp->ips + i_num,
46752+ sizeof (struct acl_ip_label *)))
46753+ return ERR_PTR(-EFAULT);
46754+
46755+ if (copy_from_user
46756+ (*(i_tmp + i_num), i_utmp2,
46757+ sizeof (struct acl_ip_label)))
46758+ return ERR_PTR(-EFAULT);
46759+
46760+ if ((*(i_tmp + i_num))->iface == NULL)
46761+ continue;
46762+
46763+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
46764+ if (!len || len >= IFNAMSIZ)
46765+ return ERR_PTR(-EINVAL);
46766+ tmp = acl_alloc(len);
46767+ if (tmp == NULL)
46768+ return ERR_PTR(-ENOMEM);
46769+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
46770+ return ERR_PTR(-EFAULT);
46771+ (*(i_tmp + i_num))->iface = tmp;
46772+ }
46773+
46774+ s_tmp->ips = i_tmp;
46775+
46776+insert:
46777+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
46778+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
46779+ return ERR_PTR(-ENOMEM);
46780+
46781+ return s_tmp;
46782+}
46783+
46784+static int
46785+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
46786+{
46787+ struct acl_subject_label s_pre;
46788+ struct acl_subject_label * ret;
46789+ int err;
46790+
46791+ while (userp) {
46792+ if (copy_from_user(&s_pre, userp,
46793+ sizeof (struct acl_subject_label)))
46794+ return -EFAULT;
46795+
46796+ /* do not add nested subjects here, add
46797+ while parsing objects
46798+ */
46799+
46800+ if (s_pre.mode & GR_NESTED) {
46801+ userp = s_pre.prev;
46802+ continue;
46803+ }
46804+
46805+ ret = do_copy_user_subj(userp, role);
46806+
46807+ err = PTR_ERR(ret);
46808+ if (IS_ERR(ret))
46809+ return err;
46810+
46811+ insert_acl_subj_label(ret, role);
46812+
46813+ userp = s_pre.prev;
46814+ }
46815+
46816+ return 0;
46817+}
46818+
46819+static int
46820+copy_user_acl(struct gr_arg *arg)
46821+{
46822+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
46823+ struct sprole_pw *sptmp;
46824+ struct gr_hash_struct *ghash;
46825+ uid_t *domainlist;
46826+ unsigned int r_num;
46827+ unsigned int len;
46828+ char *tmp;
46829+ int err = 0;
46830+ __u16 i;
46831+ __u32 num_subjs;
46832+
46833+ /* we need a default and kernel role */
46834+ if (arg->role_db.num_roles < 2)
46835+ return -EINVAL;
46836+
46837+ /* copy special role authentication info from userspace */
46838+
46839+ num_sprole_pws = arg->num_sprole_pws;
46840+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
46841+
46842+ if (!acl_special_roles) {
46843+ err = -ENOMEM;
46844+ goto cleanup;
46845+ }
46846+
46847+ for (i = 0; i < num_sprole_pws; i++) {
46848+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
46849+ if (!sptmp) {
46850+ err = -ENOMEM;
46851+ goto cleanup;
46852+ }
46853+ if (copy_from_user(sptmp, arg->sprole_pws + i,
46854+ sizeof (struct sprole_pw))) {
46855+ err = -EFAULT;
46856+ goto cleanup;
46857+ }
46858+
46859+ len =
46860+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
46861+
46862+ if (!len || len >= GR_SPROLE_LEN) {
46863+ err = -EINVAL;
46864+ goto cleanup;
46865+ }
46866+
46867+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
46868+ err = -ENOMEM;
46869+ goto cleanup;
46870+ }
46871+
46872+ if (copy_from_user(tmp, sptmp->rolename, len)) {
46873+ err = -EFAULT;
46874+ goto cleanup;
46875+ }
46876+ tmp[len-1] = '\0';
46877+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46878+ printk(KERN_ALERT "Copying special role %s\n", tmp);
46879+#endif
46880+ sptmp->rolename = tmp;
46881+ acl_special_roles[i] = sptmp;
46882+ }
46883+
46884+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
46885+
46886+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
46887+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
46888+
46889+ if (!r_tmp) {
46890+ err = -ENOMEM;
46891+ goto cleanup;
46892+ }
46893+
46894+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
46895+ sizeof (struct acl_role_label *))) {
46896+ err = -EFAULT;
46897+ goto cleanup;
46898+ }
46899+
46900+ if (copy_from_user(r_tmp, r_utmp2,
46901+ sizeof (struct acl_role_label))) {
46902+ err = -EFAULT;
46903+ goto cleanup;
46904+ }
46905+
46906+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
46907+
46908+ if (!len || len >= PATH_MAX) {
46909+ err = -EINVAL;
46910+ goto cleanup;
46911+ }
46912+
46913+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
46914+ err = -ENOMEM;
46915+ goto cleanup;
46916+ }
46917+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
46918+ err = -EFAULT;
46919+ goto cleanup;
46920+ }
46921+ tmp[len-1] = '\0';
46922+ r_tmp->rolename = tmp;
46923+
46924+ if (!strcmp(r_tmp->rolename, "default")
46925+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
46926+ default_role = r_tmp;
46927+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
46928+ kernel_role = r_tmp;
46929+ }
46930+
46931+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
46932+ err = -ENOMEM;
46933+ goto cleanup;
46934+ }
46935+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
46936+ err = -EFAULT;
46937+ goto cleanup;
46938+ }
46939+
46940+ r_tmp->hash = ghash;
46941+
46942+ num_subjs = count_user_subjs(r_tmp->hash->first);
46943+
46944+ r_tmp->subj_hash_size = num_subjs;
46945+ r_tmp->subj_hash =
46946+ (struct acl_subject_label **)
46947+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
46948+
46949+ if (!r_tmp->subj_hash) {
46950+ err = -ENOMEM;
46951+ goto cleanup;
46952+ }
46953+
46954+ err = copy_user_allowedips(r_tmp);
46955+ if (err)
46956+ goto cleanup;
46957+
46958+ /* copy domain info */
46959+ if (r_tmp->domain_children != NULL) {
46960+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
46961+ if (domainlist == NULL) {
46962+ err = -ENOMEM;
46963+ goto cleanup;
46964+ }
46965+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
46966+ err = -EFAULT;
46967+ goto cleanup;
46968+ }
46969+ r_tmp->domain_children = domainlist;
46970+ }
46971+
46972+ err = copy_user_transitions(r_tmp);
46973+ if (err)
46974+ goto cleanup;
46975+
46976+ memset(r_tmp->subj_hash, 0,
46977+ r_tmp->subj_hash_size *
46978+ sizeof (struct acl_subject_label *));
46979+
46980+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
46981+
46982+ if (err)
46983+ goto cleanup;
46984+
46985+ /* set nested subject list to null */
46986+ r_tmp->hash->first = NULL;
46987+
46988+ insert_acl_role_label(r_tmp);
46989+ }
46990+
46991+ goto return_err;
46992+ cleanup:
46993+ free_variables();
46994+ return_err:
46995+ return err;
46996+
46997+}
46998+
46999+static int
47000+gracl_init(struct gr_arg *args)
47001+{
47002+ int error = 0;
47003+
47004+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
47005+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
47006+
47007+ if (init_variables(args)) {
47008+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
47009+ error = -ENOMEM;
47010+ free_variables();
47011+ goto out;
47012+ }
47013+
47014+ error = copy_user_acl(args);
47015+ free_init_variables();
47016+ if (error) {
47017+ free_variables();
47018+ goto out;
47019+ }
47020+
47021+ if ((error = gr_set_acls(0))) {
47022+ free_variables();
47023+ goto out;
47024+ }
47025+
47026+ pax_open_kernel();
47027+ gr_status |= GR_READY;
47028+ pax_close_kernel();
47029+
47030+ out:
47031+ return error;
47032+}
47033+
47034+/* derived from glibc fnmatch() 0: match, 1: no match*/
47035+
47036+static int
47037+glob_match(const char *p, const char *n)
47038+{
47039+ char c;
47040+
47041+ while ((c = *p++) != '\0') {
47042+ switch (c) {
47043+ case '?':
47044+ if (*n == '\0')
47045+ return 1;
47046+ else if (*n == '/')
47047+ return 1;
47048+ break;
47049+ case '\\':
47050+ if (*n != c)
47051+ return 1;
47052+ break;
47053+ case '*':
47054+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
47055+ if (*n == '/')
47056+ return 1;
47057+ else if (c == '?') {
47058+ if (*n == '\0')
47059+ return 1;
47060+ else
47061+ ++n;
47062+ }
47063+ }
47064+ if (c == '\0') {
47065+ return 0;
47066+ } else {
47067+ const char *endp;
47068+
47069+ if ((endp = strchr(n, '/')) == NULL)
47070+ endp = n + strlen(n);
47071+
47072+ if (c == '[') {
47073+ for (--p; n < endp; ++n)
47074+ if (!glob_match(p, n))
47075+ return 0;
47076+ } else if (c == '/') {
47077+ while (*n != '\0' && *n != '/')
47078+ ++n;
47079+ if (*n == '/' && !glob_match(p, n + 1))
47080+ return 0;
47081+ } else {
47082+ for (--p; n < endp; ++n)
47083+ if (*n == c && !glob_match(p, n))
47084+ return 0;
47085+ }
47086+
47087+ return 1;
47088+ }
47089+ case '[':
47090+ {
47091+ int not;
47092+ char cold;
47093+
47094+ if (*n == '\0' || *n == '/')
47095+ return 1;
47096+
47097+ not = (*p == '!' || *p == '^');
47098+ if (not)
47099+ ++p;
47100+
47101+ c = *p++;
47102+ for (;;) {
47103+ unsigned char fn = (unsigned char)*n;
47104+
47105+ if (c == '\0')
47106+ return 1;
47107+ else {
47108+ if (c == fn)
47109+ goto matched;
47110+ cold = c;
47111+ c = *p++;
47112+
47113+ if (c == '-' && *p != ']') {
47114+ unsigned char cend = *p++;
47115+
47116+ if (cend == '\0')
47117+ return 1;
47118+
47119+ if (cold <= fn && fn <= cend)
47120+ goto matched;
47121+
47122+ c = *p++;
47123+ }
47124+ }
47125+
47126+ if (c == ']')
47127+ break;
47128+ }
47129+ if (!not)
47130+ return 1;
47131+ break;
47132+ matched:
47133+ while (c != ']') {
47134+ if (c == '\0')
47135+ return 1;
47136+
47137+ c = *p++;
47138+ }
47139+ if (not)
47140+ return 1;
47141+ }
47142+ break;
47143+ default:
47144+ if (c != *n)
47145+ return 1;
47146+ }
47147+
47148+ ++n;
47149+ }
47150+
47151+ if (*n == '\0')
47152+ return 0;
47153+
47154+ if (*n == '/')
47155+ return 0;
47156+
47157+ return 1;
47158+}
47159+
47160+static struct acl_object_label *
47161+chk_glob_label(struct acl_object_label *globbed,
47162+ struct dentry *dentry, struct vfsmount *mnt, char **path)
47163+{
47164+ struct acl_object_label *tmp;
47165+
47166+ if (*path == NULL)
47167+ *path = gr_to_filename_nolock(dentry, mnt);
47168+
47169+ tmp = globbed;
47170+
47171+ while (tmp) {
47172+ if (!glob_match(tmp->filename, *path))
47173+ return tmp;
47174+ tmp = tmp->next;
47175+ }
47176+
47177+ return NULL;
47178+}
47179+
47180+static struct acl_object_label *
47181+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
47182+ const ino_t curr_ino, const dev_t curr_dev,
47183+ const struct acl_subject_label *subj, char **path, const int checkglob)
47184+{
47185+ struct acl_subject_label *tmpsubj;
47186+ struct acl_object_label *retval;
47187+ struct acl_object_label *retval2;
47188+
47189+ tmpsubj = (struct acl_subject_label *) subj;
47190+ read_lock(&gr_inode_lock);
47191+ do {
47192+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
47193+ if (retval) {
47194+ if (checkglob && retval->globbed) {
47195+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
47196+ (struct vfsmount *)orig_mnt, path);
47197+ if (retval2)
47198+ retval = retval2;
47199+ }
47200+ break;
47201+ }
47202+ } while ((tmpsubj = tmpsubj->parent_subject));
47203+ read_unlock(&gr_inode_lock);
47204+
47205+ return retval;
47206+}
47207+
47208+static __inline__ struct acl_object_label *
47209+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
47210+ struct dentry *curr_dentry,
47211+ const struct acl_subject_label *subj, char **path, const int checkglob)
47212+{
47213+ int newglob = checkglob;
47214+ ino_t inode;
47215+ dev_t device;
47216+
47217+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
47218+ as we don't want a / * rule to match instead of the / object
47219+ don't do this for create lookups that call this function though, since they're looking up
47220+ on the parent and thus need globbing checks on all paths
47221+ */
47222+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
47223+ newglob = GR_NO_GLOB;
47224+
47225+ spin_lock(&curr_dentry->d_lock);
47226+ inode = curr_dentry->d_inode->i_ino;
47227+ device = __get_dev(curr_dentry);
47228+ spin_unlock(&curr_dentry->d_lock);
47229+
47230+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
47231+}
47232+
47233+static struct acl_object_label *
47234+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
47235+ const struct acl_subject_label *subj, char *path, const int checkglob)
47236+{
47237+ struct dentry *dentry = (struct dentry *) l_dentry;
47238+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
47239+ struct acl_object_label *retval;
47240+ struct dentry *parent;
47241+
47242+ write_seqlock(&rename_lock);
47243+ br_read_lock(vfsmount_lock);
47244+
47245+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
47246+#ifdef CONFIG_NET
47247+ mnt == sock_mnt ||
47248+#endif
47249+#ifdef CONFIG_HUGETLBFS
47250+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
47251+#endif
47252+ /* ignore Eric Biederman */
47253+ IS_PRIVATE(l_dentry->d_inode))) {
47254+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
47255+ goto out;
47256+ }
47257+
47258+ for (;;) {
47259+ if (dentry == real_root.dentry && mnt == real_root.mnt)
47260+ break;
47261+
47262+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
47263+ if (mnt->mnt_parent == mnt)
47264+ break;
47265+
47266+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
47267+ if (retval != NULL)
47268+ goto out;
47269+
47270+ dentry = mnt->mnt_mountpoint;
47271+ mnt = mnt->mnt_parent;
47272+ continue;
47273+ }
47274+
47275+ parent = dentry->d_parent;
47276+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
47277+ if (retval != NULL)
47278+ goto out;
47279+
47280+ dentry = parent;
47281+ }
47282+
47283+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
47284+
47285+ /* real_root is pinned so we don't have to hold a reference */
47286+ if (retval == NULL)
47287+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
47288+out:
47289+ br_read_unlock(vfsmount_lock);
47290+ write_sequnlock(&rename_lock);
47291+
47292+ BUG_ON(retval == NULL);
47293+
47294+ return retval;
47295+}
47296+
47297+static __inline__ struct acl_object_label *
47298+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
47299+ const struct acl_subject_label *subj)
47300+{
47301+ char *path = NULL;
47302+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
47303+}
47304+
47305+static __inline__ struct acl_object_label *
47306+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
47307+ const struct acl_subject_label *subj)
47308+{
47309+ char *path = NULL;
47310+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
47311+}
47312+
47313+static __inline__ struct acl_object_label *
47314+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
47315+ const struct acl_subject_label *subj, char *path)
47316+{
47317+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
47318+}
47319+
47320+static struct acl_subject_label *
47321+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
47322+ const struct acl_role_label *role)
47323+{
47324+ struct dentry *dentry = (struct dentry *) l_dentry;
47325+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
47326+ struct acl_subject_label *retval;
47327+ struct dentry *parent;
47328+
47329+ write_seqlock(&rename_lock);
47330+ br_read_lock(vfsmount_lock);
47331+
47332+ for (;;) {
47333+ if (dentry == real_root.dentry && mnt == real_root.mnt)
47334+ break;
47335+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
47336+ if (mnt->mnt_parent == mnt)
47337+ break;
47338+
47339+ spin_lock(&dentry->d_lock);
47340+ read_lock(&gr_inode_lock);
47341+ retval =
47342+ lookup_acl_subj_label(dentry->d_inode->i_ino,
47343+ __get_dev(dentry), role);
47344+ read_unlock(&gr_inode_lock);
47345+ spin_unlock(&dentry->d_lock);
47346+ if (retval != NULL)
47347+ goto out;
47348+
47349+ dentry = mnt->mnt_mountpoint;
47350+ mnt = mnt->mnt_parent;
47351+ continue;
47352+ }
47353+
47354+ spin_lock(&dentry->d_lock);
47355+ read_lock(&gr_inode_lock);
47356+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
47357+ __get_dev(dentry), role);
47358+ read_unlock(&gr_inode_lock);
47359+ parent = dentry->d_parent;
47360+ spin_unlock(&dentry->d_lock);
47361+
47362+ if (retval != NULL)
47363+ goto out;
47364+
47365+ dentry = parent;
47366+ }
47367+
47368+ spin_lock(&dentry->d_lock);
47369+ read_lock(&gr_inode_lock);
47370+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
47371+ __get_dev(dentry), role);
47372+ read_unlock(&gr_inode_lock);
47373+ spin_unlock(&dentry->d_lock);
47374+
47375+ if (unlikely(retval == NULL)) {
47376+ /* real_root is pinned, we don't need to hold a reference */
47377+ read_lock(&gr_inode_lock);
47378+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
47379+ __get_dev(real_root.dentry), role);
47380+ read_unlock(&gr_inode_lock);
47381+ }
47382+out:
47383+ br_read_unlock(vfsmount_lock);
47384+ write_sequnlock(&rename_lock);
47385+
47386+ BUG_ON(retval == NULL);
47387+
47388+ return retval;
47389+}
47390+
47391+static void
47392+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
47393+{
47394+ struct task_struct *task = current;
47395+ const struct cred *cred = current_cred();
47396+
47397+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
47398+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
47399+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
47400+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
47401+
47402+ return;
47403+}
47404+
47405+static void
47406+gr_log_learn_sysctl(const char *path, const __u32 mode)
47407+{
47408+ struct task_struct *task = current;
47409+ const struct cred *cred = current_cred();
47410+
47411+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
47412+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
47413+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
47414+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
47415+
47416+ return;
47417+}
47418+
47419+static void
47420+gr_log_learn_id_change(const char type, const unsigned int real,
47421+ const unsigned int effective, const unsigned int fs)
47422+{
47423+ struct task_struct *task = current;
47424+ const struct cred *cred = current_cred();
47425+
47426+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
47427+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
47428+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
47429+ type, real, effective, fs, &task->signal->saved_ip);
47430+
47431+ return;
47432+}
47433+
47434+__u32
47435+gr_check_link(const struct dentry * new_dentry,
47436+ const struct dentry * parent_dentry,
47437+ const struct vfsmount * parent_mnt,
47438+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
47439+{
47440+ struct acl_object_label *obj;
47441+ __u32 oldmode, newmode;
47442+ __u32 needmode;
47443+
47444+ if (unlikely(!(gr_status & GR_READY)))
47445+ return (GR_CREATE | GR_LINK);
47446+
47447+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
47448+ oldmode = obj->mode;
47449+
47450+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
47451+ oldmode |= (GR_CREATE | GR_LINK);
47452+
47453+ needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
47454+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
47455+ needmode |= GR_SETID | GR_AUDIT_SETID;
47456+
47457+ newmode =
47458+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
47459+ oldmode | needmode);
47460+
47461+ needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
47462+ GR_SETID | GR_READ | GR_FIND | GR_DELETE |
47463+ GR_INHERIT | GR_AUDIT_INHERIT);
47464+
47465+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
47466+ goto bad;
47467+
47468+ if ((oldmode & needmode) != needmode)
47469+ goto bad;
47470+
47471+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
47472+ if ((newmode & needmode) != needmode)
47473+ goto bad;
47474+
47475+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
47476+ return newmode;
47477+bad:
47478+ needmode = oldmode;
47479+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
47480+ needmode |= GR_SETID;
47481+
47482+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
47483+ gr_log_learn(old_dentry, old_mnt, needmode);
47484+ return (GR_CREATE | GR_LINK);
47485+ } else if (newmode & GR_SUPPRESS)
47486+ return GR_SUPPRESS;
47487+ else
47488+ return 0;
47489+}
47490+
47491+__u32
47492+gr_search_file(const struct dentry * dentry, const __u32 mode,
47493+ const struct vfsmount * mnt)
47494+{
47495+ __u32 retval = mode;
47496+ struct acl_subject_label *curracl;
47497+ struct acl_object_label *currobj;
47498+
47499+ if (unlikely(!(gr_status & GR_READY)))
47500+ return (mode & ~GR_AUDITS);
47501+
47502+ curracl = current->acl;
47503+
47504+ currobj = chk_obj_label(dentry, mnt, curracl);
47505+ retval = currobj->mode & mode;
47506+
47507+ /* if we're opening a specified transfer file for writing
47508+ (e.g. /dev/initctl), then transfer our role to init
47509+ */
47510+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
47511+ current->role->roletype & GR_ROLE_PERSIST)) {
47512+ struct task_struct *task = init_pid_ns.child_reaper;
47513+
47514+ if (task->role != current->role) {
47515+ task->acl_sp_role = 0;
47516+ task->acl_role_id = current->acl_role_id;
47517+ task->role = current->role;
47518+ rcu_read_lock();
47519+ read_lock(&grsec_exec_file_lock);
47520+ gr_apply_subject_to_task(task);
47521+ read_unlock(&grsec_exec_file_lock);
47522+ rcu_read_unlock();
47523+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
47524+ }
47525+ }
47526+
47527+ if (unlikely
47528+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
47529+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
47530+ __u32 new_mode = mode;
47531+
47532+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
47533+
47534+ retval = new_mode;
47535+
47536+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
47537+ new_mode |= GR_INHERIT;
47538+
47539+ if (!(mode & GR_NOLEARN))
47540+ gr_log_learn(dentry, mnt, new_mode);
47541+ }
47542+
47543+ return retval;
47544+}
47545+
47546+__u32
47547+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
47548+ const struct vfsmount * mnt, const __u32 mode)
47549+{
47550+ struct name_entry *match;
47551+ struct acl_object_label *matchpo;
47552+ struct acl_subject_label *curracl;
47553+ char *path;
47554+ __u32 retval;
47555+
47556+ if (unlikely(!(gr_status & GR_READY)))
47557+ return (mode & ~GR_AUDITS);
47558+
47559+ preempt_disable();
47560+ path = gr_to_filename_rbac(new_dentry, mnt);
47561+ match = lookup_name_entry_create(path);
47562+
47563+ if (!match)
47564+ goto check_parent;
47565+
47566+ curracl = current->acl;
47567+
47568+ read_lock(&gr_inode_lock);
47569+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
47570+ read_unlock(&gr_inode_lock);
47571+
47572+ if (matchpo) {
47573+ if ((matchpo->mode & mode) !=
47574+ (mode & ~(GR_AUDITS | GR_SUPPRESS))
47575+ && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
47576+ __u32 new_mode = mode;
47577+
47578+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
47579+
47580+ gr_log_learn(new_dentry, mnt, new_mode);
47581+
47582+ preempt_enable();
47583+ return new_mode;
47584+ }
47585+ preempt_enable();
47586+ return (matchpo->mode & mode);
47587+ }
47588+
47589+ check_parent:
47590+ curracl = current->acl;
47591+
47592+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
47593+ retval = matchpo->mode & mode;
47594+
47595+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
47596+ && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
47597+ __u32 new_mode = mode;
47598+
47599+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
47600+
47601+ gr_log_learn(new_dentry, mnt, new_mode);
47602+ preempt_enable();
47603+ return new_mode;
47604+ }
47605+
47606+ preempt_enable();
47607+ return retval;
47608+}
47609+
47610+int
47611+gr_check_hidden_task(const struct task_struct *task)
47612+{
47613+ if (unlikely(!(gr_status & GR_READY)))
47614+ return 0;
47615+
47616+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
47617+ return 1;
47618+
47619+ return 0;
47620+}
47621+
47622+int
47623+gr_check_protected_task(const struct task_struct *task)
47624+{
47625+ if (unlikely(!(gr_status & GR_READY) || !task))
47626+ return 0;
47627+
47628+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
47629+ task->acl != current->acl)
47630+ return 1;
47631+
47632+ return 0;
47633+}
47634+
47635+int
47636+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
47637+{
47638+ struct task_struct *p;
47639+ int ret = 0;
47640+
47641+ if (unlikely(!(gr_status & GR_READY) || !pid))
47642+ return ret;
47643+
47644+ read_lock(&tasklist_lock);
47645+ do_each_pid_task(pid, type, p) {
47646+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
47647+ p->acl != current->acl) {
47648+ ret = 1;
47649+ goto out;
47650+ }
47651+ } while_each_pid_task(pid, type, p);
47652+out:
47653+ read_unlock(&tasklist_lock);
47654+
47655+ return ret;
47656+}
47657+
47658+void
47659+gr_copy_label(struct task_struct *tsk)
47660+{
47661+ tsk->signal->used_accept = 0;
47662+ tsk->acl_sp_role = 0;
47663+ tsk->acl_role_id = current->acl_role_id;
47664+ tsk->acl = current->acl;
47665+ tsk->role = current->role;
47666+ tsk->signal->curr_ip = current->signal->curr_ip;
47667+ tsk->signal->saved_ip = current->signal->saved_ip;
47668+ if (current->exec_file)
47669+ get_file(current->exec_file);
47670+ tsk->exec_file = current->exec_file;
47671+ tsk->is_writable = current->is_writable;
47672+ if (unlikely(current->signal->used_accept)) {
47673+ current->signal->curr_ip = 0;
47674+ current->signal->saved_ip = 0;
47675+ }
47676+
47677+ return;
47678+}
47679+
47680+static void
47681+gr_set_proc_res(struct task_struct *task)
47682+{
47683+ struct acl_subject_label *proc;
47684+ unsigned short i;
47685+
47686+ proc = task->acl;
47687+
47688+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
47689+ return;
47690+
47691+ for (i = 0; i < RLIM_NLIMITS; i++) {
47692+ if (!(proc->resmask & (1 << i)))
47693+ continue;
47694+
47695+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
47696+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
47697+ }
47698+
47699+ return;
47700+}
47701+
47702+extern int __gr_process_user_ban(struct user_struct *user);
47703+
47704+int
47705+gr_check_user_change(int real, int effective, int fs)
47706+{
47707+ unsigned int i;
47708+ __u16 num;
47709+ uid_t *uidlist;
47710+ int curuid;
47711+ int realok = 0;
47712+ int effectiveok = 0;
47713+ int fsok = 0;
47714+
47715+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
47716+ struct user_struct *user;
47717+
47718+ if (real == -1)
47719+ goto skipit;
47720+
47721+ user = find_user(real);
47722+ if (user == NULL)
47723+ goto skipit;
47724+
47725+ if (__gr_process_user_ban(user)) {
47726+ /* for find_user */
47727+ free_uid(user);
47728+ return 1;
47729+ }
47730+
47731+ /* for find_user */
47732+ free_uid(user);
47733+
47734+skipit:
47735+#endif
47736+
47737+ if (unlikely(!(gr_status & GR_READY)))
47738+ return 0;
47739+
47740+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
47741+ gr_log_learn_id_change('u', real, effective, fs);
47742+
47743+ num = current->acl->user_trans_num;
47744+ uidlist = current->acl->user_transitions;
47745+
47746+ if (uidlist == NULL)
47747+ return 0;
47748+
47749+ if (real == -1)
47750+ realok = 1;
47751+ if (effective == -1)
47752+ effectiveok = 1;
47753+ if (fs == -1)
47754+ fsok = 1;
47755+
47756+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
47757+ for (i = 0; i < num; i++) {
47758+ curuid = (int)uidlist[i];
47759+ if (real == curuid)
47760+ realok = 1;
47761+ if (effective == curuid)
47762+ effectiveok = 1;
47763+ if (fs == curuid)
47764+ fsok = 1;
47765+ }
47766+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
47767+ for (i = 0; i < num; i++) {
47768+ curuid = (int)uidlist[i];
47769+ if (real == curuid)
47770+ break;
47771+ if (effective == curuid)
47772+ break;
47773+ if (fs == curuid)
47774+ break;
47775+ }
47776+ /* not in deny list */
47777+ if (i == num) {
47778+ realok = 1;
47779+ effectiveok = 1;
47780+ fsok = 1;
47781+ }
47782+ }
47783+
47784+ if (realok && effectiveok && fsok)
47785+ return 0;
47786+ else {
47787+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
47788+ return 1;
47789+ }
47790+}
47791+
47792+int
47793+gr_check_group_change(int real, int effective, int fs)
47794+{
47795+ unsigned int i;
47796+ __u16 num;
47797+ gid_t *gidlist;
47798+ int curgid;
47799+ int realok = 0;
47800+ int effectiveok = 0;
47801+ int fsok = 0;
47802+
47803+ if (unlikely(!(gr_status & GR_READY)))
47804+ return 0;
47805+
47806+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
47807+ gr_log_learn_id_change('g', real, effective, fs);
47808+
47809+ num = current->acl->group_trans_num;
47810+ gidlist = current->acl->group_transitions;
47811+
47812+ if (gidlist == NULL)
47813+ return 0;
47814+
47815+ if (real == -1)
47816+ realok = 1;
47817+ if (effective == -1)
47818+ effectiveok = 1;
47819+ if (fs == -1)
47820+ fsok = 1;
47821+
47822+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
47823+ for (i = 0; i < num; i++) {
47824+ curgid = (int)gidlist[i];
47825+ if (real == curgid)
47826+ realok = 1;
47827+ if (effective == curgid)
47828+ effectiveok = 1;
47829+ if (fs == curgid)
47830+ fsok = 1;
47831+ }
47832+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
47833+ for (i = 0; i < num; i++) {
47834+ curgid = (int)gidlist[i];
47835+ if (real == curgid)
47836+ break;
47837+ if (effective == curgid)
47838+ break;
47839+ if (fs == curgid)
47840+ break;
47841+ }
47842+ /* not in deny list */
47843+ if (i == num) {
47844+ realok = 1;
47845+ effectiveok = 1;
47846+ fsok = 1;
47847+ }
47848+ }
47849+
47850+ if (realok && effectiveok && fsok)
47851+ return 0;
47852+ else {
47853+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
47854+ return 1;
47855+ }
47856+}
47857+
47858+void
47859+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
47860+{
47861+ struct acl_role_label *role = task->role;
47862+ struct acl_subject_label *subj = NULL;
47863+ struct acl_object_label *obj;
47864+ struct file *filp;
47865+
47866+ if (unlikely(!(gr_status & GR_READY)))
47867+ return;
47868+
47869+ filp = task->exec_file;
47870+
47871+ /* kernel process, we'll give them the kernel role */
47872+ if (unlikely(!filp)) {
47873+ task->role = kernel_role;
47874+ task->acl = kernel_role->root_label;
47875+ return;
47876+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
47877+ role = lookup_acl_role_label(task, uid, gid);
47878+
47879+ /* perform subject lookup in possibly new role
47880+ we can use this result below in the case where role == task->role
47881+ */
47882+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
47883+
47884+ /* if we changed uid/gid, but result in the same role
47885+ and are using inheritance, don't lose the inherited subject
47886+ if current subject is other than what normal lookup
47887+ would result in, we arrived via inheritance, don't
47888+ lose subject
47889+ */
47890+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
47891+ (subj == task->acl)))
47892+ task->acl = subj;
47893+
47894+ task->role = role;
47895+
47896+ task->is_writable = 0;
47897+
47898+ /* ignore additional mmap checks for processes that are writable
47899+ by the default ACL */
47900+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
47901+ if (unlikely(obj->mode & GR_WRITE))
47902+ task->is_writable = 1;
47903+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
47904+ if (unlikely(obj->mode & GR_WRITE))
47905+ task->is_writable = 1;
47906+
47907+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47908+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
47909+#endif
47910+
47911+ gr_set_proc_res(task);
47912+
47913+ return;
47914+}
47915+
47916+int
47917+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
47918+ const int unsafe_share)
47919+{
47920+ struct task_struct *task = current;
47921+ struct acl_subject_label *newacl;
47922+ struct acl_object_label *obj;
47923+ __u32 retmode;
47924+
47925+ if (unlikely(!(gr_status & GR_READY)))
47926+ return 0;
47927+
47928+ newacl = chk_subj_label(dentry, mnt, task->role);
47929+
47930+ task_lock(task);
47931+ if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
47932+ !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
47933+ !(task->role->roletype & GR_ROLE_GOD) &&
47934+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
47935+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
47936+ task_unlock(task);
47937+ if (unsafe_share)
47938+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
47939+ else
47940+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
47941+ return -EACCES;
47942+ }
47943+ task_unlock(task);
47944+
47945+ obj = chk_obj_label(dentry, mnt, task->acl);
47946+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
47947+
47948+ if (!(task->acl->mode & GR_INHERITLEARN) &&
47949+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
47950+ if (obj->nested)
47951+ task->acl = obj->nested;
47952+ else
47953+ task->acl = newacl;
47954+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
47955+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
47956+
47957+ task->is_writable = 0;
47958+
47959+ /* ignore additional mmap checks for processes that are writable
47960+ by the default ACL */
47961+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
47962+ if (unlikely(obj->mode & GR_WRITE))
47963+ task->is_writable = 1;
47964+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
47965+ if (unlikely(obj->mode & GR_WRITE))
47966+ task->is_writable = 1;
47967+
47968+ gr_set_proc_res(task);
47969+
47970+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47971+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
47972+#endif
47973+ return 0;
47974+}
47975+
47976+/* always called with valid inodev ptr */
47977+static void
47978+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
47979+{
47980+ struct acl_object_label *matchpo;
47981+ struct acl_subject_label *matchps;
47982+ struct acl_subject_label *subj;
47983+ struct acl_role_label *role;
47984+ unsigned int x;
47985+
47986+ FOR_EACH_ROLE_START(role)
47987+ FOR_EACH_SUBJECT_START(role, subj, x)
47988+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
47989+ matchpo->mode |= GR_DELETED;
47990+ FOR_EACH_SUBJECT_END(subj,x)
47991+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
47992+ if (subj->inode == ino && subj->device == dev)
47993+ subj->mode |= GR_DELETED;
47994+ FOR_EACH_NESTED_SUBJECT_END(subj)
47995+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
47996+ matchps->mode |= GR_DELETED;
47997+ FOR_EACH_ROLE_END(role)
47998+
47999+ inodev->nentry->deleted = 1;
48000+
48001+ return;
48002+}
48003+
48004+void
48005+gr_handle_delete(const ino_t ino, const dev_t dev)
48006+{
48007+ struct inodev_entry *inodev;
48008+
48009+ if (unlikely(!(gr_status & GR_READY)))
48010+ return;
48011+
48012+ write_lock(&gr_inode_lock);
48013+ inodev = lookup_inodev_entry(ino, dev);
48014+ if (inodev != NULL)
48015+ do_handle_delete(inodev, ino, dev);
48016+ write_unlock(&gr_inode_lock);
48017+
48018+ return;
48019+}
48020+
48021+static void
48022+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
48023+ const ino_t newinode, const dev_t newdevice,
48024+ struct acl_subject_label *subj)
48025+{
48026+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
48027+ struct acl_object_label *match;
48028+
48029+ match = subj->obj_hash[index];
48030+
48031+ while (match && (match->inode != oldinode ||
48032+ match->device != olddevice ||
48033+ !(match->mode & GR_DELETED)))
48034+ match = match->next;
48035+
48036+ if (match && (match->inode == oldinode)
48037+ && (match->device == olddevice)
48038+ && (match->mode & GR_DELETED)) {
48039+ if (match->prev == NULL) {
48040+ subj->obj_hash[index] = match->next;
48041+ if (match->next != NULL)
48042+ match->next->prev = NULL;
48043+ } else {
48044+ match->prev->next = match->next;
48045+ if (match->next != NULL)
48046+ match->next->prev = match->prev;
48047+ }
48048+ match->prev = NULL;
48049+ match->next = NULL;
48050+ match->inode = newinode;
48051+ match->device = newdevice;
48052+ match->mode &= ~GR_DELETED;
48053+
48054+ insert_acl_obj_label(match, subj);
48055+ }
48056+
48057+ return;
48058+}
48059+
48060+static void
48061+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
48062+ const ino_t newinode, const dev_t newdevice,
48063+ struct acl_role_label *role)
48064+{
48065+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
48066+ struct acl_subject_label *match;
48067+
48068+ match = role->subj_hash[index];
48069+
48070+ while (match && (match->inode != oldinode ||
48071+ match->device != olddevice ||
48072+ !(match->mode & GR_DELETED)))
48073+ match = match->next;
48074+
48075+ if (match && (match->inode == oldinode)
48076+ && (match->device == olddevice)
48077+ && (match->mode & GR_DELETED)) {
48078+ if (match->prev == NULL) {
48079+ role->subj_hash[index] = match->next;
48080+ if (match->next != NULL)
48081+ match->next->prev = NULL;
48082+ } else {
48083+ match->prev->next = match->next;
48084+ if (match->next != NULL)
48085+ match->next->prev = match->prev;
48086+ }
48087+ match->prev = NULL;
48088+ match->next = NULL;
48089+ match->inode = newinode;
48090+ match->device = newdevice;
48091+ match->mode &= ~GR_DELETED;
48092+
48093+ insert_acl_subj_label(match, role);
48094+ }
48095+
48096+ return;
48097+}
48098+
48099+static void
48100+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
48101+ const ino_t newinode, const dev_t newdevice)
48102+{
48103+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
48104+ struct inodev_entry *match;
48105+
48106+ match = inodev_set.i_hash[index];
48107+
48108+ while (match && (match->nentry->inode != oldinode ||
48109+ match->nentry->device != olddevice || !match->nentry->deleted))
48110+ match = match->next;
48111+
48112+ if (match && (match->nentry->inode == oldinode)
48113+ && (match->nentry->device == olddevice) &&
48114+ match->nentry->deleted) {
48115+ if (match->prev == NULL) {
48116+ inodev_set.i_hash[index] = match->next;
48117+ if (match->next != NULL)
48118+ match->next->prev = NULL;
48119+ } else {
48120+ match->prev->next = match->next;
48121+ if (match->next != NULL)
48122+ match->next->prev = match->prev;
48123+ }
48124+ match->prev = NULL;
48125+ match->next = NULL;
48126+ match->nentry->inode = newinode;
48127+ match->nentry->device = newdevice;
48128+ match->nentry->deleted = 0;
48129+
48130+ insert_inodev_entry(match);
48131+ }
48132+
48133+ return;
48134+}
48135+
48136+static void
48137+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
48138+ const struct vfsmount *mnt)
48139+{
48140+ struct acl_subject_label *subj;
48141+ struct acl_role_label *role;
48142+ unsigned int x;
48143+ ino_t ino = dentry->d_inode->i_ino;
48144+ dev_t dev = __get_dev(dentry);
48145+
48146+ FOR_EACH_ROLE_START(role)
48147+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
48148+
48149+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
48150+ if ((subj->inode == ino) && (subj->device == dev)) {
48151+ subj->inode = ino;
48152+ subj->device = dev;
48153+ }
48154+ FOR_EACH_NESTED_SUBJECT_END(subj)
48155+ FOR_EACH_SUBJECT_START(role, subj, x)
48156+ update_acl_obj_label(matchn->inode, matchn->device,
48157+ ino, dev, subj);
48158+ FOR_EACH_SUBJECT_END(subj,x)
48159+ FOR_EACH_ROLE_END(role)
48160+
48161+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
48162+
48163+ return;
48164+}
48165+
48166+void
48167+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
48168+{
48169+ struct name_entry *matchn;
48170+
48171+ if (unlikely(!(gr_status & GR_READY)))
48172+ return;
48173+
48174+ preempt_disable();
48175+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
48176+
48177+ if (unlikely((unsigned long)matchn)) {
48178+ write_lock(&gr_inode_lock);
48179+ do_handle_create(matchn, dentry, mnt);
48180+ write_unlock(&gr_inode_lock);
48181+ }
48182+ preempt_enable();
48183+
48184+ return;
48185+}
48186+
48187+void
48188+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
48189+ struct dentry *old_dentry,
48190+ struct dentry *new_dentry,
48191+ struct vfsmount *mnt, const __u8 replace)
48192+{
48193+ struct name_entry *matchn;
48194+ struct inodev_entry *inodev;
48195+ ino_t old_ino = old_dentry->d_inode->i_ino;
48196+ dev_t old_dev = __get_dev(old_dentry);
48197+
48198+ /* vfs_rename swaps the name and parent link for old_dentry and
48199+ new_dentry
48200+ at this point, old_dentry has the new name, parent link, and inode
48201+ for the renamed file
48202+ if a file is being replaced by a rename, new_dentry has the inode
48203+ and name for the replaced file
48204+ */
48205+
48206+ if (unlikely(!(gr_status & GR_READY)))
48207+ return;
48208+
48209+ preempt_disable();
48210+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
48211+
48212+ /* we wouldn't have to check d_inode if it weren't for
48213+ NFS silly-renaming
48214+ */
48215+
48216+ write_lock(&gr_inode_lock);
48217+ if (unlikely(replace && new_dentry->d_inode)) {
48218+ ino_t new_ino = new_dentry->d_inode->i_ino;
48219+ dev_t new_dev = __get_dev(new_dentry);
48220+
48221+ inodev = lookup_inodev_entry(new_ino, new_dev);
48222+ if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
48223+ do_handle_delete(inodev, new_ino, new_dev);
48224+ }
48225+
48226+ inodev = lookup_inodev_entry(old_ino, old_dev);
48227+ if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
48228+ do_handle_delete(inodev, old_ino, old_dev);
48229+
48230+ if (unlikely((unsigned long)matchn))
48231+ do_handle_create(matchn, old_dentry, mnt);
48232+
48233+ write_unlock(&gr_inode_lock);
48234+ preempt_enable();
48235+
48236+ return;
48237+}
48238+
48239+static int
48240+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
48241+ unsigned char **sum)
48242+{
48243+ struct acl_role_label *r;
48244+ struct role_allowed_ip *ipp;
48245+ struct role_transition *trans;
48246+ unsigned int i;
48247+ int found = 0;
48248+ u32 curr_ip = current->signal->curr_ip;
48249+
48250+ current->signal->saved_ip = curr_ip;
48251+
48252+ /* check transition table */
48253+
48254+ for (trans = current->role->transitions; trans; trans = trans->next) {
48255+ if (!strcmp(rolename, trans->rolename)) {
48256+ found = 1;
48257+ break;
48258+ }
48259+ }
48260+
48261+ if (!found)
48262+ return 0;
48263+
48264+ /* handle special roles that do not require authentication
48265+ and check ip */
48266+
48267+ FOR_EACH_ROLE_START(r)
48268+ if (!strcmp(rolename, r->rolename) &&
48269+ (r->roletype & GR_ROLE_SPECIAL)) {
48270+ found = 0;
48271+ if (r->allowed_ips != NULL) {
48272+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
48273+ if ((ntohl(curr_ip) & ipp->netmask) ==
48274+ (ntohl(ipp->addr) & ipp->netmask))
48275+ found = 1;
48276+ }
48277+ } else
48278+ found = 2;
48279+ if (!found)
48280+ return 0;
48281+
48282+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
48283+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
48284+ *salt = NULL;
48285+ *sum = NULL;
48286+ return 1;
48287+ }
48288+ }
48289+ FOR_EACH_ROLE_END(r)
48290+
48291+ for (i = 0; i < num_sprole_pws; i++) {
48292+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
48293+ *salt = acl_special_roles[i]->salt;
48294+ *sum = acl_special_roles[i]->sum;
48295+ return 1;
48296+ }
48297+ }
48298+
48299+ return 0;
48300+}
48301+
48302+static void
48303+assign_special_role(char *rolename)
48304+{
48305+ struct acl_object_label *obj;
48306+ struct acl_role_label *r;
48307+ struct acl_role_label *assigned = NULL;
48308+ struct task_struct *tsk;
48309+ struct file *filp;
48310+
48311+ FOR_EACH_ROLE_START(r)
48312+ if (!strcmp(rolename, r->rolename) &&
48313+ (r->roletype & GR_ROLE_SPECIAL)) {
48314+ assigned = r;
48315+ break;
48316+ }
48317+ FOR_EACH_ROLE_END(r)
48318+
48319+ if (!assigned)
48320+ return;
48321+
48322+ read_lock(&tasklist_lock);
48323+ read_lock(&grsec_exec_file_lock);
48324+
48325+ tsk = current->real_parent;
48326+ if (tsk == NULL)
48327+ goto out_unlock;
48328+
48329+ filp = tsk->exec_file;
48330+ if (filp == NULL)
48331+ goto out_unlock;
48332+
48333+ tsk->is_writable = 0;
48334+
48335+ tsk->acl_sp_role = 1;
48336+ tsk->acl_role_id = ++acl_sp_role_value;
48337+ tsk->role = assigned;
48338+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
48339+
48340+ /* ignore additional mmap checks for processes that are writable
48341+ by the default ACL */
48342+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
48343+ if (unlikely(obj->mode & GR_WRITE))
48344+ tsk->is_writable = 1;
48345+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
48346+ if (unlikely(obj->mode & GR_WRITE))
48347+ tsk->is_writable = 1;
48348+
48349+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
48350+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
48351+#endif
48352+
48353+out_unlock:
48354+ read_unlock(&grsec_exec_file_lock);
48355+ read_unlock(&tasklist_lock);
48356+ return;
48357+}
48358+
48359+int gr_check_secure_terminal(struct task_struct *task)
48360+{
48361+ struct task_struct *p, *p2, *p3;
48362+ struct files_struct *files;
48363+ struct fdtable *fdt;
48364+ struct file *our_file = NULL, *file;
48365+ int i;
48366+
48367+ if (task->signal->tty == NULL)
48368+ return 1;
48369+
48370+ files = get_files_struct(task);
48371+ if (files != NULL) {
48372+ rcu_read_lock();
48373+ fdt = files_fdtable(files);
48374+ for (i=0; i < fdt->max_fds; i++) {
48375+ file = fcheck_files(files, i);
48376+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
48377+ get_file(file);
48378+ our_file = file;
48379+ }
48380+ }
48381+ rcu_read_unlock();
48382+ put_files_struct(files);
48383+ }
48384+
48385+ if (our_file == NULL)
48386+ return 1;
48387+
48388+ read_lock(&tasklist_lock);
48389+ do_each_thread(p2, p) {
48390+ files = get_files_struct(p);
48391+ if (files == NULL ||
48392+ (p->signal && p->signal->tty == task->signal->tty)) {
48393+ if (files != NULL)
48394+ put_files_struct(files);
48395+ continue;
48396+ }
48397+ rcu_read_lock();
48398+ fdt = files_fdtable(files);
48399+ for (i=0; i < fdt->max_fds; i++) {
48400+ file = fcheck_files(files, i);
48401+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
48402+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
48403+ p3 = task;
48404+ while (p3->pid > 0) {
48405+ if (p3 == p)
48406+ break;
48407+ p3 = p3->real_parent;
48408+ }
48409+ if (p3 == p)
48410+ break;
48411+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
48412+ gr_handle_alertkill(p);
48413+ rcu_read_unlock();
48414+ put_files_struct(files);
48415+ read_unlock(&tasklist_lock);
48416+ fput(our_file);
48417+ return 0;
48418+ }
48419+ }
48420+ rcu_read_unlock();
48421+ put_files_struct(files);
48422+ } while_each_thread(p2, p);
48423+ read_unlock(&tasklist_lock);
48424+
48425+ fput(our_file);
48426+ return 1;
48427+}
48428+
48429+ssize_t
48430+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
48431+{
48432+ struct gr_arg_wrapper uwrap;
48433+ unsigned char *sprole_salt = NULL;
48434+ unsigned char *sprole_sum = NULL;
48435+ int error = sizeof (struct gr_arg_wrapper);
48436+ int error2 = 0;
48437+
48438+ mutex_lock(&gr_dev_mutex);
48439+
48440+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
48441+ error = -EPERM;
48442+ goto out;
48443+ }
48444+
48445+ if (count != sizeof (struct gr_arg_wrapper)) {
48446+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
48447+ error = -EINVAL;
48448+ goto out;
48449+ }
48450+
48451+
48452+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
48453+ gr_auth_expires = 0;
48454+ gr_auth_attempts = 0;
48455+ }
48456+
48457+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
48458+ error = -EFAULT;
48459+ goto out;
48460+ }
48461+
48462+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
48463+ error = -EINVAL;
48464+ goto out;
48465+ }
48466+
48467+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
48468+ error = -EFAULT;
48469+ goto out;
48470+ }
48471+
48472+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
48473+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
48474+ time_after(gr_auth_expires, get_seconds())) {
48475+ error = -EBUSY;
48476+ goto out;
48477+ }
48478+
48479+ /* if non-root trying to do anything other than use a special role,
48480+ do not attempt authentication, do not count towards authentication
48481+ locking
48482+ */
48483+
48484+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
48485+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
48486+ current_uid()) {
48487+ error = -EPERM;
48488+ goto out;
48489+ }
48490+
48491+ /* ensure pw and special role name are null terminated */
48492+
48493+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
48494+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
48495+
48496+ /* Okay.
48497+ * We have our enough of the argument structure..(we have yet
48498+ * to copy_from_user the tables themselves) . Copy the tables
48499+ * only if we need them, i.e. for loading operations. */
48500+
48501+ switch (gr_usermode->mode) {
48502+ case GR_STATUS:
48503+ if (gr_status & GR_READY) {
48504+ error = 1;
48505+ if (!gr_check_secure_terminal(current))
48506+ error = 3;
48507+ } else
48508+ error = 2;
48509+ goto out;
48510+ case GR_SHUTDOWN:
48511+ if ((gr_status & GR_READY)
48512+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
48513+ pax_open_kernel();
48514+ gr_status &= ~GR_READY;
48515+ pax_close_kernel();
48516+
48517+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
48518+ free_variables();
48519+ memset(gr_usermode, 0, sizeof (struct gr_arg));
48520+ memset(gr_system_salt, 0, GR_SALT_LEN);
48521+ memset(gr_system_sum, 0, GR_SHA_LEN);
48522+ } else if (gr_status & GR_READY) {
48523+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
48524+ error = -EPERM;
48525+ } else {
48526+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
48527+ error = -EAGAIN;
48528+ }
48529+ break;
48530+ case GR_ENABLE:
48531+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
48532+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
48533+ else {
48534+ if (gr_status & GR_READY)
48535+ error = -EAGAIN;
48536+ else
48537+ error = error2;
48538+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
48539+ }
48540+ break;
48541+ case GR_RELOAD:
48542+ if (!(gr_status & GR_READY)) {
48543+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
48544+ error = -EAGAIN;
48545+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
48546+ preempt_disable();
48547+
48548+ pax_open_kernel();
48549+ gr_status &= ~GR_READY;
48550+ pax_close_kernel();
48551+
48552+ free_variables();
48553+ if (!(error2 = gracl_init(gr_usermode))) {
48554+ preempt_enable();
48555+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
48556+ } else {
48557+ preempt_enable();
48558+ error = error2;
48559+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
48560+ }
48561+ } else {
48562+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
48563+ error = -EPERM;
48564+ }
48565+ break;
48566+ case GR_SEGVMOD:
48567+ if (unlikely(!(gr_status & GR_READY))) {
48568+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
48569+ error = -EAGAIN;
48570+ break;
48571+ }
48572+
48573+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
48574+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
48575+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
48576+ struct acl_subject_label *segvacl;
48577+ segvacl =
48578+ lookup_acl_subj_label(gr_usermode->segv_inode,
48579+ gr_usermode->segv_device,
48580+ current->role);
48581+ if (segvacl) {
48582+ segvacl->crashes = 0;
48583+ segvacl->expires = 0;
48584+ }
48585+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
48586+ gr_remove_uid(gr_usermode->segv_uid);
48587+ }
48588+ } else {
48589+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
48590+ error = -EPERM;
48591+ }
48592+ break;
48593+ case GR_SPROLE:
48594+ case GR_SPROLEPAM:
48595+ if (unlikely(!(gr_status & GR_READY))) {
48596+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
48597+ error = -EAGAIN;
48598+ break;
48599+ }
48600+
48601+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
48602+ current->role->expires = 0;
48603+ current->role->auth_attempts = 0;
48604+ }
48605+
48606+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
48607+ time_after(current->role->expires, get_seconds())) {
48608+ error = -EBUSY;
48609+ goto out;
48610+ }
48611+
48612+ if (lookup_special_role_auth
48613+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
48614+ && ((!sprole_salt && !sprole_sum)
48615+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
48616+ char *p = "";
48617+ assign_special_role(gr_usermode->sp_role);
48618+ read_lock(&tasklist_lock);
48619+ if (current->real_parent)
48620+ p = current->real_parent->role->rolename;
48621+ read_unlock(&tasklist_lock);
48622+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
48623+ p, acl_sp_role_value);
48624+ } else {
48625+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
48626+ error = -EPERM;
48627+ if(!(current->role->auth_attempts++))
48628+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
48629+
48630+ goto out;
48631+ }
48632+ break;
48633+ case GR_UNSPROLE:
48634+ if (unlikely(!(gr_status & GR_READY))) {
48635+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
48636+ error = -EAGAIN;
48637+ break;
48638+ }
48639+
48640+ if (current->role->roletype & GR_ROLE_SPECIAL) {
48641+ char *p = "";
48642+ int i = 0;
48643+
48644+ read_lock(&tasklist_lock);
48645+ if (current->real_parent) {
48646+ p = current->real_parent->role->rolename;
48647+ i = current->real_parent->acl_role_id;
48648+ }
48649+ read_unlock(&tasklist_lock);
48650+
48651+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
48652+ gr_set_acls(1);
48653+ } else {
48654+ error = -EPERM;
48655+ goto out;
48656+ }
48657+ break;
48658+ default:
48659+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
48660+ error = -EINVAL;
48661+ break;
48662+ }
48663+
48664+ if (error != -EPERM)
48665+ goto out;
48666+
48667+ if(!(gr_auth_attempts++))
48668+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
48669+
48670+ out:
48671+ mutex_unlock(&gr_dev_mutex);
48672+ return error;
48673+}
48674+
48675+/* must be called with
48676+ rcu_read_lock();
48677+ read_lock(&tasklist_lock);
48678+ read_lock(&grsec_exec_file_lock);
48679+*/
48680+int gr_apply_subject_to_task(struct task_struct *task)
48681+{
48682+ struct acl_object_label *obj;
48683+ char *tmpname;
48684+ struct acl_subject_label *tmpsubj;
48685+ struct file *filp;
48686+ struct name_entry *nmatch;
48687+
48688+ filp = task->exec_file;
48689+ if (filp == NULL)
48690+ return 0;
48691+
48692+ /* the following is to apply the correct subject
48693+ on binaries running when the RBAC system
48694+ is enabled, when the binaries have been
48695+ replaced or deleted since their execution
48696+ -----
48697+ when the RBAC system starts, the inode/dev
48698+ from exec_file will be one the RBAC system
48699+ is unaware of. It only knows the inode/dev
48700+ of the present file on disk, or the absence
48701+ of it.
48702+ */
48703+ preempt_disable();
48704+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
48705+
48706+ nmatch = lookup_name_entry(tmpname);
48707+ preempt_enable();
48708+ tmpsubj = NULL;
48709+ if (nmatch) {
48710+ if (nmatch->deleted)
48711+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
48712+ else
48713+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
48714+ if (tmpsubj != NULL)
48715+ task->acl = tmpsubj;
48716+ }
48717+ if (tmpsubj == NULL)
48718+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
48719+ task->role);
48720+ if (task->acl) {
48721+ task->is_writable = 0;
48722+ /* ignore additional mmap checks for processes that are writable
48723+ by the default ACL */
48724+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
48725+ if (unlikely(obj->mode & GR_WRITE))
48726+ task->is_writable = 1;
48727+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
48728+ if (unlikely(obj->mode & GR_WRITE))
48729+ task->is_writable = 1;
48730+
48731+ gr_set_proc_res(task);
48732+
48733+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
48734+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
48735+#endif
48736+ } else {
48737+ return 1;
48738+ }
48739+
48740+ return 0;
48741+}
48742+
48743+int
48744+gr_set_acls(const int type)
48745+{
48746+ struct task_struct *task, *task2;
48747+ struct acl_role_label *role = current->role;
48748+ __u16 acl_role_id = current->acl_role_id;
48749+ const struct cred *cred;
48750+ int ret;
48751+
48752+ rcu_read_lock();
48753+ read_lock(&tasklist_lock);
48754+ read_lock(&grsec_exec_file_lock);
48755+ do_each_thread(task2, task) {
48756+ /* check to see if we're called from the exit handler,
48757+ if so, only replace ACLs that have inherited the admin
48758+ ACL */
48759+
48760+ if (type && (task->role != role ||
48761+ task->acl_role_id != acl_role_id))
48762+ continue;
48763+
48764+ task->acl_role_id = 0;
48765+ task->acl_sp_role = 0;
48766+
48767+ if (task->exec_file) {
48768+ cred = __task_cred(task);
48769+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
48770+ ret = gr_apply_subject_to_task(task);
48771+ if (ret) {
48772+ read_unlock(&grsec_exec_file_lock);
48773+ read_unlock(&tasklist_lock);
48774+ rcu_read_unlock();
48775+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
48776+ return ret;
48777+ }
48778+ } else {
48779+ // it's a kernel process
48780+ task->role = kernel_role;
48781+ task->acl = kernel_role->root_label;
48782+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
48783+ task->acl->mode &= ~GR_PROCFIND;
48784+#endif
48785+ }
48786+ } while_each_thread(task2, task);
48787+ read_unlock(&grsec_exec_file_lock);
48788+ read_unlock(&tasklist_lock);
48789+ rcu_read_unlock();
48790+
48791+ return 0;
48792+}
48793+
48794+void
48795+gr_learn_resource(const struct task_struct *task,
48796+ const int res, const unsigned long wanted, const int gt)
48797+{
48798+ struct acl_subject_label *acl;
48799+ const struct cred *cred;
48800+
48801+ if (unlikely((gr_status & GR_READY) &&
48802+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
48803+ goto skip_reslog;
48804+
48805+#ifdef CONFIG_GRKERNSEC_RESLOG
48806+ gr_log_resource(task, res, wanted, gt);
48807+#endif
48808+ skip_reslog:
48809+
48810+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
48811+ return;
48812+
48813+ acl = task->acl;
48814+
48815+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
48816+ !(acl->resmask & (1 << (unsigned short) res))))
48817+ return;
48818+
48819+ if (wanted >= acl->res[res].rlim_cur) {
48820+ unsigned long res_add;
48821+
48822+ res_add = wanted;
48823+ switch (res) {
48824+ case RLIMIT_CPU:
48825+ res_add += GR_RLIM_CPU_BUMP;
48826+ break;
48827+ case RLIMIT_FSIZE:
48828+ res_add += GR_RLIM_FSIZE_BUMP;
48829+ break;
48830+ case RLIMIT_DATA:
48831+ res_add += GR_RLIM_DATA_BUMP;
48832+ break;
48833+ case RLIMIT_STACK:
48834+ res_add += GR_RLIM_STACK_BUMP;
48835+ break;
48836+ case RLIMIT_CORE:
48837+ res_add += GR_RLIM_CORE_BUMP;
48838+ break;
48839+ case RLIMIT_RSS:
48840+ res_add += GR_RLIM_RSS_BUMP;
48841+ break;
48842+ case RLIMIT_NPROC:
48843+ res_add += GR_RLIM_NPROC_BUMP;
48844+ break;
48845+ case RLIMIT_NOFILE:
48846+ res_add += GR_RLIM_NOFILE_BUMP;
48847+ break;
48848+ case RLIMIT_MEMLOCK:
48849+ res_add += GR_RLIM_MEMLOCK_BUMP;
48850+ break;
48851+ case RLIMIT_AS:
48852+ res_add += GR_RLIM_AS_BUMP;
48853+ break;
48854+ case RLIMIT_LOCKS:
48855+ res_add += GR_RLIM_LOCKS_BUMP;
48856+ break;
48857+ case RLIMIT_SIGPENDING:
48858+ res_add += GR_RLIM_SIGPENDING_BUMP;
48859+ break;
48860+ case RLIMIT_MSGQUEUE:
48861+ res_add += GR_RLIM_MSGQUEUE_BUMP;
48862+ break;
48863+ case RLIMIT_NICE:
48864+ res_add += GR_RLIM_NICE_BUMP;
48865+ break;
48866+ case RLIMIT_RTPRIO:
48867+ res_add += GR_RLIM_RTPRIO_BUMP;
48868+ break;
48869+ case RLIMIT_RTTIME:
48870+ res_add += GR_RLIM_RTTIME_BUMP;
48871+ break;
48872+ }
48873+
48874+ acl->res[res].rlim_cur = res_add;
48875+
48876+ if (wanted > acl->res[res].rlim_max)
48877+ acl->res[res].rlim_max = res_add;
48878+
48879+ /* only log the subject filename, since resource logging is supported for
48880+ single-subject learning only */
48881+ rcu_read_lock();
48882+ cred = __task_cred(task);
48883+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
48884+ task->role->roletype, cred->uid, cred->gid, acl->filename,
48885+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
48886+ "", (unsigned long) res, &task->signal->saved_ip);
48887+ rcu_read_unlock();
48888+ }
48889+
48890+ return;
48891+}
48892+
48893+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
48894+void
48895+pax_set_initial_flags(struct linux_binprm *bprm)
48896+{
48897+ struct task_struct *task = current;
48898+ struct acl_subject_label *proc;
48899+ unsigned long flags;
48900+
48901+ if (unlikely(!(gr_status & GR_READY)))
48902+ return;
48903+
48904+ flags = pax_get_flags(task);
48905+
48906+ proc = task->acl;
48907+
48908+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
48909+ flags &= ~MF_PAX_PAGEEXEC;
48910+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
48911+ flags &= ~MF_PAX_SEGMEXEC;
48912+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
48913+ flags &= ~MF_PAX_RANDMMAP;
48914+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
48915+ flags &= ~MF_PAX_EMUTRAMP;
48916+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
48917+ flags &= ~MF_PAX_MPROTECT;
48918+
48919+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
48920+ flags |= MF_PAX_PAGEEXEC;
48921+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
48922+ flags |= MF_PAX_SEGMEXEC;
48923+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
48924+ flags |= MF_PAX_RANDMMAP;
48925+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
48926+ flags |= MF_PAX_EMUTRAMP;
48927+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
48928+ flags |= MF_PAX_MPROTECT;
48929+
48930+ pax_set_flags(task, flags);
48931+
48932+ return;
48933+}
48934+#endif
48935+
48936+#ifdef CONFIG_SYSCTL
48937+/* Eric Biederman likes breaking userland ABI and every inode-based security
48938+ system to save 35kb of memory */
48939+
48940+/* we modify the passed in filename, but adjust it back before returning */
48941+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
48942+{
48943+ struct name_entry *nmatch;
48944+ char *p, *lastp = NULL;
48945+ struct acl_object_label *obj = NULL, *tmp;
48946+ struct acl_subject_label *tmpsubj;
48947+ char c = '\0';
48948+
48949+ read_lock(&gr_inode_lock);
48950+
48951+ p = name + len - 1;
48952+ do {
48953+ nmatch = lookup_name_entry(name);
48954+ if (lastp != NULL)
48955+ *lastp = c;
48956+
48957+ if (nmatch == NULL)
48958+ goto next_component;
48959+ tmpsubj = current->acl;
48960+ do {
48961+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
48962+ if (obj != NULL) {
48963+ tmp = obj->globbed;
48964+ while (tmp) {
48965+ if (!glob_match(tmp->filename, name)) {
48966+ obj = tmp;
48967+ goto found_obj;
48968+ }
48969+ tmp = tmp->next;
48970+ }
48971+ goto found_obj;
48972+ }
48973+ } while ((tmpsubj = tmpsubj->parent_subject));
48974+next_component:
48975+ /* end case */
48976+ if (p == name)
48977+ break;
48978+
48979+ while (*p != '/')
48980+ p--;
48981+ if (p == name)
48982+ lastp = p + 1;
48983+ else {
48984+ lastp = p;
48985+ p--;
48986+ }
48987+ c = *lastp;
48988+ *lastp = '\0';
48989+ } while (1);
48990+found_obj:
48991+ read_unlock(&gr_inode_lock);
48992+ /* obj returned will always be non-null */
48993+ return obj;
48994+}
48995+
48996+/* returns 0 when allowing, non-zero on error
48997+ op of 0 is used for readdir, so we don't log the names of hidden files
48998+*/
48999+__u32
49000+gr_handle_sysctl(const struct ctl_table *table, const int op)
49001+{
49002+ struct ctl_table *tmp;
49003+ const char *proc_sys = "/proc/sys";
49004+ char *path;
49005+ struct acl_object_label *obj;
49006+ unsigned short len = 0, pos = 0, depth = 0, i;
49007+ __u32 err = 0;
49008+ __u32 mode = 0;
49009+
49010+ if (unlikely(!(gr_status & GR_READY)))
49011+ return 0;
49012+
49013+ /* for now, ignore operations on non-sysctl entries if it's not a
49014+ readdir*/
49015+ if (table->child != NULL && op != 0)
49016+ return 0;
49017+
49018+ mode |= GR_FIND;
49019+ /* it's only a read if it's an entry, read on dirs is for readdir */
49020+ if (op & MAY_READ)
49021+ mode |= GR_READ;
49022+ if (op & MAY_WRITE)
49023+ mode |= GR_WRITE;
49024+
49025+ preempt_disable();
49026+
49027+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
49028+
49029+ /* it's only a read/write if it's an actual entry, not a dir
49030+ (which are opened for readdir)
49031+ */
49032+
49033+ /* convert the requested sysctl entry into a pathname */
49034+
49035+ for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
49036+ len += strlen(tmp->procname);
49037+ len++;
49038+ depth++;
49039+ }
49040+
49041+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
49042+ /* deny */
49043+ goto out;
49044+ }
49045+
49046+ memset(path, 0, PAGE_SIZE);
49047+
49048+ memcpy(path, proc_sys, strlen(proc_sys));
49049+
49050+ pos += strlen(proc_sys);
49051+
49052+ for (; depth > 0; depth--) {
49053+ path[pos] = '/';
49054+ pos++;
49055+ for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
49056+ if (depth == i) {
49057+ memcpy(path + pos, tmp->procname,
49058+ strlen(tmp->procname));
49059+ pos += strlen(tmp->procname);
49060+ }
49061+ i++;
49062+ }
49063+ }
49064+
49065+ obj = gr_lookup_by_name(path, pos);
49066+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
49067+
49068+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
49069+ ((err & mode) != mode))) {
49070+ __u32 new_mode = mode;
49071+
49072+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
49073+
49074+ err = 0;
49075+ gr_log_learn_sysctl(path, new_mode);
49076+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
49077+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
49078+ err = -ENOENT;
49079+ } else if (!(err & GR_FIND)) {
49080+ err = -ENOENT;
49081+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
49082+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
49083+ path, (mode & GR_READ) ? " reading" : "",
49084+ (mode & GR_WRITE) ? " writing" : "");
49085+ err = -EACCES;
49086+ } else if ((err & mode) != mode) {
49087+ err = -EACCES;
49088+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
49089+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
49090+ path, (mode & GR_READ) ? " reading" : "",
49091+ (mode & GR_WRITE) ? " writing" : "");
49092+ err = 0;
49093+ } else
49094+ err = 0;
49095+
49096+ out:
49097+ preempt_enable();
49098+
49099+ return err;
49100+}
49101+#endif
49102+
49103+int
49104+gr_handle_proc_ptrace(struct task_struct *task)
49105+{
49106+ struct file *filp;
49107+ struct task_struct *tmp = task;
49108+ struct task_struct *curtemp = current;
49109+ __u32 retmode;
49110+
49111+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
49112+ if (unlikely(!(gr_status & GR_READY)))
49113+ return 0;
49114+#endif
49115+
49116+ read_lock(&tasklist_lock);
49117+ read_lock(&grsec_exec_file_lock);
49118+ filp = task->exec_file;
49119+
49120+ while (tmp->pid > 0) {
49121+ if (tmp == curtemp)
49122+ break;
49123+ tmp = tmp->real_parent;
49124+ }
49125+
49126+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
49127+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
49128+ read_unlock(&grsec_exec_file_lock);
49129+ read_unlock(&tasklist_lock);
49130+ return 1;
49131+ }
49132+
49133+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
49134+ if (!(gr_status & GR_READY)) {
49135+ read_unlock(&grsec_exec_file_lock);
49136+ read_unlock(&tasklist_lock);
49137+ return 0;
49138+ }
49139+#endif
49140+
49141+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
49142+ read_unlock(&grsec_exec_file_lock);
49143+ read_unlock(&tasklist_lock);
49144+
49145+ if (retmode & GR_NOPTRACE)
49146+ return 1;
49147+
49148+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
49149+ && (current->acl != task->acl || (current->acl != current->role->root_label
49150+ && current->pid != task->pid)))
49151+ return 1;
49152+
49153+ return 0;
49154+}
49155+
49156+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
49157+{
49158+ if (unlikely(!(gr_status & GR_READY)))
49159+ return;
49160+
49161+ if (!(current->role->roletype & GR_ROLE_GOD))
49162+ return;
49163+
49164+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
49165+ p->role->rolename, gr_task_roletype_to_char(p),
49166+ p->acl->filename);
49167+}
49168+
49169+int
49170+gr_handle_ptrace(struct task_struct *task, const long request)
49171+{
49172+ struct task_struct *tmp = task;
49173+ struct task_struct *curtemp = current;
49174+ __u32 retmode;
49175+
49176+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
49177+ if (unlikely(!(gr_status & GR_READY)))
49178+ return 0;
49179+#endif
49180+
49181+ read_lock(&tasklist_lock);
49182+ while (tmp->pid > 0) {
49183+ if (tmp == curtemp)
49184+ break;
49185+ tmp = tmp->real_parent;
49186+ }
49187+
49188+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
49189+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
49190+ read_unlock(&tasklist_lock);
49191+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
49192+ return 1;
49193+ }
49194+ read_unlock(&tasklist_lock);
49195+
49196+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
49197+ if (!(gr_status & GR_READY))
49198+ return 0;
49199+#endif
49200+
49201+ read_lock(&grsec_exec_file_lock);
49202+ if (unlikely(!task->exec_file)) {
49203+ read_unlock(&grsec_exec_file_lock);
49204+ return 0;
49205+ }
49206+
49207+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
49208+ read_unlock(&grsec_exec_file_lock);
49209+
49210+ if (retmode & GR_NOPTRACE) {
49211+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
49212+ return 1;
49213+ }
49214+
49215+ if (retmode & GR_PTRACERD) {
49216+ switch (request) {
49217+ case PTRACE_POKETEXT:
49218+ case PTRACE_POKEDATA:
49219+ case PTRACE_POKEUSR:
49220+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
49221+ case PTRACE_SETREGS:
49222+ case PTRACE_SETFPREGS:
49223+#endif
49224+#ifdef CONFIG_X86
49225+ case PTRACE_SETFPXREGS:
49226+#endif
49227+#ifdef CONFIG_ALTIVEC
49228+ case PTRACE_SETVRREGS:
49229+#endif
49230+ return 1;
49231+ default:
49232+ return 0;
49233+ }
49234+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
49235+ !(current->role->roletype & GR_ROLE_GOD) &&
49236+ (current->acl != task->acl)) {
49237+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
49238+ return 1;
49239+ }
49240+
49241+ return 0;
49242+}
49243+
49244+static int is_writable_mmap(const struct file *filp)
49245+{
49246+ struct task_struct *task = current;
49247+ struct acl_object_label *obj, *obj2;
49248+
49249+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
49250+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
49251+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
49252+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
49253+ task->role->root_label);
49254+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
49255+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
49256+ return 1;
49257+ }
49258+ }
49259+ return 0;
49260+}
49261+
49262+int
49263+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
49264+{
49265+ __u32 mode;
49266+
49267+ if (unlikely(!file || !(prot & PROT_EXEC)))
49268+ return 1;
49269+
49270+ if (is_writable_mmap(file))
49271+ return 0;
49272+
49273+ mode =
49274+ gr_search_file(file->f_path.dentry,
49275+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
49276+ file->f_path.mnt);
49277+
49278+ if (!gr_tpe_allow(file))
49279+ return 0;
49280+
49281+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
49282+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
49283+ return 0;
49284+ } else if (unlikely(!(mode & GR_EXEC))) {
49285+ return 0;
49286+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
49287+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
49288+ return 1;
49289+ }
49290+
49291+ return 1;
49292+}
49293+
49294+int
49295+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
49296+{
49297+ __u32 mode;
49298+
49299+ if (unlikely(!file || !(prot & PROT_EXEC)))
49300+ return 1;
49301+
49302+ if (is_writable_mmap(file))
49303+ return 0;
49304+
49305+ mode =
49306+ gr_search_file(file->f_path.dentry,
49307+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
49308+ file->f_path.mnt);
49309+
49310+ if (!gr_tpe_allow(file))
49311+ return 0;
49312+
49313+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
49314+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
49315+ return 0;
49316+ } else if (unlikely(!(mode & GR_EXEC))) {
49317+ return 0;
49318+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
49319+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
49320+ return 1;
49321+ }
49322+
49323+ return 1;
49324+}
49325+
49326+void
49327+gr_acl_handle_psacct(struct task_struct *task, const long code)
49328+{
49329+ unsigned long runtime;
49330+ unsigned long cputime;
49331+ unsigned int wday, cday;
49332+ __u8 whr, chr;
49333+ __u8 wmin, cmin;
49334+ __u8 wsec, csec;
49335+ struct timespec timeval;
49336+
49337+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
49338+ !(task->acl->mode & GR_PROCACCT)))
49339+ return;
49340+
49341+ do_posix_clock_monotonic_gettime(&timeval);
49342+ runtime = timeval.tv_sec - task->start_time.tv_sec;
49343+ wday = runtime / (3600 * 24);
49344+ runtime -= wday * (3600 * 24);
49345+ whr = runtime / 3600;
49346+ runtime -= whr * 3600;
49347+ wmin = runtime / 60;
49348+ runtime -= wmin * 60;
49349+ wsec = runtime;
49350+
49351+ cputime = (task->utime + task->stime) / HZ;
49352+ cday = cputime / (3600 * 24);
49353+ cputime -= cday * (3600 * 24);
49354+ chr = cputime / 3600;
49355+ cputime -= chr * 3600;
49356+ cmin = cputime / 60;
49357+ cputime -= cmin * 60;
49358+ csec = cputime;
49359+
49360+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
49361+
49362+ return;
49363+}
49364+
49365+void gr_set_kernel_label(struct task_struct *task)
49366+{
49367+ if (gr_status & GR_READY) {
49368+ task->role = kernel_role;
49369+ task->acl = kernel_role->root_label;
49370+ }
49371+ return;
49372+}
49373+
49374+#ifdef CONFIG_TASKSTATS
49375+int gr_is_taskstats_denied(int pid)
49376+{
49377+ struct task_struct *task;
49378+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49379+ const struct cred *cred;
49380+#endif
49381+ int ret = 0;
49382+
49383+ /* restrict taskstats viewing to un-chrooted root users
49384+ who have the 'view' subject flag if the RBAC system is enabled
49385+ */
49386+
49387+ rcu_read_lock();
49388+ read_lock(&tasklist_lock);
49389+ task = find_task_by_vpid(pid);
49390+ if (task) {
49391+#ifdef CONFIG_GRKERNSEC_CHROOT
49392+ if (proc_is_chrooted(task))
49393+ ret = -EACCES;
49394+#endif
49395+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49396+ cred = __task_cred(task);
49397+#ifdef CONFIG_GRKERNSEC_PROC_USER
49398+ if (cred->uid != 0)
49399+ ret = -EACCES;
49400+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49401+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
49402+ ret = -EACCES;
49403+#endif
49404+#endif
49405+ if (gr_status & GR_READY) {
49406+ if (!(task->acl->mode & GR_VIEW))
49407+ ret = -EACCES;
49408+ }
49409+ } else
49410+ ret = -ENOENT;
49411+
49412+ read_unlock(&tasklist_lock);
49413+ rcu_read_unlock();
49414+
49415+ return ret;
49416+}
49417+#endif
49418+
49419+/* AUXV entries are filled via a descendant of search_binary_handler
49420+ after we've already applied the subject for the target
49421+*/
49422+int gr_acl_enable_at_secure(void)
49423+{
49424+ if (unlikely(!(gr_status & GR_READY)))
49425+ return 0;
49426+
49427+ if (current->acl->mode & GR_ATSECURE)
49428+ return 1;
49429+
49430+ return 0;
49431+}
49432+
49433+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
49434+{
49435+ struct task_struct *task = current;
49436+ struct dentry *dentry = file->f_path.dentry;
49437+ struct vfsmount *mnt = file->f_path.mnt;
49438+ struct acl_object_label *obj, *tmp;
49439+ struct acl_subject_label *subj;
49440+ unsigned int bufsize;
49441+ int is_not_root;
49442+ char *path;
49443+ dev_t dev = __get_dev(dentry);
49444+
49445+ if (unlikely(!(gr_status & GR_READY)))
49446+ return 1;
49447+
49448+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
49449+ return 1;
49450+
49451+ /* ignore Eric Biederman */
49452+ if (IS_PRIVATE(dentry->d_inode))
49453+ return 1;
49454+
49455+ subj = task->acl;
49456+ do {
49457+ obj = lookup_acl_obj_label(ino, dev, subj);
49458+ if (obj != NULL)
49459+ return (obj->mode & GR_FIND) ? 1 : 0;
49460+ } while ((subj = subj->parent_subject));
49461+
49462+ /* this is purely an optimization since we're looking for an object
49463+ for the directory we're doing a readdir on
49464+ if it's possible for any globbed object to match the entry we're
49465+ filling into the directory, then the object we find here will be
49466+ an anchor point with attached globbed objects
49467+ */
49468+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
49469+ if (obj->globbed == NULL)
49470+ return (obj->mode & GR_FIND) ? 1 : 0;
49471+
49472+ is_not_root = ((obj->filename[0] == '/') &&
49473+ (obj->filename[1] == '\0')) ? 0 : 1;
49474+ bufsize = PAGE_SIZE - namelen - is_not_root;
49475+
49476+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
49477+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
49478+ return 1;
49479+
49480+ preempt_disable();
49481+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
49482+ bufsize);
49483+
49484+ bufsize = strlen(path);
49485+
49486+ /* if base is "/", don't append an additional slash */
49487+ if (is_not_root)
49488+ *(path + bufsize) = '/';
49489+ memcpy(path + bufsize + is_not_root, name, namelen);
49490+ *(path + bufsize + namelen + is_not_root) = '\0';
49491+
49492+ tmp = obj->globbed;
49493+ while (tmp) {
49494+ if (!glob_match(tmp->filename, path)) {
49495+ preempt_enable();
49496+ return (tmp->mode & GR_FIND) ? 1 : 0;
49497+ }
49498+ tmp = tmp->next;
49499+ }
49500+ preempt_enable();
49501+ return (obj->mode & GR_FIND) ? 1 : 0;
49502+}
49503+
49504+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
49505+EXPORT_SYMBOL(gr_acl_is_enabled);
49506+#endif
49507+EXPORT_SYMBOL(gr_learn_resource);
49508+EXPORT_SYMBOL(gr_set_kernel_label);
49509+#ifdef CONFIG_SECURITY
49510+EXPORT_SYMBOL(gr_check_user_change);
49511+EXPORT_SYMBOL(gr_check_group_change);
49512+#endif
49513+
49514diff -urNp linux-3.0.4/grsecurity/gracl_cap.c linux-3.0.4/grsecurity/gracl_cap.c
49515--- linux-3.0.4/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
49516+++ linux-3.0.4/grsecurity/gracl_cap.c 2011-09-14 09:21:24.000000000 -0400
49517@@ -0,0 +1,101 @@
49518+#include <linux/kernel.h>
49519+#include <linux/module.h>
49520+#include <linux/sched.h>
49521+#include <linux/gracl.h>
49522+#include <linux/grsecurity.h>
49523+#include <linux/grinternal.h>
49524+
49525+extern const char *captab_log[];
49526+extern int captab_log_entries;
49527+
49528+int
49529+gr_acl_is_capable(const int cap)
49530+{
49531+ struct task_struct *task = current;
49532+ const struct cred *cred = current_cred();
49533+ struct acl_subject_label *curracl;
49534+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
49535+ kernel_cap_t cap_audit = __cap_empty_set;
49536+
49537+ if (!gr_acl_is_enabled())
49538+ return 1;
49539+
49540+ curracl = task->acl;
49541+
49542+ cap_drop = curracl->cap_lower;
49543+ cap_mask = curracl->cap_mask;
49544+ cap_audit = curracl->cap_invert_audit;
49545+
49546+ while ((curracl = curracl->parent_subject)) {
49547+ /* if the cap isn't specified in the current computed mask but is specified in the
49548+ current level subject, and is lowered in the current level subject, then add
49549+ it to the set of dropped capabilities
49550+ otherwise, add the current level subject's mask to the current computed mask
49551+ */
49552+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
49553+ cap_raise(cap_mask, cap);
49554+ if (cap_raised(curracl->cap_lower, cap))
49555+ cap_raise(cap_drop, cap);
49556+ if (cap_raised(curracl->cap_invert_audit, cap))
49557+ cap_raise(cap_audit, cap);
49558+ }
49559+ }
49560+
49561+ if (!cap_raised(cap_drop, cap)) {
49562+ if (cap_raised(cap_audit, cap))
49563+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
49564+ return 1;
49565+ }
49566+
49567+ curracl = task->acl;
49568+
49569+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
49570+ && cap_raised(cred->cap_effective, cap)) {
49571+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
49572+ task->role->roletype, cred->uid,
49573+ cred->gid, task->exec_file ?
49574+ gr_to_filename(task->exec_file->f_path.dentry,
49575+ task->exec_file->f_path.mnt) : curracl->filename,
49576+ curracl->filename, 0UL,
49577+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
49578+ return 1;
49579+ }
49580+
49581+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
49582+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
49583+ return 0;
49584+}
49585+
49586+int
49587+gr_acl_is_capable_nolog(const int cap)
49588+{
49589+ struct acl_subject_label *curracl;
49590+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
49591+
49592+ if (!gr_acl_is_enabled())
49593+ return 1;
49594+
49595+ curracl = current->acl;
49596+
49597+ cap_drop = curracl->cap_lower;
49598+ cap_mask = curracl->cap_mask;
49599+
49600+ while ((curracl = curracl->parent_subject)) {
49601+ /* if the cap isn't specified in the current computed mask but is specified in the
49602+ current level subject, and is lowered in the current level subject, then add
49603+ it to the set of dropped capabilities
49604+ otherwise, add the current level subject's mask to the current computed mask
49605+ */
49606+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
49607+ cap_raise(cap_mask, cap);
49608+ if (cap_raised(curracl->cap_lower, cap))
49609+ cap_raise(cap_drop, cap);
49610+ }
49611+ }
49612+
49613+ if (!cap_raised(cap_drop, cap))
49614+ return 1;
49615+
49616+ return 0;
49617+}
49618+
49619diff -urNp linux-3.0.4/grsecurity/gracl_fs.c linux-3.0.4/grsecurity/gracl_fs.c
49620--- linux-3.0.4/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
49621+++ linux-3.0.4/grsecurity/gracl_fs.c 2011-08-23 21:48:14.000000000 -0400
49622@@ -0,0 +1,431 @@
49623+#include <linux/kernel.h>
49624+#include <linux/sched.h>
49625+#include <linux/types.h>
49626+#include <linux/fs.h>
49627+#include <linux/file.h>
49628+#include <linux/stat.h>
49629+#include <linux/grsecurity.h>
49630+#include <linux/grinternal.h>
49631+#include <linux/gracl.h>
49632+
49633+__u32
49634+gr_acl_handle_hidden_file(const struct dentry * dentry,
49635+ const struct vfsmount * mnt)
49636+{
49637+ __u32 mode;
49638+
49639+ if (unlikely(!dentry->d_inode))
49640+ return GR_FIND;
49641+
49642+ mode =
49643+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
49644+
49645+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
49646+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
49647+ return mode;
49648+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
49649+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
49650+ return 0;
49651+ } else if (unlikely(!(mode & GR_FIND)))
49652+ return 0;
49653+
49654+ return GR_FIND;
49655+}
49656+
49657+__u32
49658+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
49659+ const int fmode)
49660+{
49661+ __u32 reqmode = GR_FIND;
49662+ __u32 mode;
49663+
49664+ if (unlikely(!dentry->d_inode))
49665+ return reqmode;
49666+
49667+ if (unlikely(fmode & O_APPEND))
49668+ reqmode |= GR_APPEND;
49669+ else if (unlikely(fmode & FMODE_WRITE))
49670+ reqmode |= GR_WRITE;
49671+ if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
49672+ reqmode |= GR_READ;
49673+ if ((fmode & FMODE_GREXEC) && (fmode & __FMODE_EXEC))
49674+ reqmode &= ~GR_READ;
49675+ mode =
49676+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
49677+ mnt);
49678+
49679+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
49680+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
49681+ reqmode & GR_READ ? " reading" : "",
49682+ reqmode & GR_WRITE ? " writing" : reqmode &
49683+ GR_APPEND ? " appending" : "");
49684+ return reqmode;
49685+ } else
49686+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
49687+ {
49688+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
49689+ reqmode & GR_READ ? " reading" : "",
49690+ reqmode & GR_WRITE ? " writing" : reqmode &
49691+ GR_APPEND ? " appending" : "");
49692+ return 0;
49693+ } else if (unlikely((mode & reqmode) != reqmode))
49694+ return 0;
49695+
49696+ return reqmode;
49697+}
49698+
49699+__u32
49700+gr_acl_handle_creat(const struct dentry * dentry,
49701+ const struct dentry * p_dentry,
49702+ const struct vfsmount * p_mnt, const int fmode,
49703+ const int imode)
49704+{
49705+ __u32 reqmode = GR_WRITE | GR_CREATE;
49706+ __u32 mode;
49707+
49708+ if (unlikely(fmode & O_APPEND))
49709+ reqmode |= GR_APPEND;
49710+ if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
49711+ reqmode |= GR_READ;
49712+ if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
49713+ reqmode |= GR_SETID;
49714+
49715+ mode =
49716+ gr_check_create(dentry, p_dentry, p_mnt,
49717+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
49718+
49719+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
49720+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
49721+ reqmode & GR_READ ? " reading" : "",
49722+ reqmode & GR_WRITE ? " writing" : reqmode &
49723+ GR_APPEND ? " appending" : "");
49724+ return reqmode;
49725+ } else
49726+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
49727+ {
49728+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
49729+ reqmode & GR_READ ? " reading" : "",
49730+ reqmode & GR_WRITE ? " writing" : reqmode &
49731+ GR_APPEND ? " appending" : "");
49732+ return 0;
49733+ } else if (unlikely((mode & reqmode) != reqmode))
49734+ return 0;
49735+
49736+ return reqmode;
49737+}
49738+
49739+__u32
49740+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
49741+ const int fmode)
49742+{
49743+ __u32 mode, reqmode = GR_FIND;
49744+
49745+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
49746+ reqmode |= GR_EXEC;
49747+ if (fmode & S_IWOTH)
49748+ reqmode |= GR_WRITE;
49749+ if (fmode & S_IROTH)
49750+ reqmode |= GR_READ;
49751+
49752+ mode =
49753+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
49754+ mnt);
49755+
49756+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
49757+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
49758+ reqmode & GR_READ ? " reading" : "",
49759+ reqmode & GR_WRITE ? " writing" : "",
49760+ reqmode & GR_EXEC ? " executing" : "");
49761+ return reqmode;
49762+ } else
49763+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
49764+ {
49765+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
49766+ reqmode & GR_READ ? " reading" : "",
49767+ reqmode & GR_WRITE ? " writing" : "",
49768+ reqmode & GR_EXEC ? " executing" : "");
49769+ return 0;
49770+ } else if (unlikely((mode & reqmode) != reqmode))
49771+ return 0;
49772+
49773+ return reqmode;
49774+}
49775+
49776+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
49777+{
49778+ __u32 mode;
49779+
49780+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
49781+
49782+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
49783+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
49784+ return mode;
49785+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
49786+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
49787+ return 0;
49788+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
49789+ return 0;
49790+
49791+ return (reqmode);
49792+}
49793+
49794+__u32
49795+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
49796+{
49797+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
49798+}
49799+
49800+__u32
49801+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
49802+{
49803+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
49804+}
49805+
49806+__u32
49807+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
49808+{
49809+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
49810+}
49811+
49812+__u32
49813+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
49814+{
49815+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
49816+}
49817+
49818+__u32
49819+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
49820+ mode_t mode)
49821+{
49822+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
49823+ return 1;
49824+
49825+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
49826+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
49827+ GR_FCHMOD_ACL_MSG);
49828+ } else {
49829+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
49830+ }
49831+}
49832+
49833+__u32
49834+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
49835+ mode_t mode)
49836+{
49837+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
49838+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
49839+ GR_CHMOD_ACL_MSG);
49840+ } else {
49841+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
49842+ }
49843+}
49844+
49845+__u32
49846+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
49847+{
49848+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
49849+}
49850+
49851+__u32
49852+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
49853+{
49854+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
49855+}
49856+
49857+__u32
49858+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
49859+{
49860+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
49861+}
49862+
49863+__u32
49864+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
49865+{
49866+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
49867+ GR_UNIXCONNECT_ACL_MSG);
49868+}
49869+
49870+/* hardlinks require at minimum create permission,
49871+ any additional privilege required is based on the
49872+ privilege of the file being linked to
49873+*/
49874+__u32
49875+gr_acl_handle_link(const struct dentry * new_dentry,
49876+ const struct dentry * parent_dentry,
49877+ const struct vfsmount * parent_mnt,
49878+ const struct dentry * old_dentry,
49879+ const struct vfsmount * old_mnt, const char *to)
49880+{
49881+ __u32 mode;
49882+ __u32 needmode = GR_CREATE | GR_LINK;
49883+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
49884+
49885+ mode =
49886+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
49887+ old_mnt);
49888+
49889+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
49890+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
49891+ return mode;
49892+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
49893+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
49894+ return 0;
49895+ } else if (unlikely((mode & needmode) != needmode))
49896+ return 0;
49897+
49898+ return 1;
49899+}
49900+
49901+__u32
49902+gr_acl_handle_symlink(const struct dentry * new_dentry,
49903+ const struct dentry * parent_dentry,
49904+ const struct vfsmount * parent_mnt, const char *from)
49905+{
49906+ __u32 needmode = GR_WRITE | GR_CREATE;
49907+ __u32 mode;
49908+
49909+ mode =
49910+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
49911+ GR_CREATE | GR_AUDIT_CREATE |
49912+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
49913+
49914+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
49915+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
49916+ return mode;
49917+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
49918+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
49919+ return 0;
49920+ } else if (unlikely((mode & needmode) != needmode))
49921+ return 0;
49922+
49923+ return (GR_WRITE | GR_CREATE);
49924+}
49925+
49926+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
49927+{
49928+ __u32 mode;
49929+
49930+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
49931+
49932+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
49933+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
49934+ return mode;
49935+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
49936+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
49937+ return 0;
49938+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
49939+ return 0;
49940+
49941+ return (reqmode);
49942+}
49943+
49944+__u32
49945+gr_acl_handle_mknod(const struct dentry * new_dentry,
49946+ const struct dentry * parent_dentry,
49947+ const struct vfsmount * parent_mnt,
49948+ const int mode)
49949+{
49950+ __u32 reqmode = GR_WRITE | GR_CREATE;
49951+ if (unlikely(mode & (S_ISUID | S_ISGID)))
49952+ reqmode |= GR_SETID;
49953+
49954+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
49955+ reqmode, GR_MKNOD_ACL_MSG);
49956+}
49957+
49958+__u32
49959+gr_acl_handle_mkdir(const struct dentry *new_dentry,
49960+ const struct dentry *parent_dentry,
49961+ const struct vfsmount *parent_mnt)
49962+{
49963+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
49964+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
49965+}
49966+
49967+#define RENAME_CHECK_SUCCESS(old, new) \
49968+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
49969+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
49970+
49971+int
49972+gr_acl_handle_rename(struct dentry *new_dentry,
49973+ struct dentry *parent_dentry,
49974+ const struct vfsmount *parent_mnt,
49975+ struct dentry *old_dentry,
49976+ struct inode *old_parent_inode,
49977+ struct vfsmount *old_mnt, const char *newname)
49978+{
49979+ __u32 comp1, comp2;
49980+ int error = 0;
49981+
49982+ if (unlikely(!gr_acl_is_enabled()))
49983+ return 0;
49984+
49985+ if (!new_dentry->d_inode) {
49986+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
49987+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
49988+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
49989+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
49990+ GR_DELETE | GR_AUDIT_DELETE |
49991+ GR_AUDIT_READ | GR_AUDIT_WRITE |
49992+ GR_SUPPRESS, old_mnt);
49993+ } else {
49994+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
49995+ GR_CREATE | GR_DELETE |
49996+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
49997+ GR_AUDIT_READ | GR_AUDIT_WRITE |
49998+ GR_SUPPRESS, parent_mnt);
49999+ comp2 =
50000+ gr_search_file(old_dentry,
50001+ GR_READ | GR_WRITE | GR_AUDIT_READ |
50002+ GR_DELETE | GR_AUDIT_DELETE |
50003+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
50004+ }
50005+
50006+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
50007+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
50008+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
50009+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
50010+ && !(comp2 & GR_SUPPRESS)) {
50011+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
50012+ error = -EACCES;
50013+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
50014+ error = -EACCES;
50015+
50016+ return error;
50017+}
50018+
50019+void
50020+gr_acl_handle_exit(void)
50021+{
50022+ u16 id;
50023+ char *rolename;
50024+ struct file *exec_file;
50025+
50026+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
50027+ !(current->role->roletype & GR_ROLE_PERSIST))) {
50028+ id = current->acl_role_id;
50029+ rolename = current->role->rolename;
50030+ gr_set_acls(1);
50031+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
50032+ }
50033+
50034+ write_lock(&grsec_exec_file_lock);
50035+ exec_file = current->exec_file;
50036+ current->exec_file = NULL;
50037+ write_unlock(&grsec_exec_file_lock);
50038+
50039+ if (exec_file)
50040+ fput(exec_file);
50041+}
50042+
50043+int
50044+gr_acl_handle_procpidmem(const struct task_struct *task)
50045+{
50046+ if (unlikely(!gr_acl_is_enabled()))
50047+ return 0;
50048+
50049+ if (task != current && task->acl->mode & GR_PROTPROCFD)
50050+ return -EACCES;
50051+
50052+ return 0;
50053+}
50054diff -urNp linux-3.0.4/grsecurity/gracl_ip.c linux-3.0.4/grsecurity/gracl_ip.c
50055--- linux-3.0.4/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
50056+++ linux-3.0.4/grsecurity/gracl_ip.c 2011-08-23 21:48:14.000000000 -0400
50057@@ -0,0 +1,381 @@
50058+#include <linux/kernel.h>
50059+#include <asm/uaccess.h>
50060+#include <asm/errno.h>
50061+#include <net/sock.h>
50062+#include <linux/file.h>
50063+#include <linux/fs.h>
50064+#include <linux/net.h>
50065+#include <linux/in.h>
50066+#include <linux/skbuff.h>
50067+#include <linux/ip.h>
50068+#include <linux/udp.h>
50069+#include <linux/types.h>
50070+#include <linux/sched.h>
50071+#include <linux/netdevice.h>
50072+#include <linux/inetdevice.h>
50073+#include <linux/gracl.h>
50074+#include <linux/grsecurity.h>
50075+#include <linux/grinternal.h>
50076+
50077+#define GR_BIND 0x01
50078+#define GR_CONNECT 0x02
50079+#define GR_INVERT 0x04
50080+#define GR_BINDOVERRIDE 0x08
50081+#define GR_CONNECTOVERRIDE 0x10
50082+#define GR_SOCK_FAMILY 0x20
50083+
50084+static const char * gr_protocols[IPPROTO_MAX] = {
50085+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
50086+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
50087+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
50088+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
50089+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
50090+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
50091+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
50092+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
50093+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
50094+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
50095+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
50096+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
50097+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
50098+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
50099+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
50100+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
50101+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
50102+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
50103+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
50104+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
50105+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
50106+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
50107+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
50108+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
50109+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
50110+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
50111+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
50112+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
50113+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
50114+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
50115+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
50116+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
50117+ };
50118+
50119+static const char * gr_socktypes[SOCK_MAX] = {
50120+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
50121+ "unknown:7", "unknown:8", "unknown:9", "packet"
50122+ };
50123+
50124+static const char * gr_sockfamilies[AF_MAX+1] = {
50125+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
50126+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
50127+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
50128+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
50129+ };
50130+
50131+const char *
50132+gr_proto_to_name(unsigned char proto)
50133+{
50134+ return gr_protocols[proto];
50135+}
50136+
50137+const char *
50138+gr_socktype_to_name(unsigned char type)
50139+{
50140+ return gr_socktypes[type];
50141+}
50142+
50143+const char *
50144+gr_sockfamily_to_name(unsigned char family)
50145+{
50146+ return gr_sockfamilies[family];
50147+}
50148+
50149+int
50150+gr_search_socket(const int domain, const int type, const int protocol)
50151+{
50152+ struct acl_subject_label *curr;
50153+ const struct cred *cred = current_cred();
50154+
50155+ if (unlikely(!gr_acl_is_enabled()))
50156+ goto exit;
50157+
50158+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
50159+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
50160+ goto exit; // let the kernel handle it
50161+
50162+ curr = current->acl;
50163+
50164+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
50165+ /* the family is allowed, if this is PF_INET allow it only if
50166+ the extra sock type/protocol checks pass */
50167+ if (domain == PF_INET)
50168+ goto inet_check;
50169+ goto exit;
50170+ } else {
50171+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
50172+ __u32 fakeip = 0;
50173+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
50174+ current->role->roletype, cred->uid,
50175+ cred->gid, current->exec_file ?
50176+ gr_to_filename(current->exec_file->f_path.dentry,
50177+ current->exec_file->f_path.mnt) :
50178+ curr->filename, curr->filename,
50179+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
50180+ &current->signal->saved_ip);
50181+ goto exit;
50182+ }
50183+ goto exit_fail;
50184+ }
50185+
50186+inet_check:
50187+ /* the rest of this checking is for IPv4 only */
50188+ if (!curr->ips)
50189+ goto exit;
50190+
50191+ if ((curr->ip_type & (1 << type)) &&
50192+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
50193+ goto exit;
50194+
50195+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
50196+ /* we don't place acls on raw sockets , and sometimes
50197+ dgram/ip sockets are opened for ioctl and not
50198+ bind/connect, so we'll fake a bind learn log */
50199+ if (type == SOCK_RAW || type == SOCK_PACKET) {
50200+ __u32 fakeip = 0;
50201+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
50202+ current->role->roletype, cred->uid,
50203+ cred->gid, current->exec_file ?
50204+ gr_to_filename(current->exec_file->f_path.dentry,
50205+ current->exec_file->f_path.mnt) :
50206+ curr->filename, curr->filename,
50207+ &fakeip, 0, type,
50208+ protocol, GR_CONNECT, &current->signal->saved_ip);
50209+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
50210+ __u32 fakeip = 0;
50211+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
50212+ current->role->roletype, cred->uid,
50213+ cred->gid, current->exec_file ?
50214+ gr_to_filename(current->exec_file->f_path.dentry,
50215+ current->exec_file->f_path.mnt) :
50216+ curr->filename, curr->filename,
50217+ &fakeip, 0, type,
50218+ protocol, GR_BIND, &current->signal->saved_ip);
50219+ }
50220+ /* we'll log when they use connect or bind */
50221+ goto exit;
50222+ }
50223+
50224+exit_fail:
50225+ if (domain == PF_INET)
50226+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
50227+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
50228+ else
50229+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
50230+ gr_socktype_to_name(type), protocol);
50231+
50232+ return 0;
50233+exit:
50234+ return 1;
50235+}
50236+
50237+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
50238+{
50239+ if ((ip->mode & mode) &&
50240+ (ip_port >= ip->low) &&
50241+ (ip_port <= ip->high) &&
50242+ ((ntohl(ip_addr) & our_netmask) ==
50243+ (ntohl(our_addr) & our_netmask))
50244+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
50245+ && (ip->type & (1 << type))) {
50246+ if (ip->mode & GR_INVERT)
50247+ return 2; // specifically denied
50248+ else
50249+ return 1; // allowed
50250+ }
50251+
50252+ return 0; // not specifically allowed, may continue parsing
50253+}
50254+
50255+static int
50256+gr_search_connectbind(const int full_mode, struct sock *sk,
50257+ struct sockaddr_in *addr, const int type)
50258+{
50259+ char iface[IFNAMSIZ] = {0};
50260+ struct acl_subject_label *curr;
50261+ struct acl_ip_label *ip;
50262+ struct inet_sock *isk;
50263+ struct net_device *dev;
50264+ struct in_device *idev;
50265+ unsigned long i;
50266+ int ret;
50267+ int mode = full_mode & (GR_BIND | GR_CONNECT);
50268+ __u32 ip_addr = 0;
50269+ __u32 our_addr;
50270+ __u32 our_netmask;
50271+ char *p;
50272+ __u16 ip_port = 0;
50273+ const struct cred *cred = current_cred();
50274+
50275+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
50276+ return 0;
50277+
50278+ curr = current->acl;
50279+ isk = inet_sk(sk);
50280+
50281+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
50282+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
50283+ addr->sin_addr.s_addr = curr->inaddr_any_override;
50284+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
50285+ struct sockaddr_in saddr;
50286+ int err;
50287+
50288+ saddr.sin_family = AF_INET;
50289+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
50290+ saddr.sin_port = isk->inet_sport;
50291+
50292+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
50293+ if (err)
50294+ return err;
50295+
50296+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
50297+ if (err)
50298+ return err;
50299+ }
50300+
50301+ if (!curr->ips)
50302+ return 0;
50303+
50304+ ip_addr = addr->sin_addr.s_addr;
50305+ ip_port = ntohs(addr->sin_port);
50306+
50307+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
50308+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
50309+ current->role->roletype, cred->uid,
50310+ cred->gid, current->exec_file ?
50311+ gr_to_filename(current->exec_file->f_path.dentry,
50312+ current->exec_file->f_path.mnt) :
50313+ curr->filename, curr->filename,
50314+ &ip_addr, ip_port, type,
50315+ sk->sk_protocol, mode, &current->signal->saved_ip);
50316+ return 0;
50317+ }
50318+
50319+ for (i = 0; i < curr->ip_num; i++) {
50320+ ip = *(curr->ips + i);
50321+ if (ip->iface != NULL) {
50322+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
50323+ p = strchr(iface, ':');
50324+ if (p != NULL)
50325+ *p = '\0';
50326+ dev = dev_get_by_name(sock_net(sk), iface);
50327+ if (dev == NULL)
50328+ continue;
50329+ idev = in_dev_get(dev);
50330+ if (idev == NULL) {
50331+ dev_put(dev);
50332+ continue;
50333+ }
50334+ rcu_read_lock();
50335+ for_ifa(idev) {
50336+ if (!strcmp(ip->iface, ifa->ifa_label)) {
50337+ our_addr = ifa->ifa_address;
50338+ our_netmask = 0xffffffff;
50339+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
50340+ if (ret == 1) {
50341+ rcu_read_unlock();
50342+ in_dev_put(idev);
50343+ dev_put(dev);
50344+ return 0;
50345+ } else if (ret == 2) {
50346+ rcu_read_unlock();
50347+ in_dev_put(idev);
50348+ dev_put(dev);
50349+ goto denied;
50350+ }
50351+ }
50352+ } endfor_ifa(idev);
50353+ rcu_read_unlock();
50354+ in_dev_put(idev);
50355+ dev_put(dev);
50356+ } else {
50357+ our_addr = ip->addr;
50358+ our_netmask = ip->netmask;
50359+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
50360+ if (ret == 1)
50361+ return 0;
50362+ else if (ret == 2)
50363+ goto denied;
50364+ }
50365+ }
50366+
50367+denied:
50368+ if (mode == GR_BIND)
50369+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
50370+ else if (mode == GR_CONNECT)
50371+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
50372+
50373+ return -EACCES;
50374+}
50375+
50376+int
50377+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
50378+{
50379+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
50380+}
50381+
50382+int
50383+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
50384+{
50385+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
50386+}
50387+
50388+int gr_search_listen(struct socket *sock)
50389+{
50390+ struct sock *sk = sock->sk;
50391+ struct sockaddr_in addr;
50392+
50393+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
50394+ addr.sin_port = inet_sk(sk)->inet_sport;
50395+
50396+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
50397+}
50398+
50399+int gr_search_accept(struct socket *sock)
50400+{
50401+ struct sock *sk = sock->sk;
50402+ struct sockaddr_in addr;
50403+
50404+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
50405+ addr.sin_port = inet_sk(sk)->inet_sport;
50406+
50407+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
50408+}
50409+
50410+int
50411+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
50412+{
50413+ if (addr)
50414+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
50415+ else {
50416+ struct sockaddr_in sin;
50417+ const struct inet_sock *inet = inet_sk(sk);
50418+
50419+ sin.sin_addr.s_addr = inet->inet_daddr;
50420+ sin.sin_port = inet->inet_dport;
50421+
50422+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
50423+ }
50424+}
50425+
50426+int
50427+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
50428+{
50429+ struct sockaddr_in sin;
50430+
50431+ if (unlikely(skb->len < sizeof (struct udphdr)))
50432+ return 0; // skip this packet
50433+
50434+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
50435+ sin.sin_port = udp_hdr(skb)->source;
50436+
50437+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
50438+}
50439diff -urNp linux-3.0.4/grsecurity/gracl_learn.c linux-3.0.4/grsecurity/gracl_learn.c
50440--- linux-3.0.4/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
50441+++ linux-3.0.4/grsecurity/gracl_learn.c 2011-08-23 21:48:14.000000000 -0400
50442@@ -0,0 +1,207 @@
50443+#include <linux/kernel.h>
50444+#include <linux/mm.h>
50445+#include <linux/sched.h>
50446+#include <linux/poll.h>
50447+#include <linux/string.h>
50448+#include <linux/file.h>
50449+#include <linux/types.h>
50450+#include <linux/vmalloc.h>
50451+#include <linux/grinternal.h>
50452+
50453+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
50454+ size_t count, loff_t *ppos);
50455+extern int gr_acl_is_enabled(void);
50456+
50457+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
50458+static int gr_learn_attached;
50459+
50460+/* use a 512k buffer */
50461+#define LEARN_BUFFER_SIZE (512 * 1024)
50462+
50463+static DEFINE_SPINLOCK(gr_learn_lock);
50464+static DEFINE_MUTEX(gr_learn_user_mutex);
50465+
50466+/* we need to maintain two buffers, so that the kernel context of grlearn
50467+ uses a semaphore around the userspace copying, and the other kernel contexts
50468+ use a spinlock when copying into the buffer, since they cannot sleep
50469+*/
50470+static char *learn_buffer;
50471+static char *learn_buffer_user;
50472+static int learn_buffer_len;
50473+static int learn_buffer_user_len;
50474+
50475+static ssize_t
50476+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
50477+{
50478+ DECLARE_WAITQUEUE(wait, current);
50479+ ssize_t retval = 0;
50480+
50481+ add_wait_queue(&learn_wait, &wait);
50482+ set_current_state(TASK_INTERRUPTIBLE);
50483+ do {
50484+ mutex_lock(&gr_learn_user_mutex);
50485+ spin_lock(&gr_learn_lock);
50486+ if (learn_buffer_len)
50487+ break;
50488+ spin_unlock(&gr_learn_lock);
50489+ mutex_unlock(&gr_learn_user_mutex);
50490+ if (file->f_flags & O_NONBLOCK) {
50491+ retval = -EAGAIN;
50492+ goto out;
50493+ }
50494+ if (signal_pending(current)) {
50495+ retval = -ERESTARTSYS;
50496+ goto out;
50497+ }
50498+
50499+ schedule();
50500+ } while (1);
50501+
50502+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
50503+ learn_buffer_user_len = learn_buffer_len;
50504+ retval = learn_buffer_len;
50505+ learn_buffer_len = 0;
50506+
50507+ spin_unlock(&gr_learn_lock);
50508+
50509+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
50510+ retval = -EFAULT;
50511+
50512+ mutex_unlock(&gr_learn_user_mutex);
50513+out:
50514+ set_current_state(TASK_RUNNING);
50515+ remove_wait_queue(&learn_wait, &wait);
50516+ return retval;
50517+}
50518+
50519+static unsigned int
50520+poll_learn(struct file * file, poll_table * wait)
50521+{
50522+ poll_wait(file, &learn_wait, wait);
50523+
50524+ if (learn_buffer_len)
50525+ return (POLLIN | POLLRDNORM);
50526+
50527+ return 0;
50528+}
50529+
50530+void
50531+gr_clear_learn_entries(void)
50532+{
50533+ char *tmp;
50534+
50535+ mutex_lock(&gr_learn_user_mutex);
50536+ spin_lock(&gr_learn_lock);
50537+ tmp = learn_buffer;
50538+ learn_buffer = NULL;
50539+ spin_unlock(&gr_learn_lock);
50540+ if (tmp)
50541+ vfree(tmp);
50542+ if (learn_buffer_user != NULL) {
50543+ vfree(learn_buffer_user);
50544+ learn_buffer_user = NULL;
50545+ }
50546+ learn_buffer_len = 0;
50547+ mutex_unlock(&gr_learn_user_mutex);
50548+
50549+ return;
50550+}
50551+
50552+void
50553+gr_add_learn_entry(const char *fmt, ...)
50554+{
50555+ va_list args;
50556+ unsigned int len;
50557+
50558+ if (!gr_learn_attached)
50559+ return;
50560+
50561+ spin_lock(&gr_learn_lock);
50562+
50563+ /* leave a gap at the end so we know when it's "full" but don't have to
50564+ compute the exact length of the string we're trying to append
50565+ */
50566+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
50567+ spin_unlock(&gr_learn_lock);
50568+ wake_up_interruptible(&learn_wait);
50569+ return;
50570+ }
50571+ if (learn_buffer == NULL) {
50572+ spin_unlock(&gr_learn_lock);
50573+ return;
50574+ }
50575+
50576+ va_start(args, fmt);
50577+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
50578+ va_end(args);
50579+
50580+ learn_buffer_len += len + 1;
50581+
50582+ spin_unlock(&gr_learn_lock);
50583+ wake_up_interruptible(&learn_wait);
50584+
50585+ return;
50586+}
50587+
50588+static int
50589+open_learn(struct inode *inode, struct file *file)
50590+{
50591+ if (file->f_mode & FMODE_READ && gr_learn_attached)
50592+ return -EBUSY;
50593+ if (file->f_mode & FMODE_READ) {
50594+ int retval = 0;
50595+ mutex_lock(&gr_learn_user_mutex);
50596+ if (learn_buffer == NULL)
50597+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
50598+ if (learn_buffer_user == NULL)
50599+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
50600+ if (learn_buffer == NULL) {
50601+ retval = -ENOMEM;
50602+ goto out_error;
50603+ }
50604+ if (learn_buffer_user == NULL) {
50605+ retval = -ENOMEM;
50606+ goto out_error;
50607+ }
50608+ learn_buffer_len = 0;
50609+ learn_buffer_user_len = 0;
50610+ gr_learn_attached = 1;
50611+out_error:
50612+ mutex_unlock(&gr_learn_user_mutex);
50613+ return retval;
50614+ }
50615+ return 0;
50616+}
50617+
50618+static int
50619+close_learn(struct inode *inode, struct file *file)
50620+{
50621+ if (file->f_mode & FMODE_READ) {
50622+ char *tmp = NULL;
50623+ mutex_lock(&gr_learn_user_mutex);
50624+ spin_lock(&gr_learn_lock);
50625+ tmp = learn_buffer;
50626+ learn_buffer = NULL;
50627+ spin_unlock(&gr_learn_lock);
50628+ if (tmp)
50629+ vfree(tmp);
50630+ if (learn_buffer_user != NULL) {
50631+ vfree(learn_buffer_user);
50632+ learn_buffer_user = NULL;
50633+ }
50634+ learn_buffer_len = 0;
50635+ learn_buffer_user_len = 0;
50636+ gr_learn_attached = 0;
50637+ mutex_unlock(&gr_learn_user_mutex);
50638+ }
50639+
50640+ return 0;
50641+}
50642+
50643+const struct file_operations grsec_fops = {
50644+ .read = read_learn,
50645+ .write = write_grsec_handler,
50646+ .open = open_learn,
50647+ .release = close_learn,
50648+ .poll = poll_learn,
50649+};
50650diff -urNp linux-3.0.4/grsecurity/gracl_res.c linux-3.0.4/grsecurity/gracl_res.c
50651--- linux-3.0.4/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
50652+++ linux-3.0.4/grsecurity/gracl_res.c 2011-08-23 21:48:14.000000000 -0400
50653@@ -0,0 +1,68 @@
50654+#include <linux/kernel.h>
50655+#include <linux/sched.h>
50656+#include <linux/gracl.h>
50657+#include <linux/grinternal.h>
50658+
50659+static const char *restab_log[] = {
50660+ [RLIMIT_CPU] = "RLIMIT_CPU",
50661+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
50662+ [RLIMIT_DATA] = "RLIMIT_DATA",
50663+ [RLIMIT_STACK] = "RLIMIT_STACK",
50664+ [RLIMIT_CORE] = "RLIMIT_CORE",
50665+ [RLIMIT_RSS] = "RLIMIT_RSS",
50666+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
50667+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
50668+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
50669+ [RLIMIT_AS] = "RLIMIT_AS",
50670+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
50671+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
50672+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
50673+ [RLIMIT_NICE] = "RLIMIT_NICE",
50674+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
50675+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
50676+ [GR_CRASH_RES] = "RLIMIT_CRASH"
50677+};
50678+
50679+void
50680+gr_log_resource(const struct task_struct *task,
50681+ const int res, const unsigned long wanted, const int gt)
50682+{
50683+ const struct cred *cred;
50684+ unsigned long rlim;
50685+
50686+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
50687+ return;
50688+
50689+ // not yet supported resource
50690+ if (unlikely(!restab_log[res]))
50691+ return;
50692+
50693+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
50694+ rlim = task_rlimit_max(task, res);
50695+ else
50696+ rlim = task_rlimit(task, res);
50697+
50698+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
50699+ return;
50700+
50701+ rcu_read_lock();
50702+ cred = __task_cred(task);
50703+
50704+ if (res == RLIMIT_NPROC &&
50705+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
50706+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
50707+ goto out_rcu_unlock;
50708+ else if (res == RLIMIT_MEMLOCK &&
50709+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
50710+ goto out_rcu_unlock;
50711+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
50712+ goto out_rcu_unlock;
50713+ rcu_read_unlock();
50714+
50715+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
50716+
50717+ return;
50718+out_rcu_unlock:
50719+ rcu_read_unlock();
50720+ return;
50721+}
50722diff -urNp linux-3.0.4/grsecurity/gracl_segv.c linux-3.0.4/grsecurity/gracl_segv.c
50723--- linux-3.0.4/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
50724+++ linux-3.0.4/grsecurity/gracl_segv.c 2011-08-23 21:48:14.000000000 -0400
50725@@ -0,0 +1,299 @@
50726+#include <linux/kernel.h>
50727+#include <linux/mm.h>
50728+#include <asm/uaccess.h>
50729+#include <asm/errno.h>
50730+#include <asm/mman.h>
50731+#include <net/sock.h>
50732+#include <linux/file.h>
50733+#include <linux/fs.h>
50734+#include <linux/net.h>
50735+#include <linux/in.h>
50736+#include <linux/slab.h>
50737+#include <linux/types.h>
50738+#include <linux/sched.h>
50739+#include <linux/timer.h>
50740+#include <linux/gracl.h>
50741+#include <linux/grsecurity.h>
50742+#include <linux/grinternal.h>
50743+
50744+static struct crash_uid *uid_set;
50745+static unsigned short uid_used;
50746+static DEFINE_SPINLOCK(gr_uid_lock);
50747+extern rwlock_t gr_inode_lock;
50748+extern struct acl_subject_label *
50749+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
50750+ struct acl_role_label *role);
50751+
50752+#ifdef CONFIG_BTRFS_FS
50753+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
50754+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
50755+#endif
50756+
50757+static inline dev_t __get_dev(const struct dentry *dentry)
50758+{
50759+#ifdef CONFIG_BTRFS_FS
50760+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
50761+ return get_btrfs_dev_from_inode(dentry->d_inode);
50762+ else
50763+#endif
50764+ return dentry->d_inode->i_sb->s_dev;
50765+}
50766+
50767+int
50768+gr_init_uidset(void)
50769+{
50770+ uid_set =
50771+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
50772+ uid_used = 0;
50773+
50774+ return uid_set ? 1 : 0;
50775+}
50776+
50777+void
50778+gr_free_uidset(void)
50779+{
50780+ if (uid_set)
50781+ kfree(uid_set);
50782+
50783+ return;
50784+}
50785+
50786+int
50787+gr_find_uid(const uid_t uid)
50788+{
50789+ struct crash_uid *tmp = uid_set;
50790+ uid_t buid;
50791+ int low = 0, high = uid_used - 1, mid;
50792+
50793+ while (high >= low) {
50794+ mid = (low + high) >> 1;
50795+ buid = tmp[mid].uid;
50796+ if (buid == uid)
50797+ return mid;
50798+ if (buid > uid)
50799+ high = mid - 1;
50800+ if (buid < uid)
50801+ low = mid + 1;
50802+ }
50803+
50804+ return -1;
50805+}
50806+
50807+static __inline__ void
50808+gr_insertsort(void)
50809+{
50810+ unsigned short i, j;
50811+ struct crash_uid index;
50812+
50813+ for (i = 1; i < uid_used; i++) {
50814+ index = uid_set[i];
50815+ j = i;
50816+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
50817+ uid_set[j] = uid_set[j - 1];
50818+ j--;
50819+ }
50820+ uid_set[j] = index;
50821+ }
50822+
50823+ return;
50824+}
50825+
50826+static __inline__ void
50827+gr_insert_uid(const uid_t uid, const unsigned long expires)
50828+{
50829+ int loc;
50830+
50831+ if (uid_used == GR_UIDTABLE_MAX)
50832+ return;
50833+
50834+ loc = gr_find_uid(uid);
50835+
50836+ if (loc >= 0) {
50837+ uid_set[loc].expires = expires;
50838+ return;
50839+ }
50840+
50841+ uid_set[uid_used].uid = uid;
50842+ uid_set[uid_used].expires = expires;
50843+ uid_used++;
50844+
50845+ gr_insertsort();
50846+
50847+ return;
50848+}
50849+
50850+void
50851+gr_remove_uid(const unsigned short loc)
50852+{
50853+ unsigned short i;
50854+
50855+ for (i = loc + 1; i < uid_used; i++)
50856+ uid_set[i - 1] = uid_set[i];
50857+
50858+ uid_used--;
50859+
50860+ return;
50861+}
50862+
50863+int
50864+gr_check_crash_uid(const uid_t uid)
50865+{
50866+ int loc;
50867+ int ret = 0;
50868+
50869+ if (unlikely(!gr_acl_is_enabled()))
50870+ return 0;
50871+
50872+ spin_lock(&gr_uid_lock);
50873+ loc = gr_find_uid(uid);
50874+
50875+ if (loc < 0)
50876+ goto out_unlock;
50877+
50878+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
50879+ gr_remove_uid(loc);
50880+ else
50881+ ret = 1;
50882+
50883+out_unlock:
50884+ spin_unlock(&gr_uid_lock);
50885+ return ret;
50886+}
50887+
50888+static __inline__ int
50889+proc_is_setxid(const struct cred *cred)
50890+{
50891+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
50892+ cred->uid != cred->fsuid)
50893+ return 1;
50894+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
50895+ cred->gid != cred->fsgid)
50896+ return 1;
50897+
50898+ return 0;
50899+}
50900+
50901+extern int gr_fake_force_sig(int sig, struct task_struct *t);
50902+
50903+void
50904+gr_handle_crash(struct task_struct *task, const int sig)
50905+{
50906+ struct acl_subject_label *curr;
50907+ struct acl_subject_label *curr2;
50908+ struct task_struct *tsk, *tsk2;
50909+ const struct cred *cred;
50910+ const struct cred *cred2;
50911+
50912+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
50913+ return;
50914+
50915+ if (unlikely(!gr_acl_is_enabled()))
50916+ return;
50917+
50918+ curr = task->acl;
50919+
50920+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
50921+ return;
50922+
50923+ if (time_before_eq(curr->expires, get_seconds())) {
50924+ curr->expires = 0;
50925+ curr->crashes = 0;
50926+ }
50927+
50928+ curr->crashes++;
50929+
50930+ if (!curr->expires)
50931+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
50932+
50933+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
50934+ time_after(curr->expires, get_seconds())) {
50935+ rcu_read_lock();
50936+ cred = __task_cred(task);
50937+ if (cred->uid && proc_is_setxid(cred)) {
50938+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
50939+ spin_lock(&gr_uid_lock);
50940+ gr_insert_uid(cred->uid, curr->expires);
50941+ spin_unlock(&gr_uid_lock);
50942+ curr->expires = 0;
50943+ curr->crashes = 0;
50944+ read_lock(&tasklist_lock);
50945+ do_each_thread(tsk2, tsk) {
50946+ cred2 = __task_cred(tsk);
50947+ if (tsk != task && cred2->uid == cred->uid)
50948+ gr_fake_force_sig(SIGKILL, tsk);
50949+ } while_each_thread(tsk2, tsk);
50950+ read_unlock(&tasklist_lock);
50951+ } else {
50952+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
50953+ read_lock(&tasklist_lock);
50954+ do_each_thread(tsk2, tsk) {
50955+ if (likely(tsk != task)) {
50956+ curr2 = tsk->acl;
50957+
50958+ if (curr2->device == curr->device &&
50959+ curr2->inode == curr->inode)
50960+ gr_fake_force_sig(SIGKILL, tsk);
50961+ }
50962+ } while_each_thread(tsk2, tsk);
50963+ read_unlock(&tasklist_lock);
50964+ }
50965+ rcu_read_unlock();
50966+ }
50967+
50968+ return;
50969+}
50970+
50971+int
50972+gr_check_crash_exec(const struct file *filp)
50973+{
50974+ struct acl_subject_label *curr;
50975+
50976+ if (unlikely(!gr_acl_is_enabled()))
50977+ return 0;
50978+
50979+ read_lock(&gr_inode_lock);
50980+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
50981+ __get_dev(filp->f_path.dentry),
50982+ current->role);
50983+ read_unlock(&gr_inode_lock);
50984+
50985+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
50986+ (!curr->crashes && !curr->expires))
50987+ return 0;
50988+
50989+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
50990+ time_after(curr->expires, get_seconds()))
50991+ return 1;
50992+ else if (time_before_eq(curr->expires, get_seconds())) {
50993+ curr->crashes = 0;
50994+ curr->expires = 0;
50995+ }
50996+
50997+ return 0;
50998+}
50999+
51000+void
51001+gr_handle_alertkill(struct task_struct *task)
51002+{
51003+ struct acl_subject_label *curracl;
51004+ __u32 curr_ip;
51005+ struct task_struct *p, *p2;
51006+
51007+ if (unlikely(!gr_acl_is_enabled()))
51008+ return;
51009+
51010+ curracl = task->acl;
51011+ curr_ip = task->signal->curr_ip;
51012+
51013+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
51014+ read_lock(&tasklist_lock);
51015+ do_each_thread(p2, p) {
51016+ if (p->signal->curr_ip == curr_ip)
51017+ gr_fake_force_sig(SIGKILL, p);
51018+ } while_each_thread(p2, p);
51019+ read_unlock(&tasklist_lock);
51020+ } else if (curracl->mode & GR_KILLPROC)
51021+ gr_fake_force_sig(SIGKILL, task);
51022+
51023+ return;
51024+}
51025diff -urNp linux-3.0.4/grsecurity/gracl_shm.c linux-3.0.4/grsecurity/gracl_shm.c
51026--- linux-3.0.4/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
51027+++ linux-3.0.4/grsecurity/gracl_shm.c 2011-08-23 21:48:14.000000000 -0400
51028@@ -0,0 +1,40 @@
51029+#include <linux/kernel.h>
51030+#include <linux/mm.h>
51031+#include <linux/sched.h>
51032+#include <linux/file.h>
51033+#include <linux/ipc.h>
51034+#include <linux/gracl.h>
51035+#include <linux/grsecurity.h>
51036+#include <linux/grinternal.h>
51037+
51038+int
51039+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
51040+ const time_t shm_createtime, const uid_t cuid, const int shmid)
51041+{
51042+ struct task_struct *task;
51043+
51044+ if (!gr_acl_is_enabled())
51045+ return 1;
51046+
51047+ rcu_read_lock();
51048+ read_lock(&tasklist_lock);
51049+
51050+ task = find_task_by_vpid(shm_cprid);
51051+
51052+ if (unlikely(!task))
51053+ task = find_task_by_vpid(shm_lapid);
51054+
51055+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
51056+ (task->pid == shm_lapid)) &&
51057+ (task->acl->mode & GR_PROTSHM) &&
51058+ (task->acl != current->acl))) {
51059+ read_unlock(&tasklist_lock);
51060+ rcu_read_unlock();
51061+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
51062+ return 0;
51063+ }
51064+ read_unlock(&tasklist_lock);
51065+ rcu_read_unlock();
51066+
51067+ return 1;
51068+}
51069diff -urNp linux-3.0.4/grsecurity/grsec_chdir.c linux-3.0.4/grsecurity/grsec_chdir.c
51070--- linux-3.0.4/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
51071+++ linux-3.0.4/grsecurity/grsec_chdir.c 2011-08-23 21:48:14.000000000 -0400
51072@@ -0,0 +1,19 @@
51073+#include <linux/kernel.h>
51074+#include <linux/sched.h>
51075+#include <linux/fs.h>
51076+#include <linux/file.h>
51077+#include <linux/grsecurity.h>
51078+#include <linux/grinternal.h>
51079+
51080+void
51081+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
51082+{
51083+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
51084+ if ((grsec_enable_chdir && grsec_enable_group &&
51085+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
51086+ !grsec_enable_group)) {
51087+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
51088+ }
51089+#endif
51090+ return;
51091+}
51092diff -urNp linux-3.0.4/grsecurity/grsec_chroot.c linux-3.0.4/grsecurity/grsec_chroot.c
51093--- linux-3.0.4/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
51094+++ linux-3.0.4/grsecurity/grsec_chroot.c 2011-09-15 06:47:48.000000000 -0400
51095@@ -0,0 +1,351 @@
51096+#include <linux/kernel.h>
51097+#include <linux/module.h>
51098+#include <linux/sched.h>
51099+#include <linux/file.h>
51100+#include <linux/fs.h>
51101+#include <linux/mount.h>
51102+#include <linux/types.h>
51103+#include <linux/pid_namespace.h>
51104+#include <linux/grsecurity.h>
51105+#include <linux/grinternal.h>
51106+
51107+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
51108+{
51109+#ifdef CONFIG_GRKERNSEC
51110+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
51111+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
51112+ task->gr_is_chrooted = 1;
51113+ else
51114+ task->gr_is_chrooted = 0;
51115+
51116+ task->gr_chroot_dentry = path->dentry;
51117+#endif
51118+ return;
51119+}
51120+
51121+void gr_clear_chroot_entries(struct task_struct *task)
51122+{
51123+#ifdef CONFIG_GRKERNSEC
51124+ task->gr_is_chrooted = 0;
51125+ task->gr_chroot_dentry = NULL;
51126+#endif
51127+ return;
51128+}
51129+
51130+int
51131+gr_handle_chroot_unix(const pid_t pid)
51132+{
51133+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
51134+ struct task_struct *p;
51135+
51136+ if (unlikely(!grsec_enable_chroot_unix))
51137+ return 1;
51138+
51139+ if (likely(!proc_is_chrooted(current)))
51140+ return 1;
51141+
51142+ rcu_read_lock();
51143+ read_lock(&tasklist_lock);
51144+ p = find_task_by_vpid_unrestricted(pid);
51145+ if (unlikely(p && !have_same_root(current, p))) {
51146+ read_unlock(&tasklist_lock);
51147+ rcu_read_unlock();
51148+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
51149+ return 0;
51150+ }
51151+ read_unlock(&tasklist_lock);
51152+ rcu_read_unlock();
51153+#endif
51154+ return 1;
51155+}
51156+
51157+int
51158+gr_handle_chroot_nice(void)
51159+{
51160+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
51161+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
51162+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
51163+ return -EPERM;
51164+ }
51165+#endif
51166+ return 0;
51167+}
51168+
51169+int
51170+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
51171+{
51172+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
51173+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
51174+ && proc_is_chrooted(current)) {
51175+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
51176+ return -EACCES;
51177+ }
51178+#endif
51179+ return 0;
51180+}
51181+
51182+int
51183+gr_handle_chroot_rawio(const struct inode *inode)
51184+{
51185+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
51186+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
51187+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
51188+ return 1;
51189+#endif
51190+ return 0;
51191+}
51192+
51193+int
51194+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
51195+{
51196+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
51197+ struct task_struct *p;
51198+ int ret = 0;
51199+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
51200+ return ret;
51201+
51202+ read_lock(&tasklist_lock);
51203+ do_each_pid_task(pid, type, p) {
51204+ if (!have_same_root(current, p)) {
51205+ ret = 1;
51206+ goto out;
51207+ }
51208+ } while_each_pid_task(pid, type, p);
51209+out:
51210+ read_unlock(&tasklist_lock);
51211+ return ret;
51212+#endif
51213+ return 0;
51214+}
51215+
51216+int
51217+gr_pid_is_chrooted(struct task_struct *p)
51218+{
51219+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
51220+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
51221+ return 0;
51222+
51223+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
51224+ !have_same_root(current, p)) {
51225+ return 1;
51226+ }
51227+#endif
51228+ return 0;
51229+}
51230+
51231+EXPORT_SYMBOL(gr_pid_is_chrooted);
51232+
51233+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
51234+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
51235+{
51236+ struct path path, currentroot;
51237+ int ret = 0;
51238+
51239+ path.dentry = (struct dentry *)u_dentry;
51240+ path.mnt = (struct vfsmount *)u_mnt;
51241+ get_fs_root(current->fs, &currentroot);
51242+ if (path_is_under(&path, &currentroot))
51243+ ret = 1;
51244+ path_put(&currentroot);
51245+
51246+ return ret;
51247+}
51248+#endif
51249+
51250+int
51251+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
51252+{
51253+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
51254+ if (!grsec_enable_chroot_fchdir)
51255+ return 1;
51256+
51257+ if (!proc_is_chrooted(current))
51258+ return 1;
51259+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
51260+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
51261+ return 0;
51262+ }
51263+#endif
51264+ return 1;
51265+}
51266+
51267+int
51268+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
51269+ const time_t shm_createtime)
51270+{
51271+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
51272+ struct task_struct *p;
51273+ time_t starttime;
51274+
51275+ if (unlikely(!grsec_enable_chroot_shmat))
51276+ return 1;
51277+
51278+ if (likely(!proc_is_chrooted(current)))
51279+ return 1;
51280+
51281+ rcu_read_lock();
51282+ read_lock(&tasklist_lock);
51283+
51284+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
51285+ starttime = p->start_time.tv_sec;
51286+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
51287+ if (have_same_root(current, p)) {
51288+ goto allow;
51289+ } else {
51290+ read_unlock(&tasklist_lock);
51291+ rcu_read_unlock();
51292+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
51293+ return 0;
51294+ }
51295+ }
51296+ /* creator exited, pid reuse, fall through to next check */
51297+ }
51298+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
51299+ if (unlikely(!have_same_root(current, p))) {
51300+ read_unlock(&tasklist_lock);
51301+ rcu_read_unlock();
51302+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
51303+ return 0;
51304+ }
51305+ }
51306+
51307+allow:
51308+ read_unlock(&tasklist_lock);
51309+ rcu_read_unlock();
51310+#endif
51311+ return 1;
51312+}
51313+
51314+void
51315+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
51316+{
51317+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
51318+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
51319+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
51320+#endif
51321+ return;
51322+}
51323+
51324+int
51325+gr_handle_chroot_mknod(const struct dentry *dentry,
51326+ const struct vfsmount *mnt, const int mode)
51327+{
51328+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
51329+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
51330+ proc_is_chrooted(current)) {
51331+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
51332+ return -EPERM;
51333+ }
51334+#endif
51335+ return 0;
51336+}
51337+
51338+int
51339+gr_handle_chroot_mount(const struct dentry *dentry,
51340+ const struct vfsmount *mnt, const char *dev_name)
51341+{
51342+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
51343+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
51344+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
51345+ return -EPERM;
51346+ }
51347+#endif
51348+ return 0;
51349+}
51350+
51351+int
51352+gr_handle_chroot_pivot(void)
51353+{
51354+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
51355+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
51356+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
51357+ return -EPERM;
51358+ }
51359+#endif
51360+ return 0;
51361+}
51362+
51363+int
51364+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
51365+{
51366+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
51367+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
51368+ !gr_is_outside_chroot(dentry, mnt)) {
51369+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
51370+ return -EPERM;
51371+ }
51372+#endif
51373+ return 0;
51374+}
51375+
51376+extern const char *captab_log[];
51377+extern int captab_log_entries;
51378+
51379+int
51380+gr_chroot_is_capable(const int cap)
51381+{
51382+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
51383+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
51384+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
51385+ if (cap_raised(chroot_caps, cap)) {
51386+ const struct cred *creds = current_cred();
51387+ if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
51388+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
51389+ }
51390+ return 0;
51391+ }
51392+ }
51393+#endif
51394+ return 1;
51395+}
51396+
51397+int
51398+gr_chroot_is_capable_nolog(const int cap)
51399+{
51400+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
51401+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
51402+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
51403+ if (cap_raised(chroot_caps, cap)) {
51404+ return 0;
51405+ }
51406+ }
51407+#endif
51408+ return 1;
51409+}
51410+
51411+int
51412+gr_handle_chroot_sysctl(const int op)
51413+{
51414+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
51415+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
51416+ proc_is_chrooted(current))
51417+ return -EACCES;
51418+#endif
51419+ return 0;
51420+}
51421+
51422+void
51423+gr_handle_chroot_chdir(struct path *path)
51424+{
51425+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
51426+ if (grsec_enable_chroot_chdir)
51427+ set_fs_pwd(current->fs, path);
51428+#endif
51429+ return;
51430+}
51431+
51432+int
51433+gr_handle_chroot_chmod(const struct dentry *dentry,
51434+ const struct vfsmount *mnt, const int mode)
51435+{
51436+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
51437+ /* allow chmod +s on directories, but not files */
51438+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
51439+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
51440+ proc_is_chrooted(current)) {
51441+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
51442+ return -EPERM;
51443+ }
51444+#endif
51445+ return 0;
51446+}
51447diff -urNp linux-3.0.4/grsecurity/grsec_disabled.c linux-3.0.4/grsecurity/grsec_disabled.c
51448--- linux-3.0.4/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
51449+++ linux-3.0.4/grsecurity/grsec_disabled.c 2011-09-24 08:13:01.000000000 -0400
51450@@ -0,0 +1,433 @@
51451+#include <linux/kernel.h>
51452+#include <linux/module.h>
51453+#include <linux/sched.h>
51454+#include <linux/file.h>
51455+#include <linux/fs.h>
51456+#include <linux/kdev_t.h>
51457+#include <linux/net.h>
51458+#include <linux/in.h>
51459+#include <linux/ip.h>
51460+#include <linux/skbuff.h>
51461+#include <linux/sysctl.h>
51462+
51463+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
51464+void
51465+pax_set_initial_flags(struct linux_binprm *bprm)
51466+{
51467+ return;
51468+}
51469+#endif
51470+
51471+#ifdef CONFIG_SYSCTL
51472+__u32
51473+gr_handle_sysctl(const struct ctl_table * table, const int op)
51474+{
51475+ return 0;
51476+}
51477+#endif
51478+
51479+#ifdef CONFIG_TASKSTATS
51480+int gr_is_taskstats_denied(int pid)
51481+{
51482+ return 0;
51483+}
51484+#endif
51485+
51486+int
51487+gr_acl_is_enabled(void)
51488+{
51489+ return 0;
51490+}
51491+
51492+int
51493+gr_handle_rawio(const struct inode *inode)
51494+{
51495+ return 0;
51496+}
51497+
51498+void
51499+gr_acl_handle_psacct(struct task_struct *task, const long code)
51500+{
51501+ return;
51502+}
51503+
51504+int
51505+gr_handle_ptrace(struct task_struct *task, const long request)
51506+{
51507+ return 0;
51508+}
51509+
51510+int
51511+gr_handle_proc_ptrace(struct task_struct *task)
51512+{
51513+ return 0;
51514+}
51515+
51516+void
51517+gr_learn_resource(const struct task_struct *task,
51518+ const int res, const unsigned long wanted, const int gt)
51519+{
51520+ return;
51521+}
51522+
51523+int
51524+gr_set_acls(const int type)
51525+{
51526+ return 0;
51527+}
51528+
51529+int
51530+gr_check_hidden_task(const struct task_struct *tsk)
51531+{
51532+ return 0;
51533+}
51534+
51535+int
51536+gr_check_protected_task(const struct task_struct *task)
51537+{
51538+ return 0;
51539+}
51540+
51541+int
51542+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
51543+{
51544+ return 0;
51545+}
51546+
51547+void
51548+gr_copy_label(struct task_struct *tsk)
51549+{
51550+ return;
51551+}
51552+
51553+void
51554+gr_set_pax_flags(struct task_struct *task)
51555+{
51556+ return;
51557+}
51558+
51559+int
51560+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
51561+ const int unsafe_share)
51562+{
51563+ return 0;
51564+}
51565+
51566+void
51567+gr_handle_delete(const ino_t ino, const dev_t dev)
51568+{
51569+ return;
51570+}
51571+
51572+void
51573+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
51574+{
51575+ return;
51576+}
51577+
51578+void
51579+gr_handle_crash(struct task_struct *task, const int sig)
51580+{
51581+ return;
51582+}
51583+
51584+int
51585+gr_check_crash_exec(const struct file *filp)
51586+{
51587+ return 0;
51588+}
51589+
51590+int
51591+gr_check_crash_uid(const uid_t uid)
51592+{
51593+ return 0;
51594+}
51595+
51596+void
51597+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
51598+ struct dentry *old_dentry,
51599+ struct dentry *new_dentry,
51600+ struct vfsmount *mnt, const __u8 replace)
51601+{
51602+ return;
51603+}
51604+
51605+int
51606+gr_search_socket(const int family, const int type, const int protocol)
51607+{
51608+ return 1;
51609+}
51610+
51611+int
51612+gr_search_connectbind(const int mode, const struct socket *sock,
51613+ const struct sockaddr_in *addr)
51614+{
51615+ return 0;
51616+}
51617+
51618+void
51619+gr_handle_alertkill(struct task_struct *task)
51620+{
51621+ return;
51622+}
51623+
51624+__u32
51625+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
51626+{
51627+ return 1;
51628+}
51629+
51630+__u32
51631+gr_acl_handle_hidden_file(const struct dentry * dentry,
51632+ const struct vfsmount * mnt)
51633+{
51634+ return 1;
51635+}
51636+
51637+__u32
51638+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
51639+ const int fmode)
51640+{
51641+ return 1;
51642+}
51643+
51644+__u32
51645+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
51646+{
51647+ return 1;
51648+}
51649+
51650+__u32
51651+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
51652+{
51653+ return 1;
51654+}
51655+
51656+int
51657+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
51658+ unsigned int *vm_flags)
51659+{
51660+ return 1;
51661+}
51662+
51663+__u32
51664+gr_acl_handle_truncate(const struct dentry * dentry,
51665+ const struct vfsmount * mnt)
51666+{
51667+ return 1;
51668+}
51669+
51670+__u32
51671+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
51672+{
51673+ return 1;
51674+}
51675+
51676+__u32
51677+gr_acl_handle_access(const struct dentry * dentry,
51678+ const struct vfsmount * mnt, const int fmode)
51679+{
51680+ return 1;
51681+}
51682+
51683+__u32
51684+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
51685+ mode_t mode)
51686+{
51687+ return 1;
51688+}
51689+
51690+__u32
51691+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
51692+ mode_t mode)
51693+{
51694+ return 1;
51695+}
51696+
51697+__u32
51698+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
51699+{
51700+ return 1;
51701+}
51702+
51703+__u32
51704+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
51705+{
51706+ return 1;
51707+}
51708+
51709+void
51710+grsecurity_init(void)
51711+{
51712+ return;
51713+}
51714+
51715+__u32
51716+gr_acl_handle_mknod(const struct dentry * new_dentry,
51717+ const struct dentry * parent_dentry,
51718+ const struct vfsmount * parent_mnt,
51719+ const int mode)
51720+{
51721+ return 1;
51722+}
51723+
51724+__u32
51725+gr_acl_handle_mkdir(const struct dentry * new_dentry,
51726+ const struct dentry * parent_dentry,
51727+ const struct vfsmount * parent_mnt)
51728+{
51729+ return 1;
51730+}
51731+
51732+__u32
51733+gr_acl_handle_symlink(const struct dentry * new_dentry,
51734+ const struct dentry * parent_dentry,
51735+ const struct vfsmount * parent_mnt, const char *from)
51736+{
51737+ return 1;
51738+}
51739+
51740+__u32
51741+gr_acl_handle_link(const struct dentry * new_dentry,
51742+ const struct dentry * parent_dentry,
51743+ const struct vfsmount * parent_mnt,
51744+ const struct dentry * old_dentry,
51745+ const struct vfsmount * old_mnt, const char *to)
51746+{
51747+ return 1;
51748+}
51749+
51750+int
51751+gr_acl_handle_rename(const struct dentry *new_dentry,
51752+ const struct dentry *parent_dentry,
51753+ const struct vfsmount *parent_mnt,
51754+ const struct dentry *old_dentry,
51755+ const struct inode *old_parent_inode,
51756+ const struct vfsmount *old_mnt, const char *newname)
51757+{
51758+ return 0;
51759+}
51760+
51761+int
51762+gr_acl_handle_filldir(const struct file *file, const char *name,
51763+ const int namelen, const ino_t ino)
51764+{
51765+ return 1;
51766+}
51767+
51768+int
51769+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
51770+ const time_t shm_createtime, const uid_t cuid, const int shmid)
51771+{
51772+ return 1;
51773+}
51774+
51775+int
51776+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
51777+{
51778+ return 0;
51779+}
51780+
51781+int
51782+gr_search_accept(const struct socket *sock)
51783+{
51784+ return 0;
51785+}
51786+
51787+int
51788+gr_search_listen(const struct socket *sock)
51789+{
51790+ return 0;
51791+}
51792+
51793+int
51794+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
51795+{
51796+ return 0;
51797+}
51798+
51799+__u32
51800+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
51801+{
51802+ return 1;
51803+}
51804+
51805+__u32
51806+gr_acl_handle_creat(const struct dentry * dentry,
51807+ const struct dentry * p_dentry,
51808+ const struct vfsmount * p_mnt, const int fmode,
51809+ const int imode)
51810+{
51811+ return 1;
51812+}
51813+
51814+void
51815+gr_acl_handle_exit(void)
51816+{
51817+ return;
51818+}
51819+
51820+int
51821+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
51822+{
51823+ return 1;
51824+}
51825+
51826+void
51827+gr_set_role_label(const uid_t uid, const gid_t gid)
51828+{
51829+ return;
51830+}
51831+
51832+int
51833+gr_acl_handle_procpidmem(const struct task_struct *task)
51834+{
51835+ return 0;
51836+}
51837+
51838+int
51839+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
51840+{
51841+ return 0;
51842+}
51843+
51844+int
51845+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
51846+{
51847+ return 0;
51848+}
51849+
51850+void
51851+gr_set_kernel_label(struct task_struct *task)
51852+{
51853+ return;
51854+}
51855+
51856+int
51857+gr_check_user_change(int real, int effective, int fs)
51858+{
51859+ return 0;
51860+}
51861+
51862+int
51863+gr_check_group_change(int real, int effective, int fs)
51864+{
51865+ return 0;
51866+}
51867+
51868+int gr_acl_enable_at_secure(void)
51869+{
51870+ return 0;
51871+}
51872+
51873+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
51874+{
51875+ return dentry->d_inode->i_sb->s_dev;
51876+}
51877+
51878+EXPORT_SYMBOL(gr_learn_resource);
51879+EXPORT_SYMBOL(gr_set_kernel_label);
51880+#ifdef CONFIG_SECURITY
51881+EXPORT_SYMBOL(gr_check_user_change);
51882+EXPORT_SYMBOL(gr_check_group_change);
51883+#endif
51884diff -urNp linux-3.0.4/grsecurity/grsec_exec.c linux-3.0.4/grsecurity/grsec_exec.c
51885--- linux-3.0.4/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
51886+++ linux-3.0.4/grsecurity/grsec_exec.c 2011-09-14 09:20:28.000000000 -0400
51887@@ -0,0 +1,145 @@
51888+#include <linux/kernel.h>
51889+#include <linux/sched.h>
51890+#include <linux/file.h>
51891+#include <linux/binfmts.h>
51892+#include <linux/fs.h>
51893+#include <linux/types.h>
51894+#include <linux/grdefs.h>
51895+#include <linux/grsecurity.h>
51896+#include <linux/grinternal.h>
51897+#include <linux/capability.h>
51898+#include <linux/module.h>
51899+
51900+#include <asm/uaccess.h>
51901+
51902+#ifdef CONFIG_GRKERNSEC_EXECLOG
51903+static char gr_exec_arg_buf[132];
51904+static DEFINE_MUTEX(gr_exec_arg_mutex);
51905+#endif
51906+
51907+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
51908+
51909+void
51910+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
51911+{
51912+#ifdef CONFIG_GRKERNSEC_EXECLOG
51913+ char *grarg = gr_exec_arg_buf;
51914+ unsigned int i, x, execlen = 0;
51915+ char c;
51916+
51917+ if (!((grsec_enable_execlog && grsec_enable_group &&
51918+ in_group_p(grsec_audit_gid))
51919+ || (grsec_enable_execlog && !grsec_enable_group)))
51920+ return;
51921+
51922+ mutex_lock(&gr_exec_arg_mutex);
51923+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
51924+
51925+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
51926+ const char __user *p;
51927+ unsigned int len;
51928+
51929+ p = get_user_arg_ptr(argv, i);
51930+ if (IS_ERR(p))
51931+ goto log;
51932+
51933+ len = strnlen_user(p, 128 - execlen);
51934+ if (len > 128 - execlen)
51935+ len = 128 - execlen;
51936+ else if (len > 0)
51937+ len--;
51938+ if (copy_from_user(grarg + execlen, p, len))
51939+ goto log;
51940+
51941+ /* rewrite unprintable characters */
51942+ for (x = 0; x < len; x++) {
51943+ c = *(grarg + execlen + x);
51944+ if (c < 32 || c > 126)
51945+ *(grarg + execlen + x) = ' ';
51946+ }
51947+
51948+ execlen += len;
51949+ *(grarg + execlen) = ' ';
51950+ *(grarg + execlen + 1) = '\0';
51951+ execlen++;
51952+ }
51953+
51954+ log:
51955+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
51956+ bprm->file->f_path.mnt, grarg);
51957+ mutex_unlock(&gr_exec_arg_mutex);
51958+#endif
51959+ return;
51960+}
51961+
51962+#ifdef CONFIG_GRKERNSEC
51963+extern int gr_acl_is_capable(const int cap);
51964+extern int gr_acl_is_capable_nolog(const int cap);
51965+extern int gr_chroot_is_capable(const int cap);
51966+extern int gr_chroot_is_capable_nolog(const int cap);
51967+#endif
51968+
51969+const char *captab_log[] = {
51970+ "CAP_CHOWN",
51971+ "CAP_DAC_OVERRIDE",
51972+ "CAP_DAC_READ_SEARCH",
51973+ "CAP_FOWNER",
51974+ "CAP_FSETID",
51975+ "CAP_KILL",
51976+ "CAP_SETGID",
51977+ "CAP_SETUID",
51978+ "CAP_SETPCAP",
51979+ "CAP_LINUX_IMMUTABLE",
51980+ "CAP_NET_BIND_SERVICE",
51981+ "CAP_NET_BROADCAST",
51982+ "CAP_NET_ADMIN",
51983+ "CAP_NET_RAW",
51984+ "CAP_IPC_LOCK",
51985+ "CAP_IPC_OWNER",
51986+ "CAP_SYS_MODULE",
51987+ "CAP_SYS_RAWIO",
51988+ "CAP_SYS_CHROOT",
51989+ "CAP_SYS_PTRACE",
51990+ "CAP_SYS_PACCT",
51991+ "CAP_SYS_ADMIN",
51992+ "CAP_SYS_BOOT",
51993+ "CAP_SYS_NICE",
51994+ "CAP_SYS_RESOURCE",
51995+ "CAP_SYS_TIME",
51996+ "CAP_SYS_TTY_CONFIG",
51997+ "CAP_MKNOD",
51998+ "CAP_LEASE",
51999+ "CAP_AUDIT_WRITE",
52000+ "CAP_AUDIT_CONTROL",
52001+ "CAP_SETFCAP",
52002+ "CAP_MAC_OVERRIDE",
52003+ "CAP_MAC_ADMIN",
52004+ "CAP_SYSLOG"
52005+};
52006+
52007+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
52008+
52009+int gr_is_capable(const int cap)
52010+{
52011+#ifdef CONFIG_GRKERNSEC
52012+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
52013+ return 1;
52014+ return 0;
52015+#else
52016+ return 1;
52017+#endif
52018+}
52019+
52020+int gr_is_capable_nolog(const int cap)
52021+{
52022+#ifdef CONFIG_GRKERNSEC
52023+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
52024+ return 1;
52025+ return 0;
52026+#else
52027+ return 1;
52028+#endif
52029+}
52030+
52031+EXPORT_SYMBOL(gr_is_capable);
52032+EXPORT_SYMBOL(gr_is_capable_nolog);
52033diff -urNp linux-3.0.4/grsecurity/grsec_fifo.c linux-3.0.4/grsecurity/grsec_fifo.c
52034--- linux-3.0.4/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
52035+++ linux-3.0.4/grsecurity/grsec_fifo.c 2011-08-23 21:48:14.000000000 -0400
52036@@ -0,0 +1,24 @@
52037+#include <linux/kernel.h>
52038+#include <linux/sched.h>
52039+#include <linux/fs.h>
52040+#include <linux/file.h>
52041+#include <linux/grinternal.h>
52042+
52043+int
52044+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
52045+ const struct dentry *dir, const int flag, const int acc_mode)
52046+{
52047+#ifdef CONFIG_GRKERNSEC_FIFO
52048+ const struct cred *cred = current_cred();
52049+
52050+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
52051+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
52052+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
52053+ (cred->fsuid != dentry->d_inode->i_uid)) {
52054+ if (!inode_permission(dentry->d_inode, acc_mode))
52055+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
52056+ return -EACCES;
52057+ }
52058+#endif
52059+ return 0;
52060+}
52061diff -urNp linux-3.0.4/grsecurity/grsec_fork.c linux-3.0.4/grsecurity/grsec_fork.c
52062--- linux-3.0.4/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
52063+++ linux-3.0.4/grsecurity/grsec_fork.c 2011-08-23 21:48:14.000000000 -0400
52064@@ -0,0 +1,23 @@
52065+#include <linux/kernel.h>
52066+#include <linux/sched.h>
52067+#include <linux/grsecurity.h>
52068+#include <linux/grinternal.h>
52069+#include <linux/errno.h>
52070+
52071+void
52072+gr_log_forkfail(const int retval)
52073+{
52074+#ifdef CONFIG_GRKERNSEC_FORKFAIL
52075+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
52076+ switch (retval) {
52077+ case -EAGAIN:
52078+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
52079+ break;
52080+ case -ENOMEM:
52081+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
52082+ break;
52083+ }
52084+ }
52085+#endif
52086+ return;
52087+}
52088diff -urNp linux-3.0.4/grsecurity/grsec_init.c linux-3.0.4/grsecurity/grsec_init.c
52089--- linux-3.0.4/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
52090+++ linux-3.0.4/grsecurity/grsec_init.c 2011-08-25 17:25:12.000000000 -0400
52091@@ -0,0 +1,269 @@
52092+#include <linux/kernel.h>
52093+#include <linux/sched.h>
52094+#include <linux/mm.h>
52095+#include <linux/gracl.h>
52096+#include <linux/slab.h>
52097+#include <linux/vmalloc.h>
52098+#include <linux/percpu.h>
52099+#include <linux/module.h>
52100+
52101+int grsec_enable_brute;
52102+int grsec_enable_link;
52103+int grsec_enable_dmesg;
52104+int grsec_enable_harden_ptrace;
52105+int grsec_enable_fifo;
52106+int grsec_enable_execlog;
52107+int grsec_enable_signal;
52108+int grsec_enable_forkfail;
52109+int grsec_enable_audit_ptrace;
52110+int grsec_enable_time;
52111+int grsec_enable_audit_textrel;
52112+int grsec_enable_group;
52113+int grsec_audit_gid;
52114+int grsec_enable_chdir;
52115+int grsec_enable_mount;
52116+int grsec_enable_rofs;
52117+int grsec_enable_chroot_findtask;
52118+int grsec_enable_chroot_mount;
52119+int grsec_enable_chroot_shmat;
52120+int grsec_enable_chroot_fchdir;
52121+int grsec_enable_chroot_double;
52122+int grsec_enable_chroot_pivot;
52123+int grsec_enable_chroot_chdir;
52124+int grsec_enable_chroot_chmod;
52125+int grsec_enable_chroot_mknod;
52126+int grsec_enable_chroot_nice;
52127+int grsec_enable_chroot_execlog;
52128+int grsec_enable_chroot_caps;
52129+int grsec_enable_chroot_sysctl;
52130+int grsec_enable_chroot_unix;
52131+int grsec_enable_tpe;
52132+int grsec_tpe_gid;
52133+int grsec_enable_blackhole;
52134+#ifdef CONFIG_IPV6_MODULE
52135+EXPORT_SYMBOL(grsec_enable_blackhole);
52136+#endif
52137+int grsec_lastack_retries;
52138+int grsec_enable_tpe_all;
52139+int grsec_enable_tpe_invert;
52140+int grsec_enable_socket_all;
52141+int grsec_socket_all_gid;
52142+int grsec_enable_socket_client;
52143+int grsec_socket_client_gid;
52144+int grsec_enable_socket_server;
52145+int grsec_socket_server_gid;
52146+int grsec_resource_logging;
52147+int grsec_disable_privio;
52148+int grsec_enable_log_rwxmaps;
52149+int grsec_lock;
52150+
52151+DEFINE_SPINLOCK(grsec_alert_lock);
52152+unsigned long grsec_alert_wtime = 0;
52153+unsigned long grsec_alert_fyet = 0;
52154+
52155+DEFINE_SPINLOCK(grsec_audit_lock);
52156+
52157+DEFINE_RWLOCK(grsec_exec_file_lock);
52158+
52159+char *gr_shared_page[4];
52160+
52161+char *gr_alert_log_fmt;
52162+char *gr_audit_log_fmt;
52163+char *gr_alert_log_buf;
52164+char *gr_audit_log_buf;
52165+
52166+extern struct gr_arg *gr_usermode;
52167+extern unsigned char *gr_system_salt;
52168+extern unsigned char *gr_system_sum;
52169+
52170+void __init
52171+grsecurity_init(void)
52172+{
52173+ int j;
52174+ /* create the per-cpu shared pages */
52175+
52176+#ifdef CONFIG_X86
52177+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
52178+#endif
52179+
52180+ for (j = 0; j < 4; j++) {
52181+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
52182+ if (gr_shared_page[j] == NULL) {
52183+ panic("Unable to allocate grsecurity shared page");
52184+ return;
52185+ }
52186+ }
52187+
52188+ /* allocate log buffers */
52189+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
52190+ if (!gr_alert_log_fmt) {
52191+ panic("Unable to allocate grsecurity alert log format buffer");
52192+ return;
52193+ }
52194+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
52195+ if (!gr_audit_log_fmt) {
52196+ panic("Unable to allocate grsecurity audit log format buffer");
52197+ return;
52198+ }
52199+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
52200+ if (!gr_alert_log_buf) {
52201+ panic("Unable to allocate grsecurity alert log buffer");
52202+ return;
52203+ }
52204+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
52205+ if (!gr_audit_log_buf) {
52206+ panic("Unable to allocate grsecurity audit log buffer");
52207+ return;
52208+ }
52209+
52210+ /* allocate memory for authentication structure */
52211+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
52212+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
52213+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
52214+
52215+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
52216+ panic("Unable to allocate grsecurity authentication structure");
52217+ return;
52218+ }
52219+
52220+
52221+#ifdef CONFIG_GRKERNSEC_IO
52222+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
52223+ grsec_disable_privio = 1;
52224+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
52225+ grsec_disable_privio = 1;
52226+#else
52227+ grsec_disable_privio = 0;
52228+#endif
52229+#endif
52230+
52231+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
52232+ /* for backward compatibility, tpe_invert always defaults to on if
52233+ enabled in the kernel
52234+ */
52235+ grsec_enable_tpe_invert = 1;
52236+#endif
52237+
52238+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
52239+#ifndef CONFIG_GRKERNSEC_SYSCTL
52240+ grsec_lock = 1;
52241+#endif
52242+
52243+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
52244+ grsec_enable_audit_textrel = 1;
52245+#endif
52246+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
52247+ grsec_enable_log_rwxmaps = 1;
52248+#endif
52249+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
52250+ grsec_enable_group = 1;
52251+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
52252+#endif
52253+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
52254+ grsec_enable_chdir = 1;
52255+#endif
52256+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
52257+ grsec_enable_harden_ptrace = 1;
52258+#endif
52259+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
52260+ grsec_enable_mount = 1;
52261+#endif
52262+#ifdef CONFIG_GRKERNSEC_LINK
52263+ grsec_enable_link = 1;
52264+#endif
52265+#ifdef CONFIG_GRKERNSEC_BRUTE
52266+ grsec_enable_brute = 1;
52267+#endif
52268+#ifdef CONFIG_GRKERNSEC_DMESG
52269+ grsec_enable_dmesg = 1;
52270+#endif
52271+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
52272+ grsec_enable_blackhole = 1;
52273+ grsec_lastack_retries = 4;
52274+#endif
52275+#ifdef CONFIG_GRKERNSEC_FIFO
52276+ grsec_enable_fifo = 1;
52277+#endif
52278+#ifdef CONFIG_GRKERNSEC_EXECLOG
52279+ grsec_enable_execlog = 1;
52280+#endif
52281+#ifdef CONFIG_GRKERNSEC_SIGNAL
52282+ grsec_enable_signal = 1;
52283+#endif
52284+#ifdef CONFIG_GRKERNSEC_FORKFAIL
52285+ grsec_enable_forkfail = 1;
52286+#endif
52287+#ifdef CONFIG_GRKERNSEC_TIME
52288+ grsec_enable_time = 1;
52289+#endif
52290+#ifdef CONFIG_GRKERNSEC_RESLOG
52291+ grsec_resource_logging = 1;
52292+#endif
52293+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52294+ grsec_enable_chroot_findtask = 1;
52295+#endif
52296+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
52297+ grsec_enable_chroot_unix = 1;
52298+#endif
52299+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
52300+ grsec_enable_chroot_mount = 1;
52301+#endif
52302+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
52303+ grsec_enable_chroot_fchdir = 1;
52304+#endif
52305+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
52306+ grsec_enable_chroot_shmat = 1;
52307+#endif
52308+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
52309+ grsec_enable_audit_ptrace = 1;
52310+#endif
52311+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
52312+ grsec_enable_chroot_double = 1;
52313+#endif
52314+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
52315+ grsec_enable_chroot_pivot = 1;
52316+#endif
52317+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
52318+ grsec_enable_chroot_chdir = 1;
52319+#endif
52320+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
52321+ grsec_enable_chroot_chmod = 1;
52322+#endif
52323+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
52324+ grsec_enable_chroot_mknod = 1;
52325+#endif
52326+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
52327+ grsec_enable_chroot_nice = 1;
52328+#endif
52329+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
52330+ grsec_enable_chroot_execlog = 1;
52331+#endif
52332+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52333+ grsec_enable_chroot_caps = 1;
52334+#endif
52335+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
52336+ grsec_enable_chroot_sysctl = 1;
52337+#endif
52338+#ifdef CONFIG_GRKERNSEC_TPE
52339+ grsec_enable_tpe = 1;
52340+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
52341+#ifdef CONFIG_GRKERNSEC_TPE_ALL
52342+ grsec_enable_tpe_all = 1;
52343+#endif
52344+#endif
52345+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
52346+ grsec_enable_socket_all = 1;
52347+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
52348+#endif
52349+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
52350+ grsec_enable_socket_client = 1;
52351+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
52352+#endif
52353+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
52354+ grsec_enable_socket_server = 1;
52355+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
52356+#endif
52357+#endif
52358+
52359+ return;
52360+}
52361diff -urNp linux-3.0.4/grsecurity/grsec_link.c linux-3.0.4/grsecurity/grsec_link.c
52362--- linux-3.0.4/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
52363+++ linux-3.0.4/grsecurity/grsec_link.c 2011-08-23 21:48:14.000000000 -0400
52364@@ -0,0 +1,43 @@
52365+#include <linux/kernel.h>
52366+#include <linux/sched.h>
52367+#include <linux/fs.h>
52368+#include <linux/file.h>
52369+#include <linux/grinternal.h>
52370+
52371+int
52372+gr_handle_follow_link(const struct inode *parent,
52373+ const struct inode *inode,
52374+ const struct dentry *dentry, const struct vfsmount *mnt)
52375+{
52376+#ifdef CONFIG_GRKERNSEC_LINK
52377+ const struct cred *cred = current_cred();
52378+
52379+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
52380+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
52381+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
52382+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
52383+ return -EACCES;
52384+ }
52385+#endif
52386+ return 0;
52387+}
52388+
52389+int
52390+gr_handle_hardlink(const struct dentry *dentry,
52391+ const struct vfsmount *mnt,
52392+ struct inode *inode, const int mode, const char *to)
52393+{
52394+#ifdef CONFIG_GRKERNSEC_LINK
52395+ const struct cred *cred = current_cred();
52396+
52397+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
52398+ (!S_ISREG(mode) || (mode & S_ISUID) ||
52399+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
52400+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
52401+ !capable(CAP_FOWNER) && cred->uid) {
52402+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
52403+ return -EPERM;
52404+ }
52405+#endif
52406+ return 0;
52407+}
52408diff -urNp linux-3.0.4/grsecurity/grsec_log.c linux-3.0.4/grsecurity/grsec_log.c
52409--- linux-3.0.4/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
52410+++ linux-3.0.4/grsecurity/grsec_log.c 2011-09-26 10:46:21.000000000 -0400
52411@@ -0,0 +1,315 @@
52412+#include <linux/kernel.h>
52413+#include <linux/sched.h>
52414+#include <linux/file.h>
52415+#include <linux/tty.h>
52416+#include <linux/fs.h>
52417+#include <linux/grinternal.h>
52418+
52419+#ifdef CONFIG_TREE_PREEMPT_RCU
52420+#define DISABLE_PREEMPT() preempt_disable()
52421+#define ENABLE_PREEMPT() preempt_enable()
52422+#else
52423+#define DISABLE_PREEMPT()
52424+#define ENABLE_PREEMPT()
52425+#endif
52426+
52427+#define BEGIN_LOCKS(x) \
52428+ DISABLE_PREEMPT(); \
52429+ rcu_read_lock(); \
52430+ read_lock(&tasklist_lock); \
52431+ read_lock(&grsec_exec_file_lock); \
52432+ if (x != GR_DO_AUDIT) \
52433+ spin_lock(&grsec_alert_lock); \
52434+ else \
52435+ spin_lock(&grsec_audit_lock)
52436+
52437+#define END_LOCKS(x) \
52438+ if (x != GR_DO_AUDIT) \
52439+ spin_unlock(&grsec_alert_lock); \
52440+ else \
52441+ spin_unlock(&grsec_audit_lock); \
52442+ read_unlock(&grsec_exec_file_lock); \
52443+ read_unlock(&tasklist_lock); \
52444+ rcu_read_unlock(); \
52445+ ENABLE_PREEMPT(); \
52446+ if (x == GR_DONT_AUDIT) \
52447+ gr_handle_alertkill(current)
52448+
52449+enum {
52450+ FLOODING,
52451+ NO_FLOODING
52452+};
52453+
52454+extern char *gr_alert_log_fmt;
52455+extern char *gr_audit_log_fmt;
52456+extern char *gr_alert_log_buf;
52457+extern char *gr_audit_log_buf;
52458+
52459+static int gr_log_start(int audit)
52460+{
52461+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
52462+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
52463+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
52464+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
52465+ unsigned long curr_secs = get_seconds();
52466+
52467+ if (audit == GR_DO_AUDIT)
52468+ goto set_fmt;
52469+
52470+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
52471+ grsec_alert_wtime = curr_secs;
52472+ grsec_alert_fyet = 0;
52473+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
52474+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
52475+ grsec_alert_fyet++;
52476+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
52477+ grsec_alert_wtime = curr_secs;
52478+ grsec_alert_fyet++;
52479+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
52480+ return FLOODING;
52481+ }
52482+ else return FLOODING;
52483+
52484+set_fmt:
52485+#endif
52486+ memset(buf, 0, PAGE_SIZE);
52487+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
52488+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
52489+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
52490+ } else if (current->signal->curr_ip) {
52491+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
52492+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
52493+ } else if (gr_acl_is_enabled()) {
52494+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
52495+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
52496+ } else {
52497+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
52498+ strcpy(buf, fmt);
52499+ }
52500+
52501+ return NO_FLOODING;
52502+}
52503+
52504+static void gr_log_middle(int audit, const char *msg, va_list ap)
52505+ __attribute__ ((format (printf, 2, 0)));
52506+
52507+static void gr_log_middle(int audit, const char *msg, va_list ap)
52508+{
52509+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
52510+ unsigned int len = strlen(buf);
52511+
52512+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
52513+
52514+ return;
52515+}
52516+
52517+static void gr_log_middle_varargs(int audit, const char *msg, ...)
52518+ __attribute__ ((format (printf, 2, 3)));
52519+
52520+static void gr_log_middle_varargs(int audit, const char *msg, ...)
52521+{
52522+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
52523+ unsigned int len = strlen(buf);
52524+ va_list ap;
52525+
52526+ va_start(ap, msg);
52527+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
52528+ va_end(ap);
52529+
52530+ return;
52531+}
52532+
52533+static void gr_log_end(int audit)
52534+{
52535+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
52536+ unsigned int len = strlen(buf);
52537+
52538+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
52539+ printk("%s\n", buf);
52540+
52541+ return;
52542+}
52543+
52544+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
52545+{
52546+ int logtype;
52547+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
52548+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
52549+ void *voidptr = NULL;
52550+ int num1 = 0, num2 = 0;
52551+ unsigned long ulong1 = 0, ulong2 = 0;
52552+ struct dentry *dentry = NULL;
52553+ struct vfsmount *mnt = NULL;
52554+ struct file *file = NULL;
52555+ struct task_struct *task = NULL;
52556+ const struct cred *cred, *pcred;
52557+ va_list ap;
52558+
52559+ BEGIN_LOCKS(audit);
52560+ logtype = gr_log_start(audit);
52561+ if (logtype == FLOODING) {
52562+ END_LOCKS(audit);
52563+ return;
52564+ }
52565+ va_start(ap, argtypes);
52566+ switch (argtypes) {
52567+ case GR_TTYSNIFF:
52568+ task = va_arg(ap, struct task_struct *);
52569+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
52570+ break;
52571+ case GR_SYSCTL_HIDDEN:
52572+ str1 = va_arg(ap, char *);
52573+ gr_log_middle_varargs(audit, msg, result, str1);
52574+ break;
52575+ case GR_RBAC:
52576+ dentry = va_arg(ap, struct dentry *);
52577+ mnt = va_arg(ap, struct vfsmount *);
52578+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
52579+ break;
52580+ case GR_RBAC_STR:
52581+ dentry = va_arg(ap, struct dentry *);
52582+ mnt = va_arg(ap, struct vfsmount *);
52583+ str1 = va_arg(ap, char *);
52584+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
52585+ break;
52586+ case GR_STR_RBAC:
52587+ str1 = va_arg(ap, char *);
52588+ dentry = va_arg(ap, struct dentry *);
52589+ mnt = va_arg(ap, struct vfsmount *);
52590+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
52591+ break;
52592+ case GR_RBAC_MODE2:
52593+ dentry = va_arg(ap, struct dentry *);
52594+ mnt = va_arg(ap, struct vfsmount *);
52595+ str1 = va_arg(ap, char *);
52596+ str2 = va_arg(ap, char *);
52597+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
52598+ break;
52599+ case GR_RBAC_MODE3:
52600+ dentry = va_arg(ap, struct dentry *);
52601+ mnt = va_arg(ap, struct vfsmount *);
52602+ str1 = va_arg(ap, char *);
52603+ str2 = va_arg(ap, char *);
52604+ str3 = va_arg(ap, char *);
52605+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
52606+ break;
52607+ case GR_FILENAME:
52608+ dentry = va_arg(ap, struct dentry *);
52609+ mnt = va_arg(ap, struct vfsmount *);
52610+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
52611+ break;
52612+ case GR_STR_FILENAME:
52613+ str1 = va_arg(ap, char *);
52614+ dentry = va_arg(ap, struct dentry *);
52615+ mnt = va_arg(ap, struct vfsmount *);
52616+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
52617+ break;
52618+ case GR_FILENAME_STR:
52619+ dentry = va_arg(ap, struct dentry *);
52620+ mnt = va_arg(ap, struct vfsmount *);
52621+ str1 = va_arg(ap, char *);
52622+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
52623+ break;
52624+ case GR_FILENAME_TWO_INT:
52625+ dentry = va_arg(ap, struct dentry *);
52626+ mnt = va_arg(ap, struct vfsmount *);
52627+ num1 = va_arg(ap, int);
52628+ num2 = va_arg(ap, int);
52629+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
52630+ break;
52631+ case GR_FILENAME_TWO_INT_STR:
52632+ dentry = va_arg(ap, struct dentry *);
52633+ mnt = va_arg(ap, struct vfsmount *);
52634+ num1 = va_arg(ap, int);
52635+ num2 = va_arg(ap, int);
52636+ str1 = va_arg(ap, char *);
52637+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
52638+ break;
52639+ case GR_TEXTREL:
52640+ file = va_arg(ap, struct file *);
52641+ ulong1 = va_arg(ap, unsigned long);
52642+ ulong2 = va_arg(ap, unsigned long);
52643+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
52644+ break;
52645+ case GR_PTRACE:
52646+ task = va_arg(ap, struct task_struct *);
52647+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
52648+ break;
52649+ case GR_RESOURCE:
52650+ task = va_arg(ap, struct task_struct *);
52651+ cred = __task_cred(task);
52652+ pcred = __task_cred(task->real_parent);
52653+ ulong1 = va_arg(ap, unsigned long);
52654+ str1 = va_arg(ap, char *);
52655+ ulong2 = va_arg(ap, unsigned long);
52656+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
52657+ break;
52658+ case GR_CAP:
52659+ task = va_arg(ap, struct task_struct *);
52660+ cred = __task_cred(task);
52661+ pcred = __task_cred(task->real_parent);
52662+ str1 = va_arg(ap, char *);
52663+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
52664+ break;
52665+ case GR_SIG:
52666+ str1 = va_arg(ap, char *);
52667+ voidptr = va_arg(ap, void *);
52668+ gr_log_middle_varargs(audit, msg, str1, voidptr);
52669+ break;
52670+ case GR_SIG2:
52671+ task = va_arg(ap, struct task_struct *);
52672+ cred = __task_cred(task);
52673+ pcred = __task_cred(task->real_parent);
52674+ num1 = va_arg(ap, int);
52675+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
52676+ break;
52677+ case GR_CRASH1:
52678+ task = va_arg(ap, struct task_struct *);
52679+ cred = __task_cred(task);
52680+ pcred = __task_cred(task->real_parent);
52681+ ulong1 = va_arg(ap, unsigned long);
52682+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
52683+ break;
52684+ case GR_CRASH2:
52685+ task = va_arg(ap, struct task_struct *);
52686+ cred = __task_cred(task);
52687+ pcred = __task_cred(task->real_parent);
52688+ ulong1 = va_arg(ap, unsigned long);
52689+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
52690+ break;
52691+ case GR_RWXMAP:
52692+ file = va_arg(ap, struct file *);
52693+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
52694+ break;
52695+ case GR_PSACCT:
52696+ {
52697+ unsigned int wday, cday;
52698+ __u8 whr, chr;
52699+ __u8 wmin, cmin;
52700+ __u8 wsec, csec;
52701+ char cur_tty[64] = { 0 };
52702+ char parent_tty[64] = { 0 };
52703+
52704+ task = va_arg(ap, struct task_struct *);
52705+ wday = va_arg(ap, unsigned int);
52706+ cday = va_arg(ap, unsigned int);
52707+ whr = va_arg(ap, int);
52708+ chr = va_arg(ap, int);
52709+ wmin = va_arg(ap, int);
52710+ cmin = va_arg(ap, int);
52711+ wsec = va_arg(ap, int);
52712+ csec = va_arg(ap, int);
52713+ ulong1 = va_arg(ap, unsigned long);
52714+ cred = __task_cred(task);
52715+ pcred = __task_cred(task->real_parent);
52716+
52717+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
52718+ }
52719+ break;
52720+ default:
52721+ gr_log_middle(audit, msg, ap);
52722+ }
52723+ va_end(ap);
52724+ gr_log_end(audit);
52725+ END_LOCKS(audit);
52726+}
52727diff -urNp linux-3.0.4/grsecurity/grsec_mem.c linux-3.0.4/grsecurity/grsec_mem.c
52728--- linux-3.0.4/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
52729+++ linux-3.0.4/grsecurity/grsec_mem.c 2011-08-23 21:48:14.000000000 -0400
52730@@ -0,0 +1,33 @@
52731+#include <linux/kernel.h>
52732+#include <linux/sched.h>
52733+#include <linux/mm.h>
52734+#include <linux/mman.h>
52735+#include <linux/grinternal.h>
52736+
52737+void
52738+gr_handle_ioperm(void)
52739+{
52740+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
52741+ return;
52742+}
52743+
52744+void
52745+gr_handle_iopl(void)
52746+{
52747+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
52748+ return;
52749+}
52750+
52751+void
52752+gr_handle_mem_readwrite(u64 from, u64 to)
52753+{
52754+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
52755+ return;
52756+}
52757+
52758+void
52759+gr_handle_vm86(void)
52760+{
52761+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
52762+ return;
52763+}
52764diff -urNp linux-3.0.4/grsecurity/grsec_mount.c linux-3.0.4/grsecurity/grsec_mount.c
52765--- linux-3.0.4/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
52766+++ linux-3.0.4/grsecurity/grsec_mount.c 2011-08-23 21:48:14.000000000 -0400
52767@@ -0,0 +1,62 @@
52768+#include <linux/kernel.h>
52769+#include <linux/sched.h>
52770+#include <linux/mount.h>
52771+#include <linux/grsecurity.h>
52772+#include <linux/grinternal.h>
52773+
52774+void
52775+gr_log_remount(const char *devname, const int retval)
52776+{
52777+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
52778+ if (grsec_enable_mount && (retval >= 0))
52779+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
52780+#endif
52781+ return;
52782+}
52783+
52784+void
52785+gr_log_unmount(const char *devname, const int retval)
52786+{
52787+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
52788+ if (grsec_enable_mount && (retval >= 0))
52789+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
52790+#endif
52791+ return;
52792+}
52793+
52794+void
52795+gr_log_mount(const char *from, const char *to, const int retval)
52796+{
52797+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
52798+ if (grsec_enable_mount && (retval >= 0))
52799+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
52800+#endif
52801+ return;
52802+}
52803+
52804+int
52805+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
52806+{
52807+#ifdef CONFIG_GRKERNSEC_ROFS
52808+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
52809+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
52810+ return -EPERM;
52811+ } else
52812+ return 0;
52813+#endif
52814+ return 0;
52815+}
52816+
52817+int
52818+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
52819+{
52820+#ifdef CONFIG_GRKERNSEC_ROFS
52821+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
52822+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
52823+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
52824+ return -EPERM;
52825+ } else
52826+ return 0;
52827+#endif
52828+ return 0;
52829+}
52830diff -urNp linux-3.0.4/grsecurity/grsec_pax.c linux-3.0.4/grsecurity/grsec_pax.c
52831--- linux-3.0.4/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
52832+++ linux-3.0.4/grsecurity/grsec_pax.c 2011-08-23 21:48:14.000000000 -0400
52833@@ -0,0 +1,36 @@
52834+#include <linux/kernel.h>
52835+#include <linux/sched.h>
52836+#include <linux/mm.h>
52837+#include <linux/file.h>
52838+#include <linux/grinternal.h>
52839+#include <linux/grsecurity.h>
52840+
52841+void
52842+gr_log_textrel(struct vm_area_struct * vma)
52843+{
52844+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
52845+ if (grsec_enable_audit_textrel)
52846+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
52847+#endif
52848+ return;
52849+}
52850+
52851+void
52852+gr_log_rwxmmap(struct file *file)
52853+{
52854+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
52855+ if (grsec_enable_log_rwxmaps)
52856+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
52857+#endif
52858+ return;
52859+}
52860+
52861+void
52862+gr_log_rwxmprotect(struct file *file)
52863+{
52864+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
52865+ if (grsec_enable_log_rwxmaps)
52866+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
52867+#endif
52868+ return;
52869+}
52870diff -urNp linux-3.0.4/grsecurity/grsec_ptrace.c linux-3.0.4/grsecurity/grsec_ptrace.c
52871--- linux-3.0.4/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
52872+++ linux-3.0.4/grsecurity/grsec_ptrace.c 2011-08-23 21:48:14.000000000 -0400
52873@@ -0,0 +1,14 @@
52874+#include <linux/kernel.h>
52875+#include <linux/sched.h>
52876+#include <linux/grinternal.h>
52877+#include <linux/grsecurity.h>
52878+
52879+void
52880+gr_audit_ptrace(struct task_struct *task)
52881+{
52882+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
52883+ if (grsec_enable_audit_ptrace)
52884+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
52885+#endif
52886+ return;
52887+}
52888diff -urNp linux-3.0.4/grsecurity/grsec_sig.c linux-3.0.4/grsecurity/grsec_sig.c
52889--- linux-3.0.4/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
52890+++ linux-3.0.4/grsecurity/grsec_sig.c 2011-08-23 21:48:14.000000000 -0400
52891@@ -0,0 +1,206 @@
52892+#include <linux/kernel.h>
52893+#include <linux/sched.h>
52894+#include <linux/delay.h>
52895+#include <linux/grsecurity.h>
52896+#include <linux/grinternal.h>
52897+#include <linux/hardirq.h>
52898+
52899+char *signames[] = {
52900+ [SIGSEGV] = "Segmentation fault",
52901+ [SIGILL] = "Illegal instruction",
52902+ [SIGABRT] = "Abort",
52903+ [SIGBUS] = "Invalid alignment/Bus error"
52904+};
52905+
52906+void
52907+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
52908+{
52909+#ifdef CONFIG_GRKERNSEC_SIGNAL
52910+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
52911+ (sig == SIGABRT) || (sig == SIGBUS))) {
52912+ if (t->pid == current->pid) {
52913+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
52914+ } else {
52915+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
52916+ }
52917+ }
52918+#endif
52919+ return;
52920+}
52921+
52922+int
52923+gr_handle_signal(const struct task_struct *p, const int sig)
52924+{
52925+#ifdef CONFIG_GRKERNSEC
52926+ if (current->pid > 1 && gr_check_protected_task(p)) {
52927+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
52928+ return -EPERM;
52929+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
52930+ return -EPERM;
52931+ }
52932+#endif
52933+ return 0;
52934+}
52935+
52936+#ifdef CONFIG_GRKERNSEC
52937+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
52938+
52939+int gr_fake_force_sig(int sig, struct task_struct *t)
52940+{
52941+ unsigned long int flags;
52942+ int ret, blocked, ignored;
52943+ struct k_sigaction *action;
52944+
52945+ spin_lock_irqsave(&t->sighand->siglock, flags);
52946+ action = &t->sighand->action[sig-1];
52947+ ignored = action->sa.sa_handler == SIG_IGN;
52948+ blocked = sigismember(&t->blocked, sig);
52949+ if (blocked || ignored) {
52950+ action->sa.sa_handler = SIG_DFL;
52951+ if (blocked) {
52952+ sigdelset(&t->blocked, sig);
52953+ recalc_sigpending_and_wake(t);
52954+ }
52955+ }
52956+ if (action->sa.sa_handler == SIG_DFL)
52957+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
52958+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
52959+
52960+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
52961+
52962+ return ret;
52963+}
52964+#endif
52965+
52966+#ifdef CONFIG_GRKERNSEC_BRUTE
52967+#define GR_USER_BAN_TIME (15 * 60)
52968+
52969+static int __get_dumpable(unsigned long mm_flags)
52970+{
52971+ int ret;
52972+
52973+ ret = mm_flags & MMF_DUMPABLE_MASK;
52974+ return (ret >= 2) ? 2 : ret;
52975+}
52976+#endif
52977+
52978+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
52979+{
52980+#ifdef CONFIG_GRKERNSEC_BRUTE
52981+ uid_t uid = 0;
52982+
52983+ if (!grsec_enable_brute)
52984+ return;
52985+
52986+ rcu_read_lock();
52987+ read_lock(&tasklist_lock);
52988+ read_lock(&grsec_exec_file_lock);
52989+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
52990+ p->real_parent->brute = 1;
52991+ else {
52992+ const struct cred *cred = __task_cred(p), *cred2;
52993+ struct task_struct *tsk, *tsk2;
52994+
52995+ if (!__get_dumpable(mm_flags) && cred->uid) {
52996+ struct user_struct *user;
52997+
52998+ uid = cred->uid;
52999+
53000+ /* this is put upon execution past expiration */
53001+ user = find_user(uid);
53002+ if (user == NULL)
53003+ goto unlock;
53004+ user->banned = 1;
53005+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
53006+ if (user->ban_expires == ~0UL)
53007+ user->ban_expires--;
53008+
53009+ do_each_thread(tsk2, tsk) {
53010+ cred2 = __task_cred(tsk);
53011+ if (tsk != p && cred2->uid == uid)
53012+ gr_fake_force_sig(SIGKILL, tsk);
53013+ } while_each_thread(tsk2, tsk);
53014+ }
53015+ }
53016+unlock:
53017+ read_unlock(&grsec_exec_file_lock);
53018+ read_unlock(&tasklist_lock);
53019+ rcu_read_unlock();
53020+
53021+ if (uid)
53022+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
53023+
53024+#endif
53025+ return;
53026+}
53027+
53028+void gr_handle_brute_check(void)
53029+{
53030+#ifdef CONFIG_GRKERNSEC_BRUTE
53031+ if (current->brute)
53032+ msleep(30 * 1000);
53033+#endif
53034+ return;
53035+}
53036+
53037+void gr_handle_kernel_exploit(void)
53038+{
53039+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
53040+ const struct cred *cred;
53041+ struct task_struct *tsk, *tsk2;
53042+ struct user_struct *user;
53043+ uid_t uid;
53044+
53045+ if (in_irq() || in_serving_softirq() || in_nmi())
53046+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
53047+
53048+ uid = current_uid();
53049+
53050+ if (uid == 0)
53051+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
53052+ else {
53053+ /* kill all the processes of this user, hold a reference
53054+ to their creds struct, and prevent them from creating
53055+ another process until system reset
53056+ */
53057+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
53058+ /* we intentionally leak this ref */
53059+ user = get_uid(current->cred->user);
53060+ if (user) {
53061+ user->banned = 1;
53062+ user->ban_expires = ~0UL;
53063+ }
53064+
53065+ read_lock(&tasklist_lock);
53066+ do_each_thread(tsk2, tsk) {
53067+ cred = __task_cred(tsk);
53068+ if (cred->uid == uid)
53069+ gr_fake_force_sig(SIGKILL, tsk);
53070+ } while_each_thread(tsk2, tsk);
53071+ read_unlock(&tasklist_lock);
53072+ }
53073+#endif
53074+}
53075+
53076+int __gr_process_user_ban(struct user_struct *user)
53077+{
53078+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
53079+ if (unlikely(user->banned)) {
53080+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
53081+ user->banned = 0;
53082+ user->ban_expires = 0;
53083+ free_uid(user);
53084+ } else
53085+ return -EPERM;
53086+ }
53087+#endif
53088+ return 0;
53089+}
53090+
53091+int gr_process_user_ban(void)
53092+{
53093+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
53094+ return __gr_process_user_ban(current->cred->user);
53095+#endif
53096+ return 0;
53097+}
53098diff -urNp linux-3.0.4/grsecurity/grsec_sock.c linux-3.0.4/grsecurity/grsec_sock.c
53099--- linux-3.0.4/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
53100+++ linux-3.0.4/grsecurity/grsec_sock.c 2011-08-23 21:48:14.000000000 -0400
53101@@ -0,0 +1,244 @@
53102+#include <linux/kernel.h>
53103+#include <linux/module.h>
53104+#include <linux/sched.h>
53105+#include <linux/file.h>
53106+#include <linux/net.h>
53107+#include <linux/in.h>
53108+#include <linux/ip.h>
53109+#include <net/sock.h>
53110+#include <net/inet_sock.h>
53111+#include <linux/grsecurity.h>
53112+#include <linux/grinternal.h>
53113+#include <linux/gracl.h>
53114+
53115+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
53116+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
53117+
53118+EXPORT_SYMBOL(gr_search_udp_recvmsg);
53119+EXPORT_SYMBOL(gr_search_udp_sendmsg);
53120+
53121+#ifdef CONFIG_UNIX_MODULE
53122+EXPORT_SYMBOL(gr_acl_handle_unix);
53123+EXPORT_SYMBOL(gr_acl_handle_mknod);
53124+EXPORT_SYMBOL(gr_handle_chroot_unix);
53125+EXPORT_SYMBOL(gr_handle_create);
53126+#endif
53127+
53128+#ifdef CONFIG_GRKERNSEC
53129+#define gr_conn_table_size 32749
53130+struct conn_table_entry {
53131+ struct conn_table_entry *next;
53132+ struct signal_struct *sig;
53133+};
53134+
53135+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
53136+DEFINE_SPINLOCK(gr_conn_table_lock);
53137+
53138+extern const char * gr_socktype_to_name(unsigned char type);
53139+extern const char * gr_proto_to_name(unsigned char proto);
53140+extern const char * gr_sockfamily_to_name(unsigned char family);
53141+
53142+static __inline__ int
53143+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
53144+{
53145+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
53146+}
53147+
53148+static __inline__ int
53149+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
53150+ __u16 sport, __u16 dport)
53151+{
53152+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
53153+ sig->gr_sport == sport && sig->gr_dport == dport))
53154+ return 1;
53155+ else
53156+ return 0;
53157+}
53158+
53159+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
53160+{
53161+ struct conn_table_entry **match;
53162+ unsigned int index;
53163+
53164+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
53165+ sig->gr_sport, sig->gr_dport,
53166+ gr_conn_table_size);
53167+
53168+ newent->sig = sig;
53169+
53170+ match = &gr_conn_table[index];
53171+ newent->next = *match;
53172+ *match = newent;
53173+
53174+ return;
53175+}
53176+
53177+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
53178+{
53179+ struct conn_table_entry *match, *last = NULL;
53180+ unsigned int index;
53181+
53182+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
53183+ sig->gr_sport, sig->gr_dport,
53184+ gr_conn_table_size);
53185+
53186+ match = gr_conn_table[index];
53187+ while (match && !conn_match(match->sig,
53188+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
53189+ sig->gr_dport)) {
53190+ last = match;
53191+ match = match->next;
53192+ }
53193+
53194+ if (match) {
53195+ if (last)
53196+ last->next = match->next;
53197+ else
53198+ gr_conn_table[index] = NULL;
53199+ kfree(match);
53200+ }
53201+
53202+ return;
53203+}
53204+
53205+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
53206+ __u16 sport, __u16 dport)
53207+{
53208+ struct conn_table_entry *match;
53209+ unsigned int index;
53210+
53211+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
53212+
53213+ match = gr_conn_table[index];
53214+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
53215+ match = match->next;
53216+
53217+ if (match)
53218+ return match->sig;
53219+ else
53220+ return NULL;
53221+}
53222+
53223+#endif
53224+
53225+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
53226+{
53227+#ifdef CONFIG_GRKERNSEC
53228+ struct signal_struct *sig = task->signal;
53229+ struct conn_table_entry *newent;
53230+
53231+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
53232+ if (newent == NULL)
53233+ return;
53234+ /* no bh lock needed since we are called with bh disabled */
53235+ spin_lock(&gr_conn_table_lock);
53236+ gr_del_task_from_ip_table_nolock(sig);
53237+ sig->gr_saddr = inet->inet_rcv_saddr;
53238+ sig->gr_daddr = inet->inet_daddr;
53239+ sig->gr_sport = inet->inet_sport;
53240+ sig->gr_dport = inet->inet_dport;
53241+ gr_add_to_task_ip_table_nolock(sig, newent);
53242+ spin_unlock(&gr_conn_table_lock);
53243+#endif
53244+ return;
53245+}
53246+
53247+void gr_del_task_from_ip_table(struct task_struct *task)
53248+{
53249+#ifdef CONFIG_GRKERNSEC
53250+ spin_lock_bh(&gr_conn_table_lock);
53251+ gr_del_task_from_ip_table_nolock(task->signal);
53252+ spin_unlock_bh(&gr_conn_table_lock);
53253+#endif
53254+ return;
53255+}
53256+
53257+void
53258+gr_attach_curr_ip(const struct sock *sk)
53259+{
53260+#ifdef CONFIG_GRKERNSEC
53261+ struct signal_struct *p, *set;
53262+ const struct inet_sock *inet = inet_sk(sk);
53263+
53264+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
53265+ return;
53266+
53267+ set = current->signal;
53268+
53269+ spin_lock_bh(&gr_conn_table_lock);
53270+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
53271+ inet->inet_dport, inet->inet_sport);
53272+ if (unlikely(p != NULL)) {
53273+ set->curr_ip = p->curr_ip;
53274+ set->used_accept = 1;
53275+ gr_del_task_from_ip_table_nolock(p);
53276+ spin_unlock_bh(&gr_conn_table_lock);
53277+ return;
53278+ }
53279+ spin_unlock_bh(&gr_conn_table_lock);
53280+
53281+ set->curr_ip = inet->inet_daddr;
53282+ set->used_accept = 1;
53283+#endif
53284+ return;
53285+}
53286+
53287+int
53288+gr_handle_sock_all(const int family, const int type, const int protocol)
53289+{
53290+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
53291+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
53292+ (family != AF_UNIX)) {
53293+ if (family == AF_INET)
53294+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
53295+ else
53296+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
53297+ return -EACCES;
53298+ }
53299+#endif
53300+ return 0;
53301+}
53302+
53303+int
53304+gr_handle_sock_server(const struct sockaddr *sck)
53305+{
53306+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
53307+ if (grsec_enable_socket_server &&
53308+ in_group_p(grsec_socket_server_gid) &&
53309+ sck && (sck->sa_family != AF_UNIX) &&
53310+ (sck->sa_family != AF_LOCAL)) {
53311+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
53312+ return -EACCES;
53313+ }
53314+#endif
53315+ return 0;
53316+}
53317+
53318+int
53319+gr_handle_sock_server_other(const struct sock *sck)
53320+{
53321+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
53322+ if (grsec_enable_socket_server &&
53323+ in_group_p(grsec_socket_server_gid) &&
53324+ sck && (sck->sk_family != AF_UNIX) &&
53325+ (sck->sk_family != AF_LOCAL)) {
53326+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
53327+ return -EACCES;
53328+ }
53329+#endif
53330+ return 0;
53331+}
53332+
53333+int
53334+gr_handle_sock_client(const struct sockaddr *sck)
53335+{
53336+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
53337+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
53338+ sck && (sck->sa_family != AF_UNIX) &&
53339+ (sck->sa_family != AF_LOCAL)) {
53340+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
53341+ return -EACCES;
53342+ }
53343+#endif
53344+ return 0;
53345+}
53346diff -urNp linux-3.0.4/grsecurity/grsec_sysctl.c linux-3.0.4/grsecurity/grsec_sysctl.c
53347--- linux-3.0.4/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
53348+++ linux-3.0.4/grsecurity/grsec_sysctl.c 2011-08-25 17:26:15.000000000 -0400
53349@@ -0,0 +1,433 @@
53350+#include <linux/kernel.h>
53351+#include <linux/sched.h>
53352+#include <linux/sysctl.h>
53353+#include <linux/grsecurity.h>
53354+#include <linux/grinternal.h>
53355+
53356+int
53357+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
53358+{
53359+#ifdef CONFIG_GRKERNSEC_SYSCTL
53360+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
53361+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
53362+ return -EACCES;
53363+ }
53364+#endif
53365+ return 0;
53366+}
53367+
53368+#ifdef CONFIG_GRKERNSEC_ROFS
53369+static int __maybe_unused one = 1;
53370+#endif
53371+
53372+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
53373+struct ctl_table grsecurity_table[] = {
53374+#ifdef CONFIG_GRKERNSEC_SYSCTL
53375+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
53376+#ifdef CONFIG_GRKERNSEC_IO
53377+ {
53378+ .procname = "disable_priv_io",
53379+ .data = &grsec_disable_privio,
53380+ .maxlen = sizeof(int),
53381+ .mode = 0600,
53382+ .proc_handler = &proc_dointvec,
53383+ },
53384+#endif
53385+#endif
53386+#ifdef CONFIG_GRKERNSEC_LINK
53387+ {
53388+ .procname = "linking_restrictions",
53389+ .data = &grsec_enable_link,
53390+ .maxlen = sizeof(int),
53391+ .mode = 0600,
53392+ .proc_handler = &proc_dointvec,
53393+ },
53394+#endif
53395+#ifdef CONFIG_GRKERNSEC_BRUTE
53396+ {
53397+ .procname = "deter_bruteforce",
53398+ .data = &grsec_enable_brute,
53399+ .maxlen = sizeof(int),
53400+ .mode = 0600,
53401+ .proc_handler = &proc_dointvec,
53402+ },
53403+#endif
53404+#ifdef CONFIG_GRKERNSEC_FIFO
53405+ {
53406+ .procname = "fifo_restrictions",
53407+ .data = &grsec_enable_fifo,
53408+ .maxlen = sizeof(int),
53409+ .mode = 0600,
53410+ .proc_handler = &proc_dointvec,
53411+ },
53412+#endif
53413+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
53414+ {
53415+ .procname = "ip_blackhole",
53416+ .data = &grsec_enable_blackhole,
53417+ .maxlen = sizeof(int),
53418+ .mode = 0600,
53419+ .proc_handler = &proc_dointvec,
53420+ },
53421+ {
53422+ .procname = "lastack_retries",
53423+ .data = &grsec_lastack_retries,
53424+ .maxlen = sizeof(int),
53425+ .mode = 0600,
53426+ .proc_handler = &proc_dointvec,
53427+ },
53428+#endif
53429+#ifdef CONFIG_GRKERNSEC_EXECLOG
53430+ {
53431+ .procname = "exec_logging",
53432+ .data = &grsec_enable_execlog,
53433+ .maxlen = sizeof(int),
53434+ .mode = 0600,
53435+ .proc_handler = &proc_dointvec,
53436+ },
53437+#endif
53438+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
53439+ {
53440+ .procname = "rwxmap_logging",
53441+ .data = &grsec_enable_log_rwxmaps,
53442+ .maxlen = sizeof(int),
53443+ .mode = 0600,
53444+ .proc_handler = &proc_dointvec,
53445+ },
53446+#endif
53447+#ifdef CONFIG_GRKERNSEC_SIGNAL
53448+ {
53449+ .procname = "signal_logging",
53450+ .data = &grsec_enable_signal,
53451+ .maxlen = sizeof(int),
53452+ .mode = 0600,
53453+ .proc_handler = &proc_dointvec,
53454+ },
53455+#endif
53456+#ifdef CONFIG_GRKERNSEC_FORKFAIL
53457+ {
53458+ .procname = "forkfail_logging",
53459+ .data = &grsec_enable_forkfail,
53460+ .maxlen = sizeof(int),
53461+ .mode = 0600,
53462+ .proc_handler = &proc_dointvec,
53463+ },
53464+#endif
53465+#ifdef CONFIG_GRKERNSEC_TIME
53466+ {
53467+ .procname = "timechange_logging",
53468+ .data = &grsec_enable_time,
53469+ .maxlen = sizeof(int),
53470+ .mode = 0600,
53471+ .proc_handler = &proc_dointvec,
53472+ },
53473+#endif
53474+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
53475+ {
53476+ .procname = "chroot_deny_shmat",
53477+ .data = &grsec_enable_chroot_shmat,
53478+ .maxlen = sizeof(int),
53479+ .mode = 0600,
53480+ .proc_handler = &proc_dointvec,
53481+ },
53482+#endif
53483+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
53484+ {
53485+ .procname = "chroot_deny_unix",
53486+ .data = &grsec_enable_chroot_unix,
53487+ .maxlen = sizeof(int),
53488+ .mode = 0600,
53489+ .proc_handler = &proc_dointvec,
53490+ },
53491+#endif
53492+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
53493+ {
53494+ .procname = "chroot_deny_mount",
53495+ .data = &grsec_enable_chroot_mount,
53496+ .maxlen = sizeof(int),
53497+ .mode = 0600,
53498+ .proc_handler = &proc_dointvec,
53499+ },
53500+#endif
53501+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
53502+ {
53503+ .procname = "chroot_deny_fchdir",
53504+ .data = &grsec_enable_chroot_fchdir,
53505+ .maxlen = sizeof(int),
53506+ .mode = 0600,
53507+ .proc_handler = &proc_dointvec,
53508+ },
53509+#endif
53510+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
53511+ {
53512+ .procname = "chroot_deny_chroot",
53513+ .data = &grsec_enable_chroot_double,
53514+ .maxlen = sizeof(int),
53515+ .mode = 0600,
53516+ .proc_handler = &proc_dointvec,
53517+ },
53518+#endif
53519+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
53520+ {
53521+ .procname = "chroot_deny_pivot",
53522+ .data = &grsec_enable_chroot_pivot,
53523+ .maxlen = sizeof(int),
53524+ .mode = 0600,
53525+ .proc_handler = &proc_dointvec,
53526+ },
53527+#endif
53528+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
53529+ {
53530+ .procname = "chroot_enforce_chdir",
53531+ .data = &grsec_enable_chroot_chdir,
53532+ .maxlen = sizeof(int),
53533+ .mode = 0600,
53534+ .proc_handler = &proc_dointvec,
53535+ },
53536+#endif
53537+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
53538+ {
53539+ .procname = "chroot_deny_chmod",
53540+ .data = &grsec_enable_chroot_chmod,
53541+ .maxlen = sizeof(int),
53542+ .mode = 0600,
53543+ .proc_handler = &proc_dointvec,
53544+ },
53545+#endif
53546+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
53547+ {
53548+ .procname = "chroot_deny_mknod",
53549+ .data = &grsec_enable_chroot_mknod,
53550+ .maxlen = sizeof(int),
53551+ .mode = 0600,
53552+ .proc_handler = &proc_dointvec,
53553+ },
53554+#endif
53555+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
53556+ {
53557+ .procname = "chroot_restrict_nice",
53558+ .data = &grsec_enable_chroot_nice,
53559+ .maxlen = sizeof(int),
53560+ .mode = 0600,
53561+ .proc_handler = &proc_dointvec,
53562+ },
53563+#endif
53564+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
53565+ {
53566+ .procname = "chroot_execlog",
53567+ .data = &grsec_enable_chroot_execlog,
53568+ .maxlen = sizeof(int),
53569+ .mode = 0600,
53570+ .proc_handler = &proc_dointvec,
53571+ },
53572+#endif
53573+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
53574+ {
53575+ .procname = "chroot_caps",
53576+ .data = &grsec_enable_chroot_caps,
53577+ .maxlen = sizeof(int),
53578+ .mode = 0600,
53579+ .proc_handler = &proc_dointvec,
53580+ },
53581+#endif
53582+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
53583+ {
53584+ .procname = "chroot_deny_sysctl",
53585+ .data = &grsec_enable_chroot_sysctl,
53586+ .maxlen = sizeof(int),
53587+ .mode = 0600,
53588+ .proc_handler = &proc_dointvec,
53589+ },
53590+#endif
53591+#ifdef CONFIG_GRKERNSEC_TPE
53592+ {
53593+ .procname = "tpe",
53594+ .data = &grsec_enable_tpe,
53595+ .maxlen = sizeof(int),
53596+ .mode = 0600,
53597+ .proc_handler = &proc_dointvec,
53598+ },
53599+ {
53600+ .procname = "tpe_gid",
53601+ .data = &grsec_tpe_gid,
53602+ .maxlen = sizeof(int),
53603+ .mode = 0600,
53604+ .proc_handler = &proc_dointvec,
53605+ },
53606+#endif
53607+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
53608+ {
53609+ .procname = "tpe_invert",
53610+ .data = &grsec_enable_tpe_invert,
53611+ .maxlen = sizeof(int),
53612+ .mode = 0600,
53613+ .proc_handler = &proc_dointvec,
53614+ },
53615+#endif
53616+#ifdef CONFIG_GRKERNSEC_TPE_ALL
53617+ {
53618+ .procname = "tpe_restrict_all",
53619+ .data = &grsec_enable_tpe_all,
53620+ .maxlen = sizeof(int),
53621+ .mode = 0600,
53622+ .proc_handler = &proc_dointvec,
53623+ },
53624+#endif
53625+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
53626+ {
53627+ .procname = "socket_all",
53628+ .data = &grsec_enable_socket_all,
53629+ .maxlen = sizeof(int),
53630+ .mode = 0600,
53631+ .proc_handler = &proc_dointvec,
53632+ },
53633+ {
53634+ .procname = "socket_all_gid",
53635+ .data = &grsec_socket_all_gid,
53636+ .maxlen = sizeof(int),
53637+ .mode = 0600,
53638+ .proc_handler = &proc_dointvec,
53639+ },
53640+#endif
53641+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
53642+ {
53643+ .procname = "socket_client",
53644+ .data = &grsec_enable_socket_client,
53645+ .maxlen = sizeof(int),
53646+ .mode = 0600,
53647+ .proc_handler = &proc_dointvec,
53648+ },
53649+ {
53650+ .procname = "socket_client_gid",
53651+ .data = &grsec_socket_client_gid,
53652+ .maxlen = sizeof(int),
53653+ .mode = 0600,
53654+ .proc_handler = &proc_dointvec,
53655+ },
53656+#endif
53657+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
53658+ {
53659+ .procname = "socket_server",
53660+ .data = &grsec_enable_socket_server,
53661+ .maxlen = sizeof(int),
53662+ .mode = 0600,
53663+ .proc_handler = &proc_dointvec,
53664+ },
53665+ {
53666+ .procname = "socket_server_gid",
53667+ .data = &grsec_socket_server_gid,
53668+ .maxlen = sizeof(int),
53669+ .mode = 0600,
53670+ .proc_handler = &proc_dointvec,
53671+ },
53672+#endif
53673+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
53674+ {
53675+ .procname = "audit_group",
53676+ .data = &grsec_enable_group,
53677+ .maxlen = sizeof(int),
53678+ .mode = 0600,
53679+ .proc_handler = &proc_dointvec,
53680+ },
53681+ {
53682+ .procname = "audit_gid",
53683+ .data = &grsec_audit_gid,
53684+ .maxlen = sizeof(int),
53685+ .mode = 0600,
53686+ .proc_handler = &proc_dointvec,
53687+ },
53688+#endif
53689+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
53690+ {
53691+ .procname = "audit_chdir",
53692+ .data = &grsec_enable_chdir,
53693+ .maxlen = sizeof(int),
53694+ .mode = 0600,
53695+ .proc_handler = &proc_dointvec,
53696+ },
53697+#endif
53698+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
53699+ {
53700+ .procname = "audit_mount",
53701+ .data = &grsec_enable_mount,
53702+ .maxlen = sizeof(int),
53703+ .mode = 0600,
53704+ .proc_handler = &proc_dointvec,
53705+ },
53706+#endif
53707+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
53708+ {
53709+ .procname = "audit_textrel",
53710+ .data = &grsec_enable_audit_textrel,
53711+ .maxlen = sizeof(int),
53712+ .mode = 0600,
53713+ .proc_handler = &proc_dointvec,
53714+ },
53715+#endif
53716+#ifdef CONFIG_GRKERNSEC_DMESG
53717+ {
53718+ .procname = "dmesg",
53719+ .data = &grsec_enable_dmesg,
53720+ .maxlen = sizeof(int),
53721+ .mode = 0600,
53722+ .proc_handler = &proc_dointvec,
53723+ },
53724+#endif
53725+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53726+ {
53727+ .procname = "chroot_findtask",
53728+ .data = &grsec_enable_chroot_findtask,
53729+ .maxlen = sizeof(int),
53730+ .mode = 0600,
53731+ .proc_handler = &proc_dointvec,
53732+ },
53733+#endif
53734+#ifdef CONFIG_GRKERNSEC_RESLOG
53735+ {
53736+ .procname = "resource_logging",
53737+ .data = &grsec_resource_logging,
53738+ .maxlen = sizeof(int),
53739+ .mode = 0600,
53740+ .proc_handler = &proc_dointvec,
53741+ },
53742+#endif
53743+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
53744+ {
53745+ .procname = "audit_ptrace",
53746+ .data = &grsec_enable_audit_ptrace,
53747+ .maxlen = sizeof(int),
53748+ .mode = 0600,
53749+ .proc_handler = &proc_dointvec,
53750+ },
53751+#endif
53752+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
53753+ {
53754+ .procname = "harden_ptrace",
53755+ .data = &grsec_enable_harden_ptrace,
53756+ .maxlen = sizeof(int),
53757+ .mode = 0600,
53758+ .proc_handler = &proc_dointvec,
53759+ },
53760+#endif
53761+ {
53762+ .procname = "grsec_lock",
53763+ .data = &grsec_lock,
53764+ .maxlen = sizeof(int),
53765+ .mode = 0600,
53766+ .proc_handler = &proc_dointvec,
53767+ },
53768+#endif
53769+#ifdef CONFIG_GRKERNSEC_ROFS
53770+ {
53771+ .procname = "romount_protect",
53772+ .data = &grsec_enable_rofs,
53773+ .maxlen = sizeof(int),
53774+ .mode = 0600,
53775+ .proc_handler = &proc_dointvec_minmax,
53776+ .extra1 = &one,
53777+ .extra2 = &one,
53778+ },
53779+#endif
53780+ { }
53781+};
53782+#endif
53783diff -urNp linux-3.0.4/grsecurity/grsec_time.c linux-3.0.4/grsecurity/grsec_time.c
53784--- linux-3.0.4/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
53785+++ linux-3.0.4/grsecurity/grsec_time.c 2011-08-23 21:48:14.000000000 -0400
53786@@ -0,0 +1,16 @@
53787+#include <linux/kernel.h>
53788+#include <linux/sched.h>
53789+#include <linux/grinternal.h>
53790+#include <linux/module.h>
53791+
53792+void
53793+gr_log_timechange(void)
53794+{
53795+#ifdef CONFIG_GRKERNSEC_TIME
53796+ if (grsec_enable_time)
53797+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
53798+#endif
53799+ return;
53800+}
53801+
53802+EXPORT_SYMBOL(gr_log_timechange);
53803diff -urNp linux-3.0.4/grsecurity/grsec_tpe.c linux-3.0.4/grsecurity/grsec_tpe.c
53804--- linux-3.0.4/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
53805+++ linux-3.0.4/grsecurity/grsec_tpe.c 2011-08-23 21:48:14.000000000 -0400
53806@@ -0,0 +1,39 @@
53807+#include <linux/kernel.h>
53808+#include <linux/sched.h>
53809+#include <linux/file.h>
53810+#include <linux/fs.h>
53811+#include <linux/grinternal.h>
53812+
53813+extern int gr_acl_tpe_check(void);
53814+
53815+int
53816+gr_tpe_allow(const struct file *file)
53817+{
53818+#ifdef CONFIG_GRKERNSEC
53819+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
53820+ const struct cred *cred = current_cred();
53821+
53822+ if (cred->uid && ((grsec_enable_tpe &&
53823+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
53824+ ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
53825+ (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
53826+#else
53827+ in_group_p(grsec_tpe_gid)
53828+#endif
53829+ ) || gr_acl_tpe_check()) &&
53830+ (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
53831+ (inode->i_mode & S_IWOTH))))) {
53832+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
53833+ return 0;
53834+ }
53835+#ifdef CONFIG_GRKERNSEC_TPE_ALL
53836+ if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
53837+ ((inode->i_uid && (inode->i_uid != cred->uid)) ||
53838+ (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
53839+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
53840+ return 0;
53841+ }
53842+#endif
53843+#endif
53844+ return 1;
53845+}
53846diff -urNp linux-3.0.4/grsecurity/grsum.c linux-3.0.4/grsecurity/grsum.c
53847--- linux-3.0.4/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
53848+++ linux-3.0.4/grsecurity/grsum.c 2011-08-23 21:48:14.000000000 -0400
53849@@ -0,0 +1,61 @@
53850+#include <linux/err.h>
53851+#include <linux/kernel.h>
53852+#include <linux/sched.h>
53853+#include <linux/mm.h>
53854+#include <linux/scatterlist.h>
53855+#include <linux/crypto.h>
53856+#include <linux/gracl.h>
53857+
53858+
53859+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
53860+#error "crypto and sha256 must be built into the kernel"
53861+#endif
53862+
53863+int
53864+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
53865+{
53866+ char *p;
53867+ struct crypto_hash *tfm;
53868+ struct hash_desc desc;
53869+ struct scatterlist sg;
53870+ unsigned char temp_sum[GR_SHA_LEN];
53871+ volatile int retval = 0;
53872+ volatile int dummy = 0;
53873+ unsigned int i;
53874+
53875+ sg_init_table(&sg, 1);
53876+
53877+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
53878+ if (IS_ERR(tfm)) {
53879+ /* should never happen, since sha256 should be built in */
53880+ return 1;
53881+ }
53882+
53883+ desc.tfm = tfm;
53884+ desc.flags = 0;
53885+
53886+ crypto_hash_init(&desc);
53887+
53888+ p = salt;
53889+ sg_set_buf(&sg, p, GR_SALT_LEN);
53890+ crypto_hash_update(&desc, &sg, sg.length);
53891+
53892+ p = entry->pw;
53893+ sg_set_buf(&sg, p, strlen(p));
53894+
53895+ crypto_hash_update(&desc, &sg, sg.length);
53896+
53897+ crypto_hash_final(&desc, temp_sum);
53898+
53899+ memset(entry->pw, 0, GR_PW_LEN);
53900+
53901+ for (i = 0; i < GR_SHA_LEN; i++)
53902+ if (sum[i] != temp_sum[i])
53903+ retval = 1;
53904+ else
53905+ dummy = 1; // waste a cycle
53906+
53907+ crypto_free_hash(tfm);
53908+
53909+ return retval;
53910+}
53911diff -urNp linux-3.0.4/grsecurity/Kconfig linux-3.0.4/grsecurity/Kconfig
53912--- linux-3.0.4/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
53913+++ linux-3.0.4/grsecurity/Kconfig 2011-09-15 00:00:57.000000000 -0400
53914@@ -0,0 +1,1038 @@
53915+#
53916+# grsecurity configuration
53917+#
53918+
53919+menu "Grsecurity"
53920+
53921+config GRKERNSEC
53922+ bool "Grsecurity"
53923+ select CRYPTO
53924+ select CRYPTO_SHA256
53925+ help
53926+ If you say Y here, you will be able to configure many features
53927+ that will enhance the security of your system. It is highly
53928+ recommended that you say Y here and read through the help
53929+ for each option so that you fully understand the features and
53930+ can evaluate their usefulness for your machine.
53931+
53932+choice
53933+ prompt "Security Level"
53934+ depends on GRKERNSEC
53935+ default GRKERNSEC_CUSTOM
53936+
53937+config GRKERNSEC_LOW
53938+ bool "Low"
53939+ select GRKERNSEC_LINK
53940+ select GRKERNSEC_FIFO
53941+ select GRKERNSEC_RANDNET
53942+ select GRKERNSEC_DMESG
53943+ select GRKERNSEC_CHROOT
53944+ select GRKERNSEC_CHROOT_CHDIR
53945+
53946+ help
53947+ If you choose this option, several of the grsecurity options will
53948+ be enabled that will give you greater protection against a number
53949+ of attacks, while assuring that none of your software will have any
53950+ conflicts with the additional security measures. If you run a lot
53951+ of unusual software, or you are having problems with the higher
53952+ security levels, you should say Y here. With this option, the
53953+ following features are enabled:
53954+
53955+ - Linking restrictions
53956+ - FIFO restrictions
53957+ - Restricted dmesg
53958+ - Enforced chdir("/") on chroot
53959+ - Runtime module disabling
53960+
53961+config GRKERNSEC_MEDIUM
53962+ bool "Medium"
53963+ select PAX
53964+ select PAX_EI_PAX
53965+ select PAX_PT_PAX_FLAGS
53966+ select PAX_HAVE_ACL_FLAGS
53967+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
53968+ select GRKERNSEC_CHROOT
53969+ select GRKERNSEC_CHROOT_SYSCTL
53970+ select GRKERNSEC_LINK
53971+ select GRKERNSEC_FIFO
53972+ select GRKERNSEC_DMESG
53973+ select GRKERNSEC_RANDNET
53974+ select GRKERNSEC_FORKFAIL
53975+ select GRKERNSEC_TIME
53976+ select GRKERNSEC_SIGNAL
53977+ select GRKERNSEC_CHROOT
53978+ select GRKERNSEC_CHROOT_UNIX
53979+ select GRKERNSEC_CHROOT_MOUNT
53980+ select GRKERNSEC_CHROOT_PIVOT
53981+ select GRKERNSEC_CHROOT_DOUBLE
53982+ select GRKERNSEC_CHROOT_CHDIR
53983+ select GRKERNSEC_CHROOT_MKNOD
53984+ select GRKERNSEC_PROC
53985+ select GRKERNSEC_PROC_USERGROUP
53986+ select PAX_RANDUSTACK
53987+ select PAX_ASLR
53988+ select PAX_RANDMMAP
53989+ select PAX_REFCOUNT if (X86 || SPARC64)
53990+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
53991+
53992+ help
53993+ If you say Y here, several features in addition to those included
53994+ in the low additional security level will be enabled. These
53995+ features provide even more security to your system, though in rare
53996+ cases they may be incompatible with very old or poorly written
53997+ software. If you enable this option, make sure that your auth
53998+ service (identd) is running as gid 1001. With this option,
53999+ the following features (in addition to those provided in the
54000+ low additional security level) will be enabled:
54001+
54002+ - Failed fork logging
54003+ - Time change logging
54004+ - Signal logging
54005+ - Deny mounts in chroot
54006+ - Deny double chrooting
54007+ - Deny sysctl writes in chroot
54008+ - Deny mknod in chroot
54009+ - Deny access to abstract AF_UNIX sockets out of chroot
54010+ - Deny pivot_root in chroot
54011+ - Denied writes of /dev/kmem, /dev/mem, and /dev/port
54012+ - /proc restrictions with special GID set to 10 (usually wheel)
54013+ - Address Space Layout Randomization (ASLR)
54014+ - Prevent exploitation of most refcount overflows
54015+ - Bounds checking of copying between the kernel and userland
54016+
54017+config GRKERNSEC_HIGH
54018+ bool "High"
54019+ select GRKERNSEC_LINK
54020+ select GRKERNSEC_FIFO
54021+ select GRKERNSEC_DMESG
54022+ select GRKERNSEC_FORKFAIL
54023+ select GRKERNSEC_TIME
54024+ select GRKERNSEC_SIGNAL
54025+ select GRKERNSEC_CHROOT
54026+ select GRKERNSEC_CHROOT_SHMAT
54027+ select GRKERNSEC_CHROOT_UNIX
54028+ select GRKERNSEC_CHROOT_MOUNT
54029+ select GRKERNSEC_CHROOT_FCHDIR
54030+ select GRKERNSEC_CHROOT_PIVOT
54031+ select GRKERNSEC_CHROOT_DOUBLE
54032+ select GRKERNSEC_CHROOT_CHDIR
54033+ select GRKERNSEC_CHROOT_MKNOD
54034+ select GRKERNSEC_CHROOT_CAPS
54035+ select GRKERNSEC_CHROOT_SYSCTL
54036+ select GRKERNSEC_CHROOT_FINDTASK
54037+ select GRKERNSEC_SYSFS_RESTRICT
54038+ select GRKERNSEC_PROC
54039+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
54040+ select GRKERNSEC_HIDESYM
54041+ select GRKERNSEC_BRUTE
54042+ select GRKERNSEC_PROC_USERGROUP
54043+ select GRKERNSEC_KMEM
54044+ select GRKERNSEC_RESLOG
54045+ select GRKERNSEC_RANDNET
54046+ select GRKERNSEC_PROC_ADD
54047+ select GRKERNSEC_CHROOT_CHMOD
54048+ select GRKERNSEC_CHROOT_NICE
54049+ select GRKERNSEC_AUDIT_MOUNT
54050+ select GRKERNSEC_MODHARDEN if (MODULES)
54051+ select GRKERNSEC_HARDEN_PTRACE
54052+ select GRKERNSEC_VM86 if (X86_32)
54053+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
54054+ select PAX
54055+ select PAX_RANDUSTACK
54056+ select PAX_ASLR
54057+ select PAX_RANDMMAP
54058+ select PAX_NOEXEC
54059+ select PAX_MPROTECT
54060+ select PAX_EI_PAX
54061+ select PAX_PT_PAX_FLAGS
54062+ select PAX_HAVE_ACL_FLAGS
54063+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
54064+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
54065+ select PAX_RANDKSTACK if (X86_TSC && X86)
54066+ select PAX_SEGMEXEC if (X86_32)
54067+ select PAX_PAGEEXEC
54068+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
54069+ select PAX_EMUTRAMP if (PARISC)
54070+ select PAX_EMUSIGRT if (PARISC)
54071+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
54072+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
54073+ select PAX_REFCOUNT if (X86 || SPARC64)
54074+ select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
54075+ help
54076+ If you say Y here, many of the features of grsecurity will be
54077+ enabled, which will protect you against many kinds of attacks
54078+ against your system. The heightened security comes at a cost
54079+ of an increased chance of incompatibilities with rare software
54080+ on your machine. Since this security level enables PaX, you should
54081+ view <http://pax.grsecurity.net> and read about the PaX
54082+ project. While you are there, download chpax and run it on
54083+ binaries that cause problems with PaX. Also remember that
54084+ since the /proc restrictions are enabled, you must run your
54085+ identd as gid 1001. This security level enables the following
54086+ features in addition to those listed in the low and medium
54087+ security levels:
54088+
54089+ - Additional /proc restrictions
54090+ - Chmod restrictions in chroot
54091+ - No signals, ptrace, or viewing of processes outside of chroot
54092+ - Capability restrictions in chroot
54093+ - Deny fchdir out of chroot
54094+ - Priority restrictions in chroot
54095+ - Segmentation-based implementation of PaX
54096+ - Mprotect restrictions
54097+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
54098+ - Kernel stack randomization
54099+ - Mount/unmount/remount logging
54100+ - Kernel symbol hiding
54101+ - Prevention of memory exhaustion-based exploits
54102+ - Hardening of module auto-loading
54103+ - Ptrace restrictions
54104+ - Restricted vm86 mode
54105+ - Restricted sysfs/debugfs
54106+ - Active kernel exploit response
54107+
54108+config GRKERNSEC_CUSTOM
54109+ bool "Custom"
54110+ help
54111+ If you say Y here, you will be able to configure every grsecurity
54112+ option, which allows you to enable many more features that aren't
54113+ covered in the basic security levels. These additional features
54114+ include TPE, socket restrictions, and the sysctl system for
54115+ grsecurity. It is advised that you read through the help for
54116+ each option to determine its usefulness in your situation.
54117+
54118+endchoice
54119+
54120+menu "Address Space Protection"
54121+depends on GRKERNSEC
54122+
54123+config GRKERNSEC_KMEM
54124+ bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
54125+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
54126+ help
54127+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
54128+ be written to via mmap or otherwise to modify the running kernel.
54129+ /dev/port will also not be allowed to be opened. If you have module
54130+ support disabled, enabling this will close up four ways that are
54131+ currently used to insert malicious code into the running kernel.
54132+ Even with all these features enabled, we still highly recommend that
54133+ you use the RBAC system, as it is still possible for an attacker to
54134+ modify the running kernel through privileged I/O granted by ioperm/iopl.
54135+ If you are not using XFree86, you may be able to stop this additional
54136+ case by enabling the 'Disable privileged I/O' option. Though nothing
54137+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
54138+ but only to video memory, which is the only writing we allow in this
54139+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
54140+ not be allowed to mprotect it with PROT_WRITE later.
54141+ It is highly recommended that you say Y here if you meet all the
54142+ conditions above.
54143+
54144+config GRKERNSEC_VM86
54145+ bool "Restrict VM86 mode"
54146+ depends on X86_32
54147+
54148+ help
54149+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
54150+ make use of a special execution mode on 32bit x86 processors called
54151+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
54152+ video cards and will still work with this option enabled. The purpose
54153+ of the option is to prevent exploitation of emulation errors in
54154+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
54155+ Nearly all users should be able to enable this option.
54156+
54157+config GRKERNSEC_IO
54158+ bool "Disable privileged I/O"
54159+ depends on X86
54160+ select RTC_CLASS
54161+ select RTC_INTF_DEV
54162+ select RTC_DRV_CMOS
54163+
54164+ help
54165+ If you say Y here, all ioperm and iopl calls will return an error.
54166+ Ioperm and iopl can be used to modify the running kernel.
54167+ Unfortunately, some programs need this access to operate properly,
54168+ the most notable of which are XFree86 and hwclock. hwclock can be
54169+ remedied by having RTC support in the kernel, so real-time
54170+ clock support is enabled if this option is enabled, to ensure
54171+ that hwclock operates correctly. XFree86 still will not
54172+ operate correctly with this option enabled, so DO NOT CHOOSE Y
54173+ IF YOU USE XFree86. If you use XFree86 and you still want to
54174+ protect your kernel against modification, use the RBAC system.
54175+
54176+config GRKERNSEC_PROC_MEMMAP
54177+ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
54178+ default y if (PAX_NOEXEC || PAX_ASLR)
54179+ depends on PAX_NOEXEC || PAX_ASLR
54180+ help
54181+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
54182+ give no information about the addresses of its mappings if
54183+ PaX features that rely on random addresses are enabled on the task.
54184+ If you use PaX it is greatly recommended that you say Y here as it
54185+ closes up a hole that makes the full ASLR useless for suid
54186+ binaries.
54187+
54188+config GRKERNSEC_BRUTE
54189+ bool "Deter exploit bruteforcing"
54190+ help
54191+ If you say Y here, attempts to bruteforce exploits against forking
54192+ daemons such as apache or sshd, as well as against suid/sgid binaries
54193+ will be deterred. When a child of a forking daemon is killed by PaX
54194+ or crashes due to an illegal instruction or other suspicious signal,
54195+ the parent process will be delayed 30 seconds upon every subsequent
54196+ fork until the administrator is able to assess the situation and
54197+ restart the daemon.
54198+ In the suid/sgid case, the attempt is logged, the user has all their
54199+ processes terminated, and they are prevented from executing any further
54200+ processes for 15 minutes.
54201+ It is recommended that you also enable signal logging in the auditing
54202+ section so that logs are generated when a process triggers a suspicious
54203+ signal.
54204+ If the sysctl option is enabled, a sysctl option with name
54205+ "deter_bruteforce" is created.
54206+
54207+
54208+config GRKERNSEC_MODHARDEN
54209+ bool "Harden module auto-loading"
54210+ depends on MODULES
54211+ help
54212+ If you say Y here, module auto-loading in response to use of some
54213+ feature implemented by an unloaded module will be restricted to
54214+ root users. Enabling this option helps defend against attacks
54215+ by unprivileged users who abuse the auto-loading behavior to
54216+ cause a vulnerable module to load that is then exploited.
54217+
54218+ If this option prevents a legitimate use of auto-loading for a
54219+ non-root user, the administrator can execute modprobe manually
54220+ with the exact name of the module mentioned in the alert log.
54221+ Alternatively, the administrator can add the module to the list
54222+ of modules loaded at boot by modifying init scripts.
54223+
54224+ Modification of init scripts will most likely be needed on
54225+ Ubuntu servers with encrypted home directory support enabled,
54226+ as the first non-root user logging in will cause the ecb(aes),
54227+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
54228+
54229+config GRKERNSEC_HIDESYM
54230+ bool "Hide kernel symbols"
54231+ help
54232+ If you say Y here, getting information on loaded modules, and
54233+ displaying all kernel symbols through a syscall will be restricted
54234+ to users with CAP_SYS_MODULE. For software compatibility reasons,
54235+ /proc/kallsyms will be restricted to the root user. The RBAC
54236+ system can hide that entry even from root.
54237+
54238+ This option also prevents leaking of kernel addresses through
54239+ several /proc entries.
54240+
54241+ Note that this option is only effective provided the following
54242+ conditions are met:
54243+ 1) The kernel using grsecurity is not precompiled by some distribution
54244+ 2) You have also enabled GRKERNSEC_DMESG
54245+ 3) You are using the RBAC system and hiding other files such as your
54246+ kernel image and System.map. Alternatively, enabling this option
54247+ causes the permissions on /boot, /lib/modules, and the kernel
54248+ source directory to change at compile time to prevent
54249+ reading by non-root users.
54250+ If the above conditions are met, this option will aid in providing a
54251+ useful protection against local kernel exploitation of overflows
54252+ and arbitrary read/write vulnerabilities.
54253+
54254+config GRKERNSEC_KERN_LOCKOUT
54255+ bool "Active kernel exploit response"
54256+ depends on X86 || ARM || PPC || SPARC
54257+ help
54258+ If you say Y here, when a PaX alert is triggered due to suspicious
54259+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
54260+ or an OOPs occurs due to bad memory accesses, instead of just
54261+ terminating the offending process (and potentially allowing
54262+ a subsequent exploit from the same user), we will take one of two
54263+ actions:
54264+ If the user was root, we will panic the system
54265+ If the user was non-root, we will log the attempt, terminate
54266+ all processes owned by the user, then prevent them from creating
54267+ any new processes until the system is restarted
54268+ This deters repeated kernel exploitation/bruteforcing attempts
54269+ and is useful for later forensics.
54270+
54271+endmenu
54272+menu "Role Based Access Control Options"
54273+depends on GRKERNSEC
54274+
54275+config GRKERNSEC_RBAC_DEBUG
54276+ bool
54277+
54278+config GRKERNSEC_NO_RBAC
54279+ bool "Disable RBAC system"
54280+ help
54281+ If you say Y here, the /dev/grsec device will be removed from the kernel,
54282+ preventing the RBAC system from being enabled. You should only say Y
54283+ here if you have no intention of using the RBAC system, so as to prevent
54284+ an attacker with root access from misusing the RBAC system to hide files
54285+ and processes when loadable module support and /dev/[k]mem have been
54286+ locked down.
54287+
54288+config GRKERNSEC_ACL_HIDEKERN
54289+ bool "Hide kernel processes"
54290+ help
54291+ If you say Y here, all kernel threads will be hidden to all
54292+ processes but those whose subject has the "view hidden processes"
54293+ flag.
54294+
54295+config GRKERNSEC_ACL_MAXTRIES
54296+ int "Maximum tries before password lockout"
54297+ default 3
54298+ help
54299+ This option enforces the maximum number of times a user can attempt
54300+ to authorize themselves with the grsecurity RBAC system before being
54301+ denied the ability to attempt authorization again for a specified time.
54302+ The lower the number, the harder it will be to brute-force a password.
54303+
54304+config GRKERNSEC_ACL_TIMEOUT
54305+ int "Time to wait after max password tries, in seconds"
54306+ default 30
54307+ help
54308+ This option specifies the time the user must wait after attempting to
54309+ authorize to the RBAC system with the maximum number of invalid
54310+ passwords. The higher the number, the harder it will be to brute-force
54311+ a password.
54312+
54313+endmenu
54314+menu "Filesystem Protections"
54315+depends on GRKERNSEC
54316+
54317+config GRKERNSEC_PROC
54318+ bool "Proc restrictions"
54319+ help
54320+ If you say Y here, the permissions of the /proc filesystem
54321+ will be altered to enhance system security and privacy. You MUST
54322+ choose either a user only restriction or a user and group restriction.
54323+ Depending upon the option you choose, you can either restrict users to
54324+ see only the processes they themselves run, or choose a group that can
54325+ view all processes and files normally restricted to root if you choose
54326+ the "restrict to user only" option. NOTE: If you're running identd as
54327+ a non-root user, you will have to run it as the group you specify here.
54328+
54329+config GRKERNSEC_PROC_USER
54330+ bool "Restrict /proc to user only"
54331+ depends on GRKERNSEC_PROC
54332+ help
54333+ If you say Y here, non-root users will only be able to view their own
54334+ processes, and restricts them from viewing network-related information,
54335+ and viewing kernel symbol and module information.
54336+
54337+config GRKERNSEC_PROC_USERGROUP
54338+ bool "Allow special group"
54339+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
54340+ help
54341+ If you say Y here, you will be able to select a group that will be
54342+ able to view all processes and network-related information. If you've
54343+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
54344+ remain hidden. This option is useful if you want to run identd as
54345+ a non-root user.
54346+
54347+config GRKERNSEC_PROC_GID
54348+ int "GID for special group"
54349+ depends on GRKERNSEC_PROC_USERGROUP
54350+ default 1001
54351+
54352+config GRKERNSEC_PROC_ADD
54353+ bool "Additional restrictions"
54354+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
54355+ help
54356+ If you say Y here, additional restrictions will be placed on
54357+ /proc that keep normal users from viewing device information and
54358+ slabinfo information that could be useful for exploits.
54359+
54360+config GRKERNSEC_LINK
54361+ bool "Linking restrictions"
54362+ help
54363+ If you say Y here, /tmp race exploits will be prevented, since users
54364+ will no longer be able to follow symlinks owned by other users in
54365+ world-writable +t directories (e.g. /tmp), unless the owner of the
54366+	  symlink is the owner of the directory. Users will also not be
54367+ able to hardlink to files they do not own. If the sysctl option is
54368+ enabled, a sysctl option with name "linking_restrictions" is created.
54369+
54370+config GRKERNSEC_FIFO
54371+ bool "FIFO restrictions"
54372+ help
54373+ If you say Y here, users will not be able to write to FIFOs they don't
54374+ own in world-writable +t directories (e.g. /tmp), unless the owner of
54375+ the FIFO is the same owner of the directory it's held in. If the sysctl
54376+ option is enabled, a sysctl option with name "fifo_restrictions" is
54377+ created.
54378+
54379+config GRKERNSEC_SYSFS_RESTRICT
54380+ bool "Sysfs/debugfs restriction"
54381+ depends on SYSFS
54382+ help
54383+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
54384+ any filesystem normally mounted under it (e.g. debugfs) will only
54385+ be accessible by root. These filesystems generally provide access
54386+ to hardware and debug information that isn't appropriate for unprivileged
54387+ users of the system. Sysfs and debugfs have also become a large source
54388+ of new vulnerabilities, ranging from infoleaks to local compromise.
54389+ There has been very little oversight with an eye toward security involved
54390+ in adding new exporters of information to these filesystems, so their
54391+ use is discouraged.
54392+ This option is equivalent to a chmod 0700 of the mount paths.
54393+
54394+config GRKERNSEC_ROFS
54395+ bool "Runtime read-only mount protection"
54396+ help
54397+ If you say Y here, a sysctl option with name "romount_protect" will
54398+ be created. By setting this option to 1 at runtime, filesystems
54399+ will be protected in the following ways:
54400+ * No new writable mounts will be allowed
54401+ * Existing read-only mounts won't be able to be remounted read/write
54402+ * Write operations will be denied on all block devices
54403+ This option acts independently of grsec_lock: once it is set to 1,
54404+ it cannot be turned off. Therefore, please be mindful of the resulting
54405+ behavior if this option is enabled in an init script on a read-only
54406+ filesystem. This feature is mainly intended for secure embedded systems.
54407+
54408+config GRKERNSEC_CHROOT
54409+ bool "Chroot jail restrictions"
54410+ help
54411+ If you say Y here, you will be able to choose several options that will
54412+ make breaking out of a chrooted jail much more difficult. If you
54413+ encounter no software incompatibilities with the following options, it
54414+ is recommended that you enable each one.
54415+
54416+config GRKERNSEC_CHROOT_MOUNT
54417+ bool "Deny mounts"
54418+ depends on GRKERNSEC_CHROOT
54419+ help
54420+ If you say Y here, processes inside a chroot will not be able to
54421+ mount or remount filesystems. If the sysctl option is enabled, a
54422+ sysctl option with name "chroot_deny_mount" is created.
54423+
54424+config GRKERNSEC_CHROOT_DOUBLE
54425+ bool "Deny double-chroots"
54426+ depends on GRKERNSEC_CHROOT
54427+ help
54428+ If you say Y here, processes inside a chroot will not be able to chroot
54429+ again outside the chroot. This is a widely used method of breaking
54430+ out of a chroot jail and should not be allowed. If the sysctl
54431+ option is enabled, a sysctl option with name
54432+ "chroot_deny_chroot" is created.
54433+
54434+config GRKERNSEC_CHROOT_PIVOT
54435+ bool "Deny pivot_root in chroot"
54436+ depends on GRKERNSEC_CHROOT
54437+ help
54438+ If you say Y here, processes inside a chroot will not be able to use
54439+ a function called pivot_root() that was introduced in Linux 2.3.41. It
54440+ works similar to chroot in that it changes the root filesystem. This
54441+ function could be misused in a chrooted process to attempt to break out
54442+ of the chroot, and therefore should not be allowed. If the sysctl
54443+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
54444+ created.
54445+
54446+config GRKERNSEC_CHROOT_CHDIR
54447+ bool "Enforce chdir(\"/\") on all chroots"
54448+ depends on GRKERNSEC_CHROOT
54449+ help
54450+ If you say Y here, the current working directory of all newly-chrooted
54451+	  applications will be set to the root directory of the chroot.
54452+ The man page on chroot(2) states:
54453+ Note that this call does not change the current working
54454+ directory, so that `.' can be outside the tree rooted at
54455+ `/'. In particular, the super-user can escape from a
54456+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
54457+
54458+ It is recommended that you say Y here, since it's not known to break
54459+ any software. If the sysctl option is enabled, a sysctl option with
54460+ name "chroot_enforce_chdir" is created.
54461+
54462+config GRKERNSEC_CHROOT_CHMOD
54463+ bool "Deny (f)chmod +s"
54464+ depends on GRKERNSEC_CHROOT
54465+ help
54466+ If you say Y here, processes inside a chroot will not be able to chmod
54467+ or fchmod files to make them have suid or sgid bits. This protects
54468+ against another published method of breaking a chroot. If the sysctl
54469+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
54470+ created.
54471+
54472+config GRKERNSEC_CHROOT_FCHDIR
54473+ bool "Deny fchdir out of chroot"
54474+ depends on GRKERNSEC_CHROOT
54475+ help
54476+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
54477+ to a file descriptor of the chrooting process that points to a directory
54478+ outside the filesystem will be stopped. If the sysctl option
54479+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
54480+
54481+config GRKERNSEC_CHROOT_MKNOD
54482+ bool "Deny mknod"
54483+ depends on GRKERNSEC_CHROOT
54484+ help
54485+ If you say Y here, processes inside a chroot will not be allowed to
54486+ mknod. The problem with using mknod inside a chroot is that it
54487+ would allow an attacker to create a device entry that is the same
54488+ as one on the physical root of your system, which could range from
54489+ anything from the console device to a device for your harddrive (which
54490+ they could then use to wipe the drive or steal data). It is recommended
54491+ that you say Y here, unless you run into software incompatibilities.
54492+ If the sysctl option is enabled, a sysctl option with name
54493+ "chroot_deny_mknod" is created.
54494+
54495+config GRKERNSEC_CHROOT_SHMAT
54496+ bool "Deny shmat() out of chroot"
54497+ depends on GRKERNSEC_CHROOT
54498+ help
54499+ If you say Y here, processes inside a chroot will not be able to attach
54500+ to shared memory segments that were created outside of the chroot jail.
54501+ It is recommended that you say Y here. If the sysctl option is enabled,
54502+ a sysctl option with name "chroot_deny_shmat" is created.
54503+
54504+config GRKERNSEC_CHROOT_UNIX
54505+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
54506+ depends on GRKERNSEC_CHROOT
54507+ help
54508+ If you say Y here, processes inside a chroot will not be able to
54509+ connect to abstract (meaning not belonging to a filesystem) Unix
54510+ domain sockets that were bound outside of a chroot. It is recommended
54511+ that you say Y here. If the sysctl option is enabled, a sysctl option
54512+ with name "chroot_deny_unix" is created.
54513+
54514+config GRKERNSEC_CHROOT_FINDTASK
54515+ bool "Protect outside processes"
54516+ depends on GRKERNSEC_CHROOT
54517+ help
54518+ If you say Y here, processes inside a chroot will not be able to
54519+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
54520+ getsid, or view any process outside of the chroot. If the sysctl
54521+ option is enabled, a sysctl option with name "chroot_findtask" is
54522+ created.
54523+
54524+config GRKERNSEC_CHROOT_NICE
54525+ bool "Restrict priority changes"
54526+ depends on GRKERNSEC_CHROOT
54527+ help
54528+ If you say Y here, processes inside a chroot will not be able to raise
54529+ the priority of processes in the chroot, or alter the priority of
54530+ processes outside the chroot. This provides more security than simply
54531+ removing CAP_SYS_NICE from the process' capability set. If the
54532+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
54533+ is created.
54534+
54535+config GRKERNSEC_CHROOT_SYSCTL
54536+ bool "Deny sysctl writes"
54537+ depends on GRKERNSEC_CHROOT
54538+ help
54539+ If you say Y here, an attacker in a chroot will not be able to
54540+ write to sysctl entries, either by sysctl(2) or through a /proc
54541+ interface. It is strongly recommended that you say Y here. If the
54542+ sysctl option is enabled, a sysctl option with name
54543+ "chroot_deny_sysctl" is created.
54544+
54545+config GRKERNSEC_CHROOT_CAPS
54546+ bool "Capability restrictions"
54547+ depends on GRKERNSEC_CHROOT
54548+ help
54549+ If you say Y here, the capabilities on all processes within a
54550+ chroot jail will be lowered to stop module insertion, raw i/o,
54551+ system and net admin tasks, rebooting the system, modifying immutable
54552+ files, modifying IPC owned by another, and changing the system time.
54553+ This is left an option because it can break some apps. Disable this
54554+ if your chrooted apps are having problems performing those kinds of
54555+ tasks. If the sysctl option is enabled, a sysctl option with
54556+ name "chroot_caps" is created.
54557+
54558+endmenu
54559+menu "Kernel Auditing"
54560+depends on GRKERNSEC
54561+
54562+config GRKERNSEC_AUDIT_GROUP
54563+ bool "Single group for auditing"
54564+ help
54565+ If you say Y here, the exec, chdir, and (un)mount logging features
54566+ will only operate on a group you specify. This option is recommended
54567+ if you only want to watch certain users instead of having a large
54568+ amount of logs from the entire system. If the sysctl option is enabled,
54569+ a sysctl option with name "audit_group" is created.
54570+
54571+config GRKERNSEC_AUDIT_GID
54572+ int "GID for auditing"
54573+ depends on GRKERNSEC_AUDIT_GROUP
54574+ default 1007
54575+
54576+config GRKERNSEC_EXECLOG
54577+ bool "Exec logging"
54578+ help
54579+ If you say Y here, all execve() calls will be logged (since the
54580+ other exec*() calls are frontends to execve(), all execution
54581+ will be logged). Useful for shell-servers that like to keep track
54582+ of their users. If the sysctl option is enabled, a sysctl option with
54583+ name "exec_logging" is created.
54584+ WARNING: This option when enabled will produce a LOT of logs, especially
54585+ on an active system.
54586+
54587+config GRKERNSEC_RESLOG
54588+ bool "Resource logging"
54589+ help
54590+ If you say Y here, all attempts to overstep resource limits will
54591+ be logged with the resource name, the requested size, and the current
54592+ limit. It is highly recommended that you say Y here. If the sysctl
54593+ option is enabled, a sysctl option with name "resource_logging" is
54594+ created. If the RBAC system is enabled, the sysctl value is ignored.
54595+
54596+config GRKERNSEC_CHROOT_EXECLOG
54597+ bool "Log execs within chroot"
54598+ help
54599+ If you say Y here, all executions inside a chroot jail will be logged
54600+ to syslog. This can cause a large amount of logs if certain
54601+ applications (eg. djb's daemontools) are installed on the system, and
54602+ is therefore left as an option. If the sysctl option is enabled, a
54603+ sysctl option with name "chroot_execlog" is created.
54604+
54605+config GRKERNSEC_AUDIT_PTRACE
54606+ bool "Ptrace logging"
54607+ help
54608+ If you say Y here, all attempts to attach to a process via ptrace
54609+ will be logged. If the sysctl option is enabled, a sysctl option
54610+ with name "audit_ptrace" is created.
54611+
54612+config GRKERNSEC_AUDIT_CHDIR
54613+ bool "Chdir logging"
54614+ help
54615+ If you say Y here, all chdir() calls will be logged. If the sysctl
54616+ option is enabled, a sysctl option with name "audit_chdir" is created.
54617+
54618+config GRKERNSEC_AUDIT_MOUNT
54619+ bool "(Un)Mount logging"
54620+ help
54621+ If you say Y here, all mounts and unmounts will be logged. If the
54622+ sysctl option is enabled, a sysctl option with name "audit_mount" is
54623+ created.
54624+
54625+config GRKERNSEC_SIGNAL
54626+ bool "Signal logging"
54627+ help
54628+ If you say Y here, certain important signals will be logged, such as
54629+ SIGSEGV, which will as a result inform you of when an error in a program
54630+ occurred, which in some cases could mean a possible exploit attempt.
54631+ If the sysctl option is enabled, a sysctl option with name
54632+ "signal_logging" is created.
54633+
54634+config GRKERNSEC_FORKFAIL
54635+ bool "Fork failure logging"
54636+ help
54637+ If you say Y here, all failed fork() attempts will be logged.
54638+ This could suggest a fork bomb, or someone attempting to overstep
54639+ their process limit. If the sysctl option is enabled, a sysctl option
54640+ with name "forkfail_logging" is created.
54641+
54642+config GRKERNSEC_TIME
54643+ bool "Time change logging"
54644+ help
54645+ If you say Y here, any changes of the system clock will be logged.
54646+ If the sysctl option is enabled, a sysctl option with name
54647+ "timechange_logging" is created.
54648+
54649+config GRKERNSEC_PROC_IPADDR
54650+ bool "/proc/<pid>/ipaddr support"
54651+ help
54652+ If you say Y here, a new entry will be added to each /proc/<pid>
54653+ directory that contains the IP address of the person using the task.
54654+ The IP is carried across local TCP and AF_UNIX stream sockets.
54655+ This information can be useful for IDS/IPSes to perform remote response
54656+ to a local attack. The entry is readable by only the owner of the
54657+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
54658+ the RBAC system), and thus does not create privacy concerns.
54659+
54660+config GRKERNSEC_RWXMAP_LOG
54661+ bool 'Denied RWX mmap/mprotect logging'
54662+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
54663+ help
54664+ If you say Y here, calls to mmap() and mprotect() with explicit
54665+ usage of PROT_WRITE and PROT_EXEC together will be logged when
54666+ denied by the PAX_MPROTECT feature. If the sysctl option is
54667+ enabled, a sysctl option with name "rwxmap_logging" is created.
54668+
54669+config GRKERNSEC_AUDIT_TEXTREL
54670+ bool 'ELF text relocations logging (READ HELP)'
54671+ depends on PAX_MPROTECT
54672+ help
54673+ If you say Y here, text relocations will be logged with the filename
54674+ of the offending library or binary. The purpose of the feature is
54675+ to help Linux distribution developers get rid of libraries and
54676+ binaries that need text relocations which hinder the future progress
54677+ of PaX. Only Linux distribution developers should say Y here, and
54678+ never on a production machine, as this option creates an information
54679+ leak that could aid an attacker in defeating the randomization of
54680+ a single memory region. If the sysctl option is enabled, a sysctl
54681+ option with name "audit_textrel" is created.
54682+
54683+endmenu
54684+
54685+menu "Executable Protections"
54686+depends on GRKERNSEC
54687+
54688+config GRKERNSEC_DMESG
54689+ bool "Dmesg(8) restriction"
54690+ help
54691+ If you say Y here, non-root users will not be able to use dmesg(8)
54692+ to view up to the last 4kb of messages in the kernel's log buffer.
54693+ The kernel's log buffer often contains kernel addresses and other
54694+ identifying information useful to an attacker in fingerprinting a
54695+ system for a targeted exploit.
54696+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
54697+ created.
54698+
54699+config GRKERNSEC_HARDEN_PTRACE
54700+ bool "Deter ptrace-based process snooping"
54701+ help
54702+ If you say Y here, TTY sniffers and other malicious monitoring
54703+ programs implemented through ptrace will be defeated. If you
54704+ have been using the RBAC system, this option has already been
54705+ enabled for several years for all users, with the ability to make
54706+ fine-grained exceptions.
54707+
54708+ This option only affects the ability of non-root users to ptrace
54709+ processes that are not a descendant of the ptracing process.
54710+ This means that strace ./binary and gdb ./binary will still work,
54711+ but attaching to arbitrary processes will not. If the sysctl
54712+ option is enabled, a sysctl option with name "harden_ptrace" is
54713+ created.
54714+
54715+config GRKERNSEC_TPE
54716+ bool "Trusted Path Execution (TPE)"
54717+ help
54718+ If you say Y here, you will be able to choose a gid to add to the
54719+ supplementary groups of users you want to mark as "untrusted."
54720+ These users will not be able to execute any files that are not in
54721+ root-owned directories writable only by root. If the sysctl option
54722+ is enabled, a sysctl option with name "tpe" is created.
54723+
54724+config GRKERNSEC_TPE_ALL
54725+ bool "Partially restrict all non-root users"
54726+ depends on GRKERNSEC_TPE
54727+ help
54728+ If you say Y here, all non-root users will be covered under
54729+ a weaker TPE restriction. This is separate from, and in addition to,
54730+ the main TPE options that you have selected elsewhere. Thus, if a
54731+ "trusted" GID is chosen, this restriction applies to even that GID.
54732+ Under this restriction, all non-root users will only be allowed to
54733+ execute files in directories they own that are not group or
54734+ world-writable, or in directories owned by root and writable only by
54735+ root. If the sysctl option is enabled, a sysctl option with name
54736+ "tpe_restrict_all" is created.
54737+
54738+config GRKERNSEC_TPE_INVERT
54739+ bool "Invert GID option"
54740+ depends on GRKERNSEC_TPE
54741+ help
54742+ If you say Y here, the group you specify in the TPE configuration will
54743+ decide what group TPE restrictions will be *disabled* for. This
54744+ option is useful if you want TPE restrictions to be applied to most
54745+ users on the system. If the sysctl option is enabled, a sysctl option
54746+ with name "tpe_invert" is created. Unlike other sysctl options, this
54747+ entry will default to on for backward-compatibility.
54748+
54749+config GRKERNSEC_TPE_GID
54750+ int "GID for untrusted users"
54751+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
54752+ default 1005
54753+ help
54754+ Setting this GID determines what group TPE restrictions will be
54755+ *enabled* for. If the sysctl option is enabled, a sysctl option
54756+ with name "tpe_gid" is created.
54757+
54758+config GRKERNSEC_TPE_GID
54759+ int "GID for trusted users"
54760+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
54761+ default 1005
54762+ help
54763+ Setting this GID determines what group TPE restrictions will be
54764+ *disabled* for. If the sysctl option is enabled, a sysctl option
54765+ with name "tpe_gid" is created.
54766+
54767+endmenu
54768+menu "Network Protections"
54769+depends on GRKERNSEC
54770+
54771+config GRKERNSEC_RANDNET
54772+ bool "Larger entropy pools"
54773+ help
54774+ If you say Y here, the entropy pools used for many features of Linux
54775+ and grsecurity will be doubled in size. Since several grsecurity
54776+ features use additional randomness, it is recommended that you say Y
54777+ here. Saying Y here has a similar effect as modifying
54778+ /proc/sys/kernel/random/poolsize.
54779+
54780+config GRKERNSEC_BLACKHOLE
54781+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
54782+ depends on NET
54783+ help
54784+ If you say Y here, neither TCP resets nor ICMP
54785+ destination-unreachable packets will be sent in response to packets
54786+ sent to ports for which no associated listening process exists.
54787+ This feature supports both IPV4 and IPV6 and exempts the
54788+ loopback interface from blackholing. Enabling this feature
54789+ makes a host more resilient to DoS attacks and reduces network
54790+ visibility against scanners.
54791+
54792+ The blackhole feature as-implemented is equivalent to the FreeBSD
54793+ blackhole feature, as it prevents RST responses to all packets, not
54794+ just SYNs. Under most application behavior this causes no
54795+ problems, but applications (like haproxy) may not close certain
54796+ connections in a way that cleanly terminates them on the remote
54797+ end, leaving the remote host in LAST_ACK state. Because of this
54798+ side-effect and to prevent intentional LAST_ACK DoSes, this
54799+ feature also adds automatic mitigation against such attacks.
54800+ The mitigation drastically reduces the amount of time a socket
54801+ can spend in LAST_ACK state. If you're using haproxy and not
54802+ all servers it connects to have this option enabled, consider
54803+ disabling this feature on the haproxy host.
54804+
54805+ If the sysctl option is enabled, two sysctl options with names
54806+ "ip_blackhole" and "lastack_retries" will be created.
54807+ While "ip_blackhole" takes the standard zero/non-zero on/off
54808+ toggle, "lastack_retries" uses the same kinds of values as
54809+ "tcp_retries1" and "tcp_retries2". The default value of 4
54810+ prevents a socket from lasting more than 45 seconds in LAST_ACK
54811+ state.
54812+
54813+config GRKERNSEC_SOCKET
54814+ bool "Socket restrictions"
54815+ depends on NET
54816+ help
54817+ If you say Y here, you will be able to choose from several options.
54818+ If you assign a GID on your system and add it to the supplementary
54819+ groups of users you want to restrict socket access to, this patch
54820+ will perform up to three things, based on the option(s) you choose.
54821+
54822+config GRKERNSEC_SOCKET_ALL
54823+ bool "Deny any sockets to group"
54824+ depends on GRKERNSEC_SOCKET
54825+ help
54826+ If you say Y here, you will be able to choose a GID whose users will
54827+ be unable to connect to other hosts from your machine or run server
54828+ applications from your machine. If the sysctl option is enabled, a
54829+ sysctl option with name "socket_all" is created.
54830+
54831+config GRKERNSEC_SOCKET_ALL_GID
54832+ int "GID to deny all sockets for"
54833+ depends on GRKERNSEC_SOCKET_ALL
54834+ default 1004
54835+ help
54836+ Here you can choose the GID to disable socket access for. Remember to
54837+ add the users you want socket access disabled for to the GID
54838+ specified here. If the sysctl option is enabled, a sysctl option
54839+ with name "socket_all_gid" is created.
54840+
54841+config GRKERNSEC_SOCKET_CLIENT
54842+ bool "Deny client sockets to group"
54843+ depends on GRKERNSEC_SOCKET
54844+ help
54845+ If you say Y here, you will be able to choose a GID whose users will
54846+ be unable to connect to other hosts from your machine, but will be
54847+ able to run servers. If this option is enabled, all users in the group
54848+ you specify will have to use passive mode when initiating ftp transfers
54849+ from the shell on your machine. If the sysctl option is enabled, a
54850+ sysctl option with name "socket_client" is created.
54851+
54852+config GRKERNSEC_SOCKET_CLIENT_GID
54853+ int "GID to deny client sockets for"
54854+ depends on GRKERNSEC_SOCKET_CLIENT
54855+ default 1003
54856+ help
54857+ Here you can choose the GID to disable client socket access for.
54858+ Remember to add the users you want client socket access disabled for to
54859+ the GID specified here. If the sysctl option is enabled, a sysctl
54860+ option with name "socket_client_gid" is created.
54861+
54862+config GRKERNSEC_SOCKET_SERVER
54863+ bool "Deny server sockets to group"
54864+ depends on GRKERNSEC_SOCKET
54865+ help
54866+ If you say Y here, you will be able to choose a GID whose users will
54867+ be unable to run server applications from your machine. If the sysctl
54868+ option is enabled, a sysctl option with name "socket_server" is created.
54869+
54870+config GRKERNSEC_SOCKET_SERVER_GID
54871+ int "GID to deny server sockets for"
54872+ depends on GRKERNSEC_SOCKET_SERVER
54873+ default 1002
54874+ help
54875+ Here you can choose the GID to disable server socket access for.
54876+ Remember to add the users you want server socket access disabled for to
54877+ the GID specified here. If the sysctl option is enabled, a sysctl
54878+ option with name "socket_server_gid" is created.
54879+
54880+endmenu
54881+menu "Sysctl support"
54882+depends on GRKERNSEC && SYSCTL
54883+
54884+config GRKERNSEC_SYSCTL
54885+ bool "Sysctl support"
54886+ help
54887+ If you say Y here, you will be able to change the options that
54888+ grsecurity runs with at bootup, without having to recompile your
54889+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
54890+ to enable (1) or disable (0) various features. All the sysctl entries
54891+ are mutable until the "grsec_lock" entry is set to a non-zero value.
54892+ All features enabled in the kernel configuration are disabled at boot
54893+ if you do not say Y to the "Turn on features by default" option.
54894+ All options should be set at startup, and the grsec_lock entry should
54895+ be set to a non-zero value after all the options are set.
54896+ *THIS IS EXTREMELY IMPORTANT*
54897+
54898+config GRKERNSEC_SYSCTL_DISTRO
54899+ bool "Extra sysctl support for distro makers (READ HELP)"
54900+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
54901+ help
54902+ If you say Y here, additional sysctl options will be created
54903+ for features that affect processes running as root. Therefore,
54904+ it is critical when using this option that the grsec_lock entry be
54905+ enabled after boot. Only distros with prebuilt kernel packages
54906+ with this option enabled that can ensure grsec_lock is enabled
54907+ after boot should use this option.
54908+ *Failure to set grsec_lock after boot makes all grsec features
54909+ this option covers useless*
54910+
54911+ Currently this option creates the following sysctl entries:
54912+ "Disable Privileged I/O": "disable_priv_io"
54913+
54914+config GRKERNSEC_SYSCTL_ON
54915+ bool "Turn on features by default"
54916+ depends on GRKERNSEC_SYSCTL
54917+ help
54918+ If you say Y here, instead of having all features enabled in the
54919+ kernel configuration disabled at boot time, the features will be
54920+ enabled at boot time. It is recommended you say Y here unless
54921+ there is some reason you would want all sysctl-tunable features to
54922+ be disabled by default. As mentioned elsewhere, it is important
54923+ to enable the grsec_lock entry once you have finished modifying
54924+ the sysctl entries.
54925+
54926+endmenu
54927+menu "Logging Options"
54928+depends on GRKERNSEC
54929+
54930+config GRKERNSEC_FLOODTIME
54931+ int "Seconds in between log messages (minimum)"
54932+ default 10
54933+ help
54934+ This option allows you to enforce the number of seconds between
54935+ grsecurity log messages. The default should be suitable for most
54936+ people, however, if you choose to change it, choose a value small enough
54937+ to allow informative logs to be produced, but large enough to
54938+ prevent flooding.
54939+
54940+config GRKERNSEC_FLOODBURST
54941+ int "Number of messages in a burst (maximum)"
54942+ default 6
54943+ help
54944+ This option allows you to choose the maximum number of messages allowed
54945+ within the flood time interval you chose in a separate option. The
54946+ default should be suitable for most people, however if you find that
54947+ many of your logs are being interpreted as flooding, you may want to
54948+ raise this value.
54949+
54950+endmenu
54951+
54952+endmenu
54953diff -urNp linux-3.0.4/grsecurity/Makefile linux-3.0.4/grsecurity/Makefile
54954--- linux-3.0.4/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
54955+++ linux-3.0.4/grsecurity/Makefile 2011-09-14 23:29:56.000000000 -0400
54956@@ -0,0 +1,35 @@
54957+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
54958+# during 2001-2009 it has been completely redesigned by Brad Spengler
54959+# into an RBAC system
54960+#
54961+# All code in this directory and various hooks inserted throughout the kernel
54962+# are copyright Brad Spengler - Open Source Security, Inc., and released
54963+# under the GPL v2 or higher
54964+
54965+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
54966+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
54967+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
54968+
54969+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
54970+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
54971+ gracl_learn.o grsec_log.o
54972+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
54973+
54974+ifdef CONFIG_NET
54975+obj-y += grsec_sock.o
54976+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
54977+endif
54978+
54979+ifndef CONFIG_GRKERNSEC
54980+obj-y += grsec_disabled.o
54981+endif
54982+
54983+ifdef CONFIG_GRKERNSEC_HIDESYM
54984+extra-y := grsec_hidesym.o
54985+$(obj)/grsec_hidesym.o:
54986+ @-chmod -f 500 /boot
54987+ @-chmod -f 500 /lib/modules
54988+ @-chmod -f 500 /lib64/modules
54989+ @-chmod -f 700 .
54990+ @echo ' grsec: protected kernel image paths'
54991+endif
54992diff -urNp linux-3.0.4/include/acpi/acpi_bus.h linux-3.0.4/include/acpi/acpi_bus.h
54993--- linux-3.0.4/include/acpi/acpi_bus.h 2011-07-21 22:17:23.000000000 -0400
54994+++ linux-3.0.4/include/acpi/acpi_bus.h 2011-08-23 21:47:56.000000000 -0400
54995@@ -107,7 +107,7 @@ struct acpi_device_ops {
54996 acpi_op_bind bind;
54997 acpi_op_unbind unbind;
54998 acpi_op_notify notify;
54999-};
55000+} __no_const;
55001
55002 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
55003
55004diff -urNp linux-3.0.4/include/asm-generic/atomic-long.h linux-3.0.4/include/asm-generic/atomic-long.h
55005--- linux-3.0.4/include/asm-generic/atomic-long.h 2011-07-21 22:17:23.000000000 -0400
55006+++ linux-3.0.4/include/asm-generic/atomic-long.h 2011-08-23 21:47:56.000000000 -0400
55007@@ -22,6 +22,12 @@
55008
55009 typedef atomic64_t atomic_long_t;
55010
55011+#ifdef CONFIG_PAX_REFCOUNT
55012+typedef atomic64_unchecked_t atomic_long_unchecked_t;
55013+#else
55014+typedef atomic64_t atomic_long_unchecked_t;
55015+#endif
55016+
55017 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
55018
55019 static inline long atomic_long_read(atomic_long_t *l)
55020@@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
55021 return (long)atomic64_read(v);
55022 }
55023
55024+#ifdef CONFIG_PAX_REFCOUNT
55025+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
55026+{
55027+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
55028+
55029+ return (long)atomic64_read_unchecked(v);
55030+}
55031+#endif
55032+
55033 static inline void atomic_long_set(atomic_long_t *l, long i)
55034 {
55035 atomic64_t *v = (atomic64_t *)l;
55036@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
55037 atomic64_set(v, i);
55038 }
55039
55040+#ifdef CONFIG_PAX_REFCOUNT
55041+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
55042+{
55043+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
55044+
55045+ atomic64_set_unchecked(v, i);
55046+}
55047+#endif
55048+
55049 static inline void atomic_long_inc(atomic_long_t *l)
55050 {
55051 atomic64_t *v = (atomic64_t *)l;
55052@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
55053 atomic64_inc(v);
55054 }
55055
55056+#ifdef CONFIG_PAX_REFCOUNT
55057+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
55058+{
55059+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
55060+
55061+ atomic64_inc_unchecked(v);
55062+}
55063+#endif
55064+
55065 static inline void atomic_long_dec(atomic_long_t *l)
55066 {
55067 atomic64_t *v = (atomic64_t *)l;
55068@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
55069 atomic64_dec(v);
55070 }
55071
55072+#ifdef CONFIG_PAX_REFCOUNT
55073+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
55074+{
55075+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
55076+
55077+ atomic64_dec_unchecked(v);
55078+}
55079+#endif
55080+
55081 static inline void atomic_long_add(long i, atomic_long_t *l)
55082 {
55083 atomic64_t *v = (atomic64_t *)l;
55084@@ -59,6 +101,15 @@ static inline void atomic_long_add(long
55085 atomic64_add(i, v);
55086 }
55087
55088+#ifdef CONFIG_PAX_REFCOUNT
55089+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
55090+{
55091+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
55092+
55093+ atomic64_add_unchecked(i, v);
55094+}
55095+#endif
55096+
55097 static inline void atomic_long_sub(long i, atomic_long_t *l)
55098 {
55099 atomic64_t *v = (atomic64_t *)l;
55100@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long
55101 atomic64_sub(i, v);
55102 }
55103
55104+#ifdef CONFIG_PAX_REFCOUNT
55105+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
55106+{
55107+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
55108+
55109+ atomic64_sub_unchecked(i, v);
55110+}
55111+#endif
55112+
55113 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
55114 {
55115 atomic64_t *v = (atomic64_t *)l;
55116@@ -115,6 +175,15 @@ static inline long atomic_long_inc_retur
55117 return (long)atomic64_inc_return(v);
55118 }
55119
55120+#ifdef CONFIG_PAX_REFCOUNT
55121+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
55122+{
55123+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
55124+
55125+ return (long)atomic64_inc_return_unchecked(v);
55126+}
55127+#endif
55128+
55129 static inline long atomic_long_dec_return(atomic_long_t *l)
55130 {
55131 atomic64_t *v = (atomic64_t *)l;
55132@@ -140,6 +209,12 @@ static inline long atomic_long_add_unles
55133
55134 typedef atomic_t atomic_long_t;
55135
55136+#ifdef CONFIG_PAX_REFCOUNT
55137+typedef atomic_unchecked_t atomic_long_unchecked_t;
55138+#else
55139+typedef atomic_t atomic_long_unchecked_t;
55140+#endif
55141+
55142 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
55143 static inline long atomic_long_read(atomic_long_t *l)
55144 {
55145@@ -148,6 +223,15 @@ static inline long atomic_long_read(atom
55146 return (long)atomic_read(v);
55147 }
55148
55149+#ifdef CONFIG_PAX_REFCOUNT
55150+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
55151+{
55152+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
55153+
55154+ return (long)atomic_read_unchecked(v);
55155+}
55156+#endif
55157+
55158 static inline void atomic_long_set(atomic_long_t *l, long i)
55159 {
55160 atomic_t *v = (atomic_t *)l;
55161@@ -155,6 +239,15 @@ static inline void atomic_long_set(atomi
55162 atomic_set(v, i);
55163 }
55164
55165+#ifdef CONFIG_PAX_REFCOUNT
55166+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
55167+{
55168+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
55169+
55170+ atomic_set_unchecked(v, i);
55171+}
55172+#endif
55173+
55174 static inline void atomic_long_inc(atomic_long_t *l)
55175 {
55176 atomic_t *v = (atomic_t *)l;
55177@@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomi
55178 atomic_inc(v);
55179 }
55180
55181+#ifdef CONFIG_PAX_REFCOUNT
55182+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
55183+{
55184+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
55185+
55186+ atomic_inc_unchecked(v);
55187+}
55188+#endif
55189+
55190 static inline void atomic_long_dec(atomic_long_t *l)
55191 {
55192 atomic_t *v = (atomic_t *)l;
55193@@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomi
55194 atomic_dec(v);
55195 }
55196
55197+#ifdef CONFIG_PAX_REFCOUNT
55198+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
55199+{
55200+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
55201+
55202+ atomic_dec_unchecked(v);
55203+}
55204+#endif
55205+
55206 static inline void atomic_long_add(long i, atomic_long_t *l)
55207 {
55208 atomic_t *v = (atomic_t *)l;
55209@@ -176,6 +287,15 @@ static inline void atomic_long_add(long
55210 atomic_add(i, v);
55211 }
55212
55213+#ifdef CONFIG_PAX_REFCOUNT
55214+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
55215+{
55216+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
55217+
55218+ atomic_add_unchecked(i, v);
55219+}
55220+#endif
55221+
55222 static inline void atomic_long_sub(long i, atomic_long_t *l)
55223 {
55224 atomic_t *v = (atomic_t *)l;
55225@@ -183,6 +303,15 @@ static inline void atomic_long_sub(long
55226 atomic_sub(i, v);
55227 }
55228
55229+#ifdef CONFIG_PAX_REFCOUNT
55230+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
55231+{
55232+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
55233+
55234+ atomic_sub_unchecked(i, v);
55235+}
55236+#endif
55237+
55238 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
55239 {
55240 atomic_t *v = (atomic_t *)l;
55241@@ -232,6 +361,15 @@ static inline long atomic_long_inc_retur
55242 return (long)atomic_inc_return(v);
55243 }
55244
55245+#ifdef CONFIG_PAX_REFCOUNT
55246+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
55247+{
55248+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
55249+
55250+ return (long)atomic_inc_return_unchecked(v);
55251+}
55252+#endif
55253+
55254 static inline long atomic_long_dec_return(atomic_long_t *l)
55255 {
55256 atomic_t *v = (atomic_t *)l;
55257@@ -255,4 +393,49 @@ static inline long atomic_long_add_unles
55258
55259 #endif /* BITS_PER_LONG == 64 */
55260
55261+#ifdef CONFIG_PAX_REFCOUNT
55262+static inline void pax_refcount_needs_these_functions(void)
55263+{
55264+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
55265+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
55266+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
55267+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
55268+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
55269+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
55270+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
55271+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
55272+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
55273+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
55274+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
55275+
55276+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
55277+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
55278+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
55279+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
55280+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
55281+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
55282+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
55283+}
55284+#else
55285+#define atomic_read_unchecked(v) atomic_read(v)
55286+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
55287+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
55288+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
55289+#define atomic_inc_unchecked(v) atomic_inc(v)
55290+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
55291+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
55292+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
55293+#define atomic_dec_unchecked(v) atomic_dec(v)
55294+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
55295+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
55296+
55297+#define atomic_long_read_unchecked(v) atomic_long_read(v)
55298+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
55299+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
55300+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
55301+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
55302+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
55303+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
55304+#endif
55305+
55306 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
55307diff -urNp linux-3.0.4/include/asm-generic/cache.h linux-3.0.4/include/asm-generic/cache.h
55308--- linux-3.0.4/include/asm-generic/cache.h 2011-07-21 22:17:23.000000000 -0400
55309+++ linux-3.0.4/include/asm-generic/cache.h 2011-08-23 21:47:56.000000000 -0400
55310@@ -6,7 +6,7 @@
55311 * cache lines need to provide their own cache.h.
55312 */
55313
55314-#define L1_CACHE_SHIFT 5
55315-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
55316+#define L1_CACHE_SHIFT 5UL
55317+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
55318
55319 #endif /* __ASM_GENERIC_CACHE_H */
55320diff -urNp linux-3.0.4/include/asm-generic/int-l64.h linux-3.0.4/include/asm-generic/int-l64.h
55321--- linux-3.0.4/include/asm-generic/int-l64.h 2011-07-21 22:17:23.000000000 -0400
55322+++ linux-3.0.4/include/asm-generic/int-l64.h 2011-08-23 21:47:56.000000000 -0400
55323@@ -46,6 +46,8 @@ typedef unsigned int u32;
55324 typedef signed long s64;
55325 typedef unsigned long u64;
55326
55327+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
55328+
55329 #define S8_C(x) x
55330 #define U8_C(x) x ## U
55331 #define S16_C(x) x
55332diff -urNp linux-3.0.4/include/asm-generic/int-ll64.h linux-3.0.4/include/asm-generic/int-ll64.h
55333--- linux-3.0.4/include/asm-generic/int-ll64.h 2011-07-21 22:17:23.000000000 -0400
55334+++ linux-3.0.4/include/asm-generic/int-ll64.h 2011-08-23 21:47:56.000000000 -0400
55335@@ -51,6 +51,8 @@ typedef unsigned int u32;
55336 typedef signed long long s64;
55337 typedef unsigned long long u64;
55338
55339+typedef unsigned long long intoverflow_t;
55340+
55341 #define S8_C(x) x
55342 #define U8_C(x) x ## U
55343 #define S16_C(x) x
55344diff -urNp linux-3.0.4/include/asm-generic/kmap_types.h linux-3.0.4/include/asm-generic/kmap_types.h
55345--- linux-3.0.4/include/asm-generic/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
55346+++ linux-3.0.4/include/asm-generic/kmap_types.h 2011-08-23 21:47:56.000000000 -0400
55347@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
55348 KMAP_D(17) KM_NMI,
55349 KMAP_D(18) KM_NMI_PTE,
55350 KMAP_D(19) KM_KDB,
55351+KMAP_D(20) KM_CLEARPAGE,
55352 /*
55353 * Remember to update debug_kmap_atomic() when adding new kmap types!
55354 */
55355-KMAP_D(20) KM_TYPE_NR
55356+KMAP_D(21) KM_TYPE_NR
55357 };
55358
55359 #undef KMAP_D
55360diff -urNp linux-3.0.4/include/asm-generic/pgtable.h linux-3.0.4/include/asm-generic/pgtable.h
55361--- linux-3.0.4/include/asm-generic/pgtable.h 2011-07-21 22:17:23.000000000 -0400
55362+++ linux-3.0.4/include/asm-generic/pgtable.h 2011-08-23 21:47:56.000000000 -0400
55363@@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
55364 #endif /* __HAVE_ARCH_PMD_WRITE */
55365 #endif
55366
55367+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
55368+static inline unsigned long pax_open_kernel(void) { return 0; }
55369+#endif
55370+
55371+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
55372+static inline unsigned long pax_close_kernel(void) { return 0; }
55373+#endif
55374+
55375 #endif /* !__ASSEMBLY__ */
55376
55377 #endif /* _ASM_GENERIC_PGTABLE_H */
55378diff -urNp linux-3.0.4/include/asm-generic/pgtable-nopmd.h linux-3.0.4/include/asm-generic/pgtable-nopmd.h
55379--- linux-3.0.4/include/asm-generic/pgtable-nopmd.h 2011-07-21 22:17:23.000000000 -0400
55380+++ linux-3.0.4/include/asm-generic/pgtable-nopmd.h 2011-08-23 21:47:56.000000000 -0400
55381@@ -1,14 +1,19 @@
55382 #ifndef _PGTABLE_NOPMD_H
55383 #define _PGTABLE_NOPMD_H
55384
55385-#ifndef __ASSEMBLY__
55386-
55387 #include <asm-generic/pgtable-nopud.h>
55388
55389-struct mm_struct;
55390-
55391 #define __PAGETABLE_PMD_FOLDED
55392
55393+#define PMD_SHIFT PUD_SHIFT
55394+#define PTRS_PER_PMD 1
55395+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
55396+#define PMD_MASK (~(PMD_SIZE-1))
55397+
55398+#ifndef __ASSEMBLY__
55399+
55400+struct mm_struct;
55401+
55402 /*
55403 * Having the pmd type consist of a pud gets the size right, and allows
55404 * us to conceptually access the pud entry that this pmd is folded into
55405@@ -16,11 +21,6 @@ struct mm_struct;
55406 */
55407 typedef struct { pud_t pud; } pmd_t;
55408
55409-#define PMD_SHIFT PUD_SHIFT
55410-#define PTRS_PER_PMD 1
55411-#define PMD_SIZE (1UL << PMD_SHIFT)
55412-#define PMD_MASK (~(PMD_SIZE-1))
55413-
55414 /*
55415 * The "pud_xxx()" functions here are trivial for a folded two-level
55416 * setup: the pmd is never bad, and a pmd always exists (as it's folded
55417diff -urNp linux-3.0.4/include/asm-generic/pgtable-nopud.h linux-3.0.4/include/asm-generic/pgtable-nopud.h
55418--- linux-3.0.4/include/asm-generic/pgtable-nopud.h 2011-07-21 22:17:23.000000000 -0400
55419+++ linux-3.0.4/include/asm-generic/pgtable-nopud.h 2011-08-23 21:47:56.000000000 -0400
55420@@ -1,10 +1,15 @@
55421 #ifndef _PGTABLE_NOPUD_H
55422 #define _PGTABLE_NOPUD_H
55423
55424-#ifndef __ASSEMBLY__
55425-
55426 #define __PAGETABLE_PUD_FOLDED
55427
55428+#define PUD_SHIFT PGDIR_SHIFT
55429+#define PTRS_PER_PUD 1
55430+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
55431+#define PUD_MASK (~(PUD_SIZE-1))
55432+
55433+#ifndef __ASSEMBLY__
55434+
55435 /*
55436 * Having the pud type consist of a pgd gets the size right, and allows
55437 * us to conceptually access the pgd entry that this pud is folded into
55438@@ -12,11 +17,6 @@
55439 */
55440 typedef struct { pgd_t pgd; } pud_t;
55441
55442-#define PUD_SHIFT PGDIR_SHIFT
55443-#define PTRS_PER_PUD 1
55444-#define PUD_SIZE (1UL << PUD_SHIFT)
55445-#define PUD_MASK (~(PUD_SIZE-1))
55446-
55447 /*
55448 * The "pgd_xxx()" functions here are trivial for a folded two-level
55449 * setup: the pud is never bad, and a pud always exists (as it's folded
55450diff -urNp linux-3.0.4/include/asm-generic/vmlinux.lds.h linux-3.0.4/include/asm-generic/vmlinux.lds.h
55451--- linux-3.0.4/include/asm-generic/vmlinux.lds.h 2011-07-21 22:17:23.000000000 -0400
55452+++ linux-3.0.4/include/asm-generic/vmlinux.lds.h 2011-08-23 21:47:56.000000000 -0400
55453@@ -217,6 +217,7 @@
55454 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
55455 VMLINUX_SYMBOL(__start_rodata) = .; \
55456 *(.rodata) *(.rodata.*) \
55457+ *(.data..read_only) \
55458 *(__vermagic) /* Kernel version magic */ \
55459 . = ALIGN(8); \
55460 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
55461@@ -723,17 +724,18 @@
55462 * section in the linker script will go there too. @phdr should have
55463 * a leading colon.
55464 *
55465- * Note that this macros defines __per_cpu_load as an absolute symbol.
55466+ * Note that this macros defines per_cpu_load as an absolute symbol.
55467 * If there is no need to put the percpu section at a predetermined
55468 * address, use PERCPU_SECTION.
55469 */
55470 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
55471- VMLINUX_SYMBOL(__per_cpu_load) = .; \
55472- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
55473+ per_cpu_load = .; \
55474+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
55475 - LOAD_OFFSET) { \
55476+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
55477 PERCPU_INPUT(cacheline) \
55478 } phdr \
55479- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
55480+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
55481
55482 /**
55483 * PERCPU_SECTION - define output section for percpu area, simple version
55484diff -urNp linux-3.0.4/include/drm/drm_crtc_helper.h linux-3.0.4/include/drm/drm_crtc_helper.h
55485--- linux-3.0.4/include/drm/drm_crtc_helper.h 2011-07-21 22:17:23.000000000 -0400
55486+++ linux-3.0.4/include/drm/drm_crtc_helper.h 2011-08-23 21:47:56.000000000 -0400
55487@@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
55488
55489 /* disable crtc when not in use - more explicit than dpms off */
55490 void (*disable)(struct drm_crtc *crtc);
55491-};
55492+} __no_const;
55493
55494 struct drm_encoder_helper_funcs {
55495 void (*dpms)(struct drm_encoder *encoder, int mode);
55496@@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
55497 struct drm_connector *connector);
55498 /* disable encoder when not in use - more explicit than dpms off */
55499 void (*disable)(struct drm_encoder *encoder);
55500-};
55501+} __no_const;
55502
55503 struct drm_connector_helper_funcs {
55504 int (*get_modes)(struct drm_connector *connector);
55505diff -urNp linux-3.0.4/include/drm/drmP.h linux-3.0.4/include/drm/drmP.h
55506--- linux-3.0.4/include/drm/drmP.h 2011-07-21 22:17:23.000000000 -0400
55507+++ linux-3.0.4/include/drm/drmP.h 2011-08-23 21:47:56.000000000 -0400
55508@@ -73,6 +73,7 @@
55509 #include <linux/workqueue.h>
55510 #include <linux/poll.h>
55511 #include <asm/pgalloc.h>
55512+#include <asm/local.h>
55513 #include "drm.h"
55514
55515 #include <linux/idr.h>
55516@@ -1033,7 +1034,7 @@ struct drm_device {
55517
55518 /** \name Usage Counters */
55519 /*@{ */
55520- int open_count; /**< Outstanding files open */
55521+ local_t open_count; /**< Outstanding files open */
55522 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
55523 atomic_t vma_count; /**< Outstanding vma areas open */
55524 int buf_use; /**< Buffers in use -- cannot alloc */
55525@@ -1044,7 +1045,7 @@ struct drm_device {
55526 /*@{ */
55527 unsigned long counters;
55528 enum drm_stat_type types[15];
55529- atomic_t counts[15];
55530+ atomic_unchecked_t counts[15];
55531 /*@} */
55532
55533 struct list_head filelist;
55534diff -urNp linux-3.0.4/include/drm/ttm/ttm_memory.h linux-3.0.4/include/drm/ttm/ttm_memory.h
55535--- linux-3.0.4/include/drm/ttm/ttm_memory.h 2011-07-21 22:17:23.000000000 -0400
55536+++ linux-3.0.4/include/drm/ttm/ttm_memory.h 2011-08-23 21:47:56.000000000 -0400
55537@@ -47,7 +47,7 @@
55538
55539 struct ttm_mem_shrink {
55540 int (*do_shrink) (struct ttm_mem_shrink *);
55541-};
55542+} __no_const;
55543
55544 /**
55545 * struct ttm_mem_global - Global memory accounting structure.
55546diff -urNp linux-3.0.4/include/linux/a.out.h linux-3.0.4/include/linux/a.out.h
55547--- linux-3.0.4/include/linux/a.out.h 2011-07-21 22:17:23.000000000 -0400
55548+++ linux-3.0.4/include/linux/a.out.h 2011-08-23 21:47:56.000000000 -0400
55549@@ -39,6 +39,14 @@ enum machine_type {
55550 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
55551 };
55552
55553+/* Constants for the N_FLAGS field */
55554+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
55555+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
55556+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
55557+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
55558+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
55559+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
55560+
55561 #if !defined (N_MAGIC)
55562 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
55563 #endif
55564diff -urNp linux-3.0.4/include/linux/atmdev.h linux-3.0.4/include/linux/atmdev.h
55565--- linux-3.0.4/include/linux/atmdev.h 2011-07-21 22:17:23.000000000 -0400
55566+++ linux-3.0.4/include/linux/atmdev.h 2011-08-23 21:47:56.000000000 -0400
55567@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
55568 #endif
55569
55570 struct k_atm_aal_stats {
55571-#define __HANDLE_ITEM(i) atomic_t i
55572+#define __HANDLE_ITEM(i) atomic_unchecked_t i
55573 __AAL_STAT_ITEMS
55574 #undef __HANDLE_ITEM
55575 };
55576diff -urNp linux-3.0.4/include/linux/binfmts.h linux-3.0.4/include/linux/binfmts.h
55577--- linux-3.0.4/include/linux/binfmts.h 2011-07-21 22:17:23.000000000 -0400
55578+++ linux-3.0.4/include/linux/binfmts.h 2011-08-23 21:47:56.000000000 -0400
55579@@ -88,6 +88,7 @@ struct linux_binfmt {
55580 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
55581 int (*load_shlib)(struct file *);
55582 int (*core_dump)(struct coredump_params *cprm);
55583+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
55584 unsigned long min_coredump; /* minimal dump size */
55585 };
55586
55587diff -urNp linux-3.0.4/include/linux/blkdev.h linux-3.0.4/include/linux/blkdev.h
55588--- linux-3.0.4/include/linux/blkdev.h 2011-07-21 22:17:23.000000000 -0400
55589+++ linux-3.0.4/include/linux/blkdev.h 2011-08-26 19:49:56.000000000 -0400
55590@@ -1308,7 +1308,7 @@ struct block_device_operations {
55591 /* this callback is with swap_lock and sometimes page table lock held */
55592 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
55593 struct module *owner;
55594-};
55595+} __do_const;
55596
55597 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
55598 unsigned long);
55599diff -urNp linux-3.0.4/include/linux/blktrace_api.h linux-3.0.4/include/linux/blktrace_api.h
55600--- linux-3.0.4/include/linux/blktrace_api.h 2011-07-21 22:17:23.000000000 -0400
55601+++ linux-3.0.4/include/linux/blktrace_api.h 2011-08-23 21:47:56.000000000 -0400
55602@@ -161,7 +161,7 @@ struct blk_trace {
55603 struct dentry *dir;
55604 struct dentry *dropped_file;
55605 struct dentry *msg_file;
55606- atomic_t dropped;
55607+ atomic_unchecked_t dropped;
55608 };
55609
55610 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
55611diff -urNp linux-3.0.4/include/linux/byteorder/little_endian.h linux-3.0.4/include/linux/byteorder/little_endian.h
55612--- linux-3.0.4/include/linux/byteorder/little_endian.h 2011-07-21 22:17:23.000000000 -0400
55613+++ linux-3.0.4/include/linux/byteorder/little_endian.h 2011-08-23 21:47:56.000000000 -0400
55614@@ -42,51 +42,51 @@
55615
55616 static inline __le64 __cpu_to_le64p(const __u64 *p)
55617 {
55618- return (__force __le64)*p;
55619+ return (__force const __le64)*p;
55620 }
55621 static inline __u64 __le64_to_cpup(const __le64 *p)
55622 {
55623- return (__force __u64)*p;
55624+ return (__force const __u64)*p;
55625 }
55626 static inline __le32 __cpu_to_le32p(const __u32 *p)
55627 {
55628- return (__force __le32)*p;
55629+ return (__force const __le32)*p;
55630 }
55631 static inline __u32 __le32_to_cpup(const __le32 *p)
55632 {
55633- return (__force __u32)*p;
55634+ return (__force const __u32)*p;
55635 }
55636 static inline __le16 __cpu_to_le16p(const __u16 *p)
55637 {
55638- return (__force __le16)*p;
55639+ return (__force const __le16)*p;
55640 }
55641 static inline __u16 __le16_to_cpup(const __le16 *p)
55642 {
55643- return (__force __u16)*p;
55644+ return (__force const __u16)*p;
55645 }
55646 static inline __be64 __cpu_to_be64p(const __u64 *p)
55647 {
55648- return (__force __be64)__swab64p(p);
55649+ return (__force const __be64)__swab64p(p);
55650 }
55651 static inline __u64 __be64_to_cpup(const __be64 *p)
55652 {
55653- return __swab64p((__u64 *)p);
55654+ return __swab64p((const __u64 *)p);
55655 }
55656 static inline __be32 __cpu_to_be32p(const __u32 *p)
55657 {
55658- return (__force __be32)__swab32p(p);
55659+ return (__force const __be32)__swab32p(p);
55660 }
55661 static inline __u32 __be32_to_cpup(const __be32 *p)
55662 {
55663- return __swab32p((__u32 *)p);
55664+ return __swab32p((const __u32 *)p);
55665 }
55666 static inline __be16 __cpu_to_be16p(const __u16 *p)
55667 {
55668- return (__force __be16)__swab16p(p);
55669+ return (__force const __be16)__swab16p(p);
55670 }
55671 static inline __u16 __be16_to_cpup(const __be16 *p)
55672 {
55673- return __swab16p((__u16 *)p);
55674+ return __swab16p((const __u16 *)p);
55675 }
55676 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
55677 #define __le64_to_cpus(x) do { (void)(x); } while (0)
55678diff -urNp linux-3.0.4/include/linux/cache.h linux-3.0.4/include/linux/cache.h
55679--- linux-3.0.4/include/linux/cache.h 2011-07-21 22:17:23.000000000 -0400
55680+++ linux-3.0.4/include/linux/cache.h 2011-08-23 21:47:56.000000000 -0400
55681@@ -16,6 +16,10 @@
55682 #define __read_mostly
55683 #endif
55684
55685+#ifndef __read_only
55686+#define __read_only __read_mostly
55687+#endif
55688+
55689 #ifndef ____cacheline_aligned
55690 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
55691 #endif
55692diff -urNp linux-3.0.4/include/linux/capability.h linux-3.0.4/include/linux/capability.h
55693--- linux-3.0.4/include/linux/capability.h 2011-07-21 22:17:23.000000000 -0400
55694+++ linux-3.0.4/include/linux/capability.h 2011-08-23 21:48:14.000000000 -0400
55695@@ -547,6 +547,9 @@ extern bool capable(int cap);
55696 extern bool ns_capable(struct user_namespace *ns, int cap);
55697 extern bool task_ns_capable(struct task_struct *t, int cap);
55698 extern bool nsown_capable(int cap);
55699+extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
55700+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
55701+extern bool capable_nolog(int cap);
55702
55703 /* audit system wants to get cap info from files as well */
55704 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
55705diff -urNp linux-3.0.4/include/linux/cleancache.h linux-3.0.4/include/linux/cleancache.h
55706--- linux-3.0.4/include/linux/cleancache.h 2011-07-21 22:17:23.000000000 -0400
55707+++ linux-3.0.4/include/linux/cleancache.h 2011-08-23 21:47:56.000000000 -0400
55708@@ -31,7 +31,7 @@ struct cleancache_ops {
55709 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
55710 void (*flush_inode)(int, struct cleancache_filekey);
55711 void (*flush_fs)(int);
55712-};
55713+} __no_const;
55714
55715 extern struct cleancache_ops
55716 cleancache_register_ops(struct cleancache_ops *ops);
55717diff -urNp linux-3.0.4/include/linux/compiler-gcc4.h linux-3.0.4/include/linux/compiler-gcc4.h
55718--- linux-3.0.4/include/linux/compiler-gcc4.h 2011-07-21 22:17:23.000000000 -0400
55719+++ linux-3.0.4/include/linux/compiler-gcc4.h 2011-08-26 19:49:56.000000000 -0400
55720@@ -31,6 +31,12 @@
55721
55722
55723 #if __GNUC_MINOR__ >= 5
55724+
55725+#ifdef CONSTIFY_PLUGIN
55726+#define __no_const __attribute__((no_const))
55727+#define __do_const __attribute__((do_const))
55728+#endif
55729+
55730 /*
55731 * Mark a position in code as unreachable. This can be used to
55732 * suppress control flow warnings after asm blocks that transfer
55733@@ -46,6 +52,11 @@
55734 #define __noclone __attribute__((__noclone__))
55735
55736 #endif
55737+
55738+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
55739+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
55740+#define __bos0(ptr) __bos((ptr), 0)
55741+#define __bos1(ptr) __bos((ptr), 1)
55742 #endif
55743
55744 #if __GNUC_MINOR__ > 0
55745diff -urNp linux-3.0.4/include/linux/compiler.h linux-3.0.4/include/linux/compiler.h
55746--- linux-3.0.4/include/linux/compiler.h 2011-07-21 22:17:23.000000000 -0400
55747+++ linux-3.0.4/include/linux/compiler.h 2011-10-06 04:17:55.000000000 -0400
55748@@ -5,31 +5,62 @@
55749
55750 #ifdef __CHECKER__
55751 # define __user __attribute__((noderef, address_space(1)))
55752+# define __force_user __force __user
55753 # define __kernel __attribute__((address_space(0)))
55754+# define __force_kernel __force __kernel
55755 # define __safe __attribute__((safe))
55756 # define __force __attribute__((force))
55757 # define __nocast __attribute__((nocast))
55758 # define __iomem __attribute__((noderef, address_space(2)))
55759+# define __force_iomem __force __iomem
55760 # define __acquires(x) __attribute__((context(x,0,1)))
55761 # define __releases(x) __attribute__((context(x,1,0)))
55762 # define __acquire(x) __context__(x,1)
55763 # define __release(x) __context__(x,-1)
55764 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
55765 # define __percpu __attribute__((noderef, address_space(3)))
55766+# define __force_percpu __force __percpu
55767 #ifdef CONFIG_SPARSE_RCU_POINTER
55768 # define __rcu __attribute__((noderef, address_space(4)))
55769+# define __force_rcu __force __rcu
55770 #else
55771 # define __rcu
55772+# define __force_rcu
55773 #endif
55774 extern void __chk_user_ptr(const volatile void __user *);
55775 extern void __chk_io_ptr(const volatile void __iomem *);
55776+#elif defined(CHECKER_PLUGIN)
55777+//# define __user
55778+//# define __force_user
55779+//# define __kernel
55780+//# define __force_kernel
55781+# define __safe
55782+# define __force
55783+# define __nocast
55784+# define __iomem
55785+# define __force_iomem
55786+# define __chk_user_ptr(x) (void)0
55787+# define __chk_io_ptr(x) (void)0
55788+# define __builtin_warning(x, y...) (1)
55789+# define __acquires(x)
55790+# define __releases(x)
55791+# define __acquire(x) (void)0
55792+# define __release(x) (void)0
55793+# define __cond_lock(x,c) (c)
55794+# define __percpu
55795+# define __force_percpu
55796+# define __rcu
55797+# define __force_rcu
55798 #else
55799 # define __user
55800+# define __force_user
55801 # define __kernel
55802+# define __force_kernel
55803 # define __safe
55804 # define __force
55805 # define __nocast
55806 # define __iomem
55807+# define __force_iomem
55808 # define __chk_user_ptr(x) (void)0
55809 # define __chk_io_ptr(x) (void)0
55810 # define __builtin_warning(x, y...) (1)
55811@@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile
55812 # define __release(x) (void)0
55813 # define __cond_lock(x,c) (c)
55814 # define __percpu
55815+# define __force_percpu
55816 # define __rcu
55817+# define __force_rcu
55818 #endif
55819
55820 #ifdef __KERNEL__
55821@@ -264,6 +297,14 @@ void ftrace_likely_update(struct ftrace_
55822 # define __attribute_const__ /* unimplemented */
55823 #endif
55824
55825+#ifndef __no_const
55826+# define __no_const
55827+#endif
55828+
55829+#ifndef __do_const
55830+# define __do_const
55831+#endif
55832+
55833 /*
55834 * Tell gcc if a function is cold. The compiler will assume any path
55835 * directly leading to the call is unlikely.
55836@@ -273,6 +314,22 @@ void ftrace_likely_update(struct ftrace_
55837 #define __cold
55838 #endif
55839
55840+#ifndef __alloc_size
55841+#define __alloc_size(...)
55842+#endif
55843+
55844+#ifndef __bos
55845+#define __bos(ptr, arg)
55846+#endif
55847+
55848+#ifndef __bos0
55849+#define __bos0(ptr)
55850+#endif
55851+
55852+#ifndef __bos1
55853+#define __bos1(ptr)
55854+#endif
55855+
55856 /* Simple shorthand for a section definition */
55857 #ifndef __section
55858 # define __section(S) __attribute__ ((__section__(#S)))
55859@@ -306,6 +363,7 @@ void ftrace_likely_update(struct ftrace_
55860 * use is to mediate communication between process-level code and irq/NMI
55861 * handlers, all running on the same CPU.
55862 */
55863-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
55864+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
55865+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
55866
55867 #endif /* __LINUX_COMPILER_H */
55868diff -urNp linux-3.0.4/include/linux/cpuset.h linux-3.0.4/include/linux/cpuset.h
55869--- linux-3.0.4/include/linux/cpuset.h 2011-07-21 22:17:23.000000000 -0400
55870+++ linux-3.0.4/include/linux/cpuset.h 2011-08-23 21:47:56.000000000 -0400
55871@@ -118,7 +118,7 @@ static inline void put_mems_allowed(void
55872 * nodemask.
55873 */
55874 smp_mb();
55875- --ACCESS_ONCE(current->mems_allowed_change_disable);
55876+ --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
55877 }
55878
55879 static inline void set_mems_allowed(nodemask_t nodemask)
55880diff -urNp linux-3.0.4/include/linux/crypto.h linux-3.0.4/include/linux/crypto.h
55881--- linux-3.0.4/include/linux/crypto.h 2011-07-21 22:17:23.000000000 -0400
55882+++ linux-3.0.4/include/linux/crypto.h 2011-08-23 21:47:56.000000000 -0400
55883@@ -361,7 +361,7 @@ struct cipher_tfm {
55884 const u8 *key, unsigned int keylen);
55885 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
55886 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
55887-};
55888+} __no_const;
55889
55890 struct hash_tfm {
55891 int (*init)(struct hash_desc *desc);
55892@@ -382,13 +382,13 @@ struct compress_tfm {
55893 int (*cot_decompress)(struct crypto_tfm *tfm,
55894 const u8 *src, unsigned int slen,
55895 u8 *dst, unsigned int *dlen);
55896-};
55897+} __no_const;
55898
55899 struct rng_tfm {
55900 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
55901 unsigned int dlen);
55902 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
55903-};
55904+} __no_const;
55905
55906 #define crt_ablkcipher crt_u.ablkcipher
55907 #define crt_aead crt_u.aead
55908diff -urNp linux-3.0.4/include/linux/decompress/mm.h linux-3.0.4/include/linux/decompress/mm.h
55909--- linux-3.0.4/include/linux/decompress/mm.h 2011-07-21 22:17:23.000000000 -0400
55910+++ linux-3.0.4/include/linux/decompress/mm.h 2011-08-23 21:47:56.000000000 -0400
55911@@ -77,7 +77,7 @@ static void free(void *where)
55912 * warnings when not needed (indeed large_malloc / large_free are not
55913 * needed by inflate */
55914
55915-#define malloc(a) kmalloc(a, GFP_KERNEL)
55916+#define malloc(a) kmalloc((a), GFP_KERNEL)
55917 #define free(a) kfree(a)
55918
55919 #define large_malloc(a) vmalloc(a)
55920diff -urNp linux-3.0.4/include/linux/dma-mapping.h linux-3.0.4/include/linux/dma-mapping.h
55921--- linux-3.0.4/include/linux/dma-mapping.h 2011-07-21 22:17:23.000000000 -0400
55922+++ linux-3.0.4/include/linux/dma-mapping.h 2011-08-26 19:49:56.000000000 -0400
55923@@ -50,7 +50,7 @@ struct dma_map_ops {
55924 int (*dma_supported)(struct device *dev, u64 mask);
55925 int (*set_dma_mask)(struct device *dev, u64 mask);
55926 int is_phys;
55927-};
55928+} __do_const;
55929
55930 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
55931
55932diff -urNp linux-3.0.4/include/linux/efi.h linux-3.0.4/include/linux/efi.h
55933--- linux-3.0.4/include/linux/efi.h 2011-07-21 22:17:23.000000000 -0400
55934+++ linux-3.0.4/include/linux/efi.h 2011-08-23 21:47:56.000000000 -0400
55935@@ -410,7 +410,7 @@ struct efivar_operations {
55936 efi_get_variable_t *get_variable;
55937 efi_get_next_variable_t *get_next_variable;
55938 efi_set_variable_t *set_variable;
55939-};
55940+} __no_const;
55941
55942 struct efivars {
55943 /*
55944diff -urNp linux-3.0.4/include/linux/elf.h linux-3.0.4/include/linux/elf.h
55945--- linux-3.0.4/include/linux/elf.h 2011-07-21 22:17:23.000000000 -0400
55946+++ linux-3.0.4/include/linux/elf.h 2011-08-23 21:47:56.000000000 -0400
55947@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
55948 #define PT_GNU_EH_FRAME 0x6474e550
55949
55950 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
55951+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
55952+
55953+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
55954+
55955+/* Constants for the e_flags field */
55956+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
55957+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
55958+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
55959+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
55960+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
55961+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
55962
55963 /*
55964 * Extended Numbering
55965@@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
55966 #define DT_DEBUG 21
55967 #define DT_TEXTREL 22
55968 #define DT_JMPREL 23
55969+#define DT_FLAGS 30
55970+ #define DF_TEXTREL 0x00000004
55971 #define DT_ENCODING 32
55972 #define OLD_DT_LOOS 0x60000000
55973 #define DT_LOOS 0x6000000d
55974@@ -252,6 +265,19 @@ typedef struct elf64_hdr {
55975 #define PF_W 0x2
55976 #define PF_X 0x1
55977
55978+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
55979+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
55980+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
55981+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
55982+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
55983+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
55984+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
55985+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
55986+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
55987+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
55988+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
55989+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
55990+
55991 typedef struct elf32_phdr{
55992 Elf32_Word p_type;
55993 Elf32_Off p_offset;
55994@@ -344,6 +370,8 @@ typedef struct elf64_shdr {
55995 #define EI_OSABI 7
55996 #define EI_PAD 8
55997
55998+#define EI_PAX 14
55999+
56000 #define ELFMAG0 0x7f /* EI_MAG */
56001 #define ELFMAG1 'E'
56002 #define ELFMAG2 'L'
56003@@ -422,6 +450,7 @@ extern Elf32_Dyn _DYNAMIC [];
56004 #define elf_note elf32_note
56005 #define elf_addr_t Elf32_Off
56006 #define Elf_Half Elf32_Half
56007+#define elf_dyn Elf32_Dyn
56008
56009 #else
56010
56011@@ -432,6 +461,7 @@ extern Elf64_Dyn _DYNAMIC [];
56012 #define elf_note elf64_note
56013 #define elf_addr_t Elf64_Off
56014 #define Elf_Half Elf64_Half
56015+#define elf_dyn Elf64_Dyn
56016
56017 #endif
56018
56019diff -urNp linux-3.0.4/include/linux/firewire.h linux-3.0.4/include/linux/firewire.h
56020--- linux-3.0.4/include/linux/firewire.h 2011-07-21 22:17:23.000000000 -0400
56021+++ linux-3.0.4/include/linux/firewire.h 2011-08-23 21:47:56.000000000 -0400
56022@@ -428,7 +428,7 @@ struct fw_iso_context {
56023 union {
56024 fw_iso_callback_t sc;
56025 fw_iso_mc_callback_t mc;
56026- } callback;
56027+ } __no_const callback;
56028 void *callback_data;
56029 };
56030
56031diff -urNp linux-3.0.4/include/linux/fscache-cache.h linux-3.0.4/include/linux/fscache-cache.h
56032--- linux-3.0.4/include/linux/fscache-cache.h 2011-07-21 22:17:23.000000000 -0400
56033+++ linux-3.0.4/include/linux/fscache-cache.h 2011-08-23 21:47:56.000000000 -0400
56034@@ -102,7 +102,7 @@ struct fscache_operation {
56035 fscache_operation_release_t release;
56036 };
56037
56038-extern atomic_t fscache_op_debug_id;
56039+extern atomic_unchecked_t fscache_op_debug_id;
56040 extern void fscache_op_work_func(struct work_struct *work);
56041
56042 extern void fscache_enqueue_operation(struct fscache_operation *);
56043@@ -122,7 +122,7 @@ static inline void fscache_operation_ini
56044 {
56045 INIT_WORK(&op->work, fscache_op_work_func);
56046 atomic_set(&op->usage, 1);
56047- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
56048+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
56049 op->processor = processor;
56050 op->release = release;
56051 INIT_LIST_HEAD(&op->pend_link);
56052diff -urNp linux-3.0.4/include/linux/fs.h linux-3.0.4/include/linux/fs.h
56053--- linux-3.0.4/include/linux/fs.h 2011-07-21 22:17:23.000000000 -0400
56054+++ linux-3.0.4/include/linux/fs.h 2011-08-26 19:49:56.000000000 -0400
56055@@ -109,6 +109,11 @@ struct inodes_stat_t {
56056 /* File was opened by fanotify and shouldn't generate fanotify events */
56057 #define FMODE_NONOTIFY ((__force fmode_t)0x1000000)
56058
56059+/* Hack for grsec so as not to require read permission simply to execute
56060+ * a binary
56061+ */
56062+#define FMODE_GREXEC ((__force fmode_t)0x2000000)
56063+
56064 /*
56065 * The below are the various read and write types that we support. Some of
56066 * them include behavioral modifiers that send information down to the
56067@@ -1571,7 +1576,8 @@ struct file_operations {
56068 int (*setlease)(struct file *, long, struct file_lock **);
56069 long (*fallocate)(struct file *file, int mode, loff_t offset,
56070 loff_t len);
56071-};
56072+} __do_const;
56073+typedef struct file_operations __no_const file_operations_no_const;
56074
56075 #define IPERM_FLAG_RCU 0x0001
56076
56077diff -urNp linux-3.0.4/include/linux/fsnotify.h linux-3.0.4/include/linux/fsnotify.h
56078--- linux-3.0.4/include/linux/fsnotify.h 2011-07-21 22:17:23.000000000 -0400
56079+++ linux-3.0.4/include/linux/fsnotify.h 2011-08-24 18:10:29.000000000 -0400
56080@@ -314,7 +314,7 @@ static inline void fsnotify_change(struc
56081 */
56082 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
56083 {
56084- return kstrdup(name, GFP_KERNEL);
56085+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
56086 }
56087
56088 /*
56089diff -urNp linux-3.0.4/include/linux/fs_struct.h linux-3.0.4/include/linux/fs_struct.h
56090--- linux-3.0.4/include/linux/fs_struct.h 2011-07-21 22:17:23.000000000 -0400
56091+++ linux-3.0.4/include/linux/fs_struct.h 2011-08-23 21:47:56.000000000 -0400
56092@@ -6,7 +6,7 @@
56093 #include <linux/seqlock.h>
56094
56095 struct fs_struct {
56096- int users;
56097+ atomic_t users;
56098 spinlock_t lock;
56099 seqcount_t seq;
56100 int umask;
56101diff -urNp linux-3.0.4/include/linux/ftrace_event.h linux-3.0.4/include/linux/ftrace_event.h
56102--- linux-3.0.4/include/linux/ftrace_event.h 2011-07-21 22:17:23.000000000 -0400
56103+++ linux-3.0.4/include/linux/ftrace_event.h 2011-08-23 21:47:56.000000000 -0400
56104@@ -96,7 +96,7 @@ struct trace_event_functions {
56105 trace_print_func raw;
56106 trace_print_func hex;
56107 trace_print_func binary;
56108-};
56109+} __no_const;
56110
56111 struct trace_event {
56112 struct hlist_node node;
56113@@ -247,7 +247,7 @@ extern int trace_define_field(struct ftr
56114 extern int trace_add_event_call(struct ftrace_event_call *call);
56115 extern void trace_remove_event_call(struct ftrace_event_call *call);
56116
56117-#define is_signed_type(type) (((type)(-1)) < 0)
56118+#define is_signed_type(type) (((type)(-1)) < (type)1)
56119
56120 int trace_set_clr_event(const char *system, const char *event, int set);
56121
56122diff -urNp linux-3.0.4/include/linux/genhd.h linux-3.0.4/include/linux/genhd.h
56123--- linux-3.0.4/include/linux/genhd.h 2011-07-21 22:17:23.000000000 -0400
56124+++ linux-3.0.4/include/linux/genhd.h 2011-08-23 21:47:56.000000000 -0400
56125@@ -184,7 +184,7 @@ struct gendisk {
56126 struct kobject *slave_dir;
56127
56128 struct timer_rand_state *random;
56129- atomic_t sync_io; /* RAID */
56130+ atomic_unchecked_t sync_io; /* RAID */
56131 struct disk_events *ev;
56132 #ifdef CONFIG_BLK_DEV_INTEGRITY
56133 struct blk_integrity *integrity;
56134diff -urNp linux-3.0.4/include/linux/gracl.h linux-3.0.4/include/linux/gracl.h
56135--- linux-3.0.4/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
56136+++ linux-3.0.4/include/linux/gracl.h 2011-08-23 21:48:14.000000000 -0400
56137@@ -0,0 +1,317 @@
56138+#ifndef GR_ACL_H
56139+#define GR_ACL_H
56140+
56141+#include <linux/grdefs.h>
56142+#include <linux/resource.h>
56143+#include <linux/capability.h>
56144+#include <linux/dcache.h>
56145+#include <asm/resource.h>
56146+
56147+/* Major status information */
56148+
56149+#define GR_VERSION "grsecurity 2.2.2"
56150+#define GRSECURITY_VERSION 0x2202
56151+
56152+enum {
56153+ GR_SHUTDOWN = 0,
56154+ GR_ENABLE = 1,
56155+ GR_SPROLE = 2,
56156+ GR_RELOAD = 3,
56157+ GR_SEGVMOD = 4,
56158+ GR_STATUS = 5,
56159+ GR_UNSPROLE = 6,
56160+ GR_PASSSET = 7,
56161+ GR_SPROLEPAM = 8,
56162+};
56163+
56164+/* Password setup definitions
56165+ * kernel/grhash.c */
56166+enum {
56167+ GR_PW_LEN = 128,
56168+ GR_SALT_LEN = 16,
56169+ GR_SHA_LEN = 32,
56170+};
56171+
56172+enum {
56173+ GR_SPROLE_LEN = 64,
56174+};
56175+
56176+enum {
56177+ GR_NO_GLOB = 0,
56178+ GR_REG_GLOB,
56179+ GR_CREATE_GLOB
56180+};
56181+
56182+#define GR_NLIMITS 32
56183+
56184+/* Begin Data Structures */
56185+
56186+struct sprole_pw {
56187+ unsigned char *rolename;
56188+ unsigned char salt[GR_SALT_LEN];
56189+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
56190+};
56191+
56192+struct name_entry {
56193+ __u32 key;
56194+ ino_t inode;
56195+ dev_t device;
56196+ char *name;
56197+ __u16 len;
56198+ __u8 deleted;
56199+ struct name_entry *prev;
56200+ struct name_entry *next;
56201+};
56202+
56203+struct inodev_entry {
56204+ struct name_entry *nentry;
56205+ struct inodev_entry *prev;
56206+ struct inodev_entry *next;
56207+};
56208+
56209+struct acl_role_db {
56210+ struct acl_role_label **r_hash;
56211+ __u32 r_size;
56212+};
56213+
56214+struct inodev_db {
56215+ struct inodev_entry **i_hash;
56216+ __u32 i_size;
56217+};
56218+
56219+struct name_db {
56220+ struct name_entry **n_hash;
56221+ __u32 n_size;
56222+};
56223+
56224+struct crash_uid {
56225+ uid_t uid;
56226+ unsigned long expires;
56227+};
56228+
56229+struct gr_hash_struct {
56230+ void **table;
56231+ void **nametable;
56232+ void *first;
56233+ __u32 table_size;
56234+ __u32 used_size;
56235+ int type;
56236+};
56237+
56238+/* Userspace Grsecurity ACL data structures */
56239+
56240+struct acl_subject_label {
56241+ char *filename;
56242+ ino_t inode;
56243+ dev_t device;
56244+ __u32 mode;
56245+ kernel_cap_t cap_mask;
56246+ kernel_cap_t cap_lower;
56247+ kernel_cap_t cap_invert_audit;
56248+
56249+ struct rlimit res[GR_NLIMITS];
56250+ __u32 resmask;
56251+
56252+ __u8 user_trans_type;
56253+ __u8 group_trans_type;
56254+ uid_t *user_transitions;
56255+ gid_t *group_transitions;
56256+ __u16 user_trans_num;
56257+ __u16 group_trans_num;
56258+
56259+ __u32 sock_families[2];
56260+ __u32 ip_proto[8];
56261+ __u32 ip_type;
56262+ struct acl_ip_label **ips;
56263+ __u32 ip_num;
56264+ __u32 inaddr_any_override;
56265+
56266+ __u32 crashes;
56267+ unsigned long expires;
56268+
56269+ struct acl_subject_label *parent_subject;
56270+ struct gr_hash_struct *hash;
56271+ struct acl_subject_label *prev;
56272+ struct acl_subject_label *next;
56273+
56274+ struct acl_object_label **obj_hash;
56275+ __u32 obj_hash_size;
56276+ __u16 pax_flags;
56277+};
56278+
56279+struct role_allowed_ip {
56280+ __u32 addr;
56281+ __u32 netmask;
56282+
56283+ struct role_allowed_ip *prev;
56284+ struct role_allowed_ip *next;
56285+};
56286+
56287+struct role_transition {
56288+ char *rolename;
56289+
56290+ struct role_transition *prev;
56291+ struct role_transition *next;
56292+};
56293+
56294+struct acl_role_label {
56295+ char *rolename;
56296+ uid_t uidgid;
56297+ __u16 roletype;
56298+
56299+ __u16 auth_attempts;
56300+ unsigned long expires;
56301+
56302+ struct acl_subject_label *root_label;
56303+ struct gr_hash_struct *hash;
56304+
56305+ struct acl_role_label *prev;
56306+ struct acl_role_label *next;
56307+
56308+ struct role_transition *transitions;
56309+ struct role_allowed_ip *allowed_ips;
56310+ uid_t *domain_children;
56311+ __u16 domain_child_num;
56312+
56313+ struct acl_subject_label **subj_hash;
56314+ __u32 subj_hash_size;
56315+};
56316+
56317+struct user_acl_role_db {
56318+ struct acl_role_label **r_table;
56319+ __u32 num_pointers; /* Number of allocations to track */
56320+ __u32 num_roles; /* Number of roles */
56321+ __u32 num_domain_children; /* Number of domain children */
56322+ __u32 num_subjects; /* Number of subjects */
56323+ __u32 num_objects; /* Number of objects */
56324+};
56325+
56326+struct acl_object_label {
56327+ char *filename;
56328+ ino_t inode;
56329+ dev_t device;
56330+ __u32 mode;
56331+
56332+ struct acl_subject_label *nested;
56333+ struct acl_object_label *globbed;
56334+
56335+ /* next two structures not used */
56336+
56337+ struct acl_object_label *prev;
56338+ struct acl_object_label *next;
56339+};
56340+
56341+struct acl_ip_label {
56342+ char *iface;
56343+ __u32 addr;
56344+ __u32 netmask;
56345+ __u16 low, high;
56346+ __u8 mode;
56347+ __u32 type;
56348+ __u32 proto[8];
56349+
56350+ /* next two structures not used */
56351+
56352+ struct acl_ip_label *prev;
56353+ struct acl_ip_label *next;
56354+};
56355+
56356+struct gr_arg {
56357+ struct user_acl_role_db role_db;
56358+ unsigned char pw[GR_PW_LEN];
56359+ unsigned char salt[GR_SALT_LEN];
56360+ unsigned char sum[GR_SHA_LEN];
56361+ unsigned char sp_role[GR_SPROLE_LEN];
56362+ struct sprole_pw *sprole_pws;
56363+ dev_t segv_device;
56364+ ino_t segv_inode;
56365+ uid_t segv_uid;
56366+ __u16 num_sprole_pws;
56367+ __u16 mode;
56368+};
56369+
56370+struct gr_arg_wrapper {
56371+ struct gr_arg *arg;
56372+ __u32 version;
56373+ __u32 size;
56374+};
56375+
56376+struct subject_map {
56377+ struct acl_subject_label *user;
56378+ struct acl_subject_label *kernel;
56379+ struct subject_map *prev;
56380+ struct subject_map *next;
56381+};
56382+
56383+struct acl_subj_map_db {
56384+ struct subject_map **s_hash;
56385+ __u32 s_size;
56386+};
56387+
56388+/* End Data Structures Section */
56389+
56390+/* Hash functions generated by empirical testing by Brad Spengler
56391+ Makes good use of the low bits of the inode. Generally 0-1 times
56392+ in loop for successful match. 0-3 for unsuccessful match.
56393+ Shift/add algorithm with modulus of table size and an XOR*/
56394+
56395+static __inline__ unsigned int
56396+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
56397+{
56398+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
56399+}
56400+
56401+ static __inline__ unsigned int
56402+shash(const struct acl_subject_label *userp, const unsigned int sz)
56403+{
56404+ return ((const unsigned long)userp % sz);
56405+}
56406+
56407+static __inline__ unsigned int
56408+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
56409+{
56410+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
56411+}
56412+
56413+static __inline__ unsigned int
56414+nhash(const char *name, const __u16 len, const unsigned int sz)
56415+{
56416+ return full_name_hash((const unsigned char *)name, len) % sz;
56417+}
56418+
56419+#define FOR_EACH_ROLE_START(role) \
56420+ role = role_list; \
56421+ while (role) {
56422+
56423+#define FOR_EACH_ROLE_END(role) \
56424+ role = role->prev; \
56425+ }
56426+
56427+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
56428+ subj = NULL; \
56429+ iter = 0; \
56430+ while (iter < role->subj_hash_size) { \
56431+ if (subj == NULL) \
56432+ subj = role->subj_hash[iter]; \
56433+ if (subj == NULL) { \
56434+ iter++; \
56435+ continue; \
56436+ }
56437+
56438+#define FOR_EACH_SUBJECT_END(subj,iter) \
56439+ subj = subj->next; \
56440+ if (subj == NULL) \
56441+ iter++; \
56442+ }
56443+
56444+
56445+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
56446+ subj = role->hash->first; \
56447+ while (subj != NULL) {
56448+
56449+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
56450+ subj = subj->next; \
56451+ }
56452+
56453+#endif
56454+
56455diff -urNp linux-3.0.4/include/linux/gralloc.h linux-3.0.4/include/linux/gralloc.h
56456--- linux-3.0.4/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
56457+++ linux-3.0.4/include/linux/gralloc.h 2011-08-23 21:48:14.000000000 -0400
56458@@ -0,0 +1,9 @@
56459+#ifndef __GRALLOC_H
56460+#define __GRALLOC_H
56461+
56462+void acl_free_all(void);
56463+int acl_alloc_stack_init(unsigned long size);
56464+void *acl_alloc(unsigned long len);
56465+void *acl_alloc_num(unsigned long num, unsigned long len);
56466+
56467+#endif
56468diff -urNp linux-3.0.4/include/linux/grdefs.h linux-3.0.4/include/linux/grdefs.h
56469--- linux-3.0.4/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
56470+++ linux-3.0.4/include/linux/grdefs.h 2011-08-23 21:48:14.000000000 -0400
56471@@ -0,0 +1,140 @@
56472+#ifndef GRDEFS_H
56473+#define GRDEFS_H
56474+
56475+/* Begin grsecurity status declarations */
56476+
56477+enum {
56478+ GR_READY = 0x01,
56479+ GR_STATUS_INIT = 0x00 // disabled state
56480+};
56481+
56482+/* Begin ACL declarations */
56483+
56484+/* Role flags */
56485+
56486+enum {
56487+ GR_ROLE_USER = 0x0001,
56488+ GR_ROLE_GROUP = 0x0002,
56489+ GR_ROLE_DEFAULT = 0x0004,
56490+ GR_ROLE_SPECIAL = 0x0008,
56491+ GR_ROLE_AUTH = 0x0010,
56492+ GR_ROLE_NOPW = 0x0020,
56493+ GR_ROLE_GOD = 0x0040,
56494+ GR_ROLE_LEARN = 0x0080,
56495+ GR_ROLE_TPE = 0x0100,
56496+ GR_ROLE_DOMAIN = 0x0200,
56497+ GR_ROLE_PAM = 0x0400,
56498+ GR_ROLE_PERSIST = 0x0800
56499+};
56500+
56501+/* ACL Subject and Object mode flags */
56502+enum {
56503+ GR_DELETED = 0x80000000
56504+};
56505+
56506+/* ACL Object-only mode flags */
56507+enum {
56508+ GR_READ = 0x00000001,
56509+ GR_APPEND = 0x00000002,
56510+ GR_WRITE = 0x00000004,
56511+ GR_EXEC = 0x00000008,
56512+ GR_FIND = 0x00000010,
56513+ GR_INHERIT = 0x00000020,
56514+ GR_SETID = 0x00000040,
56515+ GR_CREATE = 0x00000080,
56516+ GR_DELETE = 0x00000100,
56517+ GR_LINK = 0x00000200,
56518+ GR_AUDIT_READ = 0x00000400,
56519+ GR_AUDIT_APPEND = 0x00000800,
56520+ GR_AUDIT_WRITE = 0x00001000,
56521+ GR_AUDIT_EXEC = 0x00002000,
56522+ GR_AUDIT_FIND = 0x00004000,
56523+ GR_AUDIT_INHERIT= 0x00008000,
56524+ GR_AUDIT_SETID = 0x00010000,
56525+ GR_AUDIT_CREATE = 0x00020000,
56526+ GR_AUDIT_DELETE = 0x00040000,
56527+ GR_AUDIT_LINK = 0x00080000,
56528+ GR_PTRACERD = 0x00100000,
56529+ GR_NOPTRACE = 0x00200000,
56530+ GR_SUPPRESS = 0x00400000,
56531+ GR_NOLEARN = 0x00800000,
56532+ GR_INIT_TRANSFER= 0x01000000
56533+};
56534+
56535+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
56536+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
56537+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
56538+
56539+/* ACL subject-only mode flags */
56540+enum {
56541+ GR_KILL = 0x00000001,
56542+ GR_VIEW = 0x00000002,
56543+ GR_PROTECTED = 0x00000004,
56544+ GR_LEARN = 0x00000008,
56545+ GR_OVERRIDE = 0x00000010,
56546+ /* just a placeholder, this mode is only used in userspace */
56547+ GR_DUMMY = 0x00000020,
56548+ GR_PROTSHM = 0x00000040,
56549+ GR_KILLPROC = 0x00000080,
56550+ GR_KILLIPPROC = 0x00000100,
56551+ /* just a placeholder, this mode is only used in userspace */
56552+ GR_NOTROJAN = 0x00000200,
56553+ GR_PROTPROCFD = 0x00000400,
56554+ GR_PROCACCT = 0x00000800,
56555+ GR_RELAXPTRACE = 0x00001000,
56556+ GR_NESTED = 0x00002000,
56557+ GR_INHERITLEARN = 0x00004000,
56558+ GR_PROCFIND = 0x00008000,
56559+ GR_POVERRIDE = 0x00010000,
56560+ GR_KERNELAUTH = 0x00020000,
56561+ GR_ATSECURE = 0x00040000,
56562+ GR_SHMEXEC = 0x00080000
56563+};
56564+
56565+enum {
56566+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
56567+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
56568+ GR_PAX_ENABLE_MPROTECT = 0x0004,
56569+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
56570+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
56571+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
56572+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
56573+ GR_PAX_DISABLE_MPROTECT = 0x0400,
56574+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
56575+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
56576+};
56577+
56578+enum {
56579+ GR_ID_USER = 0x01,
56580+ GR_ID_GROUP = 0x02,
56581+};
56582+
56583+enum {
56584+ GR_ID_ALLOW = 0x01,
56585+ GR_ID_DENY = 0x02,
56586+};
56587+
56588+#define GR_CRASH_RES 31
56589+#define GR_UIDTABLE_MAX 500
56590+
56591+/* begin resource learning section */
56592+enum {
56593+ GR_RLIM_CPU_BUMP = 60,
56594+ GR_RLIM_FSIZE_BUMP = 50000,
56595+ GR_RLIM_DATA_BUMP = 10000,
56596+ GR_RLIM_STACK_BUMP = 1000,
56597+ GR_RLIM_CORE_BUMP = 10000,
56598+ GR_RLIM_RSS_BUMP = 500000,
56599+ GR_RLIM_NPROC_BUMP = 1,
56600+ GR_RLIM_NOFILE_BUMP = 5,
56601+ GR_RLIM_MEMLOCK_BUMP = 50000,
56602+ GR_RLIM_AS_BUMP = 500000,
56603+ GR_RLIM_LOCKS_BUMP = 2,
56604+ GR_RLIM_SIGPENDING_BUMP = 5,
56605+ GR_RLIM_MSGQUEUE_BUMP = 10000,
56606+ GR_RLIM_NICE_BUMP = 1,
56607+ GR_RLIM_RTPRIO_BUMP = 1,
56608+ GR_RLIM_RTTIME_BUMP = 1000000
56609+};
56610+
56611+#endif
56612diff -urNp linux-3.0.4/include/linux/grinternal.h linux-3.0.4/include/linux/grinternal.h
56613--- linux-3.0.4/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
56614+++ linux-3.0.4/include/linux/grinternal.h 2011-09-24 08:43:45.000000000 -0400
56615@@ -0,0 +1,219 @@
56616+#ifndef __GRINTERNAL_H
56617+#define __GRINTERNAL_H
56618+
56619+#ifdef CONFIG_GRKERNSEC
56620+
56621+#include <linux/fs.h>
56622+#include <linux/mnt_namespace.h>
56623+#include <linux/nsproxy.h>
56624+#include <linux/gracl.h>
56625+#include <linux/grdefs.h>
56626+#include <linux/grmsg.h>
56627+
56628+void gr_add_learn_entry(const char *fmt, ...)
56629+ __attribute__ ((format (printf, 1, 2)));
56630+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
56631+ const struct vfsmount *mnt);
56632+__u32 gr_check_create(const struct dentry *new_dentry,
56633+ const struct dentry *parent,
56634+ const struct vfsmount *mnt, const __u32 mode);
56635+int gr_check_protected_task(const struct task_struct *task);
56636+__u32 to_gr_audit(const __u32 reqmode);
56637+int gr_set_acls(const int type);
56638+int gr_apply_subject_to_task(struct task_struct *task);
56639+int gr_acl_is_enabled(void);
56640+char gr_roletype_to_char(void);
56641+
56642+void gr_handle_alertkill(struct task_struct *task);
56643+char *gr_to_filename(const struct dentry *dentry,
56644+ const struct vfsmount *mnt);
56645+char *gr_to_filename1(const struct dentry *dentry,
56646+ const struct vfsmount *mnt);
56647+char *gr_to_filename2(const struct dentry *dentry,
56648+ const struct vfsmount *mnt);
56649+char *gr_to_filename3(const struct dentry *dentry,
56650+ const struct vfsmount *mnt);
56651+
56652+extern int grsec_enable_harden_ptrace;
56653+extern int grsec_enable_link;
56654+extern int grsec_enable_fifo;
56655+extern int grsec_enable_execve;
56656+extern int grsec_enable_shm;
56657+extern int grsec_enable_execlog;
56658+extern int grsec_enable_signal;
56659+extern int grsec_enable_audit_ptrace;
56660+extern int grsec_enable_forkfail;
56661+extern int grsec_enable_time;
56662+extern int grsec_enable_rofs;
56663+extern int grsec_enable_chroot_shmat;
56664+extern int grsec_enable_chroot_mount;
56665+extern int grsec_enable_chroot_double;
56666+extern int grsec_enable_chroot_pivot;
56667+extern int grsec_enable_chroot_chdir;
56668+extern int grsec_enable_chroot_chmod;
56669+extern int grsec_enable_chroot_mknod;
56670+extern int grsec_enable_chroot_fchdir;
56671+extern int grsec_enable_chroot_nice;
56672+extern int grsec_enable_chroot_execlog;
56673+extern int grsec_enable_chroot_caps;
56674+extern int grsec_enable_chroot_sysctl;
56675+extern int grsec_enable_chroot_unix;
56676+extern int grsec_enable_tpe;
56677+extern int grsec_tpe_gid;
56678+extern int grsec_enable_tpe_all;
56679+extern int grsec_enable_tpe_invert;
56680+extern int grsec_enable_socket_all;
56681+extern int grsec_socket_all_gid;
56682+extern int grsec_enable_socket_client;
56683+extern int grsec_socket_client_gid;
56684+extern int grsec_enable_socket_server;
56685+extern int grsec_socket_server_gid;
56686+extern int grsec_audit_gid;
56687+extern int grsec_enable_group;
56688+extern int grsec_enable_audit_textrel;
56689+extern int grsec_enable_log_rwxmaps;
56690+extern int grsec_enable_mount;
56691+extern int grsec_enable_chdir;
56692+extern int grsec_resource_logging;
56693+extern int grsec_enable_blackhole;
56694+extern int grsec_lastack_retries;
56695+extern int grsec_enable_brute;
56696+extern int grsec_lock;
56697+
56698+extern spinlock_t grsec_alert_lock;
56699+extern unsigned long grsec_alert_wtime;
56700+extern unsigned long grsec_alert_fyet;
56701+
56702+extern spinlock_t grsec_audit_lock;
56703+
56704+extern rwlock_t grsec_exec_file_lock;
56705+
56706+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
56707+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
56708+ (tsk)->exec_file->f_vfsmnt) : "/")
56709+
56710+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
56711+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
56712+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
56713+
56714+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
56715+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
56716+ (tsk)->exec_file->f_vfsmnt) : "/")
56717+
56718+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
56719+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
56720+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
56721+
56722+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
56723+
56724+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
56725+
56726+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
56727+ (task)->pid, (cred)->uid, \
56728+ (cred)->euid, (cred)->gid, (cred)->egid, \
56729+ gr_parent_task_fullpath(task), \
56730+ (task)->real_parent->comm, (task)->real_parent->pid, \
56731+ (pcred)->uid, (pcred)->euid, \
56732+ (pcred)->gid, (pcred)->egid
56733+
56734+#define GR_CHROOT_CAPS {{ \
56735+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
56736+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
56737+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
56738+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
56739+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
56740+ CAP_TO_MASK(CAP_IPC_OWNER) , CAP_TO_MASK(CAP_SYSLOG) }}
56741+
56742+#define security_learn(normal_msg,args...) \
56743+({ \
56744+ read_lock(&grsec_exec_file_lock); \
56745+ gr_add_learn_entry(normal_msg "\n", ## args); \
56746+ read_unlock(&grsec_exec_file_lock); \
56747+})
56748+
56749+enum {
56750+ GR_DO_AUDIT,
56751+ GR_DONT_AUDIT,
56752+ /* used for non-audit messages that we shouldn't kill the task on */
56753+ GR_DONT_AUDIT_GOOD
56754+};
56755+
56756+enum {
56757+ GR_TTYSNIFF,
56758+ GR_RBAC,
56759+ GR_RBAC_STR,
56760+ GR_STR_RBAC,
56761+ GR_RBAC_MODE2,
56762+ GR_RBAC_MODE3,
56763+ GR_FILENAME,
56764+ GR_SYSCTL_HIDDEN,
56765+ GR_NOARGS,
56766+ GR_ONE_INT,
56767+ GR_ONE_INT_TWO_STR,
56768+ GR_ONE_STR,
56769+ GR_STR_INT,
56770+ GR_TWO_STR_INT,
56771+ GR_TWO_INT,
56772+ GR_TWO_U64,
56773+ GR_THREE_INT,
56774+ GR_FIVE_INT_TWO_STR,
56775+ GR_TWO_STR,
56776+ GR_THREE_STR,
56777+ GR_FOUR_STR,
56778+ GR_STR_FILENAME,
56779+ GR_FILENAME_STR,
56780+ GR_FILENAME_TWO_INT,
56781+ GR_FILENAME_TWO_INT_STR,
56782+ GR_TEXTREL,
56783+ GR_PTRACE,
56784+ GR_RESOURCE,
56785+ GR_CAP,
56786+ GR_SIG,
56787+ GR_SIG2,
56788+ GR_CRASH1,
56789+ GR_CRASH2,
56790+ GR_PSACCT,
56791+ GR_RWXMAP
56792+};
56793+
56794+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
56795+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
56796+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
56797+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
56798+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
56799+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
56800+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
56801+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
56802+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
56803+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
56804+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
56805+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
56806+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
56807+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
56808+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
56809+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
56810+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
56811+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
56812+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
56813+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
56814+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
56815+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
56816+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
56817+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
56818+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
56819+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
56820+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
56821+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
56822+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
56823+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
56824+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
56825+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
56826+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
56827+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
56828+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
56829+
56830+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
56831+
56832+#endif
56833+
56834+#endif
56835diff -urNp linux-3.0.4/include/linux/grmsg.h linux-3.0.4/include/linux/grmsg.h
56836--- linux-3.0.4/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
56837+++ linux-3.0.4/include/linux/grmsg.h 2011-09-14 09:16:54.000000000 -0400
56838@@ -0,0 +1,108 @@
56839+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
56840+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
56841+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
56842+#define GR_STOPMOD_MSG "denied modification of module state by "
56843+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
56844+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
56845+#define GR_IOPERM_MSG "denied use of ioperm() by "
56846+#define GR_IOPL_MSG "denied use of iopl() by "
56847+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
56848+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
56849+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
56850+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
56851+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
56852+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
56853+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
56854+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
56855+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
56856+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
56857+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
56858+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
56859+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
56860+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
56861+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
56862+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
56863+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
56864+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
56865+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
56866+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
56867+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
56868+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
56869+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
56870+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
56871+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
56872+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
56873+#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
56874+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
56875+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
56876+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
56877+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
56878+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
56879+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
56880+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
56881+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
56882+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
56883+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
56884+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
56885+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
56886+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
56887+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
56888+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
56889+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
56890+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
56891+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
56892+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
56893+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
56894+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
56895+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
56896+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
56897+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
56898+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
56899+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
56900+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
56901+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
56902+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
56903+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
56904+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
56905+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
56906+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
56907+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
56908+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
56909+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
56910+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
56911+#define GR_FAILFORK_MSG "failed fork with errno %s by "
56912+#define GR_NICE_CHROOT_MSG "denied priority change by "
56913+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
56914+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
56915+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
56916+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
56917+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
56918+#define GR_TIME_MSG "time set by "
56919+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
56920+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
56921+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
56922+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
56923+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
56924+#define GR_BIND_MSG "denied bind() by "
56925+#define GR_CONNECT_MSG "denied connect() by "
56926+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
56927+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
56928+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
56929+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
56930+#define GR_CAP_ACL_MSG "use of %s denied for "
56931+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
56932+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
56933+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
56934+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
56935+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
56936+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
56937+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
56938+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
56939+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
56940+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
56941+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
56942+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
56943+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
56944+#define GR_VM86_MSG "denied use of vm86 by "
56945+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
56946+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
56947diff -urNp linux-3.0.4/include/linux/grsecurity.h linux-3.0.4/include/linux/grsecurity.h
56948--- linux-3.0.4/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
56949+++ linux-3.0.4/include/linux/grsecurity.h 2011-09-14 09:16:54.000000000 -0400
56950@@ -0,0 +1,226 @@
56951+#ifndef GR_SECURITY_H
56952+#define GR_SECURITY_H
56953+#include <linux/fs.h>
56954+#include <linux/fs_struct.h>
56955+#include <linux/binfmts.h>
56956+#include <linux/gracl.h>
56957+
56958+/* notify of brain-dead configs */
56959+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
56960+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
56961+#endif
56962+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
56963+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
56964+#endif
56965+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
56966+#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
56967+#endif
56968+#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
56969+#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
56970+#endif
56971+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
56972+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
56973+#endif
56974+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
56975+#error "CONFIG_PAX enabled, but no PaX options are enabled."
56976+#endif
56977+
56978+#include <linux/compat.h>
56979+
56980+struct user_arg_ptr {
56981+#ifdef CONFIG_COMPAT
56982+ bool is_compat;
56983+#endif
56984+ union {
56985+ const char __user *const __user *native;
56986+#ifdef CONFIG_COMPAT
56987+ compat_uptr_t __user *compat;
56988+#endif
56989+ } ptr;
56990+};
56991+
56992+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
56993+void gr_handle_brute_check(void);
56994+void gr_handle_kernel_exploit(void);
56995+int gr_process_user_ban(void);
56996+
56997+char gr_roletype_to_char(void);
56998+
56999+int gr_acl_enable_at_secure(void);
57000+
57001+int gr_check_user_change(int real, int effective, int fs);
57002+int gr_check_group_change(int real, int effective, int fs);
57003+
57004+void gr_del_task_from_ip_table(struct task_struct *p);
57005+
57006+int gr_pid_is_chrooted(struct task_struct *p);
57007+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
57008+int gr_handle_chroot_nice(void);
57009+int gr_handle_chroot_sysctl(const int op);
57010+int gr_handle_chroot_setpriority(struct task_struct *p,
57011+ const int niceval);
57012+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
57013+int gr_handle_chroot_chroot(const struct dentry *dentry,
57014+ const struct vfsmount *mnt);
57015+void gr_handle_chroot_chdir(struct path *path);
57016+int gr_handle_chroot_chmod(const struct dentry *dentry,
57017+ const struct vfsmount *mnt, const int mode);
57018+int gr_handle_chroot_mknod(const struct dentry *dentry,
57019+ const struct vfsmount *mnt, const int mode);
57020+int gr_handle_chroot_mount(const struct dentry *dentry,
57021+ const struct vfsmount *mnt,
57022+ const char *dev_name);
57023+int gr_handle_chroot_pivot(void);
57024+int gr_handle_chroot_unix(const pid_t pid);
57025+
57026+int gr_handle_rawio(const struct inode *inode);
57027+
57028+void gr_handle_ioperm(void);
57029+void gr_handle_iopl(void);
57030+
57031+int gr_tpe_allow(const struct file *file);
57032+
57033+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
57034+void gr_clear_chroot_entries(struct task_struct *task);
57035+
57036+void gr_log_forkfail(const int retval);
57037+void gr_log_timechange(void);
57038+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
57039+void gr_log_chdir(const struct dentry *dentry,
57040+ const struct vfsmount *mnt);
57041+void gr_log_chroot_exec(const struct dentry *dentry,
57042+ const struct vfsmount *mnt);
57043+void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
57044+void gr_log_remount(const char *devname, const int retval);
57045+void gr_log_unmount(const char *devname, const int retval);
57046+void gr_log_mount(const char *from, const char *to, const int retval);
57047+void gr_log_textrel(struct vm_area_struct *vma);
57048+void gr_log_rwxmmap(struct file *file);
57049+void gr_log_rwxmprotect(struct file *file);
57050+
57051+int gr_handle_follow_link(const struct inode *parent,
57052+ const struct inode *inode,
57053+ const struct dentry *dentry,
57054+ const struct vfsmount *mnt);
57055+int gr_handle_fifo(const struct dentry *dentry,
57056+ const struct vfsmount *mnt,
57057+ const struct dentry *dir, const int flag,
57058+ const int acc_mode);
57059+int gr_handle_hardlink(const struct dentry *dentry,
57060+ const struct vfsmount *mnt,
57061+ struct inode *inode,
57062+ const int mode, const char *to);
57063+
57064+int gr_is_capable(const int cap);
57065+int gr_is_capable_nolog(const int cap);
57066+void gr_learn_resource(const struct task_struct *task, const int limit,
57067+ const unsigned long wanted, const int gt);
57068+void gr_copy_label(struct task_struct *tsk);
57069+void gr_handle_crash(struct task_struct *task, const int sig);
57070+int gr_handle_signal(const struct task_struct *p, const int sig);
57071+int gr_check_crash_uid(const uid_t uid);
57072+int gr_check_protected_task(const struct task_struct *task);
57073+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
57074+int gr_acl_handle_mmap(const struct file *file,
57075+ const unsigned long prot);
57076+int gr_acl_handle_mprotect(const struct file *file,
57077+ const unsigned long prot);
57078+int gr_check_hidden_task(const struct task_struct *tsk);
57079+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
57080+ const struct vfsmount *mnt);
57081+__u32 gr_acl_handle_utime(const struct dentry *dentry,
57082+ const struct vfsmount *mnt);
57083+__u32 gr_acl_handle_access(const struct dentry *dentry,
57084+ const struct vfsmount *mnt, const int fmode);
57085+__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
57086+ const struct vfsmount *mnt, mode_t mode);
57087+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
57088+ const struct vfsmount *mnt, mode_t mode);
57089+__u32 gr_acl_handle_chown(const struct dentry *dentry,
57090+ const struct vfsmount *mnt);
57091+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
57092+ const struct vfsmount *mnt);
57093+int gr_handle_ptrace(struct task_struct *task, const long request);
57094+int gr_handle_proc_ptrace(struct task_struct *task);
57095+__u32 gr_acl_handle_execve(const struct dentry *dentry,
57096+ const struct vfsmount *mnt);
57097+int gr_check_crash_exec(const struct file *filp);
57098+int gr_acl_is_enabled(void);
57099+void gr_set_kernel_label(struct task_struct *task);
57100+void gr_set_role_label(struct task_struct *task, const uid_t uid,
57101+ const gid_t gid);
57102+int gr_set_proc_label(const struct dentry *dentry,
57103+ const struct vfsmount *mnt,
57104+ const int unsafe_share);
57105+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
57106+ const struct vfsmount *mnt);
57107+__u32 gr_acl_handle_open(const struct dentry *dentry,
57108+ const struct vfsmount *mnt, const int fmode);
57109+__u32 gr_acl_handle_creat(const struct dentry *dentry,
57110+ const struct dentry *p_dentry,
57111+ const struct vfsmount *p_mnt, const int fmode,
57112+ const int imode);
57113+void gr_handle_create(const struct dentry *dentry,
57114+ const struct vfsmount *mnt);
57115+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
57116+ const struct dentry *parent_dentry,
57117+ const struct vfsmount *parent_mnt,
57118+ const int mode);
57119+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
57120+ const struct dentry *parent_dentry,
57121+ const struct vfsmount *parent_mnt);
57122+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
57123+ const struct vfsmount *mnt);
57124+void gr_handle_delete(const ino_t ino, const dev_t dev);
57125+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
57126+ const struct vfsmount *mnt);
57127+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
57128+ const struct dentry *parent_dentry,
57129+ const struct vfsmount *parent_mnt,
57130+ const char *from);
57131+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
57132+ const struct dentry *parent_dentry,
57133+ const struct vfsmount *parent_mnt,
57134+ const struct dentry *old_dentry,
57135+ const struct vfsmount *old_mnt, const char *to);
57136+int gr_acl_handle_rename(struct dentry *new_dentry,
57137+ struct dentry *parent_dentry,
57138+ const struct vfsmount *parent_mnt,
57139+ struct dentry *old_dentry,
57140+ struct inode *old_parent_inode,
57141+ struct vfsmount *old_mnt, const char *newname);
57142+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
57143+ struct dentry *old_dentry,
57144+ struct dentry *new_dentry,
57145+ struct vfsmount *mnt, const __u8 replace);
57146+__u32 gr_check_link(const struct dentry *new_dentry,
57147+ const struct dentry *parent_dentry,
57148+ const struct vfsmount *parent_mnt,
57149+ const struct dentry *old_dentry,
57150+ const struct vfsmount *old_mnt);
57151+int gr_acl_handle_filldir(const struct file *file, const char *name,
57152+ const unsigned int namelen, const ino_t ino);
57153+
57154+__u32 gr_acl_handle_unix(const struct dentry *dentry,
57155+ const struct vfsmount *mnt);
57156+void gr_acl_handle_exit(void);
57157+void gr_acl_handle_psacct(struct task_struct *task, const long code);
57158+int gr_acl_handle_procpidmem(const struct task_struct *task);
57159+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
57160+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
57161+void gr_audit_ptrace(struct task_struct *task);
57162+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
57163+
57164+#ifdef CONFIG_GRKERNSEC
57165+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
57166+void gr_handle_vm86(void);
57167+void gr_handle_mem_readwrite(u64 from, u64 to);
57168+
57169+extern int grsec_enable_dmesg;
57170+extern int grsec_disable_privio;
57171+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
57172+extern int grsec_enable_chroot_findtask;
57173+#endif
57174+#endif
57175+
57176+#endif
57177diff -urNp linux-3.0.4/include/linux/grsock.h linux-3.0.4/include/linux/grsock.h
57178--- linux-3.0.4/include/linux/grsock.h 1969-12-31 19:00:00.000000000 -0500
57179+++ linux-3.0.4/include/linux/grsock.h 2011-08-23 21:48:14.000000000 -0400
57180@@ -0,0 +1,19 @@
57181+#ifndef __GRSOCK_H
57182+#define __GRSOCK_H
57183+
57184+extern void gr_attach_curr_ip(const struct sock *sk);
57185+extern int gr_handle_sock_all(const int family, const int type,
57186+ const int protocol);
57187+extern int gr_handle_sock_server(const struct sockaddr *sck);
57188+extern int gr_handle_sock_server_other(const struct sock *sck);
57189+extern int gr_handle_sock_client(const struct sockaddr *sck);
57190+extern int gr_search_connect(struct socket * sock,
57191+ struct sockaddr_in * addr);
57192+extern int gr_search_bind(struct socket * sock,
57193+ struct sockaddr_in * addr);
57194+extern int gr_search_listen(struct socket * sock);
57195+extern int gr_search_accept(struct socket * sock);
57196+extern int gr_search_socket(const int domain, const int type,
57197+ const int protocol);
57198+
57199+#endif
57200diff -urNp linux-3.0.4/include/linux/hid.h linux-3.0.4/include/linux/hid.h
57201--- linux-3.0.4/include/linux/hid.h 2011-07-21 22:17:23.000000000 -0400
57202+++ linux-3.0.4/include/linux/hid.h 2011-08-23 21:47:56.000000000 -0400
57203@@ -675,7 +675,7 @@ struct hid_ll_driver {
57204 unsigned int code, int value);
57205
57206 int (*parse)(struct hid_device *hdev);
57207-};
57208+} __no_const;
57209
57210 #define PM_HINT_FULLON 1<<5
57211 #define PM_HINT_NORMAL 1<<1
57212diff -urNp linux-3.0.4/include/linux/highmem.h linux-3.0.4/include/linux/highmem.h
57213--- linux-3.0.4/include/linux/highmem.h 2011-07-21 22:17:23.000000000 -0400
57214+++ linux-3.0.4/include/linux/highmem.h 2011-08-23 21:47:56.000000000 -0400
57215@@ -185,6 +185,18 @@ static inline void clear_highpage(struct
57216 kunmap_atomic(kaddr, KM_USER0);
57217 }
57218
57219+static inline void sanitize_highpage(struct page *page)
57220+{
57221+ void *kaddr;
57222+ unsigned long flags;
57223+
57224+ local_irq_save(flags);
57225+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
57226+ clear_page(kaddr);
57227+ kunmap_atomic(kaddr, KM_CLEARPAGE);
57228+ local_irq_restore(flags);
57229+}
57230+
57231 static inline void zero_user_segments(struct page *page,
57232 unsigned start1, unsigned end1,
57233 unsigned start2, unsigned end2)
57234diff -urNp linux-3.0.4/include/linux/i2c.h linux-3.0.4/include/linux/i2c.h
57235--- linux-3.0.4/include/linux/i2c.h 2011-07-21 22:17:23.000000000 -0400
57236+++ linux-3.0.4/include/linux/i2c.h 2011-08-23 21:47:56.000000000 -0400
57237@@ -346,6 +346,7 @@ struct i2c_algorithm {
57238 /* To determine what the adapter supports */
57239 u32 (*functionality) (struct i2c_adapter *);
57240 };
57241+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
57242
57243 /*
57244 * i2c_adapter is the structure used to identify a physical i2c bus along
57245diff -urNp linux-3.0.4/include/linux/i2o.h linux-3.0.4/include/linux/i2o.h
57246--- linux-3.0.4/include/linux/i2o.h 2011-07-21 22:17:23.000000000 -0400
57247+++ linux-3.0.4/include/linux/i2o.h 2011-08-23 21:47:56.000000000 -0400
57248@@ -564,7 +564,7 @@ struct i2o_controller {
57249 struct i2o_device *exec; /* Executive */
57250 #if BITS_PER_LONG == 64
57251 spinlock_t context_list_lock; /* lock for context_list */
57252- atomic_t context_list_counter; /* needed for unique contexts */
57253+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
57254 struct list_head context_list; /* list of context id's
57255 and pointers */
57256 #endif
57257diff -urNp linux-3.0.4/include/linux/init.h linux-3.0.4/include/linux/init.h
57258--- linux-3.0.4/include/linux/init.h 2011-07-21 22:17:23.000000000 -0400
57259+++ linux-3.0.4/include/linux/init.h 2011-08-23 21:47:56.000000000 -0400
57260@@ -293,13 +293,13 @@ void __init parse_early_options(char *cm
57261
57262 /* Each module must use one module_init(). */
57263 #define module_init(initfn) \
57264- static inline initcall_t __inittest(void) \
57265+ static inline __used initcall_t __inittest(void) \
57266 { return initfn; } \
57267 int init_module(void) __attribute__((alias(#initfn)));
57268
57269 /* This is only required if you want to be unloadable. */
57270 #define module_exit(exitfn) \
57271- static inline exitcall_t __exittest(void) \
57272+ static inline __used exitcall_t __exittest(void) \
57273 { return exitfn; } \
57274 void cleanup_module(void) __attribute__((alias(#exitfn)));
57275
57276diff -urNp linux-3.0.4/include/linux/init_task.h linux-3.0.4/include/linux/init_task.h
57277--- linux-3.0.4/include/linux/init_task.h 2011-07-21 22:17:23.000000000 -0400
57278+++ linux-3.0.4/include/linux/init_task.h 2011-08-23 21:47:56.000000000 -0400
57279@@ -126,6 +126,12 @@ extern struct cred init_cred;
57280 # define INIT_PERF_EVENTS(tsk)
57281 #endif
57282
57283+#ifdef CONFIG_X86
57284+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
57285+#else
57286+#define INIT_TASK_THREAD_INFO
57287+#endif
57288+
57289 /*
57290 * INIT_TASK is used to set up the first task table, touch at
57291 * your own risk!. Base=0, limit=0x1fffff (=2MB)
57292@@ -164,6 +170,7 @@ extern struct cred init_cred;
57293 RCU_INIT_POINTER(.cred, &init_cred), \
57294 .comm = "swapper", \
57295 .thread = INIT_THREAD, \
57296+ INIT_TASK_THREAD_INFO \
57297 .fs = &init_fs, \
57298 .files = &init_files, \
57299 .signal = &init_signals, \
57300diff -urNp linux-3.0.4/include/linux/intel-iommu.h linux-3.0.4/include/linux/intel-iommu.h
57301--- linux-3.0.4/include/linux/intel-iommu.h 2011-07-21 22:17:23.000000000 -0400
57302+++ linux-3.0.4/include/linux/intel-iommu.h 2011-08-23 21:47:56.000000000 -0400
57303@@ -296,7 +296,7 @@ struct iommu_flush {
57304 u8 fm, u64 type);
57305 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
57306 unsigned int size_order, u64 type);
57307-};
57308+} __no_const;
57309
57310 enum {
57311 SR_DMAR_FECTL_REG,
57312diff -urNp linux-3.0.4/include/linux/interrupt.h linux-3.0.4/include/linux/interrupt.h
57313--- linux-3.0.4/include/linux/interrupt.h 2011-07-21 22:17:23.000000000 -0400
57314+++ linux-3.0.4/include/linux/interrupt.h 2011-08-23 21:47:56.000000000 -0400
57315@@ -422,7 +422,7 @@ enum
57316 /* map softirq index to softirq name. update 'softirq_to_name' in
57317 * kernel/softirq.c when adding a new softirq.
57318 */
57319-extern char *softirq_to_name[NR_SOFTIRQS];
57320+extern const char * const softirq_to_name[NR_SOFTIRQS];
57321
57322 /* softirq mask and active fields moved to irq_cpustat_t in
57323 * asm/hardirq.h to get better cache usage. KAO
57324@@ -430,12 +430,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
57325
57326 struct softirq_action
57327 {
57328- void (*action)(struct softirq_action *);
57329+ void (*action)(void);
57330 };
57331
57332 asmlinkage void do_softirq(void);
57333 asmlinkage void __do_softirq(void);
57334-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
57335+extern void open_softirq(int nr, void (*action)(void));
57336 extern void softirq_init(void);
57337 static inline void __raise_softirq_irqoff(unsigned int nr)
57338 {
57339diff -urNp linux-3.0.4/include/linux/kallsyms.h linux-3.0.4/include/linux/kallsyms.h
57340--- linux-3.0.4/include/linux/kallsyms.h 2011-07-21 22:17:23.000000000 -0400
57341+++ linux-3.0.4/include/linux/kallsyms.h 2011-08-23 21:48:14.000000000 -0400
57342@@ -15,7 +15,8 @@
57343
57344 struct module;
57345
57346-#ifdef CONFIG_KALLSYMS
57347+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
57348+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
57349 /* Lookup the address for a symbol. Returns 0 if not found. */
57350 unsigned long kallsyms_lookup_name(const char *name);
57351
57352@@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(un
57353 /* Stupid that this does nothing, but I didn't create this mess. */
57354 #define __print_symbol(fmt, addr)
57355 #endif /*CONFIG_KALLSYMS*/
57356+#else /* when included by kallsyms.c, vsnprintf.c, or
57357+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
57358+extern void __print_symbol(const char *fmt, unsigned long address);
57359+extern int sprint_backtrace(char *buffer, unsigned long address);
57360+extern int sprint_symbol(char *buffer, unsigned long address);
57361+const char *kallsyms_lookup(unsigned long addr,
57362+ unsigned long *symbolsize,
57363+ unsigned long *offset,
57364+ char **modname, char *namebuf);
57365+#endif
57366
57367 /* This macro allows us to keep printk typechecking */
57368 static void __check_printsym_format(const char *fmt, ...)
57369diff -urNp linux-3.0.4/include/linux/kgdb.h linux-3.0.4/include/linux/kgdb.h
57370--- linux-3.0.4/include/linux/kgdb.h 2011-07-21 22:17:23.000000000 -0400
57371+++ linux-3.0.4/include/linux/kgdb.h 2011-08-26 19:49:56.000000000 -0400
57372@@ -53,7 +53,7 @@ extern int kgdb_connected;
57373 extern int kgdb_io_module_registered;
57374
57375 extern atomic_t kgdb_setting_breakpoint;
57376-extern atomic_t kgdb_cpu_doing_single_step;
57377+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
57378
57379 extern struct task_struct *kgdb_usethread;
57380 extern struct task_struct *kgdb_contthread;
57381@@ -251,7 +251,7 @@ struct kgdb_arch {
57382 void (*disable_hw_break)(struct pt_regs *regs);
57383 void (*remove_all_hw_break)(void);
57384 void (*correct_hw_break)(void);
57385-};
57386+} __do_const;
57387
57388 /**
57389 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
57390@@ -276,7 +276,7 @@ struct kgdb_io {
57391 void (*pre_exception) (void);
57392 void (*post_exception) (void);
57393 int is_console;
57394-};
57395+} __do_const;
57396
57397 extern struct kgdb_arch arch_kgdb_ops;
57398
57399diff -urNp linux-3.0.4/include/linux/kmod.h linux-3.0.4/include/linux/kmod.h
57400--- linux-3.0.4/include/linux/kmod.h 2011-07-21 22:17:23.000000000 -0400
57401+++ linux-3.0.4/include/linux/kmod.h 2011-08-23 21:48:14.000000000 -0400
57402@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysc
57403 * usually useless though. */
57404 extern int __request_module(bool wait, const char *name, ...) \
57405 __attribute__((format(printf, 2, 3)));
57406+extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
57407+ __attribute__((format(printf, 3, 4)));
57408 #define request_module(mod...) __request_module(true, mod)
57409 #define request_module_nowait(mod...) __request_module(false, mod)
57410 #define try_then_request_module(x, mod...) \
57411diff -urNp linux-3.0.4/include/linux/kvm_host.h linux-3.0.4/include/linux/kvm_host.h
57412--- linux-3.0.4/include/linux/kvm_host.h 2011-07-21 22:17:23.000000000 -0400
57413+++ linux-3.0.4/include/linux/kvm_host.h 2011-08-23 21:47:56.000000000 -0400
57414@@ -307,7 +307,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
57415 void vcpu_load(struct kvm_vcpu *vcpu);
57416 void vcpu_put(struct kvm_vcpu *vcpu);
57417
57418-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
57419+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
57420 struct module *module);
57421 void kvm_exit(void);
57422
57423@@ -446,7 +446,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
57424 struct kvm_guest_debug *dbg);
57425 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
57426
57427-int kvm_arch_init(void *opaque);
57428+int kvm_arch_init(const void *opaque);
57429 void kvm_arch_exit(void);
57430
57431 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
57432diff -urNp linux-3.0.4/include/linux/libata.h linux-3.0.4/include/linux/libata.h
57433--- linux-3.0.4/include/linux/libata.h 2011-07-21 22:17:23.000000000 -0400
57434+++ linux-3.0.4/include/linux/libata.h 2011-08-26 19:49:56.000000000 -0400
57435@@ -899,7 +899,7 @@ struct ata_port_operations {
57436 * fields must be pointers.
57437 */
57438 const struct ata_port_operations *inherits;
57439-};
57440+} __do_const;
57441
57442 struct ata_port_info {
57443 unsigned long flags;
57444diff -urNp linux-3.0.4/include/linux/mca.h linux-3.0.4/include/linux/mca.h
57445--- linux-3.0.4/include/linux/mca.h 2011-07-21 22:17:23.000000000 -0400
57446+++ linux-3.0.4/include/linux/mca.h 2011-08-23 21:47:56.000000000 -0400
57447@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
57448 int region);
57449 void * (*mca_transform_memory)(struct mca_device *,
57450 void *memory);
57451-};
57452+} __no_const;
57453
57454 struct mca_bus {
57455 u64 default_dma_mask;
57456diff -urNp linux-3.0.4/include/linux/memory.h linux-3.0.4/include/linux/memory.h
57457--- linux-3.0.4/include/linux/memory.h 2011-07-21 22:17:23.000000000 -0400
57458+++ linux-3.0.4/include/linux/memory.h 2011-08-23 21:47:56.000000000 -0400
57459@@ -144,7 +144,7 @@ struct memory_accessor {
57460 size_t count);
57461 ssize_t (*write)(struct memory_accessor *, const char *buf,
57462 off_t offset, size_t count);
57463-};
57464+} __no_const;
57465
57466 /*
57467 * Kernel text modification mutex, used for code patching. Users of this lock
57468diff -urNp linux-3.0.4/include/linux/mfd/abx500.h linux-3.0.4/include/linux/mfd/abx500.h
57469--- linux-3.0.4/include/linux/mfd/abx500.h 2011-07-21 22:17:23.000000000 -0400
57470+++ linux-3.0.4/include/linux/mfd/abx500.h 2011-08-23 21:47:56.000000000 -0400
57471@@ -234,6 +234,7 @@ struct abx500_ops {
57472 int (*event_registers_startup_state_get) (struct device *, u8 *);
57473 int (*startup_irq_enabled) (struct device *, unsigned int);
57474 };
57475+typedef struct abx500_ops __no_const abx500_ops_no_const;
57476
57477 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
57478 void abx500_remove_ops(struct device *dev);
57479diff -urNp linux-3.0.4/include/linux/mm.h linux-3.0.4/include/linux/mm.h
57480--- linux-3.0.4/include/linux/mm.h 2011-09-02 18:11:21.000000000 -0400
57481+++ linux-3.0.4/include/linux/mm.h 2011-08-23 21:47:56.000000000 -0400
57482@@ -113,7 +113,14 @@ extern unsigned int kobjsize(const void
57483
57484 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
57485 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
57486+
57487+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
57488+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
57489+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
57490+#else
57491 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
57492+#endif
57493+
57494 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
57495 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
57496
57497@@ -1009,34 +1016,6 @@ int set_page_dirty(struct page *page);
57498 int set_page_dirty_lock(struct page *page);
57499 int clear_page_dirty_for_io(struct page *page);
57500
57501-/* Is the vma a continuation of the stack vma above it? */
57502-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
57503-{
57504- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
57505-}
57506-
57507-static inline int stack_guard_page_start(struct vm_area_struct *vma,
57508- unsigned long addr)
57509-{
57510- return (vma->vm_flags & VM_GROWSDOWN) &&
57511- (vma->vm_start == addr) &&
57512- !vma_growsdown(vma->vm_prev, addr);
57513-}
57514-
57515-/* Is the vma a continuation of the stack vma below it? */
57516-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
57517-{
57518- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
57519-}
57520-
57521-static inline int stack_guard_page_end(struct vm_area_struct *vma,
57522- unsigned long addr)
57523-{
57524- return (vma->vm_flags & VM_GROWSUP) &&
57525- (vma->vm_end == addr) &&
57526- !vma_growsup(vma->vm_next, addr);
57527-}
57528-
57529 extern unsigned long move_page_tables(struct vm_area_struct *vma,
57530 unsigned long old_addr, struct vm_area_struct *new_vma,
57531 unsigned long new_addr, unsigned long len);
57532@@ -1169,6 +1148,15 @@ struct shrinker {
57533 extern void register_shrinker(struct shrinker *);
57534 extern void unregister_shrinker(struct shrinker *);
57535
57536+#ifdef CONFIG_MMU
57537+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
57538+#else
57539+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
57540+{
57541+ return __pgprot(0);
57542+}
57543+#endif
57544+
57545 int vma_wants_writenotify(struct vm_area_struct *vma);
57546
57547 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
57548@@ -1452,6 +1440,7 @@ out:
57549 }
57550
57551 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
57552+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
57553
57554 extern unsigned long do_brk(unsigned long, unsigned long);
57555
57556@@ -1510,6 +1499,10 @@ extern struct vm_area_struct * find_vma(
57557 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
57558 struct vm_area_struct **pprev);
57559
57560+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
57561+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
57562+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
57563+
57564 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
57565 NULL if none. Assume start_addr < end_addr. */
57566 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
57567@@ -1526,15 +1519,6 @@ static inline unsigned long vma_pages(st
57568 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
57569 }
57570
57571-#ifdef CONFIG_MMU
57572-pgprot_t vm_get_page_prot(unsigned long vm_flags);
57573-#else
57574-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
57575-{
57576- return __pgprot(0);
57577-}
57578-#endif
57579-
57580 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
57581 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
57582 unsigned long pfn, unsigned long size, pgprot_t);
57583@@ -1647,7 +1631,7 @@ extern int unpoison_memory(unsigned long
57584 extern int sysctl_memory_failure_early_kill;
57585 extern int sysctl_memory_failure_recovery;
57586 extern void shake_page(struct page *p, int access);
57587-extern atomic_long_t mce_bad_pages;
57588+extern atomic_long_unchecked_t mce_bad_pages;
57589 extern int soft_offline_page(struct page *page, int flags);
57590
57591 extern void dump_page(struct page *page);
57592@@ -1661,5 +1645,11 @@ extern void copy_user_huge_page(struct p
57593 unsigned int pages_per_huge_page);
57594 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
57595
57596+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
57597+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
57598+#else
57599+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
57600+#endif
57601+
57602 #endif /* __KERNEL__ */
57603 #endif /* _LINUX_MM_H */
57604diff -urNp linux-3.0.4/include/linux/mm_types.h linux-3.0.4/include/linux/mm_types.h
57605--- linux-3.0.4/include/linux/mm_types.h 2011-07-21 22:17:23.000000000 -0400
57606+++ linux-3.0.4/include/linux/mm_types.h 2011-08-23 21:47:56.000000000 -0400
57607@@ -184,6 +184,8 @@ struct vm_area_struct {
57608 #ifdef CONFIG_NUMA
57609 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
57610 #endif
57611+
57612+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
57613 };
57614
57615 struct core_thread {
57616@@ -316,6 +318,24 @@ struct mm_struct {
57617 #ifdef CONFIG_CPUMASK_OFFSTACK
57618 struct cpumask cpumask_allocation;
57619 #endif
57620+
57621+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
57622+ unsigned long pax_flags;
57623+#endif
57624+
57625+#ifdef CONFIG_PAX_DLRESOLVE
57626+ unsigned long call_dl_resolve;
57627+#endif
57628+
57629+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
57630+ unsigned long call_syscall;
57631+#endif
57632+
57633+#ifdef CONFIG_PAX_ASLR
57634+ unsigned long delta_mmap; /* randomized offset */
57635+ unsigned long delta_stack; /* randomized offset */
57636+#endif
57637+
57638 };
57639
57640 static inline void mm_init_cpumask(struct mm_struct *mm)
57641diff -urNp linux-3.0.4/include/linux/mmu_notifier.h linux-3.0.4/include/linux/mmu_notifier.h
57642--- linux-3.0.4/include/linux/mmu_notifier.h 2011-07-21 22:17:23.000000000 -0400
57643+++ linux-3.0.4/include/linux/mmu_notifier.h 2011-08-23 21:47:56.000000000 -0400
57644@@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destr
57645 */
57646 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
57647 ({ \
57648- pte_t __pte; \
57649+ pte_t ___pte; \
57650 struct vm_area_struct *___vma = __vma; \
57651 unsigned long ___address = __address; \
57652- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
57653+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
57654 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
57655- __pte; \
57656+ ___pte; \
57657 })
57658
57659 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
57660diff -urNp linux-3.0.4/include/linux/mmzone.h linux-3.0.4/include/linux/mmzone.h
57661--- linux-3.0.4/include/linux/mmzone.h 2011-07-21 22:17:23.000000000 -0400
57662+++ linux-3.0.4/include/linux/mmzone.h 2011-08-23 21:47:56.000000000 -0400
57663@@ -350,7 +350,7 @@ struct zone {
57664 unsigned long flags; /* zone flags, see below */
57665
57666 /* Zone statistics */
57667- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
57668+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
57669
57670 /*
57671 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
57672diff -urNp linux-3.0.4/include/linux/mod_devicetable.h linux-3.0.4/include/linux/mod_devicetable.h
57673--- linux-3.0.4/include/linux/mod_devicetable.h 2011-07-21 22:17:23.000000000 -0400
57674+++ linux-3.0.4/include/linux/mod_devicetable.h 2011-08-23 21:47:56.000000000 -0400
57675@@ -12,7 +12,7 @@
57676 typedef unsigned long kernel_ulong_t;
57677 #endif
57678
57679-#define PCI_ANY_ID (~0)
57680+#define PCI_ANY_ID ((__u16)~0)
57681
57682 struct pci_device_id {
57683 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
57684@@ -131,7 +131,7 @@ struct usb_device_id {
57685 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
57686 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
57687
57688-#define HID_ANY_ID (~0)
57689+#define HID_ANY_ID (~0U)
57690
57691 struct hid_device_id {
57692 __u16 bus;
57693diff -urNp linux-3.0.4/include/linux/module.h linux-3.0.4/include/linux/module.h
57694--- linux-3.0.4/include/linux/module.h 2011-07-21 22:17:23.000000000 -0400
57695+++ linux-3.0.4/include/linux/module.h 2011-08-23 21:47:56.000000000 -0400
57696@@ -16,6 +16,7 @@
57697 #include <linux/kobject.h>
57698 #include <linux/moduleparam.h>
57699 #include <linux/tracepoint.h>
57700+#include <linux/fs.h>
57701
57702 #include <linux/percpu.h>
57703 #include <asm/module.h>
57704@@ -325,19 +326,16 @@ struct module
57705 int (*init)(void);
57706
57707 /* If this is non-NULL, vfree after init() returns */
57708- void *module_init;
57709+ void *module_init_rx, *module_init_rw;
57710
57711 /* Here is the actual code + data, vfree'd on unload. */
57712- void *module_core;
57713+ void *module_core_rx, *module_core_rw;
57714
57715 /* Here are the sizes of the init and core sections */
57716- unsigned int init_size, core_size;
57717+ unsigned int init_size_rw, core_size_rw;
57718
57719 /* The size of the executable code in each section. */
57720- unsigned int init_text_size, core_text_size;
57721-
57722- /* Size of RO sections of the module (text+rodata) */
57723- unsigned int init_ro_size, core_ro_size;
57724+ unsigned int init_size_rx, core_size_rx;
57725
57726 /* Arch-specific module values */
57727 struct mod_arch_specific arch;
57728@@ -393,6 +391,10 @@ struct module
57729 #ifdef CONFIG_EVENT_TRACING
57730 struct ftrace_event_call **trace_events;
57731 unsigned int num_trace_events;
57732+ struct file_operations trace_id;
57733+ struct file_operations trace_enable;
57734+ struct file_operations trace_format;
57735+ struct file_operations trace_filter;
57736 #endif
57737 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
57738 unsigned int num_ftrace_callsites;
57739@@ -443,16 +445,46 @@ bool is_module_address(unsigned long add
57740 bool is_module_percpu_address(unsigned long addr);
57741 bool is_module_text_address(unsigned long addr);
57742
57743+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
57744+{
57745+
57746+#ifdef CONFIG_PAX_KERNEXEC
57747+ if (ktla_ktva(addr) >= (unsigned long)start &&
57748+ ktla_ktva(addr) < (unsigned long)start + size)
57749+ return 1;
57750+#endif
57751+
57752+ return ((void *)addr >= start && (void *)addr < start + size);
57753+}
57754+
57755+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
57756+{
57757+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
57758+}
57759+
57760+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
57761+{
57762+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
57763+}
57764+
57765+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
57766+{
57767+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
57768+}
57769+
57770+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
57771+{
57772+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
57773+}
57774+
57775 static inline int within_module_core(unsigned long addr, struct module *mod)
57776 {
57777- return (unsigned long)mod->module_core <= addr &&
57778- addr < (unsigned long)mod->module_core + mod->core_size;
57779+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
57780 }
57781
57782 static inline int within_module_init(unsigned long addr, struct module *mod)
57783 {
57784- return (unsigned long)mod->module_init <= addr &&
57785- addr < (unsigned long)mod->module_init + mod->init_size;
57786+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
57787 }
57788
57789 /* Search for module by name: must hold module_mutex. */
57790diff -urNp linux-3.0.4/include/linux/moduleloader.h linux-3.0.4/include/linux/moduleloader.h
57791--- linux-3.0.4/include/linux/moduleloader.h 2011-07-21 22:17:23.000000000 -0400
57792+++ linux-3.0.4/include/linux/moduleloader.h 2011-08-23 21:47:56.000000000 -0400
57793@@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
57794 sections. Returns NULL on failure. */
57795 void *module_alloc(unsigned long size);
57796
57797+#ifdef CONFIG_PAX_KERNEXEC
57798+void *module_alloc_exec(unsigned long size);
57799+#else
57800+#define module_alloc_exec(x) module_alloc(x)
57801+#endif
57802+
57803 /* Free memory returned from module_alloc. */
57804 void module_free(struct module *mod, void *module_region);
57805
57806+#ifdef CONFIG_PAX_KERNEXEC
57807+void module_free_exec(struct module *mod, void *module_region);
57808+#else
57809+#define module_free_exec(x, y) module_free((x), (y))
57810+#endif
57811+
57812 /* Apply the given relocation to the (simplified) ELF. Return -error
57813 or 0. */
57814 int apply_relocate(Elf_Shdr *sechdrs,
57815diff -urNp linux-3.0.4/include/linux/moduleparam.h linux-3.0.4/include/linux/moduleparam.h
57816--- linux-3.0.4/include/linux/moduleparam.h 2011-07-21 22:17:23.000000000 -0400
57817+++ linux-3.0.4/include/linux/moduleparam.h 2011-08-23 21:47:56.000000000 -0400
57818@@ -255,7 +255,7 @@ static inline void __kernel_param_unlock
57819 * @len is usually just sizeof(string).
57820 */
57821 #define module_param_string(name, string, len, perm) \
57822- static const struct kparam_string __param_string_##name \
57823+ static const struct kparam_string __param_string_##name __used \
57824 = { len, string }; \
57825 __module_param_call(MODULE_PARAM_PREFIX, name, \
57826 &param_ops_string, \
57827@@ -370,7 +370,7 @@ extern int param_get_invbool(char *buffe
57828 * module_param_named() for why this might be necessary.
57829 */
57830 #define module_param_array_named(name, array, type, nump, perm) \
57831- static const struct kparam_array __param_arr_##name \
57832+ static const struct kparam_array __param_arr_##name __used \
57833 = { .max = ARRAY_SIZE(array), .num = nump, \
57834 .ops = &param_ops_##type, \
57835 .elemsize = sizeof(array[0]), .elem = array }; \
57836diff -urNp linux-3.0.4/include/linux/namei.h linux-3.0.4/include/linux/namei.h
57837--- linux-3.0.4/include/linux/namei.h 2011-07-21 22:17:23.000000000 -0400
57838+++ linux-3.0.4/include/linux/namei.h 2011-08-23 21:47:56.000000000 -0400
57839@@ -24,7 +24,7 @@ struct nameidata {
57840 unsigned seq;
57841 int last_type;
57842 unsigned depth;
57843- char *saved_names[MAX_NESTED_LINKS + 1];
57844+ const char *saved_names[MAX_NESTED_LINKS + 1];
57845
57846 /* Intent data */
57847 union {
57848@@ -91,12 +91,12 @@ extern int follow_up(struct path *);
57849 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
57850 extern void unlock_rename(struct dentry *, struct dentry *);
57851
57852-static inline void nd_set_link(struct nameidata *nd, char *path)
57853+static inline void nd_set_link(struct nameidata *nd, const char *path)
57854 {
57855 nd->saved_names[nd->depth] = path;
57856 }
57857
57858-static inline char *nd_get_link(struct nameidata *nd)
57859+static inline const char *nd_get_link(const struct nameidata *nd)
57860 {
57861 return nd->saved_names[nd->depth];
57862 }
57863diff -urNp linux-3.0.4/include/linux/netdevice.h linux-3.0.4/include/linux/netdevice.h
57864--- linux-3.0.4/include/linux/netdevice.h 2011-09-02 18:11:21.000000000 -0400
57865+++ linux-3.0.4/include/linux/netdevice.h 2011-08-23 21:47:56.000000000 -0400
57866@@ -979,6 +979,7 @@ struct net_device_ops {
57867 int (*ndo_set_features)(struct net_device *dev,
57868 u32 features);
57869 };
57870+typedef struct net_device_ops __no_const net_device_ops_no_const;
57871
57872 /*
57873 * The DEVICE structure.
57874diff -urNp linux-3.0.4/include/linux/netfilter/xt_gradm.h linux-3.0.4/include/linux/netfilter/xt_gradm.h
57875--- linux-3.0.4/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
57876+++ linux-3.0.4/include/linux/netfilter/xt_gradm.h 2011-08-23 21:48:14.000000000 -0400
57877@@ -0,0 +1,9 @@
57878+#ifndef _LINUX_NETFILTER_XT_GRADM_H
57879+#define _LINUX_NETFILTER_XT_GRADM_H 1
57880+
57881+struct xt_gradm_mtinfo {
57882+ __u16 flags;
57883+ __u16 invflags;
57884+};
57885+
57886+#endif
57887diff -urNp linux-3.0.4/include/linux/of_pdt.h linux-3.0.4/include/linux/of_pdt.h
57888--- linux-3.0.4/include/linux/of_pdt.h 2011-07-21 22:17:23.000000000 -0400
57889+++ linux-3.0.4/include/linux/of_pdt.h 2011-08-30 06:20:11.000000000 -0400
57890@@ -32,7 +32,7 @@ struct of_pdt_ops {
57891
57892 /* return 0 on success; fill in 'len' with number of bytes in path */
57893 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
57894-};
57895+} __no_const;
57896
57897 extern void *prom_early_alloc(unsigned long size);
57898
57899diff -urNp linux-3.0.4/include/linux/oprofile.h linux-3.0.4/include/linux/oprofile.h
57900--- linux-3.0.4/include/linux/oprofile.h 2011-07-21 22:17:23.000000000 -0400
57901+++ linux-3.0.4/include/linux/oprofile.h 2011-08-23 21:47:56.000000000 -0400
57902@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super
57903 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
57904 char const * name, ulong * val);
57905
57906-/** Create a file for read-only access to an atomic_t. */
57907+/** Create a file for read-only access to an atomic_unchecked_t. */
57908 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
57909- char const * name, atomic_t * val);
57910+ char const * name, atomic_unchecked_t * val);
57911
57912 /** create a directory */
57913 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
57914diff -urNp linux-3.0.4/include/linux/padata.h linux-3.0.4/include/linux/padata.h
57915--- linux-3.0.4/include/linux/padata.h 2011-07-21 22:17:23.000000000 -0400
57916+++ linux-3.0.4/include/linux/padata.h 2011-08-23 21:47:56.000000000 -0400
57917@@ -129,7 +129,7 @@ struct parallel_data {
57918 struct padata_instance *pinst;
57919 struct padata_parallel_queue __percpu *pqueue;
57920 struct padata_serial_queue __percpu *squeue;
57921- atomic_t seq_nr;
57922+ atomic_unchecked_t seq_nr;
57923 atomic_t reorder_objects;
57924 atomic_t refcnt;
57925 unsigned int max_seq_nr;
57926diff -urNp linux-3.0.4/include/linux/perf_event.h linux-3.0.4/include/linux/perf_event.h
57927--- linux-3.0.4/include/linux/perf_event.h 2011-07-21 22:17:23.000000000 -0400
57928+++ linux-3.0.4/include/linux/perf_event.h 2011-08-23 21:47:56.000000000 -0400
57929@@ -761,8 +761,8 @@ struct perf_event {
57930
57931 enum perf_event_active_state state;
57932 unsigned int attach_state;
57933- local64_t count;
57934- atomic64_t child_count;
57935+ local64_t count; /* PaX: fix it one day */
57936+ atomic64_unchecked_t child_count;
57937
57938 /*
57939 * These are the total time in nanoseconds that the event
57940@@ -813,8 +813,8 @@ struct perf_event {
57941 * These accumulate total time (in nanoseconds) that children
57942 * events have been enabled and running, respectively.
57943 */
57944- atomic64_t child_total_time_enabled;
57945- atomic64_t child_total_time_running;
57946+ atomic64_unchecked_t child_total_time_enabled;
57947+ atomic64_unchecked_t child_total_time_running;
57948
57949 /*
57950 * Protect attach/detach and child_list:
57951diff -urNp linux-3.0.4/include/linux/pipe_fs_i.h linux-3.0.4/include/linux/pipe_fs_i.h
57952--- linux-3.0.4/include/linux/pipe_fs_i.h 2011-07-21 22:17:23.000000000 -0400
57953+++ linux-3.0.4/include/linux/pipe_fs_i.h 2011-08-23 21:47:56.000000000 -0400
57954@@ -46,9 +46,9 @@ struct pipe_buffer {
57955 struct pipe_inode_info {
57956 wait_queue_head_t wait;
57957 unsigned int nrbufs, curbuf, buffers;
57958- unsigned int readers;
57959- unsigned int writers;
57960- unsigned int waiting_writers;
57961+ atomic_t readers;
57962+ atomic_t writers;
57963+ atomic_t waiting_writers;
57964 unsigned int r_counter;
57965 unsigned int w_counter;
57966 struct page *tmp_page;
57967diff -urNp linux-3.0.4/include/linux/pm_runtime.h linux-3.0.4/include/linux/pm_runtime.h
57968--- linux-3.0.4/include/linux/pm_runtime.h 2011-07-21 22:17:23.000000000 -0400
57969+++ linux-3.0.4/include/linux/pm_runtime.h 2011-08-23 21:47:56.000000000 -0400
57970@@ -94,7 +94,7 @@ static inline bool pm_runtime_callbacks_
57971
57972 static inline void pm_runtime_mark_last_busy(struct device *dev)
57973 {
57974- ACCESS_ONCE(dev->power.last_busy) = jiffies;
57975+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
57976 }
57977
57978 #else /* !CONFIG_PM_RUNTIME */
57979diff -urNp linux-3.0.4/include/linux/poison.h linux-3.0.4/include/linux/poison.h
57980--- linux-3.0.4/include/linux/poison.h 2011-07-21 22:17:23.000000000 -0400
57981+++ linux-3.0.4/include/linux/poison.h 2011-08-23 21:47:56.000000000 -0400
57982@@ -19,8 +19,8 @@
57983 * under normal circumstances, used to verify that nobody uses
57984 * non-initialized list entries.
57985 */
57986-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
57987-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
57988+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
57989+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
57990
57991 /********** include/linux/timer.h **********/
57992 /*
57993diff -urNp linux-3.0.4/include/linux/preempt.h linux-3.0.4/include/linux/preempt.h
57994--- linux-3.0.4/include/linux/preempt.h 2011-07-21 22:17:23.000000000 -0400
57995+++ linux-3.0.4/include/linux/preempt.h 2011-08-23 21:47:56.000000000 -0400
57996@@ -115,7 +115,7 @@ struct preempt_ops {
57997 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
57998 void (*sched_out)(struct preempt_notifier *notifier,
57999 struct task_struct *next);
58000-};
58001+} __no_const;
58002
58003 /**
58004 * preempt_notifier - key for installing preemption notifiers
58005diff -urNp linux-3.0.4/include/linux/proc_fs.h linux-3.0.4/include/linux/proc_fs.h
58006--- linux-3.0.4/include/linux/proc_fs.h 2011-07-21 22:17:23.000000000 -0400
58007+++ linux-3.0.4/include/linux/proc_fs.h 2011-08-23 21:48:14.000000000 -0400
58008@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
58009 return proc_create_data(name, mode, parent, proc_fops, NULL);
58010 }
58011
58012+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
58013+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
58014+{
58015+#ifdef CONFIG_GRKERNSEC_PROC_USER
58016+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
58017+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58018+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
58019+#else
58020+ return proc_create_data(name, mode, parent, proc_fops, NULL);
58021+#endif
58022+}
58023+
58024+
58025 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
58026 mode_t mode, struct proc_dir_entry *base,
58027 read_proc_t *read_proc, void * data)
58028@@ -258,7 +271,7 @@ union proc_op {
58029 int (*proc_show)(struct seq_file *m,
58030 struct pid_namespace *ns, struct pid *pid,
58031 struct task_struct *task);
58032-};
58033+} __no_const;
58034
58035 struct ctl_table_header;
58036 struct ctl_table;
58037diff -urNp linux-3.0.4/include/linux/ptrace.h linux-3.0.4/include/linux/ptrace.h
58038--- linux-3.0.4/include/linux/ptrace.h 2011-07-21 22:17:23.000000000 -0400
58039+++ linux-3.0.4/include/linux/ptrace.h 2011-08-23 21:48:14.000000000 -0400
58040@@ -115,10 +115,10 @@ extern void __ptrace_unlink(struct task_
58041 extern void exit_ptrace(struct task_struct *tracer);
58042 #define PTRACE_MODE_READ 1
58043 #define PTRACE_MODE_ATTACH 2
58044-/* Returns 0 on success, -errno on denial. */
58045-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
58046 /* Returns true on success, false on denial. */
58047 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
58048+/* Returns true on success, false on denial. */
58049+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
58050
58051 static inline int ptrace_reparented(struct task_struct *child)
58052 {
58053diff -urNp linux-3.0.4/include/linux/random.h linux-3.0.4/include/linux/random.h
58054--- linux-3.0.4/include/linux/random.h 2011-09-02 18:11:21.000000000 -0400
58055+++ linux-3.0.4/include/linux/random.h 2011-08-23 21:47:56.000000000 -0400
58056@@ -69,12 +69,17 @@ void srandom32(u32 seed);
58057
58058 u32 prandom32(struct rnd_state *);
58059
58060+static inline unsigned long pax_get_random_long(void)
58061+{
58062+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
58063+}
58064+
58065 /*
58066 * Handle minimum values for seeds
58067 */
58068 static inline u32 __seed(u32 x, u32 m)
58069 {
58070- return (x < m) ? x + m : x;
58071+ return (x <= m) ? x + m + 1 : x;
58072 }
58073
58074 /**
58075diff -urNp linux-3.0.4/include/linux/reboot.h linux-3.0.4/include/linux/reboot.h
58076--- linux-3.0.4/include/linux/reboot.h 2011-07-21 22:17:23.000000000 -0400
58077+++ linux-3.0.4/include/linux/reboot.h 2011-08-23 21:47:56.000000000 -0400
58078@@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
58079 * Architecture-specific implementations of sys_reboot commands.
58080 */
58081
58082-extern void machine_restart(char *cmd);
58083-extern void machine_halt(void);
58084-extern void machine_power_off(void);
58085+extern void machine_restart(char *cmd) __noreturn;
58086+extern void machine_halt(void) __noreturn;
58087+extern void machine_power_off(void) __noreturn;
58088
58089 extern void machine_shutdown(void);
58090 struct pt_regs;
58091@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
58092 */
58093
58094 extern void kernel_restart_prepare(char *cmd);
58095-extern void kernel_restart(char *cmd);
58096-extern void kernel_halt(void);
58097-extern void kernel_power_off(void);
58098+extern void kernel_restart(char *cmd) __noreturn;
58099+extern void kernel_halt(void) __noreturn;
58100+extern void kernel_power_off(void) __noreturn;
58101
58102 extern int C_A_D; /* for sysctl */
58103 void ctrl_alt_del(void);
58104@@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
58105 * Emergency restart, callable from an interrupt handler.
58106 */
58107
58108-extern void emergency_restart(void);
58109+extern void emergency_restart(void) __noreturn;
58110 #include <asm/emergency-restart.h>
58111
58112 #endif
58113diff -urNp linux-3.0.4/include/linux/reiserfs_fs.h linux-3.0.4/include/linux/reiserfs_fs.h
58114--- linux-3.0.4/include/linux/reiserfs_fs.h 2011-07-21 22:17:23.000000000 -0400
58115+++ linux-3.0.4/include/linux/reiserfs_fs.h 2011-08-23 21:47:56.000000000 -0400
58116@@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset
58117 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
58118
58119 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
58120-#define get_generation(s) atomic_read (&fs_generation(s))
58121+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
58122 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
58123 #define __fs_changed(gen,s) (gen != get_generation (s))
58124 #define fs_changed(gen,s) \
58125diff -urNp linux-3.0.4/include/linux/reiserfs_fs_sb.h linux-3.0.4/include/linux/reiserfs_fs_sb.h
58126--- linux-3.0.4/include/linux/reiserfs_fs_sb.h 2011-07-21 22:17:23.000000000 -0400
58127+++ linux-3.0.4/include/linux/reiserfs_fs_sb.h 2011-08-23 21:47:56.000000000 -0400
58128@@ -386,7 +386,7 @@ struct reiserfs_sb_info {
58129 /* Comment? -Hans */
58130 wait_queue_head_t s_wait;
58131 /* To be obsoleted soon by per buffer seals.. -Hans */
58132- atomic_t s_generation_counter; // increased by one every time the
58133+ atomic_unchecked_t s_generation_counter; // increased by one every time the
58134 // tree gets re-balanced
58135 unsigned long s_properties; /* File system properties. Currently holds
58136 on-disk FS format */
58137diff -urNp linux-3.0.4/include/linux/relay.h linux-3.0.4/include/linux/relay.h
58138--- linux-3.0.4/include/linux/relay.h 2011-07-21 22:17:23.000000000 -0400
58139+++ linux-3.0.4/include/linux/relay.h 2011-08-23 21:47:56.000000000 -0400
58140@@ -159,7 +159,7 @@ struct rchan_callbacks
58141 * The callback should return 0 if successful, negative if not.
58142 */
58143 int (*remove_buf_file)(struct dentry *dentry);
58144-};
58145+} __no_const;
58146
58147 /*
58148 * CONFIG_RELAY kernel API, kernel/relay.c
58149diff -urNp linux-3.0.4/include/linux/rfkill.h linux-3.0.4/include/linux/rfkill.h
58150--- linux-3.0.4/include/linux/rfkill.h 2011-07-21 22:17:23.000000000 -0400
58151+++ linux-3.0.4/include/linux/rfkill.h 2011-08-23 21:47:56.000000000 -0400
58152@@ -147,6 +147,7 @@ struct rfkill_ops {
58153 void (*query)(struct rfkill *rfkill, void *data);
58154 int (*set_block)(void *data, bool blocked);
58155 };
58156+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
58157
58158 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
58159 /**
58160diff -urNp linux-3.0.4/include/linux/rmap.h linux-3.0.4/include/linux/rmap.h
58161--- linux-3.0.4/include/linux/rmap.h 2011-07-21 22:17:23.000000000 -0400
58162+++ linux-3.0.4/include/linux/rmap.h 2011-08-23 21:47:56.000000000 -0400
58163@@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struc
58164 void anon_vma_init(void); /* create anon_vma_cachep */
58165 int anon_vma_prepare(struct vm_area_struct *);
58166 void unlink_anon_vmas(struct vm_area_struct *);
58167-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
58168-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
58169+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
58170+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
58171 void __anon_vma_link(struct vm_area_struct *);
58172
58173 static inline void anon_vma_merge(struct vm_area_struct *vma,
58174diff -urNp linux-3.0.4/include/linux/sched.h linux-3.0.4/include/linux/sched.h
58175--- linux-3.0.4/include/linux/sched.h 2011-07-21 22:17:23.000000000 -0400
58176+++ linux-3.0.4/include/linux/sched.h 2011-08-25 17:22:27.000000000 -0400
58177@@ -100,6 +100,7 @@ struct bio_list;
58178 struct fs_struct;
58179 struct perf_event_context;
58180 struct blk_plug;
58181+struct linux_binprm;
58182
58183 /*
58184 * List of flags we want to share for kernel threads,
58185@@ -380,10 +381,13 @@ struct user_namespace;
58186 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
58187
58188 extern int sysctl_max_map_count;
58189+extern unsigned long sysctl_heap_stack_gap;
58190
58191 #include <linux/aio.h>
58192
58193 #ifdef CONFIG_MMU
58194+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
58195+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
58196 extern void arch_pick_mmap_layout(struct mm_struct *mm);
58197 extern unsigned long
58198 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
58199@@ -629,6 +633,17 @@ struct signal_struct {
58200 #ifdef CONFIG_TASKSTATS
58201 struct taskstats *stats;
58202 #endif
58203+
58204+#ifdef CONFIG_GRKERNSEC
58205+ u32 curr_ip;
58206+ u32 saved_ip;
58207+ u32 gr_saddr;
58208+ u32 gr_daddr;
58209+ u16 gr_sport;
58210+ u16 gr_dport;
58211+ u8 used_accept:1;
58212+#endif
58213+
58214 #ifdef CONFIG_AUDIT
58215 unsigned audit_tty;
58216 struct tty_audit_buf *tty_audit_buf;
58217@@ -710,6 +725,11 @@ struct user_struct {
58218 struct key *session_keyring; /* UID's default session keyring */
58219 #endif
58220
58221+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
58222+ unsigned int banned;
58223+ unsigned long ban_expires;
58224+#endif
58225+
58226 /* Hash table maintenance information */
58227 struct hlist_node uidhash_node;
58228 uid_t uid;
58229@@ -1340,8 +1360,8 @@ struct task_struct {
58230 struct list_head thread_group;
58231
58232 struct completion *vfork_done; /* for vfork() */
58233- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
58234- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
58235+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
58236+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
58237
58238 cputime_t utime, stime, utimescaled, stimescaled;
58239 cputime_t gtime;
58240@@ -1357,13 +1377,6 @@ struct task_struct {
58241 struct task_cputime cputime_expires;
58242 struct list_head cpu_timers[3];
58243
58244-/* process credentials */
58245- const struct cred __rcu *real_cred; /* objective and real subjective task
58246- * credentials (COW) */
58247- const struct cred __rcu *cred; /* effective (overridable) subjective task
58248- * credentials (COW) */
58249- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
58250-
58251 char comm[TASK_COMM_LEN]; /* executable name excluding path
58252 - access with [gs]et_task_comm (which lock
58253 it with task_lock())
58254@@ -1380,8 +1393,16 @@ struct task_struct {
58255 #endif
58256 /* CPU-specific state of this task */
58257 struct thread_struct thread;
58258+/* thread_info moved to task_struct */
58259+#ifdef CONFIG_X86
58260+ struct thread_info tinfo;
58261+#endif
58262 /* filesystem information */
58263 struct fs_struct *fs;
58264+
58265+ const struct cred __rcu *cred; /* effective (overridable) subjective task
58266+ * credentials (COW) */
58267+
58268 /* open file information */
58269 struct files_struct *files;
58270 /* namespaces */
58271@@ -1428,6 +1449,11 @@ struct task_struct {
58272 struct rt_mutex_waiter *pi_blocked_on;
58273 #endif
58274
58275+/* process credentials */
58276+ const struct cred __rcu *real_cred; /* objective and real subjective task
58277+ * credentials (COW) */
58278+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
58279+
58280 #ifdef CONFIG_DEBUG_MUTEXES
58281 /* mutex deadlock detection */
58282 struct mutex_waiter *blocked_on;
58283@@ -1538,6 +1564,21 @@ struct task_struct {
58284 unsigned long default_timer_slack_ns;
58285
58286 struct list_head *scm_work_list;
58287+
58288+#ifdef CONFIG_GRKERNSEC
58289+ /* grsecurity */
58290+ struct dentry *gr_chroot_dentry;
58291+ struct acl_subject_label *acl;
58292+ struct acl_role_label *role;
58293+ struct file *exec_file;
58294+ u16 acl_role_id;
58295+ /* is this the task that authenticated to the special role */
58296+ u8 acl_sp_role;
58297+ u8 is_writable;
58298+ u8 brute;
58299+ u8 gr_is_chrooted;
58300+#endif
58301+
58302 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
58303 /* Index of current stored address in ret_stack */
58304 int curr_ret_stack;
58305@@ -1572,6 +1613,57 @@ struct task_struct {
58306 #endif
58307 };
58308
58309+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
58310+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
58311+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
58312+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
58313+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
58314+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
58315+
58316+#ifdef CONFIG_PAX_SOFTMODE
58317+extern int pax_softmode;
58318+#endif
58319+
58320+extern int pax_check_flags(unsigned long *);
58321+
58322+/* if tsk != current then task_lock must be held on it */
58323+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58324+static inline unsigned long pax_get_flags(struct task_struct *tsk)
58325+{
58326+ if (likely(tsk->mm))
58327+ return tsk->mm->pax_flags;
58328+ else
58329+ return 0UL;
58330+}
58331+
58332+/* if tsk != current then task_lock must be held on it */
58333+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
58334+{
58335+ if (likely(tsk->mm)) {
58336+ tsk->mm->pax_flags = flags;
58337+ return 0;
58338+ }
58339+ return -EINVAL;
58340+}
58341+#endif
58342+
58343+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
58344+extern void pax_set_initial_flags(struct linux_binprm *bprm);
58345+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
58346+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
58347+#endif
58348+
58349+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
58350+extern void pax_report_insns(void *pc, void *sp);
58351+extern void pax_report_refcount_overflow(struct pt_regs *regs);
58352+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
58353+
58354+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
58355+extern void pax_track_stack(void);
58356+#else
58357+static inline void pax_track_stack(void) {}
58358+#endif
58359+
58360 /* Future-safe accessor for struct task_struct's cpus_allowed. */
58361 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
58362
58363@@ -1768,6 +1860,7 @@ extern void thread_group_times(struct ta
58364 #define PF_DUMPCORE 0x00000200 /* dumped core */
58365 #define PF_SIGNALED 0x00000400 /* killed by a signal */
58366 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
58367+#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
58368 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
58369 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
58370 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
58371@@ -2056,7 +2149,9 @@ void yield(void);
58372 extern struct exec_domain default_exec_domain;
58373
58374 union thread_union {
58375+#ifndef CONFIG_X86
58376 struct thread_info thread_info;
58377+#endif
58378 unsigned long stack[THREAD_SIZE/sizeof(long)];
58379 };
58380
58381@@ -2089,6 +2184,7 @@ extern struct pid_namespace init_pid_ns;
58382 */
58383
58384 extern struct task_struct *find_task_by_vpid(pid_t nr);
58385+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
58386 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
58387 struct pid_namespace *ns);
58388
58389@@ -2225,7 +2321,7 @@ extern void __cleanup_sighand(struct sig
58390 extern void exit_itimers(struct signal_struct *);
58391 extern void flush_itimer_signals(void);
58392
58393-extern NORET_TYPE void do_group_exit(int);
58394+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
58395
58396 extern void daemonize(const char *, ...);
58397 extern int allow_signal(int);
58398@@ -2393,13 +2489,17 @@ static inline unsigned long *end_of_stac
58399
58400 #endif
58401
58402-static inline int object_is_on_stack(void *obj)
58403+static inline int object_starts_on_stack(void *obj)
58404 {
58405- void *stack = task_stack_page(current);
58406+ const void *stack = task_stack_page(current);
58407
58408 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
58409 }
58410
58411+#ifdef CONFIG_PAX_USERCOPY
58412+extern int object_is_on_stack(const void *obj, unsigned long len);
58413+#endif
58414+
58415 extern void thread_info_cache_init(void);
58416
58417 #ifdef CONFIG_DEBUG_STACK_USAGE
58418diff -urNp linux-3.0.4/include/linux/screen_info.h linux-3.0.4/include/linux/screen_info.h
58419--- linux-3.0.4/include/linux/screen_info.h 2011-07-21 22:17:23.000000000 -0400
58420+++ linux-3.0.4/include/linux/screen_info.h 2011-08-23 21:47:56.000000000 -0400
58421@@ -43,7 +43,8 @@ struct screen_info {
58422 __u16 pages; /* 0x32 */
58423 __u16 vesa_attributes; /* 0x34 */
58424 __u32 capabilities; /* 0x36 */
58425- __u8 _reserved[6]; /* 0x3a */
58426+ __u16 vesapm_size; /* 0x3a */
58427+ __u8 _reserved[4]; /* 0x3c */
58428 } __attribute__((packed));
58429
58430 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
58431diff -urNp linux-3.0.4/include/linux/security.h linux-3.0.4/include/linux/security.h
58432--- linux-3.0.4/include/linux/security.h 2011-07-21 22:17:23.000000000 -0400
58433+++ linux-3.0.4/include/linux/security.h 2011-08-23 21:48:14.000000000 -0400
58434@@ -36,6 +36,7 @@
58435 #include <linux/key.h>
58436 #include <linux/xfrm.h>
58437 #include <linux/slab.h>
58438+#include <linux/grsecurity.h>
58439 #include <net/flow.h>
58440
58441 /* Maximum number of letters for an LSM name string */
58442diff -urNp linux-3.0.4/include/linux/seq_file.h linux-3.0.4/include/linux/seq_file.h
58443--- linux-3.0.4/include/linux/seq_file.h 2011-07-21 22:17:23.000000000 -0400
58444+++ linux-3.0.4/include/linux/seq_file.h 2011-08-23 21:47:56.000000000 -0400
58445@@ -32,6 +32,7 @@ struct seq_operations {
58446 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
58447 int (*show) (struct seq_file *m, void *v);
58448 };
58449+typedef struct seq_operations __no_const seq_operations_no_const;
58450
58451 #define SEQ_SKIP 1
58452
58453diff -urNp linux-3.0.4/include/linux/shmem_fs.h linux-3.0.4/include/linux/shmem_fs.h
58454--- linux-3.0.4/include/linux/shmem_fs.h 2011-07-21 22:17:23.000000000 -0400
58455+++ linux-3.0.4/include/linux/shmem_fs.h 2011-08-23 21:47:56.000000000 -0400
58456@@ -10,7 +10,7 @@
58457
58458 #define SHMEM_NR_DIRECT 16
58459
58460-#define SHMEM_SYMLINK_INLINE_LEN (SHMEM_NR_DIRECT * sizeof(swp_entry_t))
58461+#define SHMEM_SYMLINK_INLINE_LEN 64
58462
58463 struct shmem_inode_info {
58464 spinlock_t lock;
58465diff -urNp linux-3.0.4/include/linux/shm.h linux-3.0.4/include/linux/shm.h
58466--- linux-3.0.4/include/linux/shm.h 2011-07-21 22:17:23.000000000 -0400
58467+++ linux-3.0.4/include/linux/shm.h 2011-08-23 21:48:14.000000000 -0400
58468@@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
58469 pid_t shm_cprid;
58470 pid_t shm_lprid;
58471 struct user_struct *mlock_user;
58472+#ifdef CONFIG_GRKERNSEC
58473+ time_t shm_createtime;
58474+ pid_t shm_lapid;
58475+#endif
58476 };
58477
58478 /* shm_mode upper byte flags */
58479diff -urNp linux-3.0.4/include/linux/skbuff.h linux-3.0.4/include/linux/skbuff.h
58480--- linux-3.0.4/include/linux/skbuff.h 2011-07-21 22:17:23.000000000 -0400
58481+++ linux-3.0.4/include/linux/skbuff.h 2011-08-23 21:47:56.000000000 -0400
58482@@ -592,7 +592,7 @@ static inline struct skb_shared_hwtstamp
58483 */
58484 static inline int skb_queue_empty(const struct sk_buff_head *list)
58485 {
58486- return list->next == (struct sk_buff *)list;
58487+ return list->next == (const struct sk_buff *)list;
58488 }
58489
58490 /**
58491@@ -605,7 +605,7 @@ static inline int skb_queue_empty(const
58492 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
58493 const struct sk_buff *skb)
58494 {
58495- return skb->next == (struct sk_buff *)list;
58496+ return skb->next == (const struct sk_buff *)list;
58497 }
58498
58499 /**
58500@@ -618,7 +618,7 @@ static inline bool skb_queue_is_last(con
58501 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
58502 const struct sk_buff *skb)
58503 {
58504- return skb->prev == (struct sk_buff *)list;
58505+ return skb->prev == (const struct sk_buff *)list;
58506 }
58507
58508 /**
58509@@ -1440,7 +1440,7 @@ static inline int pskb_network_may_pull(
58510 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
58511 */
58512 #ifndef NET_SKB_PAD
58513-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
58514+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
58515 #endif
58516
58517 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
58518diff -urNp linux-3.0.4/include/linux/slab_def.h linux-3.0.4/include/linux/slab_def.h
58519--- linux-3.0.4/include/linux/slab_def.h 2011-07-21 22:17:23.000000000 -0400
58520+++ linux-3.0.4/include/linux/slab_def.h 2011-08-23 21:47:56.000000000 -0400
58521@@ -96,10 +96,10 @@ struct kmem_cache {
58522 unsigned long node_allocs;
58523 unsigned long node_frees;
58524 unsigned long node_overflow;
58525- atomic_t allochit;
58526- atomic_t allocmiss;
58527- atomic_t freehit;
58528- atomic_t freemiss;
58529+ atomic_unchecked_t allochit;
58530+ atomic_unchecked_t allocmiss;
58531+ atomic_unchecked_t freehit;
58532+ atomic_unchecked_t freemiss;
58533
58534 /*
58535 * If debugging is enabled, then the allocator can add additional
58536diff -urNp linux-3.0.4/include/linux/slab.h linux-3.0.4/include/linux/slab.h
58537--- linux-3.0.4/include/linux/slab.h 2011-07-21 22:17:23.000000000 -0400
58538+++ linux-3.0.4/include/linux/slab.h 2011-08-23 21:47:56.000000000 -0400
58539@@ -11,12 +11,20 @@
58540
58541 #include <linux/gfp.h>
58542 #include <linux/types.h>
58543+#include <linux/err.h>
58544
58545 /*
58546 * Flags to pass to kmem_cache_create().
58547 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
58548 */
58549 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
58550+
58551+#ifdef CONFIG_PAX_USERCOPY
58552+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
58553+#else
58554+#define SLAB_USERCOPY 0x00000000UL
58555+#endif
58556+
58557 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
58558 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
58559 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
58560@@ -87,10 +95,13 @@
58561 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
58562 * Both make kfree a no-op.
58563 */
58564-#define ZERO_SIZE_PTR ((void *)16)
58565+#define ZERO_SIZE_PTR \
58566+({ \
58567+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
58568+ (void *)(-MAX_ERRNO-1L); \
58569+})
58570
58571-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
58572- (unsigned long)ZERO_SIZE_PTR)
58573+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
58574
58575 /*
58576 * struct kmem_cache related prototypes
58577@@ -141,6 +152,7 @@ void * __must_check krealloc(const void
58578 void kfree(const void *);
58579 void kzfree(const void *);
58580 size_t ksize(const void *);
58581+void check_object_size(const void *ptr, unsigned long n, bool to);
58582
58583 /*
58584 * Allocator specific definitions. These are mainly used to establish optimized
58585@@ -333,4 +345,59 @@ static inline void *kzalloc_node(size_t
58586
58587 void __init kmem_cache_init_late(void);
58588
58589+#define kmalloc(x, y) \
58590+({ \
58591+ void *___retval; \
58592+ intoverflow_t ___x = (intoverflow_t)x; \
58593+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
58594+ ___retval = NULL; \
58595+ else \
58596+ ___retval = kmalloc((size_t)___x, (y)); \
58597+ ___retval; \
58598+})
58599+
58600+#define kmalloc_node(x, y, z) \
58601+({ \
58602+ void *___retval; \
58603+ intoverflow_t ___x = (intoverflow_t)x; \
58604+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
58605+ ___retval = NULL; \
58606+ else \
58607+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
58608+ ___retval; \
58609+})
58610+
58611+#define kzalloc(x, y) \
58612+({ \
58613+ void *___retval; \
58614+ intoverflow_t ___x = (intoverflow_t)x; \
58615+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
58616+ ___retval = NULL; \
58617+ else \
58618+ ___retval = kzalloc((size_t)___x, (y)); \
58619+ ___retval; \
58620+})
58621+
58622+#define __krealloc(x, y, z) \
58623+({ \
58624+ void *___retval; \
58625+ intoverflow_t ___y = (intoverflow_t)y; \
58626+ if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
58627+ ___retval = NULL; \
58628+ else \
58629+ ___retval = __krealloc((x), (size_t)___y, (z)); \
58630+ ___retval; \
58631+})
58632+
58633+#define krealloc(x, y, z) \
58634+({ \
58635+ void *___retval; \
58636+ intoverflow_t ___y = (intoverflow_t)y; \
58637+ if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
58638+ ___retval = NULL; \
58639+ else \
58640+ ___retval = krealloc((x), (size_t)___y, (z)); \
58641+ ___retval; \
58642+})
58643+
58644 #endif /* _LINUX_SLAB_H */
58645diff -urNp linux-3.0.4/include/linux/slub_def.h linux-3.0.4/include/linux/slub_def.h
58646--- linux-3.0.4/include/linux/slub_def.h 2011-07-21 22:17:23.000000000 -0400
58647+++ linux-3.0.4/include/linux/slub_def.h 2011-08-23 21:47:56.000000000 -0400
58648@@ -82,7 +82,7 @@ struct kmem_cache {
58649 struct kmem_cache_order_objects max;
58650 struct kmem_cache_order_objects min;
58651 gfp_t allocflags; /* gfp flags to use on each alloc */
58652- int refcount; /* Refcount for slab cache destroy */
58653+ atomic_t refcount; /* Refcount for slab cache destroy */
58654 void (*ctor)(void *);
58655 int inuse; /* Offset to metadata */
58656 int align; /* Alignment */
58657@@ -218,7 +218,7 @@ static __always_inline struct kmem_cache
58658 }
58659
58660 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
58661-void *__kmalloc(size_t size, gfp_t flags);
58662+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
58663
58664 static __always_inline void *
58665 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
58666diff -urNp linux-3.0.4/include/linux/sonet.h linux-3.0.4/include/linux/sonet.h
58667--- linux-3.0.4/include/linux/sonet.h 2011-07-21 22:17:23.000000000 -0400
58668+++ linux-3.0.4/include/linux/sonet.h 2011-08-23 21:47:56.000000000 -0400
58669@@ -61,7 +61,7 @@ struct sonet_stats {
58670 #include <asm/atomic.h>
58671
58672 struct k_sonet_stats {
58673-#define __HANDLE_ITEM(i) atomic_t i
58674+#define __HANDLE_ITEM(i) atomic_unchecked_t i
58675 __SONET_ITEMS
58676 #undef __HANDLE_ITEM
58677 };
58678diff -urNp linux-3.0.4/include/linux/sunrpc/clnt.h linux-3.0.4/include/linux/sunrpc/clnt.h
58679--- linux-3.0.4/include/linux/sunrpc/clnt.h 2011-07-21 22:17:23.000000000 -0400
58680+++ linux-3.0.4/include/linux/sunrpc/clnt.h 2011-08-23 21:47:56.000000000 -0400
58681@@ -169,9 +169,9 @@ static inline unsigned short rpc_get_por
58682 {
58683 switch (sap->sa_family) {
58684 case AF_INET:
58685- return ntohs(((struct sockaddr_in *)sap)->sin_port);
58686+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
58687 case AF_INET6:
58688- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
58689+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
58690 }
58691 return 0;
58692 }
58693@@ -204,7 +204,7 @@ static inline bool __rpc_cmp_addr4(const
58694 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
58695 const struct sockaddr *src)
58696 {
58697- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
58698+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
58699 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
58700
58701 dsin->sin_family = ssin->sin_family;
58702@@ -301,7 +301,7 @@ static inline u32 rpc_get_scope_id(const
58703 if (sa->sa_family != AF_INET6)
58704 return 0;
58705
58706- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
58707+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
58708 }
58709
58710 #endif /* __KERNEL__ */
58711diff -urNp linux-3.0.4/include/linux/sunrpc/svc_rdma.h linux-3.0.4/include/linux/sunrpc/svc_rdma.h
58712--- linux-3.0.4/include/linux/sunrpc/svc_rdma.h 2011-07-21 22:17:23.000000000 -0400
58713+++ linux-3.0.4/include/linux/sunrpc/svc_rdma.h 2011-08-23 21:47:56.000000000 -0400
58714@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
58715 extern unsigned int svcrdma_max_requests;
58716 extern unsigned int svcrdma_max_req_size;
58717
58718-extern atomic_t rdma_stat_recv;
58719-extern atomic_t rdma_stat_read;
58720-extern atomic_t rdma_stat_write;
58721-extern atomic_t rdma_stat_sq_starve;
58722-extern atomic_t rdma_stat_rq_starve;
58723-extern atomic_t rdma_stat_rq_poll;
58724-extern atomic_t rdma_stat_rq_prod;
58725-extern atomic_t rdma_stat_sq_poll;
58726-extern atomic_t rdma_stat_sq_prod;
58727+extern atomic_unchecked_t rdma_stat_recv;
58728+extern atomic_unchecked_t rdma_stat_read;
58729+extern atomic_unchecked_t rdma_stat_write;
58730+extern atomic_unchecked_t rdma_stat_sq_starve;
58731+extern atomic_unchecked_t rdma_stat_rq_starve;
58732+extern atomic_unchecked_t rdma_stat_rq_poll;
58733+extern atomic_unchecked_t rdma_stat_rq_prod;
58734+extern atomic_unchecked_t rdma_stat_sq_poll;
58735+extern atomic_unchecked_t rdma_stat_sq_prod;
58736
58737 #define RPCRDMA_VERSION 1
58738
58739diff -urNp linux-3.0.4/include/linux/sysctl.h linux-3.0.4/include/linux/sysctl.h
58740--- linux-3.0.4/include/linux/sysctl.h 2011-07-21 22:17:23.000000000 -0400
58741+++ linux-3.0.4/include/linux/sysctl.h 2011-08-23 21:48:14.000000000 -0400
58742@@ -155,7 +155,11 @@ enum
58743 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
58744 };
58745
58746-
58747+#ifdef CONFIG_PAX_SOFTMODE
58748+enum {
58749+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
58750+};
58751+#endif
58752
58753 /* CTL_VM names: */
58754 enum
58755@@ -967,6 +971,8 @@ typedef int proc_handler (struct ctl_tab
58756
58757 extern int proc_dostring(struct ctl_table *, int,
58758 void __user *, size_t *, loff_t *);
58759+extern int proc_dostring_modpriv(struct ctl_table *, int,
58760+ void __user *, size_t *, loff_t *);
58761 extern int proc_dointvec(struct ctl_table *, int,
58762 void __user *, size_t *, loff_t *);
58763 extern int proc_dointvec_minmax(struct ctl_table *, int,
58764diff -urNp linux-3.0.4/include/linux/tty_ldisc.h linux-3.0.4/include/linux/tty_ldisc.h
58765--- linux-3.0.4/include/linux/tty_ldisc.h 2011-07-21 22:17:23.000000000 -0400
58766+++ linux-3.0.4/include/linux/tty_ldisc.h 2011-08-23 21:47:56.000000000 -0400
58767@@ -148,7 +148,7 @@ struct tty_ldisc_ops {
58768
58769 struct module *owner;
58770
58771- int refcount;
58772+ atomic_t refcount;
58773 };
58774
58775 struct tty_ldisc {
58776diff -urNp linux-3.0.4/include/linux/types.h linux-3.0.4/include/linux/types.h
58777--- linux-3.0.4/include/linux/types.h 2011-07-21 22:17:23.000000000 -0400
58778+++ linux-3.0.4/include/linux/types.h 2011-08-23 21:47:56.000000000 -0400
58779@@ -213,10 +213,26 @@ typedef struct {
58780 int counter;
58781 } atomic_t;
58782
58783+#ifdef CONFIG_PAX_REFCOUNT
58784+typedef struct {
58785+ int counter;
58786+} atomic_unchecked_t;
58787+#else
58788+typedef atomic_t atomic_unchecked_t;
58789+#endif
58790+
58791 #ifdef CONFIG_64BIT
58792 typedef struct {
58793 long counter;
58794 } atomic64_t;
58795+
58796+#ifdef CONFIG_PAX_REFCOUNT
58797+typedef struct {
58798+ long counter;
58799+} atomic64_unchecked_t;
58800+#else
58801+typedef atomic64_t atomic64_unchecked_t;
58802+#endif
58803 #endif
58804
58805 struct list_head {
58806diff -urNp linux-3.0.4/include/linux/uaccess.h linux-3.0.4/include/linux/uaccess.h
58807--- linux-3.0.4/include/linux/uaccess.h 2011-07-21 22:17:23.000000000 -0400
58808+++ linux-3.0.4/include/linux/uaccess.h 2011-10-06 04:17:55.000000000 -0400
58809@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
58810 long ret; \
58811 mm_segment_t old_fs = get_fs(); \
58812 \
58813- set_fs(KERNEL_DS); \
58814 pagefault_disable(); \
58815- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
58816- pagefault_enable(); \
58817+ set_fs(KERNEL_DS); \
58818+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
58819 set_fs(old_fs); \
58820+ pagefault_enable(); \
58821 ret; \
58822 })
58823
58824diff -urNp linux-3.0.4/include/linux/unaligned/access_ok.h linux-3.0.4/include/linux/unaligned/access_ok.h
58825--- linux-3.0.4/include/linux/unaligned/access_ok.h 2011-07-21 22:17:23.000000000 -0400
58826+++ linux-3.0.4/include/linux/unaligned/access_ok.h 2011-08-23 21:47:56.000000000 -0400
58827@@ -6,32 +6,32 @@
58828
58829 static inline u16 get_unaligned_le16(const void *p)
58830 {
58831- return le16_to_cpup((__le16 *)p);
58832+ return le16_to_cpup((const __le16 *)p);
58833 }
58834
58835 static inline u32 get_unaligned_le32(const void *p)
58836 {
58837- return le32_to_cpup((__le32 *)p);
58838+ return le32_to_cpup((const __le32 *)p);
58839 }
58840
58841 static inline u64 get_unaligned_le64(const void *p)
58842 {
58843- return le64_to_cpup((__le64 *)p);
58844+ return le64_to_cpup((const __le64 *)p);
58845 }
58846
58847 static inline u16 get_unaligned_be16(const void *p)
58848 {
58849- return be16_to_cpup((__be16 *)p);
58850+ return be16_to_cpup((const __be16 *)p);
58851 }
58852
58853 static inline u32 get_unaligned_be32(const void *p)
58854 {
58855- return be32_to_cpup((__be32 *)p);
58856+ return be32_to_cpup((const __be32 *)p);
58857 }
58858
58859 static inline u64 get_unaligned_be64(const void *p)
58860 {
58861- return be64_to_cpup((__be64 *)p);
58862+ return be64_to_cpup((const __be64 *)p);
58863 }
58864
58865 static inline void put_unaligned_le16(u16 val, void *p)
58866diff -urNp linux-3.0.4/include/linux/vmalloc.h linux-3.0.4/include/linux/vmalloc.h
58867--- linux-3.0.4/include/linux/vmalloc.h 2011-07-21 22:17:23.000000000 -0400
58868+++ linux-3.0.4/include/linux/vmalloc.h 2011-08-23 21:47:56.000000000 -0400
58869@@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
58870 #define VM_MAP 0x00000004 /* vmap()ed pages */
58871 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
58872 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
58873+
58874+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
58875+#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
58876+#endif
58877+
58878 /* bits [20..32] reserved for arch specific ioremap internals */
58879
58880 /*
58881@@ -155,4 +160,103 @@ pcpu_free_vm_areas(struct vm_struct **vm
58882 # endif
58883 #endif
58884
58885+#define vmalloc(x) \
58886+({ \
58887+ void *___retval; \
58888+ intoverflow_t ___x = (intoverflow_t)x; \
58889+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
58890+ ___retval = NULL; \
58891+ else \
58892+ ___retval = vmalloc((unsigned long)___x); \
58893+ ___retval; \
58894+})
58895+
58896+#define vzalloc(x) \
58897+({ \
58898+ void *___retval; \
58899+ intoverflow_t ___x = (intoverflow_t)x; \
58900+ if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
58901+ ___retval = NULL; \
58902+ else \
58903+ ___retval = vzalloc((unsigned long)___x); \
58904+ ___retval; \
58905+})
58906+
58907+#define __vmalloc(x, y, z) \
58908+({ \
58909+ void *___retval; \
58910+ intoverflow_t ___x = (intoverflow_t)x; \
58911+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
58912+ ___retval = NULL; \
58913+ else \
58914+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
58915+ ___retval; \
58916+})
58917+
58918+#define vmalloc_user(x) \
58919+({ \
58920+ void *___retval; \
58921+ intoverflow_t ___x = (intoverflow_t)x; \
58922+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
58923+ ___retval = NULL; \
58924+ else \
58925+ ___retval = vmalloc_user((unsigned long)___x); \
58926+ ___retval; \
58927+})
58928+
58929+#define vmalloc_exec(x) \
58930+({ \
58931+ void *___retval; \
58932+ intoverflow_t ___x = (intoverflow_t)x; \
58933+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
58934+ ___retval = NULL; \
58935+ else \
58936+ ___retval = vmalloc_exec((unsigned long)___x); \
58937+ ___retval; \
58938+})
58939+
58940+#define vmalloc_node(x, y) \
58941+({ \
58942+ void *___retval; \
58943+ intoverflow_t ___x = (intoverflow_t)x; \
58944+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
58945+ ___retval = NULL; \
58946+ else \
58947+ ___retval = vmalloc_node((unsigned long)___x, (y));\
58948+ ___retval; \
58949+})
58950+
58951+#define vzalloc_node(x, y) \
58952+({ \
58953+ void *___retval; \
58954+ intoverflow_t ___x = (intoverflow_t)x; \
58955+ if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
58956+ ___retval = NULL; \
58957+ else \
58958+ ___retval = vzalloc_node((unsigned long)___x, (y));\
58959+ ___retval; \
58960+})
58961+
58962+#define vmalloc_32(x) \
58963+({ \
58964+ void *___retval; \
58965+ intoverflow_t ___x = (intoverflow_t)x; \
58966+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
58967+ ___retval = NULL; \
58968+ else \
58969+ ___retval = vmalloc_32((unsigned long)___x); \
58970+ ___retval; \
58971+})
58972+
58973+#define vmalloc_32_user(x) \
58974+({ \
58975+void *___retval; \
58976+ intoverflow_t ___x = (intoverflow_t)x; \
58977+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
58978+ ___retval = NULL; \
58979+ else \
58980+ ___retval = vmalloc_32_user((unsigned long)___x);\
58981+ ___retval; \
58982+})
58983+
58984 #endif /* _LINUX_VMALLOC_H */
58985diff -urNp linux-3.0.4/include/linux/vmstat.h linux-3.0.4/include/linux/vmstat.h
58986--- linux-3.0.4/include/linux/vmstat.h 2011-07-21 22:17:23.000000000 -0400
58987+++ linux-3.0.4/include/linux/vmstat.h 2011-08-23 21:47:56.000000000 -0400
58988@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(in
58989 /*
58990 * Zone based page accounting with per cpu differentials.
58991 */
58992-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
58993+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
58994
58995 static inline void zone_page_state_add(long x, struct zone *zone,
58996 enum zone_stat_item item)
58997 {
58998- atomic_long_add(x, &zone->vm_stat[item]);
58999- atomic_long_add(x, &vm_stat[item]);
59000+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
59001+ atomic_long_add_unchecked(x, &vm_stat[item]);
59002 }
59003
59004 static inline unsigned long global_page_state(enum zone_stat_item item)
59005 {
59006- long x = atomic_long_read(&vm_stat[item]);
59007+ long x = atomic_long_read_unchecked(&vm_stat[item]);
59008 #ifdef CONFIG_SMP
59009 if (x < 0)
59010 x = 0;
59011@@ -109,7 +109,7 @@ static inline unsigned long global_page_
59012 static inline unsigned long zone_page_state(struct zone *zone,
59013 enum zone_stat_item item)
59014 {
59015- long x = atomic_long_read(&zone->vm_stat[item]);
59016+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
59017 #ifdef CONFIG_SMP
59018 if (x < 0)
59019 x = 0;
59020@@ -126,7 +126,7 @@ static inline unsigned long zone_page_st
59021 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
59022 enum zone_stat_item item)
59023 {
59024- long x = atomic_long_read(&zone->vm_stat[item]);
59025+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
59026
59027 #ifdef CONFIG_SMP
59028 int cpu;
59029@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state
59030
59031 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
59032 {
59033- atomic_long_inc(&zone->vm_stat[item]);
59034- atomic_long_inc(&vm_stat[item]);
59035+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
59036+ atomic_long_inc_unchecked(&vm_stat[item]);
59037 }
59038
59039 static inline void __inc_zone_page_state(struct page *page,
59040@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state
59041
59042 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
59043 {
59044- atomic_long_dec(&zone->vm_stat[item]);
59045- atomic_long_dec(&vm_stat[item]);
59046+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
59047+ atomic_long_dec_unchecked(&vm_stat[item]);
59048 }
59049
59050 static inline void __dec_zone_page_state(struct page *page,
59051diff -urNp linux-3.0.4/include/media/saa7146_vv.h linux-3.0.4/include/media/saa7146_vv.h
59052--- linux-3.0.4/include/media/saa7146_vv.h 2011-07-21 22:17:23.000000000 -0400
59053+++ linux-3.0.4/include/media/saa7146_vv.h 2011-08-24 18:26:09.000000000 -0400
59054@@ -163,7 +163,7 @@ struct saa7146_ext_vv
59055 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
59056
59057 /* the extension can override this */
59058- struct v4l2_ioctl_ops ops;
59059+ v4l2_ioctl_ops_no_const ops;
59060 /* pointer to the saa7146 core ops */
59061 const struct v4l2_ioctl_ops *core_ops;
59062
59063diff -urNp linux-3.0.4/include/media/v4l2-ioctl.h linux-3.0.4/include/media/v4l2-ioctl.h
59064--- linux-3.0.4/include/media/v4l2-ioctl.h 2011-07-21 22:17:23.000000000 -0400
59065+++ linux-3.0.4/include/media/v4l2-ioctl.h 2011-08-24 18:25:45.000000000 -0400
59066@@ -272,6 +272,7 @@ struct v4l2_ioctl_ops {
59067 long (*vidioc_default) (struct file *file, void *fh,
59068 bool valid_prio, int cmd, void *arg);
59069 };
59070+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
59071
59072
59073 /* v4l debugging and diagnostics */
59074diff -urNp linux-3.0.4/include/net/caif/cfctrl.h linux-3.0.4/include/net/caif/cfctrl.h
59075--- linux-3.0.4/include/net/caif/cfctrl.h 2011-07-21 22:17:23.000000000 -0400
59076+++ linux-3.0.4/include/net/caif/cfctrl.h 2011-08-23 21:47:56.000000000 -0400
59077@@ -52,7 +52,7 @@ struct cfctrl_rsp {
59078 void (*radioset_rsp)(void);
59079 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
59080 struct cflayer *client_layer);
59081-};
59082+} __no_const;
59083
59084 /* Link Setup Parameters for CAIF-Links. */
59085 struct cfctrl_link_param {
59086@@ -101,8 +101,8 @@ struct cfctrl_request_info {
59087 struct cfctrl {
59088 struct cfsrvl serv;
59089 struct cfctrl_rsp res;
59090- atomic_t req_seq_no;
59091- atomic_t rsp_seq_no;
59092+ atomic_unchecked_t req_seq_no;
59093+ atomic_unchecked_t rsp_seq_no;
59094 struct list_head list;
59095 /* Protects from simultaneous access to first_req list */
59096 spinlock_t info_list_lock;
59097diff -urNp linux-3.0.4/include/net/flow.h linux-3.0.4/include/net/flow.h
59098--- linux-3.0.4/include/net/flow.h 2011-07-21 22:17:23.000000000 -0400
59099+++ linux-3.0.4/include/net/flow.h 2011-08-23 21:47:56.000000000 -0400
59100@@ -188,6 +188,6 @@ extern struct flow_cache_object *flow_ca
59101 u8 dir, flow_resolve_t resolver, void *ctx);
59102
59103 extern void flow_cache_flush(void);
59104-extern atomic_t flow_cache_genid;
59105+extern atomic_unchecked_t flow_cache_genid;
59106
59107 #endif
59108diff -urNp linux-3.0.4/include/net/inetpeer.h linux-3.0.4/include/net/inetpeer.h
59109--- linux-3.0.4/include/net/inetpeer.h 2011-07-21 22:17:23.000000000 -0400
59110+++ linux-3.0.4/include/net/inetpeer.h 2011-08-23 21:47:56.000000000 -0400
59111@@ -43,8 +43,8 @@ struct inet_peer {
59112 */
59113 union {
59114 struct {
59115- atomic_t rid; /* Frag reception counter */
59116- atomic_t ip_id_count; /* IP ID for the next packet */
59117+ atomic_unchecked_t rid; /* Frag reception counter */
59118+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
59119 __u32 tcp_ts;
59120 __u32 tcp_ts_stamp;
59121 u32 metrics[RTAX_MAX];
59122@@ -108,7 +108,7 @@ static inline __u16 inet_getid(struct in
59123 {
59124 more++;
59125 inet_peer_refcheck(p);
59126- return atomic_add_return(more, &p->ip_id_count) - more;
59127+ return atomic_add_return_unchecked(more, &p->ip_id_count) - more;
59128 }
59129
59130 #endif /* _NET_INETPEER_H */
59131diff -urNp linux-3.0.4/include/net/ip_fib.h linux-3.0.4/include/net/ip_fib.h
59132--- linux-3.0.4/include/net/ip_fib.h 2011-07-21 22:17:23.000000000 -0400
59133+++ linux-3.0.4/include/net/ip_fib.h 2011-08-23 21:47:56.000000000 -0400
59134@@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(s
59135
59136 #define FIB_RES_SADDR(net, res) \
59137 ((FIB_RES_NH(res).nh_saddr_genid == \
59138- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
59139+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
59140 FIB_RES_NH(res).nh_saddr : \
59141 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
59142 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
59143diff -urNp linux-3.0.4/include/net/ip_vs.h linux-3.0.4/include/net/ip_vs.h
59144--- linux-3.0.4/include/net/ip_vs.h 2011-07-21 22:17:23.000000000 -0400
59145+++ linux-3.0.4/include/net/ip_vs.h 2011-08-23 21:47:56.000000000 -0400
59146@@ -509,7 +509,7 @@ struct ip_vs_conn {
59147 struct ip_vs_conn *control; /* Master control connection */
59148 atomic_t n_control; /* Number of controlled ones */
59149 struct ip_vs_dest *dest; /* real server */
59150- atomic_t in_pkts; /* incoming packet counter */
59151+ atomic_unchecked_t in_pkts; /* incoming packet counter */
59152
59153 /* packet transmitter for different forwarding methods. If it
59154 mangles the packet, it must return NF_DROP or better NF_STOLEN,
59155@@ -647,7 +647,7 @@ struct ip_vs_dest {
59156 __be16 port; /* port number of the server */
59157 union nf_inet_addr addr; /* IP address of the server */
59158 volatile unsigned flags; /* dest status flags */
59159- atomic_t conn_flags; /* flags to copy to conn */
59160+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
59161 atomic_t weight; /* server weight */
59162
59163 atomic_t refcnt; /* reference counter */
59164diff -urNp linux-3.0.4/include/net/irda/ircomm_core.h linux-3.0.4/include/net/irda/ircomm_core.h
59165--- linux-3.0.4/include/net/irda/ircomm_core.h 2011-07-21 22:17:23.000000000 -0400
59166+++ linux-3.0.4/include/net/irda/ircomm_core.h 2011-08-23 21:47:56.000000000 -0400
59167@@ -51,7 +51,7 @@ typedef struct {
59168 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
59169 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
59170 struct ircomm_info *);
59171-} call_t;
59172+} __no_const call_t;
59173
59174 struct ircomm_cb {
59175 irda_queue_t queue;
59176diff -urNp linux-3.0.4/include/net/irda/ircomm_tty.h linux-3.0.4/include/net/irda/ircomm_tty.h
59177--- linux-3.0.4/include/net/irda/ircomm_tty.h 2011-07-21 22:17:23.000000000 -0400
59178+++ linux-3.0.4/include/net/irda/ircomm_tty.h 2011-08-23 21:47:56.000000000 -0400
59179@@ -35,6 +35,7 @@
59180 #include <linux/termios.h>
59181 #include <linux/timer.h>
59182 #include <linux/tty.h> /* struct tty_struct */
59183+#include <asm/local.h>
59184
59185 #include <net/irda/irias_object.h>
59186 #include <net/irda/ircomm_core.h>
59187@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
59188 unsigned short close_delay;
59189 unsigned short closing_wait; /* time to wait before closing */
59190
59191- int open_count;
59192- int blocked_open; /* # of blocked opens */
59193+ local_t open_count;
59194+ local_t blocked_open; /* # of blocked opens */
59195
59196 /* Protect concurent access to :
59197 * o self->open_count
59198diff -urNp linux-3.0.4/include/net/iucv/af_iucv.h linux-3.0.4/include/net/iucv/af_iucv.h
59199--- linux-3.0.4/include/net/iucv/af_iucv.h 2011-07-21 22:17:23.000000000 -0400
59200+++ linux-3.0.4/include/net/iucv/af_iucv.h 2011-08-23 21:47:56.000000000 -0400
59201@@ -87,7 +87,7 @@ struct iucv_sock {
59202 struct iucv_sock_list {
59203 struct hlist_head head;
59204 rwlock_t lock;
59205- atomic_t autobind_name;
59206+ atomic_unchecked_t autobind_name;
59207 };
59208
59209 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
59210diff -urNp linux-3.0.4/include/net/lapb.h linux-3.0.4/include/net/lapb.h
59211--- linux-3.0.4/include/net/lapb.h 2011-07-21 22:17:23.000000000 -0400
59212+++ linux-3.0.4/include/net/lapb.h 2011-08-23 21:47:56.000000000 -0400
59213@@ -95,7 +95,7 @@ struct lapb_cb {
59214 struct sk_buff_head write_queue;
59215 struct sk_buff_head ack_queue;
59216 unsigned char window;
59217- struct lapb_register_struct callbacks;
59218+ struct lapb_register_struct *callbacks;
59219
59220 /* FRMR control information */
59221 struct lapb_frame frmr_data;
59222diff -urNp linux-3.0.4/include/net/neighbour.h linux-3.0.4/include/net/neighbour.h
59223--- linux-3.0.4/include/net/neighbour.h 2011-07-21 22:17:23.000000000 -0400
59224+++ linux-3.0.4/include/net/neighbour.h 2011-08-31 18:39:25.000000000 -0400
59225@@ -124,7 +124,7 @@ struct neigh_ops {
59226 int (*connected_output)(struct sk_buff*);
59227 int (*hh_output)(struct sk_buff*);
59228 int (*queue_xmit)(struct sk_buff*);
59229-};
59230+} __do_const;
59231
59232 struct pneigh_entry {
59233 struct pneigh_entry *next;
59234diff -urNp linux-3.0.4/include/net/netlink.h linux-3.0.4/include/net/netlink.h
59235--- linux-3.0.4/include/net/netlink.h 2011-07-21 22:17:23.000000000 -0400
59236+++ linux-3.0.4/include/net/netlink.h 2011-08-23 21:47:56.000000000 -0400
59237@@ -562,7 +562,7 @@ static inline void *nlmsg_get_pos(struct
59238 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
59239 {
59240 if (mark)
59241- skb_trim(skb, (unsigned char *) mark - skb->data);
59242+ skb_trim(skb, (const unsigned char *) mark - skb->data);
59243 }
59244
59245 /**
59246diff -urNp linux-3.0.4/include/net/netns/ipv4.h linux-3.0.4/include/net/netns/ipv4.h
59247--- linux-3.0.4/include/net/netns/ipv4.h 2011-07-21 22:17:23.000000000 -0400
59248+++ linux-3.0.4/include/net/netns/ipv4.h 2011-08-23 21:47:56.000000000 -0400
59249@@ -56,8 +56,8 @@ struct netns_ipv4 {
59250
59251 unsigned int sysctl_ping_group_range[2];
59252
59253- atomic_t rt_genid;
59254- atomic_t dev_addr_genid;
59255+ atomic_unchecked_t rt_genid;
59256+ atomic_unchecked_t dev_addr_genid;
59257
59258 #ifdef CONFIG_IP_MROUTE
59259 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
59260diff -urNp linux-3.0.4/include/net/sctp/sctp.h linux-3.0.4/include/net/sctp/sctp.h
59261--- linux-3.0.4/include/net/sctp/sctp.h 2011-07-21 22:17:23.000000000 -0400
59262+++ linux-3.0.4/include/net/sctp/sctp.h 2011-08-23 21:47:56.000000000 -0400
59263@@ -315,9 +315,9 @@ do { \
59264
59265 #else /* SCTP_DEBUG */
59266
59267-#define SCTP_DEBUG_PRINTK(whatever...)
59268-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
59269-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
59270+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
59271+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
59272+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
59273 #define SCTP_ENABLE_DEBUG
59274 #define SCTP_DISABLE_DEBUG
59275 #define SCTP_ASSERT(expr, str, func)
59276diff -urNp linux-3.0.4/include/net/sock.h linux-3.0.4/include/net/sock.h
59277--- linux-3.0.4/include/net/sock.h 2011-07-21 22:17:23.000000000 -0400
59278+++ linux-3.0.4/include/net/sock.h 2011-08-23 21:47:56.000000000 -0400
59279@@ -277,7 +277,7 @@ struct sock {
59280 #ifdef CONFIG_RPS
59281 __u32 sk_rxhash;
59282 #endif
59283- atomic_t sk_drops;
59284+ atomic_unchecked_t sk_drops;
59285 int sk_rcvbuf;
59286
59287 struct sk_filter __rcu *sk_filter;
59288@@ -1390,7 +1390,7 @@ static inline void sk_nocaps_add(struct
59289 }
59290
59291 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
59292- char __user *from, char *to,
59293+ char __user *from, unsigned char *to,
59294 int copy, int offset)
59295 {
59296 if (skb->ip_summed == CHECKSUM_NONE) {
59297diff -urNp linux-3.0.4/include/net/tcp.h linux-3.0.4/include/net/tcp.h
59298--- linux-3.0.4/include/net/tcp.h 2011-07-21 22:17:23.000000000 -0400
59299+++ linux-3.0.4/include/net/tcp.h 2011-08-23 21:47:56.000000000 -0400
59300@@ -1374,8 +1374,8 @@ enum tcp_seq_states {
59301 struct tcp_seq_afinfo {
59302 char *name;
59303 sa_family_t family;
59304- struct file_operations seq_fops;
59305- struct seq_operations seq_ops;
59306+ file_operations_no_const seq_fops;
59307+ seq_operations_no_const seq_ops;
59308 };
59309
59310 struct tcp_iter_state {
59311diff -urNp linux-3.0.4/include/net/udp.h linux-3.0.4/include/net/udp.h
59312--- linux-3.0.4/include/net/udp.h 2011-07-21 22:17:23.000000000 -0400
59313+++ linux-3.0.4/include/net/udp.h 2011-08-23 21:47:56.000000000 -0400
59314@@ -234,8 +234,8 @@ struct udp_seq_afinfo {
59315 char *name;
59316 sa_family_t family;
59317 struct udp_table *udp_table;
59318- struct file_operations seq_fops;
59319- struct seq_operations seq_ops;
59320+ file_operations_no_const seq_fops;
59321+ seq_operations_no_const seq_ops;
59322 };
59323
59324 struct udp_iter_state {
59325diff -urNp linux-3.0.4/include/net/xfrm.h linux-3.0.4/include/net/xfrm.h
59326--- linux-3.0.4/include/net/xfrm.h 2011-07-21 22:17:23.000000000 -0400
59327+++ linux-3.0.4/include/net/xfrm.h 2011-08-23 21:47:56.000000000 -0400
59328@@ -505,7 +505,7 @@ struct xfrm_policy {
59329 struct timer_list timer;
59330
59331 struct flow_cache_object flo;
59332- atomic_t genid;
59333+ atomic_unchecked_t genid;
59334 u32 priority;
59335 u32 index;
59336 struct xfrm_mark mark;
59337diff -urNp linux-3.0.4/include/rdma/iw_cm.h linux-3.0.4/include/rdma/iw_cm.h
59338--- linux-3.0.4/include/rdma/iw_cm.h 2011-07-21 22:17:23.000000000 -0400
59339+++ linux-3.0.4/include/rdma/iw_cm.h 2011-08-23 21:47:56.000000000 -0400
59340@@ -120,7 +120,7 @@ struct iw_cm_verbs {
59341 int backlog);
59342
59343 int (*destroy_listen)(struct iw_cm_id *cm_id);
59344-};
59345+} __no_const;
59346
59347 /**
59348 * iw_create_cm_id - Create an IW CM identifier.
59349diff -urNp linux-3.0.4/include/scsi/libfc.h linux-3.0.4/include/scsi/libfc.h
59350--- linux-3.0.4/include/scsi/libfc.h 2011-07-21 22:17:23.000000000 -0400
59351+++ linux-3.0.4/include/scsi/libfc.h 2011-08-23 21:47:56.000000000 -0400
59352@@ -750,6 +750,7 @@ struct libfc_function_template {
59353 */
59354 void (*disc_stop_final) (struct fc_lport *);
59355 };
59356+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
59357
59358 /**
59359 * struct fc_disc - Discovery context
59360@@ -853,7 +854,7 @@ struct fc_lport {
59361 struct fc_vport *vport;
59362
59363 /* Operational Information */
59364- struct libfc_function_template tt;
59365+ libfc_function_template_no_const tt;
59366 u8 link_up;
59367 u8 qfull;
59368 enum fc_lport_state state;
59369diff -urNp linux-3.0.4/include/scsi/scsi_device.h linux-3.0.4/include/scsi/scsi_device.h
59370--- linux-3.0.4/include/scsi/scsi_device.h 2011-07-21 22:17:23.000000000 -0400
59371+++ linux-3.0.4/include/scsi/scsi_device.h 2011-08-23 21:47:56.000000000 -0400
59372@@ -161,9 +161,9 @@ struct scsi_device {
59373 unsigned int max_device_blocked; /* what device_blocked counts down from */
59374 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
59375
59376- atomic_t iorequest_cnt;
59377- atomic_t iodone_cnt;
59378- atomic_t ioerr_cnt;
59379+ atomic_unchecked_t iorequest_cnt;
59380+ atomic_unchecked_t iodone_cnt;
59381+ atomic_unchecked_t ioerr_cnt;
59382
59383 struct device sdev_gendev,
59384 sdev_dev;
59385diff -urNp linux-3.0.4/include/scsi/scsi_transport_fc.h linux-3.0.4/include/scsi/scsi_transport_fc.h
59386--- linux-3.0.4/include/scsi/scsi_transport_fc.h 2011-07-21 22:17:23.000000000 -0400
59387+++ linux-3.0.4/include/scsi/scsi_transport_fc.h 2011-08-26 19:49:56.000000000 -0400
59388@@ -711,7 +711,7 @@ struct fc_function_template {
59389 unsigned long show_host_system_hostname:1;
59390
59391 unsigned long disable_target_scan:1;
59392-};
59393+} __do_const;
59394
59395
59396 /**
59397diff -urNp linux-3.0.4/include/sound/ak4xxx-adda.h linux-3.0.4/include/sound/ak4xxx-adda.h
59398--- linux-3.0.4/include/sound/ak4xxx-adda.h 2011-07-21 22:17:23.000000000 -0400
59399+++ linux-3.0.4/include/sound/ak4xxx-adda.h 2011-08-23 21:47:56.000000000 -0400
59400@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
59401 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
59402 unsigned char val);
59403 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
59404-};
59405+} __no_const;
59406
59407 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
59408
59409diff -urNp linux-3.0.4/include/sound/hwdep.h linux-3.0.4/include/sound/hwdep.h
59410--- linux-3.0.4/include/sound/hwdep.h 2011-07-21 22:17:23.000000000 -0400
59411+++ linux-3.0.4/include/sound/hwdep.h 2011-08-23 21:47:56.000000000 -0400
59412@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
59413 struct snd_hwdep_dsp_status *status);
59414 int (*dsp_load)(struct snd_hwdep *hw,
59415 struct snd_hwdep_dsp_image *image);
59416-};
59417+} __no_const;
59418
59419 struct snd_hwdep {
59420 struct snd_card *card;
59421diff -urNp linux-3.0.4/include/sound/info.h linux-3.0.4/include/sound/info.h
59422--- linux-3.0.4/include/sound/info.h 2011-07-21 22:17:23.000000000 -0400
59423+++ linux-3.0.4/include/sound/info.h 2011-08-23 21:47:56.000000000 -0400
59424@@ -44,7 +44,7 @@ struct snd_info_entry_text {
59425 struct snd_info_buffer *buffer);
59426 void (*write)(struct snd_info_entry *entry,
59427 struct snd_info_buffer *buffer);
59428-};
59429+} __no_const;
59430
59431 struct snd_info_entry_ops {
59432 int (*open)(struct snd_info_entry *entry,
59433diff -urNp linux-3.0.4/include/sound/pcm.h linux-3.0.4/include/sound/pcm.h
59434--- linux-3.0.4/include/sound/pcm.h 2011-07-21 22:17:23.000000000 -0400
59435+++ linux-3.0.4/include/sound/pcm.h 2011-08-23 21:47:56.000000000 -0400
59436@@ -81,6 +81,7 @@ struct snd_pcm_ops {
59437 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
59438 int (*ack)(struct snd_pcm_substream *substream);
59439 };
59440+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
59441
59442 /*
59443 *
59444diff -urNp linux-3.0.4/include/sound/sb16_csp.h linux-3.0.4/include/sound/sb16_csp.h
59445--- linux-3.0.4/include/sound/sb16_csp.h 2011-07-21 22:17:23.000000000 -0400
59446+++ linux-3.0.4/include/sound/sb16_csp.h 2011-08-23 21:47:56.000000000 -0400
59447@@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
59448 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
59449 int (*csp_stop) (struct snd_sb_csp * p);
59450 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
59451-};
59452+} __no_const;
59453
59454 /*
59455 * CSP private data
59456diff -urNp linux-3.0.4/include/sound/soc.h linux-3.0.4/include/sound/soc.h
59457--- linux-3.0.4/include/sound/soc.h 2011-07-21 22:17:23.000000000 -0400
59458+++ linux-3.0.4/include/sound/soc.h 2011-08-26 19:49:56.000000000 -0400
59459@@ -636,7 +636,7 @@ struct snd_soc_platform_driver {
59460
59461 /* platform stream ops */
59462 struct snd_pcm_ops *ops;
59463-};
59464+} __do_const;
59465
59466 struct snd_soc_platform {
59467 const char *name;
59468diff -urNp linux-3.0.4/include/sound/ymfpci.h linux-3.0.4/include/sound/ymfpci.h
59469--- linux-3.0.4/include/sound/ymfpci.h 2011-07-21 22:17:23.000000000 -0400
59470+++ linux-3.0.4/include/sound/ymfpci.h 2011-08-23 21:47:56.000000000 -0400
59471@@ -358,7 +358,7 @@ struct snd_ymfpci {
59472 spinlock_t reg_lock;
59473 spinlock_t voice_lock;
59474 wait_queue_head_t interrupt_sleep;
59475- atomic_t interrupt_sleep_count;
59476+ atomic_unchecked_t interrupt_sleep_count;
59477 struct snd_info_entry *proc_entry;
59478 const struct firmware *dsp_microcode;
59479 const struct firmware *controller_microcode;
59480diff -urNp linux-3.0.4/include/target/target_core_base.h linux-3.0.4/include/target/target_core_base.h
59481--- linux-3.0.4/include/target/target_core_base.h 2011-07-21 22:17:23.000000000 -0400
59482+++ linux-3.0.4/include/target/target_core_base.h 2011-08-23 21:47:56.000000000 -0400
59483@@ -364,7 +364,7 @@ struct t10_reservation_ops {
59484 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
59485 int (*t10_pr_register)(struct se_cmd *);
59486 int (*t10_pr_clear)(struct se_cmd *);
59487-};
59488+} __no_const;
59489
59490 struct t10_reservation_template {
59491 /* Reservation effects all target ports */
59492@@ -432,8 +432,8 @@ struct se_transport_task {
59493 atomic_t t_task_cdbs_left;
59494 atomic_t t_task_cdbs_ex_left;
59495 atomic_t t_task_cdbs_timeout_left;
59496- atomic_t t_task_cdbs_sent;
59497- atomic_t t_transport_aborted;
59498+ atomic_unchecked_t t_task_cdbs_sent;
59499+ atomic_unchecked_t t_transport_aborted;
59500 atomic_t t_transport_active;
59501 atomic_t t_transport_complete;
59502 atomic_t t_transport_queue_active;
59503@@ -774,7 +774,7 @@ struct se_device {
59504 atomic_t active_cmds;
59505 atomic_t simple_cmds;
59506 atomic_t depth_left;
59507- atomic_t dev_ordered_id;
59508+ atomic_unchecked_t dev_ordered_id;
59509 atomic_t dev_tur_active;
59510 atomic_t execute_tasks;
59511 atomic_t dev_status_thr_count;
59512diff -urNp linux-3.0.4/include/trace/events/irq.h linux-3.0.4/include/trace/events/irq.h
59513--- linux-3.0.4/include/trace/events/irq.h 2011-07-21 22:17:23.000000000 -0400
59514+++ linux-3.0.4/include/trace/events/irq.h 2011-08-23 21:47:56.000000000 -0400
59515@@ -36,7 +36,7 @@ struct softirq_action;
59516 */
59517 TRACE_EVENT(irq_handler_entry,
59518
59519- TP_PROTO(int irq, struct irqaction *action),
59520+ TP_PROTO(int irq, const struct irqaction *action),
59521
59522 TP_ARGS(irq, action),
59523
59524@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
59525 */
59526 TRACE_EVENT(irq_handler_exit,
59527
59528- TP_PROTO(int irq, struct irqaction *action, int ret),
59529+ TP_PROTO(int irq, const struct irqaction *action, int ret),
59530
59531 TP_ARGS(irq, action, ret),
59532
59533diff -urNp linux-3.0.4/include/video/udlfb.h linux-3.0.4/include/video/udlfb.h
59534--- linux-3.0.4/include/video/udlfb.h 2011-07-21 22:17:23.000000000 -0400
59535+++ linux-3.0.4/include/video/udlfb.h 2011-08-23 21:47:56.000000000 -0400
59536@@ -51,10 +51,10 @@ struct dlfb_data {
59537 int base8;
59538 u32 pseudo_palette[256];
59539 /* blit-only rendering path metrics, exposed through sysfs */
59540- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
59541- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
59542- atomic_t bytes_sent; /* to usb, after compression including overhead */
59543- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
59544+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
59545+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
59546+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
59547+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
59548 };
59549
59550 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
59551diff -urNp linux-3.0.4/include/video/uvesafb.h linux-3.0.4/include/video/uvesafb.h
59552--- linux-3.0.4/include/video/uvesafb.h 2011-07-21 22:17:23.000000000 -0400
59553+++ linux-3.0.4/include/video/uvesafb.h 2011-08-23 21:47:56.000000000 -0400
59554@@ -177,6 +177,7 @@ struct uvesafb_par {
59555 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
59556 u8 pmi_setpal; /* PMI for palette changes */
59557 u16 *pmi_base; /* protected mode interface location */
59558+ u8 *pmi_code; /* protected mode code location */
59559 void *pmi_start;
59560 void *pmi_pal;
59561 u8 *vbe_state_orig; /*
59562diff -urNp linux-3.0.4/init/do_mounts.c linux-3.0.4/init/do_mounts.c
59563--- linux-3.0.4/init/do_mounts.c 2011-07-21 22:17:23.000000000 -0400
59564+++ linux-3.0.4/init/do_mounts.c 2011-10-06 04:17:55.000000000 -0400
59565@@ -287,11 +287,11 @@ static void __init get_fs_names(char *pa
59566
59567 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
59568 {
59569- int err = sys_mount(name, "/root", fs, flags, data);
59570+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
59571 if (err)
59572 return err;
59573
59574- sys_chdir((const char __user __force *)"/root");
59575+ sys_chdir((const char __force_user*)"/root");
59576 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
59577 printk(KERN_INFO
59578 "VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
59579@@ -383,18 +383,18 @@ void __init change_floppy(char *fmt, ...
59580 va_start(args, fmt);
59581 vsprintf(buf, fmt, args);
59582 va_end(args);
59583- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
59584+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
59585 if (fd >= 0) {
59586 sys_ioctl(fd, FDEJECT, 0);
59587 sys_close(fd);
59588 }
59589 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
59590- fd = sys_open("/dev/console", O_RDWR, 0);
59591+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
59592 if (fd >= 0) {
59593 sys_ioctl(fd, TCGETS, (long)&termios);
59594 termios.c_lflag &= ~ICANON;
59595 sys_ioctl(fd, TCSETSF, (long)&termios);
59596- sys_read(fd, &c, 1);
59597+ sys_read(fd, (char __user *)&c, 1);
59598 termios.c_lflag |= ICANON;
59599 sys_ioctl(fd, TCSETSF, (long)&termios);
59600 sys_close(fd);
59601@@ -488,6 +488,6 @@ void __init prepare_namespace(void)
59602 mount_root();
59603 out:
59604 devtmpfs_mount("dev");
59605- sys_mount(".", "/", NULL, MS_MOVE, NULL);
59606- sys_chroot((const char __user __force *)".");
59607+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
59608+ sys_chroot((const char __force_user *)".");
59609 }
59610diff -urNp linux-3.0.4/init/do_mounts.h linux-3.0.4/init/do_mounts.h
59611--- linux-3.0.4/init/do_mounts.h 2011-07-21 22:17:23.000000000 -0400
59612+++ linux-3.0.4/init/do_mounts.h 2011-10-06 04:17:55.000000000 -0400
59613@@ -15,15 +15,15 @@ extern int root_mountflags;
59614
59615 static inline int create_dev(char *name, dev_t dev)
59616 {
59617- sys_unlink(name);
59618- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
59619+ sys_unlink((char __force_user *)name);
59620+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
59621 }
59622
59623 #if BITS_PER_LONG == 32
59624 static inline u32 bstat(char *name)
59625 {
59626 struct stat64 stat;
59627- if (sys_stat64(name, &stat) != 0)
59628+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
59629 return 0;
59630 if (!S_ISBLK(stat.st_mode))
59631 return 0;
59632@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
59633 static inline u32 bstat(char *name)
59634 {
59635 struct stat stat;
59636- if (sys_newstat(name, &stat) != 0)
59637+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
59638 return 0;
59639 if (!S_ISBLK(stat.st_mode))
59640 return 0;
59641diff -urNp linux-3.0.4/init/do_mounts_initrd.c linux-3.0.4/init/do_mounts_initrd.c
59642--- linux-3.0.4/init/do_mounts_initrd.c 2011-07-21 22:17:23.000000000 -0400
59643+++ linux-3.0.4/init/do_mounts_initrd.c 2011-10-06 04:17:55.000000000 -0400
59644@@ -44,13 +44,13 @@ static void __init handle_initrd(void)
59645 create_dev("/dev/root.old", Root_RAM0);
59646 /* mount initrd on rootfs' /root */
59647 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
59648- sys_mkdir("/old", 0700);
59649- root_fd = sys_open("/", 0, 0);
59650- old_fd = sys_open("/old", 0, 0);
59651+ sys_mkdir((const char __force_user *)"/old", 0700);
59652+ root_fd = sys_open((const char __force_user *)"/", 0, 0);
59653+ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
59654 /* move initrd over / and chdir/chroot in initrd root */
59655- sys_chdir("/root");
59656- sys_mount(".", "/", NULL, MS_MOVE, NULL);
59657- sys_chroot(".");
59658+ sys_chdir((const char __force_user *)"/root");
59659+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
59660+ sys_chroot((const char __force_user *)".");
59661
59662 /*
59663 * In case that a resume from disk is carried out by linuxrc or one of
59664@@ -67,15 +67,15 @@ static void __init handle_initrd(void)
59665
59666 /* move initrd to rootfs' /old */
59667 sys_fchdir(old_fd);
59668- sys_mount("/", ".", NULL, MS_MOVE, NULL);
59669+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
59670 /* switch root and cwd back to / of rootfs */
59671 sys_fchdir(root_fd);
59672- sys_chroot(".");
59673+ sys_chroot((const char __force_user *)".");
59674 sys_close(old_fd);
59675 sys_close(root_fd);
59676
59677 if (new_decode_dev(real_root_dev) == Root_RAM0) {
59678- sys_chdir("/old");
59679+ sys_chdir((const char __force_user *)"/old");
59680 return;
59681 }
59682
59683@@ -83,17 +83,17 @@ static void __init handle_initrd(void)
59684 mount_root();
59685
59686 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
59687- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
59688+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
59689 if (!error)
59690 printk("okay\n");
59691 else {
59692- int fd = sys_open("/dev/root.old", O_RDWR, 0);
59693+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
59694 if (error == -ENOENT)
59695 printk("/initrd does not exist. Ignored.\n");
59696 else
59697 printk("failed\n");
59698 printk(KERN_NOTICE "Unmounting old root\n");
59699- sys_umount("/old", MNT_DETACH);
59700+ sys_umount((char __force_user *)"/old", MNT_DETACH);
59701 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
59702 if (fd < 0) {
59703 error = fd;
59704@@ -116,11 +116,11 @@ int __init initrd_load(void)
59705 * mounted in the normal path.
59706 */
59707 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
59708- sys_unlink("/initrd.image");
59709+ sys_unlink((const char __force_user *)"/initrd.image");
59710 handle_initrd();
59711 return 1;
59712 }
59713 }
59714- sys_unlink("/initrd.image");
59715+ sys_unlink((const char __force_user *)"/initrd.image");
59716 return 0;
59717 }
59718diff -urNp linux-3.0.4/init/do_mounts_md.c linux-3.0.4/init/do_mounts_md.c
59719--- linux-3.0.4/init/do_mounts_md.c 2011-07-21 22:17:23.000000000 -0400
59720+++ linux-3.0.4/init/do_mounts_md.c 2011-10-06 04:17:55.000000000 -0400
59721@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
59722 partitioned ? "_d" : "", minor,
59723 md_setup_args[ent].device_names);
59724
59725- fd = sys_open(name, 0, 0);
59726+ fd = sys_open((char __force_user *)name, 0, 0);
59727 if (fd < 0) {
59728 printk(KERN_ERR "md: open failed - cannot start "
59729 "array %s\n", name);
59730@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
59731 * array without it
59732 */
59733 sys_close(fd);
59734- fd = sys_open(name, 0, 0);
59735+ fd = sys_open((char __force_user *)name, 0, 0);
59736 sys_ioctl(fd, BLKRRPART, 0);
59737 }
59738 sys_close(fd);
59739@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
59740
59741 wait_for_device_probe();
59742
59743- fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
59744+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
59745 if (fd >= 0) {
59746 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
59747 sys_close(fd);
59748diff -urNp linux-3.0.4/init/initramfs.c linux-3.0.4/init/initramfs.c
59749--- linux-3.0.4/init/initramfs.c 2011-07-21 22:17:23.000000000 -0400
59750+++ linux-3.0.4/init/initramfs.c 2011-10-06 04:17:55.000000000 -0400
59751@@ -74,7 +74,7 @@ static void __init free_hash(void)
59752 }
59753 }
59754
59755-static long __init do_utime(char __user *filename, time_t mtime)
59756+static long __init do_utime(__force char __user *filename, time_t mtime)
59757 {
59758 struct timespec t[2];
59759
59760@@ -109,7 +109,7 @@ static void __init dir_utime(void)
59761 struct dir_entry *de, *tmp;
59762 list_for_each_entry_safe(de, tmp, &dir_list, list) {
59763 list_del(&de->list);
59764- do_utime(de->name, de->mtime);
59765+ do_utime((char __force_user *)de->name, de->mtime);
59766 kfree(de->name);
59767 kfree(de);
59768 }
59769@@ -271,7 +271,7 @@ static int __init maybe_link(void)
59770 if (nlink >= 2) {
59771 char *old = find_link(major, minor, ino, mode, collected);
59772 if (old)
59773- return (sys_link(old, collected) < 0) ? -1 : 1;
59774+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
59775 }
59776 return 0;
59777 }
59778@@ -280,11 +280,11 @@ static void __init clean_path(char *path
59779 {
59780 struct stat st;
59781
59782- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
59783+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
59784 if (S_ISDIR(st.st_mode))
59785- sys_rmdir(path);
59786+ sys_rmdir((char __force_user *)path);
59787 else
59788- sys_unlink(path);
59789+ sys_unlink((char __force_user *)path);
59790 }
59791 }
59792
59793@@ -305,7 +305,7 @@ static int __init do_name(void)
59794 int openflags = O_WRONLY|O_CREAT;
59795 if (ml != 1)
59796 openflags |= O_TRUNC;
59797- wfd = sys_open(collected, openflags, mode);
59798+ wfd = sys_open((char __force_user *)collected, openflags, mode);
59799
59800 if (wfd >= 0) {
59801 sys_fchown(wfd, uid, gid);
59802@@ -317,17 +317,17 @@ static int __init do_name(void)
59803 }
59804 }
59805 } else if (S_ISDIR(mode)) {
59806- sys_mkdir(collected, mode);
59807- sys_chown(collected, uid, gid);
59808- sys_chmod(collected, mode);
59809+ sys_mkdir((char __force_user *)collected, mode);
59810+ sys_chown((char __force_user *)collected, uid, gid);
59811+ sys_chmod((char __force_user *)collected, mode);
59812 dir_add(collected, mtime);
59813 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
59814 S_ISFIFO(mode) || S_ISSOCK(mode)) {
59815 if (maybe_link() == 0) {
59816- sys_mknod(collected, mode, rdev);
59817- sys_chown(collected, uid, gid);
59818- sys_chmod(collected, mode);
59819- do_utime(collected, mtime);
59820+ sys_mknod((char __force_user *)collected, mode, rdev);
59821+ sys_chown((char __force_user *)collected, uid, gid);
59822+ sys_chmod((char __force_user *)collected, mode);
59823+ do_utime((char __force_user *)collected, mtime);
59824 }
59825 }
59826 return 0;
59827@@ -336,15 +336,15 @@ static int __init do_name(void)
59828 static int __init do_copy(void)
59829 {
59830 if (count >= body_len) {
59831- sys_write(wfd, victim, body_len);
59832+ sys_write(wfd, (char __force_user *)victim, body_len);
59833 sys_close(wfd);
59834- do_utime(vcollected, mtime);
59835+ do_utime((char __force_user *)vcollected, mtime);
59836 kfree(vcollected);
59837 eat(body_len);
59838 state = SkipIt;
59839 return 0;
59840 } else {
59841- sys_write(wfd, victim, count);
59842+ sys_write(wfd, (char __force_user *)victim, count);
59843 body_len -= count;
59844 eat(count);
59845 return 1;
59846@@ -355,9 +355,9 @@ static int __init do_symlink(void)
59847 {
59848 collected[N_ALIGN(name_len) + body_len] = '\0';
59849 clean_path(collected, 0);
59850- sys_symlink(collected + N_ALIGN(name_len), collected);
59851- sys_lchown(collected, uid, gid);
59852- do_utime(collected, mtime);
59853+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
59854+ sys_lchown((char __force_user *)collected, uid, gid);
59855+ do_utime((char __force_user *)collected, mtime);
59856 state = SkipIt;
59857 next_state = Reset;
59858 return 0;
59859diff -urNp linux-3.0.4/init/Kconfig linux-3.0.4/init/Kconfig
59860--- linux-3.0.4/init/Kconfig 2011-07-21 22:17:23.000000000 -0400
59861+++ linux-3.0.4/init/Kconfig 2011-08-23 21:47:56.000000000 -0400
59862@@ -1195,7 +1195,7 @@ config SLUB_DEBUG
59863
59864 config COMPAT_BRK
59865 bool "Disable heap randomization"
59866- default y
59867+ default n
59868 help
59869 Randomizing heap placement makes heap exploits harder, but it
59870 also breaks ancient binaries (including anything libc5 based).
59871diff -urNp linux-3.0.4/init/main.c linux-3.0.4/init/main.c
59872--- linux-3.0.4/init/main.c 2011-07-21 22:17:23.000000000 -0400
59873+++ linux-3.0.4/init/main.c 2011-10-06 04:17:55.000000000 -0400
59874@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void)
59875 extern void tc_init(void);
59876 #endif
59877
59878+extern void grsecurity_init(void);
59879+
59880 /*
59881 * Debug helper: via this flag we know that we are in 'early bootup code'
59882 * where only the boot processor is running with IRQ disabled. This means
59883@@ -149,6 +151,49 @@ static int __init set_reset_devices(char
59884
59885 __setup("reset_devices", set_reset_devices);
59886
59887+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
59888+extern char pax_enter_kernel_user[];
59889+extern char pax_exit_kernel_user[];
59890+extern pgdval_t clone_pgd_mask;
59891+#endif
59892+
59893+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
59894+static int __init setup_pax_nouderef(char *str)
59895+{
59896+#ifdef CONFIG_X86_32
59897+ unsigned int cpu;
59898+ struct desc_struct *gdt;
59899+
59900+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
59901+ gdt = get_cpu_gdt_table(cpu);
59902+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
59903+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
59904+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
59905+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
59906+ }
59907+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
59908+#else
59909+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
59910+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
59911+ clone_pgd_mask = ~(pgdval_t)0UL;
59912+#endif
59913+
59914+ return 0;
59915+}
59916+early_param("pax_nouderef", setup_pax_nouderef);
59917+#endif
59918+
59919+#ifdef CONFIG_PAX_SOFTMODE
59920+int pax_softmode;
59921+
59922+static int __init setup_pax_softmode(char *str)
59923+{
59924+ get_option(&str, &pax_softmode);
59925+ return 1;
59926+}
59927+__setup("pax_softmode=", setup_pax_softmode);
59928+#endif
59929+
59930 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
59931 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
59932 static const char *panic_later, *panic_param;
59933@@ -667,6 +712,7 @@ int __init_or_module do_one_initcall(ini
59934 {
59935 int count = preempt_count();
59936 int ret;
59937+ const char *msg1 = "", *msg2 = "";
59938
59939 if (initcall_debug)
59940 ret = do_one_initcall_debug(fn);
59941@@ -679,15 +725,15 @@ int __init_or_module do_one_initcall(ini
59942 sprintf(msgbuf, "error code %d ", ret);
59943
59944 if (preempt_count() != count) {
59945- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
59946+ msg1 = " preemption imbalance";
59947 preempt_count() = count;
59948 }
59949 if (irqs_disabled()) {
59950- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
59951+ msg2 = " disabled interrupts";
59952 local_irq_enable();
59953 }
59954- if (msgbuf[0]) {
59955- printk("initcall %pF returned with %s\n", fn, msgbuf);
59956+ if (msgbuf[0] || *msg1 || *msg2) {
59957+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
59958 }
59959
59960 return ret;
59961@@ -805,7 +851,7 @@ static int __init kernel_init(void * unu
59962 do_basic_setup();
59963
59964 /* Open the /dev/console on the rootfs, this should never fail */
59965- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
59966+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
59967 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
59968
59969 (void) sys_dup(0);
59970@@ -818,11 +864,13 @@ static int __init kernel_init(void * unu
59971 if (!ramdisk_execute_command)
59972 ramdisk_execute_command = "/init";
59973
59974- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
59975+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
59976 ramdisk_execute_command = NULL;
59977 prepare_namespace();
59978 }
59979
59980+ grsecurity_init();
59981+
59982 /*
59983 * Ok, we have completed the initial bootup, and
59984 * we're essentially up and running. Get rid of the
59985diff -urNp linux-3.0.4/ipc/mqueue.c linux-3.0.4/ipc/mqueue.c
59986--- linux-3.0.4/ipc/mqueue.c 2011-07-21 22:17:23.000000000 -0400
59987+++ linux-3.0.4/ipc/mqueue.c 2011-08-23 21:48:14.000000000 -0400
59988@@ -154,6 +154,7 @@ static struct inode *mqueue_get_inode(st
59989 mq_bytes = (mq_msg_tblsz +
59990 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
59991
59992+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
59993 spin_lock(&mq_lock);
59994 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
59995 u->mq_bytes + mq_bytes >
59996diff -urNp linux-3.0.4/ipc/msg.c linux-3.0.4/ipc/msg.c
59997--- linux-3.0.4/ipc/msg.c 2011-07-21 22:17:23.000000000 -0400
59998+++ linux-3.0.4/ipc/msg.c 2011-08-23 21:47:56.000000000 -0400
59999@@ -309,18 +309,19 @@ static inline int msg_security(struct ke
60000 return security_msg_queue_associate(msq, msgflg);
60001 }
60002
60003+static struct ipc_ops msg_ops = {
60004+ .getnew = newque,
60005+ .associate = msg_security,
60006+ .more_checks = NULL
60007+};
60008+
60009 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
60010 {
60011 struct ipc_namespace *ns;
60012- struct ipc_ops msg_ops;
60013 struct ipc_params msg_params;
60014
60015 ns = current->nsproxy->ipc_ns;
60016
60017- msg_ops.getnew = newque;
60018- msg_ops.associate = msg_security;
60019- msg_ops.more_checks = NULL;
60020-
60021 msg_params.key = key;
60022 msg_params.flg = msgflg;
60023
60024diff -urNp linux-3.0.4/ipc/sem.c linux-3.0.4/ipc/sem.c
60025--- linux-3.0.4/ipc/sem.c 2011-09-02 18:11:21.000000000 -0400
60026+++ linux-3.0.4/ipc/sem.c 2011-08-23 21:48:14.000000000 -0400
60027@@ -318,10 +318,15 @@ static inline int sem_more_checks(struct
60028 return 0;
60029 }
60030
60031+static struct ipc_ops sem_ops = {
60032+ .getnew = newary,
60033+ .associate = sem_security,
60034+ .more_checks = sem_more_checks
60035+};
60036+
60037 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
60038 {
60039 struct ipc_namespace *ns;
60040- struct ipc_ops sem_ops;
60041 struct ipc_params sem_params;
60042
60043 ns = current->nsproxy->ipc_ns;
60044@@ -329,10 +334,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
60045 if (nsems < 0 || nsems > ns->sc_semmsl)
60046 return -EINVAL;
60047
60048- sem_ops.getnew = newary;
60049- sem_ops.associate = sem_security;
60050- sem_ops.more_checks = sem_more_checks;
60051-
60052 sem_params.key = key;
60053 sem_params.flg = semflg;
60054 sem_params.u.nsems = nsems;
60055@@ -854,6 +855,8 @@ static int semctl_main(struct ipc_namesp
60056 int nsems;
60057 struct list_head tasks;
60058
60059+ pax_track_stack();
60060+
60061 sma = sem_lock_check(ns, semid);
60062 if (IS_ERR(sma))
60063 return PTR_ERR(sma);
60064@@ -1301,6 +1304,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
60065 struct ipc_namespace *ns;
60066 struct list_head tasks;
60067
60068+ pax_track_stack();
60069+
60070 ns = current->nsproxy->ipc_ns;
60071
60072 if (nsops < 1 || semid < 0)
60073diff -urNp linux-3.0.4/ipc/shm.c linux-3.0.4/ipc/shm.c
60074--- linux-3.0.4/ipc/shm.c 2011-07-21 22:17:23.000000000 -0400
60075+++ linux-3.0.4/ipc/shm.c 2011-08-23 21:48:14.000000000 -0400
60076@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_name
60077 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
60078 #endif
60079
60080+#ifdef CONFIG_GRKERNSEC
60081+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
60082+ const time_t shm_createtime, const uid_t cuid,
60083+ const int shmid);
60084+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
60085+ const time_t shm_createtime);
60086+#endif
60087+
60088 void shm_init_ns(struct ipc_namespace *ns)
60089 {
60090 ns->shm_ctlmax = SHMMAX;
60091@@ -401,6 +409,14 @@ static int newseg(struct ipc_namespace *
60092 shp->shm_lprid = 0;
60093 shp->shm_atim = shp->shm_dtim = 0;
60094 shp->shm_ctim = get_seconds();
60095+#ifdef CONFIG_GRKERNSEC
60096+ {
60097+ struct timespec timeval;
60098+ do_posix_clock_monotonic_gettime(&timeval);
60099+
60100+ shp->shm_createtime = timeval.tv_sec;
60101+ }
60102+#endif
60103 shp->shm_segsz = size;
60104 shp->shm_nattch = 0;
60105 shp->shm_file = file;
60106@@ -451,18 +467,19 @@ static inline int shm_more_checks(struct
60107 return 0;
60108 }
60109
60110+static struct ipc_ops shm_ops = {
60111+ .getnew = newseg,
60112+ .associate = shm_security,
60113+ .more_checks = shm_more_checks
60114+};
60115+
60116 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
60117 {
60118 struct ipc_namespace *ns;
60119- struct ipc_ops shm_ops;
60120 struct ipc_params shm_params;
60121
60122 ns = current->nsproxy->ipc_ns;
60123
60124- shm_ops.getnew = newseg;
60125- shm_ops.associate = shm_security;
60126- shm_ops.more_checks = shm_more_checks;
60127-
60128 shm_params.key = key;
60129 shm_params.flg = shmflg;
60130 shm_params.u.size = size;
60131@@ -762,8 +779,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int,
60132 case SHM_LOCK:
60133 case SHM_UNLOCK:
60134 {
60135- struct file *uninitialized_var(shm_file);
60136-
60137 lru_add_drain_all(); /* drain pagevecs to lru lists */
60138
60139 shp = shm_lock_check(ns, shmid);
60140@@ -896,9 +911,21 @@ long do_shmat(int shmid, char __user *sh
60141 if (err)
60142 goto out_unlock;
60143
60144+#ifdef CONFIG_GRKERNSEC
60145+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
60146+ shp->shm_perm.cuid, shmid) ||
60147+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
60148+ err = -EACCES;
60149+ goto out_unlock;
60150+ }
60151+#endif
60152+
60153 path = shp->shm_file->f_path;
60154 path_get(&path);
60155 shp->shm_nattch++;
60156+#ifdef CONFIG_GRKERNSEC
60157+ shp->shm_lapid = current->pid;
60158+#endif
60159 size = i_size_read(path.dentry->d_inode);
60160 shm_unlock(shp);
60161
60162diff -urNp linux-3.0.4/kernel/acct.c linux-3.0.4/kernel/acct.c
60163--- linux-3.0.4/kernel/acct.c 2011-07-21 22:17:23.000000000 -0400
60164+++ linux-3.0.4/kernel/acct.c 2011-10-06 04:17:55.000000000 -0400
60165@@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_a
60166 */
60167 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
60168 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
60169- file->f_op->write(file, (char *)&ac,
60170+ file->f_op->write(file, (char __force_user *)&ac,
60171 sizeof(acct_t), &file->f_pos);
60172 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
60173 set_fs(fs);
60174diff -urNp linux-3.0.4/kernel/audit.c linux-3.0.4/kernel/audit.c
60175--- linux-3.0.4/kernel/audit.c 2011-07-21 22:17:23.000000000 -0400
60176+++ linux-3.0.4/kernel/audit.c 2011-08-23 21:47:56.000000000 -0400
60177@@ -112,7 +112,7 @@ u32 audit_sig_sid = 0;
60178 3) suppressed due to audit_rate_limit
60179 4) suppressed due to audit_backlog_limit
60180 */
60181-static atomic_t audit_lost = ATOMIC_INIT(0);
60182+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
60183
60184 /* The netlink socket. */
60185 static struct sock *audit_sock;
60186@@ -234,7 +234,7 @@ void audit_log_lost(const char *message)
60187 unsigned long now;
60188 int print;
60189
60190- atomic_inc(&audit_lost);
60191+ atomic_inc_unchecked(&audit_lost);
60192
60193 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
60194
60195@@ -253,7 +253,7 @@ void audit_log_lost(const char *message)
60196 printk(KERN_WARNING
60197 "audit: audit_lost=%d audit_rate_limit=%d "
60198 "audit_backlog_limit=%d\n",
60199- atomic_read(&audit_lost),
60200+ atomic_read_unchecked(&audit_lost),
60201 audit_rate_limit,
60202 audit_backlog_limit);
60203 audit_panic(message);
60204@@ -686,7 +686,7 @@ static int audit_receive_msg(struct sk_b
60205 status_set.pid = audit_pid;
60206 status_set.rate_limit = audit_rate_limit;
60207 status_set.backlog_limit = audit_backlog_limit;
60208- status_set.lost = atomic_read(&audit_lost);
60209+ status_set.lost = atomic_read_unchecked(&audit_lost);
60210 status_set.backlog = skb_queue_len(&audit_skb_queue);
60211 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
60212 &status_set, sizeof(status_set));
60213diff -urNp linux-3.0.4/kernel/auditsc.c linux-3.0.4/kernel/auditsc.c
60214--- linux-3.0.4/kernel/auditsc.c 2011-07-21 22:17:23.000000000 -0400
60215+++ linux-3.0.4/kernel/auditsc.c 2011-08-23 21:47:56.000000000 -0400
60216@@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_conte
60217 }
60218
60219 /* global counter which is incremented every time something logs in */
60220-static atomic_t session_id = ATOMIC_INIT(0);
60221+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
60222
60223 /**
60224 * audit_set_loginuid - set a task's audit_context loginuid
60225@@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT
60226 */
60227 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
60228 {
60229- unsigned int sessionid = atomic_inc_return(&session_id);
60230+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
60231 struct audit_context *context = task->audit_context;
60232
60233 if (context && context->in_syscall) {
60234diff -urNp linux-3.0.4/kernel/capability.c linux-3.0.4/kernel/capability.c
60235--- linux-3.0.4/kernel/capability.c 2011-07-21 22:17:23.000000000 -0400
60236+++ linux-3.0.4/kernel/capability.c 2011-08-23 21:48:14.000000000 -0400
60237@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_
60238 * before modification is attempted and the application
60239 * fails.
60240 */
60241+ if (tocopy > ARRAY_SIZE(kdata))
60242+ return -EFAULT;
60243+
60244 if (copy_to_user(dataptr, kdata, tocopy
60245 * sizeof(struct __user_cap_data_struct))) {
60246 return -EFAULT;
60247@@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *n
60248 BUG();
60249 }
60250
60251- if (security_capable(ns, current_cred(), cap) == 0) {
60252+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
60253 current->flags |= PF_SUPERPRIV;
60254 return true;
60255 }
60256@@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *n
60257 }
60258 EXPORT_SYMBOL(ns_capable);
60259
60260+bool ns_capable_nolog(struct user_namespace *ns, int cap)
60261+{
60262+ if (unlikely(!cap_valid(cap))) {
60263+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
60264+ BUG();
60265+ }
60266+
60267+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
60268+ current->flags |= PF_SUPERPRIV;
60269+ return true;
60270+ }
60271+ return false;
60272+}
60273+EXPORT_SYMBOL(ns_capable_nolog);
60274+
60275+bool capable_nolog(int cap)
60276+{
60277+ return ns_capable_nolog(&init_user_ns, cap);
60278+}
60279+EXPORT_SYMBOL(capable_nolog);
60280+
60281 /**
60282 * task_ns_capable - Determine whether current task has a superior
60283 * capability targeted at a specific task's user namespace.
60284@@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct
60285 }
60286 EXPORT_SYMBOL(task_ns_capable);
60287
60288+bool task_ns_capable_nolog(struct task_struct *t, int cap)
60289+{
60290+ return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
60291+}
60292+EXPORT_SYMBOL(task_ns_capable_nolog);
60293+
60294 /**
60295 * nsown_capable - Check superior capability to one's own user_ns
60296 * @cap: The capability in question
60297diff -urNp linux-3.0.4/kernel/cgroup.c linux-3.0.4/kernel/cgroup.c
60298--- linux-3.0.4/kernel/cgroup.c 2011-07-21 22:17:23.000000000 -0400
60299+++ linux-3.0.4/kernel/cgroup.c 2011-08-23 21:48:14.000000000 -0400
60300@@ -593,6 +593,8 @@ static struct css_set *find_css_set(
60301 struct hlist_head *hhead;
60302 struct cg_cgroup_link *link;
60303
60304+ pax_track_stack();
60305+
60306 /* First see if we already have a cgroup group that matches
60307 * the desired set */
60308 read_lock(&css_set_lock);
60309diff -urNp linux-3.0.4/kernel/compat.c linux-3.0.4/kernel/compat.c
60310--- linux-3.0.4/kernel/compat.c 2011-07-21 22:17:23.000000000 -0400
60311+++ linux-3.0.4/kernel/compat.c 2011-10-06 04:17:55.000000000 -0400
60312@@ -13,6 +13,7 @@
60313
60314 #include <linux/linkage.h>
60315 #include <linux/compat.h>
60316+#include <linux/module.h>
60317 #include <linux/errno.h>
60318 #include <linux/time.h>
60319 #include <linux/signal.h>
60320@@ -166,7 +167,7 @@ static long compat_nanosleep_restart(str
60321 mm_segment_t oldfs;
60322 long ret;
60323
60324- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
60325+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
60326 oldfs = get_fs();
60327 set_fs(KERNEL_DS);
60328 ret = hrtimer_nanosleep_restart(restart);
60329@@ -198,7 +199,7 @@ asmlinkage long compat_sys_nanosleep(str
60330 oldfs = get_fs();
60331 set_fs(KERNEL_DS);
60332 ret = hrtimer_nanosleep(&tu,
60333- rmtp ? (struct timespec __user *)&rmt : NULL,
60334+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
60335 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
60336 set_fs(oldfs);
60337
60338@@ -307,7 +308,7 @@ asmlinkage long compat_sys_sigpending(co
60339 mm_segment_t old_fs = get_fs();
60340
60341 set_fs(KERNEL_DS);
60342- ret = sys_sigpending((old_sigset_t __user *) &s);
60343+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
60344 set_fs(old_fs);
60345 if (ret == 0)
60346 ret = put_user(s, set);
60347@@ -330,8 +331,8 @@ asmlinkage long compat_sys_sigprocmask(i
60348 old_fs = get_fs();
60349 set_fs(KERNEL_DS);
60350 ret = sys_sigprocmask(how,
60351- set ? (old_sigset_t __user *) &s : NULL,
60352- oset ? (old_sigset_t __user *) &s : NULL);
60353+ set ? (old_sigset_t __force_user *) &s : NULL,
60354+ oset ? (old_sigset_t __force_user *) &s : NULL);
60355 set_fs(old_fs);
60356 if (ret == 0)
60357 if (oset)
60358@@ -368,7 +369,7 @@ asmlinkage long compat_sys_old_getrlimit
60359 mm_segment_t old_fs = get_fs();
60360
60361 set_fs(KERNEL_DS);
60362- ret = sys_old_getrlimit(resource, &r);
60363+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
60364 set_fs(old_fs);
60365
60366 if (!ret) {
60367@@ -440,7 +441,7 @@ asmlinkage long compat_sys_getrusage(int
60368 mm_segment_t old_fs = get_fs();
60369
60370 set_fs(KERNEL_DS);
60371- ret = sys_getrusage(who, (struct rusage __user *) &r);
60372+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
60373 set_fs(old_fs);
60374
60375 if (ret)
60376@@ -467,8 +468,8 @@ compat_sys_wait4(compat_pid_t pid, compa
60377 set_fs (KERNEL_DS);
60378 ret = sys_wait4(pid,
60379 (stat_addr ?
60380- (unsigned int __user *) &status : NULL),
60381- options, (struct rusage __user *) &r);
60382+ (unsigned int __force_user *) &status : NULL),
60383+ options, (struct rusage __force_user *) &r);
60384 set_fs (old_fs);
60385
60386 if (ret > 0) {
60387@@ -493,8 +494,8 @@ asmlinkage long compat_sys_waitid(int wh
60388 memset(&info, 0, sizeof(info));
60389
60390 set_fs(KERNEL_DS);
60391- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
60392- uru ? (struct rusage __user *)&ru : NULL);
60393+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
60394+ uru ? (struct rusage __force_user *)&ru : NULL);
60395 set_fs(old_fs);
60396
60397 if ((ret < 0) || (info.si_signo == 0))
60398@@ -624,8 +625,8 @@ long compat_sys_timer_settime(timer_t ti
60399 oldfs = get_fs();
60400 set_fs(KERNEL_DS);
60401 err = sys_timer_settime(timer_id, flags,
60402- (struct itimerspec __user *) &newts,
60403- (struct itimerspec __user *) &oldts);
60404+ (struct itimerspec __force_user *) &newts,
60405+ (struct itimerspec __force_user *) &oldts);
60406 set_fs(oldfs);
60407 if (!err && old && put_compat_itimerspec(old, &oldts))
60408 return -EFAULT;
60409@@ -642,7 +643,7 @@ long compat_sys_timer_gettime(timer_t ti
60410 oldfs = get_fs();
60411 set_fs(KERNEL_DS);
60412 err = sys_timer_gettime(timer_id,
60413- (struct itimerspec __user *) &ts);
60414+ (struct itimerspec __force_user *) &ts);
60415 set_fs(oldfs);
60416 if (!err && put_compat_itimerspec(setting, &ts))
60417 return -EFAULT;
60418@@ -661,7 +662,7 @@ long compat_sys_clock_settime(clockid_t
60419 oldfs = get_fs();
60420 set_fs(KERNEL_DS);
60421 err = sys_clock_settime(which_clock,
60422- (struct timespec __user *) &ts);
60423+ (struct timespec __force_user *) &ts);
60424 set_fs(oldfs);
60425 return err;
60426 }
60427@@ -676,7 +677,7 @@ long compat_sys_clock_gettime(clockid_t
60428 oldfs = get_fs();
60429 set_fs(KERNEL_DS);
60430 err = sys_clock_gettime(which_clock,
60431- (struct timespec __user *) &ts);
60432+ (struct timespec __force_user *) &ts);
60433 set_fs(oldfs);
60434 if (!err && put_compat_timespec(&ts, tp))
60435 return -EFAULT;
60436@@ -696,7 +697,7 @@ long compat_sys_clock_adjtime(clockid_t
60437
60438 oldfs = get_fs();
60439 set_fs(KERNEL_DS);
60440- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
60441+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
60442 set_fs(oldfs);
60443
60444 err = compat_put_timex(utp, &txc);
60445@@ -716,7 +717,7 @@ long compat_sys_clock_getres(clockid_t w
60446 oldfs = get_fs();
60447 set_fs(KERNEL_DS);
60448 err = sys_clock_getres(which_clock,
60449- (struct timespec __user *) &ts);
60450+ (struct timespec __force_user *) &ts);
60451 set_fs(oldfs);
60452 if (!err && tp && put_compat_timespec(&ts, tp))
60453 return -EFAULT;
60454@@ -728,9 +729,9 @@ static long compat_clock_nanosleep_resta
60455 long err;
60456 mm_segment_t oldfs;
60457 struct timespec tu;
60458- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
60459+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
60460
60461- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
60462+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
60463 oldfs = get_fs();
60464 set_fs(KERNEL_DS);
60465 err = clock_nanosleep_restart(restart);
60466@@ -762,8 +763,8 @@ long compat_sys_clock_nanosleep(clockid_
60467 oldfs = get_fs();
60468 set_fs(KERNEL_DS);
60469 err = sys_clock_nanosleep(which_clock, flags,
60470- (struct timespec __user *) &in,
60471- (struct timespec __user *) &out);
60472+ (struct timespec __force_user *) &in,
60473+ (struct timespec __force_user *) &out);
60474 set_fs(oldfs);
60475
60476 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
60477diff -urNp linux-3.0.4/kernel/configs.c linux-3.0.4/kernel/configs.c
60478--- linux-3.0.4/kernel/configs.c 2011-07-21 22:17:23.000000000 -0400
60479+++ linux-3.0.4/kernel/configs.c 2011-08-23 21:48:14.000000000 -0400
60480@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
60481 struct proc_dir_entry *entry;
60482
60483 /* create the current config file */
60484+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
60485+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
60486+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
60487+ &ikconfig_file_ops);
60488+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60489+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
60490+ &ikconfig_file_ops);
60491+#endif
60492+#else
60493 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
60494 &ikconfig_file_ops);
60495+#endif
60496+
60497 if (!entry)
60498 return -ENOMEM;
60499
60500diff -urNp linux-3.0.4/kernel/cred.c linux-3.0.4/kernel/cred.c
60501--- linux-3.0.4/kernel/cred.c 2011-07-21 22:17:23.000000000 -0400
60502+++ linux-3.0.4/kernel/cred.c 2011-08-25 17:23:03.000000000 -0400
60503@@ -158,6 +158,8 @@ static void put_cred_rcu(struct rcu_head
60504 */
60505 void __put_cred(struct cred *cred)
60506 {
60507+ pax_track_stack();
60508+
60509 kdebug("__put_cred(%p{%d,%d})", cred,
60510 atomic_read(&cred->usage),
60511 read_cred_subscribers(cred));
60512@@ -182,6 +184,8 @@ void exit_creds(struct task_struct *tsk)
60513 {
60514 struct cred *cred;
60515
60516+ pax_track_stack();
60517+
60518 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
60519 atomic_read(&tsk->cred->usage),
60520 read_cred_subscribers(tsk->cred));
60521@@ -220,6 +224,8 @@ const struct cred *get_task_cred(struct
60522 {
60523 const struct cred *cred;
60524
60525+ pax_track_stack();
60526+
60527 rcu_read_lock();
60528
60529 do {
60530@@ -239,6 +245,8 @@ struct cred *cred_alloc_blank(void)
60531 {
60532 struct cred *new;
60533
60534+ pax_track_stack();
60535+
60536 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
60537 if (!new)
60538 return NULL;
60539@@ -287,6 +295,8 @@ struct cred *prepare_creds(void)
60540 const struct cred *old;
60541 struct cred *new;
60542
60543+ pax_track_stack();
60544+
60545 validate_process_creds();
60546
60547 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
60548@@ -333,6 +343,8 @@ struct cred *prepare_exec_creds(void)
60549 struct thread_group_cred *tgcred = NULL;
60550 struct cred *new;
60551
60552+ pax_track_stack();
60553+
60554 #ifdef CONFIG_KEYS
60555 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
60556 if (!tgcred)
60557@@ -385,6 +397,8 @@ int copy_creds(struct task_struct *p, un
60558 struct cred *new;
60559 int ret;
60560
60561+ pax_track_stack();
60562+
60563 if (
60564 #ifdef CONFIG_KEYS
60565 !p->cred->thread_keyring &&
60566@@ -475,6 +489,8 @@ int commit_creds(struct cred *new)
60567 struct task_struct *task = current;
60568 const struct cred *old = task->real_cred;
60569
60570+ pax_track_stack();
60571+
60572 kdebug("commit_creds(%p{%d,%d})", new,
60573 atomic_read(&new->usage),
60574 read_cred_subscribers(new));
60575@@ -489,6 +505,8 @@ int commit_creds(struct cred *new)
60576
60577 get_cred(new); /* we will require a ref for the subj creds too */
60578
60579+ gr_set_role_label(task, new->uid, new->gid);
60580+
60581 /* dumpability changes */
60582 if (old->euid != new->euid ||
60583 old->egid != new->egid ||
60584@@ -508,10 +526,8 @@ int commit_creds(struct cred *new)
60585 key_fsgid_changed(task);
60586
60587 /* do it
60588- * - What if a process setreuid()'s and this brings the
60589- * new uid over his NPROC rlimit? We can check this now
60590- * cheaply with the new uid cache, so if it matters
60591- * we should be checking for it. -DaveM
60592+ * RLIMIT_NPROC limits on user->processes have already been checked
60593+ * in set_user().
60594 */
60595 alter_cred_subscribers(new, 2);
60596 if (new->user != old->user)
60597@@ -551,6 +567,8 @@ EXPORT_SYMBOL(commit_creds);
60598 */
60599 void abort_creds(struct cred *new)
60600 {
60601+ pax_track_stack();
60602+
60603 kdebug("abort_creds(%p{%d,%d})", new,
60604 atomic_read(&new->usage),
60605 read_cred_subscribers(new));
60606@@ -574,6 +592,8 @@ const struct cred *override_creds(const
60607 {
60608 const struct cred *old = current->cred;
60609
60610+ pax_track_stack();
60611+
60612 kdebug("override_creds(%p{%d,%d})", new,
60613 atomic_read(&new->usage),
60614 read_cred_subscribers(new));
60615@@ -603,6 +623,8 @@ void revert_creds(const struct cred *old
60616 {
60617 const struct cred *override = current->cred;
60618
60619+ pax_track_stack();
60620+
60621 kdebug("revert_creds(%p{%d,%d})", old,
60622 atomic_read(&old->usage),
60623 read_cred_subscribers(old));
60624@@ -649,6 +671,8 @@ struct cred *prepare_kernel_cred(struct
60625 const struct cred *old;
60626 struct cred *new;
60627
60628+ pax_track_stack();
60629+
60630 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
60631 if (!new)
60632 return NULL;
60633@@ -703,6 +727,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
60634 */
60635 int set_security_override(struct cred *new, u32 secid)
60636 {
60637+ pax_track_stack();
60638+
60639 return security_kernel_act_as(new, secid);
60640 }
60641 EXPORT_SYMBOL(set_security_override);
60642@@ -722,6 +748,8 @@ int set_security_override_from_ctx(struc
60643 u32 secid;
60644 int ret;
60645
60646+ pax_track_stack();
60647+
60648 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
60649 if (ret < 0)
60650 return ret;
60651diff -urNp linux-3.0.4/kernel/debug/debug_core.c linux-3.0.4/kernel/debug/debug_core.c
60652--- linux-3.0.4/kernel/debug/debug_core.c 2011-07-21 22:17:23.000000000 -0400
60653+++ linux-3.0.4/kernel/debug/debug_core.c 2011-08-23 21:47:56.000000000 -0400
60654@@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_loc
60655 */
60656 static atomic_t masters_in_kgdb;
60657 static atomic_t slaves_in_kgdb;
60658-static atomic_t kgdb_break_tasklet_var;
60659+static atomic_unchecked_t kgdb_break_tasklet_var;
60660 atomic_t kgdb_setting_breakpoint;
60661
60662 struct task_struct *kgdb_usethread;
60663@@ -129,7 +129,7 @@ int kgdb_single_step;
60664 static pid_t kgdb_sstep_pid;
60665
60666 /* to keep track of the CPU which is doing the single stepping*/
60667-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
60668+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
60669
60670 /*
60671 * If you are debugging a problem where roundup (the collection of
60672@@ -542,7 +542,7 @@ return_normal:
60673 * kernel will only try for the value of sstep_tries before
60674 * giving up and continuing on.
60675 */
60676- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
60677+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
60678 (kgdb_info[cpu].task &&
60679 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
60680 atomic_set(&kgdb_active, -1);
60681@@ -636,8 +636,8 @@ cpu_master_loop:
60682 }
60683
60684 kgdb_restore:
60685- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
60686- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
60687+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
60688+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
60689 if (kgdb_info[sstep_cpu].task)
60690 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
60691 else
60692@@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(vo
60693 static void kgdb_tasklet_bpt(unsigned long ing)
60694 {
60695 kgdb_breakpoint();
60696- atomic_set(&kgdb_break_tasklet_var, 0);
60697+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
60698 }
60699
60700 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
60701
60702 void kgdb_schedule_breakpoint(void)
60703 {
60704- if (atomic_read(&kgdb_break_tasklet_var) ||
60705+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
60706 atomic_read(&kgdb_active) != -1 ||
60707 atomic_read(&kgdb_setting_breakpoint))
60708 return;
60709- atomic_inc(&kgdb_break_tasklet_var);
60710+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
60711 tasklet_schedule(&kgdb_tasklet_breakpoint);
60712 }
60713 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
60714diff -urNp linux-3.0.4/kernel/debug/kdb/kdb_main.c linux-3.0.4/kernel/debug/kdb/kdb_main.c
60715--- linux-3.0.4/kernel/debug/kdb/kdb_main.c 2011-07-21 22:17:23.000000000 -0400
60716+++ linux-3.0.4/kernel/debug/kdb/kdb_main.c 2011-08-23 21:47:56.000000000 -0400
60717@@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const cha
60718 list_for_each_entry(mod, kdb_modules, list) {
60719
60720 kdb_printf("%-20s%8u 0x%p ", mod->name,
60721- mod->core_size, (void *)mod);
60722+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
60723 #ifdef CONFIG_MODULE_UNLOAD
60724 kdb_printf("%4d ", module_refcount(mod));
60725 #endif
60726@@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const cha
60727 kdb_printf(" (Loading)");
60728 else
60729 kdb_printf(" (Live)");
60730- kdb_printf(" 0x%p", mod->module_core);
60731+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
60732
60733 #ifdef CONFIG_MODULE_UNLOAD
60734 {
60735diff -urNp linux-3.0.4/kernel/events/core.c linux-3.0.4/kernel/events/core.c
60736--- linux-3.0.4/kernel/events/core.c 2011-09-02 18:11:21.000000000 -0400
60737+++ linux-3.0.4/kernel/events/core.c 2011-09-14 09:08:05.000000000 -0400
60738@@ -170,7 +170,7 @@ int perf_proc_update_handler(struct ctl_
60739 return 0;
60740 }
60741
60742-static atomic64_t perf_event_id;
60743+static atomic64_unchecked_t perf_event_id;
60744
60745 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
60746 enum event_type_t event_type);
60747@@ -2488,7 +2488,7 @@ static void __perf_event_read(void *info
60748
60749 static inline u64 perf_event_count(struct perf_event *event)
60750 {
60751- return local64_read(&event->count) + atomic64_read(&event->child_count);
60752+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
60753 }
60754
60755 static u64 perf_event_read(struct perf_event *event)
60756@@ -3023,9 +3023,9 @@ u64 perf_event_read_value(struct perf_ev
60757 mutex_lock(&event->child_mutex);
60758 total += perf_event_read(event);
60759 *enabled += event->total_time_enabled +
60760- atomic64_read(&event->child_total_time_enabled);
60761+ atomic64_read_unchecked(&event->child_total_time_enabled);
60762 *running += event->total_time_running +
60763- atomic64_read(&event->child_total_time_running);
60764+ atomic64_read_unchecked(&event->child_total_time_running);
60765
60766 list_for_each_entry(child, &event->child_list, child_list) {
60767 total += perf_event_read(child);
60768@@ -3388,10 +3388,10 @@ void perf_event_update_userpage(struct p
60769 userpg->offset -= local64_read(&event->hw.prev_count);
60770
60771 userpg->time_enabled = event->total_time_enabled +
60772- atomic64_read(&event->child_total_time_enabled);
60773+ atomic64_read_unchecked(&event->child_total_time_enabled);
60774
60775 userpg->time_running = event->total_time_running +
60776- atomic64_read(&event->child_total_time_running);
60777+ atomic64_read_unchecked(&event->child_total_time_running);
60778
60779 barrier();
60780 ++userpg->lock;
60781@@ -4188,11 +4188,11 @@ static void perf_output_read_one(struct
60782 values[n++] = perf_event_count(event);
60783 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
60784 values[n++] = enabled +
60785- atomic64_read(&event->child_total_time_enabled);
60786+ atomic64_read_unchecked(&event->child_total_time_enabled);
60787 }
60788 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
60789 values[n++] = running +
60790- atomic64_read(&event->child_total_time_running);
60791+ atomic64_read_unchecked(&event->child_total_time_running);
60792 }
60793 if (read_format & PERF_FORMAT_ID)
60794 values[n++] = primary_event_id(event);
60795@@ -4833,12 +4833,12 @@ static void perf_event_mmap_event(struct
60796 * need to add enough zero bytes after the string to handle
60797 * the 64bit alignment we do later.
60798 */
60799- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
60800+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
60801 if (!buf) {
60802 name = strncpy(tmp, "//enomem", sizeof(tmp));
60803 goto got_name;
60804 }
60805- name = d_path(&file->f_path, buf, PATH_MAX);
60806+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
60807 if (IS_ERR(name)) {
60808 name = strncpy(tmp, "//toolong", sizeof(tmp));
60809 goto got_name;
60810@@ -6190,7 +6190,7 @@ perf_event_alloc(struct perf_event_attr
60811 event->parent = parent_event;
60812
60813 event->ns = get_pid_ns(current->nsproxy->pid_ns);
60814- event->id = atomic64_inc_return(&perf_event_id);
60815+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
60816
60817 event->state = PERF_EVENT_STATE_INACTIVE;
60818
60819@@ -6713,10 +6713,10 @@ static void sync_child_event(struct perf
60820 /*
60821 * Add back the child's count to the parent's count:
60822 */
60823- atomic64_add(child_val, &parent_event->child_count);
60824- atomic64_add(child_event->total_time_enabled,
60825+ atomic64_add_unchecked(child_val, &parent_event->child_count);
60826+ atomic64_add_unchecked(child_event->total_time_enabled,
60827 &parent_event->child_total_time_enabled);
60828- atomic64_add(child_event->total_time_running,
60829+ atomic64_add_unchecked(child_event->total_time_running,
60830 &parent_event->child_total_time_running);
60831
60832 /*
60833diff -urNp linux-3.0.4/kernel/exit.c linux-3.0.4/kernel/exit.c
60834--- linux-3.0.4/kernel/exit.c 2011-07-21 22:17:23.000000000 -0400
60835+++ linux-3.0.4/kernel/exit.c 2011-08-23 21:48:14.000000000 -0400
60836@@ -57,6 +57,10 @@
60837 #include <asm/pgtable.h>
60838 #include <asm/mmu_context.h>
60839
60840+#ifdef CONFIG_GRKERNSEC
60841+extern rwlock_t grsec_exec_file_lock;
60842+#endif
60843+
60844 static void exit_mm(struct task_struct * tsk);
60845
60846 static void __unhash_process(struct task_struct *p, bool group_dead)
60847@@ -169,6 +173,10 @@ void release_task(struct task_struct * p
60848 struct task_struct *leader;
60849 int zap_leader;
60850 repeat:
60851+#ifdef CONFIG_NET
60852+ gr_del_task_from_ip_table(p);
60853+#endif
60854+
60855 tracehook_prepare_release_task(p);
60856 /* don't need to get the RCU readlock here - the process is dead and
60857 * can't be modifying its own credentials. But shut RCU-lockdep up */
60858@@ -338,11 +346,22 @@ static void reparent_to_kthreadd(void)
60859 {
60860 write_lock_irq(&tasklist_lock);
60861
60862+#ifdef CONFIG_GRKERNSEC
60863+ write_lock(&grsec_exec_file_lock);
60864+ if (current->exec_file) {
60865+ fput(current->exec_file);
60866+ current->exec_file = NULL;
60867+ }
60868+ write_unlock(&grsec_exec_file_lock);
60869+#endif
60870+
60871 ptrace_unlink(current);
60872 /* Reparent to init */
60873 current->real_parent = current->parent = kthreadd_task;
60874 list_move_tail(&current->sibling, &current->real_parent->children);
60875
60876+ gr_set_kernel_label(current);
60877+
60878 /* Set the exit signal to SIGCHLD so we signal init on exit */
60879 current->exit_signal = SIGCHLD;
60880
60881@@ -394,7 +413,7 @@ int allow_signal(int sig)
60882 * know it'll be handled, so that they don't get converted to
60883 * SIGKILL or just silently dropped.
60884 */
60885- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
60886+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
60887 recalc_sigpending();
60888 spin_unlock_irq(&current->sighand->siglock);
60889 return 0;
60890@@ -430,6 +449,17 @@ void daemonize(const char *name, ...)
60891 vsnprintf(current->comm, sizeof(current->comm), name, args);
60892 va_end(args);
60893
60894+#ifdef CONFIG_GRKERNSEC
60895+ write_lock(&grsec_exec_file_lock);
60896+ if (current->exec_file) {
60897+ fput(current->exec_file);
60898+ current->exec_file = NULL;
60899+ }
60900+ write_unlock(&grsec_exec_file_lock);
60901+#endif
60902+
60903+ gr_set_kernel_label(current);
60904+
60905 /*
60906 * If we were started as result of loading a module, close all of the
60907 * user space pages. We don't need them, and if we didn't close them
60908@@ -904,15 +934,8 @@ NORET_TYPE void do_exit(long code)
60909 struct task_struct *tsk = current;
60910 int group_dead;
60911
60912- profile_task_exit(tsk);
60913-
60914- WARN_ON(atomic_read(&tsk->fs_excl));
60915- WARN_ON(blk_needs_flush_plug(tsk));
60916-
60917 if (unlikely(in_interrupt()))
60918 panic("Aiee, killing interrupt handler!");
60919- if (unlikely(!tsk->pid))
60920- panic("Attempted to kill the idle task!");
60921
60922 /*
60923 * If do_exit is called because this processes oopsed, it's possible
60924@@ -923,6 +946,14 @@ NORET_TYPE void do_exit(long code)
60925 */
60926 set_fs(USER_DS);
60927
60928+ profile_task_exit(tsk);
60929+
60930+ WARN_ON(atomic_read(&tsk->fs_excl));
60931+ WARN_ON(blk_needs_flush_plug(tsk));
60932+
60933+ if (unlikely(!tsk->pid))
60934+ panic("Attempted to kill the idle task!");
60935+
60936 tracehook_report_exit(&code);
60937
60938 validate_creds_for_do_exit(tsk);
60939@@ -983,6 +1014,9 @@ NORET_TYPE void do_exit(long code)
60940 tsk->exit_code = code;
60941 taskstats_exit(tsk, group_dead);
60942
60943+ gr_acl_handle_psacct(tsk, code);
60944+ gr_acl_handle_exit();
60945+
60946 exit_mm(tsk);
60947
60948 if (group_dead)
60949diff -urNp linux-3.0.4/kernel/fork.c linux-3.0.4/kernel/fork.c
60950--- linux-3.0.4/kernel/fork.c 2011-07-21 22:17:23.000000000 -0400
60951+++ linux-3.0.4/kernel/fork.c 2011-08-25 17:23:36.000000000 -0400
60952@@ -286,7 +286,7 @@ static struct task_struct *dup_task_stru
60953 *stackend = STACK_END_MAGIC; /* for overflow detection */
60954
60955 #ifdef CONFIG_CC_STACKPROTECTOR
60956- tsk->stack_canary = get_random_int();
60957+ tsk->stack_canary = pax_get_random_long();
60958 #endif
60959
60960 /* One for us, one for whoever does the "release_task()" (usually parent) */
60961@@ -308,13 +308,77 @@ out:
60962 }
60963
60964 #ifdef CONFIG_MMU
60965+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
60966+{
60967+ struct vm_area_struct *tmp;
60968+ unsigned long charge;
60969+ struct mempolicy *pol;
60970+ struct file *file;
60971+
60972+ charge = 0;
60973+ if (mpnt->vm_flags & VM_ACCOUNT) {
60974+ unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
60975+ if (security_vm_enough_memory(len))
60976+ goto fail_nomem;
60977+ charge = len;
60978+ }
60979+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
60980+ if (!tmp)
60981+ goto fail_nomem;
60982+ *tmp = *mpnt;
60983+ tmp->vm_mm = mm;
60984+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
60985+ pol = mpol_dup(vma_policy(mpnt));
60986+ if (IS_ERR(pol))
60987+ goto fail_nomem_policy;
60988+ vma_set_policy(tmp, pol);
60989+ if (anon_vma_fork(tmp, mpnt))
60990+ goto fail_nomem_anon_vma_fork;
60991+ tmp->vm_flags &= ~VM_LOCKED;
60992+ tmp->vm_next = tmp->vm_prev = NULL;
60993+ tmp->vm_mirror = NULL;
60994+ file = tmp->vm_file;
60995+ if (file) {
60996+ struct inode *inode = file->f_path.dentry->d_inode;
60997+ struct address_space *mapping = file->f_mapping;
60998+
60999+ get_file(file);
61000+ if (tmp->vm_flags & VM_DENYWRITE)
61001+ atomic_dec(&inode->i_writecount);
61002+ mutex_lock(&mapping->i_mmap_mutex);
61003+ if (tmp->vm_flags & VM_SHARED)
61004+ mapping->i_mmap_writable++;
61005+ flush_dcache_mmap_lock(mapping);
61006+ /* insert tmp into the share list, just after mpnt */
61007+ vma_prio_tree_add(tmp, mpnt);
61008+ flush_dcache_mmap_unlock(mapping);
61009+ mutex_unlock(&mapping->i_mmap_mutex);
61010+ }
61011+
61012+ /*
61013+ * Clear hugetlb-related page reserves for children. This only
61014+ * affects MAP_PRIVATE mappings. Faults generated by the child
61015+ * are not guaranteed to succeed, even if read-only
61016+ */
61017+ if (is_vm_hugetlb_page(tmp))
61018+ reset_vma_resv_huge_pages(tmp);
61019+
61020+ return tmp;
61021+
61022+fail_nomem_anon_vma_fork:
61023+ mpol_put(pol);
61024+fail_nomem_policy:
61025+ kmem_cache_free(vm_area_cachep, tmp);
61026+fail_nomem:
61027+ vm_unacct_memory(charge);
61028+ return NULL;
61029+}
61030+
61031 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
61032 {
61033 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
61034 struct rb_node **rb_link, *rb_parent;
61035 int retval;
61036- unsigned long charge;
61037- struct mempolicy *pol;
61038
61039 down_write(&oldmm->mmap_sem);
61040 flush_cache_dup_mm(oldmm);
61041@@ -326,8 +390,8 @@ static int dup_mmap(struct mm_struct *mm
61042 mm->locked_vm = 0;
61043 mm->mmap = NULL;
61044 mm->mmap_cache = NULL;
61045- mm->free_area_cache = oldmm->mmap_base;
61046- mm->cached_hole_size = ~0UL;
61047+ mm->free_area_cache = oldmm->free_area_cache;
61048+ mm->cached_hole_size = oldmm->cached_hole_size;
61049 mm->map_count = 0;
61050 cpumask_clear(mm_cpumask(mm));
61051 mm->mm_rb = RB_ROOT;
61052@@ -343,8 +407,6 @@ static int dup_mmap(struct mm_struct *mm
61053
61054 prev = NULL;
61055 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
61056- struct file *file;
61057-
61058 if (mpnt->vm_flags & VM_DONTCOPY) {
61059 long pages = vma_pages(mpnt);
61060 mm->total_vm -= pages;
61061@@ -352,55 +414,13 @@ static int dup_mmap(struct mm_struct *mm
61062 -pages);
61063 continue;
61064 }
61065- charge = 0;
61066- if (mpnt->vm_flags & VM_ACCOUNT) {
61067- unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
61068- if (security_vm_enough_memory(len))
61069- goto fail_nomem;
61070- charge = len;
61071- }
61072- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
61073- if (!tmp)
61074- goto fail_nomem;
61075- *tmp = *mpnt;
61076- INIT_LIST_HEAD(&tmp->anon_vma_chain);
61077- pol = mpol_dup(vma_policy(mpnt));
61078- retval = PTR_ERR(pol);
61079- if (IS_ERR(pol))
61080- goto fail_nomem_policy;
61081- vma_set_policy(tmp, pol);
61082- tmp->vm_mm = mm;
61083- if (anon_vma_fork(tmp, mpnt))
61084- goto fail_nomem_anon_vma_fork;
61085- tmp->vm_flags &= ~VM_LOCKED;
61086- tmp->vm_next = tmp->vm_prev = NULL;
61087- file = tmp->vm_file;
61088- if (file) {
61089- struct inode *inode = file->f_path.dentry->d_inode;
61090- struct address_space *mapping = file->f_mapping;
61091-
61092- get_file(file);
61093- if (tmp->vm_flags & VM_DENYWRITE)
61094- atomic_dec(&inode->i_writecount);
61095- mutex_lock(&mapping->i_mmap_mutex);
61096- if (tmp->vm_flags & VM_SHARED)
61097- mapping->i_mmap_writable++;
61098- flush_dcache_mmap_lock(mapping);
61099- /* insert tmp into the share list, just after mpnt */
61100- vma_prio_tree_add(tmp, mpnt);
61101- flush_dcache_mmap_unlock(mapping);
61102- mutex_unlock(&mapping->i_mmap_mutex);
61103+ tmp = dup_vma(mm, mpnt);
61104+ if (!tmp) {
61105+ retval = -ENOMEM;
61106+ goto out;
61107 }
61108
61109 /*
61110- * Clear hugetlb-related page reserves for children. This only
61111- * affects MAP_PRIVATE mappings. Faults generated by the child
61112- * are not guaranteed to succeed, even if read-only
61113- */
61114- if (is_vm_hugetlb_page(tmp))
61115- reset_vma_resv_huge_pages(tmp);
61116-
61117- /*
61118 * Link in the new vma and copy the page table entries.
61119 */
61120 *pprev = tmp;
61121@@ -421,6 +441,31 @@ static int dup_mmap(struct mm_struct *mm
61122 if (retval)
61123 goto out;
61124 }
61125+
61126+#ifdef CONFIG_PAX_SEGMEXEC
61127+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
61128+ struct vm_area_struct *mpnt_m;
61129+
61130+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
61131+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
61132+
61133+ if (!mpnt->vm_mirror)
61134+ continue;
61135+
61136+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
61137+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
61138+ mpnt->vm_mirror = mpnt_m;
61139+ } else {
61140+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
61141+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
61142+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
61143+ mpnt->vm_mirror->vm_mirror = mpnt;
61144+ }
61145+ }
61146+ BUG_ON(mpnt_m);
61147+ }
61148+#endif
61149+
61150 /* a new mm has just been created */
61151 arch_dup_mmap(oldmm, mm);
61152 retval = 0;
61153@@ -429,14 +474,6 @@ out:
61154 flush_tlb_mm(oldmm);
61155 up_write(&oldmm->mmap_sem);
61156 return retval;
61157-fail_nomem_anon_vma_fork:
61158- mpol_put(pol);
61159-fail_nomem_policy:
61160- kmem_cache_free(vm_area_cachep, tmp);
61161-fail_nomem:
61162- retval = -ENOMEM;
61163- vm_unacct_memory(charge);
61164- goto out;
61165 }
61166
61167 static inline int mm_alloc_pgd(struct mm_struct * mm)
61168@@ -836,13 +873,14 @@ static int copy_fs(unsigned long clone_f
61169 spin_unlock(&fs->lock);
61170 return -EAGAIN;
61171 }
61172- fs->users++;
61173+ atomic_inc(&fs->users);
61174 spin_unlock(&fs->lock);
61175 return 0;
61176 }
61177 tsk->fs = copy_fs_struct(fs);
61178 if (!tsk->fs)
61179 return -ENOMEM;
61180+ gr_set_chroot_entries(tsk, &tsk->fs->root);
61181 return 0;
61182 }
61183
61184@@ -1104,12 +1142,16 @@ static struct task_struct *copy_process(
61185 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
61186 #endif
61187 retval = -EAGAIN;
61188+
61189+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
61190+
61191 if (atomic_read(&p->real_cred->user->processes) >=
61192 task_rlimit(p, RLIMIT_NPROC)) {
61193- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
61194- p->real_cred->user != INIT_USER)
61195+ if (p->real_cred->user != INIT_USER &&
61196+ !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
61197 goto bad_fork_free;
61198 }
61199+ current->flags &= ~PF_NPROC_EXCEEDED;
61200
61201 retval = copy_creds(p, clone_flags);
61202 if (retval < 0)
61203@@ -1250,6 +1292,8 @@ static struct task_struct *copy_process(
61204 if (clone_flags & CLONE_THREAD)
61205 p->tgid = current->tgid;
61206
61207+ gr_copy_label(p);
61208+
61209 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
61210 /*
61211 * Clear TID on mm_release()?
61212@@ -1414,6 +1458,8 @@ bad_fork_cleanup_count:
61213 bad_fork_free:
61214 free_task(p);
61215 fork_out:
61216+ gr_log_forkfail(retval);
61217+
61218 return ERR_PTR(retval);
61219 }
61220
61221@@ -1502,6 +1548,8 @@ long do_fork(unsigned long clone_flags,
61222 if (clone_flags & CLONE_PARENT_SETTID)
61223 put_user(nr, parent_tidptr);
61224
61225+ gr_handle_brute_check();
61226+
61227 if (clone_flags & CLONE_VFORK) {
61228 p->vfork_done = &vfork;
61229 init_completion(&vfork);
61230@@ -1610,7 +1658,7 @@ static int unshare_fs(unsigned long unsh
61231 return 0;
61232
61233 /* don't need lock here; in the worst case we'll do useless copy */
61234- if (fs->users == 1)
61235+ if (atomic_read(&fs->users) == 1)
61236 return 0;
61237
61238 *new_fsp = copy_fs_struct(fs);
61239@@ -1697,7 +1745,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
61240 fs = current->fs;
61241 spin_lock(&fs->lock);
61242 current->fs = new_fs;
61243- if (--fs->users)
61244+ gr_set_chroot_entries(current, &current->fs->root);
61245+ if (atomic_dec_return(&fs->users))
61246 new_fs = NULL;
61247 else
61248 new_fs = fs;
61249diff -urNp linux-3.0.4/kernel/futex.c linux-3.0.4/kernel/futex.c
61250--- linux-3.0.4/kernel/futex.c 2011-09-02 18:11:21.000000000 -0400
61251+++ linux-3.0.4/kernel/futex.c 2011-08-23 21:48:14.000000000 -0400
61252@@ -54,6 +54,7 @@
61253 #include <linux/mount.h>
61254 #include <linux/pagemap.h>
61255 #include <linux/syscalls.h>
61256+#include <linux/ptrace.h>
61257 #include <linux/signal.h>
61258 #include <linux/module.h>
61259 #include <linux/magic.h>
61260@@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fsh
61261 struct page *page, *page_head;
61262 int err, ro = 0;
61263
61264+#ifdef CONFIG_PAX_SEGMEXEC
61265+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
61266+ return -EFAULT;
61267+#endif
61268+
61269 /*
61270 * The futex address must be "naturally" aligned.
61271 */
61272@@ -1863,6 +1869,8 @@ static int futex_wait(u32 __user *uaddr,
61273 struct futex_q q = futex_q_init;
61274 int ret;
61275
61276+ pax_track_stack();
61277+
61278 if (!bitset)
61279 return -EINVAL;
61280 q.bitset = bitset;
61281@@ -2259,6 +2267,8 @@ static int futex_wait_requeue_pi(u32 __u
61282 struct futex_q q = futex_q_init;
61283 int res, ret;
61284
61285+ pax_track_stack();
61286+
61287 if (!bitset)
61288 return -EINVAL;
61289
61290@@ -2431,7 +2441,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
61291 {
61292 struct robust_list_head __user *head;
61293 unsigned long ret;
61294+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
61295 const struct cred *cred = current_cred(), *pcred;
61296+#endif
61297
61298 if (!futex_cmpxchg_enabled)
61299 return -ENOSYS;
61300@@ -2447,6 +2459,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
61301 if (!p)
61302 goto err_unlock;
61303 ret = -EPERM;
61304+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61305+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
61306+ goto err_unlock;
61307+#else
61308 pcred = __task_cred(p);
61309 /* If victim is in different user_ns, then uids are not
61310 comparable, so we must have CAP_SYS_PTRACE */
61311@@ -2461,6 +2477,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
61312 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
61313 goto err_unlock;
61314 ok:
61315+#endif
61316 head = p->robust_list;
61317 rcu_read_unlock();
61318 }
61319@@ -2712,6 +2729,7 @@ static int __init futex_init(void)
61320 {
61321 u32 curval;
61322 int i;
61323+ mm_segment_t oldfs;
61324
61325 /*
61326 * This will fail and we want it. Some arch implementations do
61327@@ -2723,8 +2741,11 @@ static int __init futex_init(void)
61328 * implementation, the non-functional ones will return
61329 * -ENOSYS.
61330 */
61331+ oldfs = get_fs();
61332+ set_fs(USER_DS);
61333 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
61334 futex_cmpxchg_enabled = 1;
61335+ set_fs(oldfs);
61336
61337 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
61338 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
61339diff -urNp linux-3.0.4/kernel/futex_compat.c linux-3.0.4/kernel/futex_compat.c
61340--- linux-3.0.4/kernel/futex_compat.c 2011-07-21 22:17:23.000000000 -0400
61341+++ linux-3.0.4/kernel/futex_compat.c 2011-08-23 21:48:14.000000000 -0400
61342@@ -10,6 +10,7 @@
61343 #include <linux/compat.h>
61344 #include <linux/nsproxy.h>
61345 #include <linux/futex.h>
61346+#include <linux/ptrace.h>
61347
61348 #include <asm/uaccess.h>
61349
61350@@ -136,7 +137,10 @@ compat_sys_get_robust_list(int pid, comp
61351 {
61352 struct compat_robust_list_head __user *head;
61353 unsigned long ret;
61354- const struct cred *cred = current_cred(), *pcred;
61355+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
61356+ const struct cred *cred = current_cred();
61357+ const struct cred *pcred;
61358+#endif
61359
61360 if (!futex_cmpxchg_enabled)
61361 return -ENOSYS;
61362@@ -152,6 +156,10 @@ compat_sys_get_robust_list(int pid, comp
61363 if (!p)
61364 goto err_unlock;
61365 ret = -EPERM;
61366+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61367+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
61368+ goto err_unlock;
61369+#else
61370 pcred = __task_cred(p);
61371 /* If victim is in different user_ns, then uids are not
61372 comparable, so we must have CAP_SYS_PTRACE */
61373@@ -166,6 +174,7 @@ compat_sys_get_robust_list(int pid, comp
61374 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
61375 goto err_unlock;
61376 ok:
61377+#endif
61378 head = p->compat_robust_list;
61379 rcu_read_unlock();
61380 }
61381diff -urNp linux-3.0.4/kernel/gcov/base.c linux-3.0.4/kernel/gcov/base.c
61382--- linux-3.0.4/kernel/gcov/base.c 2011-07-21 22:17:23.000000000 -0400
61383+++ linux-3.0.4/kernel/gcov/base.c 2011-08-23 21:47:56.000000000 -0400
61384@@ -102,11 +102,6 @@ void gcov_enable_events(void)
61385 }
61386
61387 #ifdef CONFIG_MODULES
61388-static inline int within(void *addr, void *start, unsigned long size)
61389-{
61390- return ((addr >= start) && (addr < start + size));
61391-}
61392-
61393 /* Update list and generate events when modules are unloaded. */
61394 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
61395 void *data)
61396@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
61397 prev = NULL;
61398 /* Remove entries located in module from linked list. */
61399 for (info = gcov_info_head; info; info = info->next) {
61400- if (within(info, mod->module_core, mod->core_size)) {
61401+ if (within_module_core_rw((unsigned long)info, mod)) {
61402 if (prev)
61403 prev->next = info->next;
61404 else
61405diff -urNp linux-3.0.4/kernel/hrtimer.c linux-3.0.4/kernel/hrtimer.c
61406--- linux-3.0.4/kernel/hrtimer.c 2011-07-21 22:17:23.000000000 -0400
61407+++ linux-3.0.4/kernel/hrtimer.c 2011-08-23 21:47:56.000000000 -0400
61408@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
61409 local_irq_restore(flags);
61410 }
61411
61412-static void run_hrtimer_softirq(struct softirq_action *h)
61413+static void run_hrtimer_softirq(void)
61414 {
61415 hrtimer_peek_ahead_timers();
61416 }
61417diff -urNp linux-3.0.4/kernel/jump_label.c linux-3.0.4/kernel/jump_label.c
61418--- linux-3.0.4/kernel/jump_label.c 2011-07-21 22:17:23.000000000 -0400
61419+++ linux-3.0.4/kernel/jump_label.c 2011-08-23 21:47:56.000000000 -0400
61420@@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entr
61421
61422 size = (((unsigned long)stop - (unsigned long)start)
61423 / sizeof(struct jump_entry));
61424+ pax_open_kernel();
61425 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
61426+ pax_close_kernel();
61427 }
61428
61429 static void jump_label_update(struct jump_label_key *key, int enable);
61430@@ -297,10 +299,12 @@ static void jump_label_invalidate_module
61431 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
61432 struct jump_entry *iter;
61433
61434+ pax_open_kernel();
61435 for (iter = iter_start; iter < iter_stop; iter++) {
61436 if (within_module_init(iter->code, mod))
61437 iter->code = 0;
61438 }
61439+ pax_close_kernel();
61440 }
61441
61442 static int
61443diff -urNp linux-3.0.4/kernel/kallsyms.c linux-3.0.4/kernel/kallsyms.c
61444--- linux-3.0.4/kernel/kallsyms.c 2011-07-21 22:17:23.000000000 -0400
61445+++ linux-3.0.4/kernel/kallsyms.c 2011-08-23 21:48:14.000000000 -0400
61446@@ -11,6 +11,9 @@
61447 * Changed the compression method from stem compression to "table lookup"
61448 * compression (see scripts/kallsyms.c for a more complete description)
61449 */
61450+#ifdef CONFIG_GRKERNSEC_HIDESYM
61451+#define __INCLUDED_BY_HIDESYM 1
61452+#endif
61453 #include <linux/kallsyms.h>
61454 #include <linux/module.h>
61455 #include <linux/init.h>
61456@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_mark
61457
61458 static inline int is_kernel_inittext(unsigned long addr)
61459 {
61460+ if (system_state != SYSTEM_BOOTING)
61461+ return 0;
61462+
61463 if (addr >= (unsigned long)_sinittext
61464 && addr <= (unsigned long)_einittext)
61465 return 1;
61466 return 0;
61467 }
61468
61469+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
61470+#ifdef CONFIG_MODULES
61471+static inline int is_module_text(unsigned long addr)
61472+{
61473+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
61474+ return 1;
61475+
61476+ addr = ktla_ktva(addr);
61477+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
61478+}
61479+#else
61480+static inline int is_module_text(unsigned long addr)
61481+{
61482+ return 0;
61483+}
61484+#endif
61485+#endif
61486+
61487 static inline int is_kernel_text(unsigned long addr)
61488 {
61489 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
61490@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigne
61491
61492 static inline int is_kernel(unsigned long addr)
61493 {
61494+
61495+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
61496+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
61497+ return 1;
61498+
61499+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
61500+#else
61501 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
61502+#endif
61503+
61504 return 1;
61505 return in_gate_area_no_mm(addr);
61506 }
61507
61508 static int is_ksym_addr(unsigned long addr)
61509 {
61510+
61511+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
61512+ if (is_module_text(addr))
61513+ return 0;
61514+#endif
61515+
61516 if (all_var)
61517 return is_kernel(addr);
61518
61519@@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(st
61520
61521 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
61522 {
61523- iter->name[0] = '\0';
61524 iter->nameoff = get_symbol_offset(new_pos);
61525 iter->pos = new_pos;
61526 }
61527@@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, vo
61528 {
61529 struct kallsym_iter *iter = m->private;
61530
61531+#ifdef CONFIG_GRKERNSEC_HIDESYM
61532+ if (current_uid())
61533+ return 0;
61534+#endif
61535+
61536 /* Some debugging symbols have no name. Ignore them. */
61537 if (!iter->name[0])
61538 return 0;
61539@@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *i
61540 struct kallsym_iter *iter;
61541 int ret;
61542
61543- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
61544+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
61545 if (!iter)
61546 return -ENOMEM;
61547 reset_iter(iter, 0);
61548diff -urNp linux-3.0.4/kernel/kexec.c linux-3.0.4/kernel/kexec.c
61549--- linux-3.0.4/kernel/kexec.c 2011-07-21 22:17:23.000000000 -0400
61550+++ linux-3.0.4/kernel/kexec.c 2011-10-06 04:17:55.000000000 -0400
61551@@ -1033,7 +1033,8 @@ asmlinkage long compat_sys_kexec_load(un
61552 unsigned long flags)
61553 {
61554 struct compat_kexec_segment in;
61555- struct kexec_segment out, __user *ksegments;
61556+ struct kexec_segment out;
61557+ struct kexec_segment __user *ksegments;
61558 unsigned long i, result;
61559
61560 /* Don't allow clients that don't understand the native
61561diff -urNp linux-3.0.4/kernel/kmod.c linux-3.0.4/kernel/kmod.c
61562--- linux-3.0.4/kernel/kmod.c 2011-07-21 22:17:23.000000000 -0400
61563+++ linux-3.0.4/kernel/kmod.c 2011-10-06 04:17:55.000000000 -0400
61564@@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
61565 * If module auto-loading support is disabled then this function
61566 * becomes a no-operation.
61567 */
61568-int __request_module(bool wait, const char *fmt, ...)
61569+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
61570 {
61571- va_list args;
61572 char module_name[MODULE_NAME_LEN];
61573 unsigned int max_modprobes;
61574 int ret;
61575- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
61576+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
61577 static char *envp[] = { "HOME=/",
61578 "TERM=linux",
61579 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
61580@@ -88,9 +87,7 @@ int __request_module(bool wait, const ch
61581 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
61582 static int kmod_loop_msg;
61583
61584- va_start(args, fmt);
61585- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
61586- va_end(args);
61587+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
61588 if (ret >= MODULE_NAME_LEN)
61589 return -ENAMETOOLONG;
61590
61591@@ -98,6 +95,20 @@ int __request_module(bool wait, const ch
61592 if (ret)
61593 return ret;
61594
61595+#ifdef CONFIG_GRKERNSEC_MODHARDEN
61596+ if (!current_uid()) {
61597+ /* hack to workaround consolekit/udisks stupidity */
61598+ read_lock(&tasklist_lock);
61599+ if (!strcmp(current->comm, "mount") &&
61600+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
61601+ read_unlock(&tasklist_lock);
61602+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
61603+ return -EPERM;
61604+ }
61605+ read_unlock(&tasklist_lock);
61606+ }
61607+#endif
61608+
61609 /* If modprobe needs a service that is in a module, we get a recursive
61610 * loop. Limit the number of running kmod threads to max_threads/2 or
61611 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
61612@@ -131,6 +142,47 @@ int __request_module(bool wait, const ch
61613 atomic_dec(&kmod_concurrent);
61614 return ret;
61615 }
61616+
61617+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
61618+{
61619+ va_list args;
61620+ int ret;
61621+
61622+ va_start(args, fmt);
61623+ ret = ____request_module(wait, module_param, fmt, args);
61624+ va_end(args);
61625+
61626+ return ret;
61627+}
61628+
61629+int __request_module(bool wait, const char *fmt, ...)
61630+{
61631+ va_list args;
61632+ int ret;
61633+
61634+#ifdef CONFIG_GRKERNSEC_MODHARDEN
61635+ if (current_uid()) {
61636+ char module_param[MODULE_NAME_LEN];
61637+
61638+ memset(module_param, 0, sizeof(module_param));
61639+
61640+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
61641+
61642+ va_start(args, fmt);
61643+ ret = ____request_module(wait, module_param, fmt, args);
61644+ va_end(args);
61645+
61646+ return ret;
61647+ }
61648+#endif
61649+
61650+ va_start(args, fmt);
61651+ ret = ____request_module(wait, NULL, fmt, args);
61652+ va_end(args);
61653+
61654+ return ret;
61655+}
61656+
61657 EXPORT_SYMBOL(__request_module);
61658 #endif /* CONFIG_MODULES */
61659
61660@@ -220,7 +272,7 @@ static int wait_for_helper(void *data)
61661 *
61662 * Thus the __user pointer cast is valid here.
61663 */
61664- sys_wait4(pid, (int __user *)&ret, 0, NULL);
61665+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
61666
61667 /*
61668 * If ret is 0, either ____call_usermodehelper failed and the
61669diff -urNp linux-3.0.4/kernel/kprobes.c linux-3.0.4/kernel/kprobes.c
61670--- linux-3.0.4/kernel/kprobes.c 2011-07-21 22:17:23.000000000 -0400
61671+++ linux-3.0.4/kernel/kprobes.c 2011-08-23 21:47:56.000000000 -0400
61672@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_
61673 * kernel image and loaded module images reside. This is required
61674 * so x86_64 can correctly handle the %rip-relative fixups.
61675 */
61676- kip->insns = module_alloc(PAGE_SIZE);
61677+ kip->insns = module_alloc_exec(PAGE_SIZE);
61678 if (!kip->insns) {
61679 kfree(kip);
61680 return NULL;
61681@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(st
61682 */
61683 if (!list_is_singular(&kip->list)) {
61684 list_del(&kip->list);
61685- module_free(NULL, kip->insns);
61686+ module_free_exec(NULL, kip->insns);
61687 kfree(kip);
61688 }
61689 return 1;
61690@@ -1936,7 +1936,7 @@ static int __init init_kprobes(void)
61691 {
61692 int i, err = 0;
61693 unsigned long offset = 0, size = 0;
61694- char *modname, namebuf[128];
61695+ char *modname, namebuf[KSYM_NAME_LEN];
61696 const char *symbol_name;
61697 void *addr;
61698 struct kprobe_blackpoint *kb;
61699@@ -2062,7 +2062,7 @@ static int __kprobes show_kprobe_addr(st
61700 const char *sym = NULL;
61701 unsigned int i = *(loff_t *) v;
61702 unsigned long offset = 0;
61703- char *modname, namebuf[128];
61704+ char *modname, namebuf[KSYM_NAME_LEN];
61705
61706 head = &kprobe_table[i];
61707 preempt_disable();
61708diff -urNp linux-3.0.4/kernel/lockdep.c linux-3.0.4/kernel/lockdep.c
61709--- linux-3.0.4/kernel/lockdep.c 2011-07-21 22:17:23.000000000 -0400
61710+++ linux-3.0.4/kernel/lockdep.c 2011-08-23 21:47:56.000000000 -0400
61711@@ -583,6 +583,10 @@ static int static_obj(void *obj)
61712 end = (unsigned long) &_end,
61713 addr = (unsigned long) obj;
61714
61715+#ifdef CONFIG_PAX_KERNEXEC
61716+ start = ktla_ktva(start);
61717+#endif
61718+
61719 /*
61720 * static variable?
61721 */
61722@@ -718,6 +722,7 @@ register_lock_class(struct lockdep_map *
61723 if (!static_obj(lock->key)) {
61724 debug_locks_off();
61725 printk("INFO: trying to register non-static key.\n");
61726+ printk("lock:%pS key:%pS.\n", lock, lock->key);
61727 printk("the code is fine but needs lockdep annotation.\n");
61728 printk("turning off the locking correctness validator.\n");
61729 dump_stack();
61730@@ -2936,7 +2941,7 @@ static int __lock_acquire(struct lockdep
61731 if (!class)
61732 return 0;
61733 }
61734- atomic_inc((atomic_t *)&class->ops);
61735+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
61736 if (very_verbose(class)) {
61737 printk("\nacquire class [%p] %s", class->key, class->name);
61738 if (class->name_version > 1)
61739diff -urNp linux-3.0.4/kernel/lockdep_proc.c linux-3.0.4/kernel/lockdep_proc.c
61740--- linux-3.0.4/kernel/lockdep_proc.c 2011-07-21 22:17:23.000000000 -0400
61741+++ linux-3.0.4/kernel/lockdep_proc.c 2011-08-23 21:47:56.000000000 -0400
61742@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
61743
61744 static void print_name(struct seq_file *m, struct lock_class *class)
61745 {
61746- char str[128];
61747+ char str[KSYM_NAME_LEN];
61748 const char *name = class->name;
61749
61750 if (!name) {
61751diff -urNp linux-3.0.4/kernel/module.c linux-3.0.4/kernel/module.c
61752--- linux-3.0.4/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
61753+++ linux-3.0.4/kernel/module.c 2011-08-23 21:48:14.000000000 -0400
61754@@ -58,6 +58,7 @@
61755 #include <linux/jump_label.h>
61756 #include <linux/pfn.h>
61757 #include <linux/bsearch.h>
61758+#include <linux/grsecurity.h>
61759
61760 #define CREATE_TRACE_POINTS
61761 #include <trace/events/module.h>
61762@@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_not
61763
61764 /* Bounds of module allocation, for speeding __module_address.
61765 * Protected by module_mutex. */
61766-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
61767+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
61768+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
61769
61770 int register_module_notifier(struct notifier_block * nb)
61771 {
61772@@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(cons
61773 return true;
61774
61775 list_for_each_entry_rcu(mod, &modules, list) {
61776- struct symsearch arr[] = {
61777+ struct symsearch modarr[] = {
61778 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
61779 NOT_GPL_ONLY, false },
61780 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
61781@@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(cons
61782 #endif
61783 };
61784
61785- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
61786+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
61787 return true;
61788 }
61789 return false;
61790@@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(
61791 static int percpu_modalloc(struct module *mod,
61792 unsigned long size, unsigned long align)
61793 {
61794- if (align > PAGE_SIZE) {
61795+ if (align-1 >= PAGE_SIZE) {
61796 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
61797 mod->name, align, PAGE_SIZE);
61798 align = PAGE_SIZE;
61799@@ -1166,7 +1168,7 @@ resolve_symbol_wait(struct module *mod,
61800 */
61801 #ifdef CONFIG_SYSFS
61802
61803-#ifdef CONFIG_KALLSYMS
61804+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
61805 static inline bool sect_empty(const Elf_Shdr *sect)
61806 {
61807 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
61808@@ -1632,21 +1634,21 @@ static void set_section_ro_nx(void *base
61809
61810 static void unset_module_core_ro_nx(struct module *mod)
61811 {
61812- set_page_attributes(mod->module_core + mod->core_text_size,
61813- mod->module_core + mod->core_size,
61814+ set_page_attributes(mod->module_core_rw,
61815+ mod->module_core_rw + mod->core_size_rw,
61816 set_memory_x);
61817- set_page_attributes(mod->module_core,
61818- mod->module_core + mod->core_ro_size,
61819+ set_page_attributes(mod->module_core_rx,
61820+ mod->module_core_rx + mod->core_size_rx,
61821 set_memory_rw);
61822 }
61823
61824 static void unset_module_init_ro_nx(struct module *mod)
61825 {
61826- set_page_attributes(mod->module_init + mod->init_text_size,
61827- mod->module_init + mod->init_size,
61828+ set_page_attributes(mod->module_init_rw,
61829+ mod->module_init_rw + mod->init_size_rw,
61830 set_memory_x);
61831- set_page_attributes(mod->module_init,
61832- mod->module_init + mod->init_ro_size,
61833+ set_page_attributes(mod->module_init_rx,
61834+ mod->module_init_rx + mod->init_size_rx,
61835 set_memory_rw);
61836 }
61837
61838@@ -1657,14 +1659,14 @@ void set_all_modules_text_rw(void)
61839
61840 mutex_lock(&module_mutex);
61841 list_for_each_entry_rcu(mod, &modules, list) {
61842- if ((mod->module_core) && (mod->core_text_size)) {
61843- set_page_attributes(mod->module_core,
61844- mod->module_core + mod->core_text_size,
61845+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
61846+ set_page_attributes(mod->module_core_rx,
61847+ mod->module_core_rx + mod->core_size_rx,
61848 set_memory_rw);
61849 }
61850- if ((mod->module_init) && (mod->init_text_size)) {
61851- set_page_attributes(mod->module_init,
61852- mod->module_init + mod->init_text_size,
61853+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
61854+ set_page_attributes(mod->module_init_rx,
61855+ mod->module_init_rx + mod->init_size_rx,
61856 set_memory_rw);
61857 }
61858 }
61859@@ -1678,14 +1680,14 @@ void set_all_modules_text_ro(void)
61860
61861 mutex_lock(&module_mutex);
61862 list_for_each_entry_rcu(mod, &modules, list) {
61863- if ((mod->module_core) && (mod->core_text_size)) {
61864- set_page_attributes(mod->module_core,
61865- mod->module_core + mod->core_text_size,
61866+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
61867+ set_page_attributes(mod->module_core_rx,
61868+ mod->module_core_rx + mod->core_size_rx,
61869 set_memory_ro);
61870 }
61871- if ((mod->module_init) && (mod->init_text_size)) {
61872- set_page_attributes(mod->module_init,
61873- mod->module_init + mod->init_text_size,
61874+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
61875+ set_page_attributes(mod->module_init_rx,
61876+ mod->module_init_rx + mod->init_size_rx,
61877 set_memory_ro);
61878 }
61879 }
61880@@ -1722,16 +1724,19 @@ static void free_module(struct module *m
61881
61882 /* This may be NULL, but that's OK */
61883 unset_module_init_ro_nx(mod);
61884- module_free(mod, mod->module_init);
61885+ module_free(mod, mod->module_init_rw);
61886+ module_free_exec(mod, mod->module_init_rx);
61887 kfree(mod->args);
61888 percpu_modfree(mod);
61889
61890 /* Free lock-classes: */
61891- lockdep_free_key_range(mod->module_core, mod->core_size);
61892+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
61893+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
61894
61895 /* Finally, free the core (containing the module structure) */
61896 unset_module_core_ro_nx(mod);
61897- module_free(mod, mod->module_core);
61898+ module_free_exec(mod, mod->module_core_rx);
61899+ module_free(mod, mod->module_core_rw);
61900
61901 #ifdef CONFIG_MPU
61902 update_protections(current->mm);
61903@@ -1800,10 +1805,31 @@ static int simplify_symbols(struct modul
61904 unsigned int i;
61905 int ret = 0;
61906 const struct kernel_symbol *ksym;
61907+#ifdef CONFIG_GRKERNSEC_MODHARDEN
61908+ int is_fs_load = 0;
61909+ int register_filesystem_found = 0;
61910+ char *p;
61911+
61912+ p = strstr(mod->args, "grsec_modharden_fs");
61913+ if (p) {
61914+ char *endptr = p + strlen("grsec_modharden_fs");
61915+ /* copy \0 as well */
61916+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
61917+ is_fs_load = 1;
61918+ }
61919+#endif
61920
61921 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
61922 const char *name = info->strtab + sym[i].st_name;
61923
61924+#ifdef CONFIG_GRKERNSEC_MODHARDEN
61925+ /* it's a real shame this will never get ripped and copied
61926+ upstream! ;(
61927+ */
61928+ if (is_fs_load && !strcmp(name, "register_filesystem"))
61929+ register_filesystem_found = 1;
61930+#endif
61931+
61932 switch (sym[i].st_shndx) {
61933 case SHN_COMMON:
61934 /* We compiled with -fno-common. These are not
61935@@ -1824,7 +1850,9 @@ static int simplify_symbols(struct modul
61936 ksym = resolve_symbol_wait(mod, info, name);
61937 /* Ok if resolved. */
61938 if (ksym && !IS_ERR(ksym)) {
61939+ pax_open_kernel();
61940 sym[i].st_value = ksym->value;
61941+ pax_close_kernel();
61942 break;
61943 }
61944
61945@@ -1843,11 +1871,20 @@ static int simplify_symbols(struct modul
61946 secbase = (unsigned long)mod_percpu(mod);
61947 else
61948 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
61949+ pax_open_kernel();
61950 sym[i].st_value += secbase;
61951+ pax_close_kernel();
61952 break;
61953 }
61954 }
61955
61956+#ifdef CONFIG_GRKERNSEC_MODHARDEN
61957+ if (is_fs_load && !register_filesystem_found) {
61958+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
61959+ ret = -EPERM;
61960+ }
61961+#endif
61962+
61963 return ret;
61964 }
61965
61966@@ -1931,22 +1968,12 @@ static void layout_sections(struct modul
61967 || s->sh_entsize != ~0UL
61968 || strstarts(sname, ".init"))
61969 continue;
61970- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
61971+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
61972+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
61973+ else
61974+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
61975 DEBUGP("\t%s\n", name);
61976 }
61977- switch (m) {
61978- case 0: /* executable */
61979- mod->core_size = debug_align(mod->core_size);
61980- mod->core_text_size = mod->core_size;
61981- break;
61982- case 1: /* RO: text and ro-data */
61983- mod->core_size = debug_align(mod->core_size);
61984- mod->core_ro_size = mod->core_size;
61985- break;
61986- case 3: /* whole core */
61987- mod->core_size = debug_align(mod->core_size);
61988- break;
61989- }
61990 }
61991
61992 DEBUGP("Init section allocation order:\n");
61993@@ -1960,23 +1987,13 @@ static void layout_sections(struct modul
61994 || s->sh_entsize != ~0UL
61995 || !strstarts(sname, ".init"))
61996 continue;
61997- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
61998- | INIT_OFFSET_MASK);
61999+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
62000+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
62001+ else
62002+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
62003+ s->sh_entsize |= INIT_OFFSET_MASK;
62004 DEBUGP("\t%s\n", sname);
62005 }
62006- switch (m) {
62007- case 0: /* executable */
62008- mod->init_size = debug_align(mod->init_size);
62009- mod->init_text_size = mod->init_size;
62010- break;
62011- case 1: /* RO: text and ro-data */
62012- mod->init_size = debug_align(mod->init_size);
62013- mod->init_ro_size = mod->init_size;
62014- break;
62015- case 3: /* whole init */
62016- mod->init_size = debug_align(mod->init_size);
62017- break;
62018- }
62019 }
62020 }
62021
62022@@ -2141,7 +2158,7 @@ static void layout_symtab(struct module
62023
62024 /* Put symbol section at end of init part of module. */
62025 symsect->sh_flags |= SHF_ALLOC;
62026- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
62027+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
62028 info->index.sym) | INIT_OFFSET_MASK;
62029 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
62030
62031@@ -2158,19 +2175,19 @@ static void layout_symtab(struct module
62032 }
62033
62034 /* Append room for core symbols at end of core part. */
62035- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
62036- mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
62037+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
62038+ mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
62039
62040 /* Put string table section at end of init part of module. */
62041 strsect->sh_flags |= SHF_ALLOC;
62042- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
62043+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
62044 info->index.str) | INIT_OFFSET_MASK;
62045 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
62046
62047 /* Append room for core symbols' strings at end of core part. */
62048- info->stroffs = mod->core_size;
62049+ info->stroffs = mod->core_size_rx;
62050 __set_bit(0, info->strmap);
62051- mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
62052+ mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
62053 }
62054
62055 static void add_kallsyms(struct module *mod, const struct load_info *info)
62056@@ -2186,11 +2203,13 @@ static void add_kallsyms(struct module *
62057 /* Make sure we get permanent strtab: don't use info->strtab. */
62058 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
62059
62060+ pax_open_kernel();
62061+
62062 /* Set types up while we still have access to sections. */
62063 for (i = 0; i < mod->num_symtab; i++)
62064 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
62065
62066- mod->core_symtab = dst = mod->module_core + info->symoffs;
62067+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
62068 src = mod->symtab;
62069 *dst = *src;
62070 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
62071@@ -2203,10 +2222,12 @@ static void add_kallsyms(struct module *
62072 }
62073 mod->core_num_syms = ndst;
62074
62075- mod->core_strtab = s = mod->module_core + info->stroffs;
62076+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
62077 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
62078 if (test_bit(i, info->strmap))
62079 *++s = mod->strtab[i];
62080+
62081+ pax_close_kernel();
62082 }
62083 #else
62084 static inline void layout_symtab(struct module *mod, struct load_info *info)
62085@@ -2235,17 +2256,33 @@ static void dynamic_debug_remove(struct
62086 ddebug_remove_module(debug->modname);
62087 }
62088
62089-static void *module_alloc_update_bounds(unsigned long size)
62090+static void *module_alloc_update_bounds_rw(unsigned long size)
62091 {
62092 void *ret = module_alloc(size);
62093
62094 if (ret) {
62095 mutex_lock(&module_mutex);
62096 /* Update module bounds. */
62097- if ((unsigned long)ret < module_addr_min)
62098- module_addr_min = (unsigned long)ret;
62099- if ((unsigned long)ret + size > module_addr_max)
62100- module_addr_max = (unsigned long)ret + size;
62101+ if ((unsigned long)ret < module_addr_min_rw)
62102+ module_addr_min_rw = (unsigned long)ret;
62103+ if ((unsigned long)ret + size > module_addr_max_rw)
62104+ module_addr_max_rw = (unsigned long)ret + size;
62105+ mutex_unlock(&module_mutex);
62106+ }
62107+ return ret;
62108+}
62109+
62110+static void *module_alloc_update_bounds_rx(unsigned long size)
62111+{
62112+ void *ret = module_alloc_exec(size);
62113+
62114+ if (ret) {
62115+ mutex_lock(&module_mutex);
62116+ /* Update module bounds. */
62117+ if ((unsigned long)ret < module_addr_min_rx)
62118+ module_addr_min_rx = (unsigned long)ret;
62119+ if ((unsigned long)ret + size > module_addr_max_rx)
62120+ module_addr_max_rx = (unsigned long)ret + size;
62121 mutex_unlock(&module_mutex);
62122 }
62123 return ret;
62124@@ -2538,7 +2575,7 @@ static int move_module(struct module *mo
62125 void *ptr;
62126
62127 /* Do the allocs. */
62128- ptr = module_alloc_update_bounds(mod->core_size);
62129+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
62130 /*
62131 * The pointer to this block is stored in the module structure
62132 * which is inside the block. Just mark it as not being a
62133@@ -2548,23 +2585,50 @@ static int move_module(struct module *mo
62134 if (!ptr)
62135 return -ENOMEM;
62136
62137- memset(ptr, 0, mod->core_size);
62138- mod->module_core = ptr;
62139+ memset(ptr, 0, mod->core_size_rw);
62140+ mod->module_core_rw = ptr;
62141
62142- ptr = module_alloc_update_bounds(mod->init_size);
62143+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
62144 /*
62145 * The pointer to this block is stored in the module structure
62146 * which is inside the block. This block doesn't need to be
62147 * scanned as it contains data and code that will be freed
62148 * after the module is initialized.
62149 */
62150- kmemleak_ignore(ptr);
62151- if (!ptr && mod->init_size) {
62152- module_free(mod, mod->module_core);
62153+ kmemleak_not_leak(ptr);
62154+ if (!ptr && mod->init_size_rw) {
62155+ module_free(mod, mod->module_core_rw);
62156 return -ENOMEM;
62157 }
62158- memset(ptr, 0, mod->init_size);
62159- mod->module_init = ptr;
62160+ memset(ptr, 0, mod->init_size_rw);
62161+ mod->module_init_rw = ptr;
62162+
62163+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
62164+ kmemleak_not_leak(ptr);
62165+ if (!ptr) {
62166+ module_free(mod, mod->module_init_rw);
62167+ module_free(mod, mod->module_core_rw);
62168+ return -ENOMEM;
62169+ }
62170+
62171+ pax_open_kernel();
62172+ memset(ptr, 0, mod->core_size_rx);
62173+ pax_close_kernel();
62174+ mod->module_core_rx = ptr;
62175+
62176+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
62177+ kmemleak_not_leak(ptr);
62178+ if (!ptr && mod->init_size_rx) {
62179+ module_free_exec(mod, mod->module_core_rx);
62180+ module_free(mod, mod->module_init_rw);
62181+ module_free(mod, mod->module_core_rw);
62182+ return -ENOMEM;
62183+ }
62184+
62185+ pax_open_kernel();
62186+ memset(ptr, 0, mod->init_size_rx);
62187+ pax_close_kernel();
62188+ mod->module_init_rx = ptr;
62189
62190 /* Transfer each section which specifies SHF_ALLOC */
62191 DEBUGP("final section addresses:\n");
62192@@ -2575,16 +2639,45 @@ static int move_module(struct module *mo
62193 if (!(shdr->sh_flags & SHF_ALLOC))
62194 continue;
62195
62196- if (shdr->sh_entsize & INIT_OFFSET_MASK)
62197- dest = mod->module_init
62198- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
62199- else
62200- dest = mod->module_core + shdr->sh_entsize;
62201+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
62202+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
62203+ dest = mod->module_init_rw
62204+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
62205+ else
62206+ dest = mod->module_init_rx
62207+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
62208+ } else {
62209+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
62210+ dest = mod->module_core_rw + shdr->sh_entsize;
62211+ else
62212+ dest = mod->module_core_rx + shdr->sh_entsize;
62213+ }
62214+
62215+ if (shdr->sh_type != SHT_NOBITS) {
62216+
62217+#ifdef CONFIG_PAX_KERNEXEC
62218+#ifdef CONFIG_X86_64
62219+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
62220+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
62221+#endif
62222+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
62223+ pax_open_kernel();
62224+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
62225+ pax_close_kernel();
62226+ } else
62227+#endif
62228
62229- if (shdr->sh_type != SHT_NOBITS)
62230 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
62231+ }
62232 /* Update sh_addr to point to copy in image. */
62233- shdr->sh_addr = (unsigned long)dest;
62234+
62235+#ifdef CONFIG_PAX_KERNEXEC
62236+ if (shdr->sh_flags & SHF_EXECINSTR)
62237+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
62238+ else
62239+#endif
62240+
62241+ shdr->sh_addr = (unsigned long)dest;
62242 DEBUGP("\t0x%lx %s\n",
62243 shdr->sh_addr, info->secstrings + shdr->sh_name);
62244 }
62245@@ -2635,12 +2728,12 @@ static void flush_module_icache(const st
62246 * Do it before processing of module parameters, so the module
62247 * can provide parameter accessor functions of its own.
62248 */
62249- if (mod->module_init)
62250- flush_icache_range((unsigned long)mod->module_init,
62251- (unsigned long)mod->module_init
62252- + mod->init_size);
62253- flush_icache_range((unsigned long)mod->module_core,
62254- (unsigned long)mod->module_core + mod->core_size);
62255+ if (mod->module_init_rx)
62256+ flush_icache_range((unsigned long)mod->module_init_rx,
62257+ (unsigned long)mod->module_init_rx
62258+ + mod->init_size_rx);
62259+ flush_icache_range((unsigned long)mod->module_core_rx,
62260+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
62261
62262 set_fs(old_fs);
62263 }
62264@@ -2712,8 +2805,10 @@ static void module_deallocate(struct mod
62265 {
62266 kfree(info->strmap);
62267 percpu_modfree(mod);
62268- module_free(mod, mod->module_init);
62269- module_free(mod, mod->module_core);
62270+ module_free_exec(mod, mod->module_init_rx);
62271+ module_free_exec(mod, mod->module_core_rx);
62272+ module_free(mod, mod->module_init_rw);
62273+ module_free(mod, mod->module_core_rw);
62274 }
62275
62276 static int post_relocation(struct module *mod, const struct load_info *info)
62277@@ -2770,9 +2865,38 @@ static struct module *load_module(void _
62278 if (err)
62279 goto free_unload;
62280
62281+ /* Now copy in args */
62282+ mod->args = strndup_user(uargs, ~0UL >> 1);
62283+ if (IS_ERR(mod->args)) {
62284+ err = PTR_ERR(mod->args);
62285+ goto free_unload;
62286+ }
62287+
62288 /* Set up MODINFO_ATTR fields */
62289 setup_modinfo(mod, &info);
62290
62291+#ifdef CONFIG_GRKERNSEC_MODHARDEN
62292+ {
62293+ char *p, *p2;
62294+
62295+ if (strstr(mod->args, "grsec_modharden_netdev")) {
62296+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
62297+ err = -EPERM;
62298+ goto free_modinfo;
62299+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
62300+ p += strlen("grsec_modharden_normal");
62301+ p2 = strstr(p, "_");
62302+ if (p2) {
62303+ *p2 = '\0';
62304+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
62305+ *p2 = '_';
62306+ }
62307+ err = -EPERM;
62308+ goto free_modinfo;
62309+ }
62310+ }
62311+#endif
62312+
62313 /* Fix up syms, so that st_value is a pointer to location. */
62314 err = simplify_symbols(mod, &info);
62315 if (err < 0)
62316@@ -2788,13 +2912,6 @@ static struct module *load_module(void _
62317
62318 flush_module_icache(mod);
62319
62320- /* Now copy in args */
62321- mod->args = strndup_user(uargs, ~0UL >> 1);
62322- if (IS_ERR(mod->args)) {
62323- err = PTR_ERR(mod->args);
62324- goto free_arch_cleanup;
62325- }
62326-
62327 /* Mark state as coming so strong_try_module_get() ignores us. */
62328 mod->state = MODULE_STATE_COMING;
62329
62330@@ -2854,11 +2971,10 @@ static struct module *load_module(void _
62331 unlock:
62332 mutex_unlock(&module_mutex);
62333 synchronize_sched();
62334- kfree(mod->args);
62335- free_arch_cleanup:
62336 module_arch_cleanup(mod);
62337 free_modinfo:
62338 free_modinfo(mod);
62339+ kfree(mod->args);
62340 free_unload:
62341 module_unload_free(mod);
62342 free_module:
62343@@ -2899,16 +3015,16 @@ SYSCALL_DEFINE3(init_module, void __user
62344 MODULE_STATE_COMING, mod);
62345
62346 /* Set RO and NX regions for core */
62347- set_section_ro_nx(mod->module_core,
62348- mod->core_text_size,
62349- mod->core_ro_size,
62350- mod->core_size);
62351+ set_section_ro_nx(mod->module_core_rx,
62352+ mod->core_size_rx,
62353+ mod->core_size_rx,
62354+ mod->core_size_rx);
62355
62356 /* Set RO and NX regions for init */
62357- set_section_ro_nx(mod->module_init,
62358- mod->init_text_size,
62359- mod->init_ro_size,
62360- mod->init_size);
62361+ set_section_ro_nx(mod->module_init_rx,
62362+ mod->init_size_rx,
62363+ mod->init_size_rx,
62364+ mod->init_size_rx);
62365
62366 do_mod_ctors(mod);
62367 /* Start the module */
62368@@ -2954,11 +3070,12 @@ SYSCALL_DEFINE3(init_module, void __user
62369 mod->strtab = mod->core_strtab;
62370 #endif
62371 unset_module_init_ro_nx(mod);
62372- module_free(mod, mod->module_init);
62373- mod->module_init = NULL;
62374- mod->init_size = 0;
62375- mod->init_ro_size = 0;
62376- mod->init_text_size = 0;
62377+ module_free(mod, mod->module_init_rw);
62378+ module_free_exec(mod, mod->module_init_rx);
62379+ mod->module_init_rw = NULL;
62380+ mod->module_init_rx = NULL;
62381+ mod->init_size_rw = 0;
62382+ mod->init_size_rx = 0;
62383 mutex_unlock(&module_mutex);
62384
62385 return 0;
62386@@ -2989,10 +3106,16 @@ static const char *get_ksymbol(struct mo
62387 unsigned long nextval;
62388
62389 /* At worse, next value is at end of module */
62390- if (within_module_init(addr, mod))
62391- nextval = (unsigned long)mod->module_init+mod->init_text_size;
62392+ if (within_module_init_rx(addr, mod))
62393+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
62394+ else if (within_module_init_rw(addr, mod))
62395+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
62396+ else if (within_module_core_rx(addr, mod))
62397+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
62398+ else if (within_module_core_rw(addr, mod))
62399+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
62400 else
62401- nextval = (unsigned long)mod->module_core+mod->core_text_size;
62402+ return NULL;
62403
62404 /* Scan for closest preceding symbol, and next symbol. (ELF
62405 starts real symbols at 1). */
62406@@ -3238,7 +3361,7 @@ static int m_show(struct seq_file *m, vo
62407 char buf[8];
62408
62409 seq_printf(m, "%s %u",
62410- mod->name, mod->init_size + mod->core_size);
62411+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
62412 print_unload_info(m, mod);
62413
62414 /* Informative for users. */
62415@@ -3247,7 +3370,7 @@ static int m_show(struct seq_file *m, vo
62416 mod->state == MODULE_STATE_COMING ? "Loading":
62417 "Live");
62418 /* Used by oprofile and other similar tools. */
62419- seq_printf(m, " 0x%pK", mod->module_core);
62420+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
62421
62422 /* Taints info */
62423 if (mod->taints)
62424@@ -3283,7 +3406,17 @@ static const struct file_operations proc
62425
62426 static int __init proc_modules_init(void)
62427 {
62428+#ifndef CONFIG_GRKERNSEC_HIDESYM
62429+#ifdef CONFIG_GRKERNSEC_PROC_USER
62430+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
62431+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62432+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
62433+#else
62434 proc_create("modules", 0, NULL, &proc_modules_operations);
62435+#endif
62436+#else
62437+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
62438+#endif
62439 return 0;
62440 }
62441 module_init(proc_modules_init);
62442@@ -3342,12 +3475,12 @@ struct module *__module_address(unsigned
62443 {
62444 struct module *mod;
62445
62446- if (addr < module_addr_min || addr > module_addr_max)
62447+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
62448+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
62449 return NULL;
62450
62451 list_for_each_entry_rcu(mod, &modules, list)
62452- if (within_module_core(addr, mod)
62453- || within_module_init(addr, mod))
62454+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
62455 return mod;
62456 return NULL;
62457 }
62458@@ -3381,11 +3514,20 @@ bool is_module_text_address(unsigned lon
62459 */
62460 struct module *__module_text_address(unsigned long addr)
62461 {
62462- struct module *mod = __module_address(addr);
62463+ struct module *mod;
62464+
62465+#ifdef CONFIG_X86_32
62466+ addr = ktla_ktva(addr);
62467+#endif
62468+
62469+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
62470+ return NULL;
62471+
62472+ mod = __module_address(addr);
62473+
62474 if (mod) {
62475 /* Make sure it's within the text section. */
62476- if (!within(addr, mod->module_init, mod->init_text_size)
62477- && !within(addr, mod->module_core, mod->core_text_size))
62478+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
62479 mod = NULL;
62480 }
62481 return mod;
62482diff -urNp linux-3.0.4/kernel/mutex.c linux-3.0.4/kernel/mutex.c
62483--- linux-3.0.4/kernel/mutex.c 2011-07-21 22:17:23.000000000 -0400
62484+++ linux-3.0.4/kernel/mutex.c 2011-08-23 21:47:56.000000000 -0400
62485@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock,
62486 spin_lock_mutex(&lock->wait_lock, flags);
62487
62488 debug_mutex_lock_common(lock, &waiter);
62489- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
62490+ debug_mutex_add_waiter(lock, &waiter, task);
62491
62492 /* add waiting tasks to the end of the waitqueue (FIFO): */
62493 list_add_tail(&waiter.list, &lock->wait_list);
62494@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock,
62495 * TASK_UNINTERRUPTIBLE case.)
62496 */
62497 if (unlikely(signal_pending_state(state, task))) {
62498- mutex_remove_waiter(lock, &waiter,
62499- task_thread_info(task));
62500+ mutex_remove_waiter(lock, &waiter, task);
62501 mutex_release(&lock->dep_map, 1, ip);
62502 spin_unlock_mutex(&lock->wait_lock, flags);
62503
62504@@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock,
62505 done:
62506 lock_acquired(&lock->dep_map, ip);
62507 /* got the lock - rejoice! */
62508- mutex_remove_waiter(lock, &waiter, current_thread_info());
62509+ mutex_remove_waiter(lock, &waiter, task);
62510 mutex_set_owner(lock);
62511
62512 /* set it to 0 if there are no waiters left: */
62513diff -urNp linux-3.0.4/kernel/mutex-debug.c linux-3.0.4/kernel/mutex-debug.c
62514--- linux-3.0.4/kernel/mutex-debug.c 2011-07-21 22:17:23.000000000 -0400
62515+++ linux-3.0.4/kernel/mutex-debug.c 2011-08-23 21:47:56.000000000 -0400
62516@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
62517 }
62518
62519 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
62520- struct thread_info *ti)
62521+ struct task_struct *task)
62522 {
62523 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
62524
62525 /* Mark the current thread as blocked on the lock: */
62526- ti->task->blocked_on = waiter;
62527+ task->blocked_on = waiter;
62528 }
62529
62530 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
62531- struct thread_info *ti)
62532+ struct task_struct *task)
62533 {
62534 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
62535- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
62536- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
62537- ti->task->blocked_on = NULL;
62538+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
62539+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
62540+ task->blocked_on = NULL;
62541
62542 list_del_init(&waiter->list);
62543 waiter->task = NULL;
62544diff -urNp linux-3.0.4/kernel/mutex-debug.h linux-3.0.4/kernel/mutex-debug.h
62545--- linux-3.0.4/kernel/mutex-debug.h 2011-07-21 22:17:23.000000000 -0400
62546+++ linux-3.0.4/kernel/mutex-debug.h 2011-08-23 21:47:56.000000000 -0400
62547@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(stru
62548 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
62549 extern void debug_mutex_add_waiter(struct mutex *lock,
62550 struct mutex_waiter *waiter,
62551- struct thread_info *ti);
62552+ struct task_struct *task);
62553 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
62554- struct thread_info *ti);
62555+ struct task_struct *task);
62556 extern void debug_mutex_unlock(struct mutex *lock);
62557 extern void debug_mutex_init(struct mutex *lock, const char *name,
62558 struct lock_class_key *key);
62559diff -urNp linux-3.0.4/kernel/padata.c linux-3.0.4/kernel/padata.c
62560--- linux-3.0.4/kernel/padata.c 2011-07-21 22:17:23.000000000 -0400
62561+++ linux-3.0.4/kernel/padata.c 2011-08-23 21:47:56.000000000 -0400
62562@@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_ins
62563 padata->pd = pd;
62564 padata->cb_cpu = cb_cpu;
62565
62566- if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
62567- atomic_set(&pd->seq_nr, -1);
62568+ if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
62569+ atomic_set_unchecked(&pd->seq_nr, -1);
62570
62571- padata->seq_nr = atomic_inc_return(&pd->seq_nr);
62572+ padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
62573
62574 target_cpu = padata_cpu_hash(padata);
62575 queue = per_cpu_ptr(pd->pqueue, target_cpu);
62576@@ -444,7 +444,7 @@ static struct parallel_data *padata_allo
62577 padata_init_pqueues(pd);
62578 padata_init_squeues(pd);
62579 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
62580- atomic_set(&pd->seq_nr, -1);
62581+ atomic_set_unchecked(&pd->seq_nr, -1);
62582 atomic_set(&pd->reorder_objects, 0);
62583 atomic_set(&pd->refcnt, 0);
62584 pd->pinst = pinst;
62585diff -urNp linux-3.0.4/kernel/panic.c linux-3.0.4/kernel/panic.c
62586--- linux-3.0.4/kernel/panic.c 2011-07-21 22:17:23.000000000 -0400
62587+++ linux-3.0.4/kernel/panic.c 2011-08-23 21:48:14.000000000 -0400
62588@@ -369,7 +369,7 @@ static void warn_slowpath_common(const c
62589 const char *board;
62590
62591 printk(KERN_WARNING "------------[ cut here ]------------\n");
62592- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
62593+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
62594 board = dmi_get_system_info(DMI_PRODUCT_NAME);
62595 if (board)
62596 printk(KERN_WARNING "Hardware name: %s\n", board);
62597@@ -424,7 +424,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
62598 */
62599 void __stack_chk_fail(void)
62600 {
62601- panic("stack-protector: Kernel stack is corrupted in: %p\n",
62602+ dump_stack();
62603+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
62604 __builtin_return_address(0));
62605 }
62606 EXPORT_SYMBOL(__stack_chk_fail);
62607diff -urNp linux-3.0.4/kernel/pid.c linux-3.0.4/kernel/pid.c
62608--- linux-3.0.4/kernel/pid.c 2011-07-21 22:17:23.000000000 -0400
62609+++ linux-3.0.4/kernel/pid.c 2011-08-23 21:48:14.000000000 -0400
62610@@ -33,6 +33,7 @@
62611 #include <linux/rculist.h>
62612 #include <linux/bootmem.h>
62613 #include <linux/hash.h>
62614+#include <linux/security.h>
62615 #include <linux/pid_namespace.h>
62616 #include <linux/init_task.h>
62617 #include <linux/syscalls.h>
62618@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
62619
62620 int pid_max = PID_MAX_DEFAULT;
62621
62622-#define RESERVED_PIDS 300
62623+#define RESERVED_PIDS 500
62624
62625 int pid_max_min = RESERVED_PIDS + 1;
62626 int pid_max_max = PID_MAX_LIMIT;
62627@@ -419,8 +420,15 @@ EXPORT_SYMBOL(pid_task);
62628 */
62629 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
62630 {
62631+ struct task_struct *task;
62632+
62633 rcu_lockdep_assert(rcu_read_lock_held());
62634- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
62635+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
62636+
62637+ if (gr_pid_is_chrooted(task))
62638+ return NULL;
62639+
62640+ return task;
62641 }
62642
62643 struct task_struct *find_task_by_vpid(pid_t vnr)
62644@@ -428,6 +436,12 @@ struct task_struct *find_task_by_vpid(pi
62645 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
62646 }
62647
62648+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
62649+{
62650+ rcu_lockdep_assert(rcu_read_lock_held());
62651+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
62652+}
62653+
62654 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
62655 {
62656 struct pid *pid;
62657diff -urNp linux-3.0.4/kernel/posix-cpu-timers.c linux-3.0.4/kernel/posix-cpu-timers.c
62658--- linux-3.0.4/kernel/posix-cpu-timers.c 2011-07-21 22:17:23.000000000 -0400
62659+++ linux-3.0.4/kernel/posix-cpu-timers.c 2011-08-23 21:48:14.000000000 -0400
62660@@ -6,6 +6,7 @@
62661 #include <linux/posix-timers.h>
62662 #include <linux/errno.h>
62663 #include <linux/math64.h>
62664+#include <linux/security.h>
62665 #include <asm/uaccess.h>
62666 #include <linux/kernel_stat.h>
62667 #include <trace/events/timer.h>
62668@@ -1604,14 +1605,14 @@ struct k_clock clock_posix_cpu = {
62669
62670 static __init int init_posix_cpu_timers(void)
62671 {
62672- struct k_clock process = {
62673+ static struct k_clock process = {
62674 .clock_getres = process_cpu_clock_getres,
62675 .clock_get = process_cpu_clock_get,
62676 .timer_create = process_cpu_timer_create,
62677 .nsleep = process_cpu_nsleep,
62678 .nsleep_restart = process_cpu_nsleep_restart,
62679 };
62680- struct k_clock thread = {
62681+ static struct k_clock thread = {
62682 .clock_getres = thread_cpu_clock_getres,
62683 .clock_get = thread_cpu_clock_get,
62684 .timer_create = thread_cpu_timer_create,
62685diff -urNp linux-3.0.4/kernel/posix-timers.c linux-3.0.4/kernel/posix-timers.c
62686--- linux-3.0.4/kernel/posix-timers.c 2011-07-21 22:17:23.000000000 -0400
62687+++ linux-3.0.4/kernel/posix-timers.c 2011-08-23 21:48:14.000000000 -0400
62688@@ -43,6 +43,7 @@
62689 #include <linux/idr.h>
62690 #include <linux/posix-clock.h>
62691 #include <linux/posix-timers.h>
62692+#include <linux/grsecurity.h>
62693 #include <linux/syscalls.h>
62694 #include <linux/wait.h>
62695 #include <linux/workqueue.h>
62696@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
62697 * which we beg off on and pass to do_sys_settimeofday().
62698 */
62699
62700-static struct k_clock posix_clocks[MAX_CLOCKS];
62701+static struct k_clock *posix_clocks[MAX_CLOCKS];
62702
62703 /*
62704 * These ones are defined below.
62705@@ -227,7 +228,7 @@ static int posix_get_boottime(const cloc
62706 */
62707 static __init int init_posix_timers(void)
62708 {
62709- struct k_clock clock_realtime = {
62710+ static struct k_clock clock_realtime = {
62711 .clock_getres = hrtimer_get_res,
62712 .clock_get = posix_clock_realtime_get,
62713 .clock_set = posix_clock_realtime_set,
62714@@ -239,7 +240,7 @@ static __init int init_posix_timers(void
62715 .timer_get = common_timer_get,
62716 .timer_del = common_timer_del,
62717 };
62718- struct k_clock clock_monotonic = {
62719+ static struct k_clock clock_monotonic = {
62720 .clock_getres = hrtimer_get_res,
62721 .clock_get = posix_ktime_get_ts,
62722 .nsleep = common_nsleep,
62723@@ -249,19 +250,19 @@ static __init int init_posix_timers(void
62724 .timer_get = common_timer_get,
62725 .timer_del = common_timer_del,
62726 };
62727- struct k_clock clock_monotonic_raw = {
62728+ static struct k_clock clock_monotonic_raw = {
62729 .clock_getres = hrtimer_get_res,
62730 .clock_get = posix_get_monotonic_raw,
62731 };
62732- struct k_clock clock_realtime_coarse = {
62733+ static struct k_clock clock_realtime_coarse = {
62734 .clock_getres = posix_get_coarse_res,
62735 .clock_get = posix_get_realtime_coarse,
62736 };
62737- struct k_clock clock_monotonic_coarse = {
62738+ static struct k_clock clock_monotonic_coarse = {
62739 .clock_getres = posix_get_coarse_res,
62740 .clock_get = posix_get_monotonic_coarse,
62741 };
62742- struct k_clock clock_boottime = {
62743+ static struct k_clock clock_boottime = {
62744 .clock_getres = hrtimer_get_res,
62745 .clock_get = posix_get_boottime,
62746 .nsleep = common_nsleep,
62747@@ -272,6 +273,8 @@ static __init int init_posix_timers(void
62748 .timer_del = common_timer_del,
62749 };
62750
62751+ pax_track_stack();
62752+
62753 posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
62754 posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
62755 posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
62756@@ -473,7 +476,7 @@ void posix_timers_register_clock(const c
62757 return;
62758 }
62759
62760- posix_clocks[clock_id] = *new_clock;
62761+ posix_clocks[clock_id] = new_clock;
62762 }
62763 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
62764
62765@@ -519,9 +522,9 @@ static struct k_clock *clockid_to_kclock
62766 return (id & CLOCKFD_MASK) == CLOCKFD ?
62767 &clock_posix_dynamic : &clock_posix_cpu;
62768
62769- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
62770+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
62771 return NULL;
62772- return &posix_clocks[id];
62773+ return posix_clocks[id];
62774 }
62775
62776 static int common_timer_create(struct k_itimer *new_timer)
62777@@ -959,6 +962,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
62778 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
62779 return -EFAULT;
62780
62781+ /* only the CLOCK_REALTIME clock can be set, all other clocks
62782+ have their clock_set fptr set to a nosettime dummy function
62783+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
62784+ call common_clock_set, which calls do_sys_settimeofday, which
62785+ we hook
62786+ */
62787+
62788 return kc->clock_set(which_clock, &new_tp);
62789 }
62790
62791diff -urNp linux-3.0.4/kernel/power/poweroff.c linux-3.0.4/kernel/power/poweroff.c
62792--- linux-3.0.4/kernel/power/poweroff.c 2011-07-21 22:17:23.000000000 -0400
62793+++ linux-3.0.4/kernel/power/poweroff.c 2011-08-23 21:47:56.000000000 -0400
62794@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
62795 .enable_mask = SYSRQ_ENABLE_BOOT,
62796 };
62797
62798-static int pm_sysrq_init(void)
62799+static int __init pm_sysrq_init(void)
62800 {
62801 register_sysrq_key('o', &sysrq_poweroff_op);
62802 return 0;
62803diff -urNp linux-3.0.4/kernel/power/process.c linux-3.0.4/kernel/power/process.c
62804--- linux-3.0.4/kernel/power/process.c 2011-07-21 22:17:23.000000000 -0400
62805+++ linux-3.0.4/kernel/power/process.c 2011-08-23 21:47:56.000000000 -0400
62806@@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_
62807 u64 elapsed_csecs64;
62808 unsigned int elapsed_csecs;
62809 bool wakeup = false;
62810+ bool timedout = false;
62811
62812 do_gettimeofday(&start);
62813
62814@@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_
62815
62816 while (true) {
62817 todo = 0;
62818+ if (time_after(jiffies, end_time))
62819+ timedout = true;
62820 read_lock(&tasklist_lock);
62821 do_each_thread(g, p) {
62822 if (frozen(p) || !freezable(p))
62823@@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_
62824 * try_to_stop() after schedule() in ptrace/signal
62825 * stop sees TIF_FREEZE.
62826 */
62827- if (!task_is_stopped_or_traced(p) &&
62828- !freezer_should_skip(p))
62829+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
62830 todo++;
62831+ if (timedout) {
62832+ printk(KERN_ERR "Task refusing to freeze:\n");
62833+ sched_show_task(p);
62834+ }
62835+ }
62836 } while_each_thread(g, p);
62837 read_unlock(&tasklist_lock);
62838
62839@@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_
62840 todo += wq_busy;
62841 }
62842
62843- if (!todo || time_after(jiffies, end_time))
62844+ if (!todo || timedout)
62845 break;
62846
62847 if (pm_wakeup_pending()) {
62848diff -urNp linux-3.0.4/kernel/printk.c linux-3.0.4/kernel/printk.c
62849--- linux-3.0.4/kernel/printk.c 2011-07-21 22:17:23.000000000 -0400
62850+++ linux-3.0.4/kernel/printk.c 2011-08-23 21:48:14.000000000 -0400
62851@@ -313,12 +313,17 @@ static int check_syslog_permissions(int
62852 if (from_file && type != SYSLOG_ACTION_OPEN)
62853 return 0;
62854
62855+#ifdef CONFIG_GRKERNSEC_DMESG
62856+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
62857+ return -EPERM;
62858+#endif
62859+
62860 if (syslog_action_restricted(type)) {
62861 if (capable(CAP_SYSLOG))
62862 return 0;
62863 /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
62864 if (capable(CAP_SYS_ADMIN)) {
62865- WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
62866+ printk_once(KERN_WARNING "Attempt to access syslog with CAP_SYS_ADMIN "
62867 "but no CAP_SYSLOG (deprecated).\n");
62868 return 0;
62869 }
62870diff -urNp linux-3.0.4/kernel/profile.c linux-3.0.4/kernel/profile.c
62871--- linux-3.0.4/kernel/profile.c 2011-07-21 22:17:23.000000000 -0400
62872+++ linux-3.0.4/kernel/profile.c 2011-08-23 21:47:56.000000000 -0400
62873@@ -39,7 +39,7 @@ struct profile_hit {
62874 /* Oprofile timer tick hook */
62875 static int (*timer_hook)(struct pt_regs *) __read_mostly;
62876
62877-static atomic_t *prof_buffer;
62878+static atomic_unchecked_t *prof_buffer;
62879 static unsigned long prof_len, prof_shift;
62880
62881 int prof_on __read_mostly;
62882@@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
62883 hits[i].pc = 0;
62884 continue;
62885 }
62886- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
62887+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
62888 hits[i].hits = hits[i].pc = 0;
62889 }
62890 }
62891@@ -342,9 +342,9 @@ static void do_profile_hits(int type, vo
62892 * Add the current hit(s) and flush the write-queue out
62893 * to the global buffer:
62894 */
62895- atomic_add(nr_hits, &prof_buffer[pc]);
62896+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
62897 for (i = 0; i < NR_PROFILE_HIT; ++i) {
62898- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
62899+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
62900 hits[i].pc = hits[i].hits = 0;
62901 }
62902 out:
62903@@ -419,7 +419,7 @@ static void do_profile_hits(int type, vo
62904 {
62905 unsigned long pc;
62906 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
62907- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
62908+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
62909 }
62910 #endif /* !CONFIG_SMP */
62911
62912@@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
62913 return -EFAULT;
62914 buf++; p++; count--; read++;
62915 }
62916- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
62917+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
62918 if (copy_to_user(buf, (void *)pnt, count))
62919 return -EFAULT;
62920 read += count;
62921@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
62922 }
62923 #endif
62924 profile_discard_flip_buffers();
62925- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
62926+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
62927 return count;
62928 }
62929
62930diff -urNp linux-3.0.4/kernel/ptrace.c linux-3.0.4/kernel/ptrace.c
62931--- linux-3.0.4/kernel/ptrace.c 2011-07-21 22:17:23.000000000 -0400
62932+++ linux-3.0.4/kernel/ptrace.c 2011-08-23 21:48:14.000000000 -0400
62933@@ -132,7 +132,8 @@ int ptrace_check_attach(struct task_stru
62934 return ret;
62935 }
62936
62937-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
62938+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
62939+ unsigned int log)
62940 {
62941 const struct cred *cred = current_cred(), *tcred;
62942
62943@@ -158,7 +159,8 @@ int __ptrace_may_access(struct task_stru
62944 cred->gid == tcred->sgid &&
62945 cred->gid == tcred->gid))
62946 goto ok;
62947- if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
62948+ if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
62949+ (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
62950 goto ok;
62951 rcu_read_unlock();
62952 return -EPERM;
62953@@ -167,7 +169,9 @@ ok:
62954 smp_rmb();
62955 if (task->mm)
62956 dumpable = get_dumpable(task->mm);
62957- if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
62958+ if (!dumpable &&
62959+ ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
62960+ (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
62961 return -EPERM;
62962
62963 return security_ptrace_access_check(task, mode);
62964@@ -177,7 +181,16 @@ bool ptrace_may_access(struct task_struc
62965 {
62966 int err;
62967 task_lock(task);
62968- err = __ptrace_may_access(task, mode);
62969+ err = __ptrace_may_access(task, mode, 0);
62970+ task_unlock(task);
62971+ return !err;
62972+}
62973+
62974+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
62975+{
62976+ int err;
62977+ task_lock(task);
62978+ err = __ptrace_may_access(task, mode, 1);
62979 task_unlock(task);
62980 return !err;
62981 }
62982@@ -205,7 +218,7 @@ static int ptrace_attach(struct task_str
62983 goto out;
62984
62985 task_lock(task);
62986- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
62987+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
62988 task_unlock(task);
62989 if (retval)
62990 goto unlock_creds;
62991@@ -218,7 +231,7 @@ static int ptrace_attach(struct task_str
62992 goto unlock_tasklist;
62993
62994 task->ptrace = PT_PTRACED;
62995- if (task_ns_capable(task, CAP_SYS_PTRACE))
62996+ if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
62997 task->ptrace |= PT_PTRACE_CAP;
62998
62999 __ptrace_link(task, current);
63000@@ -406,6 +419,8 @@ int ptrace_readdata(struct task_struct *
63001 {
63002 int copied = 0;
63003
63004+ pax_track_stack();
63005+
63006 while (len > 0) {
63007 char buf[128];
63008 int this_len, retval;
63009@@ -417,7 +432,7 @@ int ptrace_readdata(struct task_struct *
63010 break;
63011 return -EIO;
63012 }
63013- if (copy_to_user(dst, buf, retval))
63014+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
63015 return -EFAULT;
63016 copied += retval;
63017 src += retval;
63018@@ -431,6 +446,8 @@ int ptrace_writedata(struct task_struct
63019 {
63020 int copied = 0;
63021
63022+ pax_track_stack();
63023+
63024 while (len > 0) {
63025 char buf[128];
63026 int this_len, retval;
63027@@ -613,9 +630,11 @@ int ptrace_request(struct task_struct *c
63028 {
63029 int ret = -EIO;
63030 siginfo_t siginfo;
63031- void __user *datavp = (void __user *) data;
63032+ void __user *datavp = (__force void __user *) data;
63033 unsigned long __user *datalp = datavp;
63034
63035+ pax_track_stack();
63036+
63037 switch (request) {
63038 case PTRACE_PEEKTEXT:
63039 case PTRACE_PEEKDATA:
63040@@ -761,14 +780,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
63041 goto out;
63042 }
63043
63044+ if (gr_handle_ptrace(child, request)) {
63045+ ret = -EPERM;
63046+ goto out_put_task_struct;
63047+ }
63048+
63049 if (request == PTRACE_ATTACH) {
63050 ret = ptrace_attach(child);
63051 /*
63052 * Some architectures need to do book-keeping after
63053 * a ptrace attach.
63054 */
63055- if (!ret)
63056+ if (!ret) {
63057 arch_ptrace_attach(child);
63058+ gr_audit_ptrace(child);
63059+ }
63060 goto out_put_task_struct;
63061 }
63062
63063@@ -793,7 +819,7 @@ int generic_ptrace_peekdata(struct task_
63064 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
63065 if (copied != sizeof(tmp))
63066 return -EIO;
63067- return put_user(tmp, (unsigned long __user *)data);
63068+ return put_user(tmp, (__force unsigned long __user *)data);
63069 }
63070
63071 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
63072@@ -816,6 +842,8 @@ int compat_ptrace_request(struct task_st
63073 siginfo_t siginfo;
63074 int ret;
63075
63076+ pax_track_stack();
63077+
63078 switch (request) {
63079 case PTRACE_PEEKTEXT:
63080 case PTRACE_PEEKDATA:
63081@@ -903,14 +931,21 @@ asmlinkage long compat_sys_ptrace(compat
63082 goto out;
63083 }
63084
63085+ if (gr_handle_ptrace(child, request)) {
63086+ ret = -EPERM;
63087+ goto out_put_task_struct;
63088+ }
63089+
63090 if (request == PTRACE_ATTACH) {
63091 ret = ptrace_attach(child);
63092 /*
63093 * Some architectures need to do book-keeping after
63094 * a ptrace attach.
63095 */
63096- if (!ret)
63097+ if (!ret) {
63098 arch_ptrace_attach(child);
63099+ gr_audit_ptrace(child);
63100+ }
63101 goto out_put_task_struct;
63102 }
63103
63104diff -urNp linux-3.0.4/kernel/rcutorture.c linux-3.0.4/kernel/rcutorture.c
63105--- linux-3.0.4/kernel/rcutorture.c 2011-07-21 22:17:23.000000000 -0400
63106+++ linux-3.0.4/kernel/rcutorture.c 2011-08-23 21:47:56.000000000 -0400
63107@@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
63108 { 0 };
63109 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
63110 { 0 };
63111-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
63112-static atomic_t n_rcu_torture_alloc;
63113-static atomic_t n_rcu_torture_alloc_fail;
63114-static atomic_t n_rcu_torture_free;
63115-static atomic_t n_rcu_torture_mberror;
63116-static atomic_t n_rcu_torture_error;
63117+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
63118+static atomic_unchecked_t n_rcu_torture_alloc;
63119+static atomic_unchecked_t n_rcu_torture_alloc_fail;
63120+static atomic_unchecked_t n_rcu_torture_free;
63121+static atomic_unchecked_t n_rcu_torture_mberror;
63122+static atomic_unchecked_t n_rcu_torture_error;
63123 static long n_rcu_torture_boost_ktrerror;
63124 static long n_rcu_torture_boost_rterror;
63125 static long n_rcu_torture_boost_failure;
63126@@ -223,11 +223,11 @@ rcu_torture_alloc(void)
63127
63128 spin_lock_bh(&rcu_torture_lock);
63129 if (list_empty(&rcu_torture_freelist)) {
63130- atomic_inc(&n_rcu_torture_alloc_fail);
63131+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
63132 spin_unlock_bh(&rcu_torture_lock);
63133 return NULL;
63134 }
63135- atomic_inc(&n_rcu_torture_alloc);
63136+ atomic_inc_unchecked(&n_rcu_torture_alloc);
63137 p = rcu_torture_freelist.next;
63138 list_del_init(p);
63139 spin_unlock_bh(&rcu_torture_lock);
63140@@ -240,7 +240,7 @@ rcu_torture_alloc(void)
63141 static void
63142 rcu_torture_free(struct rcu_torture *p)
63143 {
63144- atomic_inc(&n_rcu_torture_free);
63145+ atomic_inc_unchecked(&n_rcu_torture_free);
63146 spin_lock_bh(&rcu_torture_lock);
63147 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
63148 spin_unlock_bh(&rcu_torture_lock);
63149@@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
63150 i = rp->rtort_pipe_count;
63151 if (i > RCU_TORTURE_PIPE_LEN)
63152 i = RCU_TORTURE_PIPE_LEN;
63153- atomic_inc(&rcu_torture_wcount[i]);
63154+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
63155 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
63156 rp->rtort_mbtest = 0;
63157 rcu_torture_free(rp);
63158@@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_fr
63159 i = rp->rtort_pipe_count;
63160 if (i > RCU_TORTURE_PIPE_LEN)
63161 i = RCU_TORTURE_PIPE_LEN;
63162- atomic_inc(&rcu_torture_wcount[i]);
63163+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
63164 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
63165 rp->rtort_mbtest = 0;
63166 list_del(&rp->rtort_free);
63167@@ -882,7 +882,7 @@ rcu_torture_writer(void *arg)
63168 i = old_rp->rtort_pipe_count;
63169 if (i > RCU_TORTURE_PIPE_LEN)
63170 i = RCU_TORTURE_PIPE_LEN;
63171- atomic_inc(&rcu_torture_wcount[i]);
63172+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
63173 old_rp->rtort_pipe_count++;
63174 cur_ops->deferred_free(old_rp);
63175 }
63176@@ -951,7 +951,7 @@ static void rcu_torture_timer(unsigned l
63177 return;
63178 }
63179 if (p->rtort_mbtest == 0)
63180- atomic_inc(&n_rcu_torture_mberror);
63181+ atomic_inc_unchecked(&n_rcu_torture_mberror);
63182 spin_lock(&rand_lock);
63183 cur_ops->read_delay(&rand);
63184 n_rcu_torture_timers++;
63185@@ -1013,7 +1013,7 @@ rcu_torture_reader(void *arg)
63186 continue;
63187 }
63188 if (p->rtort_mbtest == 0)
63189- atomic_inc(&n_rcu_torture_mberror);
63190+ atomic_inc_unchecked(&n_rcu_torture_mberror);
63191 cur_ops->read_delay(&rand);
63192 preempt_disable();
63193 pipe_count = p->rtort_pipe_count;
63194@@ -1072,16 +1072,16 @@ rcu_torture_printk(char *page)
63195 rcu_torture_current,
63196 rcu_torture_current_version,
63197 list_empty(&rcu_torture_freelist),
63198- atomic_read(&n_rcu_torture_alloc),
63199- atomic_read(&n_rcu_torture_alloc_fail),
63200- atomic_read(&n_rcu_torture_free),
63201- atomic_read(&n_rcu_torture_mberror),
63202+ atomic_read_unchecked(&n_rcu_torture_alloc),
63203+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
63204+ atomic_read_unchecked(&n_rcu_torture_free),
63205+ atomic_read_unchecked(&n_rcu_torture_mberror),
63206 n_rcu_torture_boost_ktrerror,
63207 n_rcu_torture_boost_rterror,
63208 n_rcu_torture_boost_failure,
63209 n_rcu_torture_boosts,
63210 n_rcu_torture_timers);
63211- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
63212+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
63213 n_rcu_torture_boost_ktrerror != 0 ||
63214 n_rcu_torture_boost_rterror != 0 ||
63215 n_rcu_torture_boost_failure != 0)
63216@@ -1089,7 +1089,7 @@ rcu_torture_printk(char *page)
63217 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
63218 if (i > 1) {
63219 cnt += sprintf(&page[cnt], "!!! ");
63220- atomic_inc(&n_rcu_torture_error);
63221+ atomic_inc_unchecked(&n_rcu_torture_error);
63222 WARN_ON_ONCE(1);
63223 }
63224 cnt += sprintf(&page[cnt], "Reader Pipe: ");
63225@@ -1103,7 +1103,7 @@ rcu_torture_printk(char *page)
63226 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
63227 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
63228 cnt += sprintf(&page[cnt], " %d",
63229- atomic_read(&rcu_torture_wcount[i]));
63230+ atomic_read_unchecked(&rcu_torture_wcount[i]));
63231 }
63232 cnt += sprintf(&page[cnt], "\n");
63233 if (cur_ops->stats)
63234@@ -1412,7 +1412,7 @@ rcu_torture_cleanup(void)
63235
63236 if (cur_ops->cleanup)
63237 cur_ops->cleanup();
63238- if (atomic_read(&n_rcu_torture_error))
63239+ if (atomic_read_unchecked(&n_rcu_torture_error))
63240 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
63241 else
63242 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
63243@@ -1476,17 +1476,17 @@ rcu_torture_init(void)
63244
63245 rcu_torture_current = NULL;
63246 rcu_torture_current_version = 0;
63247- atomic_set(&n_rcu_torture_alloc, 0);
63248- atomic_set(&n_rcu_torture_alloc_fail, 0);
63249- atomic_set(&n_rcu_torture_free, 0);
63250- atomic_set(&n_rcu_torture_mberror, 0);
63251- atomic_set(&n_rcu_torture_error, 0);
63252+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
63253+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
63254+ atomic_set_unchecked(&n_rcu_torture_free, 0);
63255+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
63256+ atomic_set_unchecked(&n_rcu_torture_error, 0);
63257 n_rcu_torture_boost_ktrerror = 0;
63258 n_rcu_torture_boost_rterror = 0;
63259 n_rcu_torture_boost_failure = 0;
63260 n_rcu_torture_boosts = 0;
63261 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
63262- atomic_set(&rcu_torture_wcount[i], 0);
63263+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
63264 for_each_possible_cpu(cpu) {
63265 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
63266 per_cpu(rcu_torture_count, cpu)[i] = 0;
63267diff -urNp linux-3.0.4/kernel/rcutree.c linux-3.0.4/kernel/rcutree.c
63268--- linux-3.0.4/kernel/rcutree.c 2011-07-21 22:17:23.000000000 -0400
63269+++ linux-3.0.4/kernel/rcutree.c 2011-09-14 09:08:05.000000000 -0400
63270@@ -356,9 +356,9 @@ void rcu_enter_nohz(void)
63271 }
63272 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
63273 smp_mb__before_atomic_inc(); /* See above. */
63274- atomic_inc(&rdtp->dynticks);
63275+ atomic_inc_unchecked(&rdtp->dynticks);
63276 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
63277- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
63278+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
63279 local_irq_restore(flags);
63280
63281 /* If the interrupt queued a callback, get out of dyntick mode. */
63282@@ -387,10 +387,10 @@ void rcu_exit_nohz(void)
63283 return;
63284 }
63285 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
63286- atomic_inc(&rdtp->dynticks);
63287+ atomic_inc_unchecked(&rdtp->dynticks);
63288 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
63289 smp_mb__after_atomic_inc(); /* See above. */
63290- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
63291+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
63292 local_irq_restore(flags);
63293 }
63294
63295@@ -406,14 +406,14 @@ void rcu_nmi_enter(void)
63296 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
63297
63298 if (rdtp->dynticks_nmi_nesting == 0 &&
63299- (atomic_read(&rdtp->dynticks) & 0x1))
63300+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
63301 return;
63302 rdtp->dynticks_nmi_nesting++;
63303 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
63304- atomic_inc(&rdtp->dynticks);
63305+ atomic_inc_unchecked(&rdtp->dynticks);
63306 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
63307 smp_mb__after_atomic_inc(); /* See above. */
63308- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
63309+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
63310 }
63311
63312 /**
63313@@ -432,9 +432,9 @@ void rcu_nmi_exit(void)
63314 return;
63315 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
63316 smp_mb__before_atomic_inc(); /* See above. */
63317- atomic_inc(&rdtp->dynticks);
63318+ atomic_inc_unchecked(&rdtp->dynticks);
63319 smp_mb__after_atomic_inc(); /* Force delay to next write. */
63320- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
63321+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
63322 }
63323
63324 /**
63325@@ -469,7 +469,7 @@ void rcu_irq_exit(void)
63326 */
63327 static int dyntick_save_progress_counter(struct rcu_data *rdp)
63328 {
63329- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
63330+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
63331 return 0;
63332 }
63333
63334@@ -484,7 +484,7 @@ static int rcu_implicit_dynticks_qs(stru
63335 unsigned long curr;
63336 unsigned long snap;
63337
63338- curr = (unsigned long)atomic_add_return(0, &rdp->dynticks->dynticks);
63339+ curr = (unsigned long)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
63340 snap = (unsigned long)rdp->dynticks_snap;
63341
63342 /*
63343@@ -1470,7 +1470,7 @@ __rcu_process_callbacks(struct rcu_state
63344 /*
63345 * Do softirq processing for the current CPU.
63346 */
63347-static void rcu_process_callbacks(struct softirq_action *unused)
63348+static void rcu_process_callbacks(void)
63349 {
63350 __rcu_process_callbacks(&rcu_sched_state,
63351 &__get_cpu_var(rcu_sched_data));
63352diff -urNp linux-3.0.4/kernel/rcutree.h linux-3.0.4/kernel/rcutree.h
63353--- linux-3.0.4/kernel/rcutree.h 2011-07-21 22:17:23.000000000 -0400
63354+++ linux-3.0.4/kernel/rcutree.h 2011-09-14 09:08:05.000000000 -0400
63355@@ -86,7 +86,7 @@
63356 struct rcu_dynticks {
63357 int dynticks_nesting; /* Track irq/process nesting level. */
63358 int dynticks_nmi_nesting; /* Track NMI nesting level. */
63359- atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
63360+ atomic_unchecked_t dynticks; /* Even value for dynticks-idle, else odd. */
63361 };
63362
63363 /* RCU's kthread states for tracing. */
63364diff -urNp linux-3.0.4/kernel/rcutree_plugin.h linux-3.0.4/kernel/rcutree_plugin.h
63365--- linux-3.0.4/kernel/rcutree_plugin.h 2011-07-21 22:17:23.000000000 -0400
63366+++ linux-3.0.4/kernel/rcutree_plugin.h 2011-08-23 21:47:56.000000000 -0400
63367@@ -822,7 +822,7 @@ void synchronize_rcu_expedited(void)
63368
63369 /* Clean up and exit. */
63370 smp_mb(); /* ensure expedited GP seen before counter increment. */
63371- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
63372+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
63373 unlock_mb_ret:
63374 mutex_unlock(&sync_rcu_preempt_exp_mutex);
63375 mb_ret:
63376@@ -1774,8 +1774,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expe
63377
63378 #else /* #ifndef CONFIG_SMP */
63379
63380-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
63381-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
63382+static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
63383+static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
63384
63385 static int synchronize_sched_expedited_cpu_stop(void *data)
63386 {
63387@@ -1830,7 +1830,7 @@ void synchronize_sched_expedited(void)
63388 int firstsnap, s, snap, trycount = 0;
63389
63390 /* Note that atomic_inc_return() implies full memory barrier. */
63391- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
63392+ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
63393 get_online_cpus();
63394
63395 /*
63396@@ -1851,7 +1851,7 @@ void synchronize_sched_expedited(void)
63397 }
63398
63399 /* Check to see if someone else did our work for us. */
63400- s = atomic_read(&sync_sched_expedited_done);
63401+ s = atomic_read_unchecked(&sync_sched_expedited_done);
63402 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
63403 smp_mb(); /* ensure test happens before caller kfree */
63404 return;
63405@@ -1866,7 +1866,7 @@ void synchronize_sched_expedited(void)
63406 * grace period works for us.
63407 */
63408 get_online_cpus();
63409- snap = atomic_read(&sync_sched_expedited_started) - 1;
63410+ snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
63411 smp_mb(); /* ensure read is before try_stop_cpus(). */
63412 }
63413
63414@@ -1877,12 +1877,12 @@ void synchronize_sched_expedited(void)
63415 * than we did beat us to the punch.
63416 */
63417 do {
63418- s = atomic_read(&sync_sched_expedited_done);
63419+ s = atomic_read_unchecked(&sync_sched_expedited_done);
63420 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
63421 smp_mb(); /* ensure test happens before caller kfree */
63422 break;
63423 }
63424- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
63425+ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
63426
63427 put_online_cpus();
63428 }
63429diff -urNp linux-3.0.4/kernel/relay.c linux-3.0.4/kernel/relay.c
63430--- linux-3.0.4/kernel/relay.c 2011-07-21 22:17:23.000000000 -0400
63431+++ linux-3.0.4/kernel/relay.c 2011-08-23 21:48:14.000000000 -0400
63432@@ -1236,6 +1236,8 @@ static ssize_t subbuf_splice_actor(struc
63433 };
63434 ssize_t ret;
63435
63436+ pax_track_stack();
63437+
63438 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
63439 return 0;
63440 if (splice_grow_spd(pipe, &spd))
63441diff -urNp linux-3.0.4/kernel/resource.c linux-3.0.4/kernel/resource.c
63442--- linux-3.0.4/kernel/resource.c 2011-07-21 22:17:23.000000000 -0400
63443+++ linux-3.0.4/kernel/resource.c 2011-08-23 21:48:14.000000000 -0400
63444@@ -141,8 +141,18 @@ static const struct file_operations proc
63445
63446 static int __init ioresources_init(void)
63447 {
63448+#ifdef CONFIG_GRKERNSEC_PROC_ADD
63449+#ifdef CONFIG_GRKERNSEC_PROC_USER
63450+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
63451+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
63452+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
63453+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
63454+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
63455+#endif
63456+#else
63457 proc_create("ioports", 0, NULL, &proc_ioports_operations);
63458 proc_create("iomem", 0, NULL, &proc_iomem_operations);
63459+#endif
63460 return 0;
63461 }
63462 __initcall(ioresources_init);
63463diff -urNp linux-3.0.4/kernel/rtmutex-tester.c linux-3.0.4/kernel/rtmutex-tester.c
63464--- linux-3.0.4/kernel/rtmutex-tester.c 2011-07-21 22:17:23.000000000 -0400
63465+++ linux-3.0.4/kernel/rtmutex-tester.c 2011-08-23 21:47:56.000000000 -0400
63466@@ -20,7 +20,7 @@
63467 #define MAX_RT_TEST_MUTEXES 8
63468
63469 static spinlock_t rttest_lock;
63470-static atomic_t rttest_event;
63471+static atomic_unchecked_t rttest_event;
63472
63473 struct test_thread_data {
63474 int opcode;
63475@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_
63476
63477 case RTTEST_LOCKCONT:
63478 td->mutexes[td->opdata] = 1;
63479- td->event = atomic_add_return(1, &rttest_event);
63480+ td->event = atomic_add_return_unchecked(1, &rttest_event);
63481 return 0;
63482
63483 case RTTEST_RESET:
63484@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_
63485 return 0;
63486
63487 case RTTEST_RESETEVENT:
63488- atomic_set(&rttest_event, 0);
63489+ atomic_set_unchecked(&rttest_event, 0);
63490 return 0;
63491
63492 default:
63493@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_
63494 return ret;
63495
63496 td->mutexes[id] = 1;
63497- td->event = atomic_add_return(1, &rttest_event);
63498+ td->event = atomic_add_return_unchecked(1, &rttest_event);
63499 rt_mutex_lock(&mutexes[id]);
63500- td->event = atomic_add_return(1, &rttest_event);
63501+ td->event = atomic_add_return_unchecked(1, &rttest_event);
63502 td->mutexes[id] = 4;
63503 return 0;
63504
63505@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_
63506 return ret;
63507
63508 td->mutexes[id] = 1;
63509- td->event = atomic_add_return(1, &rttest_event);
63510+ td->event = atomic_add_return_unchecked(1, &rttest_event);
63511 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
63512- td->event = atomic_add_return(1, &rttest_event);
63513+ td->event = atomic_add_return_unchecked(1, &rttest_event);
63514 td->mutexes[id] = ret ? 0 : 4;
63515 return ret ? -EINTR : 0;
63516
63517@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_
63518 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
63519 return ret;
63520
63521- td->event = atomic_add_return(1, &rttest_event);
63522+ td->event = atomic_add_return_unchecked(1, &rttest_event);
63523 rt_mutex_unlock(&mutexes[id]);
63524- td->event = atomic_add_return(1, &rttest_event);
63525+ td->event = atomic_add_return_unchecked(1, &rttest_event);
63526 td->mutexes[id] = 0;
63527 return 0;
63528
63529@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mu
63530 break;
63531
63532 td->mutexes[dat] = 2;
63533- td->event = atomic_add_return(1, &rttest_event);
63534+ td->event = atomic_add_return_unchecked(1, &rttest_event);
63535 break;
63536
63537 default:
63538@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mu
63539 return;
63540
63541 td->mutexes[dat] = 3;
63542- td->event = atomic_add_return(1, &rttest_event);
63543+ td->event = atomic_add_return_unchecked(1, &rttest_event);
63544 break;
63545
63546 case RTTEST_LOCKNOWAIT:
63547@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mu
63548 return;
63549
63550 td->mutexes[dat] = 1;
63551- td->event = atomic_add_return(1, &rttest_event);
63552+ td->event = atomic_add_return_unchecked(1, &rttest_event);
63553 return;
63554
63555 default:
63556diff -urNp linux-3.0.4/kernel/sched_autogroup.c linux-3.0.4/kernel/sched_autogroup.c
63557--- linux-3.0.4/kernel/sched_autogroup.c 2011-07-21 22:17:23.000000000 -0400
63558+++ linux-3.0.4/kernel/sched_autogroup.c 2011-08-23 21:47:56.000000000 -0400
63559@@ -7,7 +7,7 @@
63560
63561 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
63562 static struct autogroup autogroup_default;
63563-static atomic_t autogroup_seq_nr;
63564+static atomic_unchecked_t autogroup_seq_nr;
63565
63566 static void __init autogroup_init(struct task_struct *init_task)
63567 {
63568@@ -78,7 +78,7 @@ static inline struct autogroup *autogrou
63569
63570 kref_init(&ag->kref);
63571 init_rwsem(&ag->lock);
63572- ag->id = atomic_inc_return(&autogroup_seq_nr);
63573+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
63574 ag->tg = tg;
63575 #ifdef CONFIG_RT_GROUP_SCHED
63576 /*
63577diff -urNp linux-3.0.4/kernel/sched.c linux-3.0.4/kernel/sched.c
63578--- linux-3.0.4/kernel/sched.c 2011-07-21 22:17:23.000000000 -0400
63579+++ linux-3.0.4/kernel/sched.c 2011-08-23 21:48:14.000000000 -0400
63580@@ -4251,6 +4251,8 @@ asmlinkage void __sched schedule(void)
63581 struct rq *rq;
63582 int cpu;
63583
63584+ pax_track_stack();
63585+
63586 need_resched:
63587 preempt_disable();
63588 cpu = smp_processor_id();
63589@@ -4934,6 +4936,8 @@ int can_nice(const struct task_struct *p
63590 /* convert nice value [19,-20] to rlimit style value [1,40] */
63591 int nice_rlim = 20 - nice;
63592
63593+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
63594+
63595 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
63596 capable(CAP_SYS_NICE));
63597 }
63598@@ -4967,7 +4971,8 @@ SYSCALL_DEFINE1(nice, int, increment)
63599 if (nice > 19)
63600 nice = 19;
63601
63602- if (increment < 0 && !can_nice(current, nice))
63603+ if (increment < 0 && (!can_nice(current, nice) ||
63604+ gr_handle_chroot_nice()))
63605 return -EPERM;
63606
63607 retval = security_task_setnice(current, nice);
63608@@ -5111,6 +5116,7 @@ recheck:
63609 unsigned long rlim_rtprio =
63610 task_rlimit(p, RLIMIT_RTPRIO);
63611
63612+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
63613 /* can't set/change the rt policy */
63614 if (policy != p->policy && !rlim_rtprio)
63615 return -EPERM;
63616diff -urNp linux-3.0.4/kernel/sched_fair.c linux-3.0.4/kernel/sched_fair.c
63617--- linux-3.0.4/kernel/sched_fair.c 2011-07-21 22:17:23.000000000 -0400
63618+++ linux-3.0.4/kernel/sched_fair.c 2011-08-23 21:47:56.000000000 -0400
63619@@ -4050,7 +4050,7 @@ static void nohz_idle_balance(int this_c
63620 * run_rebalance_domains is triggered when needed from the scheduler tick.
63621 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
63622 */
63623-static void run_rebalance_domains(struct softirq_action *h)
63624+static void run_rebalance_domains(void)
63625 {
63626 int this_cpu = smp_processor_id();
63627 struct rq *this_rq = cpu_rq(this_cpu);
63628diff -urNp linux-3.0.4/kernel/signal.c linux-3.0.4/kernel/signal.c
63629--- linux-3.0.4/kernel/signal.c 2011-07-21 22:17:23.000000000 -0400
63630+++ linux-3.0.4/kernel/signal.c 2011-08-23 21:48:14.000000000 -0400
63631@@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cache
63632
63633 int print_fatal_signals __read_mostly;
63634
63635-static void __user *sig_handler(struct task_struct *t, int sig)
63636+static __sighandler_t sig_handler(struct task_struct *t, int sig)
63637 {
63638 return t->sighand->action[sig - 1].sa.sa_handler;
63639 }
63640
63641-static int sig_handler_ignored(void __user *handler, int sig)
63642+static int sig_handler_ignored(__sighandler_t handler, int sig)
63643 {
63644 /* Is it explicitly or implicitly ignored? */
63645 return handler == SIG_IGN ||
63646@@ -60,7 +60,7 @@ static int sig_handler_ignored(void __us
63647 static int sig_task_ignored(struct task_struct *t, int sig,
63648 int from_ancestor_ns)
63649 {
63650- void __user *handler;
63651+ __sighandler_t handler;
63652
63653 handler = sig_handler(t, sig);
63654
63655@@ -320,6 +320,9 @@ __sigqueue_alloc(int sig, struct task_st
63656 atomic_inc(&user->sigpending);
63657 rcu_read_unlock();
63658
63659+ if (!override_rlimit)
63660+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
63661+
63662 if (override_rlimit ||
63663 atomic_read(&user->sigpending) <=
63664 task_rlimit(t, RLIMIT_SIGPENDING)) {
63665@@ -444,7 +447,7 @@ flush_signal_handlers(struct task_struct
63666
63667 int unhandled_signal(struct task_struct *tsk, int sig)
63668 {
63669- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
63670+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
63671 if (is_global_init(tsk))
63672 return 1;
63673 if (handler != SIG_IGN && handler != SIG_DFL)
63674@@ -770,6 +773,13 @@ static int check_kill_permission(int sig
63675 }
63676 }
63677
63678+ /* allow glibc communication via tgkill to other threads in our
63679+ thread group */
63680+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
63681+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
63682+ && gr_handle_signal(t, sig))
63683+ return -EPERM;
63684+
63685 return security_task_kill(t, info, sig, 0);
63686 }
63687
63688@@ -1092,7 +1102,7 @@ __group_send_sig_info(int sig, struct si
63689 return send_signal(sig, info, p, 1);
63690 }
63691
63692-static int
63693+int
63694 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
63695 {
63696 return send_signal(sig, info, t, 0);
63697@@ -1129,6 +1139,7 @@ force_sig_info(int sig, struct siginfo *
63698 unsigned long int flags;
63699 int ret, blocked, ignored;
63700 struct k_sigaction *action;
63701+ int is_unhandled = 0;
63702
63703 spin_lock_irqsave(&t->sighand->siglock, flags);
63704 action = &t->sighand->action[sig-1];
63705@@ -1143,9 +1154,18 @@ force_sig_info(int sig, struct siginfo *
63706 }
63707 if (action->sa.sa_handler == SIG_DFL)
63708 t->signal->flags &= ~SIGNAL_UNKILLABLE;
63709+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
63710+ is_unhandled = 1;
63711 ret = specific_send_sig_info(sig, info, t);
63712 spin_unlock_irqrestore(&t->sighand->siglock, flags);
63713
63714+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
63715+ normal operation */
63716+ if (is_unhandled) {
63717+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
63718+ gr_handle_crash(t, sig);
63719+ }
63720+
63721 return ret;
63722 }
63723
63724@@ -1212,8 +1232,11 @@ int group_send_sig_info(int sig, struct
63725 ret = check_kill_permission(sig, info, p);
63726 rcu_read_unlock();
63727
63728- if (!ret && sig)
63729+ if (!ret && sig) {
63730 ret = do_send_sig_info(sig, info, p, true);
63731+ if (!ret)
63732+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
63733+ }
63734
63735 return ret;
63736 }
63737@@ -1839,6 +1862,8 @@ void ptrace_notify(int exit_code)
63738 {
63739 siginfo_t info;
63740
63741+ pax_track_stack();
63742+
63743 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
63744
63745 memset(&info, 0, sizeof info);
63746@@ -2639,7 +2664,15 @@ do_send_specific(pid_t tgid, pid_t pid,
63747 int error = -ESRCH;
63748
63749 rcu_read_lock();
63750- p = find_task_by_vpid(pid);
63751+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
63752+ /* allow glibc communication via tgkill to other threads in our
63753+ thread group */
63754+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
63755+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
63756+ p = find_task_by_vpid_unrestricted(pid);
63757+ else
63758+#endif
63759+ p = find_task_by_vpid(pid);
63760 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
63761 error = check_kill_permission(sig, info, p);
63762 /*
63763diff -urNp linux-3.0.4/kernel/smp.c linux-3.0.4/kernel/smp.c
63764--- linux-3.0.4/kernel/smp.c 2011-07-21 22:17:23.000000000 -0400
63765+++ linux-3.0.4/kernel/smp.c 2011-08-23 21:47:56.000000000 -0400
63766@@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t fu
63767 }
63768 EXPORT_SYMBOL(smp_call_function);
63769
63770-void ipi_call_lock(void)
63771+void ipi_call_lock(void) __acquires(call_function.lock)
63772 {
63773 raw_spin_lock(&call_function.lock);
63774 }
63775
63776-void ipi_call_unlock(void)
63777+void ipi_call_unlock(void) __releases(call_function.lock)
63778 {
63779 raw_spin_unlock(&call_function.lock);
63780 }
63781
63782-void ipi_call_lock_irq(void)
63783+void ipi_call_lock_irq(void) __acquires(call_function.lock)
63784 {
63785 raw_spin_lock_irq(&call_function.lock);
63786 }
63787
63788-void ipi_call_unlock_irq(void)
63789+void ipi_call_unlock_irq(void) __releases(call_function.lock)
63790 {
63791 raw_spin_unlock_irq(&call_function.lock);
63792 }
63793diff -urNp linux-3.0.4/kernel/softirq.c linux-3.0.4/kernel/softirq.c
63794--- linux-3.0.4/kernel/softirq.c 2011-07-21 22:17:23.000000000 -0400
63795+++ linux-3.0.4/kernel/softirq.c 2011-08-23 21:47:56.000000000 -0400
63796@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
63797
63798 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
63799
63800-char *softirq_to_name[NR_SOFTIRQS] = {
63801+const char * const softirq_to_name[NR_SOFTIRQS] = {
63802 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
63803 "TASKLET", "SCHED", "HRTIMER", "RCU"
63804 };
63805@@ -235,7 +235,7 @@ restart:
63806 kstat_incr_softirqs_this_cpu(vec_nr);
63807
63808 trace_softirq_entry(vec_nr);
63809- h->action(h);
63810+ h->action();
63811 trace_softirq_exit(vec_nr);
63812 if (unlikely(prev_count != preempt_count())) {
63813 printk(KERN_ERR "huh, entered softirq %u %s %p"
63814@@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
63815 local_irq_restore(flags);
63816 }
63817
63818-void open_softirq(int nr, void (*action)(struct softirq_action *))
63819+void open_softirq(int nr, void (*action)(void))
63820 {
63821- softirq_vec[nr].action = action;
63822+ pax_open_kernel();
63823+ *(void **)&softirq_vec[nr].action = action;
63824+ pax_close_kernel();
63825 }
63826
63827 /*
63828@@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct
63829
63830 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
63831
63832-static void tasklet_action(struct softirq_action *a)
63833+static void tasklet_action(void)
63834 {
63835 struct tasklet_struct *list;
63836
63837@@ -476,7 +478,7 @@ static void tasklet_action(struct softir
63838 }
63839 }
63840
63841-static void tasklet_hi_action(struct softirq_action *a)
63842+static void tasklet_hi_action(void)
63843 {
63844 struct tasklet_struct *list;
63845
63846diff -urNp linux-3.0.4/kernel/sys.c linux-3.0.4/kernel/sys.c
63847--- linux-3.0.4/kernel/sys.c 2011-09-02 18:11:26.000000000 -0400
63848+++ linux-3.0.4/kernel/sys.c 2011-10-06 04:17:55.000000000 -0400
63849@@ -158,6 +158,12 @@ static int set_one_prio(struct task_stru
63850 error = -EACCES;
63851 goto out;
63852 }
63853+
63854+ if (gr_handle_chroot_setpriority(p, niceval)) {
63855+ error = -EACCES;
63856+ goto out;
63857+ }
63858+
63859 no_nice = security_task_setnice(p, niceval);
63860 if (no_nice) {
63861 error = no_nice;
63862@@ -541,6 +547,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
63863 goto error;
63864 }
63865
63866+ if (gr_check_group_change(new->gid, new->egid, -1))
63867+ goto error;
63868+
63869 if (rgid != (gid_t) -1 ||
63870 (egid != (gid_t) -1 && egid != old->gid))
63871 new->sgid = new->egid;
63872@@ -570,6 +579,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
63873 old = current_cred();
63874
63875 retval = -EPERM;
63876+
63877+ if (gr_check_group_change(gid, gid, gid))
63878+ goto error;
63879+
63880 if (nsown_capable(CAP_SETGID))
63881 new->gid = new->egid = new->sgid = new->fsgid = gid;
63882 else if (gid == old->gid || gid == old->sgid)
63883@@ -595,11 +608,18 @@ static int set_user(struct cred *new)
63884 if (!new_user)
63885 return -EAGAIN;
63886
63887+ /*
63888+ * We don't fail in case of NPROC limit excess here because too many
63889+ * poorly written programs don't check set*uid() return code, assuming
63890+ * it never fails if called by root. We may still enforce NPROC limit
63891+ * for programs doing set*uid()+execve() by harmlessly deferring the
63892+ * failure to the execve() stage.
63893+ */
63894 if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
63895- new_user != INIT_USER) {
63896- free_uid(new_user);
63897- return -EAGAIN;
63898- }
63899+ new_user != INIT_USER)
63900+ current->flags |= PF_NPROC_EXCEEDED;
63901+ else
63902+ current->flags &= ~PF_NPROC_EXCEEDED;
63903
63904 free_uid(new->user);
63905 new->user = new_user;
63906@@ -650,6 +670,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
63907 goto error;
63908 }
63909
63910+ if (gr_check_user_change(new->uid, new->euid, -1))
63911+ goto error;
63912+
63913 if (new->uid != old->uid) {
63914 retval = set_user(new);
63915 if (retval < 0)
63916@@ -694,6 +717,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
63917 old = current_cred();
63918
63919 retval = -EPERM;
63920+
63921+ if (gr_check_crash_uid(uid))
63922+ goto error;
63923+ if (gr_check_user_change(uid, uid, uid))
63924+ goto error;
63925+
63926 if (nsown_capable(CAP_SETUID)) {
63927 new->suid = new->uid = uid;
63928 if (uid != old->uid) {
63929@@ -748,6 +777,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
63930 goto error;
63931 }
63932
63933+ if (gr_check_user_change(ruid, euid, -1))
63934+ goto error;
63935+
63936 if (ruid != (uid_t) -1) {
63937 new->uid = ruid;
63938 if (ruid != old->uid) {
63939@@ -812,6 +844,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
63940 goto error;
63941 }
63942
63943+ if (gr_check_group_change(rgid, egid, -1))
63944+ goto error;
63945+
63946 if (rgid != (gid_t) -1)
63947 new->gid = rgid;
63948 if (egid != (gid_t) -1)
63949@@ -858,6 +893,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
63950 old = current_cred();
63951 old_fsuid = old->fsuid;
63952
63953+ if (gr_check_user_change(-1, -1, uid))
63954+ goto error;
63955+
63956 if (uid == old->uid || uid == old->euid ||
63957 uid == old->suid || uid == old->fsuid ||
63958 nsown_capable(CAP_SETUID)) {
63959@@ -868,6 +906,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
63960 }
63961 }
63962
63963+error:
63964 abort_creds(new);
63965 return old_fsuid;
63966
63967@@ -894,12 +933,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
63968 if (gid == old->gid || gid == old->egid ||
63969 gid == old->sgid || gid == old->fsgid ||
63970 nsown_capable(CAP_SETGID)) {
63971+ if (gr_check_group_change(-1, -1, gid))
63972+ goto error;
63973+
63974 if (gid != old_fsgid) {
63975 new->fsgid = gid;
63976 goto change_okay;
63977 }
63978 }
63979
63980+error:
63981 abort_creds(new);
63982 return old_fsgid;
63983
63984@@ -1205,19 +1248,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_
63985 return -EFAULT;
63986
63987 down_read(&uts_sem);
63988- error = __copy_to_user(&name->sysname, &utsname()->sysname,
63989+ error = __copy_to_user(name->sysname, &utsname()->sysname,
63990 __OLD_UTS_LEN);
63991 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
63992- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
63993+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
63994 __OLD_UTS_LEN);
63995 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
63996- error |= __copy_to_user(&name->release, &utsname()->release,
63997+ error |= __copy_to_user(name->release, &utsname()->release,
63998 __OLD_UTS_LEN);
63999 error |= __put_user(0, name->release + __OLD_UTS_LEN);
64000- error |= __copy_to_user(&name->version, &utsname()->version,
64001+ error |= __copy_to_user(name->version, &utsname()->version,
64002 __OLD_UTS_LEN);
64003 error |= __put_user(0, name->version + __OLD_UTS_LEN);
64004- error |= __copy_to_user(&name->machine, &utsname()->machine,
64005+ error |= __copy_to_user(name->machine, &utsname()->machine,
64006 __OLD_UTS_LEN);
64007 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
64008 up_read(&uts_sem);
64009@@ -1680,7 +1723,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
64010 error = get_dumpable(me->mm);
64011 break;
64012 case PR_SET_DUMPABLE:
64013- if (arg2 < 0 || arg2 > 1) {
64014+ if (arg2 > 1) {
64015 error = -EINVAL;
64016 break;
64017 }
64018diff -urNp linux-3.0.4/kernel/sysctl_binary.c linux-3.0.4/kernel/sysctl_binary.c
64019--- linux-3.0.4/kernel/sysctl_binary.c 2011-07-21 22:17:23.000000000 -0400
64020+++ linux-3.0.4/kernel/sysctl_binary.c 2011-10-06 04:17:55.000000000 -0400
64021@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *f
64022 int i;
64023
64024 set_fs(KERNEL_DS);
64025- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
64026+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
64027 set_fs(old_fs);
64028 if (result < 0)
64029 goto out_kfree;
64030@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *f
64031 }
64032
64033 set_fs(KERNEL_DS);
64034- result = vfs_write(file, buffer, str - buffer, &pos);
64035+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
64036 set_fs(old_fs);
64037 if (result < 0)
64038 goto out_kfree;
64039@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file
64040 int i;
64041
64042 set_fs(KERNEL_DS);
64043- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
64044+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
64045 set_fs(old_fs);
64046 if (result < 0)
64047 goto out_kfree;
64048@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file
64049 }
64050
64051 set_fs(KERNEL_DS);
64052- result = vfs_write(file, buffer, str - buffer, &pos);
64053+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
64054 set_fs(old_fs);
64055 if (result < 0)
64056 goto out_kfree;
64057@@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *fil
64058 int i;
64059
64060 set_fs(KERNEL_DS);
64061- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
64062+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
64063 set_fs(old_fs);
64064 if (result < 0)
64065 goto out;
64066@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struc
64067 __le16 dnaddr;
64068
64069 set_fs(KERNEL_DS);
64070- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
64071+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
64072 set_fs(old_fs);
64073 if (result < 0)
64074 goto out;
64075@@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struc
64076 le16_to_cpu(dnaddr) & 0x3ff);
64077
64078 set_fs(KERNEL_DS);
64079- result = vfs_write(file, buf, len, &pos);
64080+ result = vfs_write(file, (const char __force_user *)buf, len, &pos);
64081 set_fs(old_fs);
64082 if (result < 0)
64083 goto out;
64084diff -urNp linux-3.0.4/kernel/sysctl.c linux-3.0.4/kernel/sysctl.c
64085--- linux-3.0.4/kernel/sysctl.c 2011-07-21 22:17:23.000000000 -0400
64086+++ linux-3.0.4/kernel/sysctl.c 2011-08-23 21:48:14.000000000 -0400
64087@@ -85,6 +85,13 @@
64088
64089
64090 #if defined(CONFIG_SYSCTL)
64091+#include <linux/grsecurity.h>
64092+#include <linux/grinternal.h>
64093+
64094+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
64095+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
64096+ const int op);
64097+extern int gr_handle_chroot_sysctl(const int op);
64098
64099 /* External variables not in a header file. */
64100 extern int sysctl_overcommit_memory;
64101@@ -197,6 +204,7 @@ static int sysrq_sysctl_handler(ctl_tabl
64102 }
64103
64104 #endif
64105+extern struct ctl_table grsecurity_table[];
64106
64107 static struct ctl_table root_table[];
64108 static struct ctl_table_root sysctl_table_root;
64109@@ -226,6 +234,20 @@ extern struct ctl_table epoll_table[];
64110 int sysctl_legacy_va_layout;
64111 #endif
64112
64113+#ifdef CONFIG_PAX_SOFTMODE
64114+static ctl_table pax_table[] = {
64115+ {
64116+ .procname = "softmode",
64117+ .data = &pax_softmode,
64118+ .maxlen = sizeof(unsigned int),
64119+ .mode = 0600,
64120+ .proc_handler = &proc_dointvec,
64121+ },
64122+
64123+ { }
64124+};
64125+#endif
64126+
64127 /* The default sysctl tables: */
64128
64129 static struct ctl_table root_table[] = {
64130@@ -272,6 +294,22 @@ static int max_extfrag_threshold = 1000;
64131 #endif
64132
64133 static struct ctl_table kern_table[] = {
64134+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
64135+ {
64136+ .procname = "grsecurity",
64137+ .mode = 0500,
64138+ .child = grsecurity_table,
64139+ },
64140+#endif
64141+
64142+#ifdef CONFIG_PAX_SOFTMODE
64143+ {
64144+ .procname = "pax",
64145+ .mode = 0500,
64146+ .child = pax_table,
64147+ },
64148+#endif
64149+
64150 {
64151 .procname = "sched_child_runs_first",
64152 .data = &sysctl_sched_child_runs_first,
64153@@ -546,7 +584,7 @@ static struct ctl_table kern_table[] = {
64154 .data = &modprobe_path,
64155 .maxlen = KMOD_PATH_LEN,
64156 .mode = 0644,
64157- .proc_handler = proc_dostring,
64158+ .proc_handler = proc_dostring_modpriv,
64159 },
64160 {
64161 .procname = "modules_disabled",
64162@@ -713,16 +751,20 @@ static struct ctl_table kern_table[] = {
64163 .extra1 = &zero,
64164 .extra2 = &one,
64165 },
64166+#endif
64167 {
64168 .procname = "kptr_restrict",
64169 .data = &kptr_restrict,
64170 .maxlen = sizeof(int),
64171 .mode = 0644,
64172 .proc_handler = proc_dmesg_restrict,
64173+#ifdef CONFIG_GRKERNSEC_HIDESYM
64174+ .extra1 = &two,
64175+#else
64176 .extra1 = &zero,
64177+#endif
64178 .extra2 = &two,
64179 },
64180-#endif
64181 {
64182 .procname = "ngroups_max",
64183 .data = &ngroups_max,
64184@@ -1205,6 +1247,13 @@ static struct ctl_table vm_table[] = {
64185 .proc_handler = proc_dointvec_minmax,
64186 .extra1 = &zero,
64187 },
64188+ {
64189+ .procname = "heap_stack_gap",
64190+ .data = &sysctl_heap_stack_gap,
64191+ .maxlen = sizeof(sysctl_heap_stack_gap),
64192+ .mode = 0644,
64193+ .proc_handler = proc_doulongvec_minmax,
64194+ },
64195 #else
64196 {
64197 .procname = "nr_trim_pages",
64198@@ -1714,6 +1763,17 @@ static int test_perm(int mode, int op)
64199 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
64200 {
64201 int mode;
64202+ int error;
64203+
64204+ if (table->parent != NULL && table->parent->procname != NULL &&
64205+ table->procname != NULL &&
64206+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
64207+ return -EACCES;
64208+ if (gr_handle_chroot_sysctl(op))
64209+ return -EACCES;
64210+ error = gr_handle_sysctl(table, op);
64211+ if (error)
64212+ return error;
64213
64214 if (root->permissions)
64215 mode = root->permissions(root, current->nsproxy, table);
64216@@ -2118,6 +2178,16 @@ int proc_dostring(struct ctl_table *tabl
64217 buffer, lenp, ppos);
64218 }
64219
64220+int proc_dostring_modpriv(struct ctl_table *table, int write,
64221+ void __user *buffer, size_t *lenp, loff_t *ppos)
64222+{
64223+ if (write && !capable(CAP_SYS_MODULE))
64224+ return -EPERM;
64225+
64226+ return _proc_do_string(table->data, table->maxlen, write,
64227+ buffer, lenp, ppos);
64228+}
64229+
64230 static size_t proc_skip_spaces(char **buf)
64231 {
64232 size_t ret;
64233@@ -2223,6 +2293,8 @@ static int proc_put_long(void __user **b
64234 len = strlen(tmp);
64235 if (len > *size)
64236 len = *size;
64237+ if (len > sizeof(tmp))
64238+ len = sizeof(tmp);
64239 if (copy_to_user(*buf, tmp, len))
64240 return -EFAULT;
64241 *size -= len;
64242@@ -2539,8 +2611,11 @@ static int __do_proc_doulongvec_minmax(v
64243 *i = val;
64244 } else {
64245 val = convdiv * (*i) / convmul;
64246- if (!first)
64247+ if (!first) {
64248 err = proc_put_char(&buffer, &left, '\t');
64249+ if (err)
64250+ break;
64251+ }
64252 err = proc_put_long(&buffer, &left, val, false);
64253 if (err)
64254 break;
64255@@ -2935,6 +3010,12 @@ int proc_dostring(struct ctl_table *tabl
64256 return -ENOSYS;
64257 }
64258
64259+int proc_dostring_modpriv(struct ctl_table *table, int write,
64260+ void __user *buffer, size_t *lenp, loff_t *ppos)
64261+{
64262+ return -ENOSYS;
64263+}
64264+
64265 int proc_dointvec(struct ctl_table *table, int write,
64266 void __user *buffer, size_t *lenp, loff_t *ppos)
64267 {
64268@@ -2991,6 +3072,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
64269 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
64270 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
64271 EXPORT_SYMBOL(proc_dostring);
64272+EXPORT_SYMBOL(proc_dostring_modpriv);
64273 EXPORT_SYMBOL(proc_doulongvec_minmax);
64274 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
64275 EXPORT_SYMBOL(register_sysctl_table);
64276diff -urNp linux-3.0.4/kernel/sysctl_check.c linux-3.0.4/kernel/sysctl_check.c
64277--- linux-3.0.4/kernel/sysctl_check.c 2011-07-21 22:17:23.000000000 -0400
64278+++ linux-3.0.4/kernel/sysctl_check.c 2011-08-23 21:48:14.000000000 -0400
64279@@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *n
64280 set_fail(&fail, table, "Directory with extra2");
64281 } else {
64282 if ((table->proc_handler == proc_dostring) ||
64283+ (table->proc_handler == proc_dostring_modpriv) ||
64284 (table->proc_handler == proc_dointvec) ||
64285 (table->proc_handler == proc_dointvec_minmax) ||
64286 (table->proc_handler == proc_dointvec_jiffies) ||
64287diff -urNp linux-3.0.4/kernel/taskstats.c linux-3.0.4/kernel/taskstats.c
64288--- linux-3.0.4/kernel/taskstats.c 2011-07-21 22:17:23.000000000 -0400
64289+++ linux-3.0.4/kernel/taskstats.c 2011-08-23 21:48:14.000000000 -0400
64290@@ -27,9 +27,12 @@
64291 #include <linux/cgroup.h>
64292 #include <linux/fs.h>
64293 #include <linux/file.h>
64294+#include <linux/grsecurity.h>
64295 #include <net/genetlink.h>
64296 #include <asm/atomic.h>
64297
64298+extern int gr_is_taskstats_denied(int pid);
64299+
64300 /*
64301 * Maximum length of a cpumask that can be specified in
64302 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
64303@@ -558,6 +561,9 @@ err:
64304
64305 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
64306 {
64307+ if (gr_is_taskstats_denied(current->pid))
64308+ return -EACCES;
64309+
64310 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
64311 return cmd_attr_register_cpumask(info);
64312 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
64313diff -urNp linux-3.0.4/kernel/time/alarmtimer.c linux-3.0.4/kernel/time/alarmtimer.c
64314--- linux-3.0.4/kernel/time/alarmtimer.c 2011-07-21 22:17:23.000000000 -0400
64315+++ linux-3.0.4/kernel/time/alarmtimer.c 2011-08-23 21:47:56.000000000 -0400
64316@@ -685,7 +685,7 @@ static int __init alarmtimer_init(void)
64317 {
64318 int error = 0;
64319 int i;
64320- struct k_clock alarm_clock = {
64321+ static struct k_clock alarm_clock = {
64322 .clock_getres = alarm_clock_getres,
64323 .clock_get = alarm_clock_get,
64324 .timer_create = alarm_timer_create,
64325diff -urNp linux-3.0.4/kernel/time/tick-broadcast.c linux-3.0.4/kernel/time/tick-broadcast.c
64326--- linux-3.0.4/kernel/time/tick-broadcast.c 2011-07-21 22:17:23.000000000 -0400
64327+++ linux-3.0.4/kernel/time/tick-broadcast.c 2011-08-23 21:47:56.000000000 -0400
64328@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct cl
64329 * then clear the broadcast bit.
64330 */
64331 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
64332- int cpu = smp_processor_id();
64333+ cpu = smp_processor_id();
64334
64335 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
64336 tick_broadcast_clear_oneshot(cpu);
64337diff -urNp linux-3.0.4/kernel/time/timekeeping.c linux-3.0.4/kernel/time/timekeeping.c
64338--- linux-3.0.4/kernel/time/timekeeping.c 2011-07-21 22:17:23.000000000 -0400
64339+++ linux-3.0.4/kernel/time/timekeeping.c 2011-08-23 21:48:14.000000000 -0400
64340@@ -14,6 +14,7 @@
64341 #include <linux/init.h>
64342 #include <linux/mm.h>
64343 #include <linux/sched.h>
64344+#include <linux/grsecurity.h>
64345 #include <linux/syscore_ops.h>
64346 #include <linux/clocksource.h>
64347 #include <linux/jiffies.h>
64348@@ -361,6 +362,8 @@ int do_settimeofday(const struct timespe
64349 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
64350 return -EINVAL;
64351
64352+ gr_log_timechange();
64353+
64354 write_seqlock_irqsave(&xtime_lock, flags);
64355
64356 timekeeping_forward_now();
64357diff -urNp linux-3.0.4/kernel/time/timer_list.c linux-3.0.4/kernel/time/timer_list.c
64358--- linux-3.0.4/kernel/time/timer_list.c 2011-07-21 22:17:23.000000000 -0400
64359+++ linux-3.0.4/kernel/time/timer_list.c 2011-08-23 21:48:14.000000000 -0400
64360@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
64361
64362 static void print_name_offset(struct seq_file *m, void *sym)
64363 {
64364+#ifdef CONFIG_GRKERNSEC_HIDESYM
64365+ SEQ_printf(m, "<%p>", NULL);
64366+#else
64367 char symname[KSYM_NAME_LEN];
64368
64369 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
64370 SEQ_printf(m, "<%pK>", sym);
64371 else
64372 SEQ_printf(m, "%s", symname);
64373+#endif
64374 }
64375
64376 static void
64377@@ -112,7 +116,11 @@ next_one:
64378 static void
64379 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
64380 {
64381+#ifdef CONFIG_GRKERNSEC_HIDESYM
64382+ SEQ_printf(m, " .base: %p\n", NULL);
64383+#else
64384 SEQ_printf(m, " .base: %pK\n", base);
64385+#endif
64386 SEQ_printf(m, " .index: %d\n",
64387 base->index);
64388 SEQ_printf(m, " .resolution: %Lu nsecs\n",
64389@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs
64390 {
64391 struct proc_dir_entry *pe;
64392
64393+#ifdef CONFIG_GRKERNSEC_PROC_ADD
64394+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
64395+#else
64396 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
64397+#endif
64398 if (!pe)
64399 return -ENOMEM;
64400 return 0;
64401diff -urNp linux-3.0.4/kernel/time/timer_stats.c linux-3.0.4/kernel/time/timer_stats.c
64402--- linux-3.0.4/kernel/time/timer_stats.c 2011-07-21 22:17:23.000000000 -0400
64403+++ linux-3.0.4/kernel/time/timer_stats.c 2011-08-23 21:48:14.000000000 -0400
64404@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
64405 static unsigned long nr_entries;
64406 static struct entry entries[MAX_ENTRIES];
64407
64408-static atomic_t overflow_count;
64409+static atomic_unchecked_t overflow_count;
64410
64411 /*
64412 * The entries are in a hash-table, for fast lookup:
64413@@ -140,7 +140,7 @@ static void reset_entries(void)
64414 nr_entries = 0;
64415 memset(entries, 0, sizeof(entries));
64416 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
64417- atomic_set(&overflow_count, 0);
64418+ atomic_set_unchecked(&overflow_count, 0);
64419 }
64420
64421 static struct entry *alloc_entry(void)
64422@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
64423 if (likely(entry))
64424 entry->count++;
64425 else
64426- atomic_inc(&overflow_count);
64427+ atomic_inc_unchecked(&overflow_count);
64428
64429 out_unlock:
64430 raw_spin_unlock_irqrestore(lock, flags);
64431@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
64432
64433 static void print_name_offset(struct seq_file *m, unsigned long addr)
64434 {
64435+#ifdef CONFIG_GRKERNSEC_HIDESYM
64436+ seq_printf(m, "<%p>", NULL);
64437+#else
64438 char symname[KSYM_NAME_LEN];
64439
64440 if (lookup_symbol_name(addr, symname) < 0)
64441 seq_printf(m, "<%p>", (void *)addr);
64442 else
64443 seq_printf(m, "%s", symname);
64444+#endif
64445 }
64446
64447 static int tstats_show(struct seq_file *m, void *v)
64448@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
64449
64450 seq_puts(m, "Timer Stats Version: v0.2\n");
64451 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
64452- if (atomic_read(&overflow_count))
64453+ if (atomic_read_unchecked(&overflow_count))
64454 seq_printf(m, "Overflow: %d entries\n",
64455- atomic_read(&overflow_count));
64456+ atomic_read_unchecked(&overflow_count));
64457
64458 for (i = 0; i < nr_entries; i++) {
64459 entry = entries + i;
64460@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(voi
64461 {
64462 struct proc_dir_entry *pe;
64463
64464+#ifdef CONFIG_GRKERNSEC_PROC_ADD
64465+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
64466+#else
64467 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
64468+#endif
64469 if (!pe)
64470 return -ENOMEM;
64471 return 0;
64472diff -urNp linux-3.0.4/kernel/time.c linux-3.0.4/kernel/time.c
64473--- linux-3.0.4/kernel/time.c 2011-07-21 22:17:23.000000000 -0400
64474+++ linux-3.0.4/kernel/time.c 2011-08-23 21:48:14.000000000 -0400
64475@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct tim
64476 return error;
64477
64478 if (tz) {
64479+ /* we log in do_settimeofday called below, so don't log twice
64480+ */
64481+ if (!tv)
64482+ gr_log_timechange();
64483+
64484 /* SMP safe, global irq locking makes it work. */
64485 sys_tz = *tz;
64486 update_vsyscall_tz();
64487diff -urNp linux-3.0.4/kernel/timer.c linux-3.0.4/kernel/timer.c
64488--- linux-3.0.4/kernel/timer.c 2011-07-21 22:17:23.000000000 -0400
64489+++ linux-3.0.4/kernel/timer.c 2011-08-23 21:47:56.000000000 -0400
64490@@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
64491 /*
64492 * This function runs timers and the timer-tq in bottom half context.
64493 */
64494-static void run_timer_softirq(struct softirq_action *h)
64495+static void run_timer_softirq(void)
64496 {
64497 struct tvec_base *base = __this_cpu_read(tvec_bases);
64498
64499diff -urNp linux-3.0.4/kernel/trace/blktrace.c linux-3.0.4/kernel/trace/blktrace.c
64500--- linux-3.0.4/kernel/trace/blktrace.c 2011-07-21 22:17:23.000000000 -0400
64501+++ linux-3.0.4/kernel/trace/blktrace.c 2011-08-23 21:47:56.000000000 -0400
64502@@ -321,7 +321,7 @@ static ssize_t blk_dropped_read(struct f
64503 struct blk_trace *bt = filp->private_data;
64504 char buf[16];
64505
64506- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
64507+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
64508
64509 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
64510 }
64511@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(str
64512 return 1;
64513
64514 bt = buf->chan->private_data;
64515- atomic_inc(&bt->dropped);
64516+ atomic_inc_unchecked(&bt->dropped);
64517 return 0;
64518 }
64519
64520@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_qu
64521
64522 bt->dir = dir;
64523 bt->dev = dev;
64524- atomic_set(&bt->dropped, 0);
64525+ atomic_set_unchecked(&bt->dropped, 0);
64526
64527 ret = -EIO;
64528 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
64529diff -urNp linux-3.0.4/kernel/trace/ftrace.c linux-3.0.4/kernel/trace/ftrace.c
64530--- linux-3.0.4/kernel/trace/ftrace.c 2011-07-21 22:17:23.000000000 -0400
64531+++ linux-3.0.4/kernel/trace/ftrace.c 2011-08-23 21:47:56.000000000 -0400
64532@@ -1566,12 +1566,17 @@ ftrace_code_disable(struct module *mod,
64533 if (unlikely(ftrace_disabled))
64534 return 0;
64535
64536+ ret = ftrace_arch_code_modify_prepare();
64537+ FTRACE_WARN_ON(ret);
64538+ if (ret)
64539+ return 0;
64540+
64541 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
64542+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
64543 if (ret) {
64544 ftrace_bug(ret, ip);
64545- return 0;
64546 }
64547- return 1;
64548+ return ret ? 0 : 1;
64549 }
64550
64551 /*
64552@@ -2550,7 +2555,7 @@ static void ftrace_free_entry_rcu(struct
64553
64554 int
64555 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
64556- void *data)
64557+ void *data)
64558 {
64559 struct ftrace_func_probe *entry;
64560 struct ftrace_page *pg;
64561diff -urNp linux-3.0.4/kernel/trace/trace.c linux-3.0.4/kernel/trace/trace.c
64562--- linux-3.0.4/kernel/trace/trace.c 2011-07-21 22:17:23.000000000 -0400
64563+++ linux-3.0.4/kernel/trace/trace.c 2011-08-23 21:48:14.000000000 -0400
64564@@ -3339,6 +3339,8 @@ static ssize_t tracing_splice_read_pipe(
64565 size_t rem;
64566 unsigned int i;
64567
64568+ pax_track_stack();
64569+
64570 if (splice_grow_spd(pipe, &spd))
64571 return -ENOMEM;
64572
64573@@ -3822,6 +3824,8 @@ tracing_buffers_splice_read(struct file
64574 int entries, size, i;
64575 size_t ret;
64576
64577+ pax_track_stack();
64578+
64579 if (splice_grow_spd(pipe, &spd))
64580 return -ENOMEM;
64581
64582@@ -3990,10 +3994,9 @@ static const struct file_operations trac
64583 };
64584 #endif
64585
64586-static struct dentry *d_tracer;
64587-
64588 struct dentry *tracing_init_dentry(void)
64589 {
64590+ static struct dentry *d_tracer;
64591 static int once;
64592
64593 if (d_tracer)
64594@@ -4013,10 +4016,9 @@ struct dentry *tracing_init_dentry(void)
64595 return d_tracer;
64596 }
64597
64598-static struct dentry *d_percpu;
64599-
64600 struct dentry *tracing_dentry_percpu(void)
64601 {
64602+ static struct dentry *d_percpu;
64603 static int once;
64604 struct dentry *d_tracer;
64605
64606diff -urNp linux-3.0.4/kernel/trace/trace_events.c linux-3.0.4/kernel/trace/trace_events.c
64607--- linux-3.0.4/kernel/trace/trace_events.c 2011-09-02 18:11:21.000000000 -0400
64608+++ linux-3.0.4/kernel/trace/trace_events.c 2011-08-23 21:47:56.000000000 -0400
64609@@ -1318,10 +1318,6 @@ static LIST_HEAD(ftrace_module_file_list
64610 struct ftrace_module_file_ops {
64611 struct list_head list;
64612 struct module *mod;
64613- struct file_operations id;
64614- struct file_operations enable;
64615- struct file_operations format;
64616- struct file_operations filter;
64617 };
64618
64619 static struct ftrace_module_file_ops *
64620@@ -1342,17 +1338,12 @@ trace_create_file_ops(struct module *mod
64621
64622 file_ops->mod = mod;
64623
64624- file_ops->id = ftrace_event_id_fops;
64625- file_ops->id.owner = mod;
64626-
64627- file_ops->enable = ftrace_enable_fops;
64628- file_ops->enable.owner = mod;
64629-
64630- file_ops->filter = ftrace_event_filter_fops;
64631- file_ops->filter.owner = mod;
64632-
64633- file_ops->format = ftrace_event_format_fops;
64634- file_ops->format.owner = mod;
64635+ pax_open_kernel();
64636+ *(void **)&mod->trace_id.owner = mod;
64637+ *(void **)&mod->trace_enable.owner = mod;
64638+ *(void **)&mod->trace_filter.owner = mod;
64639+ *(void **)&mod->trace_format.owner = mod;
64640+ pax_close_kernel();
64641
64642 list_add(&file_ops->list, &ftrace_module_file_list);
64643
64644@@ -1376,8 +1367,8 @@ static void trace_module_add_events(stru
64645
64646 for_each_event(call, start, end) {
64647 __trace_add_event_call(*call, mod,
64648- &file_ops->id, &file_ops->enable,
64649- &file_ops->filter, &file_ops->format);
64650+ &mod->trace_id, &mod->trace_enable,
64651+ &mod->trace_filter, &mod->trace_format);
64652 }
64653 }
64654
64655diff -urNp linux-3.0.4/kernel/trace/trace_kprobe.c linux-3.0.4/kernel/trace/trace_kprobe.c
64656--- linux-3.0.4/kernel/trace/trace_kprobe.c 2011-07-21 22:17:23.000000000 -0400
64657+++ linux-3.0.4/kernel/trace/trace_kprobe.c 2011-10-06 04:17:55.000000000 -0400
64658@@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(me
64659 long ret;
64660 int maxlen = get_rloc_len(*(u32 *)dest);
64661 u8 *dst = get_rloc_data(dest);
64662- u8 *src = addr;
64663+ const u8 __user *src = (const u8 __force_user *)addr;
64664 mm_segment_t old_fs = get_fs();
64665 if (!maxlen)
64666 return;
64667@@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(me
64668 pagefault_disable();
64669 do
64670 ret = __copy_from_user_inatomic(dst++, src++, 1);
64671- while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
64672+ while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
64673 dst[-1] = '\0';
64674 pagefault_enable();
64675 set_fs(old_fs);
64676@@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(me
64677 ((u8 *)get_rloc_data(dest))[0] = '\0';
64678 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
64679 } else
64680- *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
64681+ *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
64682 get_rloc_offs(*(u32 *)dest));
64683 }
64684 /* Return the length of string -- including null terminal byte */
64685@@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(me
64686 set_fs(KERNEL_DS);
64687 pagefault_disable();
64688 do {
64689- ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
64690+ ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
64691 len++;
64692 } while (c && ret == 0 && len < MAX_STRING_SIZE);
64693 pagefault_enable();
64694diff -urNp linux-3.0.4/kernel/trace/trace_mmiotrace.c linux-3.0.4/kernel/trace/trace_mmiotrace.c
64695--- linux-3.0.4/kernel/trace/trace_mmiotrace.c 2011-07-21 22:17:23.000000000 -0400
64696+++ linux-3.0.4/kernel/trace/trace_mmiotrace.c 2011-08-23 21:47:56.000000000 -0400
64697@@ -24,7 +24,7 @@ struct header_iter {
64698 static struct trace_array *mmio_trace_array;
64699 static bool overrun_detected;
64700 static unsigned long prev_overruns;
64701-static atomic_t dropped_count;
64702+static atomic_unchecked_t dropped_count;
64703
64704 static void mmio_reset_data(struct trace_array *tr)
64705 {
64706@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iter
64707
64708 static unsigned long count_overruns(struct trace_iterator *iter)
64709 {
64710- unsigned long cnt = atomic_xchg(&dropped_count, 0);
64711+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
64712 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
64713
64714 if (over > prev_overruns)
64715@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct
64716 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
64717 sizeof(*entry), 0, pc);
64718 if (!event) {
64719- atomic_inc(&dropped_count);
64720+ atomic_inc_unchecked(&dropped_count);
64721 return;
64722 }
64723 entry = ring_buffer_event_data(event);
64724@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct
64725 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
64726 sizeof(*entry), 0, pc);
64727 if (!event) {
64728- atomic_inc(&dropped_count);
64729+ atomic_inc_unchecked(&dropped_count);
64730 return;
64731 }
64732 entry = ring_buffer_event_data(event);
64733diff -urNp linux-3.0.4/kernel/trace/trace_output.c linux-3.0.4/kernel/trace/trace_output.c
64734--- linux-3.0.4/kernel/trace/trace_output.c 2011-07-21 22:17:23.000000000 -0400
64735+++ linux-3.0.4/kernel/trace/trace_output.c 2011-08-23 21:47:56.000000000 -0400
64736@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s,
64737
64738 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
64739 if (!IS_ERR(p)) {
64740- p = mangle_path(s->buffer + s->len, p, "\n");
64741+ p = mangle_path(s->buffer + s->len, p, "\n\\");
64742 if (p) {
64743 s->len = p - s->buffer;
64744 return 1;
64745diff -urNp linux-3.0.4/kernel/trace/trace_stack.c linux-3.0.4/kernel/trace/trace_stack.c
64746--- linux-3.0.4/kernel/trace/trace_stack.c 2011-07-21 22:17:23.000000000 -0400
64747+++ linux-3.0.4/kernel/trace/trace_stack.c 2011-08-23 21:47:56.000000000 -0400
64748@@ -50,7 +50,7 @@ static inline void check_stack(void)
64749 return;
64750
64751 /* we do not handle interrupt stacks yet */
64752- if (!object_is_on_stack(&this_size))
64753+ if (!object_starts_on_stack(&this_size))
64754 return;
64755
64756 local_irq_save(flags);
64757diff -urNp linux-3.0.4/kernel/trace/trace_workqueue.c linux-3.0.4/kernel/trace/trace_workqueue.c
64758--- linux-3.0.4/kernel/trace/trace_workqueue.c 2011-07-21 22:17:23.000000000 -0400
64759+++ linux-3.0.4/kernel/trace/trace_workqueue.c 2011-08-23 21:47:56.000000000 -0400
64760@@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
64761 int cpu;
64762 pid_t pid;
64763 /* Can be inserted from interrupt or user context, need to be atomic */
64764- atomic_t inserted;
64765+ atomic_unchecked_t inserted;
64766 /*
64767 * Don't need to be atomic, works are serialized in a single workqueue thread
64768 * on a single CPU.
64769@@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
64770 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
64771 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
64772 if (node->pid == wq_thread->pid) {
64773- atomic_inc(&node->inserted);
64774+ atomic_inc_unchecked(&node->inserted);
64775 goto found;
64776 }
64777 }
64778@@ -210,7 +210,7 @@ static int workqueue_stat_show(struct se
64779 tsk = get_pid_task(pid, PIDTYPE_PID);
64780 if (tsk) {
64781 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
64782- atomic_read(&cws->inserted), cws->executed,
64783+ atomic_read_unchecked(&cws->inserted), cws->executed,
64784 tsk->comm);
64785 put_task_struct(tsk);
64786 }
64787diff -urNp linux-3.0.4/lib/bitmap.c linux-3.0.4/lib/bitmap.c
64788--- linux-3.0.4/lib/bitmap.c 2011-07-21 22:17:23.000000000 -0400
64789+++ linux-3.0.4/lib/bitmap.c 2011-10-06 04:17:55.000000000 -0400
64790@@ -421,7 +421,7 @@ int __bitmap_parse(const char *buf, unsi
64791 {
64792 int c, old_c, totaldigits, ndigits, nchunks, nbits;
64793 u32 chunk;
64794- const char __user *ubuf = buf;
64795+ const char __user *ubuf = (const char __force_user *)buf;
64796
64797 bitmap_zero(maskp, nmaskbits);
64798
64799@@ -506,7 +506,7 @@ int bitmap_parse_user(const char __user
64800 {
64801 if (!access_ok(VERIFY_READ, ubuf, ulen))
64802 return -EFAULT;
64803- return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
64804+ return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1, maskp, nmaskbits);
64805 }
64806 EXPORT_SYMBOL(bitmap_parse_user);
64807
64808@@ -596,7 +596,7 @@ static int __bitmap_parselist(const char
64809 {
64810 unsigned a, b;
64811 int c, old_c, totaldigits;
64812- const char __user *ubuf = buf;
64813+ const char __user *ubuf = (const char __force_user *)buf;
64814 int exp_digit, in_range;
64815
64816 totaldigits = c = 0;
64817@@ -696,7 +696,7 @@ int bitmap_parselist_user(const char __u
64818 {
64819 if (!access_ok(VERIFY_READ, ubuf, ulen))
64820 return -EFAULT;
64821- return __bitmap_parselist((const char *)ubuf,
64822+ return __bitmap_parselist((const char __force_kernel *)ubuf,
64823 ulen, 1, maskp, nmaskbits);
64824 }
64825 EXPORT_SYMBOL(bitmap_parselist_user);
64826diff -urNp linux-3.0.4/lib/bug.c linux-3.0.4/lib/bug.c
64827--- linux-3.0.4/lib/bug.c 2011-07-21 22:17:23.000000000 -0400
64828+++ linux-3.0.4/lib/bug.c 2011-08-23 21:47:56.000000000 -0400
64829@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned l
64830 return BUG_TRAP_TYPE_NONE;
64831
64832 bug = find_bug(bugaddr);
64833+ if (!bug)
64834+ return BUG_TRAP_TYPE_NONE;
64835
64836 file = NULL;
64837 line = 0;
64838diff -urNp linux-3.0.4/lib/debugobjects.c linux-3.0.4/lib/debugobjects.c
64839--- linux-3.0.4/lib/debugobjects.c 2011-07-21 22:17:23.000000000 -0400
64840+++ linux-3.0.4/lib/debugobjects.c 2011-08-23 21:47:56.000000000 -0400
64841@@ -284,7 +284,7 @@ static void debug_object_is_on_stack(voi
64842 if (limit > 4)
64843 return;
64844
64845- is_on_stack = object_is_on_stack(addr);
64846+ is_on_stack = object_starts_on_stack(addr);
64847 if (is_on_stack == onstack)
64848 return;
64849
64850diff -urNp linux-3.0.4/lib/devres.c linux-3.0.4/lib/devres.c
64851--- linux-3.0.4/lib/devres.c 2011-07-21 22:17:23.000000000 -0400
64852+++ linux-3.0.4/lib/devres.c 2011-10-06 04:17:55.000000000 -0400
64853@@ -81,7 +81,7 @@ void devm_iounmap(struct device *dev, vo
64854 {
64855 iounmap(addr);
64856 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
64857- (void *)addr));
64858+ (void __force *)addr));
64859 }
64860 EXPORT_SYMBOL(devm_iounmap);
64861
64862@@ -141,7 +141,7 @@ void devm_ioport_unmap(struct device *de
64863 {
64864 ioport_unmap(addr);
64865 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
64866- devm_ioport_map_match, (void *)addr));
64867+ devm_ioport_map_match, (void __force *)addr));
64868 }
64869 EXPORT_SYMBOL(devm_ioport_unmap);
64870
64871diff -urNp linux-3.0.4/lib/dma-debug.c linux-3.0.4/lib/dma-debug.c
64872--- linux-3.0.4/lib/dma-debug.c 2011-07-21 22:17:23.000000000 -0400
64873+++ linux-3.0.4/lib/dma-debug.c 2011-08-23 21:47:56.000000000 -0400
64874@@ -870,7 +870,7 @@ out:
64875
64876 static void check_for_stack(struct device *dev, void *addr)
64877 {
64878- if (object_is_on_stack(addr))
64879+ if (object_starts_on_stack(addr))
64880 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
64881 "stack [addr=%p]\n", addr);
64882 }
64883diff -urNp linux-3.0.4/lib/extable.c linux-3.0.4/lib/extable.c
64884--- linux-3.0.4/lib/extable.c 2011-07-21 22:17:23.000000000 -0400
64885+++ linux-3.0.4/lib/extable.c 2011-08-23 21:47:56.000000000 -0400
64886@@ -13,6 +13,7 @@
64887 #include <linux/init.h>
64888 #include <linux/sort.h>
64889 #include <asm/uaccess.h>
64890+#include <asm/pgtable.h>
64891
64892 #ifndef ARCH_HAS_SORT_EXTABLE
64893 /*
64894@@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const v
64895 void sort_extable(struct exception_table_entry *start,
64896 struct exception_table_entry *finish)
64897 {
64898+ pax_open_kernel();
64899 sort(start, finish - start, sizeof(struct exception_table_entry),
64900 cmp_ex, NULL);
64901+ pax_close_kernel();
64902 }
64903
64904 #ifdef CONFIG_MODULES
64905diff -urNp linux-3.0.4/lib/inflate.c linux-3.0.4/lib/inflate.c
64906--- linux-3.0.4/lib/inflate.c 2011-07-21 22:17:23.000000000 -0400
64907+++ linux-3.0.4/lib/inflate.c 2011-08-23 21:47:56.000000000 -0400
64908@@ -269,7 +269,7 @@ static void free(void *where)
64909 malloc_ptr = free_mem_ptr;
64910 }
64911 #else
64912-#define malloc(a) kmalloc(a, GFP_KERNEL)
64913+#define malloc(a) kmalloc((a), GFP_KERNEL)
64914 #define free(a) kfree(a)
64915 #endif
64916
64917diff -urNp linux-3.0.4/lib/Kconfig.debug linux-3.0.4/lib/Kconfig.debug
64918--- linux-3.0.4/lib/Kconfig.debug 2011-07-21 22:17:23.000000000 -0400
64919+++ linux-3.0.4/lib/Kconfig.debug 2011-08-23 21:48:14.000000000 -0400
64920@@ -1088,6 +1088,7 @@ config LATENCYTOP
64921 depends on DEBUG_KERNEL
64922 depends on STACKTRACE_SUPPORT
64923 depends on PROC_FS
64924+ depends on !GRKERNSEC_HIDESYM
64925 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
64926 select KALLSYMS
64927 select KALLSYMS_ALL
64928diff -urNp linux-3.0.4/lib/kref.c linux-3.0.4/lib/kref.c
64929--- linux-3.0.4/lib/kref.c 2011-07-21 22:17:23.000000000 -0400
64930+++ linux-3.0.4/lib/kref.c 2011-08-23 21:47:56.000000000 -0400
64931@@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
64932 */
64933 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
64934 {
64935- WARN_ON(release == NULL);
64936+ BUG_ON(release == NULL);
64937 WARN_ON(release == (void (*)(struct kref *))kfree);
64938
64939 if (atomic_dec_and_test(&kref->refcount)) {
64940diff -urNp linux-3.0.4/lib/radix-tree.c linux-3.0.4/lib/radix-tree.c
64941--- linux-3.0.4/lib/radix-tree.c 2011-07-21 22:17:23.000000000 -0400
64942+++ linux-3.0.4/lib/radix-tree.c 2011-08-23 21:47:56.000000000 -0400
64943@@ -80,7 +80,7 @@ struct radix_tree_preload {
64944 int nr;
64945 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
64946 };
64947-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
64948+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
64949
64950 static inline void *ptr_to_indirect(void *ptr)
64951 {
64952diff -urNp linux-3.0.4/lib/vsprintf.c linux-3.0.4/lib/vsprintf.c
64953--- linux-3.0.4/lib/vsprintf.c 2011-07-21 22:17:23.000000000 -0400
64954+++ linux-3.0.4/lib/vsprintf.c 2011-08-23 21:48:14.000000000 -0400
64955@@ -16,6 +16,9 @@
64956 * - scnprintf and vscnprintf
64957 */
64958
64959+#ifdef CONFIG_GRKERNSEC_HIDESYM
64960+#define __INCLUDED_BY_HIDESYM 1
64961+#endif
64962 #include <stdarg.h>
64963 #include <linux/module.h>
64964 #include <linux/types.h>
64965@@ -435,7 +438,7 @@ char *symbol_string(char *buf, char *end
64966 char sym[KSYM_SYMBOL_LEN];
64967 if (ext == 'B')
64968 sprint_backtrace(sym, value);
64969- else if (ext != 'f' && ext != 's')
64970+ else if (ext != 'f' && ext != 's' && ext != 'a')
64971 sprint_symbol(sym, value);
64972 else
64973 kallsyms_lookup(value, NULL, NULL, NULL, sym);
64974@@ -799,7 +802,11 @@ char *uuid_string(char *buf, char *end,
64975 return string(buf, end, uuid, spec);
64976 }
64977
64978+#ifdef CONFIG_GRKERNSEC_HIDESYM
64979+int kptr_restrict __read_mostly = 2;
64980+#else
64981 int kptr_restrict __read_mostly;
64982+#endif
64983
64984 /*
64985 * Show a '%p' thing. A kernel extension is that the '%p' is followed
64986@@ -813,6 +820,8 @@ int kptr_restrict __read_mostly;
64987 * - 'S' For symbolic direct pointers with offset
64988 * - 's' For symbolic direct pointers without offset
64989 * - 'B' For backtraced symbolic direct pointers with offset
64990+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
64991+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
64992 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
64993 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
64994 * - 'M' For a 6-byte MAC address, it prints the address in the
64995@@ -857,12 +866,12 @@ char *pointer(const char *fmt, char *buf
64996 {
64997 if (!ptr && *fmt != 'K') {
64998 /*
64999- * Print (null) with the same width as a pointer so it makes
65000+ * Print (nil) with the same width as a pointer so it makes
65001 * tabular output look nice.
65002 */
65003 if (spec.field_width == -1)
65004 spec.field_width = 2 * sizeof(void *);
65005- return string(buf, end, "(null)", spec);
65006+ return string(buf, end, "(nil)", spec);
65007 }
65008
65009 switch (*fmt) {
65010@@ -872,6 +881,13 @@ char *pointer(const char *fmt, char *buf
65011 /* Fallthrough */
65012 case 'S':
65013 case 's':
65014+#ifdef CONFIG_GRKERNSEC_HIDESYM
65015+ break;
65016+#else
65017+ return symbol_string(buf, end, ptr, spec, *fmt);
65018+#endif
65019+ case 'A':
65020+ case 'a':
65021 case 'B':
65022 return symbol_string(buf, end, ptr, spec, *fmt);
65023 case 'R':
65024@@ -1631,11 +1647,11 @@ int bstr_printf(char *buf, size_t size,
65025 typeof(type) value; \
65026 if (sizeof(type) == 8) { \
65027 args = PTR_ALIGN(args, sizeof(u32)); \
65028- *(u32 *)&value = *(u32 *)args; \
65029- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
65030+ *(u32 *)&value = *(const u32 *)args; \
65031+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
65032 } else { \
65033 args = PTR_ALIGN(args, sizeof(type)); \
65034- value = *(typeof(type) *)args; \
65035+ value = *(const typeof(type) *)args; \
65036 } \
65037 args += sizeof(type); \
65038 value; \
65039@@ -1698,7 +1714,7 @@ int bstr_printf(char *buf, size_t size,
65040 case FORMAT_TYPE_STR: {
65041 const char *str_arg = args;
65042 args += strlen(str_arg) + 1;
65043- str = string(str, end, (char *)str_arg, spec);
65044+ str = string(str, end, str_arg, spec);
65045 break;
65046 }
65047
65048diff -urNp linux-3.0.4/localversion-grsec linux-3.0.4/localversion-grsec
65049--- linux-3.0.4/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
65050+++ linux-3.0.4/localversion-grsec 2011-08-23 21:48:14.000000000 -0400
65051@@ -0,0 +1 @@
65052+-grsec
65053diff -urNp linux-3.0.4/Makefile linux-3.0.4/Makefile
65054--- linux-3.0.4/Makefile 2011-09-02 18:11:26.000000000 -0400
65055+++ linux-3.0.4/Makefile 2011-10-06 04:17:55.000000000 -0400
65056@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
65057
65058 HOSTCC = gcc
65059 HOSTCXX = g++
65060-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
65061-HOSTCXXFLAGS = -O2
65062+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
65063+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
65064+HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
65065
65066 # Decide whether to build built-in, modular, or both.
65067 # Normally, just do built-in.
65068@@ -365,10 +366,12 @@ LINUXINCLUDE := -I$(srctree)/arch/$(h
65069 KBUILD_CPPFLAGS := -D__KERNEL__
65070
65071 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
65072+ -W -Wno-unused-parameter -Wno-missing-field-initializers \
65073 -fno-strict-aliasing -fno-common \
65074 -Werror-implicit-function-declaration \
65075 -Wno-format-security \
65076 -fno-delete-null-pointer-checks
65077+KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
65078 KBUILD_AFLAGS_KERNEL :=
65079 KBUILD_CFLAGS_KERNEL :=
65080 KBUILD_AFLAGS := -D__ASSEMBLY__
65081@@ -407,8 +410,8 @@ export RCS_TAR_IGNORE := --exclude SCCS
65082 # Rules shared between *config targets and build targets
65083
65084 # Basic helpers built in scripts/
65085-PHONY += scripts_basic
65086-scripts_basic:
65087+PHONY += scripts_basic gcc-plugins
65088+scripts_basic: gcc-plugins
65089 $(Q)$(MAKE) $(build)=scripts/basic
65090 $(Q)rm -f .tmp_quiet_recordmcount
65091
65092@@ -564,6 +567,36 @@ else
65093 KBUILD_CFLAGS += -O2
65094 endif
65095
65096+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
65097+CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
65098+ifdef CONFIG_PAX_MEMORY_STACKLEAK
65099+STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -fplugin-arg-stackleak_plugin-track-lowest-sp=100
65100+endif
65101+ifdef CONFIG_KALLOCSTAT_PLUGIN
65102+KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
65103+endif
65104+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
65105+KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
65106+endif
65107+ifdef CONFIG_CHECKER_PLUGIN
65108+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
65109+CHECKER_PLUGIN := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
65110+endif
65111+endif
65112+GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN) $(CHECKER_PLUGIN)
65113+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
65114+gcc-plugins:
65115+ $(Q)$(MAKE) $(build)=tools/gcc
65116+else
65117+gcc-plugins:
65118+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
65119+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev.))
65120+else
65121+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
65122+endif
65123+ $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
65124+endif
65125+
65126 include $(srctree)/arch/$(SRCARCH)/Makefile
65127
65128 ifneq ($(CONFIG_FRAME_WARN),0)
65129@@ -708,7 +741,7 @@ export mod_strip_cmd
65130
65131
65132 ifeq ($(KBUILD_EXTMOD),)
65133-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
65134+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
65135
65136 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
65137 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
65138@@ -907,6 +940,8 @@ define rule_vmlinux-modpost
65139 endef
65140
65141 # vmlinux image - including updated kernel symbols
65142+$(vmlinux-all): KBUILD_CFLAGS += $(GCC_PLUGINS)
65143+$(vmlinux-all): gcc-plugins
65144 vmlinux: $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) vmlinux.o $(kallsyms.o) FORCE
65145 ifdef CONFIG_HEADERS_CHECK
65146 $(Q)$(MAKE) -f $(srctree)/Makefile headers_check
65147@@ -941,7 +976,8 @@ $(sort $(vmlinux-init) $(vmlinux-main))
65148 # Error messages still appears in the original language
65149
65150 PHONY += $(vmlinux-dirs)
65151-$(vmlinux-dirs): prepare scripts
65152+$(vmlinux-dirs): KBUILD_CFLAGS += $(GCC_PLUGINS)
65153+$(vmlinux-dirs): gcc-plugins prepare scripts
65154 $(Q)$(MAKE) $(build)=$@
65155
65156 # Store (new) KERNELRELASE string in include/config/kernel.release
65157@@ -986,6 +1022,7 @@ prepare0: archprepare FORCE
65158 $(Q)$(MAKE) $(build)=. missing-syscalls
65159
65160 # All the preparing..
65161+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS))
65162 prepare: prepare0
65163
65164 # Generate some files
65165@@ -1102,7 +1139,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modu
65166
65167 # Target to prepare building external modules
65168 PHONY += modules_prepare
65169-modules_prepare: prepare scripts
65170+modules_prepare: gcc-plugins prepare scripts
65171
65172 # Target to install modules
65173 PHONY += modules_install
65174@@ -1198,7 +1235,7 @@ distclean: mrproper
65175 @find $(srctree) $(RCS_FIND_IGNORE) \
65176 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
65177 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
65178- -o -name '.*.rej' -o -size 0 \
65179+ -o -name '.*.rej' -o -name '*.so' -o -size 0 \
65180 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
65181 -type f -print | xargs rm -f
65182
65183@@ -1359,6 +1396,7 @@ PHONY += $(module-dirs) modules
65184 $(module-dirs): crmodverdir $(objtree)/Module.symvers
65185 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
65186
65187+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
65188 modules: $(module-dirs)
65189 @$(kecho) ' Building modules, stage 2.';
65190 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
65191@@ -1485,17 +1523,19 @@ else
65192 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
65193 endif
65194
65195-%.s: %.c prepare scripts FORCE
65196+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS)
65197+%.s: %.c gcc-plugins prepare scripts FORCE
65198 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
65199 %.i: %.c prepare scripts FORCE
65200 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
65201-%.o: %.c prepare scripts FORCE
65202+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS)
65203+%.o: %.c gcc-plugins prepare scripts FORCE
65204 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
65205 %.lst: %.c prepare scripts FORCE
65206 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
65207-%.s: %.S prepare scripts FORCE
65208+%.s: %.S gcc-plugins prepare scripts FORCE
65209 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
65210-%.o: %.S prepare scripts FORCE
65211+%.o: %.S gcc-plugins prepare scripts FORCE
65212 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
65213 %.symtypes: %.c prepare scripts FORCE
65214 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
65215@@ -1505,11 +1545,13 @@ endif
65216 $(cmd_crmodverdir)
65217 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
65218 $(build)=$(build-dir)
65219-%/: prepare scripts FORCE
65220+%/: KBUILD_CFLAGS += $(GCC_PLUGINS)
65221+%/: gcc-plugins prepare scripts FORCE
65222 $(cmd_crmodverdir)
65223 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
65224 $(build)=$(build-dir)
65225-%.ko: prepare scripts FORCE
65226+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS)
65227+%.ko: gcc-plugins prepare scripts FORCE
65228 $(cmd_crmodverdir)
65229 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
65230 $(build)=$(build-dir) $(@:.ko=.o)
65231diff -urNp linux-3.0.4/mm/filemap.c linux-3.0.4/mm/filemap.c
65232--- linux-3.0.4/mm/filemap.c 2011-07-21 22:17:23.000000000 -0400
65233+++ linux-3.0.4/mm/filemap.c 2011-08-23 21:48:14.000000000 -0400
65234@@ -1763,7 +1763,7 @@ int generic_file_mmap(struct file * file
65235 struct address_space *mapping = file->f_mapping;
65236
65237 if (!mapping->a_ops->readpage)
65238- return -ENOEXEC;
65239+ return -ENODEV;
65240 file_accessed(file);
65241 vma->vm_ops = &generic_file_vm_ops;
65242 vma->vm_flags |= VM_CAN_NONLINEAR;
65243@@ -2169,6 +2169,7 @@ inline int generic_write_checks(struct f
65244 *pos = i_size_read(inode);
65245
65246 if (limit != RLIM_INFINITY) {
65247+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
65248 if (*pos >= limit) {
65249 send_sig(SIGXFSZ, current, 0);
65250 return -EFBIG;
65251diff -urNp linux-3.0.4/mm/fremap.c linux-3.0.4/mm/fremap.c
65252--- linux-3.0.4/mm/fremap.c 2011-07-21 22:17:23.000000000 -0400
65253+++ linux-3.0.4/mm/fremap.c 2011-08-23 21:47:56.000000000 -0400
65254@@ -156,6 +156,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
65255 retry:
65256 vma = find_vma(mm, start);
65257
65258+#ifdef CONFIG_PAX_SEGMEXEC
65259+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
65260+ goto out;
65261+#endif
65262+
65263 /*
65264 * Make sure the vma is shared, that it supports prefaulting,
65265 * and that the remapped range is valid and fully within
65266diff -urNp linux-3.0.4/mm/highmem.c linux-3.0.4/mm/highmem.c
65267--- linux-3.0.4/mm/highmem.c 2011-07-21 22:17:23.000000000 -0400
65268+++ linux-3.0.4/mm/highmem.c 2011-08-23 21:47:56.000000000 -0400
65269@@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
65270 * So no dangers, even with speculative execution.
65271 */
65272 page = pte_page(pkmap_page_table[i]);
65273+ pax_open_kernel();
65274 pte_clear(&init_mm, (unsigned long)page_address(page),
65275 &pkmap_page_table[i]);
65276-
65277+ pax_close_kernel();
65278 set_page_address(page, NULL);
65279 need_flush = 1;
65280 }
65281@@ -186,9 +187,11 @@ start:
65282 }
65283 }
65284 vaddr = PKMAP_ADDR(last_pkmap_nr);
65285+
65286+ pax_open_kernel();
65287 set_pte_at(&init_mm, vaddr,
65288 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
65289-
65290+ pax_close_kernel();
65291 pkmap_count[last_pkmap_nr] = 1;
65292 set_page_address(page, (void *)vaddr);
65293
65294diff -urNp linux-3.0.4/mm/huge_memory.c linux-3.0.4/mm/huge_memory.c
65295--- linux-3.0.4/mm/huge_memory.c 2011-07-21 22:17:23.000000000 -0400
65296+++ linux-3.0.4/mm/huge_memory.c 2011-08-23 21:47:56.000000000 -0400
65297@@ -702,7 +702,7 @@ out:
65298 * run pte_offset_map on the pmd, if an huge pmd could
65299 * materialize from under us from a different thread.
65300 */
65301- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
65302+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
65303 return VM_FAULT_OOM;
65304 /* if an huge pmd materialized from under us just retry later */
65305 if (unlikely(pmd_trans_huge(*pmd)))
65306diff -urNp linux-3.0.4/mm/hugetlb.c linux-3.0.4/mm/hugetlb.c
65307--- linux-3.0.4/mm/hugetlb.c 2011-07-21 22:17:23.000000000 -0400
65308+++ linux-3.0.4/mm/hugetlb.c 2011-08-23 21:47:56.000000000 -0400
65309@@ -2339,6 +2339,27 @@ static int unmap_ref_private(struct mm_s
65310 return 1;
65311 }
65312
65313+#ifdef CONFIG_PAX_SEGMEXEC
65314+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
65315+{
65316+ struct mm_struct *mm = vma->vm_mm;
65317+ struct vm_area_struct *vma_m;
65318+ unsigned long address_m;
65319+ pte_t *ptep_m;
65320+
65321+ vma_m = pax_find_mirror_vma(vma);
65322+ if (!vma_m)
65323+ return;
65324+
65325+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
65326+ address_m = address + SEGMEXEC_TASK_SIZE;
65327+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
65328+ get_page(page_m);
65329+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
65330+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
65331+}
65332+#endif
65333+
65334 /*
65335 * Hugetlb_cow() should be called with page lock of the original hugepage held.
65336 */
65337@@ -2440,6 +2461,11 @@ retry_avoidcopy:
65338 make_huge_pte(vma, new_page, 1));
65339 page_remove_rmap(old_page);
65340 hugepage_add_new_anon_rmap(new_page, vma, address);
65341+
65342+#ifdef CONFIG_PAX_SEGMEXEC
65343+ pax_mirror_huge_pte(vma, address, new_page);
65344+#endif
65345+
65346 /* Make the old page be freed below */
65347 new_page = old_page;
65348 mmu_notifier_invalidate_range_end(mm,
65349@@ -2591,6 +2617,10 @@ retry:
65350 && (vma->vm_flags & VM_SHARED)));
65351 set_huge_pte_at(mm, address, ptep, new_pte);
65352
65353+#ifdef CONFIG_PAX_SEGMEXEC
65354+ pax_mirror_huge_pte(vma, address, page);
65355+#endif
65356+
65357 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
65358 /* Optimization, do the COW without a second fault */
65359 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
65360@@ -2620,6 +2650,10 @@ int hugetlb_fault(struct mm_struct *mm,
65361 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
65362 struct hstate *h = hstate_vma(vma);
65363
65364+#ifdef CONFIG_PAX_SEGMEXEC
65365+ struct vm_area_struct *vma_m;
65366+#endif
65367+
65368 ptep = huge_pte_offset(mm, address);
65369 if (ptep) {
65370 entry = huge_ptep_get(ptep);
65371@@ -2631,6 +2665,26 @@ int hugetlb_fault(struct mm_struct *mm,
65372 VM_FAULT_SET_HINDEX(h - hstates);
65373 }
65374
65375+#ifdef CONFIG_PAX_SEGMEXEC
65376+ vma_m = pax_find_mirror_vma(vma);
65377+ if (vma_m) {
65378+ unsigned long address_m;
65379+
65380+ if (vma->vm_start > vma_m->vm_start) {
65381+ address_m = address;
65382+ address -= SEGMEXEC_TASK_SIZE;
65383+ vma = vma_m;
65384+ h = hstate_vma(vma);
65385+ } else
65386+ address_m = address + SEGMEXEC_TASK_SIZE;
65387+
65388+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
65389+ return VM_FAULT_OOM;
65390+ address_m &= HPAGE_MASK;
65391+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
65392+ }
65393+#endif
65394+
65395 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
65396 if (!ptep)
65397 return VM_FAULT_OOM;
65398diff -urNp linux-3.0.4/mm/internal.h linux-3.0.4/mm/internal.h
65399--- linux-3.0.4/mm/internal.h 2011-07-21 22:17:23.000000000 -0400
65400+++ linux-3.0.4/mm/internal.h 2011-08-23 21:47:56.000000000 -0400
65401@@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
65402 * in mm/page_alloc.c
65403 */
65404 extern void __free_pages_bootmem(struct page *page, unsigned int order);
65405+extern void free_compound_page(struct page *page);
65406 extern void prep_compound_page(struct page *page, unsigned long order);
65407 #ifdef CONFIG_MEMORY_FAILURE
65408 extern bool is_free_buddy_page(struct page *page);
65409diff -urNp linux-3.0.4/mm/Kconfig linux-3.0.4/mm/Kconfig
65410--- linux-3.0.4/mm/Kconfig 2011-07-21 22:17:23.000000000 -0400
65411+++ linux-3.0.4/mm/Kconfig 2011-08-23 21:48:14.000000000 -0400
65412@@ -240,7 +240,7 @@ config KSM
65413 config DEFAULT_MMAP_MIN_ADDR
65414 int "Low address space to protect from user allocation"
65415 depends on MMU
65416- default 4096
65417+ default 65536
65418 help
65419 This is the portion of low virtual memory which should be protected
65420 from userspace allocation. Keeping a user from writing to low pages
65421diff -urNp linux-3.0.4/mm/kmemleak.c linux-3.0.4/mm/kmemleak.c
65422--- linux-3.0.4/mm/kmemleak.c 2011-07-21 22:17:23.000000000 -0400
65423+++ linux-3.0.4/mm/kmemleak.c 2011-08-23 21:48:14.000000000 -0400
65424@@ -357,7 +357,7 @@ static void print_unreferenced(struct se
65425
65426 for (i = 0; i < object->trace_len; i++) {
65427 void *ptr = (void *)object->trace[i];
65428- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
65429+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
65430 }
65431 }
65432
65433diff -urNp linux-3.0.4/mm/maccess.c linux-3.0.4/mm/maccess.c
65434--- linux-3.0.4/mm/maccess.c 2011-07-21 22:17:23.000000000 -0400
65435+++ linux-3.0.4/mm/maccess.c 2011-10-06 04:17:55.000000000 -0400
65436@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, cons
65437 set_fs(KERNEL_DS);
65438 pagefault_disable();
65439 ret = __copy_from_user_inatomic(dst,
65440- (__force const void __user *)src, size);
65441+ (const void __force_user *)src, size);
65442 pagefault_enable();
65443 set_fs(old_fs);
65444
65445@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, con
65446
65447 set_fs(KERNEL_DS);
65448 pagefault_disable();
65449- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
65450+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
65451 pagefault_enable();
65452 set_fs(old_fs);
65453
65454diff -urNp linux-3.0.4/mm/madvise.c linux-3.0.4/mm/madvise.c
65455--- linux-3.0.4/mm/madvise.c 2011-07-21 22:17:23.000000000 -0400
65456+++ linux-3.0.4/mm/madvise.c 2011-08-23 21:47:56.000000000 -0400
65457@@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_a
65458 pgoff_t pgoff;
65459 unsigned long new_flags = vma->vm_flags;
65460
65461+#ifdef CONFIG_PAX_SEGMEXEC
65462+ struct vm_area_struct *vma_m;
65463+#endif
65464+
65465 switch (behavior) {
65466 case MADV_NORMAL:
65467 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
65468@@ -110,6 +114,13 @@ success:
65469 /*
65470 * vm_flags is protected by the mmap_sem held in write mode.
65471 */
65472+
65473+#ifdef CONFIG_PAX_SEGMEXEC
65474+ vma_m = pax_find_mirror_vma(vma);
65475+ if (vma_m)
65476+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
65477+#endif
65478+
65479 vma->vm_flags = new_flags;
65480
65481 out:
65482@@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_a
65483 struct vm_area_struct ** prev,
65484 unsigned long start, unsigned long end)
65485 {
65486+
65487+#ifdef CONFIG_PAX_SEGMEXEC
65488+ struct vm_area_struct *vma_m;
65489+#endif
65490+
65491 *prev = vma;
65492 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
65493 return -EINVAL;
65494@@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_a
65495 zap_page_range(vma, start, end - start, &details);
65496 } else
65497 zap_page_range(vma, start, end - start, NULL);
65498+
65499+#ifdef CONFIG_PAX_SEGMEXEC
65500+ vma_m = pax_find_mirror_vma(vma);
65501+ if (vma_m) {
65502+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
65503+ struct zap_details details = {
65504+ .nonlinear_vma = vma_m,
65505+ .last_index = ULONG_MAX,
65506+ };
65507+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
65508+ } else
65509+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
65510+ }
65511+#endif
65512+
65513 return 0;
65514 }
65515
65516@@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
65517 if (end < start)
65518 goto out;
65519
65520+#ifdef CONFIG_PAX_SEGMEXEC
65521+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
65522+ if (end > SEGMEXEC_TASK_SIZE)
65523+ goto out;
65524+ } else
65525+#endif
65526+
65527+ if (end > TASK_SIZE)
65528+ goto out;
65529+
65530 error = 0;
65531 if (end == start)
65532 goto out;
65533diff -urNp linux-3.0.4/mm/memory.c linux-3.0.4/mm/memory.c
65534--- linux-3.0.4/mm/memory.c 2011-09-02 18:11:21.000000000 -0400
65535+++ linux-3.0.4/mm/memory.c 2011-08-23 21:47:56.000000000 -0400
65536@@ -457,8 +457,12 @@ static inline void free_pmd_range(struct
65537 return;
65538
65539 pmd = pmd_offset(pud, start);
65540+
65541+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
65542 pud_clear(pud);
65543 pmd_free_tlb(tlb, pmd, start);
65544+#endif
65545+
65546 }
65547
65548 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
65549@@ -489,9 +493,12 @@ static inline void free_pud_range(struct
65550 if (end - 1 > ceiling - 1)
65551 return;
65552
65553+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
65554 pud = pud_offset(pgd, start);
65555 pgd_clear(pgd);
65556 pud_free_tlb(tlb, pud, start);
65557+#endif
65558+
65559 }
65560
65561 /*
65562@@ -1577,12 +1584,6 @@ no_page_table:
65563 return page;
65564 }
65565
65566-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
65567-{
65568- return stack_guard_page_start(vma, addr) ||
65569- stack_guard_page_end(vma, addr+PAGE_SIZE);
65570-}
65571-
65572 /**
65573 * __get_user_pages() - pin user pages in memory
65574 * @tsk: task_struct of target task
65575@@ -1655,10 +1656,10 @@ int __get_user_pages(struct task_struct
65576 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
65577 i = 0;
65578
65579- do {
65580+ while (nr_pages) {
65581 struct vm_area_struct *vma;
65582
65583- vma = find_extend_vma(mm, start);
65584+ vma = find_vma(mm, start);
65585 if (!vma && in_gate_area(mm, start)) {
65586 unsigned long pg = start & PAGE_MASK;
65587 pgd_t *pgd;
65588@@ -1706,7 +1707,7 @@ int __get_user_pages(struct task_struct
65589 goto next_page;
65590 }
65591
65592- if (!vma ||
65593+ if (!vma || start < vma->vm_start ||
65594 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
65595 !(vm_flags & vma->vm_flags))
65596 return i ? : -EFAULT;
65597@@ -1733,11 +1734,6 @@ int __get_user_pages(struct task_struct
65598 int ret;
65599 unsigned int fault_flags = 0;
65600
65601- /* For mlock, just skip the stack guard page. */
65602- if (foll_flags & FOLL_MLOCK) {
65603- if (stack_guard_page(vma, start))
65604- goto next_page;
65605- }
65606 if (foll_flags & FOLL_WRITE)
65607 fault_flags |= FAULT_FLAG_WRITE;
65608 if (nonblocking)
65609@@ -1811,7 +1807,7 @@ next_page:
65610 start += PAGE_SIZE;
65611 nr_pages--;
65612 } while (nr_pages && start < vma->vm_end);
65613- } while (nr_pages);
65614+ }
65615 return i;
65616 }
65617 EXPORT_SYMBOL(__get_user_pages);
65618@@ -2018,6 +2014,10 @@ static int insert_page(struct vm_area_st
65619 page_add_file_rmap(page);
65620 set_pte_at(mm, addr, pte, mk_pte(page, prot));
65621
65622+#ifdef CONFIG_PAX_SEGMEXEC
65623+ pax_mirror_file_pte(vma, addr, page, ptl);
65624+#endif
65625+
65626 retval = 0;
65627 pte_unmap_unlock(pte, ptl);
65628 return retval;
65629@@ -2052,10 +2052,22 @@ out:
65630 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
65631 struct page *page)
65632 {
65633+
65634+#ifdef CONFIG_PAX_SEGMEXEC
65635+ struct vm_area_struct *vma_m;
65636+#endif
65637+
65638 if (addr < vma->vm_start || addr >= vma->vm_end)
65639 return -EFAULT;
65640 if (!page_count(page))
65641 return -EINVAL;
65642+
65643+#ifdef CONFIG_PAX_SEGMEXEC
65644+ vma_m = pax_find_mirror_vma(vma);
65645+ if (vma_m)
65646+ vma_m->vm_flags |= VM_INSERTPAGE;
65647+#endif
65648+
65649 vma->vm_flags |= VM_INSERTPAGE;
65650 return insert_page(vma, addr, page, vma->vm_page_prot);
65651 }
65652@@ -2141,6 +2153,7 @@ int vm_insert_mixed(struct vm_area_struc
65653 unsigned long pfn)
65654 {
65655 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
65656+ BUG_ON(vma->vm_mirror);
65657
65658 if (addr < vma->vm_start || addr >= vma->vm_end)
65659 return -EFAULT;
65660@@ -2456,6 +2469,186 @@ static inline void cow_user_page(struct
65661 copy_user_highpage(dst, src, va, vma);
65662 }
65663
65664+#ifdef CONFIG_PAX_SEGMEXEC
65665+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
65666+{
65667+ struct mm_struct *mm = vma->vm_mm;
65668+ spinlock_t *ptl;
65669+ pte_t *pte, entry;
65670+
65671+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
65672+ entry = *pte;
65673+ if (!pte_present(entry)) {
65674+ if (!pte_none(entry)) {
65675+ BUG_ON(pte_file(entry));
65676+ free_swap_and_cache(pte_to_swp_entry(entry));
65677+ pte_clear_not_present_full(mm, address, pte, 0);
65678+ }
65679+ } else {
65680+ struct page *page;
65681+
65682+ flush_cache_page(vma, address, pte_pfn(entry));
65683+ entry = ptep_clear_flush(vma, address, pte);
65684+ BUG_ON(pte_dirty(entry));
65685+ page = vm_normal_page(vma, address, entry);
65686+ if (page) {
65687+ update_hiwater_rss(mm);
65688+ if (PageAnon(page))
65689+ dec_mm_counter_fast(mm, MM_ANONPAGES);
65690+ else
65691+ dec_mm_counter_fast(mm, MM_FILEPAGES);
65692+ page_remove_rmap(page);
65693+ page_cache_release(page);
65694+ }
65695+ }
65696+ pte_unmap_unlock(pte, ptl);
65697+}
65698+
65699+/* PaX: if vma is mirrored, synchronize the mirror's PTE
65700+ *
65701+ * the ptl of the lower mapped page is held on entry and is not released on exit
65702+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
65703+ */
65704+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
65705+{
65706+ struct mm_struct *mm = vma->vm_mm;
65707+ unsigned long address_m;
65708+ spinlock_t *ptl_m;
65709+ struct vm_area_struct *vma_m;
65710+ pmd_t *pmd_m;
65711+ pte_t *pte_m, entry_m;
65712+
65713+ BUG_ON(!page_m || !PageAnon(page_m));
65714+
65715+ vma_m = pax_find_mirror_vma(vma);
65716+ if (!vma_m)
65717+ return;
65718+
65719+ BUG_ON(!PageLocked(page_m));
65720+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
65721+ address_m = address + SEGMEXEC_TASK_SIZE;
65722+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
65723+ pte_m = pte_offset_map(pmd_m, address_m);
65724+ ptl_m = pte_lockptr(mm, pmd_m);
65725+ if (ptl != ptl_m) {
65726+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
65727+ if (!pte_none(*pte_m))
65728+ goto out;
65729+ }
65730+
65731+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
65732+ page_cache_get(page_m);
65733+ page_add_anon_rmap(page_m, vma_m, address_m);
65734+ inc_mm_counter_fast(mm, MM_ANONPAGES);
65735+ set_pte_at(mm, address_m, pte_m, entry_m);
65736+ update_mmu_cache(vma_m, address_m, entry_m);
65737+out:
65738+ if (ptl != ptl_m)
65739+ spin_unlock(ptl_m);
65740+ pte_unmap(pte_m);
65741+ unlock_page(page_m);
65742+}
65743+
65744+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
65745+{
65746+ struct mm_struct *mm = vma->vm_mm;
65747+ unsigned long address_m;
65748+ spinlock_t *ptl_m;
65749+ struct vm_area_struct *vma_m;
65750+ pmd_t *pmd_m;
65751+ pte_t *pte_m, entry_m;
65752+
65753+ BUG_ON(!page_m || PageAnon(page_m));
65754+
65755+ vma_m = pax_find_mirror_vma(vma);
65756+ if (!vma_m)
65757+ return;
65758+
65759+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
65760+ address_m = address + SEGMEXEC_TASK_SIZE;
65761+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
65762+ pte_m = pte_offset_map(pmd_m, address_m);
65763+ ptl_m = pte_lockptr(mm, pmd_m);
65764+ if (ptl != ptl_m) {
65765+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
65766+ if (!pte_none(*pte_m))
65767+ goto out;
65768+ }
65769+
65770+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
65771+ page_cache_get(page_m);
65772+ page_add_file_rmap(page_m);
65773+ inc_mm_counter_fast(mm, MM_FILEPAGES);
65774+ set_pte_at(mm, address_m, pte_m, entry_m);
65775+ update_mmu_cache(vma_m, address_m, entry_m);
65776+out:
65777+ if (ptl != ptl_m)
65778+ spin_unlock(ptl_m);
65779+ pte_unmap(pte_m);
65780+}
65781+
65782+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
65783+{
65784+ struct mm_struct *mm = vma->vm_mm;
65785+ unsigned long address_m;
65786+ spinlock_t *ptl_m;
65787+ struct vm_area_struct *vma_m;
65788+ pmd_t *pmd_m;
65789+ pte_t *pte_m, entry_m;
65790+
65791+ vma_m = pax_find_mirror_vma(vma);
65792+ if (!vma_m)
65793+ return;
65794+
65795+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
65796+ address_m = address + SEGMEXEC_TASK_SIZE;
65797+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
65798+ pte_m = pte_offset_map(pmd_m, address_m);
65799+ ptl_m = pte_lockptr(mm, pmd_m);
65800+ if (ptl != ptl_m) {
65801+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
65802+ if (!pte_none(*pte_m))
65803+ goto out;
65804+ }
65805+
65806+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
65807+ set_pte_at(mm, address_m, pte_m, entry_m);
65808+out:
65809+ if (ptl != ptl_m)
65810+ spin_unlock(ptl_m);
65811+ pte_unmap(pte_m);
65812+}
65813+
65814+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
65815+{
65816+ struct page *page_m;
65817+ pte_t entry;
65818+
65819+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
65820+ goto out;
65821+
65822+ entry = *pte;
65823+ page_m = vm_normal_page(vma, address, entry);
65824+ if (!page_m)
65825+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
65826+ else if (PageAnon(page_m)) {
65827+ if (pax_find_mirror_vma(vma)) {
65828+ pte_unmap_unlock(pte, ptl);
65829+ lock_page(page_m);
65830+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
65831+ if (pte_same(entry, *pte))
65832+ pax_mirror_anon_pte(vma, address, page_m, ptl);
65833+ else
65834+ unlock_page(page_m);
65835+ }
65836+ } else
65837+ pax_mirror_file_pte(vma, address, page_m, ptl);
65838+
65839+out:
65840+ pte_unmap_unlock(pte, ptl);
65841+}
65842+#endif
65843+
65844 /*
65845 * This routine handles present pages, when users try to write
65846 * to a shared page. It is done by copying the page to a new address
65847@@ -2667,6 +2860,12 @@ gotten:
65848 */
65849 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
65850 if (likely(pte_same(*page_table, orig_pte))) {
65851+
65852+#ifdef CONFIG_PAX_SEGMEXEC
65853+ if (pax_find_mirror_vma(vma))
65854+ BUG_ON(!trylock_page(new_page));
65855+#endif
65856+
65857 if (old_page) {
65858 if (!PageAnon(old_page)) {
65859 dec_mm_counter_fast(mm, MM_FILEPAGES);
65860@@ -2718,6 +2917,10 @@ gotten:
65861 page_remove_rmap(old_page);
65862 }
65863
65864+#ifdef CONFIG_PAX_SEGMEXEC
65865+ pax_mirror_anon_pte(vma, address, new_page, ptl);
65866+#endif
65867+
65868 /* Free the old page.. */
65869 new_page = old_page;
65870 ret |= VM_FAULT_WRITE;
65871@@ -2997,6 +3200,11 @@ static int do_swap_page(struct mm_struct
65872 swap_free(entry);
65873 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
65874 try_to_free_swap(page);
65875+
65876+#ifdef CONFIG_PAX_SEGMEXEC
65877+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
65878+#endif
65879+
65880 unlock_page(page);
65881 if (swapcache) {
65882 /*
65883@@ -3020,6 +3228,11 @@ static int do_swap_page(struct mm_struct
65884
65885 /* No need to invalidate - it was non-present before */
65886 update_mmu_cache(vma, address, page_table);
65887+
65888+#ifdef CONFIG_PAX_SEGMEXEC
65889+ pax_mirror_anon_pte(vma, address, page, ptl);
65890+#endif
65891+
65892 unlock:
65893 pte_unmap_unlock(page_table, ptl);
65894 out:
65895@@ -3039,40 +3252,6 @@ out_release:
65896 }
65897
65898 /*
65899- * This is like a special single-page "expand_{down|up}wards()",
65900- * except we must first make sure that 'address{-|+}PAGE_SIZE'
65901- * doesn't hit another vma.
65902- */
65903-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
65904-{
65905- address &= PAGE_MASK;
65906- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
65907- struct vm_area_struct *prev = vma->vm_prev;
65908-
65909- /*
65910- * Is there a mapping abutting this one below?
65911- *
65912- * That's only ok if it's the same stack mapping
65913- * that has gotten split..
65914- */
65915- if (prev && prev->vm_end == address)
65916- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
65917-
65918- expand_downwards(vma, address - PAGE_SIZE);
65919- }
65920- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
65921- struct vm_area_struct *next = vma->vm_next;
65922-
65923- /* As VM_GROWSDOWN but s/below/above/ */
65924- if (next && next->vm_start == address + PAGE_SIZE)
65925- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
65926-
65927- expand_upwards(vma, address + PAGE_SIZE);
65928- }
65929- return 0;
65930-}
65931-
65932-/*
65933 * We enter with non-exclusive mmap_sem (to exclude vma changes,
65934 * but allow concurrent faults), and pte mapped but not yet locked.
65935 * We return with mmap_sem still held, but pte unmapped and unlocked.
65936@@ -3081,27 +3260,23 @@ static int do_anonymous_page(struct mm_s
65937 unsigned long address, pte_t *page_table, pmd_t *pmd,
65938 unsigned int flags)
65939 {
65940- struct page *page;
65941+ struct page *page = NULL;
65942 spinlock_t *ptl;
65943 pte_t entry;
65944
65945- pte_unmap(page_table);
65946-
65947- /* Check if we need to add a guard page to the stack */
65948- if (check_stack_guard_page(vma, address) < 0)
65949- return VM_FAULT_SIGBUS;
65950-
65951- /* Use the zero-page for reads */
65952 if (!(flags & FAULT_FLAG_WRITE)) {
65953 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
65954 vma->vm_page_prot));
65955- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
65956+ ptl = pte_lockptr(mm, pmd);
65957+ spin_lock(ptl);
65958 if (!pte_none(*page_table))
65959 goto unlock;
65960 goto setpte;
65961 }
65962
65963 /* Allocate our own private page. */
65964+ pte_unmap(page_table);
65965+
65966 if (unlikely(anon_vma_prepare(vma)))
65967 goto oom;
65968 page = alloc_zeroed_user_highpage_movable(vma, address);
65969@@ -3120,6 +3295,11 @@ static int do_anonymous_page(struct mm_s
65970 if (!pte_none(*page_table))
65971 goto release;
65972
65973+#ifdef CONFIG_PAX_SEGMEXEC
65974+ if (pax_find_mirror_vma(vma))
65975+ BUG_ON(!trylock_page(page));
65976+#endif
65977+
65978 inc_mm_counter_fast(mm, MM_ANONPAGES);
65979 page_add_new_anon_rmap(page, vma, address);
65980 setpte:
65981@@ -3127,6 +3307,12 @@ setpte:
65982
65983 /* No need to invalidate - it was non-present before */
65984 update_mmu_cache(vma, address, page_table);
65985+
65986+#ifdef CONFIG_PAX_SEGMEXEC
65987+ if (page)
65988+ pax_mirror_anon_pte(vma, address, page, ptl);
65989+#endif
65990+
65991 unlock:
65992 pte_unmap_unlock(page_table, ptl);
65993 return 0;
65994@@ -3264,6 +3450,12 @@ static int __do_fault(struct mm_struct *
65995 */
65996 /* Only go through if we didn't race with anybody else... */
65997 if (likely(pte_same(*page_table, orig_pte))) {
65998+
65999+#ifdef CONFIG_PAX_SEGMEXEC
66000+ if (anon && pax_find_mirror_vma(vma))
66001+ BUG_ON(!trylock_page(page));
66002+#endif
66003+
66004 flush_icache_page(vma, page);
66005 entry = mk_pte(page, vma->vm_page_prot);
66006 if (flags & FAULT_FLAG_WRITE)
66007@@ -3283,6 +3475,14 @@ static int __do_fault(struct mm_struct *
66008
66009 /* no need to invalidate: a not-present page won't be cached */
66010 update_mmu_cache(vma, address, page_table);
66011+
66012+#ifdef CONFIG_PAX_SEGMEXEC
66013+ if (anon)
66014+ pax_mirror_anon_pte(vma, address, page, ptl);
66015+ else
66016+ pax_mirror_file_pte(vma, address, page, ptl);
66017+#endif
66018+
66019 } else {
66020 if (charged)
66021 mem_cgroup_uncharge_page(page);
66022@@ -3430,6 +3630,12 @@ int handle_pte_fault(struct mm_struct *m
66023 if (flags & FAULT_FLAG_WRITE)
66024 flush_tlb_fix_spurious_fault(vma, address);
66025 }
66026+
66027+#ifdef CONFIG_PAX_SEGMEXEC
66028+ pax_mirror_pte(vma, address, pte, pmd, ptl);
66029+ return 0;
66030+#endif
66031+
66032 unlock:
66033 pte_unmap_unlock(pte, ptl);
66034 return 0;
66035@@ -3446,6 +3652,10 @@ int handle_mm_fault(struct mm_struct *mm
66036 pmd_t *pmd;
66037 pte_t *pte;
66038
66039+#ifdef CONFIG_PAX_SEGMEXEC
66040+ struct vm_area_struct *vma_m;
66041+#endif
66042+
66043 __set_current_state(TASK_RUNNING);
66044
66045 count_vm_event(PGFAULT);
66046@@ -3457,6 +3667,34 @@ int handle_mm_fault(struct mm_struct *mm
66047 if (unlikely(is_vm_hugetlb_page(vma)))
66048 return hugetlb_fault(mm, vma, address, flags);
66049
66050+#ifdef CONFIG_PAX_SEGMEXEC
66051+ vma_m = pax_find_mirror_vma(vma);
66052+ if (vma_m) {
66053+ unsigned long address_m;
66054+ pgd_t *pgd_m;
66055+ pud_t *pud_m;
66056+ pmd_t *pmd_m;
66057+
66058+ if (vma->vm_start > vma_m->vm_start) {
66059+ address_m = address;
66060+ address -= SEGMEXEC_TASK_SIZE;
66061+ vma = vma_m;
66062+ } else
66063+ address_m = address + SEGMEXEC_TASK_SIZE;
66064+
66065+ pgd_m = pgd_offset(mm, address_m);
66066+ pud_m = pud_alloc(mm, pgd_m, address_m);
66067+ if (!pud_m)
66068+ return VM_FAULT_OOM;
66069+ pmd_m = pmd_alloc(mm, pud_m, address_m);
66070+ if (!pmd_m)
66071+ return VM_FAULT_OOM;
66072+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
66073+ return VM_FAULT_OOM;
66074+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
66075+ }
66076+#endif
66077+
66078 pgd = pgd_offset(mm, address);
66079 pud = pud_alloc(mm, pgd, address);
66080 if (!pud)
66081@@ -3486,7 +3724,7 @@ int handle_mm_fault(struct mm_struct *mm
66082 * run pte_offset_map on the pmd, if an huge pmd could
66083 * materialize from under us from a different thread.
66084 */
66085- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
66086+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
66087 return VM_FAULT_OOM;
66088 /* if an huge pmd materialized from under us just retry later */
66089 if (unlikely(pmd_trans_huge(*pmd)))
66090@@ -3590,7 +3828,7 @@ static int __init gate_vma_init(void)
66091 gate_vma.vm_start = FIXADDR_USER_START;
66092 gate_vma.vm_end = FIXADDR_USER_END;
66093 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
66094- gate_vma.vm_page_prot = __P101;
66095+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
66096 /*
66097 * Make sure the vDSO gets into every core dump.
66098 * Dumping its contents makes post-mortem fully interpretable later
66099diff -urNp linux-3.0.4/mm/memory-failure.c linux-3.0.4/mm/memory-failure.c
66100--- linux-3.0.4/mm/memory-failure.c 2011-07-21 22:17:23.000000000 -0400
66101+++ linux-3.0.4/mm/memory-failure.c 2011-10-06 04:17:55.000000000 -0400
66102@@ -59,7 +59,7 @@ int sysctl_memory_failure_early_kill __r
66103
66104 int sysctl_memory_failure_recovery __read_mostly = 1;
66105
66106-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
66107+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
66108
66109 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
66110
66111@@ -200,7 +200,7 @@ static int kill_proc_ao(struct task_stru
66112 si.si_signo = SIGBUS;
66113 si.si_errno = 0;
66114 si.si_code = BUS_MCEERR_AO;
66115- si.si_addr = (void *)addr;
66116+ si.si_addr = (void __user *)addr;
66117 #ifdef __ARCH_SI_TRAPNO
66118 si.si_trapno = trapno;
66119 #endif
66120@@ -1008,7 +1008,7 @@ int __memory_failure(unsigned long pfn,
66121 }
66122
66123 nr_pages = 1 << compound_trans_order(hpage);
66124- atomic_long_add(nr_pages, &mce_bad_pages);
66125+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
66126
66127 /*
66128 * We need/can do nothing about count=0 pages.
66129@@ -1038,7 +1038,7 @@ int __memory_failure(unsigned long pfn,
66130 if (!PageHWPoison(hpage)
66131 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
66132 || (p != hpage && TestSetPageHWPoison(hpage))) {
66133- atomic_long_sub(nr_pages, &mce_bad_pages);
66134+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
66135 return 0;
66136 }
66137 set_page_hwpoison_huge_page(hpage);
66138@@ -1096,7 +1096,7 @@ int __memory_failure(unsigned long pfn,
66139 }
66140 if (hwpoison_filter(p)) {
66141 if (TestClearPageHWPoison(p))
66142- atomic_long_sub(nr_pages, &mce_bad_pages);
66143+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
66144 unlock_page(hpage);
66145 put_page(hpage);
66146 return 0;
66147@@ -1222,7 +1222,7 @@ int unpoison_memory(unsigned long pfn)
66148 return 0;
66149 }
66150 if (TestClearPageHWPoison(p))
66151- atomic_long_sub(nr_pages, &mce_bad_pages);
66152+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
66153 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
66154 return 0;
66155 }
66156@@ -1236,7 +1236,7 @@ int unpoison_memory(unsigned long pfn)
66157 */
66158 if (TestClearPageHWPoison(page)) {
66159 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
66160- atomic_long_sub(nr_pages, &mce_bad_pages);
66161+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
66162 freeit = 1;
66163 if (PageHuge(page))
66164 clear_page_hwpoison_huge_page(page);
66165@@ -1349,7 +1349,7 @@ static int soft_offline_huge_page(struct
66166 }
66167 done:
66168 if (!PageHWPoison(hpage))
66169- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
66170+ atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
66171 set_page_hwpoison_huge_page(hpage);
66172 dequeue_hwpoisoned_huge_page(hpage);
66173 /* keep elevated page count for bad page */
66174@@ -1480,7 +1480,7 @@ int soft_offline_page(struct page *page,
66175 return ret;
66176
66177 done:
66178- atomic_long_add(1, &mce_bad_pages);
66179+ atomic_long_add_unchecked(1, &mce_bad_pages);
66180 SetPageHWPoison(page);
66181 /* keep elevated page count for bad page */
66182 return ret;
66183diff -urNp linux-3.0.4/mm/mempolicy.c linux-3.0.4/mm/mempolicy.c
66184--- linux-3.0.4/mm/mempolicy.c 2011-07-21 22:17:23.000000000 -0400
66185+++ linux-3.0.4/mm/mempolicy.c 2011-08-23 21:48:14.000000000 -0400
66186@@ -639,6 +639,10 @@ static int mbind_range(struct mm_struct
66187 unsigned long vmstart;
66188 unsigned long vmend;
66189
66190+#ifdef CONFIG_PAX_SEGMEXEC
66191+ struct vm_area_struct *vma_m;
66192+#endif
66193+
66194 vma = find_vma_prev(mm, start, &prev);
66195 if (!vma || vma->vm_start > start)
66196 return -EFAULT;
66197@@ -669,6 +673,16 @@ static int mbind_range(struct mm_struct
66198 err = policy_vma(vma, new_pol);
66199 if (err)
66200 goto out;
66201+
66202+#ifdef CONFIG_PAX_SEGMEXEC
66203+ vma_m = pax_find_mirror_vma(vma);
66204+ if (vma_m) {
66205+ err = policy_vma(vma_m, new_pol);
66206+ if (err)
66207+ goto out;
66208+ }
66209+#endif
66210+
66211 }
66212
66213 out:
66214@@ -1102,6 +1116,17 @@ static long do_mbind(unsigned long start
66215
66216 if (end < start)
66217 return -EINVAL;
66218+
66219+#ifdef CONFIG_PAX_SEGMEXEC
66220+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
66221+ if (end > SEGMEXEC_TASK_SIZE)
66222+ return -EINVAL;
66223+ } else
66224+#endif
66225+
66226+ if (end > TASK_SIZE)
66227+ return -EINVAL;
66228+
66229 if (end == start)
66230 return 0;
66231
66232@@ -1320,6 +1345,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
66233 if (!mm)
66234 goto out;
66235
66236+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66237+ if (mm != current->mm &&
66238+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
66239+ err = -EPERM;
66240+ goto out;
66241+ }
66242+#endif
66243+
66244 /*
66245 * Check if this process has the right to modify the specified
66246 * process. The right exists if the process has administrative
66247@@ -1329,8 +1362,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
66248 rcu_read_lock();
66249 tcred = __task_cred(task);
66250 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
66251- cred->uid != tcred->suid && cred->uid != tcred->uid &&
66252- !capable(CAP_SYS_NICE)) {
66253+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
66254 rcu_read_unlock();
66255 err = -EPERM;
66256 goto out;
66257diff -urNp linux-3.0.4/mm/migrate.c linux-3.0.4/mm/migrate.c
66258--- linux-3.0.4/mm/migrate.c 2011-07-21 22:17:23.000000000 -0400
66259+++ linux-3.0.4/mm/migrate.c 2011-08-23 21:48:14.000000000 -0400
66260@@ -1124,6 +1124,8 @@ static int do_pages_move(struct mm_struc
66261 unsigned long chunk_start;
66262 int err;
66263
66264+ pax_track_stack();
66265+
66266 task_nodes = cpuset_mems_allowed(task);
66267
66268 err = -ENOMEM;
66269@@ -1308,6 +1310,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
66270 if (!mm)
66271 return -EINVAL;
66272
66273+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66274+ if (mm != current->mm &&
66275+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
66276+ err = -EPERM;
66277+ goto out;
66278+ }
66279+#endif
66280+
66281 /*
66282 * Check if this process has the right to modify the specified
66283 * process. The right exists if the process has administrative
66284@@ -1317,8 +1327,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
66285 rcu_read_lock();
66286 tcred = __task_cred(task);
66287 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
66288- cred->uid != tcred->suid && cred->uid != tcred->uid &&
66289- !capable(CAP_SYS_NICE)) {
66290+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
66291 rcu_read_unlock();
66292 err = -EPERM;
66293 goto out;
66294diff -urNp linux-3.0.4/mm/mlock.c linux-3.0.4/mm/mlock.c
66295--- linux-3.0.4/mm/mlock.c 2011-07-21 22:17:23.000000000 -0400
66296+++ linux-3.0.4/mm/mlock.c 2011-08-23 21:48:14.000000000 -0400
66297@@ -13,6 +13,7 @@
66298 #include <linux/pagemap.h>
66299 #include <linux/mempolicy.h>
66300 #include <linux/syscalls.h>
66301+#include <linux/security.h>
66302 #include <linux/sched.h>
66303 #include <linux/module.h>
66304 #include <linux/rmap.h>
66305@@ -377,6 +378,9 @@ static int do_mlock(unsigned long start,
66306 return -EINVAL;
66307 if (end == start)
66308 return 0;
66309+ if (end > TASK_SIZE)
66310+ return -EINVAL;
66311+
66312 vma = find_vma_prev(current->mm, start, &prev);
66313 if (!vma || vma->vm_start > start)
66314 return -ENOMEM;
66315@@ -387,6 +391,11 @@ static int do_mlock(unsigned long start,
66316 for (nstart = start ; ; ) {
66317 vm_flags_t newflags;
66318
66319+#ifdef CONFIG_PAX_SEGMEXEC
66320+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
66321+ break;
66322+#endif
66323+
66324 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
66325
66326 newflags = vma->vm_flags | VM_LOCKED;
66327@@ -492,6 +501,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
66328 lock_limit >>= PAGE_SHIFT;
66329
66330 /* check against resource limits */
66331+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
66332 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
66333 error = do_mlock(start, len, 1);
66334 up_write(&current->mm->mmap_sem);
66335@@ -515,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
66336 static int do_mlockall(int flags)
66337 {
66338 struct vm_area_struct * vma, * prev = NULL;
66339- unsigned int def_flags = 0;
66340
66341 if (flags & MCL_FUTURE)
66342- def_flags = VM_LOCKED;
66343- current->mm->def_flags = def_flags;
66344+ current->mm->def_flags |= VM_LOCKED;
66345+ else
66346+ current->mm->def_flags &= ~VM_LOCKED;
66347 if (flags == MCL_FUTURE)
66348 goto out;
66349
66350 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
66351 vm_flags_t newflags;
66352
66353+#ifdef CONFIG_PAX_SEGMEXEC
66354+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
66355+ break;
66356+#endif
66357+
66358+ BUG_ON(vma->vm_end > TASK_SIZE);
66359 newflags = vma->vm_flags | VM_LOCKED;
66360 if (!(flags & MCL_CURRENT))
66361 newflags &= ~VM_LOCKED;
66362@@ -557,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
66363 lock_limit >>= PAGE_SHIFT;
66364
66365 ret = -ENOMEM;
66366+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
66367 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
66368 capable(CAP_IPC_LOCK))
66369 ret = do_mlockall(flags);
66370diff -urNp linux-3.0.4/mm/mmap.c linux-3.0.4/mm/mmap.c
66371--- linux-3.0.4/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
66372+++ linux-3.0.4/mm/mmap.c 2011-08-23 21:48:14.000000000 -0400
66373@@ -46,6 +46,16 @@
66374 #define arch_rebalance_pgtables(addr, len) (addr)
66375 #endif
66376
66377+static inline void verify_mm_writelocked(struct mm_struct *mm)
66378+{
66379+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
66380+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
66381+ up_read(&mm->mmap_sem);
66382+ BUG();
66383+ }
66384+#endif
66385+}
66386+
66387 static void unmap_region(struct mm_struct *mm,
66388 struct vm_area_struct *vma, struct vm_area_struct *prev,
66389 unsigned long start, unsigned long end);
66390@@ -71,22 +81,32 @@ static void unmap_region(struct mm_struc
66391 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
66392 *
66393 */
66394-pgprot_t protection_map[16] = {
66395+pgprot_t protection_map[16] __read_only = {
66396 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
66397 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
66398 };
66399
66400-pgprot_t vm_get_page_prot(unsigned long vm_flags)
66401+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
66402 {
66403- return __pgprot(pgprot_val(protection_map[vm_flags &
66404+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
66405 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
66406 pgprot_val(arch_vm_get_page_prot(vm_flags)));
66407+
66408+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
66409+ if (!(__supported_pte_mask & _PAGE_NX) &&
66410+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
66411+ (vm_flags & (VM_READ | VM_WRITE)))
66412+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
66413+#endif
66414+
66415+ return prot;
66416 }
66417 EXPORT_SYMBOL(vm_get_page_prot);
66418
66419 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
66420 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
66421 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
66422+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
66423 /*
66424 * Make sure vm_committed_as in one cacheline and not cacheline shared with
66425 * other variables. It can be updated by several CPUs frequently.
66426@@ -236,6 +256,7 @@ static struct vm_area_struct *remove_vma
66427 struct vm_area_struct *next = vma->vm_next;
66428
66429 might_sleep();
66430+ BUG_ON(vma->vm_mirror);
66431 if (vma->vm_ops && vma->vm_ops->close)
66432 vma->vm_ops->close(vma);
66433 if (vma->vm_file) {
66434@@ -280,6 +301,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
66435 * not page aligned -Ram Gupta
66436 */
66437 rlim = rlimit(RLIMIT_DATA);
66438+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
66439 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
66440 (mm->end_data - mm->start_data) > rlim)
66441 goto out;
66442@@ -697,6 +719,12 @@ static int
66443 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
66444 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
66445 {
66446+
66447+#ifdef CONFIG_PAX_SEGMEXEC
66448+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
66449+ return 0;
66450+#endif
66451+
66452 if (is_mergeable_vma(vma, file, vm_flags) &&
66453 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
66454 if (vma->vm_pgoff == vm_pgoff)
66455@@ -716,6 +744,12 @@ static int
66456 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
66457 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
66458 {
66459+
66460+#ifdef CONFIG_PAX_SEGMEXEC
66461+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
66462+ return 0;
66463+#endif
66464+
66465 if (is_mergeable_vma(vma, file, vm_flags) &&
66466 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
66467 pgoff_t vm_pglen;
66468@@ -758,13 +792,20 @@ can_vma_merge_after(struct vm_area_struc
66469 struct vm_area_struct *vma_merge(struct mm_struct *mm,
66470 struct vm_area_struct *prev, unsigned long addr,
66471 unsigned long end, unsigned long vm_flags,
66472- struct anon_vma *anon_vma, struct file *file,
66473+ struct anon_vma *anon_vma, struct file *file,
66474 pgoff_t pgoff, struct mempolicy *policy)
66475 {
66476 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
66477 struct vm_area_struct *area, *next;
66478 int err;
66479
66480+#ifdef CONFIG_PAX_SEGMEXEC
66481+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
66482+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
66483+
66484+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
66485+#endif
66486+
66487 /*
66488 * We later require that vma->vm_flags == vm_flags,
66489 * so this tests vma->vm_flags & VM_SPECIAL, too.
66490@@ -780,6 +821,15 @@ struct vm_area_struct *vma_merge(struct
66491 if (next && next->vm_end == end) /* cases 6, 7, 8 */
66492 next = next->vm_next;
66493
66494+#ifdef CONFIG_PAX_SEGMEXEC
66495+ if (prev)
66496+ prev_m = pax_find_mirror_vma(prev);
66497+ if (area)
66498+ area_m = pax_find_mirror_vma(area);
66499+ if (next)
66500+ next_m = pax_find_mirror_vma(next);
66501+#endif
66502+
66503 /*
66504 * Can it merge with the predecessor?
66505 */
66506@@ -799,9 +849,24 @@ struct vm_area_struct *vma_merge(struct
66507 /* cases 1, 6 */
66508 err = vma_adjust(prev, prev->vm_start,
66509 next->vm_end, prev->vm_pgoff, NULL);
66510- } else /* cases 2, 5, 7 */
66511+
66512+#ifdef CONFIG_PAX_SEGMEXEC
66513+ if (!err && prev_m)
66514+ err = vma_adjust(prev_m, prev_m->vm_start,
66515+ next_m->vm_end, prev_m->vm_pgoff, NULL);
66516+#endif
66517+
66518+ } else { /* cases 2, 5, 7 */
66519 err = vma_adjust(prev, prev->vm_start,
66520 end, prev->vm_pgoff, NULL);
66521+
66522+#ifdef CONFIG_PAX_SEGMEXEC
66523+ if (!err && prev_m)
66524+ err = vma_adjust(prev_m, prev_m->vm_start,
66525+ end_m, prev_m->vm_pgoff, NULL);
66526+#endif
66527+
66528+ }
66529 if (err)
66530 return NULL;
66531 khugepaged_enter_vma_merge(prev);
66532@@ -815,12 +880,27 @@ struct vm_area_struct *vma_merge(struct
66533 mpol_equal(policy, vma_policy(next)) &&
66534 can_vma_merge_before(next, vm_flags,
66535 anon_vma, file, pgoff+pglen)) {
66536- if (prev && addr < prev->vm_end) /* case 4 */
66537+ if (prev && addr < prev->vm_end) { /* case 4 */
66538 err = vma_adjust(prev, prev->vm_start,
66539 addr, prev->vm_pgoff, NULL);
66540- else /* cases 3, 8 */
66541+
66542+#ifdef CONFIG_PAX_SEGMEXEC
66543+ if (!err && prev_m)
66544+ err = vma_adjust(prev_m, prev_m->vm_start,
66545+ addr_m, prev_m->vm_pgoff, NULL);
66546+#endif
66547+
66548+ } else { /* cases 3, 8 */
66549 err = vma_adjust(area, addr, next->vm_end,
66550 next->vm_pgoff - pglen, NULL);
66551+
66552+#ifdef CONFIG_PAX_SEGMEXEC
66553+ if (!err && area_m)
66554+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
66555+ next_m->vm_pgoff - pglen, NULL);
66556+#endif
66557+
66558+ }
66559 if (err)
66560 return NULL;
66561 khugepaged_enter_vma_merge(area);
66562@@ -929,14 +1009,11 @@ none:
66563 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
66564 struct file *file, long pages)
66565 {
66566- const unsigned long stack_flags
66567- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
66568-
66569 if (file) {
66570 mm->shared_vm += pages;
66571 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
66572 mm->exec_vm += pages;
66573- } else if (flags & stack_flags)
66574+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
66575 mm->stack_vm += pages;
66576 if (flags & (VM_RESERVED|VM_IO))
66577 mm->reserved_vm += pages;
66578@@ -963,7 +1040,7 @@ unsigned long do_mmap_pgoff(struct file
66579 * (the exception is when the underlying filesystem is noexec
66580 * mounted, in which case we dont add PROT_EXEC.)
66581 */
66582- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
66583+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
66584 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
66585 prot |= PROT_EXEC;
66586
66587@@ -989,7 +1066,7 @@ unsigned long do_mmap_pgoff(struct file
66588 /* Obtain the address to map to. we verify (or select) it and ensure
66589 * that it represents a valid section of the address space.
66590 */
66591- addr = get_unmapped_area(file, addr, len, pgoff, flags);
66592+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
66593 if (addr & ~PAGE_MASK)
66594 return addr;
66595
66596@@ -1000,6 +1077,36 @@ unsigned long do_mmap_pgoff(struct file
66597 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
66598 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
66599
66600+#ifdef CONFIG_PAX_MPROTECT
66601+ if (mm->pax_flags & MF_PAX_MPROTECT) {
66602+#ifndef CONFIG_PAX_MPROTECT_COMPAT
66603+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
66604+ gr_log_rwxmmap(file);
66605+
66606+#ifdef CONFIG_PAX_EMUPLT
66607+ vm_flags &= ~VM_EXEC;
66608+#else
66609+ return -EPERM;
66610+#endif
66611+
66612+ }
66613+
66614+ if (!(vm_flags & VM_EXEC))
66615+ vm_flags &= ~VM_MAYEXEC;
66616+#else
66617+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
66618+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
66619+#endif
66620+ else
66621+ vm_flags &= ~VM_MAYWRITE;
66622+ }
66623+#endif
66624+
66625+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
66626+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
66627+ vm_flags &= ~VM_PAGEEXEC;
66628+#endif
66629+
66630 if (flags & MAP_LOCKED)
66631 if (!can_do_mlock())
66632 return -EPERM;
66633@@ -1011,6 +1118,7 @@ unsigned long do_mmap_pgoff(struct file
66634 locked += mm->locked_vm;
66635 lock_limit = rlimit(RLIMIT_MEMLOCK);
66636 lock_limit >>= PAGE_SHIFT;
66637+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
66638 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
66639 return -EAGAIN;
66640 }
66641@@ -1081,6 +1189,9 @@ unsigned long do_mmap_pgoff(struct file
66642 if (error)
66643 return error;
66644
66645+ if (!gr_acl_handle_mmap(file, prot))
66646+ return -EACCES;
66647+
66648 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
66649 }
66650 EXPORT_SYMBOL(do_mmap_pgoff);
66651@@ -1161,7 +1272,7 @@ int vma_wants_writenotify(struct vm_area
66652 vm_flags_t vm_flags = vma->vm_flags;
66653
66654 /* If it was private or non-writable, the write bit is already clear */
66655- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
66656+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
66657 return 0;
66658
66659 /* The backer wishes to know when pages are first written to? */
66660@@ -1210,14 +1321,24 @@ unsigned long mmap_region(struct file *f
66661 unsigned long charged = 0;
66662 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
66663
66664+#ifdef CONFIG_PAX_SEGMEXEC
66665+ struct vm_area_struct *vma_m = NULL;
66666+#endif
66667+
66668+ /*
66669+ * mm->mmap_sem is required to protect against another thread
66670+ * changing the mappings in case we sleep.
66671+ */
66672+ verify_mm_writelocked(mm);
66673+
66674 /* Clear old maps */
66675 error = -ENOMEM;
66676-munmap_back:
66677 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
66678 if (vma && vma->vm_start < addr + len) {
66679 if (do_munmap(mm, addr, len))
66680 return -ENOMEM;
66681- goto munmap_back;
66682+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
66683+ BUG_ON(vma && vma->vm_start < addr + len);
66684 }
66685
66686 /* Check against address space limit. */
66687@@ -1266,6 +1387,16 @@ munmap_back:
66688 goto unacct_error;
66689 }
66690
66691+#ifdef CONFIG_PAX_SEGMEXEC
66692+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
66693+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
66694+ if (!vma_m) {
66695+ error = -ENOMEM;
66696+ goto free_vma;
66697+ }
66698+ }
66699+#endif
66700+
66701 vma->vm_mm = mm;
66702 vma->vm_start = addr;
66703 vma->vm_end = addr + len;
66704@@ -1289,6 +1420,19 @@ munmap_back:
66705 error = file->f_op->mmap(file, vma);
66706 if (error)
66707 goto unmap_and_free_vma;
66708+
66709+#ifdef CONFIG_PAX_SEGMEXEC
66710+ if (vma_m && (vm_flags & VM_EXECUTABLE))
66711+ added_exe_file_vma(mm);
66712+#endif
66713+
66714+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
66715+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
66716+ vma->vm_flags |= VM_PAGEEXEC;
66717+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
66718+ }
66719+#endif
66720+
66721 if (vm_flags & VM_EXECUTABLE)
66722 added_exe_file_vma(mm);
66723
66724@@ -1324,6 +1468,11 @@ munmap_back:
66725 vma_link(mm, vma, prev, rb_link, rb_parent);
66726 file = vma->vm_file;
66727
66728+#ifdef CONFIG_PAX_SEGMEXEC
66729+ if (vma_m)
66730+ BUG_ON(pax_mirror_vma(vma_m, vma));
66731+#endif
66732+
66733 /* Once vma denies write, undo our temporary denial count */
66734 if (correct_wcount)
66735 atomic_inc(&inode->i_writecount);
66736@@ -1332,6 +1481,7 @@ out:
66737
66738 mm->total_vm += len >> PAGE_SHIFT;
66739 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
66740+ track_exec_limit(mm, addr, addr + len, vm_flags);
66741 if (vm_flags & VM_LOCKED) {
66742 if (!mlock_vma_pages_range(vma, addr, addr + len))
66743 mm->locked_vm += (len >> PAGE_SHIFT);
66744@@ -1349,6 +1499,12 @@ unmap_and_free_vma:
66745 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
66746 charged = 0;
66747 free_vma:
66748+
66749+#ifdef CONFIG_PAX_SEGMEXEC
66750+ if (vma_m)
66751+ kmem_cache_free(vm_area_cachep, vma_m);
66752+#endif
66753+
66754 kmem_cache_free(vm_area_cachep, vma);
66755 unacct_error:
66756 if (charged)
66757@@ -1356,6 +1512,44 @@ unacct_error:
66758 return error;
66759 }
66760
66761+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
66762+{
66763+ if (!vma) {
66764+#ifdef CONFIG_STACK_GROWSUP
66765+ if (addr > sysctl_heap_stack_gap)
66766+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
66767+ else
66768+ vma = find_vma(current->mm, 0);
66769+ if (vma && (vma->vm_flags & VM_GROWSUP))
66770+ return false;
66771+#endif
66772+ return true;
66773+ }
66774+
66775+ if (addr + len > vma->vm_start)
66776+ return false;
66777+
66778+ if (vma->vm_flags & VM_GROWSDOWN)
66779+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
66780+#ifdef CONFIG_STACK_GROWSUP
66781+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
66782+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
66783+#endif
66784+
66785+ return true;
66786+}
66787+
66788+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
66789+{
66790+ if (vma->vm_start < len)
66791+ return -ENOMEM;
66792+ if (!(vma->vm_flags & VM_GROWSDOWN))
66793+ return vma->vm_start - len;
66794+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
66795+ return vma->vm_start - len - sysctl_heap_stack_gap;
66796+ return -ENOMEM;
66797+}
66798+
66799 /* Get an address range which is currently unmapped.
66800 * For shmat() with addr=0.
66801 *
66802@@ -1382,18 +1576,23 @@ arch_get_unmapped_area(struct file *filp
66803 if (flags & MAP_FIXED)
66804 return addr;
66805
66806+#ifdef CONFIG_PAX_RANDMMAP
66807+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
66808+#endif
66809+
66810 if (addr) {
66811 addr = PAGE_ALIGN(addr);
66812- vma = find_vma(mm, addr);
66813- if (TASK_SIZE - len >= addr &&
66814- (!vma || addr + len <= vma->vm_start))
66815- return addr;
66816+ if (TASK_SIZE - len >= addr) {
66817+ vma = find_vma(mm, addr);
66818+ if (check_heap_stack_gap(vma, addr, len))
66819+ return addr;
66820+ }
66821 }
66822 if (len > mm->cached_hole_size) {
66823- start_addr = addr = mm->free_area_cache;
66824+ start_addr = addr = mm->free_area_cache;
66825 } else {
66826- start_addr = addr = TASK_UNMAPPED_BASE;
66827- mm->cached_hole_size = 0;
66828+ start_addr = addr = mm->mmap_base;
66829+ mm->cached_hole_size = 0;
66830 }
66831
66832 full_search:
66833@@ -1404,34 +1603,40 @@ full_search:
66834 * Start a new search - just in case we missed
66835 * some holes.
66836 */
66837- if (start_addr != TASK_UNMAPPED_BASE) {
66838- addr = TASK_UNMAPPED_BASE;
66839- start_addr = addr;
66840+ if (start_addr != mm->mmap_base) {
66841+ start_addr = addr = mm->mmap_base;
66842 mm->cached_hole_size = 0;
66843 goto full_search;
66844 }
66845 return -ENOMEM;
66846 }
66847- if (!vma || addr + len <= vma->vm_start) {
66848- /*
66849- * Remember the place where we stopped the search:
66850- */
66851- mm->free_area_cache = addr + len;
66852- return addr;
66853- }
66854+ if (check_heap_stack_gap(vma, addr, len))
66855+ break;
66856 if (addr + mm->cached_hole_size < vma->vm_start)
66857 mm->cached_hole_size = vma->vm_start - addr;
66858 addr = vma->vm_end;
66859 }
66860+
66861+ /*
66862+ * Remember the place where we stopped the search:
66863+ */
66864+ mm->free_area_cache = addr + len;
66865+ return addr;
66866 }
66867 #endif
66868
66869 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
66870 {
66871+
66872+#ifdef CONFIG_PAX_SEGMEXEC
66873+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
66874+ return;
66875+#endif
66876+
66877 /*
66878 * Is this a new hole at the lowest possible address?
66879 */
66880- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
66881+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
66882 mm->free_area_cache = addr;
66883 mm->cached_hole_size = ~0UL;
66884 }
66885@@ -1449,7 +1654,7 @@ arch_get_unmapped_area_topdown(struct fi
66886 {
66887 struct vm_area_struct *vma;
66888 struct mm_struct *mm = current->mm;
66889- unsigned long addr = addr0;
66890+ unsigned long base = mm->mmap_base, addr = addr0;
66891
66892 /* requested length too big for entire address space */
66893 if (len > TASK_SIZE)
66894@@ -1458,13 +1663,18 @@ arch_get_unmapped_area_topdown(struct fi
66895 if (flags & MAP_FIXED)
66896 return addr;
66897
66898+#ifdef CONFIG_PAX_RANDMMAP
66899+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
66900+#endif
66901+
66902 /* requesting a specific address */
66903 if (addr) {
66904 addr = PAGE_ALIGN(addr);
66905- vma = find_vma(mm, addr);
66906- if (TASK_SIZE - len >= addr &&
66907- (!vma || addr + len <= vma->vm_start))
66908- return addr;
66909+ if (TASK_SIZE - len >= addr) {
66910+ vma = find_vma(mm, addr);
66911+ if (check_heap_stack_gap(vma, addr, len))
66912+ return addr;
66913+ }
66914 }
66915
66916 /* check if free_area_cache is useful for us */
66917@@ -1479,7 +1689,7 @@ arch_get_unmapped_area_topdown(struct fi
66918 /* make sure it can fit in the remaining address space */
66919 if (addr > len) {
66920 vma = find_vma(mm, addr-len);
66921- if (!vma || addr <= vma->vm_start)
66922+ if (check_heap_stack_gap(vma, addr - len, len))
66923 /* remember the address as a hint for next time */
66924 return (mm->free_area_cache = addr-len);
66925 }
66926@@ -1496,7 +1706,7 @@ arch_get_unmapped_area_topdown(struct fi
66927 * return with success:
66928 */
66929 vma = find_vma(mm, addr);
66930- if (!vma || addr+len <= vma->vm_start)
66931+ if (check_heap_stack_gap(vma, addr, len))
66932 /* remember the address as a hint for next time */
66933 return (mm->free_area_cache = addr);
66934
66935@@ -1505,8 +1715,8 @@ arch_get_unmapped_area_topdown(struct fi
66936 mm->cached_hole_size = vma->vm_start - addr;
66937
66938 /* try just below the current vma->vm_start */
66939- addr = vma->vm_start-len;
66940- } while (len < vma->vm_start);
66941+ addr = skip_heap_stack_gap(vma, len);
66942+ } while (!IS_ERR_VALUE(addr));
66943
66944 bottomup:
66945 /*
66946@@ -1515,13 +1725,21 @@ bottomup:
66947 * can happen with large stack limits and large mmap()
66948 * allocations.
66949 */
66950+ mm->mmap_base = TASK_UNMAPPED_BASE;
66951+
66952+#ifdef CONFIG_PAX_RANDMMAP
66953+ if (mm->pax_flags & MF_PAX_RANDMMAP)
66954+ mm->mmap_base += mm->delta_mmap;
66955+#endif
66956+
66957+ mm->free_area_cache = mm->mmap_base;
66958 mm->cached_hole_size = ~0UL;
66959- mm->free_area_cache = TASK_UNMAPPED_BASE;
66960 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
66961 /*
66962 * Restore the topdown base:
66963 */
66964- mm->free_area_cache = mm->mmap_base;
66965+ mm->mmap_base = base;
66966+ mm->free_area_cache = base;
66967 mm->cached_hole_size = ~0UL;
66968
66969 return addr;
66970@@ -1530,6 +1748,12 @@ bottomup:
66971
66972 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
66973 {
66974+
66975+#ifdef CONFIG_PAX_SEGMEXEC
66976+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
66977+ return;
66978+#endif
66979+
66980 /*
66981 * Is this a new hole at the highest possible address?
66982 */
66983@@ -1537,8 +1761,10 @@ void arch_unmap_area_topdown(struct mm_s
66984 mm->free_area_cache = addr;
66985
66986 /* dont allow allocations above current base */
66987- if (mm->free_area_cache > mm->mmap_base)
66988+ if (mm->free_area_cache > mm->mmap_base) {
66989 mm->free_area_cache = mm->mmap_base;
66990+ mm->cached_hole_size = ~0UL;
66991+ }
66992 }
66993
66994 unsigned long
66995@@ -1646,6 +1872,28 @@ out:
66996 return prev ? prev->vm_next : vma;
66997 }
66998
66999+#ifdef CONFIG_PAX_SEGMEXEC
67000+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
67001+{
67002+ struct vm_area_struct *vma_m;
67003+
67004+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
67005+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
67006+ BUG_ON(vma->vm_mirror);
67007+ return NULL;
67008+ }
67009+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
67010+ vma_m = vma->vm_mirror;
67011+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
67012+ BUG_ON(vma->vm_file != vma_m->vm_file);
67013+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
67014+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
67015+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
67016+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
67017+ return vma_m;
67018+}
67019+#endif
67020+
67021 /*
67022 * Verify that the stack growth is acceptable and
67023 * update accounting. This is shared with both the
67024@@ -1662,6 +1910,7 @@ static int acct_stack_growth(struct vm_a
67025 return -ENOMEM;
67026
67027 /* Stack limit test */
67028+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
67029 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
67030 return -ENOMEM;
67031
67032@@ -1672,6 +1921,7 @@ static int acct_stack_growth(struct vm_a
67033 locked = mm->locked_vm + grow;
67034 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
67035 limit >>= PAGE_SHIFT;
67036+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
67037 if (locked > limit && !capable(CAP_IPC_LOCK))
67038 return -ENOMEM;
67039 }
67040@@ -1702,37 +1952,48 @@ static int acct_stack_growth(struct vm_a
67041 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
67042 * vma is the last one with address > vma->vm_end. Have to extend vma.
67043 */
67044+#ifndef CONFIG_IA64
67045+static
67046+#endif
67047 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
67048 {
67049 int error;
67050+ bool locknext;
67051
67052 if (!(vma->vm_flags & VM_GROWSUP))
67053 return -EFAULT;
67054
67055+ /* Also guard against wrapping around to address 0. */
67056+ if (address < PAGE_ALIGN(address+1))
67057+ address = PAGE_ALIGN(address+1);
67058+ else
67059+ return -ENOMEM;
67060+
67061 /*
67062 * We must make sure the anon_vma is allocated
67063 * so that the anon_vma locking is not a noop.
67064 */
67065 if (unlikely(anon_vma_prepare(vma)))
67066 return -ENOMEM;
67067+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
67068+ if (locknext && anon_vma_prepare(vma->vm_next))
67069+ return -ENOMEM;
67070 vma_lock_anon_vma(vma);
67071+ if (locknext)
67072+ vma_lock_anon_vma(vma->vm_next);
67073
67074 /*
67075 * vma->vm_start/vm_end cannot change under us because the caller
67076 * is required to hold the mmap_sem in read mode. We need the
67077- * anon_vma lock to serialize against concurrent expand_stacks.
67078- * Also guard against wrapping around to address 0.
67079+ * anon_vma locks to serialize against concurrent expand_stacks
67080+ * and expand_upwards.
67081 */
67082- if (address < PAGE_ALIGN(address+4))
67083- address = PAGE_ALIGN(address+4);
67084- else {
67085- vma_unlock_anon_vma(vma);
67086- return -ENOMEM;
67087- }
67088 error = 0;
67089
67090 /* Somebody else might have raced and expanded it already */
67091- if (address > vma->vm_end) {
67092+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
67093+ error = -ENOMEM;
67094+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
67095 unsigned long size, grow;
67096
67097 size = address - vma->vm_start;
67098@@ -1747,6 +2008,8 @@ int expand_upwards(struct vm_area_struct
67099 }
67100 }
67101 }
67102+ if (locknext)
67103+ vma_unlock_anon_vma(vma->vm_next);
67104 vma_unlock_anon_vma(vma);
67105 khugepaged_enter_vma_merge(vma);
67106 return error;
67107@@ -1760,6 +2023,8 @@ int expand_downwards(struct vm_area_stru
67108 unsigned long address)
67109 {
67110 int error;
67111+ bool lockprev = false;
67112+ struct vm_area_struct *prev;
67113
67114 /*
67115 * We must make sure the anon_vma is allocated
67116@@ -1773,6 +2038,15 @@ int expand_downwards(struct vm_area_stru
67117 if (error)
67118 return error;
67119
67120+ prev = vma->vm_prev;
67121+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
67122+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
67123+#endif
67124+ if (lockprev && anon_vma_prepare(prev))
67125+ return -ENOMEM;
67126+ if (lockprev)
67127+ vma_lock_anon_vma(prev);
67128+
67129 vma_lock_anon_vma(vma);
67130
67131 /*
67132@@ -1782,9 +2056,17 @@ int expand_downwards(struct vm_area_stru
67133 */
67134
67135 /* Somebody else might have raced and expanded it already */
67136- if (address < vma->vm_start) {
67137+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
67138+ error = -ENOMEM;
67139+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
67140 unsigned long size, grow;
67141
67142+#ifdef CONFIG_PAX_SEGMEXEC
67143+ struct vm_area_struct *vma_m;
67144+
67145+ vma_m = pax_find_mirror_vma(vma);
67146+#endif
67147+
67148 size = vma->vm_end - address;
67149 grow = (vma->vm_start - address) >> PAGE_SHIFT;
67150
67151@@ -1794,11 +2076,22 @@ int expand_downwards(struct vm_area_stru
67152 if (!error) {
67153 vma->vm_start = address;
67154 vma->vm_pgoff -= grow;
67155+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
67156+
67157+#ifdef CONFIG_PAX_SEGMEXEC
67158+ if (vma_m) {
67159+ vma_m->vm_start -= grow << PAGE_SHIFT;
67160+ vma_m->vm_pgoff -= grow;
67161+ }
67162+#endif
67163+
67164 perf_event_mmap(vma);
67165 }
67166 }
67167 }
67168 vma_unlock_anon_vma(vma);
67169+ if (lockprev)
67170+ vma_unlock_anon_vma(prev);
67171 khugepaged_enter_vma_merge(vma);
67172 return error;
67173 }
67174@@ -1868,6 +2161,13 @@ static void remove_vma_list(struct mm_st
67175 do {
67176 long nrpages = vma_pages(vma);
67177
67178+#ifdef CONFIG_PAX_SEGMEXEC
67179+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
67180+ vma = remove_vma(vma);
67181+ continue;
67182+ }
67183+#endif
67184+
67185 mm->total_vm -= nrpages;
67186 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
67187 vma = remove_vma(vma);
67188@@ -1913,6 +2213,16 @@ detach_vmas_to_be_unmapped(struct mm_str
67189 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
67190 vma->vm_prev = NULL;
67191 do {
67192+
67193+#ifdef CONFIG_PAX_SEGMEXEC
67194+ if (vma->vm_mirror) {
67195+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
67196+ vma->vm_mirror->vm_mirror = NULL;
67197+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
67198+ vma->vm_mirror = NULL;
67199+ }
67200+#endif
67201+
67202 rb_erase(&vma->vm_rb, &mm->mm_rb);
67203 mm->map_count--;
67204 tail_vma = vma;
67205@@ -1941,14 +2251,33 @@ static int __split_vma(struct mm_struct
67206 struct vm_area_struct *new;
67207 int err = -ENOMEM;
67208
67209+#ifdef CONFIG_PAX_SEGMEXEC
67210+ struct vm_area_struct *vma_m, *new_m = NULL;
67211+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
67212+#endif
67213+
67214 if (is_vm_hugetlb_page(vma) && (addr &
67215 ~(huge_page_mask(hstate_vma(vma)))))
67216 return -EINVAL;
67217
67218+#ifdef CONFIG_PAX_SEGMEXEC
67219+ vma_m = pax_find_mirror_vma(vma);
67220+#endif
67221+
67222 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
67223 if (!new)
67224 goto out_err;
67225
67226+#ifdef CONFIG_PAX_SEGMEXEC
67227+ if (vma_m) {
67228+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
67229+ if (!new_m) {
67230+ kmem_cache_free(vm_area_cachep, new);
67231+ goto out_err;
67232+ }
67233+ }
67234+#endif
67235+
67236 /* most fields are the same, copy all, and then fixup */
67237 *new = *vma;
67238
67239@@ -1961,6 +2290,22 @@ static int __split_vma(struct mm_struct
67240 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
67241 }
67242
67243+#ifdef CONFIG_PAX_SEGMEXEC
67244+ if (vma_m) {
67245+ *new_m = *vma_m;
67246+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
67247+ new_m->vm_mirror = new;
67248+ new->vm_mirror = new_m;
67249+
67250+ if (new_below)
67251+ new_m->vm_end = addr_m;
67252+ else {
67253+ new_m->vm_start = addr_m;
67254+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
67255+ }
67256+ }
67257+#endif
67258+
67259 pol = mpol_dup(vma_policy(vma));
67260 if (IS_ERR(pol)) {
67261 err = PTR_ERR(pol);
67262@@ -1986,6 +2331,42 @@ static int __split_vma(struct mm_struct
67263 else
67264 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
67265
67266+#ifdef CONFIG_PAX_SEGMEXEC
67267+ if (!err && vma_m) {
67268+ if (anon_vma_clone(new_m, vma_m))
67269+ goto out_free_mpol;
67270+
67271+ mpol_get(pol);
67272+ vma_set_policy(new_m, pol);
67273+
67274+ if (new_m->vm_file) {
67275+ get_file(new_m->vm_file);
67276+ if (vma_m->vm_flags & VM_EXECUTABLE)
67277+ added_exe_file_vma(mm);
67278+ }
67279+
67280+ if (new_m->vm_ops && new_m->vm_ops->open)
67281+ new_m->vm_ops->open(new_m);
67282+
67283+ if (new_below)
67284+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
67285+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
67286+ else
67287+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
67288+
67289+ if (err) {
67290+ if (new_m->vm_ops && new_m->vm_ops->close)
67291+ new_m->vm_ops->close(new_m);
67292+ if (new_m->vm_file) {
67293+ if (vma_m->vm_flags & VM_EXECUTABLE)
67294+ removed_exe_file_vma(mm);
67295+ fput(new_m->vm_file);
67296+ }
67297+ mpol_put(pol);
67298+ }
67299+ }
67300+#endif
67301+
67302 /* Success. */
67303 if (!err)
67304 return 0;
67305@@ -1998,10 +2379,18 @@ static int __split_vma(struct mm_struct
67306 removed_exe_file_vma(mm);
67307 fput(new->vm_file);
67308 }
67309- unlink_anon_vmas(new);
67310 out_free_mpol:
67311 mpol_put(pol);
67312 out_free_vma:
67313+
67314+#ifdef CONFIG_PAX_SEGMEXEC
67315+ if (new_m) {
67316+ unlink_anon_vmas(new_m);
67317+ kmem_cache_free(vm_area_cachep, new_m);
67318+ }
67319+#endif
67320+
67321+ unlink_anon_vmas(new);
67322 kmem_cache_free(vm_area_cachep, new);
67323 out_err:
67324 return err;
67325@@ -2014,6 +2403,15 @@ static int __split_vma(struct mm_struct
67326 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
67327 unsigned long addr, int new_below)
67328 {
67329+
67330+#ifdef CONFIG_PAX_SEGMEXEC
67331+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
67332+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
67333+ if (mm->map_count >= sysctl_max_map_count-1)
67334+ return -ENOMEM;
67335+ } else
67336+#endif
67337+
67338 if (mm->map_count >= sysctl_max_map_count)
67339 return -ENOMEM;
67340
67341@@ -2025,11 +2423,30 @@ int split_vma(struct mm_struct *mm, stru
67342 * work. This now handles partial unmappings.
67343 * Jeremy Fitzhardinge <jeremy@goop.org>
67344 */
67345+#ifdef CONFIG_PAX_SEGMEXEC
67346 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
67347 {
67348+ int ret = __do_munmap(mm, start, len);
67349+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
67350+ return ret;
67351+
67352+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
67353+}
67354+
67355+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
67356+#else
67357+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
67358+#endif
67359+{
67360 unsigned long end;
67361 struct vm_area_struct *vma, *prev, *last;
67362
67363+ /*
67364+ * mm->mmap_sem is required to protect against another thread
67365+ * changing the mappings in case we sleep.
67366+ */
67367+ verify_mm_writelocked(mm);
67368+
67369 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
67370 return -EINVAL;
67371
67372@@ -2104,6 +2521,8 @@ int do_munmap(struct mm_struct *mm, unsi
67373 /* Fix up all other VM information */
67374 remove_vma_list(mm, vma);
67375
67376+ track_exec_limit(mm, start, end, 0UL);
67377+
67378 return 0;
67379 }
67380
67381@@ -2116,22 +2535,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
67382
67383 profile_munmap(addr);
67384
67385+#ifdef CONFIG_PAX_SEGMEXEC
67386+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
67387+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
67388+ return -EINVAL;
67389+#endif
67390+
67391 down_write(&mm->mmap_sem);
67392 ret = do_munmap(mm, addr, len);
67393 up_write(&mm->mmap_sem);
67394 return ret;
67395 }
67396
67397-static inline void verify_mm_writelocked(struct mm_struct *mm)
67398-{
67399-#ifdef CONFIG_DEBUG_VM
67400- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
67401- WARN_ON(1);
67402- up_read(&mm->mmap_sem);
67403- }
67404-#endif
67405-}
67406-
67407 /*
67408 * this is really a simplified "do_mmap". it only handles
67409 * anonymous maps. eventually we may be able to do some
67410@@ -2145,6 +2560,7 @@ unsigned long do_brk(unsigned long addr,
67411 struct rb_node ** rb_link, * rb_parent;
67412 pgoff_t pgoff = addr >> PAGE_SHIFT;
67413 int error;
67414+ unsigned long charged;
67415
67416 len = PAGE_ALIGN(len);
67417 if (!len)
67418@@ -2156,16 +2572,30 @@ unsigned long do_brk(unsigned long addr,
67419
67420 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
67421
67422+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
67423+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
67424+ flags &= ~VM_EXEC;
67425+
67426+#ifdef CONFIG_PAX_MPROTECT
67427+ if (mm->pax_flags & MF_PAX_MPROTECT)
67428+ flags &= ~VM_MAYEXEC;
67429+#endif
67430+
67431+ }
67432+#endif
67433+
67434 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
67435 if (error & ~PAGE_MASK)
67436 return error;
67437
67438+ charged = len >> PAGE_SHIFT;
67439+
67440 /*
67441 * mlock MCL_FUTURE?
67442 */
67443 if (mm->def_flags & VM_LOCKED) {
67444 unsigned long locked, lock_limit;
67445- locked = len >> PAGE_SHIFT;
67446+ locked = charged;
67447 locked += mm->locked_vm;
67448 lock_limit = rlimit(RLIMIT_MEMLOCK);
67449 lock_limit >>= PAGE_SHIFT;
67450@@ -2182,22 +2612,22 @@ unsigned long do_brk(unsigned long addr,
67451 /*
67452 * Clear old maps. this also does some error checking for us
67453 */
67454- munmap_back:
67455 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
67456 if (vma && vma->vm_start < addr + len) {
67457 if (do_munmap(mm, addr, len))
67458 return -ENOMEM;
67459- goto munmap_back;
67460+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
67461+ BUG_ON(vma && vma->vm_start < addr + len);
67462 }
67463
67464 /* Check against address space limits *after* clearing old maps... */
67465- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
67466+ if (!may_expand_vm(mm, charged))
67467 return -ENOMEM;
67468
67469 if (mm->map_count > sysctl_max_map_count)
67470 return -ENOMEM;
67471
67472- if (security_vm_enough_memory(len >> PAGE_SHIFT))
67473+ if (security_vm_enough_memory(charged))
67474 return -ENOMEM;
67475
67476 /* Can we just expand an old private anonymous mapping? */
67477@@ -2211,7 +2641,7 @@ unsigned long do_brk(unsigned long addr,
67478 */
67479 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
67480 if (!vma) {
67481- vm_unacct_memory(len >> PAGE_SHIFT);
67482+ vm_unacct_memory(charged);
67483 return -ENOMEM;
67484 }
67485
67486@@ -2225,11 +2655,12 @@ unsigned long do_brk(unsigned long addr,
67487 vma_link(mm, vma, prev, rb_link, rb_parent);
67488 out:
67489 perf_event_mmap(vma);
67490- mm->total_vm += len >> PAGE_SHIFT;
67491+ mm->total_vm += charged;
67492 if (flags & VM_LOCKED) {
67493 if (!mlock_vma_pages_range(vma, addr, addr + len))
67494- mm->locked_vm += (len >> PAGE_SHIFT);
67495+ mm->locked_vm += charged;
67496 }
67497+ track_exec_limit(mm, addr, addr + len, flags);
67498 return addr;
67499 }
67500
67501@@ -2276,8 +2707,10 @@ void exit_mmap(struct mm_struct *mm)
67502 * Walk the list again, actually closing and freeing it,
67503 * with preemption enabled, without holding any MM locks.
67504 */
67505- while (vma)
67506+ while (vma) {
67507+ vma->vm_mirror = NULL;
67508 vma = remove_vma(vma);
67509+ }
67510
67511 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
67512 }
67513@@ -2291,6 +2724,13 @@ int insert_vm_struct(struct mm_struct *
67514 struct vm_area_struct * __vma, * prev;
67515 struct rb_node ** rb_link, * rb_parent;
67516
67517+#ifdef CONFIG_PAX_SEGMEXEC
67518+ struct vm_area_struct *vma_m = NULL;
67519+#endif
67520+
67521+ if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
67522+ return -EPERM;
67523+
67524 /*
67525 * The vm_pgoff of a purely anonymous vma should be irrelevant
67526 * until its first write fault, when page's anon_vma and index
67527@@ -2313,7 +2753,22 @@ int insert_vm_struct(struct mm_struct *
67528 if ((vma->vm_flags & VM_ACCOUNT) &&
67529 security_vm_enough_memory_mm(mm, vma_pages(vma)))
67530 return -ENOMEM;
67531+
67532+#ifdef CONFIG_PAX_SEGMEXEC
67533+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
67534+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
67535+ if (!vma_m)
67536+ return -ENOMEM;
67537+ }
67538+#endif
67539+
67540 vma_link(mm, vma, prev, rb_link, rb_parent);
67541+
67542+#ifdef CONFIG_PAX_SEGMEXEC
67543+ if (vma_m)
67544+ BUG_ON(pax_mirror_vma(vma_m, vma));
67545+#endif
67546+
67547 return 0;
67548 }
67549
67550@@ -2331,6 +2786,8 @@ struct vm_area_struct *copy_vma(struct v
67551 struct rb_node **rb_link, *rb_parent;
67552 struct mempolicy *pol;
67553
67554+ BUG_ON(vma->vm_mirror);
67555+
67556 /*
67557 * If anonymous vma has not yet been faulted, update new pgoff
67558 * to match new location, to increase its chance of merging.
67559@@ -2381,6 +2838,39 @@ struct vm_area_struct *copy_vma(struct v
67560 return NULL;
67561 }
67562
67563+#ifdef CONFIG_PAX_SEGMEXEC
67564+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
67565+{
67566+ struct vm_area_struct *prev_m;
67567+ struct rb_node **rb_link_m, *rb_parent_m;
67568+ struct mempolicy *pol_m;
67569+
67570+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
67571+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
67572+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
67573+ *vma_m = *vma;
67574+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
67575+ if (anon_vma_clone(vma_m, vma))
67576+ return -ENOMEM;
67577+ pol_m = vma_policy(vma_m);
67578+ mpol_get(pol_m);
67579+ vma_set_policy(vma_m, pol_m);
67580+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
67581+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
67582+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
67583+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
67584+ if (vma_m->vm_file)
67585+ get_file(vma_m->vm_file);
67586+ if (vma_m->vm_ops && vma_m->vm_ops->open)
67587+ vma_m->vm_ops->open(vma_m);
67588+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
67589+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
67590+ vma_m->vm_mirror = vma;
67591+ vma->vm_mirror = vma_m;
67592+ return 0;
67593+}
67594+#endif
67595+
67596 /*
67597 * Return true if the calling process may expand its vm space by the passed
67598 * number of pages
67599@@ -2391,7 +2881,7 @@ int may_expand_vm(struct mm_struct *mm,
67600 unsigned long lim;
67601
67602 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
67603-
67604+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
67605 if (cur + npages > lim)
67606 return 0;
67607 return 1;
67608@@ -2462,6 +2952,22 @@ int install_special_mapping(struct mm_st
67609 vma->vm_start = addr;
67610 vma->vm_end = addr + len;
67611
67612+#ifdef CONFIG_PAX_MPROTECT
67613+ if (mm->pax_flags & MF_PAX_MPROTECT) {
67614+#ifndef CONFIG_PAX_MPROTECT_COMPAT
67615+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
67616+ return -EPERM;
67617+ if (!(vm_flags & VM_EXEC))
67618+ vm_flags &= ~VM_MAYEXEC;
67619+#else
67620+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
67621+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
67622+#endif
67623+ else
67624+ vm_flags &= ~VM_MAYWRITE;
67625+ }
67626+#endif
67627+
67628 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
67629 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
67630
67631diff -urNp linux-3.0.4/mm/mprotect.c linux-3.0.4/mm/mprotect.c
67632--- linux-3.0.4/mm/mprotect.c 2011-07-21 22:17:23.000000000 -0400
67633+++ linux-3.0.4/mm/mprotect.c 2011-08-23 21:48:14.000000000 -0400
67634@@ -23,10 +23,16 @@
67635 #include <linux/mmu_notifier.h>
67636 #include <linux/migrate.h>
67637 #include <linux/perf_event.h>
67638+
67639+#ifdef CONFIG_PAX_MPROTECT
67640+#include <linux/elf.h>
67641+#endif
67642+
67643 #include <asm/uaccess.h>
67644 #include <asm/pgtable.h>
67645 #include <asm/cacheflush.h>
67646 #include <asm/tlbflush.h>
67647+#include <asm/mmu_context.h>
67648
67649 #ifndef pgprot_modify
67650 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
67651@@ -141,6 +147,48 @@ static void change_protection(struct vm_
67652 flush_tlb_range(vma, start, end);
67653 }
67654
67655+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
67656+/* called while holding the mmap semaphor for writing except stack expansion */
67657+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
67658+{
67659+ unsigned long oldlimit, newlimit = 0UL;
67660+
67661+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
67662+ return;
67663+
67664+ spin_lock(&mm->page_table_lock);
67665+ oldlimit = mm->context.user_cs_limit;
67666+ if ((prot & VM_EXEC) && oldlimit < end)
67667+ /* USER_CS limit moved up */
67668+ newlimit = end;
67669+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
67670+ /* USER_CS limit moved down */
67671+ newlimit = start;
67672+
67673+ if (newlimit) {
67674+ mm->context.user_cs_limit = newlimit;
67675+
67676+#ifdef CONFIG_SMP
67677+ wmb();
67678+ cpus_clear(mm->context.cpu_user_cs_mask);
67679+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
67680+#endif
67681+
67682+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
67683+ }
67684+ spin_unlock(&mm->page_table_lock);
67685+ if (newlimit == end) {
67686+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
67687+
67688+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
67689+ if (is_vm_hugetlb_page(vma))
67690+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
67691+ else
67692+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
67693+ }
67694+}
67695+#endif
67696+
67697 int
67698 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
67699 unsigned long start, unsigned long end, unsigned long newflags)
67700@@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vm
67701 int error;
67702 int dirty_accountable = 0;
67703
67704+#ifdef CONFIG_PAX_SEGMEXEC
67705+ struct vm_area_struct *vma_m = NULL;
67706+ unsigned long start_m, end_m;
67707+
67708+ start_m = start + SEGMEXEC_TASK_SIZE;
67709+ end_m = end + SEGMEXEC_TASK_SIZE;
67710+#endif
67711+
67712 if (newflags == oldflags) {
67713 *pprev = vma;
67714 return 0;
67715 }
67716
67717+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
67718+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
67719+
67720+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
67721+ return -ENOMEM;
67722+
67723+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
67724+ return -ENOMEM;
67725+ }
67726+
67727 /*
67728 * If we make a private mapping writable we increase our commit;
67729 * but (without finer accounting) cannot reduce our commit if we
67730@@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vm
67731 }
67732 }
67733
67734+#ifdef CONFIG_PAX_SEGMEXEC
67735+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
67736+ if (start != vma->vm_start) {
67737+ error = split_vma(mm, vma, start, 1);
67738+ if (error)
67739+ goto fail;
67740+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
67741+ *pprev = (*pprev)->vm_next;
67742+ }
67743+
67744+ if (end != vma->vm_end) {
67745+ error = split_vma(mm, vma, end, 0);
67746+ if (error)
67747+ goto fail;
67748+ }
67749+
67750+ if (pax_find_mirror_vma(vma)) {
67751+ error = __do_munmap(mm, start_m, end_m - start_m);
67752+ if (error)
67753+ goto fail;
67754+ } else {
67755+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
67756+ if (!vma_m) {
67757+ error = -ENOMEM;
67758+ goto fail;
67759+ }
67760+ vma->vm_flags = newflags;
67761+ error = pax_mirror_vma(vma_m, vma);
67762+ if (error) {
67763+ vma->vm_flags = oldflags;
67764+ goto fail;
67765+ }
67766+ }
67767+ }
67768+#endif
67769+
67770 /*
67771 * First try to merge with previous and/or next vma.
67772 */
67773@@ -204,9 +306,21 @@ success:
67774 * vm_flags and vm_page_prot are protected by the mmap_sem
67775 * held in write mode.
67776 */
67777+
67778+#ifdef CONFIG_PAX_SEGMEXEC
67779+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
67780+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
67781+#endif
67782+
67783 vma->vm_flags = newflags;
67784+
67785+#ifdef CONFIG_PAX_MPROTECT
67786+ if (mm->binfmt && mm->binfmt->handle_mprotect)
67787+ mm->binfmt->handle_mprotect(vma, newflags);
67788+#endif
67789+
67790 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
67791- vm_get_page_prot(newflags));
67792+ vm_get_page_prot(vma->vm_flags));
67793
67794 if (vma_wants_writenotify(vma)) {
67795 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
67796@@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
67797 end = start + len;
67798 if (end <= start)
67799 return -ENOMEM;
67800+
67801+#ifdef CONFIG_PAX_SEGMEXEC
67802+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
67803+ if (end > SEGMEXEC_TASK_SIZE)
67804+ return -EINVAL;
67805+ } else
67806+#endif
67807+
67808+ if (end > TASK_SIZE)
67809+ return -EINVAL;
67810+
67811 if (!arch_validate_prot(prot))
67812 return -EINVAL;
67813
67814@@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
67815 /*
67816 * Does the application expect PROT_READ to imply PROT_EXEC:
67817 */
67818- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
67819+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
67820 prot |= PROT_EXEC;
67821
67822 vm_flags = calc_vm_prot_bits(prot);
67823@@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
67824 if (start > vma->vm_start)
67825 prev = vma;
67826
67827+#ifdef CONFIG_PAX_MPROTECT
67828+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
67829+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
67830+#endif
67831+
67832 for (nstart = start ; ; ) {
67833 unsigned long newflags;
67834
67835@@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
67836
67837 /* newflags >> 4 shift VM_MAY% in place of VM_% */
67838 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
67839+ if (prot & (PROT_WRITE | PROT_EXEC))
67840+ gr_log_rwxmprotect(vma->vm_file);
67841+
67842+ error = -EACCES;
67843+ goto out;
67844+ }
67845+
67846+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
67847 error = -EACCES;
67848 goto out;
67849 }
67850@@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
67851 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
67852 if (error)
67853 goto out;
67854+
67855+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
67856+
67857 nstart = tmp;
67858
67859 if (nstart < prev->vm_end)
67860diff -urNp linux-3.0.4/mm/mremap.c linux-3.0.4/mm/mremap.c
67861--- linux-3.0.4/mm/mremap.c 2011-07-21 22:17:23.000000000 -0400
67862+++ linux-3.0.4/mm/mremap.c 2011-08-23 21:47:56.000000000 -0400
67863@@ -113,6 +113,12 @@ static void move_ptes(struct vm_area_str
67864 continue;
67865 pte = ptep_clear_flush(vma, old_addr, old_pte);
67866 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
67867+
67868+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
67869+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
67870+ pte = pte_exprotect(pte);
67871+#endif
67872+
67873 set_pte_at(mm, new_addr, new_pte, pte);
67874 }
67875
67876@@ -272,6 +278,11 @@ static struct vm_area_struct *vma_to_res
67877 if (is_vm_hugetlb_page(vma))
67878 goto Einval;
67879
67880+#ifdef CONFIG_PAX_SEGMEXEC
67881+ if (pax_find_mirror_vma(vma))
67882+ goto Einval;
67883+#endif
67884+
67885 /* We can't remap across vm area boundaries */
67886 if (old_len > vma->vm_end - addr)
67887 goto Efault;
67888@@ -328,20 +339,25 @@ static unsigned long mremap_to(unsigned
67889 unsigned long ret = -EINVAL;
67890 unsigned long charged = 0;
67891 unsigned long map_flags;
67892+ unsigned long pax_task_size = TASK_SIZE;
67893
67894 if (new_addr & ~PAGE_MASK)
67895 goto out;
67896
67897- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
67898+#ifdef CONFIG_PAX_SEGMEXEC
67899+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
67900+ pax_task_size = SEGMEXEC_TASK_SIZE;
67901+#endif
67902+
67903+ pax_task_size -= PAGE_SIZE;
67904+
67905+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
67906 goto out;
67907
67908 /* Check if the location we're moving into overlaps the
67909 * old location at all, and fail if it does.
67910 */
67911- if ((new_addr <= addr) && (new_addr+new_len) > addr)
67912- goto out;
67913-
67914- if ((addr <= new_addr) && (addr+old_len) > new_addr)
67915+ if (addr + old_len > new_addr && new_addr + new_len > addr)
67916 goto out;
67917
67918 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
67919@@ -413,6 +429,7 @@ unsigned long do_mremap(unsigned long ad
67920 struct vm_area_struct *vma;
67921 unsigned long ret = -EINVAL;
67922 unsigned long charged = 0;
67923+ unsigned long pax_task_size = TASK_SIZE;
67924
67925 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
67926 goto out;
67927@@ -431,6 +448,17 @@ unsigned long do_mremap(unsigned long ad
67928 if (!new_len)
67929 goto out;
67930
67931+#ifdef CONFIG_PAX_SEGMEXEC
67932+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
67933+ pax_task_size = SEGMEXEC_TASK_SIZE;
67934+#endif
67935+
67936+ pax_task_size -= PAGE_SIZE;
67937+
67938+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
67939+ old_len > pax_task_size || addr > pax_task_size-old_len)
67940+ goto out;
67941+
67942 if (flags & MREMAP_FIXED) {
67943 if (flags & MREMAP_MAYMOVE)
67944 ret = mremap_to(addr, old_len, new_addr, new_len);
67945@@ -480,6 +508,7 @@ unsigned long do_mremap(unsigned long ad
67946 addr + new_len);
67947 }
67948 ret = addr;
67949+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
67950 goto out;
67951 }
67952 }
67953@@ -506,7 +535,13 @@ unsigned long do_mremap(unsigned long ad
67954 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
67955 if (ret)
67956 goto out;
67957+
67958+ map_flags = vma->vm_flags;
67959 ret = move_vma(vma, addr, old_len, new_len, new_addr);
67960+ if (!(ret & ~PAGE_MASK)) {
67961+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
67962+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
67963+ }
67964 }
67965 out:
67966 if (ret & ~PAGE_MASK)
67967diff -urNp linux-3.0.4/mm/nobootmem.c linux-3.0.4/mm/nobootmem.c
67968--- linux-3.0.4/mm/nobootmem.c 2011-07-21 22:17:23.000000000 -0400
67969+++ linux-3.0.4/mm/nobootmem.c 2011-08-23 21:47:56.000000000 -0400
67970@@ -110,19 +110,30 @@ static void __init __free_pages_memory(u
67971 unsigned long __init free_all_memory_core_early(int nodeid)
67972 {
67973 int i;
67974- u64 start, end;
67975+ u64 start, end, startrange, endrange;
67976 unsigned long count = 0;
67977- struct range *range = NULL;
67978+ struct range *range = NULL, rangerange = { 0, 0 };
67979 int nr_range;
67980
67981 nr_range = get_free_all_memory_range(&range, nodeid);
67982+ startrange = __pa(range) >> PAGE_SHIFT;
67983+ endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
67984
67985 for (i = 0; i < nr_range; i++) {
67986 start = range[i].start;
67987 end = range[i].end;
67988+ if (start <= endrange && startrange < end) {
67989+ BUG_ON(rangerange.start | rangerange.end);
67990+ rangerange = range[i];
67991+ continue;
67992+ }
67993 count += end - start;
67994 __free_pages_memory(start, end);
67995 }
67996+ start = rangerange.start;
67997+ end = rangerange.end;
67998+ count += end - start;
67999+ __free_pages_memory(start, end);
68000
68001 return count;
68002 }
68003diff -urNp linux-3.0.4/mm/nommu.c linux-3.0.4/mm/nommu.c
68004--- linux-3.0.4/mm/nommu.c 2011-07-21 22:17:23.000000000 -0400
68005+++ linux-3.0.4/mm/nommu.c 2011-08-23 21:47:56.000000000 -0400
68006@@ -63,7 +63,6 @@ int sysctl_overcommit_memory = OVERCOMMI
68007 int sysctl_overcommit_ratio = 50; /* default is 50% */
68008 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
68009 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
68010-int heap_stack_gap = 0;
68011
68012 atomic_long_t mmap_pages_allocated;
68013
68014@@ -826,15 +825,6 @@ struct vm_area_struct *find_vma(struct m
68015 EXPORT_SYMBOL(find_vma);
68016
68017 /*
68018- * find a VMA
68019- * - we don't extend stack VMAs under NOMMU conditions
68020- */
68021-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
68022-{
68023- return find_vma(mm, addr);
68024-}
68025-
68026-/*
68027 * expand a stack to a given address
68028 * - not supported under NOMMU conditions
68029 */
68030@@ -1554,6 +1544,7 @@ int split_vma(struct mm_struct *mm, stru
68031
68032 /* most fields are the same, copy all, and then fixup */
68033 *new = *vma;
68034+ INIT_LIST_HEAD(&new->anon_vma_chain);
68035 *region = *vma->vm_region;
68036 new->vm_region = region;
68037
68038diff -urNp linux-3.0.4/mm/page_alloc.c linux-3.0.4/mm/page_alloc.c
68039--- linux-3.0.4/mm/page_alloc.c 2011-07-21 22:17:23.000000000 -0400
68040+++ linux-3.0.4/mm/page_alloc.c 2011-08-23 21:48:14.000000000 -0400
68041@@ -340,7 +340,7 @@ out:
68042 * This usage means that zero-order pages may not be compound.
68043 */
68044
68045-static void free_compound_page(struct page *page)
68046+void free_compound_page(struct page *page)
68047 {
68048 __free_pages_ok(page, compound_order(page));
68049 }
68050@@ -653,6 +653,10 @@ static bool free_pages_prepare(struct pa
68051 int i;
68052 int bad = 0;
68053
68054+#ifdef CONFIG_PAX_MEMORY_SANITIZE
68055+ unsigned long index = 1UL << order;
68056+#endif
68057+
68058 trace_mm_page_free_direct(page, order);
68059 kmemcheck_free_shadow(page, order);
68060
68061@@ -668,6 +672,12 @@ static bool free_pages_prepare(struct pa
68062 debug_check_no_obj_freed(page_address(page),
68063 PAGE_SIZE << order);
68064 }
68065+
68066+#ifdef CONFIG_PAX_MEMORY_SANITIZE
68067+ for (; index; --index)
68068+ sanitize_highpage(page + index - 1);
68069+#endif
68070+
68071 arch_free_page(page, order);
68072 kernel_map_pages(page, 1 << order, 0);
68073
68074@@ -783,8 +793,10 @@ static int prep_new_page(struct page *pa
68075 arch_alloc_page(page, order);
68076 kernel_map_pages(page, 1 << order, 1);
68077
68078+#ifndef CONFIG_PAX_MEMORY_SANITIZE
68079 if (gfp_flags & __GFP_ZERO)
68080 prep_zero_page(page, order, gfp_flags);
68081+#endif
68082
68083 if (order && (gfp_flags & __GFP_COMP))
68084 prep_compound_page(page, order);
68085@@ -2525,6 +2537,8 @@ void show_free_areas(unsigned int filter
68086 int cpu;
68087 struct zone *zone;
68088
68089+ pax_track_stack();
68090+
68091 for_each_populated_zone(zone) {
68092 if (skip_free_areas_node(filter, zone_to_nid(zone)))
68093 continue;
68094diff -urNp linux-3.0.4/mm/percpu.c linux-3.0.4/mm/percpu.c
68095--- linux-3.0.4/mm/percpu.c 2011-07-21 22:17:23.000000000 -0400
68096+++ linux-3.0.4/mm/percpu.c 2011-08-23 21:47:56.000000000 -0400
68097@@ -121,7 +121,7 @@ static unsigned int pcpu_first_unit_cpu
68098 static unsigned int pcpu_last_unit_cpu __read_mostly;
68099
68100 /* the address of the first chunk which starts with the kernel static area */
68101-void *pcpu_base_addr __read_mostly;
68102+void *pcpu_base_addr __read_only;
68103 EXPORT_SYMBOL_GPL(pcpu_base_addr);
68104
68105 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
68106diff -urNp linux-3.0.4/mm/rmap.c linux-3.0.4/mm/rmap.c
68107--- linux-3.0.4/mm/rmap.c 2011-07-21 22:17:23.000000000 -0400
68108+++ linux-3.0.4/mm/rmap.c 2011-08-23 21:47:56.000000000 -0400
68109@@ -153,6 +153,10 @@ int anon_vma_prepare(struct vm_area_stru
68110 struct anon_vma *anon_vma = vma->anon_vma;
68111 struct anon_vma_chain *avc;
68112
68113+#ifdef CONFIG_PAX_SEGMEXEC
68114+ struct anon_vma_chain *avc_m = NULL;
68115+#endif
68116+
68117 might_sleep();
68118 if (unlikely(!anon_vma)) {
68119 struct mm_struct *mm = vma->vm_mm;
68120@@ -162,6 +166,12 @@ int anon_vma_prepare(struct vm_area_stru
68121 if (!avc)
68122 goto out_enomem;
68123
68124+#ifdef CONFIG_PAX_SEGMEXEC
68125+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
68126+ if (!avc_m)
68127+ goto out_enomem_free_avc;
68128+#endif
68129+
68130 anon_vma = find_mergeable_anon_vma(vma);
68131 allocated = NULL;
68132 if (!anon_vma) {
68133@@ -175,6 +185,21 @@ int anon_vma_prepare(struct vm_area_stru
68134 /* page_table_lock to protect against threads */
68135 spin_lock(&mm->page_table_lock);
68136 if (likely(!vma->anon_vma)) {
68137+
68138+#ifdef CONFIG_PAX_SEGMEXEC
68139+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
68140+
68141+ if (vma_m) {
68142+ BUG_ON(vma_m->anon_vma);
68143+ vma_m->anon_vma = anon_vma;
68144+ avc_m->anon_vma = anon_vma;
68145+ avc_m->vma = vma;
68146+ list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
68147+ list_add(&avc_m->same_anon_vma, &anon_vma->head);
68148+ avc_m = NULL;
68149+ }
68150+#endif
68151+
68152 vma->anon_vma = anon_vma;
68153 avc->anon_vma = anon_vma;
68154 avc->vma = vma;
68155@@ -188,12 +213,24 @@ int anon_vma_prepare(struct vm_area_stru
68156
68157 if (unlikely(allocated))
68158 put_anon_vma(allocated);
68159+
68160+#ifdef CONFIG_PAX_SEGMEXEC
68161+ if (unlikely(avc_m))
68162+ anon_vma_chain_free(avc_m);
68163+#endif
68164+
68165 if (unlikely(avc))
68166 anon_vma_chain_free(avc);
68167 }
68168 return 0;
68169
68170 out_enomem_free_avc:
68171+
68172+#ifdef CONFIG_PAX_SEGMEXEC
68173+ if (avc_m)
68174+ anon_vma_chain_free(avc_m);
68175+#endif
68176+
68177 anon_vma_chain_free(avc);
68178 out_enomem:
68179 return -ENOMEM;
68180@@ -244,7 +281,7 @@ static void anon_vma_chain_link(struct v
68181 * Attach the anon_vmas from src to dst.
68182 * Returns 0 on success, -ENOMEM on failure.
68183 */
68184-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
68185+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
68186 {
68187 struct anon_vma_chain *avc, *pavc;
68188 struct anon_vma *root = NULL;
68189@@ -277,7 +314,7 @@ int anon_vma_clone(struct vm_area_struct
68190 * the corresponding VMA in the parent process is attached to.
68191 * Returns 0 on success, non-zero on failure.
68192 */
68193-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
68194+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
68195 {
68196 struct anon_vma_chain *avc;
68197 struct anon_vma *anon_vma;
68198diff -urNp linux-3.0.4/mm/shmem.c linux-3.0.4/mm/shmem.c
68199--- linux-3.0.4/mm/shmem.c 2011-07-21 22:17:23.000000000 -0400
68200+++ linux-3.0.4/mm/shmem.c 2011-08-23 21:48:14.000000000 -0400
68201@@ -31,7 +31,7 @@
68202 #include <linux/percpu_counter.h>
68203 #include <linux/swap.h>
68204
68205-static struct vfsmount *shm_mnt;
68206+struct vfsmount *shm_mnt;
68207
68208 #ifdef CONFIG_SHMEM
68209 /*
68210@@ -1101,6 +1101,8 @@ static int shmem_writepage(struct page *
68211 goto unlock;
68212 }
68213 entry = shmem_swp_entry(info, index, NULL);
68214+ if (!entry)
68215+ goto unlock;
68216 if (entry->val) {
68217 /*
68218 * The more uptodate page coming down from a stacked
68219@@ -1172,6 +1174,8 @@ static struct page *shmem_swapin(swp_ent
68220 struct vm_area_struct pvma;
68221 struct page *page;
68222
68223+ pax_track_stack();
68224+
68225 spol = mpol_cond_copy(&mpol,
68226 mpol_shared_policy_lookup(&info->policy, idx));
68227
68228@@ -2568,8 +2572,7 @@ int shmem_fill_super(struct super_block
68229 int err = -ENOMEM;
68230
68231 /* Round up to L1_CACHE_BYTES to resist false sharing */
68232- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
68233- L1_CACHE_BYTES), GFP_KERNEL);
68234+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
68235 if (!sbinfo)
68236 return -ENOMEM;
68237
68238diff -urNp linux-3.0.4/mm/slab.c linux-3.0.4/mm/slab.c
68239--- linux-3.0.4/mm/slab.c 2011-07-21 22:17:23.000000000 -0400
68240+++ linux-3.0.4/mm/slab.c 2011-08-23 21:48:14.000000000 -0400
68241@@ -151,7 +151,7 @@
68242
68243 /* Legal flag mask for kmem_cache_create(). */
68244 #if DEBUG
68245-# define CREATE_MASK (SLAB_RED_ZONE | \
68246+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
68247 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
68248 SLAB_CACHE_DMA | \
68249 SLAB_STORE_USER | \
68250@@ -159,7 +159,7 @@
68251 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
68252 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
68253 #else
68254-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
68255+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
68256 SLAB_CACHE_DMA | \
68257 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
68258 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
68259@@ -288,7 +288,7 @@ struct kmem_list3 {
68260 * Need this for bootstrapping a per node allocator.
68261 */
68262 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
68263-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
68264+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
68265 #define CACHE_CACHE 0
68266 #define SIZE_AC MAX_NUMNODES
68267 #define SIZE_L3 (2 * MAX_NUMNODES)
68268@@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_
68269 if ((x)->max_freeable < i) \
68270 (x)->max_freeable = i; \
68271 } while (0)
68272-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
68273-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
68274-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
68275-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
68276+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
68277+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
68278+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
68279+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
68280 #else
68281 #define STATS_INC_ACTIVE(x) do { } while (0)
68282 #define STATS_DEC_ACTIVE(x) do { } while (0)
68283@@ -538,7 +538,7 @@ static inline void *index_to_obj(struct
68284 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
68285 */
68286 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
68287- const struct slab *slab, void *obj)
68288+ const struct slab *slab, const void *obj)
68289 {
68290 u32 offset = (obj - slab->s_mem);
68291 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
68292@@ -564,7 +564,7 @@ struct cache_names {
68293 static struct cache_names __initdata cache_names[] = {
68294 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
68295 #include <linux/kmalloc_sizes.h>
68296- {NULL,}
68297+ {NULL}
68298 #undef CACHE
68299 };
68300
68301@@ -1530,7 +1530,7 @@ void __init kmem_cache_init(void)
68302 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
68303 sizes[INDEX_AC].cs_size,
68304 ARCH_KMALLOC_MINALIGN,
68305- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
68306+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
68307 NULL);
68308
68309 if (INDEX_AC != INDEX_L3) {
68310@@ -1538,7 +1538,7 @@ void __init kmem_cache_init(void)
68311 kmem_cache_create(names[INDEX_L3].name,
68312 sizes[INDEX_L3].cs_size,
68313 ARCH_KMALLOC_MINALIGN,
68314- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
68315+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
68316 NULL);
68317 }
68318
68319@@ -1556,7 +1556,7 @@ void __init kmem_cache_init(void)
68320 sizes->cs_cachep = kmem_cache_create(names->name,
68321 sizes->cs_size,
68322 ARCH_KMALLOC_MINALIGN,
68323- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
68324+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
68325 NULL);
68326 }
68327 #ifdef CONFIG_ZONE_DMA
68328@@ -4272,10 +4272,10 @@ static int s_show(struct seq_file *m, vo
68329 }
68330 /* cpu stats */
68331 {
68332- unsigned long allochit = atomic_read(&cachep->allochit);
68333- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
68334- unsigned long freehit = atomic_read(&cachep->freehit);
68335- unsigned long freemiss = atomic_read(&cachep->freemiss);
68336+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
68337+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
68338+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
68339+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
68340
68341 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
68342 allochit, allocmiss, freehit, freemiss);
68343@@ -4532,15 +4532,66 @@ static const struct file_operations proc
68344
68345 static int __init slab_proc_init(void)
68346 {
68347- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
68348+ mode_t gr_mode = S_IRUGO;
68349+
68350+#ifdef CONFIG_GRKERNSEC_PROC_ADD
68351+ gr_mode = S_IRUSR;
68352+#endif
68353+
68354+ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
68355 #ifdef CONFIG_DEBUG_SLAB_LEAK
68356- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
68357+ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
68358 #endif
68359 return 0;
68360 }
68361 module_init(slab_proc_init);
68362 #endif
68363
68364+void check_object_size(const void *ptr, unsigned long n, bool to)
68365+{
68366+
68367+#ifdef CONFIG_PAX_USERCOPY
68368+ struct page *page;
68369+ struct kmem_cache *cachep = NULL;
68370+ struct slab *slabp;
68371+ unsigned int objnr;
68372+ unsigned long offset;
68373+
68374+ if (!n)
68375+ return;
68376+
68377+ if (ZERO_OR_NULL_PTR(ptr))
68378+ goto report;
68379+
68380+ if (!virt_addr_valid(ptr))
68381+ return;
68382+
68383+ page = virt_to_head_page(ptr);
68384+
68385+ if (!PageSlab(page)) {
68386+ if (object_is_on_stack(ptr, n) == -1)
68387+ goto report;
68388+ return;
68389+ }
68390+
68391+ cachep = page_get_cache(page);
68392+ if (!(cachep->flags & SLAB_USERCOPY))
68393+ goto report;
68394+
68395+ slabp = page_get_slab(page);
68396+ objnr = obj_to_index(cachep, slabp, ptr);
68397+ BUG_ON(objnr >= cachep->num);
68398+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
68399+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
68400+ return;
68401+
68402+report:
68403+ pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
68404+#endif
68405+
68406+}
68407+EXPORT_SYMBOL(check_object_size);
68408+
68409 /**
68410 * ksize - get the actual amount of memory allocated for a given object
68411 * @objp: Pointer to the object
68412diff -urNp linux-3.0.4/mm/slob.c linux-3.0.4/mm/slob.c
68413--- linux-3.0.4/mm/slob.c 2011-07-21 22:17:23.000000000 -0400
68414+++ linux-3.0.4/mm/slob.c 2011-08-23 21:47:56.000000000 -0400
68415@@ -29,7 +29,7 @@
68416 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
68417 * alloc_pages() directly, allocating compound pages so the page order
68418 * does not have to be separately tracked, and also stores the exact
68419- * allocation size in page->private so that it can be used to accurately
68420+ * allocation size in slob_page->size so that it can be used to accurately
68421 * provide ksize(). These objects are detected in kfree() because slob_page()
68422 * is false for them.
68423 *
68424@@ -58,6 +58,7 @@
68425 */
68426
68427 #include <linux/kernel.h>
68428+#include <linux/sched.h>
68429 #include <linux/slab.h>
68430 #include <linux/mm.h>
68431 #include <linux/swap.h> /* struct reclaim_state */
68432@@ -102,7 +103,8 @@ struct slob_page {
68433 unsigned long flags; /* mandatory */
68434 atomic_t _count; /* mandatory */
68435 slobidx_t units; /* free units left in page */
68436- unsigned long pad[2];
68437+ unsigned long pad[1];
68438+ unsigned long size; /* size when >=PAGE_SIZE */
68439 slob_t *free; /* first free slob_t in page */
68440 struct list_head list; /* linked list of free pages */
68441 };
68442@@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
68443 */
68444 static inline int is_slob_page(struct slob_page *sp)
68445 {
68446- return PageSlab((struct page *)sp);
68447+ return PageSlab((struct page *)sp) && !sp->size;
68448 }
68449
68450 static inline void set_slob_page(struct slob_page *sp)
68451@@ -150,7 +152,7 @@ static inline void clear_slob_page(struc
68452
68453 static inline struct slob_page *slob_page(const void *addr)
68454 {
68455- return (struct slob_page *)virt_to_page(addr);
68456+ return (struct slob_page *)virt_to_head_page(addr);
68457 }
68458
68459 /*
68460@@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_
68461 /*
68462 * Return the size of a slob block.
68463 */
68464-static slobidx_t slob_units(slob_t *s)
68465+static slobidx_t slob_units(const slob_t *s)
68466 {
68467 if (s->units > 0)
68468 return s->units;
68469@@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
68470 /*
68471 * Return the next free slob block pointer after this one.
68472 */
68473-static slob_t *slob_next(slob_t *s)
68474+static slob_t *slob_next(const slob_t *s)
68475 {
68476 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
68477 slobidx_t next;
68478@@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
68479 /*
68480 * Returns true if s is the last free block in its page.
68481 */
68482-static int slob_last(slob_t *s)
68483+static int slob_last(const slob_t *s)
68484 {
68485 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
68486 }
68487@@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, i
68488 if (!page)
68489 return NULL;
68490
68491+ set_slob_page(page);
68492 return page_address(page);
68493 }
68494
68495@@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp
68496 if (!b)
68497 return NULL;
68498 sp = slob_page(b);
68499- set_slob_page(sp);
68500
68501 spin_lock_irqsave(&slob_lock, flags);
68502 sp->units = SLOB_UNITS(PAGE_SIZE);
68503 sp->free = b;
68504+ sp->size = 0;
68505 INIT_LIST_HEAD(&sp->list);
68506 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
68507 set_slob_page_free(sp, slob_list);
68508@@ -476,10 +479,9 @@ out:
68509 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
68510 */
68511
68512-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
68513+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
68514 {
68515- unsigned int *m;
68516- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
68517+ slob_t *m;
68518 void *ret;
68519
68520 lockdep_trace_alloc(gfp);
68521@@ -492,7 +494,10 @@ void *__kmalloc_node(size_t size, gfp_t
68522
68523 if (!m)
68524 return NULL;
68525- *m = size;
68526+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
68527+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
68528+ m[0].units = size;
68529+ m[1].units = align;
68530 ret = (void *)m + align;
68531
68532 trace_kmalloc_node(_RET_IP_, ret,
68533@@ -504,16 +509,25 @@ void *__kmalloc_node(size_t size, gfp_t
68534 gfp |= __GFP_COMP;
68535 ret = slob_new_pages(gfp, order, node);
68536 if (ret) {
68537- struct page *page;
68538- page = virt_to_page(ret);
68539- page->private = size;
68540+ struct slob_page *sp;
68541+ sp = slob_page(ret);
68542+ sp->size = size;
68543 }
68544
68545 trace_kmalloc_node(_RET_IP_, ret,
68546 size, PAGE_SIZE << order, gfp, node);
68547 }
68548
68549- kmemleak_alloc(ret, size, 1, gfp);
68550+ return ret;
68551+}
68552+
68553+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
68554+{
68555+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
68556+ void *ret = __kmalloc_node_align(size, gfp, node, align);
68557+
68558+ if (!ZERO_OR_NULL_PTR(ret))
68559+ kmemleak_alloc(ret, size, 1, gfp);
68560 return ret;
68561 }
68562 EXPORT_SYMBOL(__kmalloc_node);
68563@@ -531,13 +545,88 @@ void kfree(const void *block)
68564 sp = slob_page(block);
68565 if (is_slob_page(sp)) {
68566 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
68567- unsigned int *m = (unsigned int *)(block - align);
68568- slob_free(m, *m + align);
68569- } else
68570+ slob_t *m = (slob_t *)(block - align);
68571+ slob_free(m, m[0].units + align);
68572+ } else {
68573+ clear_slob_page(sp);
68574+ free_slob_page(sp);
68575+ sp->size = 0;
68576 put_page(&sp->page);
68577+ }
68578 }
68579 EXPORT_SYMBOL(kfree);
68580
68581+void check_object_size(const void *ptr, unsigned long n, bool to)
68582+{
68583+
68584+#ifdef CONFIG_PAX_USERCOPY
68585+ struct slob_page *sp;
68586+ const slob_t *free;
68587+ const void *base;
68588+ unsigned long flags;
68589+
68590+ if (!n)
68591+ return;
68592+
68593+ if (ZERO_OR_NULL_PTR(ptr))
68594+ goto report;
68595+
68596+ if (!virt_addr_valid(ptr))
68597+ return;
68598+
68599+ sp = slob_page(ptr);
68600+ if (!PageSlab((struct page*)sp)) {
68601+ if (object_is_on_stack(ptr, n) == -1)
68602+ goto report;
68603+ return;
68604+ }
68605+
68606+ if (sp->size) {
68607+ base = page_address(&sp->page);
68608+ if (base <= ptr && n <= sp->size - (ptr - base))
68609+ return;
68610+ goto report;
68611+ }
68612+
68613+ /* some tricky double walking to find the chunk */
68614+ spin_lock_irqsave(&slob_lock, flags);
68615+ base = (void *)((unsigned long)ptr & PAGE_MASK);
68616+ free = sp->free;
68617+
68618+ while (!slob_last(free) && (void *)free <= ptr) {
68619+ base = free + slob_units(free);
68620+ free = slob_next(free);
68621+ }
68622+
68623+ while (base < (void *)free) {
68624+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
68625+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
68626+ int offset;
68627+
68628+ if (ptr < base + align)
68629+ break;
68630+
68631+ offset = ptr - base - align;
68632+ if (offset >= m) {
68633+ base += size;
68634+ continue;
68635+ }
68636+
68637+ if (n > m - offset)
68638+ break;
68639+
68640+ spin_unlock_irqrestore(&slob_lock, flags);
68641+ return;
68642+ }
68643+
68644+ spin_unlock_irqrestore(&slob_lock, flags);
68645+report:
68646+ pax_report_usercopy(ptr, n, to, NULL);
68647+#endif
68648+
68649+}
68650+EXPORT_SYMBOL(check_object_size);
68651+
68652 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
68653 size_t ksize(const void *block)
68654 {
68655@@ -550,10 +639,10 @@ size_t ksize(const void *block)
68656 sp = slob_page(block);
68657 if (is_slob_page(sp)) {
68658 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
68659- unsigned int *m = (unsigned int *)(block - align);
68660- return SLOB_UNITS(*m) * SLOB_UNIT;
68661+ slob_t *m = (slob_t *)(block - align);
68662+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
68663 } else
68664- return sp->page.private;
68665+ return sp->size;
68666 }
68667 EXPORT_SYMBOL(ksize);
68668
68669@@ -569,8 +658,13 @@ struct kmem_cache *kmem_cache_create(con
68670 {
68671 struct kmem_cache *c;
68672
68673+#ifdef CONFIG_PAX_USERCOPY
68674+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
68675+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
68676+#else
68677 c = slob_alloc(sizeof(struct kmem_cache),
68678 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
68679+#endif
68680
68681 if (c) {
68682 c->name = name;
68683@@ -608,17 +702,25 @@ void *kmem_cache_alloc_node(struct kmem_
68684 {
68685 void *b;
68686
68687+#ifdef CONFIG_PAX_USERCOPY
68688+ b = __kmalloc_node_align(c->size, flags, node, c->align);
68689+#else
68690 if (c->size < PAGE_SIZE) {
68691 b = slob_alloc(c->size, flags, c->align, node);
68692 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
68693 SLOB_UNITS(c->size) * SLOB_UNIT,
68694 flags, node);
68695 } else {
68696+ struct slob_page *sp;
68697+
68698 b = slob_new_pages(flags, get_order(c->size), node);
68699+ sp = slob_page(b);
68700+ sp->size = c->size;
68701 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
68702 PAGE_SIZE << get_order(c->size),
68703 flags, node);
68704 }
68705+#endif
68706
68707 if (c->ctor)
68708 c->ctor(b);
68709@@ -630,10 +732,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
68710
68711 static void __kmem_cache_free(void *b, int size)
68712 {
68713- if (size < PAGE_SIZE)
68714+ struct slob_page *sp = slob_page(b);
68715+
68716+ if (is_slob_page(sp))
68717 slob_free(b, size);
68718- else
68719+ else {
68720+ clear_slob_page(sp);
68721+ free_slob_page(sp);
68722+ sp->size = 0;
68723 slob_free_pages(b, get_order(size));
68724+ }
68725 }
68726
68727 static void kmem_rcu_free(struct rcu_head *head)
68728@@ -646,17 +754,31 @@ static void kmem_rcu_free(struct rcu_hea
68729
68730 void kmem_cache_free(struct kmem_cache *c, void *b)
68731 {
68732+ int size = c->size;
68733+
68734+#ifdef CONFIG_PAX_USERCOPY
68735+ if (size + c->align < PAGE_SIZE) {
68736+ size += c->align;
68737+ b -= c->align;
68738+ }
68739+#endif
68740+
68741 kmemleak_free_recursive(b, c->flags);
68742 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
68743 struct slob_rcu *slob_rcu;
68744- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
68745- slob_rcu->size = c->size;
68746+ slob_rcu = b + (size - sizeof(struct slob_rcu));
68747+ slob_rcu->size = size;
68748 call_rcu(&slob_rcu->head, kmem_rcu_free);
68749 } else {
68750- __kmem_cache_free(b, c->size);
68751+ __kmem_cache_free(b, size);
68752 }
68753
68754+#ifdef CONFIG_PAX_USERCOPY
68755+ trace_kfree(_RET_IP_, b);
68756+#else
68757 trace_kmem_cache_free(_RET_IP_, b);
68758+#endif
68759+
68760 }
68761 EXPORT_SYMBOL(kmem_cache_free);
68762
68763diff -urNp linux-3.0.4/mm/slub.c linux-3.0.4/mm/slub.c
68764--- linux-3.0.4/mm/slub.c 2011-07-21 22:17:23.000000000 -0400
68765+++ linux-3.0.4/mm/slub.c 2011-09-25 22:15:40.000000000 -0400
68766@@ -200,7 +200,7 @@ struct track {
68767
68768 enum track_item { TRACK_ALLOC, TRACK_FREE };
68769
68770-#ifdef CONFIG_SYSFS
68771+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
68772 static int sysfs_slab_add(struct kmem_cache *);
68773 static int sysfs_slab_alias(struct kmem_cache *, const char *);
68774 static void sysfs_slab_remove(struct kmem_cache *);
68775@@ -442,7 +442,7 @@ static void print_track(const char *s, s
68776 if (!t->addr)
68777 return;
68778
68779- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
68780+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
68781 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
68782 }
68783
68784@@ -2137,6 +2137,8 @@ void kmem_cache_free(struct kmem_cache *
68785
68786 page = virt_to_head_page(x);
68787
68788+ BUG_ON(!PageSlab(page));
68789+
68790 slab_free(s, page, x, _RET_IP_);
68791
68792 trace_kmem_cache_free(_RET_IP_, x);
68793@@ -2170,7 +2172,7 @@ static int slub_min_objects;
68794 * Merge control. If this is set then no merging of slab caches will occur.
68795 * (Could be removed. This was introduced to pacify the merge skeptics.)
68796 */
68797-static int slub_nomerge;
68798+static int slub_nomerge = 1;
68799
68800 /*
68801 * Calculate the order of allocation given an slab object size.
68802@@ -2594,7 +2596,7 @@ static int kmem_cache_open(struct kmem_c
68803 * list to avoid pounding the page allocator excessively.
68804 */
68805 set_min_partial(s, ilog2(s->size));
68806- s->refcount = 1;
68807+ atomic_set(&s->refcount, 1);
68808 #ifdef CONFIG_NUMA
68809 s->remote_node_defrag_ratio = 1000;
68810 #endif
68811@@ -2699,8 +2701,7 @@ static inline int kmem_cache_close(struc
68812 void kmem_cache_destroy(struct kmem_cache *s)
68813 {
68814 down_write(&slub_lock);
68815- s->refcount--;
68816- if (!s->refcount) {
68817+ if (atomic_dec_and_test(&s->refcount)) {
68818 list_del(&s->list);
68819 if (kmem_cache_close(s)) {
68820 printk(KERN_ERR "SLUB %s: %s called for cache that "
68821@@ -2910,6 +2911,46 @@ void *__kmalloc_node(size_t size, gfp_t
68822 EXPORT_SYMBOL(__kmalloc_node);
68823 #endif
68824
68825+void check_object_size(const void *ptr, unsigned long n, bool to)
68826+{
68827+
68828+#ifdef CONFIG_PAX_USERCOPY
68829+ struct page *page;
68830+ struct kmem_cache *s = NULL;
68831+ unsigned long offset;
68832+
68833+ if (!n)
68834+ return;
68835+
68836+ if (ZERO_OR_NULL_PTR(ptr))
68837+ goto report;
68838+
68839+ if (!virt_addr_valid(ptr))
68840+ return;
68841+
68842+ page = virt_to_head_page(ptr);
68843+
68844+ if (!PageSlab(page)) {
68845+ if (object_is_on_stack(ptr, n) == -1)
68846+ goto report;
68847+ return;
68848+ }
68849+
68850+ s = page->slab;
68851+ if (!(s->flags & SLAB_USERCOPY))
68852+ goto report;
68853+
68854+ offset = (ptr - page_address(page)) % s->size;
68855+ if (offset <= s->objsize && n <= s->objsize - offset)
68856+ return;
68857+
68858+report:
68859+ pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
68860+#endif
68861+
68862+}
68863+EXPORT_SYMBOL(check_object_size);
68864+
68865 size_t ksize(const void *object)
68866 {
68867 struct page *page;
68868@@ -3154,7 +3195,7 @@ static void __init kmem_cache_bootstrap_
68869 int node;
68870
68871 list_add(&s->list, &slab_caches);
68872- s->refcount = -1;
68873+ atomic_set(&s->refcount, -1);
68874
68875 for_each_node_state(node, N_NORMAL_MEMORY) {
68876 struct kmem_cache_node *n = get_node(s, node);
68877@@ -3271,17 +3312,17 @@ void __init kmem_cache_init(void)
68878
68879 /* Caches that are not of the two-to-the-power-of size */
68880 if (KMALLOC_MIN_SIZE <= 32) {
68881- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
68882+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
68883 caches++;
68884 }
68885
68886 if (KMALLOC_MIN_SIZE <= 64) {
68887- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
68888+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
68889 caches++;
68890 }
68891
68892 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
68893- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
68894+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
68895 caches++;
68896 }
68897
68898@@ -3349,7 +3390,7 @@ static int slab_unmergeable(struct kmem_
68899 /*
68900 * We may have set a slab to be unmergeable during bootstrap.
68901 */
68902- if (s->refcount < 0)
68903+ if (atomic_read(&s->refcount) < 0)
68904 return 1;
68905
68906 return 0;
68907@@ -3408,7 +3449,7 @@ struct kmem_cache *kmem_cache_create(con
68908 down_write(&slub_lock);
68909 s = find_mergeable(size, align, flags, name, ctor);
68910 if (s) {
68911- s->refcount++;
68912+ atomic_inc(&s->refcount);
68913 /*
68914 * Adjust the object sizes so that we clear
68915 * the complete object on kzalloc.
68916@@ -3417,7 +3458,7 @@ struct kmem_cache *kmem_cache_create(con
68917 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
68918
68919 if (sysfs_slab_alias(s, name)) {
68920- s->refcount--;
68921+ atomic_dec(&s->refcount);
68922 goto err;
68923 }
68924 up_write(&slub_lock);
68925@@ -3545,7 +3586,7 @@ void *__kmalloc_node_track_caller(size_t
68926 }
68927 #endif
68928
68929-#ifdef CONFIG_SYSFS
68930+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
68931 static int count_inuse(struct page *page)
68932 {
68933 return page->inuse;
68934@@ -3935,12 +3976,12 @@ static void resiliency_test(void)
68935 validate_slab_cache(kmalloc_caches[9]);
68936 }
68937 #else
68938-#ifdef CONFIG_SYSFS
68939+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
68940 static void resiliency_test(void) {};
68941 #endif
68942 #endif
68943
68944-#ifdef CONFIG_SYSFS
68945+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
68946 enum slab_stat_type {
68947 SL_ALL, /* All slabs */
68948 SL_PARTIAL, /* Only partially allocated slabs */
68949@@ -4150,7 +4191,7 @@ SLAB_ATTR_RO(ctor);
68950
68951 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
68952 {
68953- return sprintf(buf, "%d\n", s->refcount - 1);
68954+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
68955 }
68956 SLAB_ATTR_RO(aliases);
68957
68958@@ -4662,6 +4703,7 @@ static char *create_unique_id(struct kme
68959 return name;
68960 }
68961
68962+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
68963 static int sysfs_slab_add(struct kmem_cache *s)
68964 {
68965 int err;
68966@@ -4724,6 +4766,7 @@ static void sysfs_slab_remove(struct kme
68967 kobject_del(&s->kobj);
68968 kobject_put(&s->kobj);
68969 }
68970+#endif
68971
68972 /*
68973 * Need to buffer aliases during bootup until sysfs becomes
68974@@ -4737,6 +4780,7 @@ struct saved_alias {
68975
68976 static struct saved_alias *alias_list;
68977
68978+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
68979 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
68980 {
68981 struct saved_alias *al;
68982@@ -4759,6 +4803,7 @@ static int sysfs_slab_alias(struct kmem_
68983 alias_list = al;
68984 return 0;
68985 }
68986+#endif
68987
68988 static int __init slab_sysfs_init(void)
68989 {
68990@@ -4894,7 +4939,13 @@ static const struct file_operations proc
68991
68992 static int __init slab_proc_init(void)
68993 {
68994- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
68995+ mode_t gr_mode = S_IRUGO;
68996+
68997+#ifdef CONFIG_GRKERNSEC_PROC_ADD
68998+ gr_mode = S_IRUSR;
68999+#endif
69000+
69001+ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
69002 return 0;
69003 }
69004 module_init(slab_proc_init);
69005diff -urNp linux-3.0.4/mm/swap.c linux-3.0.4/mm/swap.c
69006--- linux-3.0.4/mm/swap.c 2011-07-21 22:17:23.000000000 -0400
69007+++ linux-3.0.4/mm/swap.c 2011-08-23 21:47:56.000000000 -0400
69008@@ -31,6 +31,7 @@
69009 #include <linux/backing-dev.h>
69010 #include <linux/memcontrol.h>
69011 #include <linux/gfp.h>
69012+#include <linux/hugetlb.h>
69013
69014 #include "internal.h"
69015
69016@@ -71,6 +72,8 @@ static void __put_compound_page(struct p
69017
69018 __page_cache_release(page);
69019 dtor = get_compound_page_dtor(page);
69020+ if (!PageHuge(page))
69021+ BUG_ON(dtor != free_compound_page);
69022 (*dtor)(page);
69023 }
69024
69025diff -urNp linux-3.0.4/mm/swapfile.c linux-3.0.4/mm/swapfile.c
69026--- linux-3.0.4/mm/swapfile.c 2011-07-21 22:17:23.000000000 -0400
69027+++ linux-3.0.4/mm/swapfile.c 2011-08-23 21:47:56.000000000 -0400
69028@@ -62,7 +62,7 @@ static DEFINE_MUTEX(swapon_mutex);
69029
69030 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
69031 /* Activity counter to indicate that a swapon or swapoff has occurred */
69032-static atomic_t proc_poll_event = ATOMIC_INIT(0);
69033+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
69034
69035 static inline unsigned char swap_count(unsigned char ent)
69036 {
69037@@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __us
69038 }
69039 filp_close(swap_file, NULL);
69040 err = 0;
69041- atomic_inc(&proc_poll_event);
69042+ atomic_inc_unchecked(&proc_poll_event);
69043 wake_up_interruptible(&proc_poll_wait);
69044
69045 out_dput:
69046@@ -1692,8 +1692,8 @@ static unsigned swaps_poll(struct file *
69047
69048 poll_wait(file, &proc_poll_wait, wait);
69049
69050- if (s->event != atomic_read(&proc_poll_event)) {
69051- s->event = atomic_read(&proc_poll_event);
69052+ if (s->event != atomic_read_unchecked(&proc_poll_event)) {
69053+ s->event = atomic_read_unchecked(&proc_poll_event);
69054 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
69055 }
69056
69057@@ -1799,7 +1799,7 @@ static int swaps_open(struct inode *inod
69058 }
69059
69060 s->seq.private = s;
69061- s->event = atomic_read(&proc_poll_event);
69062+ s->event = atomic_read_unchecked(&proc_poll_event);
69063 return ret;
69064 }
69065
69066@@ -2133,7 +2133,7 @@ SYSCALL_DEFINE2(swapon, const char __use
69067 (p->flags & SWP_DISCARDABLE) ? "D" : "");
69068
69069 mutex_unlock(&swapon_mutex);
69070- atomic_inc(&proc_poll_event);
69071+ atomic_inc_unchecked(&proc_poll_event);
69072 wake_up_interruptible(&proc_poll_wait);
69073
69074 if (S_ISREG(inode->i_mode))
69075diff -urNp linux-3.0.4/mm/util.c linux-3.0.4/mm/util.c
69076--- linux-3.0.4/mm/util.c 2011-07-21 22:17:23.000000000 -0400
69077+++ linux-3.0.4/mm/util.c 2011-08-23 21:47:56.000000000 -0400
69078@@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
69079 * allocated buffer. Use this if you don't want to free the buffer immediately
69080 * like, for example, with RCU.
69081 */
69082+#undef __krealloc
69083 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
69084 {
69085 void *ret;
69086@@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
69087 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
69088 * %NULL pointer, the object pointed to is freed.
69089 */
69090+#undef krealloc
69091 void *krealloc(const void *p, size_t new_size, gfp_t flags)
69092 {
69093 void *ret;
69094@@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *m
69095 void arch_pick_mmap_layout(struct mm_struct *mm)
69096 {
69097 mm->mmap_base = TASK_UNMAPPED_BASE;
69098+
69099+#ifdef CONFIG_PAX_RANDMMAP
69100+ if (mm->pax_flags & MF_PAX_RANDMMAP)
69101+ mm->mmap_base += mm->delta_mmap;
69102+#endif
69103+
69104 mm->get_unmapped_area = arch_get_unmapped_area;
69105 mm->unmap_area = arch_unmap_area;
69106 }
69107diff -urNp linux-3.0.4/mm/vmalloc.c linux-3.0.4/mm/vmalloc.c
69108--- linux-3.0.4/mm/vmalloc.c 2011-09-02 18:11:21.000000000 -0400
69109+++ linux-3.0.4/mm/vmalloc.c 2011-08-23 21:47:56.000000000 -0400
69110@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd,
69111
69112 pte = pte_offset_kernel(pmd, addr);
69113 do {
69114- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
69115- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
69116+
69117+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
69118+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
69119+ BUG_ON(!pte_exec(*pte));
69120+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
69121+ continue;
69122+ }
69123+#endif
69124+
69125+ {
69126+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
69127+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
69128+ }
69129 } while (pte++, addr += PAGE_SIZE, addr != end);
69130 }
69131
69132@@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, un
69133 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
69134 {
69135 pte_t *pte;
69136+ int ret = -ENOMEM;
69137
69138 /*
69139 * nr is a running index into the array which helps higher level
69140@@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, un
69141 pte = pte_alloc_kernel(pmd, addr);
69142 if (!pte)
69143 return -ENOMEM;
69144+
69145+ pax_open_kernel();
69146 do {
69147 struct page *page = pages[*nr];
69148
69149- if (WARN_ON(!pte_none(*pte)))
69150- return -EBUSY;
69151- if (WARN_ON(!page))
69152- return -ENOMEM;
69153+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
69154+ if (pgprot_val(prot) & _PAGE_NX)
69155+#endif
69156+
69157+ if (WARN_ON(!pte_none(*pte))) {
69158+ ret = -EBUSY;
69159+ goto out;
69160+ }
69161+ if (WARN_ON(!page)) {
69162+ ret = -ENOMEM;
69163+ goto out;
69164+ }
69165 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
69166 (*nr)++;
69167 } while (pte++, addr += PAGE_SIZE, addr != end);
69168- return 0;
69169+ ret = 0;
69170+out:
69171+ pax_close_kernel();
69172+ return ret;
69173 }
69174
69175 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
69176@@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void
69177 * and fall back on vmalloc() if that fails. Others
69178 * just put it in the vmalloc space.
69179 */
69180-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
69181+#ifdef CONFIG_MODULES
69182+#ifdef MODULES_VADDR
69183 unsigned long addr = (unsigned long)x;
69184 if (addr >= MODULES_VADDR && addr < MODULES_END)
69185 return 1;
69186 #endif
69187+
69188+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
69189+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
69190+ return 1;
69191+#endif
69192+
69193+#endif
69194+
69195 return is_vmalloc_addr(x);
69196 }
69197
69198@@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void
69199
69200 if (!pgd_none(*pgd)) {
69201 pud_t *pud = pud_offset(pgd, addr);
69202+#ifdef CONFIG_X86
69203+ if (!pud_large(*pud))
69204+#endif
69205 if (!pud_none(*pud)) {
69206 pmd_t *pmd = pmd_offset(pud, addr);
69207+#ifdef CONFIG_X86
69208+ if (!pmd_large(*pmd))
69209+#endif
69210 if (!pmd_none(*pmd)) {
69211 pte_t *ptep, pte;
69212
69213@@ -1297,6 +1337,16 @@ static struct vm_struct *__get_vm_area_n
69214 struct vm_struct *area;
69215
69216 BUG_ON(in_interrupt());
69217+
69218+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
69219+ if (flags & VM_KERNEXEC) {
69220+ if (start != VMALLOC_START || end != VMALLOC_END)
69221+ return NULL;
69222+ start = (unsigned long)MODULES_EXEC_VADDR;
69223+ end = (unsigned long)MODULES_EXEC_END;
69224+ }
69225+#endif
69226+
69227 if (flags & VM_IOREMAP) {
69228 int bit = fls(size);
69229
69230@@ -1515,6 +1565,11 @@ void *vmap(struct page **pages, unsigned
69231 if (count > totalram_pages)
69232 return NULL;
69233
69234+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
69235+ if (!(pgprot_val(prot) & _PAGE_NX))
69236+ flags |= VM_KERNEXEC;
69237+#endif
69238+
69239 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
69240 __builtin_return_address(0));
69241 if (!area)
69242@@ -1616,6 +1671,13 @@ void *__vmalloc_node_range(unsigned long
69243 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
69244 return NULL;
69245
69246+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
69247+ if (!(pgprot_val(prot) & _PAGE_NX))
69248+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
69249+ node, gfp_mask, caller);
69250+ else
69251+#endif
69252+
69253 area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
69254 gfp_mask, caller);
69255
69256@@ -1655,6 +1717,7 @@ static void *__vmalloc_node(unsigned lon
69257 gfp_mask, prot, node, caller);
69258 }
69259
69260+#undef __vmalloc
69261 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
69262 {
69263 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
69264@@ -1678,6 +1741,7 @@ static inline void *__vmalloc_node_flags
69265 * For tight control over page level allocator and protection flags
69266 * use __vmalloc() instead.
69267 */
69268+#undef vmalloc
69269 void *vmalloc(unsigned long size)
69270 {
69271 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
69272@@ -1694,6 +1758,7 @@ EXPORT_SYMBOL(vmalloc);
69273 * For tight control over page level allocator and protection flags
69274 * use __vmalloc() instead.
69275 */
69276+#undef vzalloc
69277 void *vzalloc(unsigned long size)
69278 {
69279 return __vmalloc_node_flags(size, -1,
69280@@ -1708,6 +1773,7 @@ EXPORT_SYMBOL(vzalloc);
69281 * The resulting memory area is zeroed so it can be mapped to userspace
69282 * without leaking data.
69283 */
69284+#undef vmalloc_user
69285 void *vmalloc_user(unsigned long size)
69286 {
69287 struct vm_struct *area;
69288@@ -1735,6 +1801,7 @@ EXPORT_SYMBOL(vmalloc_user);
69289 * For tight control over page level allocator and protection flags
69290 * use __vmalloc() instead.
69291 */
69292+#undef vmalloc_node
69293 void *vmalloc_node(unsigned long size, int node)
69294 {
69295 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
69296@@ -1754,6 +1821,7 @@ EXPORT_SYMBOL(vmalloc_node);
69297 * For tight control over page level allocator and protection flags
69298 * use __vmalloc_node() instead.
69299 */
69300+#undef vzalloc_node
69301 void *vzalloc_node(unsigned long size, int node)
69302 {
69303 return __vmalloc_node_flags(size, node,
69304@@ -1776,10 +1844,10 @@ EXPORT_SYMBOL(vzalloc_node);
69305 * For tight control over page level allocator and protection flags
69306 * use __vmalloc() instead.
69307 */
69308-
69309+#undef vmalloc_exec
69310 void *vmalloc_exec(unsigned long size)
69311 {
69312- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
69313+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
69314 -1, __builtin_return_address(0));
69315 }
69316
69317@@ -1798,6 +1866,7 @@ void *vmalloc_exec(unsigned long size)
69318 * Allocate enough 32bit PA addressable pages to cover @size from the
69319 * page level allocator and map them into contiguous kernel virtual space.
69320 */
69321+#undef vmalloc_32
69322 void *vmalloc_32(unsigned long size)
69323 {
69324 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
69325@@ -1812,6 +1881,7 @@ EXPORT_SYMBOL(vmalloc_32);
69326 * The resulting memory area is 32bit addressable and zeroed so it can be
69327 * mapped to userspace without leaking data.
69328 */
69329+#undef vmalloc_32_user
69330 void *vmalloc_32_user(unsigned long size)
69331 {
69332 struct vm_struct *area;
69333@@ -2074,6 +2144,8 @@ int remap_vmalloc_range(struct vm_area_s
69334 unsigned long uaddr = vma->vm_start;
69335 unsigned long usize = vma->vm_end - vma->vm_start;
69336
69337+ BUG_ON(vma->vm_mirror);
69338+
69339 if ((PAGE_SIZE-1) & (unsigned long)addr)
69340 return -EINVAL;
69341
69342diff -urNp linux-3.0.4/mm/vmstat.c linux-3.0.4/mm/vmstat.c
69343--- linux-3.0.4/mm/vmstat.c 2011-07-21 22:17:23.000000000 -0400
69344+++ linux-3.0.4/mm/vmstat.c 2011-08-23 21:48:14.000000000 -0400
69345@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
69346 *
69347 * vm_stat contains the global counters
69348 */
69349-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
69350+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
69351 EXPORT_SYMBOL(vm_stat);
69352
69353 #ifdef CONFIG_SMP
69354@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
69355 v = p->vm_stat_diff[i];
69356 p->vm_stat_diff[i] = 0;
69357 local_irq_restore(flags);
69358- atomic_long_add(v, &zone->vm_stat[i]);
69359+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
69360 global_diff[i] += v;
69361 #ifdef CONFIG_NUMA
69362 /* 3 seconds idle till flush */
69363@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
69364
69365 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
69366 if (global_diff[i])
69367- atomic_long_add(global_diff[i], &vm_stat[i]);
69368+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
69369 }
69370
69371 #endif
69372@@ -1207,10 +1207,20 @@ static int __init setup_vmstat(void)
69373 start_cpu_timer(cpu);
69374 #endif
69375 #ifdef CONFIG_PROC_FS
69376- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
69377- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
69378- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
69379- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
69380+ {
69381+ mode_t gr_mode = S_IRUGO;
69382+#ifdef CONFIG_GRKERNSEC_PROC_ADD
69383+ gr_mode = S_IRUSR;
69384+#endif
69385+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
69386+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
69387+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
69388+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
69389+#else
69390+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
69391+#endif
69392+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
69393+ }
69394 #endif
69395 return 0;
69396 }
69397diff -urNp linux-3.0.4/net/8021q/vlan.c linux-3.0.4/net/8021q/vlan.c
69398--- linux-3.0.4/net/8021q/vlan.c 2011-07-21 22:17:23.000000000 -0400
69399+++ linux-3.0.4/net/8021q/vlan.c 2011-08-23 21:47:56.000000000 -0400
69400@@ -591,8 +591,7 @@ static int vlan_ioctl_handler(struct net
69401 err = -EPERM;
69402 if (!capable(CAP_NET_ADMIN))
69403 break;
69404- if ((args.u.name_type >= 0) &&
69405- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
69406+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
69407 struct vlan_net *vn;
69408
69409 vn = net_generic(net, vlan_net_id);
69410diff -urNp linux-3.0.4/net/9p/trans_fd.c linux-3.0.4/net/9p/trans_fd.c
69411--- linux-3.0.4/net/9p/trans_fd.c 2011-07-21 22:17:23.000000000 -0400
69412+++ linux-3.0.4/net/9p/trans_fd.c 2011-10-06 04:17:55.000000000 -0400
69413@@ -423,7 +423,7 @@ static int p9_fd_write(struct p9_client
69414 oldfs = get_fs();
69415 set_fs(get_ds());
69416 /* The cast to a user pointer is valid due to the set_fs() */
69417- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
69418+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
69419 set_fs(oldfs);
69420
69421 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
69422diff -urNp linux-3.0.4/net/9p/trans_virtio.c linux-3.0.4/net/9p/trans_virtio.c
69423--- linux-3.0.4/net/9p/trans_virtio.c 2011-07-21 22:17:23.000000000 -0400
69424+++ linux-3.0.4/net/9p/trans_virtio.c 2011-10-06 04:17:55.000000000 -0400
69425@@ -328,7 +328,7 @@ req_retry_pinned:
69426 } else {
69427 char *pbuf;
69428 if (req->tc->pubuf)
69429- pbuf = (__force char *) req->tc->pubuf;
69430+ pbuf = (char __force_kernel *) req->tc->pubuf;
69431 else
69432 pbuf = req->tc->pkbuf;
69433 outp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, pbuf,
69434@@ -357,7 +357,7 @@ req_retry_pinned:
69435 } else {
69436 char *pbuf;
69437 if (req->tc->pubuf)
69438- pbuf = (__force char *) req->tc->pubuf;
69439+ pbuf = (char __force_kernel *) req->tc->pubuf;
69440 else
69441 pbuf = req->tc->pkbuf;
69442
69443diff -urNp linux-3.0.4/net/atm/atm_misc.c linux-3.0.4/net/atm/atm_misc.c
69444--- linux-3.0.4/net/atm/atm_misc.c 2011-07-21 22:17:23.000000000 -0400
69445+++ linux-3.0.4/net/atm/atm_misc.c 2011-08-23 21:47:56.000000000 -0400
69446@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int
69447 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
69448 return 1;
69449 atm_return(vcc, truesize);
69450- atomic_inc(&vcc->stats->rx_drop);
69451+ atomic_inc_unchecked(&vcc->stats->rx_drop);
69452 return 0;
69453 }
69454 EXPORT_SYMBOL(atm_charge);
69455@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct
69456 }
69457 }
69458 atm_return(vcc, guess);
69459- atomic_inc(&vcc->stats->rx_drop);
69460+ atomic_inc_unchecked(&vcc->stats->rx_drop);
69461 return NULL;
69462 }
69463 EXPORT_SYMBOL(atm_alloc_charge);
69464@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
69465
69466 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
69467 {
69468-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
69469+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
69470 __SONET_ITEMS
69471 #undef __HANDLE_ITEM
69472 }
69473@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
69474
69475 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
69476 {
69477-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
69478+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
69479 __SONET_ITEMS
69480 #undef __HANDLE_ITEM
69481 }
69482diff -urNp linux-3.0.4/net/atm/lec.h linux-3.0.4/net/atm/lec.h
69483--- linux-3.0.4/net/atm/lec.h 2011-07-21 22:17:23.000000000 -0400
69484+++ linux-3.0.4/net/atm/lec.h 2011-08-23 21:47:56.000000000 -0400
69485@@ -48,7 +48,7 @@ struct lane2_ops {
69486 const u8 *tlvs, u32 sizeoftlvs);
69487 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
69488 const u8 *tlvs, u32 sizeoftlvs);
69489-};
69490+} __no_const;
69491
69492 /*
69493 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
69494diff -urNp linux-3.0.4/net/atm/mpc.h linux-3.0.4/net/atm/mpc.h
69495--- linux-3.0.4/net/atm/mpc.h 2011-07-21 22:17:23.000000000 -0400
69496+++ linux-3.0.4/net/atm/mpc.h 2011-08-23 21:47:56.000000000 -0400
69497@@ -33,7 +33,7 @@ struct mpoa_client {
69498 struct mpc_parameters parameters; /* parameters for this client */
69499
69500 const struct net_device_ops *old_ops;
69501- struct net_device_ops new_ops;
69502+ net_device_ops_no_const new_ops;
69503 };
69504
69505
69506diff -urNp linux-3.0.4/net/atm/mpoa_caches.c linux-3.0.4/net/atm/mpoa_caches.c
69507--- linux-3.0.4/net/atm/mpoa_caches.c 2011-07-21 22:17:23.000000000 -0400
69508+++ linux-3.0.4/net/atm/mpoa_caches.c 2011-08-23 21:48:14.000000000 -0400
69509@@ -255,6 +255,8 @@ static void check_resolving_entries(stru
69510 struct timeval now;
69511 struct k_message msg;
69512
69513+ pax_track_stack();
69514+
69515 do_gettimeofday(&now);
69516
69517 read_lock_bh(&client->ingress_lock);
69518diff -urNp linux-3.0.4/net/atm/proc.c linux-3.0.4/net/atm/proc.c
69519--- linux-3.0.4/net/atm/proc.c 2011-07-21 22:17:23.000000000 -0400
69520+++ linux-3.0.4/net/atm/proc.c 2011-08-23 21:47:56.000000000 -0400
69521@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *s
69522 const struct k_atm_aal_stats *stats)
69523 {
69524 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
69525- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
69526- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
69527- atomic_read(&stats->rx_drop));
69528+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
69529+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
69530+ atomic_read_unchecked(&stats->rx_drop));
69531 }
69532
69533 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
69534diff -urNp linux-3.0.4/net/atm/resources.c linux-3.0.4/net/atm/resources.c
69535--- linux-3.0.4/net/atm/resources.c 2011-07-21 22:17:23.000000000 -0400
69536+++ linux-3.0.4/net/atm/resources.c 2011-08-23 21:47:56.000000000 -0400
69537@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
69538 static void copy_aal_stats(struct k_atm_aal_stats *from,
69539 struct atm_aal_stats *to)
69540 {
69541-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
69542+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
69543 __AAL_STAT_ITEMS
69544 #undef __HANDLE_ITEM
69545 }
69546@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_
69547 static void subtract_aal_stats(struct k_atm_aal_stats *from,
69548 struct atm_aal_stats *to)
69549 {
69550-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
69551+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
69552 __AAL_STAT_ITEMS
69553 #undef __HANDLE_ITEM
69554 }
69555diff -urNp linux-3.0.4/net/batman-adv/hard-interface.c linux-3.0.4/net/batman-adv/hard-interface.c
69556--- linux-3.0.4/net/batman-adv/hard-interface.c 2011-07-21 22:17:23.000000000 -0400
69557+++ linux-3.0.4/net/batman-adv/hard-interface.c 2011-08-23 21:47:56.000000000 -0400
69558@@ -351,8 +351,8 @@ int hardif_enable_interface(struct hard_
69559 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
69560 dev_add_pack(&hard_iface->batman_adv_ptype);
69561
69562- atomic_set(&hard_iface->seqno, 1);
69563- atomic_set(&hard_iface->frag_seqno, 1);
69564+ atomic_set_unchecked(&hard_iface->seqno, 1);
69565+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
69566 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
69567 hard_iface->net_dev->name);
69568
69569diff -urNp linux-3.0.4/net/batman-adv/routing.c linux-3.0.4/net/batman-adv/routing.c
69570--- linux-3.0.4/net/batman-adv/routing.c 2011-07-21 22:17:23.000000000 -0400
69571+++ linux-3.0.4/net/batman-adv/routing.c 2011-08-23 21:47:56.000000000 -0400
69572@@ -627,7 +627,7 @@ void receive_bat_packet(struct ethhdr *e
69573 return;
69574
69575 /* could be changed by schedule_own_packet() */
69576- if_incoming_seqno = atomic_read(&if_incoming->seqno);
69577+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
69578
69579 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
69580
69581diff -urNp linux-3.0.4/net/batman-adv/send.c linux-3.0.4/net/batman-adv/send.c
69582--- linux-3.0.4/net/batman-adv/send.c 2011-07-21 22:17:23.000000000 -0400
69583+++ linux-3.0.4/net/batman-adv/send.c 2011-08-23 21:47:56.000000000 -0400
69584@@ -279,7 +279,7 @@ void schedule_own_packet(struct hard_ifa
69585
69586 /* change sequence number to network order */
69587 batman_packet->seqno =
69588- htonl((uint32_t)atomic_read(&hard_iface->seqno));
69589+ htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
69590
69591 if (vis_server == VIS_TYPE_SERVER_SYNC)
69592 batman_packet->flags |= VIS_SERVER;
69593@@ -293,7 +293,7 @@ void schedule_own_packet(struct hard_ifa
69594 else
69595 batman_packet->gw_flags = 0;
69596
69597- atomic_inc(&hard_iface->seqno);
69598+ atomic_inc_unchecked(&hard_iface->seqno);
69599
69600 slide_own_bcast_window(hard_iface);
69601 send_time = own_send_time(bat_priv);
69602diff -urNp linux-3.0.4/net/batman-adv/soft-interface.c linux-3.0.4/net/batman-adv/soft-interface.c
69603--- linux-3.0.4/net/batman-adv/soft-interface.c 2011-07-21 22:17:23.000000000 -0400
69604+++ linux-3.0.4/net/batman-adv/soft-interface.c 2011-08-23 21:47:56.000000000 -0400
69605@@ -628,7 +628,7 @@ int interface_tx(struct sk_buff *skb, st
69606
69607 /* set broadcast sequence number */
69608 bcast_packet->seqno =
69609- htonl(atomic_inc_return(&bat_priv->bcast_seqno));
69610+ htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
69611
69612 add_bcast_packet_to_list(bat_priv, skb);
69613
69614@@ -830,7 +830,7 @@ struct net_device *softif_create(char *n
69615 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
69616
69617 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
69618- atomic_set(&bat_priv->bcast_seqno, 1);
69619+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
69620 atomic_set(&bat_priv->tt_local_changed, 0);
69621
69622 bat_priv->primary_if = NULL;
69623diff -urNp linux-3.0.4/net/batman-adv/types.h linux-3.0.4/net/batman-adv/types.h
69624--- linux-3.0.4/net/batman-adv/types.h 2011-07-21 22:17:23.000000000 -0400
69625+++ linux-3.0.4/net/batman-adv/types.h 2011-08-23 21:47:56.000000000 -0400
69626@@ -38,8 +38,8 @@ struct hard_iface {
69627 int16_t if_num;
69628 char if_status;
69629 struct net_device *net_dev;
69630- atomic_t seqno;
69631- atomic_t frag_seqno;
69632+ atomic_unchecked_t seqno;
69633+ atomic_unchecked_t frag_seqno;
69634 unsigned char *packet_buff;
69635 int packet_len;
69636 struct kobject *hardif_obj;
69637@@ -142,7 +142,7 @@ struct bat_priv {
69638 atomic_t orig_interval; /* uint */
69639 atomic_t hop_penalty; /* uint */
69640 atomic_t log_level; /* uint */
69641- atomic_t bcast_seqno;
69642+ atomic_unchecked_t bcast_seqno;
69643 atomic_t bcast_queue_left;
69644 atomic_t batman_queue_left;
69645 char num_ifaces;
69646diff -urNp linux-3.0.4/net/batman-adv/unicast.c linux-3.0.4/net/batman-adv/unicast.c
69647--- linux-3.0.4/net/batman-adv/unicast.c 2011-07-21 22:17:23.000000000 -0400
69648+++ linux-3.0.4/net/batman-adv/unicast.c 2011-08-23 21:47:56.000000000 -0400
69649@@ -265,7 +265,7 @@ int frag_send_skb(struct sk_buff *skb, s
69650 frag1->flags = UNI_FRAG_HEAD | large_tail;
69651 frag2->flags = large_tail;
69652
69653- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
69654+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
69655 frag1->seqno = htons(seqno - 1);
69656 frag2->seqno = htons(seqno);
69657
69658diff -urNp linux-3.0.4/net/bridge/br_multicast.c linux-3.0.4/net/bridge/br_multicast.c
69659--- linux-3.0.4/net/bridge/br_multicast.c 2011-07-21 22:17:23.000000000 -0400
69660+++ linux-3.0.4/net/bridge/br_multicast.c 2011-08-23 21:47:56.000000000 -0400
69661@@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct
69662 nexthdr = ip6h->nexthdr;
69663 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
69664
69665- if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
69666+ if (nexthdr != IPPROTO_ICMPV6)
69667 return 0;
69668
69669 /* Okay, we found ICMPv6 header */
69670diff -urNp linux-3.0.4/net/bridge/netfilter/ebtables.c linux-3.0.4/net/bridge/netfilter/ebtables.c
69671--- linux-3.0.4/net/bridge/netfilter/ebtables.c 2011-07-21 22:17:23.000000000 -0400
69672+++ linux-3.0.4/net/bridge/netfilter/ebtables.c 2011-08-23 21:48:14.000000000 -0400
69673@@ -1512,7 +1512,7 @@ static int do_ebt_get_ctl(struct sock *s
69674 tmp.valid_hooks = t->table->valid_hooks;
69675 }
69676 mutex_unlock(&ebt_mutex);
69677- if (copy_to_user(user, &tmp, *len) != 0){
69678+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
69679 BUGPRINT("c2u Didn't work\n");
69680 ret = -EFAULT;
69681 break;
69682@@ -1780,6 +1780,8 @@ static int compat_copy_everything_to_use
69683 int ret;
69684 void __user *pos;
69685
69686+ pax_track_stack();
69687+
69688 memset(&tinfo, 0, sizeof(tinfo));
69689
69690 if (cmd == EBT_SO_GET_ENTRIES) {
69691diff -urNp linux-3.0.4/net/caif/caif_socket.c linux-3.0.4/net/caif/caif_socket.c
69692--- linux-3.0.4/net/caif/caif_socket.c 2011-07-21 22:17:23.000000000 -0400
69693+++ linux-3.0.4/net/caif/caif_socket.c 2011-08-23 21:47:56.000000000 -0400
69694@@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
69695 #ifdef CONFIG_DEBUG_FS
69696 struct debug_fs_counter {
69697 atomic_t caif_nr_socks;
69698- atomic_t caif_sock_create;
69699- atomic_t num_connect_req;
69700- atomic_t num_connect_resp;
69701- atomic_t num_connect_fail_resp;
69702- atomic_t num_disconnect;
69703- atomic_t num_remote_shutdown_ind;
69704- atomic_t num_tx_flow_off_ind;
69705- atomic_t num_tx_flow_on_ind;
69706- atomic_t num_rx_flow_off;
69707- atomic_t num_rx_flow_on;
69708+ atomic_unchecked_t caif_sock_create;
69709+ atomic_unchecked_t num_connect_req;
69710+ atomic_unchecked_t num_connect_resp;
69711+ atomic_unchecked_t num_connect_fail_resp;
69712+ atomic_unchecked_t num_disconnect;
69713+ atomic_unchecked_t num_remote_shutdown_ind;
69714+ atomic_unchecked_t num_tx_flow_off_ind;
69715+ atomic_unchecked_t num_tx_flow_on_ind;
69716+ atomic_unchecked_t num_rx_flow_off;
69717+ atomic_unchecked_t num_rx_flow_on;
69718 };
69719 static struct debug_fs_counter cnt;
69720 #define dbfs_atomic_inc(v) atomic_inc_return(v)
69721+#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
69722 #define dbfs_atomic_dec(v) atomic_dec_return(v)
69723 #else
69724 #define dbfs_atomic_inc(v) 0
69725@@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct soc
69726 atomic_read(&cf_sk->sk.sk_rmem_alloc),
69727 sk_rcvbuf_lowwater(cf_sk));
69728 set_rx_flow_off(cf_sk);
69729- dbfs_atomic_inc(&cnt.num_rx_flow_off);
69730+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
69731 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
69732 }
69733
69734@@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct soc
69735 set_rx_flow_off(cf_sk);
69736 if (net_ratelimit())
69737 pr_debug("sending flow OFF due to rmem_schedule\n");
69738- dbfs_atomic_inc(&cnt.num_rx_flow_off);
69739+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
69740 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
69741 }
69742 skb->dev = NULL;
69743@@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer
69744 switch (flow) {
69745 case CAIF_CTRLCMD_FLOW_ON_IND:
69746 /* OK from modem to start sending again */
69747- dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
69748+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
69749 set_tx_flow_on(cf_sk);
69750 cf_sk->sk.sk_state_change(&cf_sk->sk);
69751 break;
69752
69753 case CAIF_CTRLCMD_FLOW_OFF_IND:
69754 /* Modem asks us to shut up */
69755- dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
69756+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
69757 set_tx_flow_off(cf_sk);
69758 cf_sk->sk.sk_state_change(&cf_sk->sk);
69759 break;
69760@@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer
69761 /* We're now connected */
69762 caif_client_register_refcnt(&cf_sk->layer,
69763 cfsk_hold, cfsk_put);
69764- dbfs_atomic_inc(&cnt.num_connect_resp);
69765+ dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
69766 cf_sk->sk.sk_state = CAIF_CONNECTED;
69767 set_tx_flow_on(cf_sk);
69768 cf_sk->sk.sk_state_change(&cf_sk->sk);
69769@@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer
69770
69771 case CAIF_CTRLCMD_INIT_FAIL_RSP:
69772 /* Connect request failed */
69773- dbfs_atomic_inc(&cnt.num_connect_fail_resp);
69774+ dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
69775 cf_sk->sk.sk_err = ECONNREFUSED;
69776 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
69777 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
69778@@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer
69779
69780 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
69781 /* Modem has closed this connection, or device is down. */
69782- dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
69783+ dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
69784 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
69785 cf_sk->sk.sk_err = ECONNRESET;
69786 set_rx_flow_on(cf_sk);
69787@@ -297,7 +298,7 @@ static void caif_check_flow_release(stru
69788 return;
69789
69790 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
69791- dbfs_atomic_inc(&cnt.num_rx_flow_on);
69792+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
69793 set_rx_flow_on(cf_sk);
69794 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
69795 }
69796@@ -854,7 +855,7 @@ static int caif_connect(struct socket *s
69797 /*ifindex = id of the interface.*/
69798 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
69799
69800- dbfs_atomic_inc(&cnt.num_connect_req);
69801+ dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
69802 cf_sk->layer.receive = caif_sktrecv_cb;
69803
69804 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
69805@@ -943,7 +944,7 @@ static int caif_release(struct socket *s
69806 spin_unlock_bh(&sk->sk_receive_queue.lock);
69807 sock->sk = NULL;
69808
69809- dbfs_atomic_inc(&cnt.num_disconnect);
69810+ dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
69811
69812 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
69813 if (cf_sk->debugfs_socket_dir != NULL)
69814@@ -1122,7 +1123,7 @@ static int caif_create(struct net *net,
69815 cf_sk->conn_req.protocol = protocol;
69816 /* Increase the number of sockets created. */
69817 dbfs_atomic_inc(&cnt.caif_nr_socks);
69818- num = dbfs_atomic_inc(&cnt.caif_sock_create);
69819+ num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
69820 #ifdef CONFIG_DEBUG_FS
69821 if (!IS_ERR(debugfsdir)) {
69822
69823diff -urNp linux-3.0.4/net/caif/cfctrl.c linux-3.0.4/net/caif/cfctrl.c
69824--- linux-3.0.4/net/caif/cfctrl.c 2011-07-21 22:17:23.000000000 -0400
69825+++ linux-3.0.4/net/caif/cfctrl.c 2011-08-23 21:48:14.000000000 -0400
69826@@ -9,6 +9,7 @@
69827 #include <linux/stddef.h>
69828 #include <linux/spinlock.h>
69829 #include <linux/slab.h>
69830+#include <linux/sched.h>
69831 #include <net/caif/caif_layer.h>
69832 #include <net/caif/cfpkt.h>
69833 #include <net/caif/cfctrl.h>
69834@@ -45,8 +46,8 @@ struct cflayer *cfctrl_create(void)
69835 dev_info.id = 0xff;
69836 memset(this, 0, sizeof(*this));
69837 cfsrvl_init(&this->serv, 0, &dev_info, false);
69838- atomic_set(&this->req_seq_no, 1);
69839- atomic_set(&this->rsp_seq_no, 1);
69840+ atomic_set_unchecked(&this->req_seq_no, 1);
69841+ atomic_set_unchecked(&this->rsp_seq_no, 1);
69842 this->serv.layer.receive = cfctrl_recv;
69843 sprintf(this->serv.layer.name, "ctrl");
69844 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
69845@@ -132,8 +133,8 @@ static void cfctrl_insert_req(struct cfc
69846 struct cfctrl_request_info *req)
69847 {
69848 spin_lock_bh(&ctrl->info_list_lock);
69849- atomic_inc(&ctrl->req_seq_no);
69850- req->sequence_no = atomic_read(&ctrl->req_seq_no);
69851+ atomic_inc_unchecked(&ctrl->req_seq_no);
69852+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
69853 list_add_tail(&req->list, &ctrl->list);
69854 spin_unlock_bh(&ctrl->info_list_lock);
69855 }
69856@@ -151,7 +152,7 @@ static struct cfctrl_request_info *cfctr
69857 if (p != first)
69858 pr_warn("Requests are not received in order\n");
69859
69860- atomic_set(&ctrl->rsp_seq_no,
69861+ atomic_set_unchecked(&ctrl->rsp_seq_no,
69862 p->sequence_no);
69863 list_del(&p->list);
69864 goto out;
69865@@ -364,6 +365,7 @@ static int cfctrl_recv(struct cflayer *l
69866 struct cfctrl *cfctrl = container_obj(layer);
69867 struct cfctrl_request_info rsp, *req;
69868
69869+ pax_track_stack();
69870
69871 cfpkt_extr_head(pkt, &cmdrsp, 1);
69872 cmd = cmdrsp & CFCTRL_CMD_MASK;
69873diff -urNp linux-3.0.4/net/compat.c linux-3.0.4/net/compat.c
69874--- linux-3.0.4/net/compat.c 2011-07-21 22:17:23.000000000 -0400
69875+++ linux-3.0.4/net/compat.c 2011-10-06 04:17:55.000000000 -0400
69876@@ -70,9 +70,9 @@ int get_compat_msghdr(struct msghdr *kms
69877 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
69878 __get_user(kmsg->msg_flags, &umsg->msg_flags))
69879 return -EFAULT;
69880- kmsg->msg_name = compat_ptr(tmp1);
69881- kmsg->msg_iov = compat_ptr(tmp2);
69882- kmsg->msg_control = compat_ptr(tmp3);
69883+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
69884+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
69885+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
69886 return 0;
69887 }
69888
69889@@ -84,7 +84,7 @@ int verify_compat_iovec(struct msghdr *k
69890
69891 if (kern_msg->msg_namelen) {
69892 if (mode == VERIFY_READ) {
69893- int err = move_addr_to_kernel(kern_msg->msg_name,
69894+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
69895 kern_msg->msg_namelen,
69896 kern_address);
69897 if (err < 0)
69898@@ -95,7 +95,7 @@ int verify_compat_iovec(struct msghdr *k
69899 kern_msg->msg_name = NULL;
69900
69901 tot_len = iov_from_user_compat_to_kern(kern_iov,
69902- (struct compat_iovec __user *)kern_msg->msg_iov,
69903+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
69904 kern_msg->msg_iovlen);
69905 if (tot_len >= 0)
69906 kern_msg->msg_iov = kern_iov;
69907@@ -115,20 +115,20 @@ int verify_compat_iovec(struct msghdr *k
69908
69909 #define CMSG_COMPAT_FIRSTHDR(msg) \
69910 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
69911- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
69912+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
69913 (struct compat_cmsghdr __user *)NULL)
69914
69915 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
69916 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
69917 (ucmlen) <= (unsigned long) \
69918 ((mhdr)->msg_controllen - \
69919- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
69920+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
69921
69922 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
69923 struct compat_cmsghdr __user *cmsg, int cmsg_len)
69924 {
69925 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
69926- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
69927+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
69928 msg->msg_controllen)
69929 return NULL;
69930 return (struct compat_cmsghdr __user *)ptr;
69931@@ -220,7 +220,7 @@ int put_cmsg_compat(struct msghdr *kmsg,
69932 {
69933 struct compat_timeval ctv;
69934 struct compat_timespec cts[3];
69935- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
69936+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
69937 struct compat_cmsghdr cmhdr;
69938 int cmlen;
69939
69940@@ -272,7 +272,7 @@ int put_cmsg_compat(struct msghdr *kmsg,
69941
69942 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
69943 {
69944- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
69945+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
69946 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
69947 int fdnum = scm->fp->count;
69948 struct file **fp = scm->fp->fp;
69949@@ -369,7 +369,7 @@ static int do_set_sock_timeout(struct so
69950 return -EFAULT;
69951 old_fs = get_fs();
69952 set_fs(KERNEL_DS);
69953- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
69954+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
69955 set_fs(old_fs);
69956
69957 return err;
69958@@ -430,7 +430,7 @@ static int do_get_sock_timeout(struct so
69959 len = sizeof(ktime);
69960 old_fs = get_fs();
69961 set_fs(KERNEL_DS);
69962- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
69963+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
69964 set_fs(old_fs);
69965
69966 if (!err) {
69967@@ -565,7 +565,7 @@ int compat_mc_setsockopt(struct sock *so
69968 case MCAST_JOIN_GROUP:
69969 case MCAST_LEAVE_GROUP:
69970 {
69971- struct compat_group_req __user *gr32 = (void *)optval;
69972+ struct compat_group_req __user *gr32 = (void __user *)optval;
69973 struct group_req __user *kgr =
69974 compat_alloc_user_space(sizeof(struct group_req));
69975 u32 interface;
69976@@ -586,7 +586,7 @@ int compat_mc_setsockopt(struct sock *so
69977 case MCAST_BLOCK_SOURCE:
69978 case MCAST_UNBLOCK_SOURCE:
69979 {
69980- struct compat_group_source_req __user *gsr32 = (void *)optval;
69981+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
69982 struct group_source_req __user *kgsr = compat_alloc_user_space(
69983 sizeof(struct group_source_req));
69984 u32 interface;
69985@@ -607,7 +607,7 @@ int compat_mc_setsockopt(struct sock *so
69986 }
69987 case MCAST_MSFILTER:
69988 {
69989- struct compat_group_filter __user *gf32 = (void *)optval;
69990+ struct compat_group_filter __user *gf32 = (void __user *)optval;
69991 struct group_filter __user *kgf;
69992 u32 interface, fmode, numsrc;
69993
69994@@ -645,7 +645,7 @@ int compat_mc_getsockopt(struct sock *so
69995 char __user *optval, int __user *optlen,
69996 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
69997 {
69998- struct compat_group_filter __user *gf32 = (void *)optval;
69999+ struct compat_group_filter __user *gf32 = (void __user *)optval;
70000 struct group_filter __user *kgf;
70001 int __user *koptlen;
70002 u32 interface, fmode, numsrc;
70003diff -urNp linux-3.0.4/net/core/datagram.c linux-3.0.4/net/core/datagram.c
70004--- linux-3.0.4/net/core/datagram.c 2011-07-21 22:17:23.000000000 -0400
70005+++ linux-3.0.4/net/core/datagram.c 2011-08-23 21:47:56.000000000 -0400
70006@@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, s
70007 }
70008
70009 kfree_skb(skb);
70010- atomic_inc(&sk->sk_drops);
70011+ atomic_inc_unchecked(&sk->sk_drops);
70012 sk_mem_reclaim_partial(sk);
70013
70014 return err;
70015diff -urNp linux-3.0.4/net/core/dev.c linux-3.0.4/net/core/dev.c
70016--- linux-3.0.4/net/core/dev.c 2011-07-21 22:17:23.000000000 -0400
70017+++ linux-3.0.4/net/core/dev.c 2011-08-23 21:48:14.000000000 -0400
70018@@ -1125,10 +1125,14 @@ void dev_load(struct net *net, const cha
70019 if (no_module && capable(CAP_NET_ADMIN))
70020 no_module = request_module("netdev-%s", name);
70021 if (no_module && capable(CAP_SYS_MODULE)) {
70022+#ifdef CONFIG_GRKERNSEC_MODHARDEN
70023+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
70024+#else
70025 if (!request_module("%s", name))
70026 pr_err("Loading kernel module for a network device "
70027 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
70028 "instead\n", name);
70029+#endif
70030 }
70031 }
70032 EXPORT_SYMBOL(dev_load);
70033@@ -1959,7 +1963,7 @@ static int illegal_highdma(struct net_de
70034
70035 struct dev_gso_cb {
70036 void (*destructor)(struct sk_buff *skb);
70037-};
70038+} __no_const;
70039
70040 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
70041
70042@@ -2912,7 +2916,7 @@ int netif_rx_ni(struct sk_buff *skb)
70043 }
70044 EXPORT_SYMBOL(netif_rx_ni);
70045
70046-static void net_tx_action(struct softirq_action *h)
70047+static void net_tx_action(void)
70048 {
70049 struct softnet_data *sd = &__get_cpu_var(softnet_data);
70050
70051@@ -3761,7 +3765,7 @@ void netif_napi_del(struct napi_struct *
70052 }
70053 EXPORT_SYMBOL(netif_napi_del);
70054
70055-static void net_rx_action(struct softirq_action *h)
70056+static void net_rx_action(void)
70057 {
70058 struct softnet_data *sd = &__get_cpu_var(softnet_data);
70059 unsigned long time_limit = jiffies + 2;
70060diff -urNp linux-3.0.4/net/core/flow.c linux-3.0.4/net/core/flow.c
70061--- linux-3.0.4/net/core/flow.c 2011-07-21 22:17:23.000000000 -0400
70062+++ linux-3.0.4/net/core/flow.c 2011-08-23 21:47:56.000000000 -0400
70063@@ -60,7 +60,7 @@ struct flow_cache {
70064 struct timer_list rnd_timer;
70065 };
70066
70067-atomic_t flow_cache_genid = ATOMIC_INIT(0);
70068+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
70069 EXPORT_SYMBOL(flow_cache_genid);
70070 static struct flow_cache flow_cache_global;
70071 static struct kmem_cache *flow_cachep __read_mostly;
70072@@ -85,7 +85,7 @@ static void flow_cache_new_hashrnd(unsig
70073
70074 static int flow_entry_valid(struct flow_cache_entry *fle)
70075 {
70076- if (atomic_read(&flow_cache_genid) != fle->genid)
70077+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
70078 return 0;
70079 if (fle->object && !fle->object->ops->check(fle->object))
70080 return 0;
70081@@ -253,7 +253,7 @@ flow_cache_lookup(struct net *net, const
70082 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
70083 fcp->hash_count++;
70084 }
70085- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
70086+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
70087 flo = fle->object;
70088 if (!flo)
70089 goto ret_object;
70090@@ -274,7 +274,7 @@ nocache:
70091 }
70092 flo = resolver(net, key, family, dir, flo, ctx);
70093 if (fle) {
70094- fle->genid = atomic_read(&flow_cache_genid);
70095+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
70096 if (!IS_ERR(flo))
70097 fle->object = flo;
70098 else
70099diff -urNp linux-3.0.4/net/core/iovec.c linux-3.0.4/net/core/iovec.c
70100--- linux-3.0.4/net/core/iovec.c 2011-07-21 22:17:23.000000000 -0400
70101+++ linux-3.0.4/net/core/iovec.c 2011-10-06 04:17:55.000000000 -0400
70102@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struc
70103 if (m->msg_namelen) {
70104 if (mode == VERIFY_READ) {
70105 void __user *namep;
70106- namep = (void __user __force *) m->msg_name;
70107+ namep = (void __force_user *) m->msg_name;
70108 err = move_addr_to_kernel(namep, m->msg_namelen,
70109 address);
70110 if (err < 0)
70111@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struc
70112 }
70113
70114 size = m->msg_iovlen * sizeof(struct iovec);
70115- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
70116+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
70117 return -EFAULT;
70118
70119 m->msg_iov = iov;
70120diff -urNp linux-3.0.4/net/core/rtnetlink.c linux-3.0.4/net/core/rtnetlink.c
70121--- linux-3.0.4/net/core/rtnetlink.c 2011-07-21 22:17:23.000000000 -0400
70122+++ linux-3.0.4/net/core/rtnetlink.c 2011-08-23 21:47:56.000000000 -0400
70123@@ -56,7 +56,7 @@
70124 struct rtnl_link {
70125 rtnl_doit_func doit;
70126 rtnl_dumpit_func dumpit;
70127-};
70128+} __no_const;
70129
70130 static DEFINE_MUTEX(rtnl_mutex);
70131
70132diff -urNp linux-3.0.4/net/core/scm.c linux-3.0.4/net/core/scm.c
70133--- linux-3.0.4/net/core/scm.c 2011-07-21 22:17:23.000000000 -0400
70134+++ linux-3.0.4/net/core/scm.c 2011-10-06 04:17:55.000000000 -0400
70135@@ -218,7 +218,7 @@ EXPORT_SYMBOL(__scm_send);
70136 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
70137 {
70138 struct cmsghdr __user *cm
70139- = (__force struct cmsghdr __user *)msg->msg_control;
70140+ = (struct cmsghdr __force_user *)msg->msg_control;
70141 struct cmsghdr cmhdr;
70142 int cmlen = CMSG_LEN(len);
70143 int err;
70144@@ -241,7 +241,7 @@ int put_cmsg(struct msghdr * msg, int le
70145 err = -EFAULT;
70146 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
70147 goto out;
70148- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
70149+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
70150 goto out;
70151 cmlen = CMSG_SPACE(len);
70152 if (msg->msg_controllen < cmlen)
70153@@ -257,7 +257,7 @@ EXPORT_SYMBOL(put_cmsg);
70154 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
70155 {
70156 struct cmsghdr __user *cm
70157- = (__force struct cmsghdr __user*)msg->msg_control;
70158+ = (struct cmsghdr __force_user *)msg->msg_control;
70159
70160 int fdmax = 0;
70161 int fdnum = scm->fp->count;
70162@@ -277,7 +277,7 @@ void scm_detach_fds(struct msghdr *msg,
70163 if (fdnum < fdmax)
70164 fdmax = fdnum;
70165
70166- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
70167+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
70168 i++, cmfptr++)
70169 {
70170 int new_fd;
70171diff -urNp linux-3.0.4/net/core/skbuff.c linux-3.0.4/net/core/skbuff.c
70172--- linux-3.0.4/net/core/skbuff.c 2011-07-21 22:17:23.000000000 -0400
70173+++ linux-3.0.4/net/core/skbuff.c 2011-08-23 21:48:14.000000000 -0400
70174@@ -1543,6 +1543,8 @@ int skb_splice_bits(struct sk_buff *skb,
70175 struct sock *sk = skb->sk;
70176 int ret = 0;
70177
70178+ pax_track_stack();
70179+
70180 if (splice_grow_spd(pipe, &spd))
70181 return -ENOMEM;
70182
70183diff -urNp linux-3.0.4/net/core/sock.c linux-3.0.4/net/core/sock.c
70184--- linux-3.0.4/net/core/sock.c 2011-07-21 22:17:23.000000000 -0400
70185+++ linux-3.0.4/net/core/sock.c 2011-08-23 21:48:14.000000000 -0400
70186@@ -291,7 +291,7 @@ int sock_queue_rcv_skb(struct sock *sk,
70187 */
70188 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
70189 (unsigned)sk->sk_rcvbuf) {
70190- atomic_inc(&sk->sk_drops);
70191+ atomic_inc_unchecked(&sk->sk_drops);
70192 return -ENOMEM;
70193 }
70194
70195@@ -300,7 +300,7 @@ int sock_queue_rcv_skb(struct sock *sk,
70196 return err;
70197
70198 if (!sk_rmem_schedule(sk, skb->truesize)) {
70199- atomic_inc(&sk->sk_drops);
70200+ atomic_inc_unchecked(&sk->sk_drops);
70201 return -ENOBUFS;
70202 }
70203
70204@@ -320,7 +320,7 @@ int sock_queue_rcv_skb(struct sock *sk,
70205 skb_dst_force(skb);
70206
70207 spin_lock_irqsave(&list->lock, flags);
70208- skb->dropcount = atomic_read(&sk->sk_drops);
70209+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
70210 __skb_queue_tail(list, skb);
70211 spin_unlock_irqrestore(&list->lock, flags);
70212
70213@@ -340,7 +340,7 @@ int sk_receive_skb(struct sock *sk, stru
70214 skb->dev = NULL;
70215
70216 if (sk_rcvqueues_full(sk, skb)) {
70217- atomic_inc(&sk->sk_drops);
70218+ atomic_inc_unchecked(&sk->sk_drops);
70219 goto discard_and_relse;
70220 }
70221 if (nested)
70222@@ -358,7 +358,7 @@ int sk_receive_skb(struct sock *sk, stru
70223 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
70224 } else if (sk_add_backlog(sk, skb)) {
70225 bh_unlock_sock(sk);
70226- atomic_inc(&sk->sk_drops);
70227+ atomic_inc_unchecked(&sk->sk_drops);
70228 goto discard_and_relse;
70229 }
70230
70231@@ -921,7 +921,7 @@ int sock_getsockopt(struct socket *sock,
70232 if (len > sizeof(peercred))
70233 len = sizeof(peercred);
70234 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
70235- if (copy_to_user(optval, &peercred, len))
70236+ if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
70237 return -EFAULT;
70238 goto lenout;
70239 }
70240@@ -934,7 +934,7 @@ int sock_getsockopt(struct socket *sock,
70241 return -ENOTCONN;
70242 if (lv < len)
70243 return -EINVAL;
70244- if (copy_to_user(optval, address, len))
70245+ if (len > sizeof(address) || copy_to_user(optval, address, len))
70246 return -EFAULT;
70247 goto lenout;
70248 }
70249@@ -967,7 +967,7 @@ int sock_getsockopt(struct socket *sock,
70250
70251 if (len > lv)
70252 len = lv;
70253- if (copy_to_user(optval, &v, len))
70254+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
70255 return -EFAULT;
70256 lenout:
70257 if (put_user(len, optlen))
70258@@ -2023,7 +2023,7 @@ void sock_init_data(struct socket *sock,
70259 */
70260 smp_wmb();
70261 atomic_set(&sk->sk_refcnt, 1);
70262- atomic_set(&sk->sk_drops, 0);
70263+ atomic_set_unchecked(&sk->sk_drops, 0);
70264 }
70265 EXPORT_SYMBOL(sock_init_data);
70266
70267diff -urNp linux-3.0.4/net/decnet/sysctl_net_decnet.c linux-3.0.4/net/decnet/sysctl_net_decnet.c
70268--- linux-3.0.4/net/decnet/sysctl_net_decnet.c 2011-07-21 22:17:23.000000000 -0400
70269+++ linux-3.0.4/net/decnet/sysctl_net_decnet.c 2011-08-23 21:47:56.000000000 -0400
70270@@ -173,7 +173,7 @@ static int dn_node_address_handler(ctl_t
70271
70272 if (len > *lenp) len = *lenp;
70273
70274- if (copy_to_user(buffer, addr, len))
70275+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
70276 return -EFAULT;
70277
70278 *lenp = len;
70279@@ -236,7 +236,7 @@ static int dn_def_dev_handler(ctl_table
70280
70281 if (len > *lenp) len = *lenp;
70282
70283- if (copy_to_user(buffer, devname, len))
70284+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
70285 return -EFAULT;
70286
70287 *lenp = len;
70288diff -urNp linux-3.0.4/net/econet/Kconfig linux-3.0.4/net/econet/Kconfig
70289--- linux-3.0.4/net/econet/Kconfig 2011-07-21 22:17:23.000000000 -0400
70290+++ linux-3.0.4/net/econet/Kconfig 2011-08-23 21:48:14.000000000 -0400
70291@@ -4,7 +4,7 @@
70292
70293 config ECONET
70294 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
70295- depends on EXPERIMENTAL && INET
70296+ depends on EXPERIMENTAL && INET && BROKEN
70297 ---help---
70298 Econet is a fairly old and slow networking protocol mainly used by
70299 Acorn computers to access file and print servers. It uses native
70300diff -urNp linux-3.0.4/net/ipv4/fib_frontend.c linux-3.0.4/net/ipv4/fib_frontend.c
70301--- linux-3.0.4/net/ipv4/fib_frontend.c 2011-07-21 22:17:23.000000000 -0400
70302+++ linux-3.0.4/net/ipv4/fib_frontend.c 2011-08-23 21:47:56.000000000 -0400
70303@@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct not
70304 #ifdef CONFIG_IP_ROUTE_MULTIPATH
70305 fib_sync_up(dev);
70306 #endif
70307- atomic_inc(&net->ipv4.dev_addr_genid);
70308+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
70309 rt_cache_flush(dev_net(dev), -1);
70310 break;
70311 case NETDEV_DOWN:
70312 fib_del_ifaddr(ifa, NULL);
70313- atomic_inc(&net->ipv4.dev_addr_genid);
70314+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
70315 if (ifa->ifa_dev->ifa_list == NULL) {
70316 /* Last address was deleted from this interface.
70317 * Disable IP.
70318@@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notif
70319 #ifdef CONFIG_IP_ROUTE_MULTIPATH
70320 fib_sync_up(dev);
70321 #endif
70322- atomic_inc(&net->ipv4.dev_addr_genid);
70323+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
70324 rt_cache_flush(dev_net(dev), -1);
70325 break;
70326 case NETDEV_DOWN:
70327diff -urNp linux-3.0.4/net/ipv4/fib_semantics.c linux-3.0.4/net/ipv4/fib_semantics.c
70328--- linux-3.0.4/net/ipv4/fib_semantics.c 2011-07-21 22:17:23.000000000 -0400
70329+++ linux-3.0.4/net/ipv4/fib_semantics.c 2011-08-23 21:47:56.000000000 -0400
70330@@ -691,7 +691,7 @@ __be32 fib_info_update_nh_saddr(struct n
70331 nh->nh_saddr = inet_select_addr(nh->nh_dev,
70332 nh->nh_gw,
70333 nh->nh_parent->fib_scope);
70334- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
70335+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
70336
70337 return nh->nh_saddr;
70338 }
70339diff -urNp linux-3.0.4/net/ipv4/inet_diag.c linux-3.0.4/net/ipv4/inet_diag.c
70340--- linux-3.0.4/net/ipv4/inet_diag.c 2011-07-21 22:17:23.000000000 -0400
70341+++ linux-3.0.4/net/ipv4/inet_diag.c 2011-08-23 21:48:14.000000000 -0400
70342@@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct soc
70343 r->idiag_retrans = 0;
70344
70345 r->id.idiag_if = sk->sk_bound_dev_if;
70346+
70347+#ifdef CONFIG_GRKERNSEC_HIDESYM
70348+ r->id.idiag_cookie[0] = 0;
70349+ r->id.idiag_cookie[1] = 0;
70350+#else
70351 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
70352 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
70353+#endif
70354
70355 r->id.idiag_sport = inet->inet_sport;
70356 r->id.idiag_dport = inet->inet_dport;
70357@@ -201,8 +207,15 @@ static int inet_twsk_diag_fill(struct in
70358 r->idiag_family = tw->tw_family;
70359 r->idiag_retrans = 0;
70360 r->id.idiag_if = tw->tw_bound_dev_if;
70361+
70362+#ifdef CONFIG_GRKERNSEC_HIDESYM
70363+ r->id.idiag_cookie[0] = 0;
70364+ r->id.idiag_cookie[1] = 0;
70365+#else
70366 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
70367 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
70368+#endif
70369+
70370 r->id.idiag_sport = tw->tw_sport;
70371 r->id.idiag_dport = tw->tw_dport;
70372 r->id.idiag_src[0] = tw->tw_rcv_saddr;
70373@@ -285,12 +298,14 @@ static int inet_diag_get_exact(struct sk
70374 if (sk == NULL)
70375 goto unlock;
70376
70377+#ifndef CONFIG_GRKERNSEC_HIDESYM
70378 err = -ESTALE;
70379 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
70380 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
70381 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
70382 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
70383 goto out;
70384+#endif
70385
70386 err = -ENOMEM;
70387 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
70388@@ -580,8 +595,14 @@ static int inet_diag_fill_req(struct sk_
70389 r->idiag_retrans = req->retrans;
70390
70391 r->id.idiag_if = sk->sk_bound_dev_if;
70392+
70393+#ifdef CONFIG_GRKERNSEC_HIDESYM
70394+ r->id.idiag_cookie[0] = 0;
70395+ r->id.idiag_cookie[1] = 0;
70396+#else
70397 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
70398 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
70399+#endif
70400
70401 tmo = req->expires - jiffies;
70402 if (tmo < 0)
70403diff -urNp linux-3.0.4/net/ipv4/inet_hashtables.c linux-3.0.4/net/ipv4/inet_hashtables.c
70404--- linux-3.0.4/net/ipv4/inet_hashtables.c 2011-09-02 18:11:21.000000000 -0400
70405+++ linux-3.0.4/net/ipv4/inet_hashtables.c 2011-08-23 21:55:24.000000000 -0400
70406@@ -18,12 +18,15 @@
70407 #include <linux/sched.h>
70408 #include <linux/slab.h>
70409 #include <linux/wait.h>
70410+#include <linux/security.h>
70411
70412 #include <net/inet_connection_sock.h>
70413 #include <net/inet_hashtables.h>
70414 #include <net/secure_seq.h>
70415 #include <net/ip.h>
70416
70417+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
70418+
70419 /*
70420 * Allocate and initialize a new local port bind bucket.
70421 * The bindhash mutex for snum's hash chain must be held here.
70422@@ -530,6 +533,8 @@ ok:
70423 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
70424 spin_unlock(&head->lock);
70425
70426+ gr_update_task_in_ip_table(current, inet_sk(sk));
70427+
70428 if (tw) {
70429 inet_twsk_deschedule(tw, death_row);
70430 while (twrefcnt) {
70431diff -urNp linux-3.0.4/net/ipv4/inetpeer.c linux-3.0.4/net/ipv4/inetpeer.c
70432--- linux-3.0.4/net/ipv4/inetpeer.c 2011-09-02 18:11:21.000000000 -0400
70433+++ linux-3.0.4/net/ipv4/inetpeer.c 2011-08-23 21:48:14.000000000 -0400
70434@@ -481,6 +481,8 @@ struct inet_peer *inet_getpeer(struct in
70435 unsigned int sequence;
70436 int invalidated, newrefcnt = 0;
70437
70438+ pax_track_stack();
70439+
70440 /* Look up for the address quickly, lockless.
70441 * Because of a concurrent writer, we might not find an existing entry.
70442 */
70443@@ -517,8 +519,8 @@ found: /* The existing node has been fo
70444 if (p) {
70445 p->daddr = *daddr;
70446 atomic_set(&p->refcnt, 1);
70447- atomic_set(&p->rid, 0);
70448- atomic_set(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
70449+ atomic_set_unchecked(&p->rid, 0);
70450+ atomic_set_unchecked(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
70451 p->tcp_ts_stamp = 0;
70452 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
70453 p->rate_tokens = 0;
70454diff -urNp linux-3.0.4/net/ipv4/ipconfig.c linux-3.0.4/net/ipv4/ipconfig.c
70455--- linux-3.0.4/net/ipv4/ipconfig.c 2011-07-21 22:17:23.000000000 -0400
70456+++ linux-3.0.4/net/ipv4/ipconfig.c 2011-10-06 04:17:55.000000000 -0400
70457@@ -313,7 +313,7 @@ static int __init ic_devinet_ioctl(unsig
70458
70459 mm_segment_t oldfs = get_fs();
70460 set_fs(get_ds());
70461- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
70462+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
70463 set_fs(oldfs);
70464 return res;
70465 }
70466@@ -324,7 +324,7 @@ static int __init ic_dev_ioctl(unsigned
70467
70468 mm_segment_t oldfs = get_fs();
70469 set_fs(get_ds());
70470- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
70471+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
70472 set_fs(oldfs);
70473 return res;
70474 }
70475@@ -335,7 +335,7 @@ static int __init ic_route_ioctl(unsigne
70476
70477 mm_segment_t oldfs = get_fs();
70478 set_fs(get_ds());
70479- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
70480+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
70481 set_fs(oldfs);
70482 return res;
70483 }
70484diff -urNp linux-3.0.4/net/ipv4/ip_fragment.c linux-3.0.4/net/ipv4/ip_fragment.c
70485--- linux-3.0.4/net/ipv4/ip_fragment.c 2011-07-21 22:17:23.000000000 -0400
70486+++ linux-3.0.4/net/ipv4/ip_fragment.c 2011-08-23 21:47:56.000000000 -0400
70487@@ -315,7 +315,7 @@ static inline int ip_frag_too_far(struct
70488 return 0;
70489
70490 start = qp->rid;
70491- end = atomic_inc_return(&peer->rid);
70492+ end = atomic_inc_return_unchecked(&peer->rid);
70493 qp->rid = end;
70494
70495 rc = qp->q.fragments && (end - start) > max;
70496diff -urNp linux-3.0.4/net/ipv4/ip_sockglue.c linux-3.0.4/net/ipv4/ip_sockglue.c
70497--- linux-3.0.4/net/ipv4/ip_sockglue.c 2011-07-21 22:17:23.000000000 -0400
70498+++ linux-3.0.4/net/ipv4/ip_sockglue.c 2011-10-06 04:17:55.000000000 -0400
70499@@ -1073,6 +1073,8 @@ static int do_ip_getsockopt(struct sock
70500 int val;
70501 int len;
70502
70503+ pax_track_stack();
70504+
70505 if (level != SOL_IP)
70506 return -EOPNOTSUPP;
70507
70508@@ -1110,7 +1112,8 @@ static int do_ip_getsockopt(struct sock
70509 len = min_t(unsigned int, len, opt->optlen);
70510 if (put_user(len, optlen))
70511 return -EFAULT;
70512- if (copy_to_user(optval, opt->__data, len))
70513+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
70514+ copy_to_user(optval, opt->__data, len))
70515 return -EFAULT;
70516 return 0;
70517 }
70518@@ -1238,7 +1241,7 @@ static int do_ip_getsockopt(struct sock
70519 if (sk->sk_type != SOCK_STREAM)
70520 return -ENOPROTOOPT;
70521
70522- msg.msg_control = optval;
70523+ msg.msg_control = (void __force_kernel *)optval;
70524 msg.msg_controllen = len;
70525 msg.msg_flags = 0;
70526
70527diff -urNp linux-3.0.4/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-3.0.4/net/ipv4/netfilter/nf_nat_snmp_basic.c
70528--- linux-3.0.4/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-07-21 22:17:23.000000000 -0400
70529+++ linux-3.0.4/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-08-23 21:47:56.000000000 -0400
70530@@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(
70531
70532 *len = 0;
70533
70534- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
70535+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
70536 if (*octets == NULL) {
70537 if (net_ratelimit())
70538 pr_notice("OOM in bsalg (%d)\n", __LINE__);
70539diff -urNp linux-3.0.4/net/ipv4/ping.c linux-3.0.4/net/ipv4/ping.c
70540--- linux-3.0.4/net/ipv4/ping.c 2011-07-21 22:17:23.000000000 -0400
70541+++ linux-3.0.4/net/ipv4/ping.c 2011-08-23 21:47:56.000000000 -0400
70542@@ -837,7 +837,7 @@ static void ping_format_sock(struct sock
70543 sk_rmem_alloc_get(sp),
70544 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
70545 atomic_read(&sp->sk_refcnt), sp,
70546- atomic_read(&sp->sk_drops), len);
70547+ atomic_read_unchecked(&sp->sk_drops), len);
70548 }
70549
70550 static int ping_seq_show(struct seq_file *seq, void *v)
70551diff -urNp linux-3.0.4/net/ipv4/raw.c linux-3.0.4/net/ipv4/raw.c
70552--- linux-3.0.4/net/ipv4/raw.c 2011-07-21 22:17:23.000000000 -0400
70553+++ linux-3.0.4/net/ipv4/raw.c 2011-08-23 21:48:14.000000000 -0400
70554@@ -302,7 +302,7 @@ static int raw_rcv_skb(struct sock * sk,
70555 int raw_rcv(struct sock *sk, struct sk_buff *skb)
70556 {
70557 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
70558- atomic_inc(&sk->sk_drops);
70559+ atomic_inc_unchecked(&sk->sk_drops);
70560 kfree_skb(skb);
70561 return NET_RX_DROP;
70562 }
70563@@ -736,16 +736,20 @@ static int raw_init(struct sock *sk)
70564
70565 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
70566 {
70567+ struct icmp_filter filter;
70568+
70569 if (optlen > sizeof(struct icmp_filter))
70570 optlen = sizeof(struct icmp_filter);
70571- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
70572+ if (copy_from_user(&filter, optval, optlen))
70573 return -EFAULT;
70574+ raw_sk(sk)->filter = filter;
70575 return 0;
70576 }
70577
70578 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
70579 {
70580 int len, ret = -EFAULT;
70581+ struct icmp_filter filter;
70582
70583 if (get_user(len, optlen))
70584 goto out;
70585@@ -755,8 +759,9 @@ static int raw_geticmpfilter(struct sock
70586 if (len > sizeof(struct icmp_filter))
70587 len = sizeof(struct icmp_filter);
70588 ret = -EFAULT;
70589- if (put_user(len, optlen) ||
70590- copy_to_user(optval, &raw_sk(sk)->filter, len))
70591+ filter = raw_sk(sk)->filter;
70592+ if (put_user(len, optlen) || len > sizeof filter ||
70593+ copy_to_user(optval, &filter, len))
70594 goto out;
70595 ret = 0;
70596 out: return ret;
70597@@ -984,7 +989,13 @@ static void raw_sock_seq_show(struct seq
70598 sk_wmem_alloc_get(sp),
70599 sk_rmem_alloc_get(sp),
70600 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
70601- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
70602+ atomic_read(&sp->sk_refcnt),
70603+#ifdef CONFIG_GRKERNSEC_HIDESYM
70604+ NULL,
70605+#else
70606+ sp,
70607+#endif
70608+ atomic_read_unchecked(&sp->sk_drops));
70609 }
70610
70611 static int raw_seq_show(struct seq_file *seq, void *v)
70612diff -urNp linux-3.0.4/net/ipv4/route.c linux-3.0.4/net/ipv4/route.c
70613--- linux-3.0.4/net/ipv4/route.c 2011-09-02 18:11:21.000000000 -0400
70614+++ linux-3.0.4/net/ipv4/route.c 2011-08-23 21:47:56.000000000 -0400
70615@@ -304,7 +304,7 @@ static inline unsigned int rt_hash(__be3
70616
70617 static inline int rt_genid(struct net *net)
70618 {
70619- return atomic_read(&net->ipv4.rt_genid);
70620+ return atomic_read_unchecked(&net->ipv4.rt_genid);
70621 }
70622
70623 #ifdef CONFIG_PROC_FS
70624@@ -833,7 +833,7 @@ static void rt_cache_invalidate(struct n
70625 unsigned char shuffle;
70626
70627 get_random_bytes(&shuffle, sizeof(shuffle));
70628- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
70629+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
70630 }
70631
70632 /*
70633@@ -2834,7 +2834,7 @@ static int rt_fill_info(struct net *net,
70634 error = rt->dst.error;
70635 if (peer) {
70636 inet_peer_refcheck(rt->peer);
70637- id = atomic_read(&peer->ip_id_count) & 0xffff;
70638+ id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
70639 if (peer->tcp_ts_stamp) {
70640 ts = peer->tcp_ts;
70641 tsage = get_seconds() - peer->tcp_ts_stamp;
70642diff -urNp linux-3.0.4/net/ipv4/tcp.c linux-3.0.4/net/ipv4/tcp.c
70643--- linux-3.0.4/net/ipv4/tcp.c 2011-07-21 22:17:23.000000000 -0400
70644+++ linux-3.0.4/net/ipv4/tcp.c 2011-08-23 21:48:14.000000000 -0400
70645@@ -2122,6 +2122,8 @@ static int do_tcp_setsockopt(struct sock
70646 int val;
70647 int err = 0;
70648
70649+ pax_track_stack();
70650+
70651 /* These are data/string values, all the others are ints */
70652 switch (optname) {
70653 case TCP_CONGESTION: {
70654@@ -2501,6 +2503,8 @@ static int do_tcp_getsockopt(struct sock
70655 struct tcp_sock *tp = tcp_sk(sk);
70656 int val, len;
70657
70658+ pax_track_stack();
70659+
70660 if (get_user(len, optlen))
70661 return -EFAULT;
70662
70663diff -urNp linux-3.0.4/net/ipv4/tcp_ipv4.c linux-3.0.4/net/ipv4/tcp_ipv4.c
70664--- linux-3.0.4/net/ipv4/tcp_ipv4.c 2011-09-02 18:11:21.000000000 -0400
70665+++ linux-3.0.4/net/ipv4/tcp_ipv4.c 2011-08-23 21:48:14.000000000 -0400
70666@@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
70667 int sysctl_tcp_low_latency __read_mostly;
70668 EXPORT_SYMBOL(sysctl_tcp_low_latency);
70669
70670+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
70671+extern int grsec_enable_blackhole;
70672+#endif
70673
70674 #ifdef CONFIG_TCP_MD5SIG
70675 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
70676@@ -1607,6 +1610,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
70677 return 0;
70678
70679 reset:
70680+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
70681+ if (!grsec_enable_blackhole)
70682+#endif
70683 tcp_v4_send_reset(rsk, skb);
70684 discard:
70685 kfree_skb(skb);
70686@@ -1669,12 +1675,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
70687 TCP_SKB_CB(skb)->sacked = 0;
70688
70689 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
70690- if (!sk)
70691+ if (!sk) {
70692+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
70693+ ret = 1;
70694+#endif
70695 goto no_tcp_socket;
70696-
70697+ }
70698 process:
70699- if (sk->sk_state == TCP_TIME_WAIT)
70700+ if (sk->sk_state == TCP_TIME_WAIT) {
70701+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
70702+ ret = 2;
70703+#endif
70704 goto do_time_wait;
70705+ }
70706
70707 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
70708 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
70709@@ -1724,6 +1737,10 @@ no_tcp_socket:
70710 bad_packet:
70711 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
70712 } else {
70713+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
70714+ if (!grsec_enable_blackhole || (ret == 1 &&
70715+ (skb->dev->flags & IFF_LOOPBACK)))
70716+#endif
70717 tcp_v4_send_reset(NULL, skb);
70718 }
70719
70720@@ -2388,7 +2405,11 @@ static void get_openreq4(struct sock *sk
70721 0, /* non standard timer */
70722 0, /* open_requests have no inode */
70723 atomic_read(&sk->sk_refcnt),
70724+#ifdef CONFIG_GRKERNSEC_HIDESYM
70725+ NULL,
70726+#else
70727 req,
70728+#endif
70729 len);
70730 }
70731
70732@@ -2438,7 +2459,12 @@ static void get_tcp4_sock(struct sock *s
70733 sock_i_uid(sk),
70734 icsk->icsk_probes_out,
70735 sock_i_ino(sk),
70736- atomic_read(&sk->sk_refcnt), sk,
70737+ atomic_read(&sk->sk_refcnt),
70738+#ifdef CONFIG_GRKERNSEC_HIDESYM
70739+ NULL,
70740+#else
70741+ sk,
70742+#endif
70743 jiffies_to_clock_t(icsk->icsk_rto),
70744 jiffies_to_clock_t(icsk->icsk_ack.ato),
70745 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
70746@@ -2466,7 +2492,13 @@ static void get_timewait4_sock(struct in
70747 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
70748 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
70749 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
70750- atomic_read(&tw->tw_refcnt), tw, len);
70751+ atomic_read(&tw->tw_refcnt),
70752+#ifdef CONFIG_GRKERNSEC_HIDESYM
70753+ NULL,
70754+#else
70755+ tw,
70756+#endif
70757+ len);
70758 }
70759
70760 #define TMPSZ 150
70761diff -urNp linux-3.0.4/net/ipv4/tcp_minisocks.c linux-3.0.4/net/ipv4/tcp_minisocks.c
70762--- linux-3.0.4/net/ipv4/tcp_minisocks.c 2011-07-21 22:17:23.000000000 -0400
70763+++ linux-3.0.4/net/ipv4/tcp_minisocks.c 2011-08-23 21:48:14.000000000 -0400
70764@@ -27,6 +27,10 @@
70765 #include <net/inet_common.h>
70766 #include <net/xfrm.h>
70767
70768+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
70769+extern int grsec_enable_blackhole;
70770+#endif
70771+
70772 int sysctl_tcp_syncookies __read_mostly = 1;
70773 EXPORT_SYMBOL(sysctl_tcp_syncookies);
70774
70775@@ -745,6 +749,10 @@ listen_overflow:
70776
70777 embryonic_reset:
70778 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
70779+
70780+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
70781+ if (!grsec_enable_blackhole)
70782+#endif
70783 if (!(flg & TCP_FLAG_RST))
70784 req->rsk_ops->send_reset(sk, skb);
70785
70786diff -urNp linux-3.0.4/net/ipv4/tcp_output.c linux-3.0.4/net/ipv4/tcp_output.c
70787--- linux-3.0.4/net/ipv4/tcp_output.c 2011-07-21 22:17:23.000000000 -0400
70788+++ linux-3.0.4/net/ipv4/tcp_output.c 2011-08-23 21:48:14.000000000 -0400
70789@@ -2421,6 +2421,8 @@ struct sk_buff *tcp_make_synack(struct s
70790 int mss;
70791 int s_data_desired = 0;
70792
70793+ pax_track_stack();
70794+
70795 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
70796 s_data_desired = cvp->s_data_desired;
70797 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
70798diff -urNp linux-3.0.4/net/ipv4/tcp_probe.c linux-3.0.4/net/ipv4/tcp_probe.c
70799--- linux-3.0.4/net/ipv4/tcp_probe.c 2011-07-21 22:17:23.000000000 -0400
70800+++ linux-3.0.4/net/ipv4/tcp_probe.c 2011-08-23 21:47:56.000000000 -0400
70801@@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file
70802 if (cnt + width >= len)
70803 break;
70804
70805- if (copy_to_user(buf + cnt, tbuf, width))
70806+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
70807 return -EFAULT;
70808 cnt += width;
70809 }
70810diff -urNp linux-3.0.4/net/ipv4/tcp_timer.c linux-3.0.4/net/ipv4/tcp_timer.c
70811--- linux-3.0.4/net/ipv4/tcp_timer.c 2011-07-21 22:17:23.000000000 -0400
70812+++ linux-3.0.4/net/ipv4/tcp_timer.c 2011-08-23 21:48:14.000000000 -0400
70813@@ -22,6 +22,10 @@
70814 #include <linux/gfp.h>
70815 #include <net/tcp.h>
70816
70817+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
70818+extern int grsec_lastack_retries;
70819+#endif
70820+
70821 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
70822 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
70823 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
70824@@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock
70825 }
70826 }
70827
70828+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
70829+ if ((sk->sk_state == TCP_LAST_ACK) &&
70830+ (grsec_lastack_retries > 0) &&
70831+ (grsec_lastack_retries < retry_until))
70832+ retry_until = grsec_lastack_retries;
70833+#endif
70834+
70835 if (retransmits_timed_out(sk, retry_until,
70836 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
70837 /* Has it gone just too far? */
70838diff -urNp linux-3.0.4/net/ipv4/udp.c linux-3.0.4/net/ipv4/udp.c
70839--- linux-3.0.4/net/ipv4/udp.c 2011-07-21 22:17:23.000000000 -0400
70840+++ linux-3.0.4/net/ipv4/udp.c 2011-08-23 21:48:14.000000000 -0400
70841@@ -86,6 +86,7 @@
70842 #include <linux/types.h>
70843 #include <linux/fcntl.h>
70844 #include <linux/module.h>
70845+#include <linux/security.h>
70846 #include <linux/socket.h>
70847 #include <linux/sockios.h>
70848 #include <linux/igmp.h>
70849@@ -107,6 +108,10 @@
70850 #include <net/xfrm.h>
70851 #include "udp_impl.h"
70852
70853+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
70854+extern int grsec_enable_blackhole;
70855+#endif
70856+
70857 struct udp_table udp_table __read_mostly;
70858 EXPORT_SYMBOL(udp_table);
70859
70860@@ -564,6 +569,9 @@ found:
70861 return s;
70862 }
70863
70864+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
70865+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
70866+
70867 /*
70868 * This routine is called by the ICMP module when it gets some
70869 * sort of error condition. If err < 0 then the socket should
70870@@ -855,9 +863,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
70871 dport = usin->sin_port;
70872 if (dport == 0)
70873 return -EINVAL;
70874+
70875+ err = gr_search_udp_sendmsg(sk, usin);
70876+ if (err)
70877+ return err;
70878 } else {
70879 if (sk->sk_state != TCP_ESTABLISHED)
70880 return -EDESTADDRREQ;
70881+
70882+ err = gr_search_udp_sendmsg(sk, NULL);
70883+ if (err)
70884+ return err;
70885+
70886 daddr = inet->inet_daddr;
70887 dport = inet->inet_dport;
70888 /* Open fast path for connected socket.
70889@@ -1098,7 +1115,7 @@ static unsigned int first_packet_length(
70890 udp_lib_checksum_complete(skb)) {
70891 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
70892 IS_UDPLITE(sk));
70893- atomic_inc(&sk->sk_drops);
70894+ atomic_inc_unchecked(&sk->sk_drops);
70895 __skb_unlink(skb, rcvq);
70896 __skb_queue_tail(&list_kill, skb);
70897 }
70898@@ -1184,6 +1201,10 @@ try_again:
70899 if (!skb)
70900 goto out;
70901
70902+ err = gr_search_udp_recvmsg(sk, skb);
70903+ if (err)
70904+ goto out_free;
70905+
70906 ulen = skb->len - sizeof(struct udphdr);
70907 if (len > ulen)
70908 len = ulen;
70909@@ -1483,7 +1504,7 @@ int udp_queue_rcv_skb(struct sock *sk, s
70910
70911 drop:
70912 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
70913- atomic_inc(&sk->sk_drops);
70914+ atomic_inc_unchecked(&sk->sk_drops);
70915 kfree_skb(skb);
70916 return -1;
70917 }
70918@@ -1502,7 +1523,7 @@ static void flush_stack(struct sock **st
70919 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
70920
70921 if (!skb1) {
70922- atomic_inc(&sk->sk_drops);
70923+ atomic_inc_unchecked(&sk->sk_drops);
70924 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
70925 IS_UDPLITE(sk));
70926 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
70927@@ -1671,6 +1692,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
70928 goto csum_error;
70929
70930 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
70931+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
70932+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
70933+#endif
70934 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
70935
70936 /*
70937@@ -2098,8 +2122,13 @@ static void udp4_format_sock(struct sock
70938 sk_wmem_alloc_get(sp),
70939 sk_rmem_alloc_get(sp),
70940 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
70941- atomic_read(&sp->sk_refcnt), sp,
70942- atomic_read(&sp->sk_drops), len);
70943+ atomic_read(&sp->sk_refcnt),
70944+#ifdef CONFIG_GRKERNSEC_HIDESYM
70945+ NULL,
70946+#else
70947+ sp,
70948+#endif
70949+ atomic_read_unchecked(&sp->sk_drops), len);
70950 }
70951
70952 int udp4_seq_show(struct seq_file *seq, void *v)
70953diff -urNp linux-3.0.4/net/ipv6/addrconf.c linux-3.0.4/net/ipv6/addrconf.c
70954--- linux-3.0.4/net/ipv6/addrconf.c 2011-07-21 22:17:23.000000000 -0400
70955+++ linux-3.0.4/net/ipv6/addrconf.c 2011-10-06 04:17:55.000000000 -0400
70956@@ -2072,7 +2072,7 @@ int addrconf_set_dstaddr(struct net *net
70957 p.iph.ihl = 5;
70958 p.iph.protocol = IPPROTO_IPV6;
70959 p.iph.ttl = 64;
70960- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
70961+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
70962
70963 if (ops->ndo_do_ioctl) {
70964 mm_segment_t oldfs = get_fs();
70965diff -urNp linux-3.0.4/net/ipv6/inet6_connection_sock.c linux-3.0.4/net/ipv6/inet6_connection_sock.c
70966--- linux-3.0.4/net/ipv6/inet6_connection_sock.c 2011-07-21 22:17:23.000000000 -0400
70967+++ linux-3.0.4/net/ipv6/inet6_connection_sock.c 2011-08-23 21:47:56.000000000 -0400
70968@@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *
70969 #ifdef CONFIG_XFRM
70970 {
70971 struct rt6_info *rt = (struct rt6_info *)dst;
70972- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
70973+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
70974 }
70975 #endif
70976 }
70977@@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(
70978 #ifdef CONFIG_XFRM
70979 if (dst) {
70980 struct rt6_info *rt = (struct rt6_info *)dst;
70981- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
70982+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
70983 __sk_dst_reset(sk);
70984 dst = NULL;
70985 }
70986diff -urNp linux-3.0.4/net/ipv6/ipv6_sockglue.c linux-3.0.4/net/ipv6/ipv6_sockglue.c
70987--- linux-3.0.4/net/ipv6/ipv6_sockglue.c 2011-07-21 22:17:23.000000000 -0400
70988+++ linux-3.0.4/net/ipv6/ipv6_sockglue.c 2011-10-06 04:17:55.000000000 -0400
70989@@ -129,6 +129,8 @@ static int do_ipv6_setsockopt(struct soc
70990 int val, valbool;
70991 int retv = -ENOPROTOOPT;
70992
70993+ pax_track_stack();
70994+
70995 if (optval == NULL)
70996 val=0;
70997 else {
70998@@ -919,6 +921,8 @@ static int do_ipv6_getsockopt(struct soc
70999 int len;
71000 int val;
71001
71002+ pax_track_stack();
71003+
71004 if (ip6_mroute_opt(optname))
71005 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
71006
71007@@ -960,7 +964,7 @@ static int do_ipv6_getsockopt(struct soc
71008 if (sk->sk_type != SOCK_STREAM)
71009 return -ENOPROTOOPT;
71010
71011- msg.msg_control = optval;
71012+ msg.msg_control = (void __force_kernel *)optval;
71013 msg.msg_controllen = len;
71014 msg.msg_flags = 0;
71015
71016diff -urNp linux-3.0.4/net/ipv6/raw.c linux-3.0.4/net/ipv6/raw.c
71017--- linux-3.0.4/net/ipv6/raw.c 2011-07-21 22:17:23.000000000 -0400
71018+++ linux-3.0.4/net/ipv6/raw.c 2011-08-23 21:48:14.000000000 -0400
71019@@ -376,7 +376,7 @@ static inline int rawv6_rcv_skb(struct s
71020 {
71021 if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
71022 skb_checksum_complete(skb)) {
71023- atomic_inc(&sk->sk_drops);
71024+ atomic_inc_unchecked(&sk->sk_drops);
71025 kfree_skb(skb);
71026 return NET_RX_DROP;
71027 }
71028@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
71029 struct raw6_sock *rp = raw6_sk(sk);
71030
71031 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
71032- atomic_inc(&sk->sk_drops);
71033+ atomic_inc_unchecked(&sk->sk_drops);
71034 kfree_skb(skb);
71035 return NET_RX_DROP;
71036 }
71037@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
71038
71039 if (inet->hdrincl) {
71040 if (skb_checksum_complete(skb)) {
71041- atomic_inc(&sk->sk_drops);
71042+ atomic_inc_unchecked(&sk->sk_drops);
71043 kfree_skb(skb);
71044 return NET_RX_DROP;
71045 }
71046@@ -601,7 +601,7 @@ out:
71047 return err;
71048 }
71049
71050-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
71051+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
71052 struct flowi6 *fl6, struct dst_entry **dstp,
71053 unsigned int flags)
71054 {
71055@@ -742,6 +742,8 @@ static int rawv6_sendmsg(struct kiocb *i
71056 u16 proto;
71057 int err;
71058
71059+ pax_track_stack();
71060+
71061 /* Rough check on arithmetic overflow,
71062 better check is made in ip6_append_data().
71063 */
71064@@ -909,12 +911,15 @@ do_confirm:
71065 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
71066 char __user *optval, int optlen)
71067 {
71068+ struct icmp6_filter filter;
71069+
71070 switch (optname) {
71071 case ICMPV6_FILTER:
71072 if (optlen > sizeof(struct icmp6_filter))
71073 optlen = sizeof(struct icmp6_filter);
71074- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
71075+ if (copy_from_user(&filter, optval, optlen))
71076 return -EFAULT;
71077+ raw6_sk(sk)->filter = filter;
71078 return 0;
71079 default:
71080 return -ENOPROTOOPT;
71081@@ -927,6 +932,7 @@ static int rawv6_geticmpfilter(struct so
71082 char __user *optval, int __user *optlen)
71083 {
71084 int len;
71085+ struct icmp6_filter filter;
71086
71087 switch (optname) {
71088 case ICMPV6_FILTER:
71089@@ -938,7 +944,8 @@ static int rawv6_geticmpfilter(struct so
71090 len = sizeof(struct icmp6_filter);
71091 if (put_user(len, optlen))
71092 return -EFAULT;
71093- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
71094+ filter = raw6_sk(sk)->filter;
71095+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
71096 return -EFAULT;
71097 return 0;
71098 default:
71099@@ -1252,7 +1259,13 @@ static void raw6_sock_seq_show(struct se
71100 0, 0L, 0,
71101 sock_i_uid(sp), 0,
71102 sock_i_ino(sp),
71103- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
71104+ atomic_read(&sp->sk_refcnt),
71105+#ifdef CONFIG_GRKERNSEC_HIDESYM
71106+ NULL,
71107+#else
71108+ sp,
71109+#endif
71110+ atomic_read_unchecked(&sp->sk_drops));
71111 }
71112
71113 static int raw6_seq_show(struct seq_file *seq, void *v)
71114diff -urNp linux-3.0.4/net/ipv6/tcp_ipv6.c linux-3.0.4/net/ipv6/tcp_ipv6.c
71115--- linux-3.0.4/net/ipv6/tcp_ipv6.c 2011-09-02 18:11:21.000000000 -0400
71116+++ linux-3.0.4/net/ipv6/tcp_ipv6.c 2011-08-23 21:48:14.000000000 -0400
71117@@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
71118 }
71119 #endif
71120
71121+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71122+extern int grsec_enable_blackhole;
71123+#endif
71124+
71125 static void tcp_v6_hash(struct sock *sk)
71126 {
71127 if (sk->sk_state != TCP_CLOSE) {
71128@@ -1662,6 +1666,9 @@ static int tcp_v6_do_rcv(struct sock *sk
71129 return 0;
71130
71131 reset:
71132+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71133+ if (!grsec_enable_blackhole)
71134+#endif
71135 tcp_v6_send_reset(sk, skb);
71136 discard:
71137 if (opt_skb)
71138@@ -1741,12 +1748,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
71139 TCP_SKB_CB(skb)->sacked = 0;
71140
71141 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
71142- if (!sk)
71143+ if (!sk) {
71144+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71145+ ret = 1;
71146+#endif
71147 goto no_tcp_socket;
71148+ }
71149
71150 process:
71151- if (sk->sk_state == TCP_TIME_WAIT)
71152+ if (sk->sk_state == TCP_TIME_WAIT) {
71153+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71154+ ret = 2;
71155+#endif
71156 goto do_time_wait;
71157+ }
71158
71159 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
71160 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
71161@@ -1794,6 +1809,10 @@ no_tcp_socket:
71162 bad_packet:
71163 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
71164 } else {
71165+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71166+ if (!grsec_enable_blackhole || (ret == 1 &&
71167+ (skb->dev->flags & IFF_LOOPBACK)))
71168+#endif
71169 tcp_v6_send_reset(NULL, skb);
71170 }
71171
71172@@ -2054,7 +2073,13 @@ static void get_openreq6(struct seq_file
71173 uid,
71174 0, /* non standard timer */
71175 0, /* open_requests have no inode */
71176- 0, req);
71177+ 0,
71178+#ifdef CONFIG_GRKERNSEC_HIDESYM
71179+ NULL
71180+#else
71181+ req
71182+#endif
71183+ );
71184 }
71185
71186 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
71187@@ -2104,7 +2129,12 @@ static void get_tcp6_sock(struct seq_fil
71188 sock_i_uid(sp),
71189 icsk->icsk_probes_out,
71190 sock_i_ino(sp),
71191- atomic_read(&sp->sk_refcnt), sp,
71192+ atomic_read(&sp->sk_refcnt),
71193+#ifdef CONFIG_GRKERNSEC_HIDESYM
71194+ NULL,
71195+#else
71196+ sp,
71197+#endif
71198 jiffies_to_clock_t(icsk->icsk_rto),
71199 jiffies_to_clock_t(icsk->icsk_ack.ato),
71200 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
71201@@ -2139,7 +2169,13 @@ static void get_timewait6_sock(struct se
71202 dest->s6_addr32[2], dest->s6_addr32[3], destp,
71203 tw->tw_substate, 0, 0,
71204 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
71205- atomic_read(&tw->tw_refcnt), tw);
71206+ atomic_read(&tw->tw_refcnt),
71207+#ifdef CONFIG_GRKERNSEC_HIDESYM
71208+ NULL
71209+#else
71210+ tw
71211+#endif
71212+ );
71213 }
71214
71215 static int tcp6_seq_show(struct seq_file *seq, void *v)
71216diff -urNp linux-3.0.4/net/ipv6/udp.c linux-3.0.4/net/ipv6/udp.c
71217--- linux-3.0.4/net/ipv6/udp.c 2011-09-02 18:11:21.000000000 -0400
71218+++ linux-3.0.4/net/ipv6/udp.c 2011-08-23 21:48:14.000000000 -0400
71219@@ -50,6 +50,10 @@
71220 #include <linux/seq_file.h>
71221 #include "udp_impl.h"
71222
71223+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71224+extern int grsec_enable_blackhole;
71225+#endif
71226+
71227 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
71228 {
71229 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
71230@@ -548,7 +552,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
71231
71232 return 0;
71233 drop:
71234- atomic_inc(&sk->sk_drops);
71235+ atomic_inc_unchecked(&sk->sk_drops);
71236 drop_no_sk_drops_inc:
71237 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
71238 kfree_skb(skb);
71239@@ -624,7 +628,7 @@ static void flush_stack(struct sock **st
71240 continue;
71241 }
71242 drop:
71243- atomic_inc(&sk->sk_drops);
71244+ atomic_inc_unchecked(&sk->sk_drops);
71245 UDP6_INC_STATS_BH(sock_net(sk),
71246 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
71247 UDP6_INC_STATS_BH(sock_net(sk),
71248@@ -779,6 +783,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
71249 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
71250 proto == IPPROTO_UDPLITE);
71251
71252+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71253+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
71254+#endif
71255 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
71256
71257 kfree_skb(skb);
71258@@ -795,7 +802,7 @@ int __udp6_lib_rcv(struct sk_buff *skb,
71259 if (!sock_owned_by_user(sk))
71260 udpv6_queue_rcv_skb(sk, skb);
71261 else if (sk_add_backlog(sk, skb)) {
71262- atomic_inc(&sk->sk_drops);
71263+ atomic_inc_unchecked(&sk->sk_drops);
71264 bh_unlock_sock(sk);
71265 sock_put(sk);
71266 goto discard;
71267@@ -1406,8 +1413,13 @@ static void udp6_sock_seq_show(struct se
71268 0, 0L, 0,
71269 sock_i_uid(sp), 0,
71270 sock_i_ino(sp),
71271- atomic_read(&sp->sk_refcnt), sp,
71272- atomic_read(&sp->sk_drops));
71273+ atomic_read(&sp->sk_refcnt),
71274+#ifdef CONFIG_GRKERNSEC_HIDESYM
71275+ NULL,
71276+#else
71277+ sp,
71278+#endif
71279+ atomic_read_unchecked(&sp->sk_drops));
71280 }
71281
71282 int udp6_seq_show(struct seq_file *seq, void *v)
71283diff -urNp linux-3.0.4/net/irda/ircomm/ircomm_tty.c linux-3.0.4/net/irda/ircomm/ircomm_tty.c
71284--- linux-3.0.4/net/irda/ircomm/ircomm_tty.c 2011-07-21 22:17:23.000000000 -0400
71285+++ linux-3.0.4/net/irda/ircomm/ircomm_tty.c 2011-08-23 21:47:56.000000000 -0400
71286@@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(st
71287 add_wait_queue(&self->open_wait, &wait);
71288
71289 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
71290- __FILE__,__LINE__, tty->driver->name, self->open_count );
71291+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
71292
71293 /* As far as I can see, we protect open_count - Jean II */
71294 spin_lock_irqsave(&self->spinlock, flags);
71295 if (!tty_hung_up_p(filp)) {
71296 extra_count = 1;
71297- self->open_count--;
71298+ local_dec(&self->open_count);
71299 }
71300 spin_unlock_irqrestore(&self->spinlock, flags);
71301- self->blocked_open++;
71302+ local_inc(&self->blocked_open);
71303
71304 while (1) {
71305 if (tty->termios->c_cflag & CBAUD) {
71306@@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(st
71307 }
71308
71309 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
71310- __FILE__,__LINE__, tty->driver->name, self->open_count );
71311+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
71312
71313 schedule();
71314 }
71315@@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(st
71316 if (extra_count) {
71317 /* ++ is not atomic, so this should be protected - Jean II */
71318 spin_lock_irqsave(&self->spinlock, flags);
71319- self->open_count++;
71320+ local_inc(&self->open_count);
71321 spin_unlock_irqrestore(&self->spinlock, flags);
71322 }
71323- self->blocked_open--;
71324+ local_dec(&self->blocked_open);
71325
71326 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
71327- __FILE__,__LINE__, tty->driver->name, self->open_count);
71328+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
71329
71330 if (!retval)
71331 self->flags |= ASYNC_NORMAL_ACTIVE;
71332@@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_st
71333 }
71334 /* ++ is not atomic, so this should be protected - Jean II */
71335 spin_lock_irqsave(&self->spinlock, flags);
71336- self->open_count++;
71337+ local_inc(&self->open_count);
71338
71339 tty->driver_data = self;
71340 self->tty = tty;
71341 spin_unlock_irqrestore(&self->spinlock, flags);
71342
71343 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
71344- self->line, self->open_count);
71345+ self->line, local_read(&self->open_count));
71346
71347 /* Not really used by us, but lets do it anyway */
71348 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
71349@@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_
71350 return;
71351 }
71352
71353- if ((tty->count == 1) && (self->open_count != 1)) {
71354+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
71355 /*
71356 * Uh, oh. tty->count is 1, which means that the tty
71357 * structure will be freed. state->count should always
71358@@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_
71359 */
71360 IRDA_DEBUG(0, "%s(), bad serial port count; "
71361 "tty->count is 1, state->count is %d\n", __func__ ,
71362- self->open_count);
71363- self->open_count = 1;
71364+ local_read(&self->open_count));
71365+ local_set(&self->open_count, 1);
71366 }
71367
71368- if (--self->open_count < 0) {
71369+ if (local_dec_return(&self->open_count) < 0) {
71370 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
71371- __func__, self->line, self->open_count);
71372- self->open_count = 0;
71373+ __func__, self->line, local_read(&self->open_count));
71374+ local_set(&self->open_count, 0);
71375 }
71376- if (self->open_count) {
71377+ if (local_read(&self->open_count)) {
71378 spin_unlock_irqrestore(&self->spinlock, flags);
71379
71380 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
71381@@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_
71382 tty->closing = 0;
71383 self->tty = NULL;
71384
71385- if (self->blocked_open) {
71386+ if (local_read(&self->blocked_open)) {
71387 if (self->close_delay)
71388 schedule_timeout_interruptible(self->close_delay);
71389 wake_up_interruptible(&self->open_wait);
71390@@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty
71391 spin_lock_irqsave(&self->spinlock, flags);
71392 self->flags &= ~ASYNC_NORMAL_ACTIVE;
71393 self->tty = NULL;
71394- self->open_count = 0;
71395+ local_set(&self->open_count, 0);
71396 spin_unlock_irqrestore(&self->spinlock, flags);
71397
71398 wake_up_interruptible(&self->open_wait);
71399@@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct
71400 seq_putc(m, '\n');
71401
71402 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
71403- seq_printf(m, "Open count: %d\n", self->open_count);
71404+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
71405 seq_printf(m, "Max data size: %d\n", self->max_data_size);
71406 seq_printf(m, "Max header size: %d\n", self->max_header_size);
71407
71408diff -urNp linux-3.0.4/net/iucv/af_iucv.c linux-3.0.4/net/iucv/af_iucv.c
71409--- linux-3.0.4/net/iucv/af_iucv.c 2011-07-21 22:17:23.000000000 -0400
71410+++ linux-3.0.4/net/iucv/af_iucv.c 2011-08-23 21:47:56.000000000 -0400
71411@@ -648,10 +648,10 @@ static int iucv_sock_autobind(struct soc
71412
71413 write_lock_bh(&iucv_sk_list.lock);
71414
71415- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
71416+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
71417 while (__iucv_get_sock_by_name(name)) {
71418 sprintf(name, "%08x",
71419- atomic_inc_return(&iucv_sk_list.autobind_name));
71420+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
71421 }
71422
71423 write_unlock_bh(&iucv_sk_list.lock);
71424diff -urNp linux-3.0.4/net/key/af_key.c linux-3.0.4/net/key/af_key.c
71425--- linux-3.0.4/net/key/af_key.c 2011-07-21 22:17:23.000000000 -0400
71426+++ linux-3.0.4/net/key/af_key.c 2011-08-23 21:48:14.000000000 -0400
71427@@ -2481,6 +2481,8 @@ static int pfkey_migrate(struct sock *sk
71428 struct xfrm_migrate m[XFRM_MAX_DEPTH];
71429 struct xfrm_kmaddress k;
71430
71431+ pax_track_stack();
71432+
71433 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
71434 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
71435 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
71436@@ -3016,10 +3018,10 @@ static int pfkey_send_policy_notify(stru
71437 static u32 get_acqseq(void)
71438 {
71439 u32 res;
71440- static atomic_t acqseq;
71441+ static atomic_unchecked_t acqseq;
71442
71443 do {
71444- res = atomic_inc_return(&acqseq);
71445+ res = atomic_inc_return_unchecked(&acqseq);
71446 } while (!res);
71447 return res;
71448 }
71449diff -urNp linux-3.0.4/net/lapb/lapb_iface.c linux-3.0.4/net/lapb/lapb_iface.c
71450--- linux-3.0.4/net/lapb/lapb_iface.c 2011-07-21 22:17:23.000000000 -0400
71451+++ linux-3.0.4/net/lapb/lapb_iface.c 2011-08-23 21:47:56.000000000 -0400
71452@@ -158,7 +158,7 @@ int lapb_register(struct net_device *dev
71453 goto out;
71454
71455 lapb->dev = dev;
71456- lapb->callbacks = *callbacks;
71457+ lapb->callbacks = callbacks;
71458
71459 __lapb_insert_cb(lapb);
71460
71461@@ -380,32 +380,32 @@ int lapb_data_received(struct net_device
71462
71463 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
71464 {
71465- if (lapb->callbacks.connect_confirmation)
71466- lapb->callbacks.connect_confirmation(lapb->dev, reason);
71467+ if (lapb->callbacks->connect_confirmation)
71468+ lapb->callbacks->connect_confirmation(lapb->dev, reason);
71469 }
71470
71471 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
71472 {
71473- if (lapb->callbacks.connect_indication)
71474- lapb->callbacks.connect_indication(lapb->dev, reason);
71475+ if (lapb->callbacks->connect_indication)
71476+ lapb->callbacks->connect_indication(lapb->dev, reason);
71477 }
71478
71479 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
71480 {
71481- if (lapb->callbacks.disconnect_confirmation)
71482- lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
71483+ if (lapb->callbacks->disconnect_confirmation)
71484+ lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
71485 }
71486
71487 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
71488 {
71489- if (lapb->callbacks.disconnect_indication)
71490- lapb->callbacks.disconnect_indication(lapb->dev, reason);
71491+ if (lapb->callbacks->disconnect_indication)
71492+ lapb->callbacks->disconnect_indication(lapb->dev, reason);
71493 }
71494
71495 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
71496 {
71497- if (lapb->callbacks.data_indication)
71498- return lapb->callbacks.data_indication(lapb->dev, skb);
71499+ if (lapb->callbacks->data_indication)
71500+ return lapb->callbacks->data_indication(lapb->dev, skb);
71501
71502 kfree_skb(skb);
71503 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
71504@@ -415,8 +415,8 @@ int lapb_data_transmit(struct lapb_cb *l
71505 {
71506 int used = 0;
71507
71508- if (lapb->callbacks.data_transmit) {
71509- lapb->callbacks.data_transmit(lapb->dev, skb);
71510+ if (lapb->callbacks->data_transmit) {
71511+ lapb->callbacks->data_transmit(lapb->dev, skb);
71512 used = 1;
71513 }
71514
71515diff -urNp linux-3.0.4/net/mac80211/debugfs_sta.c linux-3.0.4/net/mac80211/debugfs_sta.c
71516--- linux-3.0.4/net/mac80211/debugfs_sta.c 2011-07-21 22:17:23.000000000 -0400
71517+++ linux-3.0.4/net/mac80211/debugfs_sta.c 2011-08-23 21:48:14.000000000 -0400
71518@@ -140,6 +140,8 @@ static ssize_t sta_agg_status_read(struc
71519 struct tid_ampdu_rx *tid_rx;
71520 struct tid_ampdu_tx *tid_tx;
71521
71522+ pax_track_stack();
71523+
71524 rcu_read_lock();
71525
71526 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
71527@@ -240,6 +242,8 @@ static ssize_t sta_ht_capa_read(struct f
71528 struct sta_info *sta = file->private_data;
71529 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
71530
71531+ pax_track_stack();
71532+
71533 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
71534 htc->ht_supported ? "" : "not ");
71535 if (htc->ht_supported) {
71536diff -urNp linux-3.0.4/net/mac80211/ieee80211_i.h linux-3.0.4/net/mac80211/ieee80211_i.h
71537--- linux-3.0.4/net/mac80211/ieee80211_i.h 2011-07-21 22:17:23.000000000 -0400
71538+++ linux-3.0.4/net/mac80211/ieee80211_i.h 2011-08-23 21:47:56.000000000 -0400
71539@@ -27,6 +27,7 @@
71540 #include <net/ieee80211_radiotap.h>
71541 #include <net/cfg80211.h>
71542 #include <net/mac80211.h>
71543+#include <asm/local.h>
71544 #include "key.h"
71545 #include "sta_info.h"
71546
71547@@ -721,7 +722,7 @@ struct ieee80211_local {
71548 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
71549 spinlock_t queue_stop_reason_lock;
71550
71551- int open_count;
71552+ local_t open_count;
71553 int monitors, cooked_mntrs;
71554 /* number of interfaces with corresponding FIF_ flags */
71555 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
71556diff -urNp linux-3.0.4/net/mac80211/iface.c linux-3.0.4/net/mac80211/iface.c
71557--- linux-3.0.4/net/mac80211/iface.c 2011-09-02 18:11:21.000000000 -0400
71558+++ linux-3.0.4/net/mac80211/iface.c 2011-08-23 21:47:56.000000000 -0400
71559@@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_
71560 break;
71561 }
71562
71563- if (local->open_count == 0) {
71564+ if (local_read(&local->open_count) == 0) {
71565 res = drv_start(local);
71566 if (res)
71567 goto err_del_bss;
71568@@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_
71569 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
71570
71571 if (!is_valid_ether_addr(dev->dev_addr)) {
71572- if (!local->open_count)
71573+ if (!local_read(&local->open_count))
71574 drv_stop(local);
71575 return -EADDRNOTAVAIL;
71576 }
71577@@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_
71578 mutex_unlock(&local->mtx);
71579
71580 if (coming_up)
71581- local->open_count++;
71582+ local_inc(&local->open_count);
71583
71584 if (hw_reconf_flags) {
71585 ieee80211_hw_config(local, hw_reconf_flags);
71586@@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_
71587 err_del_interface:
71588 drv_remove_interface(local, &sdata->vif);
71589 err_stop:
71590- if (!local->open_count)
71591+ if (!local_read(&local->open_count))
71592 drv_stop(local);
71593 err_del_bss:
71594 sdata->bss = NULL;
71595@@ -475,7 +475,7 @@ static void ieee80211_do_stop(struct iee
71596 }
71597
71598 if (going_down)
71599- local->open_count--;
71600+ local_dec(&local->open_count);
71601
71602 switch (sdata->vif.type) {
71603 case NL80211_IFTYPE_AP_VLAN:
71604@@ -534,7 +534,7 @@ static void ieee80211_do_stop(struct iee
71605
71606 ieee80211_recalc_ps(local, -1);
71607
71608- if (local->open_count == 0) {
71609+ if (local_read(&local->open_count) == 0) {
71610 if (local->ops->napi_poll)
71611 napi_disable(&local->napi);
71612 ieee80211_clear_tx_pending(local);
71613diff -urNp linux-3.0.4/net/mac80211/main.c linux-3.0.4/net/mac80211/main.c
71614--- linux-3.0.4/net/mac80211/main.c 2011-07-21 22:17:23.000000000 -0400
71615+++ linux-3.0.4/net/mac80211/main.c 2011-08-23 21:47:56.000000000 -0400
71616@@ -209,7 +209,7 @@ int ieee80211_hw_config(struct ieee80211
71617 local->hw.conf.power_level = power;
71618 }
71619
71620- if (changed && local->open_count) {
71621+ if (changed && local_read(&local->open_count)) {
71622 ret = drv_config(local, changed);
71623 /*
71624 * Goal:
71625diff -urNp linux-3.0.4/net/mac80211/mlme.c linux-3.0.4/net/mac80211/mlme.c
71626--- linux-3.0.4/net/mac80211/mlme.c 2011-09-02 18:11:21.000000000 -0400
71627+++ linux-3.0.4/net/mac80211/mlme.c 2011-08-23 21:48:14.000000000 -0400
71628@@ -1444,6 +1444,8 @@ static bool ieee80211_assoc_success(stru
71629 bool have_higher_than_11mbit = false;
71630 u16 ap_ht_cap_flags;
71631
71632+ pax_track_stack();
71633+
71634 /* AssocResp and ReassocResp have identical structure */
71635
71636 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
71637diff -urNp linux-3.0.4/net/mac80211/pm.c linux-3.0.4/net/mac80211/pm.c
71638--- linux-3.0.4/net/mac80211/pm.c 2011-07-21 22:17:23.000000000 -0400
71639+++ linux-3.0.4/net/mac80211/pm.c 2011-08-23 21:47:56.000000000 -0400
71640@@ -47,7 +47,7 @@ int __ieee80211_suspend(struct ieee80211
71641 cancel_work_sync(&local->dynamic_ps_enable_work);
71642 del_timer_sync(&local->dynamic_ps_timer);
71643
71644- local->wowlan = wowlan && local->open_count;
71645+ local->wowlan = wowlan && local_read(&local->open_count);
71646 if (local->wowlan) {
71647 int err = drv_suspend(local, wowlan);
71648 if (err) {
71649@@ -111,7 +111,7 @@ int __ieee80211_suspend(struct ieee80211
71650 }
71651
71652 /* stop hardware - this must stop RX */
71653- if (local->open_count)
71654+ if (local_read(&local->open_count))
71655 ieee80211_stop_device(local);
71656
71657 suspend:
71658diff -urNp linux-3.0.4/net/mac80211/rate.c linux-3.0.4/net/mac80211/rate.c
71659--- linux-3.0.4/net/mac80211/rate.c 2011-07-21 22:17:23.000000000 -0400
71660+++ linux-3.0.4/net/mac80211/rate.c 2011-08-23 21:47:56.000000000 -0400
71661@@ -371,7 +371,7 @@ int ieee80211_init_rate_ctrl_alg(struct
71662
71663 ASSERT_RTNL();
71664
71665- if (local->open_count)
71666+ if (local_read(&local->open_count))
71667 return -EBUSY;
71668
71669 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
71670diff -urNp linux-3.0.4/net/mac80211/rc80211_pid_debugfs.c linux-3.0.4/net/mac80211/rc80211_pid_debugfs.c
71671--- linux-3.0.4/net/mac80211/rc80211_pid_debugfs.c 2011-07-21 22:17:23.000000000 -0400
71672+++ linux-3.0.4/net/mac80211/rc80211_pid_debugfs.c 2011-08-23 21:47:56.000000000 -0400
71673@@ -192,7 +192,7 @@ static ssize_t rate_control_pid_events_r
71674
71675 spin_unlock_irqrestore(&events->lock, status);
71676
71677- if (copy_to_user(buf, pb, p))
71678+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
71679 return -EFAULT;
71680
71681 return p;
71682diff -urNp linux-3.0.4/net/mac80211/util.c linux-3.0.4/net/mac80211/util.c
71683--- linux-3.0.4/net/mac80211/util.c 2011-07-21 22:17:23.000000000 -0400
71684+++ linux-3.0.4/net/mac80211/util.c 2011-08-23 21:47:56.000000000 -0400
71685@@ -1147,7 +1147,7 @@ int ieee80211_reconfig(struct ieee80211_
71686 #endif
71687
71688 /* restart hardware */
71689- if (local->open_count) {
71690+ if (local_read(&local->open_count)) {
71691 /*
71692 * Upon resume hardware can sometimes be goofy due to
71693 * various platform / driver / bus issues, so restarting
71694diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_conn.c linux-3.0.4/net/netfilter/ipvs/ip_vs_conn.c
71695--- linux-3.0.4/net/netfilter/ipvs/ip_vs_conn.c 2011-07-21 22:17:23.000000000 -0400
71696+++ linux-3.0.4/net/netfilter/ipvs/ip_vs_conn.c 2011-08-23 21:47:56.000000000 -0400
71697@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
71698 /* Increase the refcnt counter of the dest */
71699 atomic_inc(&dest->refcnt);
71700
71701- conn_flags = atomic_read(&dest->conn_flags);
71702+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
71703 if (cp->protocol != IPPROTO_UDP)
71704 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
71705 /* Bind with the destination and its corresponding transmitter */
71706@@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_p
71707 atomic_set(&cp->refcnt, 1);
71708
71709 atomic_set(&cp->n_control, 0);
71710- atomic_set(&cp->in_pkts, 0);
71711+ atomic_set_unchecked(&cp->in_pkts, 0);
71712
71713 atomic_inc(&ipvs->conn_count);
71714 if (flags & IP_VS_CONN_F_NO_CPORT)
71715@@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip
71716
71717 /* Don't drop the entry if its number of incoming packets is not
71718 located in [0, 8] */
71719- i = atomic_read(&cp->in_pkts);
71720+ i = atomic_read_unchecked(&cp->in_pkts);
71721 if (i > 8 || i < 0) return 0;
71722
71723 if (!todrop_rate[i]) return 0;
71724diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_core.c linux-3.0.4/net/netfilter/ipvs/ip_vs_core.c
71725--- linux-3.0.4/net/netfilter/ipvs/ip_vs_core.c 2011-07-21 22:17:23.000000000 -0400
71726+++ linux-3.0.4/net/netfilter/ipvs/ip_vs_core.c 2011-08-23 21:47:56.000000000 -0400
71727@@ -563,7 +563,7 @@ int ip_vs_leave(struct ip_vs_service *sv
71728 ret = cp->packet_xmit(skb, cp, pd->pp);
71729 /* do not touch skb anymore */
71730
71731- atomic_inc(&cp->in_pkts);
71732+ atomic_inc_unchecked(&cp->in_pkts);
71733 ip_vs_conn_put(cp);
71734 return ret;
71735 }
71736@@ -1613,7 +1613,7 @@ ip_vs_in(unsigned int hooknum, struct sk
71737 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
71738 pkts = sysctl_sync_threshold(ipvs);
71739 else
71740- pkts = atomic_add_return(1, &cp->in_pkts);
71741+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
71742
71743 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
71744 cp->protocol == IPPROTO_SCTP) {
71745diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_ctl.c linux-3.0.4/net/netfilter/ipvs/ip_vs_ctl.c
71746--- linux-3.0.4/net/netfilter/ipvs/ip_vs_ctl.c 2011-09-02 18:11:21.000000000 -0400
71747+++ linux-3.0.4/net/netfilter/ipvs/ip_vs_ctl.c 2011-08-23 21:48:14.000000000 -0400
71748@@ -782,7 +782,7 @@ __ip_vs_update_dest(struct ip_vs_service
71749 ip_vs_rs_hash(ipvs, dest);
71750 write_unlock_bh(&ipvs->rs_lock);
71751 }
71752- atomic_set(&dest->conn_flags, conn_flags);
71753+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
71754
71755 /* bind the service */
71756 if (!dest->svc) {
71757@@ -2027,7 +2027,7 @@ static int ip_vs_info_seq_show(struct se
71758 " %-7s %-6d %-10d %-10d\n",
71759 &dest->addr.in6,
71760 ntohs(dest->port),
71761- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
71762+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
71763 atomic_read(&dest->weight),
71764 atomic_read(&dest->activeconns),
71765 atomic_read(&dest->inactconns));
71766@@ -2038,7 +2038,7 @@ static int ip_vs_info_seq_show(struct se
71767 "%-7s %-6d %-10d %-10d\n",
71768 ntohl(dest->addr.ip),
71769 ntohs(dest->port),
71770- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
71771+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
71772 atomic_read(&dest->weight),
71773 atomic_read(&dest->activeconns),
71774 atomic_read(&dest->inactconns));
71775@@ -2284,6 +2284,8 @@ do_ip_vs_set_ctl(struct sock *sk, int cm
71776 struct ip_vs_dest_user *udest_compat;
71777 struct ip_vs_dest_user_kern udest;
71778
71779+ pax_track_stack();
71780+
71781 if (!capable(CAP_NET_ADMIN))
71782 return -EPERM;
71783
71784@@ -2498,7 +2500,7 @@ __ip_vs_get_dest_entries(struct net *net
71785
71786 entry.addr = dest->addr.ip;
71787 entry.port = dest->port;
71788- entry.conn_flags = atomic_read(&dest->conn_flags);
71789+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
71790 entry.weight = atomic_read(&dest->weight);
71791 entry.u_threshold = dest->u_threshold;
71792 entry.l_threshold = dest->l_threshold;
71793@@ -3026,7 +3028,7 @@ static int ip_vs_genl_fill_dest(struct s
71794 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
71795
71796 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
71797- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
71798+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
71799 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
71800 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
71801 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
71802diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_sync.c linux-3.0.4/net/netfilter/ipvs/ip_vs_sync.c
71803--- linux-3.0.4/net/netfilter/ipvs/ip_vs_sync.c 2011-07-21 22:17:23.000000000 -0400
71804+++ linux-3.0.4/net/netfilter/ipvs/ip_vs_sync.c 2011-08-23 21:47:56.000000000 -0400
71805@@ -648,7 +648,7 @@ control:
71806 * i.e only increment in_pkts for Templates.
71807 */
71808 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
71809- int pkts = atomic_add_return(1, &cp->in_pkts);
71810+ int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
71811
71812 if (pkts % sysctl_sync_period(ipvs) != 1)
71813 return;
71814@@ -794,7 +794,7 @@ static void ip_vs_proc_conn(struct net *
71815
71816 if (opt)
71817 memcpy(&cp->in_seq, opt, sizeof(*opt));
71818- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
71819+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
71820 cp->state = state;
71821 cp->old_state = cp->state;
71822 /*
71823diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_xmit.c linux-3.0.4/net/netfilter/ipvs/ip_vs_xmit.c
71824--- linux-3.0.4/net/netfilter/ipvs/ip_vs_xmit.c 2011-07-21 22:17:23.000000000 -0400
71825+++ linux-3.0.4/net/netfilter/ipvs/ip_vs_xmit.c 2011-08-23 21:47:56.000000000 -0400
71826@@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
71827 else
71828 rc = NF_ACCEPT;
71829 /* do not touch skb anymore */
71830- atomic_inc(&cp->in_pkts);
71831+ atomic_inc_unchecked(&cp->in_pkts);
71832 goto out;
71833 }
71834
71835@@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
71836 else
71837 rc = NF_ACCEPT;
71838 /* do not touch skb anymore */
71839- atomic_inc(&cp->in_pkts);
71840+ atomic_inc_unchecked(&cp->in_pkts);
71841 goto out;
71842 }
71843
71844diff -urNp linux-3.0.4/net/netfilter/Kconfig linux-3.0.4/net/netfilter/Kconfig
71845--- linux-3.0.4/net/netfilter/Kconfig 2011-07-21 22:17:23.000000000 -0400
71846+++ linux-3.0.4/net/netfilter/Kconfig 2011-08-23 21:48:14.000000000 -0400
71847@@ -781,6 +781,16 @@ config NETFILTER_XT_MATCH_ESP
71848
71849 To compile it as a module, choose M here. If unsure, say N.
71850
71851+config NETFILTER_XT_MATCH_GRADM
71852+ tristate '"gradm" match support'
71853+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
71854+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
71855+ ---help---
71856+ The gradm match allows to match on grsecurity RBAC being enabled.
71857+ It is useful when iptables rules are applied early on bootup to
71858+ prevent connections to the machine (except from a trusted host)
71859+ while the RBAC system is disabled.
71860+
71861 config NETFILTER_XT_MATCH_HASHLIMIT
71862 tristate '"hashlimit" match support'
71863 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
71864diff -urNp linux-3.0.4/net/netfilter/Makefile linux-3.0.4/net/netfilter/Makefile
71865--- linux-3.0.4/net/netfilter/Makefile 2011-07-21 22:17:23.000000000 -0400
71866+++ linux-3.0.4/net/netfilter/Makefile 2011-08-23 21:48:14.000000000 -0400
71867@@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) +=
71868 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
71869 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
71870 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
71871+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
71872 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
71873 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
71874 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
71875diff -urNp linux-3.0.4/net/netfilter/nfnetlink_log.c linux-3.0.4/net/netfilter/nfnetlink_log.c
71876--- linux-3.0.4/net/netfilter/nfnetlink_log.c 2011-07-21 22:17:23.000000000 -0400
71877+++ linux-3.0.4/net/netfilter/nfnetlink_log.c 2011-08-23 21:47:56.000000000 -0400
71878@@ -70,7 +70,7 @@ struct nfulnl_instance {
71879 };
71880
71881 static DEFINE_SPINLOCK(instances_lock);
71882-static atomic_t global_seq;
71883+static atomic_unchecked_t global_seq;
71884
71885 #define INSTANCE_BUCKETS 16
71886 static struct hlist_head instance_table[INSTANCE_BUCKETS];
71887@@ -505,7 +505,7 @@ __build_packet_message(struct nfulnl_ins
71888 /* global sequence number */
71889 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
71890 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
71891- htonl(atomic_inc_return(&global_seq)));
71892+ htonl(atomic_inc_return_unchecked(&global_seq)));
71893
71894 if (data_len) {
71895 struct nlattr *nla;
71896diff -urNp linux-3.0.4/net/netfilter/nfnetlink_queue.c linux-3.0.4/net/netfilter/nfnetlink_queue.c
71897--- linux-3.0.4/net/netfilter/nfnetlink_queue.c 2011-07-21 22:17:23.000000000 -0400
71898+++ linux-3.0.4/net/netfilter/nfnetlink_queue.c 2011-08-23 21:47:56.000000000 -0400
71899@@ -58,7 +58,7 @@ struct nfqnl_instance {
71900 */
71901 spinlock_t lock;
71902 unsigned int queue_total;
71903- atomic_t id_sequence; /* 'sequence' of pkt ids */
71904+ atomic_unchecked_t id_sequence; /* 'sequence' of pkt ids */
71905 struct list_head queue_list; /* packets in queue */
71906 };
71907
71908@@ -272,7 +272,7 @@ nfqnl_build_packet_message(struct nfqnl_
71909 nfmsg->version = NFNETLINK_V0;
71910 nfmsg->res_id = htons(queue->queue_num);
71911
71912- entry->id = atomic_inc_return(&queue->id_sequence);
71913+ entry->id = atomic_inc_return_unchecked(&queue->id_sequence);
71914 pmsg.packet_id = htonl(entry->id);
71915 pmsg.hw_protocol = entskb->protocol;
71916 pmsg.hook = entry->hook;
71917@@ -870,7 +870,7 @@ static int seq_show(struct seq_file *s,
71918 inst->peer_pid, inst->queue_total,
71919 inst->copy_mode, inst->copy_range,
71920 inst->queue_dropped, inst->queue_user_dropped,
71921- atomic_read(&inst->id_sequence), 1);
71922+ atomic_read_unchecked(&inst->id_sequence), 1);
71923 }
71924
71925 static const struct seq_operations nfqnl_seq_ops = {
71926diff -urNp linux-3.0.4/net/netfilter/xt_gradm.c linux-3.0.4/net/netfilter/xt_gradm.c
71927--- linux-3.0.4/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
71928+++ linux-3.0.4/net/netfilter/xt_gradm.c 2011-08-23 21:48:14.000000000 -0400
71929@@ -0,0 +1,51 @@
71930+/*
71931+ * gradm match for netfilter
71932