]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.2.2-3.1.1-201111201943.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-3.1.1-201111201943.patch
CommitLineData
6a7f3573
PK
1diff -urNp linux-3.1.1/arch/alpha/include/asm/elf.h linux-3.1.1/arch/alpha/include/asm/elf.h
2--- linux-3.1.1/arch/alpha/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
3+++ linux-3.1.1/arch/alpha/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
4@@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8+#ifdef CONFIG_PAX_ASLR
9+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10+
11+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13+#endif
14+
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18diff -urNp linux-3.1.1/arch/alpha/include/asm/pgtable.h linux-3.1.1/arch/alpha/include/asm/pgtable.h
19--- linux-3.1.1/arch/alpha/include/asm/pgtable.h 2011-11-11 15:19:27.000000000 -0500
20+++ linux-3.1.1/arch/alpha/include/asm/pgtable.h 2011-11-16 18:39:07.000000000 -0500
21@@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25+
26+#ifdef CONFIG_PAX_PAGEEXEC
27+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30+#else
31+# define PAGE_SHARED_NOEXEC PAGE_SHARED
32+# define PAGE_COPY_NOEXEC PAGE_COPY
33+# define PAGE_READONLY_NOEXEC PAGE_READONLY
34+#endif
35+
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39diff -urNp linux-3.1.1/arch/alpha/kernel/module.c linux-3.1.1/arch/alpha/kernel/module.c
40--- linux-3.1.1/arch/alpha/kernel/module.c 2011-11-11 15:19:27.000000000 -0500
41+++ linux-3.1.1/arch/alpha/kernel/module.c 2011-11-16 18:39:07.000000000 -0500
42@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46- gp = (u64)me->module_core + me->core_size - 0x8000;
47+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51diff -urNp linux-3.1.1/arch/alpha/kernel/osf_sys.c linux-3.1.1/arch/alpha/kernel/osf_sys.c
52--- linux-3.1.1/arch/alpha/kernel/osf_sys.c 2011-11-11 15:19:27.000000000 -0500
53+++ linux-3.1.1/arch/alpha/kernel/osf_sys.c 2011-11-16 18:39:07.000000000 -0500
54@@ -1147,7 +1147,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58- if (!vma || addr + len <= vma->vm_start)
59+ if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63@@ -1183,6 +1183,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67+#ifdef CONFIG_PAX_RANDMMAP
68+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69+#endif
70+
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74@@ -1190,8 +1194,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79- len, limit);
80+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81+
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
85diff -urNp linux-3.1.1/arch/alpha/mm/fault.c linux-3.1.1/arch/alpha/mm/fault.c
86--- linux-3.1.1/arch/alpha/mm/fault.c 2011-11-11 15:19:27.000000000 -0500
87+++ linux-3.1.1/arch/alpha/mm/fault.c 2011-11-16 18:39:07.000000000 -0500
88@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92+#ifdef CONFIG_PAX_PAGEEXEC
93+/*
94+ * PaX: decide what to do with offenders (regs->pc = fault address)
95+ *
96+ * returns 1 when task should be killed
97+ * 2 when patched PLT trampoline was detected
98+ * 3 when unpatched PLT trampoline was detected
99+ */
100+static int pax_handle_fetch_fault(struct pt_regs *regs)
101+{
102+
103+#ifdef CONFIG_PAX_EMUPLT
104+ int err;
105+
106+ do { /* PaX: patched PLT emulation #1 */
107+ unsigned int ldah, ldq, jmp;
108+
109+ err = get_user(ldah, (unsigned int *)regs->pc);
110+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112+
113+ if (err)
114+ break;
115+
116+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118+ jmp == 0x6BFB0000U)
119+ {
120+ unsigned long r27, addr;
121+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123+
124+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125+ err = get_user(r27, (unsigned long *)addr);
126+ if (err)
127+ break;
128+
129+ regs->r27 = r27;
130+ regs->pc = r27;
131+ return 2;
132+ }
133+ } while (0);
134+
135+ do { /* PaX: patched PLT emulation #2 */
136+ unsigned int ldah, lda, br;
137+
138+ err = get_user(ldah, (unsigned int *)regs->pc);
139+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
140+ err |= get_user(br, (unsigned int *)(regs->pc+8));
141+
142+ if (err)
143+ break;
144+
145+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
147+ (br & 0xFFE00000U) == 0xC3E00000U)
148+ {
149+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152+
153+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155+ return 2;
156+ }
157+ } while (0);
158+
159+ do { /* PaX: unpatched PLT emulation */
160+ unsigned int br;
161+
162+ err = get_user(br, (unsigned int *)regs->pc);
163+
164+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165+ unsigned int br2, ldq, nop, jmp;
166+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167+
168+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169+ err = get_user(br2, (unsigned int *)addr);
170+ err |= get_user(ldq, (unsigned int *)(addr+4));
171+ err |= get_user(nop, (unsigned int *)(addr+8));
172+ err |= get_user(jmp, (unsigned int *)(addr+12));
173+ err |= get_user(resolver, (unsigned long *)(addr+16));
174+
175+ if (err)
176+ break;
177+
178+ if (br2 == 0xC3600000U &&
179+ ldq == 0xA77B000CU &&
180+ nop == 0x47FF041FU &&
181+ jmp == 0x6B7B0000U)
182+ {
183+ regs->r28 = regs->pc+4;
184+ regs->r27 = addr+16;
185+ regs->pc = resolver;
186+ return 3;
187+ }
188+ }
189+ } while (0);
190+#endif
191+
192+ return 1;
193+}
194+
195+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
196+{
197+ unsigned long i;
198+
199+ printk(KERN_ERR "PAX: bytes at PC: ");
200+ for (i = 0; i < 5; i++) {
201+ unsigned int c;
202+ if (get_user(c, (unsigned int *)pc+i))
203+ printk(KERN_CONT "???????? ");
204+ else
205+ printk(KERN_CONT "%08x ", c);
206+ }
207+ printk("\n");
208+}
209+#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217- if (!(vma->vm_flags & VM_EXEC))
218+ if (!(vma->vm_flags & VM_EXEC)) {
219+
220+#ifdef CONFIG_PAX_PAGEEXEC
221+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222+ goto bad_area;
223+
224+ up_read(&mm->mmap_sem);
225+ switch (pax_handle_fetch_fault(regs)) {
226+
227+#ifdef CONFIG_PAX_EMUPLT
228+ case 2:
229+ case 3:
230+ return;
231+#endif
232+
233+ }
234+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235+ do_group_exit(SIGKILL);
236+#else
237 goto bad_area;
238+#endif
239+
240+ }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
244diff -urNp linux-3.1.1/arch/arm/include/asm/elf.h linux-3.1.1/arch/arm/include/asm/elf.h
245--- linux-3.1.1/arch/arm/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
246+++ linux-3.1.1/arch/arm/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
247@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253+
254+#ifdef CONFIG_PAX_ASLR
255+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256+
257+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259+#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263@@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t
264 extern void elf_set_personality(const struct elf32_hdr *);
265 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
266
267-struct mm_struct;
268-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
269-#define arch_randomize_brk arch_randomize_brk
270-
271 extern int vectors_user_mapping(void);
272 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
273 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
274diff -urNp linux-3.1.1/arch/arm/include/asm/kmap_types.h linux-3.1.1/arch/arm/include/asm/kmap_types.h
275--- linux-3.1.1/arch/arm/include/asm/kmap_types.h 2011-11-11 15:19:27.000000000 -0500
276+++ linux-3.1.1/arch/arm/include/asm/kmap_types.h 2011-11-16 18:39:07.000000000 -0500
277@@ -21,6 +21,7 @@ enum km_type {
278 KM_L1_CACHE,
279 KM_L2_CACHE,
280 KM_KDB,
281+ KM_CLEARPAGE,
282 KM_TYPE_NR
283 };
284
285diff -urNp linux-3.1.1/arch/arm/include/asm/uaccess.h linux-3.1.1/arch/arm/include/asm/uaccess.h
286--- linux-3.1.1/arch/arm/include/asm/uaccess.h 2011-11-11 15:19:27.000000000 -0500
287+++ linux-3.1.1/arch/arm/include/asm/uaccess.h 2011-11-16 18:39:07.000000000 -0500
288@@ -22,6 +22,8 @@
289 #define VERIFY_READ 0
290 #define VERIFY_WRITE 1
291
292+extern void check_object_size(const void *ptr, unsigned long n, bool to);
293+
294 /*
295 * The exception table consists of pairs of addresses: the first is the
296 * address of an instruction that is allowed to fault, and the second is
297@@ -387,8 +389,23 @@ do { \
298
299
300 #ifdef CONFIG_MMU
301-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
302-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
303+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
304+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
305+
306+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
307+{
308+ if (!__builtin_constant_p(n))
309+ check_object_size(to, n, false);
310+ return ___copy_from_user(to, from, n);
311+}
312+
313+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
314+{
315+ if (!__builtin_constant_p(n))
316+ check_object_size(from, n, true);
317+ return ___copy_to_user(to, from, n);
318+}
319+
320 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
321 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
322 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
323@@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
324
325 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
326 {
327+ if ((long)n < 0)
328+ return n;
329+
330 if (access_ok(VERIFY_READ, from, n))
331 n = __copy_from_user(to, from, n);
332 else /* security hole - plug it */
333@@ -412,6 +432,9 @@ static inline unsigned long __must_check
334
335 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
336 {
337+ if ((long)n < 0)
338+ return n;
339+
340 if (access_ok(VERIFY_WRITE, to, n))
341 n = __copy_to_user(to, from, n);
342 return n;
343diff -urNp linux-3.1.1/arch/arm/kernel/armksyms.c linux-3.1.1/arch/arm/kernel/armksyms.c
344--- linux-3.1.1/arch/arm/kernel/armksyms.c 2011-11-11 15:19:27.000000000 -0500
345+++ linux-3.1.1/arch/arm/kernel/armksyms.c 2011-11-16 18:39:07.000000000 -0500
346@@ -98,8 +98,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
347 #ifdef CONFIG_MMU
348 EXPORT_SYMBOL(copy_page);
349
350-EXPORT_SYMBOL(__copy_from_user);
351-EXPORT_SYMBOL(__copy_to_user);
352+EXPORT_SYMBOL(___copy_from_user);
353+EXPORT_SYMBOL(___copy_to_user);
354 EXPORT_SYMBOL(__clear_user);
355
356 EXPORT_SYMBOL(__get_user_1);
357diff -urNp linux-3.1.1/arch/arm/kernel/process.c linux-3.1.1/arch/arm/kernel/process.c
358--- linux-3.1.1/arch/arm/kernel/process.c 2011-11-11 15:19:27.000000000 -0500
359+++ linux-3.1.1/arch/arm/kernel/process.c 2011-11-16 18:39:07.000000000 -0500
360@@ -28,7 +28,6 @@
361 #include <linux/tick.h>
362 #include <linux/utsname.h>
363 #include <linux/uaccess.h>
364-#include <linux/random.h>
365 #include <linux/hw_breakpoint.h>
366 #include <linux/cpuidle.h>
367
368@@ -481,12 +480,6 @@ unsigned long get_wchan(struct task_stru
369 return 0;
370 }
371
372-unsigned long arch_randomize_brk(struct mm_struct *mm)
373-{
374- unsigned long range_end = mm->brk + 0x02000000;
375- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
376-}
377-
378 #ifdef CONFIG_MMU
379 /*
380 * The vectors page is always readable from user space for the
381diff -urNp linux-3.1.1/arch/arm/kernel/traps.c linux-3.1.1/arch/arm/kernel/traps.c
382--- linux-3.1.1/arch/arm/kernel/traps.c 2011-11-11 15:19:27.000000000 -0500
383+++ linux-3.1.1/arch/arm/kernel/traps.c 2011-11-16 18:40:08.000000000 -0500
384@@ -257,6 +257,8 @@ static int __die(const char *str, int er
385
386 static DEFINE_SPINLOCK(die_lock);
387
388+extern void gr_handle_kernel_exploit(void);
389+
390 /*
391 * This function is protected against re-entrancy.
392 */
393@@ -284,6 +286,9 @@ void die(const char *str, struct pt_regs
394 panic("Fatal exception in interrupt");
395 if (panic_on_oops)
396 panic("Fatal exception");
397+
398+ gr_handle_kernel_exploit();
399+
400 if (ret != NOTIFY_STOP)
401 do_exit(SIGSEGV);
402 }
403diff -urNp linux-3.1.1/arch/arm/lib/copy_from_user.S linux-3.1.1/arch/arm/lib/copy_from_user.S
404--- linux-3.1.1/arch/arm/lib/copy_from_user.S 2011-11-11 15:19:27.000000000 -0500
405+++ linux-3.1.1/arch/arm/lib/copy_from_user.S 2011-11-16 18:39:07.000000000 -0500
406@@ -16,7 +16,7 @@
407 /*
408 * Prototype:
409 *
410- * size_t __copy_from_user(void *to, const void *from, size_t n)
411+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
412 *
413 * Purpose:
414 *
415@@ -84,11 +84,11 @@
416
417 .text
418
419-ENTRY(__copy_from_user)
420+ENTRY(___copy_from_user)
421
422 #include "copy_template.S"
423
424-ENDPROC(__copy_from_user)
425+ENDPROC(___copy_from_user)
426
427 .pushsection .fixup,"ax"
428 .align 0
429diff -urNp linux-3.1.1/arch/arm/lib/copy_to_user.S linux-3.1.1/arch/arm/lib/copy_to_user.S
430--- linux-3.1.1/arch/arm/lib/copy_to_user.S 2011-11-11 15:19:27.000000000 -0500
431+++ linux-3.1.1/arch/arm/lib/copy_to_user.S 2011-11-16 18:39:07.000000000 -0500
432@@ -16,7 +16,7 @@
433 /*
434 * Prototype:
435 *
436- * size_t __copy_to_user(void *to, const void *from, size_t n)
437+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
438 *
439 * Purpose:
440 *
441@@ -88,11 +88,11 @@
442 .text
443
444 ENTRY(__copy_to_user_std)
445-WEAK(__copy_to_user)
446+WEAK(___copy_to_user)
447
448 #include "copy_template.S"
449
450-ENDPROC(__copy_to_user)
451+ENDPROC(___copy_to_user)
452 ENDPROC(__copy_to_user_std)
453
454 .pushsection .fixup,"ax"
455diff -urNp linux-3.1.1/arch/arm/lib/uaccess.S linux-3.1.1/arch/arm/lib/uaccess.S
456--- linux-3.1.1/arch/arm/lib/uaccess.S 2011-11-11 15:19:27.000000000 -0500
457+++ linux-3.1.1/arch/arm/lib/uaccess.S 2011-11-16 18:39:07.000000000 -0500
458@@ -20,7 +20,7 @@
459
460 #define PAGE_SHIFT 12
461
462-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
463+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
464 * Purpose : copy a block to user memory from kernel memory
465 * Params : to - user memory
466 * : from - kernel memory
467@@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May f
468 sub r2, r2, ip
469 b .Lc2u_dest_aligned
470
471-ENTRY(__copy_to_user)
472+ENTRY(___copy_to_user)
473 stmfd sp!, {r2, r4 - r7, lr}
474 cmp r2, #4
475 blt .Lc2u_not_enough
476@@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May f
477 ldrgtb r3, [r1], #0
478 USER( T(strgtb) r3, [r0], #1) @ May fault
479 b .Lc2u_finished
480-ENDPROC(__copy_to_user)
481+ENDPROC(___copy_to_user)
482
483 .pushsection .fixup,"ax"
484 .align 0
485 9001: ldmfd sp!, {r0, r4 - r7, pc}
486 .popsection
487
488-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
489+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
490 * Purpose : copy a block from user memory to kernel memory
491 * Params : to - kernel memory
492 * : from - user memory
493@@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May f
494 sub r2, r2, ip
495 b .Lcfu_dest_aligned
496
497-ENTRY(__copy_from_user)
498+ENTRY(___copy_from_user)
499 stmfd sp!, {r0, r2, r4 - r7, lr}
500 cmp r2, #4
501 blt .Lcfu_not_enough
502@@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May f
503 USER( T(ldrgtb) r3, [r1], #1) @ May fault
504 strgtb r3, [r0], #1
505 b .Lcfu_finished
506-ENDPROC(__copy_from_user)
507+ENDPROC(___copy_from_user)
508
509 .pushsection .fixup,"ax"
510 .align 0
511diff -urNp linux-3.1.1/arch/arm/lib/uaccess_with_memcpy.c linux-3.1.1/arch/arm/lib/uaccess_with_memcpy.c
512--- linux-3.1.1/arch/arm/lib/uaccess_with_memcpy.c 2011-11-11 15:19:27.000000000 -0500
513+++ linux-3.1.1/arch/arm/lib/uaccess_with_memcpy.c 2011-11-16 18:39:07.000000000 -0500
514@@ -103,7 +103,7 @@ out:
515 }
516
517 unsigned long
518-__copy_to_user(void __user *to, const void *from, unsigned long n)
519+___copy_to_user(void __user *to, const void *from, unsigned long n)
520 {
521 /*
522 * This test is stubbed out of the main function above to keep
523diff -urNp linux-3.1.1/arch/arm/mach-ux500/mbox-db5500.c linux-3.1.1/arch/arm/mach-ux500/mbox-db5500.c
524--- linux-3.1.1/arch/arm/mach-ux500/mbox-db5500.c 2011-11-11 15:19:27.000000000 -0500
525+++ linux-3.1.1/arch/arm/mach-ux500/mbox-db5500.c 2011-11-16 18:40:08.000000000 -0500
526@@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct dev
527 return sprintf(buf, "0x%X\n", mbox_value);
528 }
529
530-static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
531+static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
532
533 static int mbox_show(struct seq_file *s, void *data)
534 {
535diff -urNp linux-3.1.1/arch/arm/mm/fault.c linux-3.1.1/arch/arm/mm/fault.c
536--- linux-3.1.1/arch/arm/mm/fault.c 2011-11-11 15:19:27.000000000 -0500
537+++ linux-3.1.1/arch/arm/mm/fault.c 2011-11-16 18:39:07.000000000 -0500
538@@ -182,6 +182,13 @@ __do_user_fault(struct task_struct *tsk,
539 }
540 #endif
541
542+#ifdef CONFIG_PAX_PAGEEXEC
543+ if (fsr & FSR_LNX_PF) {
544+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
545+ do_group_exit(SIGKILL);
546+ }
547+#endif
548+
549 tsk->thread.address = addr;
550 tsk->thread.error_code = fsr;
551 tsk->thread.trap_no = 14;
552@@ -383,6 +390,33 @@ do_page_fault(unsigned long addr, unsign
553 }
554 #endif /* CONFIG_MMU */
555
556+#ifdef CONFIG_PAX_PAGEEXEC
557+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
558+{
559+ long i;
560+
561+ printk(KERN_ERR "PAX: bytes at PC: ");
562+ for (i = 0; i < 20; i++) {
563+ unsigned char c;
564+ if (get_user(c, (__force unsigned char __user *)pc+i))
565+ printk(KERN_CONT "?? ");
566+ else
567+ printk(KERN_CONT "%02x ", c);
568+ }
569+ printk("\n");
570+
571+ printk(KERN_ERR "PAX: bytes at SP-4: ");
572+ for (i = -1; i < 20; i++) {
573+ unsigned long c;
574+ if (get_user(c, (__force unsigned long __user *)sp+i))
575+ printk(KERN_CONT "???????? ");
576+ else
577+ printk(KERN_CONT "%08lx ", c);
578+ }
579+ printk("\n");
580+}
581+#endif
582+
583 /*
584 * First Level Translation Fault Handler
585 *
586diff -urNp linux-3.1.1/arch/arm/mm/mmap.c linux-3.1.1/arch/arm/mm/mmap.c
587--- linux-3.1.1/arch/arm/mm/mmap.c 2011-11-11 15:19:27.000000000 -0500
588+++ linux-3.1.1/arch/arm/mm/mmap.c 2011-11-16 18:39:07.000000000 -0500
589@@ -65,6 +65,10 @@ arch_get_unmapped_area(struct file *filp
590 if (len > TASK_SIZE)
591 return -ENOMEM;
592
593+#ifdef CONFIG_PAX_RANDMMAP
594+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
595+#endif
596+
597 if (addr) {
598 if (do_align)
599 addr = COLOUR_ALIGN(addr, pgoff);
600@@ -72,15 +76,14 @@ arch_get_unmapped_area(struct file *filp
601 addr = PAGE_ALIGN(addr);
602
603 vma = find_vma(mm, addr);
604- if (TASK_SIZE - len >= addr &&
605- (!vma || addr + len <= vma->vm_start))
606+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
607 return addr;
608 }
609 if (len > mm->cached_hole_size) {
610- start_addr = addr = mm->free_area_cache;
611+ start_addr = addr = mm->free_area_cache;
612 } else {
613- start_addr = addr = TASK_UNMAPPED_BASE;
614- mm->cached_hole_size = 0;
615+ start_addr = addr = mm->mmap_base;
616+ mm->cached_hole_size = 0;
617 }
618 /* 8 bits of randomness in 20 address space bits */
619 if ((current->flags & PF_RANDOMIZE) &&
620@@ -100,14 +103,14 @@ full_search:
621 * Start a new search - just in case we missed
622 * some holes.
623 */
624- if (start_addr != TASK_UNMAPPED_BASE) {
625- start_addr = addr = TASK_UNMAPPED_BASE;
626+ if (start_addr != mm->mmap_base) {
627+ start_addr = addr = mm->mmap_base;
628 mm->cached_hole_size = 0;
629 goto full_search;
630 }
631 return -ENOMEM;
632 }
633- if (!vma || addr + len <= vma->vm_start) {
634+ if (check_heap_stack_gap(vma, addr, len)) {
635 /*
636 * Remember the place where we stopped the search:
637 */
638diff -urNp linux-3.1.1/arch/avr32/include/asm/elf.h linux-3.1.1/arch/avr32/include/asm/elf.h
639--- linux-3.1.1/arch/avr32/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
640+++ linux-3.1.1/arch/avr32/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
641@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpreg
642 the loader. We need to make sure that it is out of the way of the program
643 that it will "exec", and that there is sufficient room for the brk. */
644
645-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
646+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
647
648+#ifdef CONFIG_PAX_ASLR
649+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
650+
651+#define PAX_DELTA_MMAP_LEN 15
652+#define PAX_DELTA_STACK_LEN 15
653+#endif
654
655 /* This yields a mask that user programs can use to figure out what
656 instruction set this CPU supports. This could be done in user space,
657diff -urNp linux-3.1.1/arch/avr32/include/asm/kmap_types.h linux-3.1.1/arch/avr32/include/asm/kmap_types.h
658--- linux-3.1.1/arch/avr32/include/asm/kmap_types.h 2011-11-11 15:19:27.000000000 -0500
659+++ linux-3.1.1/arch/avr32/include/asm/kmap_types.h 2011-11-16 18:39:07.000000000 -0500
660@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
661 D(11) KM_IRQ1,
662 D(12) KM_SOFTIRQ0,
663 D(13) KM_SOFTIRQ1,
664-D(14) KM_TYPE_NR
665+D(14) KM_CLEARPAGE,
666+D(15) KM_TYPE_NR
667 };
668
669 #undef D
670diff -urNp linux-3.1.1/arch/avr32/mm/fault.c linux-3.1.1/arch/avr32/mm/fault.c
671--- linux-3.1.1/arch/avr32/mm/fault.c 2011-11-11 15:19:27.000000000 -0500
672+++ linux-3.1.1/arch/avr32/mm/fault.c 2011-11-16 18:39:07.000000000 -0500
673@@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
674
675 int exception_trace = 1;
676
677+#ifdef CONFIG_PAX_PAGEEXEC
678+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
679+{
680+ unsigned long i;
681+
682+ printk(KERN_ERR "PAX: bytes at PC: ");
683+ for (i = 0; i < 20; i++) {
684+ unsigned char c;
685+ if (get_user(c, (unsigned char *)pc+i))
686+ printk(KERN_CONT "???????? ");
687+ else
688+ printk(KERN_CONT "%02x ", c);
689+ }
690+ printk("\n");
691+}
692+#endif
693+
694 /*
695 * This routine handles page faults. It determines the address and the
696 * problem, and then passes it off to one of the appropriate routines.
697@@ -156,6 +173,16 @@ bad_area:
698 up_read(&mm->mmap_sem);
699
700 if (user_mode(regs)) {
701+
702+#ifdef CONFIG_PAX_PAGEEXEC
703+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
704+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
705+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
706+ do_group_exit(SIGKILL);
707+ }
708+ }
709+#endif
710+
711 if (exception_trace && printk_ratelimit())
712 printk("%s%s[%d]: segfault at %08lx pc %08lx "
713 "sp %08lx ecr %lu\n",
714diff -urNp linux-3.1.1/arch/frv/include/asm/kmap_types.h linux-3.1.1/arch/frv/include/asm/kmap_types.h
715--- linux-3.1.1/arch/frv/include/asm/kmap_types.h 2011-11-11 15:19:27.000000000 -0500
716+++ linux-3.1.1/arch/frv/include/asm/kmap_types.h 2011-11-16 18:39:07.000000000 -0500
717@@ -23,6 +23,7 @@ enum km_type {
718 KM_IRQ1,
719 KM_SOFTIRQ0,
720 KM_SOFTIRQ1,
721+ KM_CLEARPAGE,
722 KM_TYPE_NR
723 };
724
725diff -urNp linux-3.1.1/arch/frv/mm/elf-fdpic.c linux-3.1.1/arch/frv/mm/elf-fdpic.c
726--- linux-3.1.1/arch/frv/mm/elf-fdpic.c 2011-11-11 15:19:27.000000000 -0500
727+++ linux-3.1.1/arch/frv/mm/elf-fdpic.c 2011-11-16 18:39:07.000000000 -0500
728@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
729 if (addr) {
730 addr = PAGE_ALIGN(addr);
731 vma = find_vma(current->mm, addr);
732- if (TASK_SIZE - len >= addr &&
733- (!vma || addr + len <= vma->vm_start))
734+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
735 goto success;
736 }
737
738@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
739 for (; vma; vma = vma->vm_next) {
740 if (addr > limit)
741 break;
742- if (addr + len <= vma->vm_start)
743+ if (check_heap_stack_gap(vma, addr, len))
744 goto success;
745 addr = vma->vm_end;
746 }
747@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
748 for (; vma; vma = vma->vm_next) {
749 if (addr > limit)
750 break;
751- if (addr + len <= vma->vm_start)
752+ if (check_heap_stack_gap(vma, addr, len))
753 goto success;
754 addr = vma->vm_end;
755 }
756diff -urNp linux-3.1.1/arch/ia64/include/asm/elf.h linux-3.1.1/arch/ia64/include/asm/elf.h
757--- linux-3.1.1/arch/ia64/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
758+++ linux-3.1.1/arch/ia64/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
759@@ -42,6 +42,13 @@
760 */
761 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
762
763+#ifdef CONFIG_PAX_ASLR
764+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
765+
766+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
767+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
768+#endif
769+
770 #define PT_IA_64_UNWIND 0x70000001
771
772 /* IA-64 relocations: */
773diff -urNp linux-3.1.1/arch/ia64/include/asm/pgtable.h linux-3.1.1/arch/ia64/include/asm/pgtable.h
774--- linux-3.1.1/arch/ia64/include/asm/pgtable.h 2011-11-11 15:19:27.000000000 -0500
775+++ linux-3.1.1/arch/ia64/include/asm/pgtable.h 2011-11-16 18:39:07.000000000 -0500
776@@ -12,7 +12,7 @@
777 * David Mosberger-Tang <davidm@hpl.hp.com>
778 */
779
780-
781+#include <linux/const.h>
782 #include <asm/mman.h>
783 #include <asm/page.h>
784 #include <asm/processor.h>
785@@ -143,6 +143,17 @@
786 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
787 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
788 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
789+
790+#ifdef CONFIG_PAX_PAGEEXEC
791+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
792+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
793+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
794+#else
795+# define PAGE_SHARED_NOEXEC PAGE_SHARED
796+# define PAGE_READONLY_NOEXEC PAGE_READONLY
797+# define PAGE_COPY_NOEXEC PAGE_COPY
798+#endif
799+
800 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
801 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
802 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
803diff -urNp linux-3.1.1/arch/ia64/include/asm/spinlock.h linux-3.1.1/arch/ia64/include/asm/spinlock.h
804--- linux-3.1.1/arch/ia64/include/asm/spinlock.h 2011-11-11 15:19:27.000000000 -0500
805+++ linux-3.1.1/arch/ia64/include/asm/spinlock.h 2011-11-16 18:39:07.000000000 -0500
806@@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
807 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
808
809 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
810- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
811+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
812 }
813
814 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
815diff -urNp linux-3.1.1/arch/ia64/include/asm/uaccess.h linux-3.1.1/arch/ia64/include/asm/uaccess.h
816--- linux-3.1.1/arch/ia64/include/asm/uaccess.h 2011-11-11 15:19:27.000000000 -0500
817+++ linux-3.1.1/arch/ia64/include/asm/uaccess.h 2011-11-16 18:39:07.000000000 -0500
818@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
819 const void *__cu_from = (from); \
820 long __cu_len = (n); \
821 \
822- if (__access_ok(__cu_to, __cu_len, get_fs())) \
823+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
824 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
825 __cu_len; \
826 })
827@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
828 long __cu_len = (n); \
829 \
830 __chk_user_ptr(__cu_from); \
831- if (__access_ok(__cu_from, __cu_len, get_fs())) \
832+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
833 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
834 __cu_len; \
835 })
836diff -urNp linux-3.1.1/arch/ia64/kernel/module.c linux-3.1.1/arch/ia64/kernel/module.c
837--- linux-3.1.1/arch/ia64/kernel/module.c 2011-11-11 15:19:27.000000000 -0500
838+++ linux-3.1.1/arch/ia64/kernel/module.c 2011-11-16 18:39:07.000000000 -0500
839@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
840 void
841 module_free (struct module *mod, void *module_region)
842 {
843- if (mod && mod->arch.init_unw_table &&
844- module_region == mod->module_init) {
845+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
846 unw_remove_unwind_table(mod->arch.init_unw_table);
847 mod->arch.init_unw_table = NULL;
848 }
849@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
850 }
851
852 static inline int
853+in_init_rx (const struct module *mod, uint64_t addr)
854+{
855+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
856+}
857+
858+static inline int
859+in_init_rw (const struct module *mod, uint64_t addr)
860+{
861+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
862+}
863+
864+static inline int
865 in_init (const struct module *mod, uint64_t addr)
866 {
867- return addr - (uint64_t) mod->module_init < mod->init_size;
868+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
869+}
870+
871+static inline int
872+in_core_rx (const struct module *mod, uint64_t addr)
873+{
874+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
875+}
876+
877+static inline int
878+in_core_rw (const struct module *mod, uint64_t addr)
879+{
880+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
881 }
882
883 static inline int
884 in_core (const struct module *mod, uint64_t addr)
885 {
886- return addr - (uint64_t) mod->module_core < mod->core_size;
887+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
888 }
889
890 static inline int
891@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_
892 break;
893
894 case RV_BDREL:
895- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
896+ if (in_init_rx(mod, val))
897+ val -= (uint64_t) mod->module_init_rx;
898+ else if (in_init_rw(mod, val))
899+ val -= (uint64_t) mod->module_init_rw;
900+ else if (in_core_rx(mod, val))
901+ val -= (uint64_t) mod->module_core_rx;
902+ else if (in_core_rw(mod, val))
903+ val -= (uint64_t) mod->module_core_rw;
904 break;
905
906 case RV_LTV:
907@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
908 * addresses have been selected...
909 */
910 uint64_t gp;
911- if (mod->core_size > MAX_LTOFF)
912+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
913 /*
914 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
915 * at the end of the module.
916 */
917- gp = mod->core_size - MAX_LTOFF / 2;
918+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
919 else
920- gp = mod->core_size / 2;
921- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
922+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
923+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
924 mod->arch.gp = gp;
925 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
926 }
927diff -urNp linux-3.1.1/arch/ia64/kernel/sys_ia64.c linux-3.1.1/arch/ia64/kernel/sys_ia64.c
928--- linux-3.1.1/arch/ia64/kernel/sys_ia64.c 2011-11-11 15:19:27.000000000 -0500
929+++ linux-3.1.1/arch/ia64/kernel/sys_ia64.c 2011-11-16 18:39:07.000000000 -0500
930@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
931 if (REGION_NUMBER(addr) == RGN_HPAGE)
932 addr = 0;
933 #endif
934+
935+#ifdef CONFIG_PAX_RANDMMAP
936+ if (mm->pax_flags & MF_PAX_RANDMMAP)
937+ addr = mm->free_area_cache;
938+ else
939+#endif
940+
941 if (!addr)
942 addr = mm->free_area_cache;
943
944@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
945 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
946 /* At this point: (!vma || addr < vma->vm_end). */
947 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
948- if (start_addr != TASK_UNMAPPED_BASE) {
949+ if (start_addr != mm->mmap_base) {
950 /* Start a new search --- just in case we missed some holes. */
951- addr = TASK_UNMAPPED_BASE;
952+ addr = mm->mmap_base;
953 goto full_search;
954 }
955 return -ENOMEM;
956 }
957- if (!vma || addr + len <= vma->vm_start) {
958+ if (check_heap_stack_gap(vma, addr, len)) {
959 /* Remember the address where we stopped this search: */
960 mm->free_area_cache = addr + len;
961 return addr;
962diff -urNp linux-3.1.1/arch/ia64/kernel/vmlinux.lds.S linux-3.1.1/arch/ia64/kernel/vmlinux.lds.S
963--- linux-3.1.1/arch/ia64/kernel/vmlinux.lds.S 2011-11-11 15:19:27.000000000 -0500
964+++ linux-3.1.1/arch/ia64/kernel/vmlinux.lds.S 2011-11-16 18:39:07.000000000 -0500
965@@ -199,7 +199,7 @@ SECTIONS {
966 /* Per-cpu data: */
967 . = ALIGN(PERCPU_PAGE_SIZE);
968 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
969- __phys_per_cpu_start = __per_cpu_load;
970+ __phys_per_cpu_start = per_cpu_load;
971 /*
972 * ensure percpu data fits
973 * into percpu page size
974diff -urNp linux-3.1.1/arch/ia64/mm/fault.c linux-3.1.1/arch/ia64/mm/fault.c
975--- linux-3.1.1/arch/ia64/mm/fault.c 2011-11-11 15:19:27.000000000 -0500
976+++ linux-3.1.1/arch/ia64/mm/fault.c 2011-11-16 18:39:07.000000000 -0500
977@@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned
978 return pte_present(pte);
979 }
980
981+#ifdef CONFIG_PAX_PAGEEXEC
982+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
983+{
984+ unsigned long i;
985+
986+ printk(KERN_ERR "PAX: bytes at PC: ");
987+ for (i = 0; i < 8; i++) {
988+ unsigned int c;
989+ if (get_user(c, (unsigned int *)pc+i))
990+ printk(KERN_CONT "???????? ");
991+ else
992+ printk(KERN_CONT "%08x ", c);
993+ }
994+ printk("\n");
995+}
996+#endif
997+
998 void __kprobes
999 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1000 {
1001@@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long addres
1002 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1003 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1004
1005- if ((vma->vm_flags & mask) != mask)
1006+ if ((vma->vm_flags & mask) != mask) {
1007+
1008+#ifdef CONFIG_PAX_PAGEEXEC
1009+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1010+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1011+ goto bad_area;
1012+
1013+ up_read(&mm->mmap_sem);
1014+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1015+ do_group_exit(SIGKILL);
1016+ }
1017+#endif
1018+
1019 goto bad_area;
1020
1021+ }
1022+
1023 /*
1024 * If for any reason at all we couldn't handle the fault, make
1025 * sure we exit gracefully rather than endlessly redo the
1026diff -urNp linux-3.1.1/arch/ia64/mm/hugetlbpage.c linux-3.1.1/arch/ia64/mm/hugetlbpage.c
1027--- linux-3.1.1/arch/ia64/mm/hugetlbpage.c 2011-11-11 15:19:27.000000000 -0500
1028+++ linux-3.1.1/arch/ia64/mm/hugetlbpage.c 2011-11-16 18:39:07.000000000 -0500
1029@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(
1030 /* At this point: (!vmm || addr < vmm->vm_end). */
1031 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1032 return -ENOMEM;
1033- if (!vmm || (addr + len) <= vmm->vm_start)
1034+ if (check_heap_stack_gap(vmm, addr, len))
1035 return addr;
1036 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1037 }
1038diff -urNp linux-3.1.1/arch/ia64/mm/init.c linux-3.1.1/arch/ia64/mm/init.c
1039--- linux-3.1.1/arch/ia64/mm/init.c 2011-11-11 15:19:27.000000000 -0500
1040+++ linux-3.1.1/arch/ia64/mm/init.c 2011-11-16 18:39:07.000000000 -0500
1041@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1042 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1043 vma->vm_end = vma->vm_start + PAGE_SIZE;
1044 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1045+
1046+#ifdef CONFIG_PAX_PAGEEXEC
1047+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1048+ vma->vm_flags &= ~VM_EXEC;
1049+
1050+#ifdef CONFIG_PAX_MPROTECT
1051+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1052+ vma->vm_flags &= ~VM_MAYEXEC;
1053+#endif
1054+
1055+ }
1056+#endif
1057+
1058 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1059 down_write(&current->mm->mmap_sem);
1060 if (insert_vm_struct(current->mm, vma)) {
1061diff -urNp linux-3.1.1/arch/m32r/lib/usercopy.c linux-3.1.1/arch/m32r/lib/usercopy.c
1062--- linux-3.1.1/arch/m32r/lib/usercopy.c 2011-11-11 15:19:27.000000000 -0500
1063+++ linux-3.1.1/arch/m32r/lib/usercopy.c 2011-11-16 18:39:07.000000000 -0500
1064@@ -14,6 +14,9 @@
1065 unsigned long
1066 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1067 {
1068+ if ((long)n < 0)
1069+ return n;
1070+
1071 prefetch(from);
1072 if (access_ok(VERIFY_WRITE, to, n))
1073 __copy_user(to,from,n);
1074@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1075 unsigned long
1076 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1077 {
1078+ if ((long)n < 0)
1079+ return n;
1080+
1081 prefetchw(to);
1082 if (access_ok(VERIFY_READ, from, n))
1083 __copy_user_zeroing(to,from,n);
1084diff -urNp linux-3.1.1/arch/mips/include/asm/elf.h linux-3.1.1/arch/mips/include/asm/elf.h
1085--- linux-3.1.1/arch/mips/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
1086+++ linux-3.1.1/arch/mips/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
1087@@ -372,13 +372,16 @@ extern const char *__elf_platform;
1088 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1089 #endif
1090
1091+#ifdef CONFIG_PAX_ASLR
1092+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1093+
1094+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1095+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1096+#endif
1097+
1098 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1099 struct linux_binprm;
1100 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1101 int uses_interp);
1102
1103-struct mm_struct;
1104-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1105-#define arch_randomize_brk arch_randomize_brk
1106-
1107 #endif /* _ASM_ELF_H */
1108diff -urNp linux-3.1.1/arch/mips/include/asm/page.h linux-3.1.1/arch/mips/include/asm/page.h
1109--- linux-3.1.1/arch/mips/include/asm/page.h 2011-11-11 15:19:27.000000000 -0500
1110+++ linux-3.1.1/arch/mips/include/asm/page.h 2011-11-16 18:39:07.000000000 -0500
1111@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1112 #ifdef CONFIG_CPU_MIPS32
1113 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1114 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1115- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1116+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1117 #else
1118 typedef struct { unsigned long long pte; } pte_t;
1119 #define pte_val(x) ((x).pte)
1120diff -urNp linux-3.1.1/arch/mips/include/asm/system.h linux-3.1.1/arch/mips/include/asm/system.h
1121--- linux-3.1.1/arch/mips/include/asm/system.h 2011-11-11 15:19:27.000000000 -0500
1122+++ linux-3.1.1/arch/mips/include/asm/system.h 2011-11-16 18:39:07.000000000 -0500
1123@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1124 */
1125 #define __ARCH_WANT_UNLOCKED_CTXSW
1126
1127-extern unsigned long arch_align_stack(unsigned long sp);
1128+#define arch_align_stack(x) ((x) & ~0xfUL)
1129
1130 #endif /* _ASM_SYSTEM_H */
1131diff -urNp linux-3.1.1/arch/mips/kernel/binfmt_elfn32.c linux-3.1.1/arch/mips/kernel/binfmt_elfn32.c
1132--- linux-3.1.1/arch/mips/kernel/binfmt_elfn32.c 2011-11-11 15:19:27.000000000 -0500
1133+++ linux-3.1.1/arch/mips/kernel/binfmt_elfn32.c 2011-11-16 18:39:07.000000000 -0500
1134@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1135 #undef ELF_ET_DYN_BASE
1136 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1137
1138+#ifdef CONFIG_PAX_ASLR
1139+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1140+
1141+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1142+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1143+#endif
1144+
1145 #include <asm/processor.h>
1146 #include <linux/module.h>
1147 #include <linux/elfcore.h>
1148diff -urNp linux-3.1.1/arch/mips/kernel/binfmt_elfo32.c linux-3.1.1/arch/mips/kernel/binfmt_elfo32.c
1149--- linux-3.1.1/arch/mips/kernel/binfmt_elfo32.c 2011-11-11 15:19:27.000000000 -0500
1150+++ linux-3.1.1/arch/mips/kernel/binfmt_elfo32.c 2011-11-16 18:39:07.000000000 -0500
1151@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1152 #undef ELF_ET_DYN_BASE
1153 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1154
1155+#ifdef CONFIG_PAX_ASLR
1156+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1157+
1158+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1159+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1160+#endif
1161+
1162 #include <asm/processor.h>
1163
1164 /*
1165diff -urNp linux-3.1.1/arch/mips/kernel/process.c linux-3.1.1/arch/mips/kernel/process.c
1166--- linux-3.1.1/arch/mips/kernel/process.c 2011-11-11 15:19:27.000000000 -0500
1167+++ linux-3.1.1/arch/mips/kernel/process.c 2011-11-16 18:39:07.000000000 -0500
1168@@ -481,15 +481,3 @@ unsigned long get_wchan(struct task_stru
1169 out:
1170 return pc;
1171 }
1172-
1173-/*
1174- * Don't forget that the stack pointer must be aligned on a 8 bytes
1175- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1176- */
1177-unsigned long arch_align_stack(unsigned long sp)
1178-{
1179- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1180- sp -= get_random_int() & ~PAGE_MASK;
1181-
1182- return sp & ALMASK;
1183-}
1184diff -urNp linux-3.1.1/arch/mips/mm/fault.c linux-3.1.1/arch/mips/mm/fault.c
1185--- linux-3.1.1/arch/mips/mm/fault.c 2011-11-11 15:19:27.000000000 -0500
1186+++ linux-3.1.1/arch/mips/mm/fault.c 2011-11-16 18:39:07.000000000 -0500
1187@@ -28,6 +28,23 @@
1188 #include <asm/highmem.h> /* For VMALLOC_END */
1189 #include <linux/kdebug.h>
1190
1191+#ifdef CONFIG_PAX_PAGEEXEC
1192+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1193+{
1194+ unsigned long i;
1195+
1196+ printk(KERN_ERR "PAX: bytes at PC: ");
1197+ for (i = 0; i < 5; i++) {
1198+ unsigned int c;
1199+ if (get_user(c, (unsigned int *)pc+i))
1200+ printk(KERN_CONT "???????? ");
1201+ else
1202+ printk(KERN_CONT "%08x ", c);
1203+ }
1204+ printk("\n");
1205+}
1206+#endif
1207+
1208 /*
1209 * This routine handles page faults. It determines the address,
1210 * and the problem, and then passes it off to one of the appropriate
1211diff -urNp linux-3.1.1/arch/mips/mm/mmap.c linux-3.1.1/arch/mips/mm/mmap.c
1212--- linux-3.1.1/arch/mips/mm/mmap.c 2011-11-11 15:19:27.000000000 -0500
1213+++ linux-3.1.1/arch/mips/mm/mmap.c 2011-11-16 18:39:07.000000000 -0500
1214@@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_a
1215 do_color_align = 1;
1216
1217 /* requesting a specific address */
1218+
1219+#ifdef CONFIG_PAX_RANDMMAP
1220+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1221+#endif
1222+
1223 if (addr) {
1224 if (do_color_align)
1225 addr = COLOUR_ALIGN(addr, pgoff);
1226@@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_a
1227 addr = PAGE_ALIGN(addr);
1228
1229 vma = find_vma(mm, addr);
1230- if (TASK_SIZE - len >= addr &&
1231- (!vma || addr + len <= vma->vm_start))
1232+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
1233 return addr;
1234 }
1235
1236@@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_a
1237 /* At this point: (!vma || addr < vma->vm_end). */
1238 if (TASK_SIZE - len < addr)
1239 return -ENOMEM;
1240- if (!vma || addr + len <= vma->vm_start)
1241+ if (check_heap_stack_gap(vmm, addr, len))
1242 return addr;
1243 addr = vma->vm_end;
1244 if (do_color_align)
1245@@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_a
1246 /* make sure it can fit in the remaining address space */
1247 if (likely(addr > len)) {
1248 vma = find_vma(mm, addr - len);
1249- if (!vma || addr <= vma->vm_start) {
1250+ if (check_heap_stack_gap(vmm, addr - len, len))
1251 /* cache the address as a hint for next time */
1252 return mm->free_area_cache = addr - len;
1253 }
1254@@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_a
1255 * return with success:
1256 */
1257 vma = find_vma(mm, addr);
1258- if (likely(!vma || addr + len <= vma->vm_start)) {
1259+ if (check_heap_stack_gap(vmm, addr, len)) {
1260 /* cache the address as a hint for next time */
1261 return mm->free_area_cache = addr;
1262 }
1263@@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_str
1264 mm->unmap_area = arch_unmap_area_topdown;
1265 }
1266 }
1267-
1268-static inline unsigned long brk_rnd(void)
1269-{
1270- unsigned long rnd = get_random_int();
1271-
1272- rnd = rnd << PAGE_SHIFT;
1273- /* 8MB for 32bit, 256MB for 64bit */
1274- if (TASK_IS_32BIT_ADDR)
1275- rnd = rnd & 0x7ffffful;
1276- else
1277- rnd = rnd & 0xffffffful;
1278-
1279- return rnd;
1280-}
1281-
1282-unsigned long arch_randomize_brk(struct mm_struct *mm)
1283-{
1284- unsigned long base = mm->brk;
1285- unsigned long ret;
1286-
1287- ret = PAGE_ALIGN(base + brk_rnd());
1288-
1289- if (ret < mm->brk)
1290- return mm->brk;
1291-
1292- return ret;
1293-}
1294diff -urNp linux-3.1.1/arch/parisc/include/asm/elf.h linux-3.1.1/arch/parisc/include/asm/elf.h
1295--- linux-3.1.1/arch/parisc/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
1296+++ linux-3.1.1/arch/parisc/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
1297@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration..
1298
1299 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1300
1301+#ifdef CONFIG_PAX_ASLR
1302+#define PAX_ELF_ET_DYN_BASE 0x10000UL
1303+
1304+#define PAX_DELTA_MMAP_LEN 16
1305+#define PAX_DELTA_STACK_LEN 16
1306+#endif
1307+
1308 /* This yields a mask that user programs can use to figure out what
1309 instruction set this CPU supports. This could be done in user space,
1310 but it's not easy, and we've already done it here. */
1311diff -urNp linux-3.1.1/arch/parisc/include/asm/pgtable.h linux-3.1.1/arch/parisc/include/asm/pgtable.h
1312--- linux-3.1.1/arch/parisc/include/asm/pgtable.h 2011-11-11 15:19:27.000000000 -0500
1313+++ linux-3.1.1/arch/parisc/include/asm/pgtable.h 2011-11-16 18:39:07.000000000 -0500
1314@@ -210,6 +210,17 @@ struct vm_area_struct;
1315 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1316 #define PAGE_COPY PAGE_EXECREAD
1317 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1318+
1319+#ifdef CONFIG_PAX_PAGEEXEC
1320+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1321+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1322+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1323+#else
1324+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1325+# define PAGE_COPY_NOEXEC PAGE_COPY
1326+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1327+#endif
1328+
1329 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1330 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
1331 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
1332diff -urNp linux-3.1.1/arch/parisc/kernel/module.c linux-3.1.1/arch/parisc/kernel/module.c
1333--- linux-3.1.1/arch/parisc/kernel/module.c 2011-11-11 15:19:27.000000000 -0500
1334+++ linux-3.1.1/arch/parisc/kernel/module.c 2011-11-16 18:39:07.000000000 -0500
1335@@ -98,16 +98,38 @@
1336
1337 /* three functions to determine where in the module core
1338 * or init pieces the location is */
1339+static inline int in_init_rx(struct module *me, void *loc)
1340+{
1341+ return (loc >= me->module_init_rx &&
1342+ loc < (me->module_init_rx + me->init_size_rx));
1343+}
1344+
1345+static inline int in_init_rw(struct module *me, void *loc)
1346+{
1347+ return (loc >= me->module_init_rw &&
1348+ loc < (me->module_init_rw + me->init_size_rw));
1349+}
1350+
1351 static inline int in_init(struct module *me, void *loc)
1352 {
1353- return (loc >= me->module_init &&
1354- loc <= (me->module_init + me->init_size));
1355+ return in_init_rx(me, loc) || in_init_rw(me, loc);
1356+}
1357+
1358+static inline int in_core_rx(struct module *me, void *loc)
1359+{
1360+ return (loc >= me->module_core_rx &&
1361+ loc < (me->module_core_rx + me->core_size_rx));
1362+}
1363+
1364+static inline int in_core_rw(struct module *me, void *loc)
1365+{
1366+ return (loc >= me->module_core_rw &&
1367+ loc < (me->module_core_rw + me->core_size_rw));
1368 }
1369
1370 static inline int in_core(struct module *me, void *loc)
1371 {
1372- return (loc >= me->module_core &&
1373- loc <= (me->module_core + me->core_size));
1374+ return in_core_rx(me, loc) || in_core_rw(me, loc);
1375 }
1376
1377 static inline int in_local(struct module *me, void *loc)
1378@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_
1379 }
1380
1381 /* align things a bit */
1382- me->core_size = ALIGN(me->core_size, 16);
1383- me->arch.got_offset = me->core_size;
1384- me->core_size += gots * sizeof(struct got_entry);
1385-
1386- me->core_size = ALIGN(me->core_size, 16);
1387- me->arch.fdesc_offset = me->core_size;
1388- me->core_size += fdescs * sizeof(Elf_Fdesc);
1389+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1390+ me->arch.got_offset = me->core_size_rw;
1391+ me->core_size_rw += gots * sizeof(struct got_entry);
1392+
1393+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1394+ me->arch.fdesc_offset = me->core_size_rw;
1395+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1396
1397 me->arch.got_max = gots;
1398 me->arch.fdesc_max = fdescs;
1399@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module
1400
1401 BUG_ON(value == 0);
1402
1403- got = me->module_core + me->arch.got_offset;
1404+ got = me->module_core_rw + me->arch.got_offset;
1405 for (i = 0; got[i].addr; i++)
1406 if (got[i].addr == value)
1407 goto out;
1408@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module
1409 #ifdef CONFIG_64BIT
1410 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1411 {
1412- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1413+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1414
1415 if (!value) {
1416 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1417@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module
1418
1419 /* Create new one */
1420 fdesc->addr = value;
1421- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1422+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1423 return (Elf_Addr)fdesc;
1424 }
1425 #endif /* CONFIG_64BIT */
1426@@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
1427
1428 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1429 end = table + sechdrs[me->arch.unwind_section].sh_size;
1430- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1431+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1432
1433 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1434 me->arch.unwind_section, table, end, gp);
1435diff -urNp linux-3.1.1/arch/parisc/kernel/sys_parisc.c linux-3.1.1/arch/parisc/kernel/sys_parisc.c
1436--- linux-3.1.1/arch/parisc/kernel/sys_parisc.c 2011-11-11 15:19:27.000000000 -0500
1437+++ linux-3.1.1/arch/parisc/kernel/sys_parisc.c 2011-11-16 18:39:07.000000000 -0500
1438@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1439 /* At this point: (!vma || addr < vma->vm_end). */
1440 if (TASK_SIZE - len < addr)
1441 return -ENOMEM;
1442- if (!vma || addr + len <= vma->vm_start)
1443+ if (check_heap_stack_gap(vma, addr, len))
1444 return addr;
1445 addr = vma->vm_end;
1446 }
1447@@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1448 /* At this point: (!vma || addr < vma->vm_end). */
1449 if (TASK_SIZE - len < addr)
1450 return -ENOMEM;
1451- if (!vma || addr + len <= vma->vm_start)
1452+ if (check_heap_stack_gap(vma, addr, len))
1453 return addr;
1454 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1455 if (addr < vma->vm_end) /* handle wraparound */
1456@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1457 if (flags & MAP_FIXED)
1458 return addr;
1459 if (!addr)
1460- addr = TASK_UNMAPPED_BASE;
1461+ addr = current->mm->mmap_base;
1462
1463 if (filp) {
1464 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1465diff -urNp linux-3.1.1/arch/parisc/kernel/traps.c linux-3.1.1/arch/parisc/kernel/traps.c
1466--- linux-3.1.1/arch/parisc/kernel/traps.c 2011-11-11 15:19:27.000000000 -0500
1467+++ linux-3.1.1/arch/parisc/kernel/traps.c 2011-11-16 18:39:07.000000000 -0500
1468@@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1469
1470 down_read(&current->mm->mmap_sem);
1471 vma = find_vma(current->mm,regs->iaoq[0]);
1472- if (vma && (regs->iaoq[0] >= vma->vm_start)
1473- && (vma->vm_flags & VM_EXEC)) {
1474-
1475+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1476 fault_address = regs->iaoq[0];
1477 fault_space = regs->iasq[0];
1478
1479diff -urNp linux-3.1.1/arch/parisc/mm/fault.c linux-3.1.1/arch/parisc/mm/fault.c
1480--- linux-3.1.1/arch/parisc/mm/fault.c 2011-11-11 15:19:27.000000000 -0500
1481+++ linux-3.1.1/arch/parisc/mm/fault.c 2011-11-16 18:39:07.000000000 -0500
1482@@ -15,6 +15,7 @@
1483 #include <linux/sched.h>
1484 #include <linux/interrupt.h>
1485 #include <linux/module.h>
1486+#include <linux/unistd.h>
1487
1488 #include <asm/uaccess.h>
1489 #include <asm/traps.h>
1490@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1491 static unsigned long
1492 parisc_acctyp(unsigned long code, unsigned int inst)
1493 {
1494- if (code == 6 || code == 16)
1495+ if (code == 6 || code == 7 || code == 16)
1496 return VM_EXEC;
1497
1498 switch (inst & 0xf0000000) {
1499@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1500 }
1501 #endif
1502
1503+#ifdef CONFIG_PAX_PAGEEXEC
1504+/*
1505+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1506+ *
1507+ * returns 1 when task should be killed
1508+ * 2 when rt_sigreturn trampoline was detected
1509+ * 3 when unpatched PLT trampoline was detected
1510+ */
1511+static int pax_handle_fetch_fault(struct pt_regs *regs)
1512+{
1513+
1514+#ifdef CONFIG_PAX_EMUPLT
1515+ int err;
1516+
1517+ do { /* PaX: unpatched PLT emulation */
1518+ unsigned int bl, depwi;
1519+
1520+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1521+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1522+
1523+ if (err)
1524+ break;
1525+
1526+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1527+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1528+
1529+ err = get_user(ldw, (unsigned int *)addr);
1530+ err |= get_user(bv, (unsigned int *)(addr+4));
1531+ err |= get_user(ldw2, (unsigned int *)(addr+8));
1532+
1533+ if (err)
1534+ break;
1535+
1536+ if (ldw == 0x0E801096U &&
1537+ bv == 0xEAC0C000U &&
1538+ ldw2 == 0x0E881095U)
1539+ {
1540+ unsigned int resolver, map;
1541+
1542+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1543+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1544+ if (err)
1545+ break;
1546+
1547+ regs->gr[20] = instruction_pointer(regs)+8;
1548+ regs->gr[21] = map;
1549+ regs->gr[22] = resolver;
1550+ regs->iaoq[0] = resolver | 3UL;
1551+ regs->iaoq[1] = regs->iaoq[0] + 4;
1552+ return 3;
1553+ }
1554+ }
1555+ } while (0);
1556+#endif
1557+
1558+#ifdef CONFIG_PAX_EMUTRAMP
1559+
1560+#ifndef CONFIG_PAX_EMUSIGRT
1561+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1562+ return 1;
1563+#endif
1564+
1565+ do { /* PaX: rt_sigreturn emulation */
1566+ unsigned int ldi1, ldi2, bel, nop;
1567+
1568+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1569+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1570+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1571+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1572+
1573+ if (err)
1574+ break;
1575+
1576+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1577+ ldi2 == 0x3414015AU &&
1578+ bel == 0xE4008200U &&
1579+ nop == 0x08000240U)
1580+ {
1581+ regs->gr[25] = (ldi1 & 2) >> 1;
1582+ regs->gr[20] = __NR_rt_sigreturn;
1583+ regs->gr[31] = regs->iaoq[1] + 16;
1584+ regs->sr[0] = regs->iasq[1];
1585+ regs->iaoq[0] = 0x100UL;
1586+ regs->iaoq[1] = regs->iaoq[0] + 4;
1587+ regs->iasq[0] = regs->sr[2];
1588+ regs->iasq[1] = regs->sr[2];
1589+ return 2;
1590+ }
1591+ } while (0);
1592+#endif
1593+
1594+ return 1;
1595+}
1596+
1597+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1598+{
1599+ unsigned long i;
1600+
1601+ printk(KERN_ERR "PAX: bytes at PC: ");
1602+ for (i = 0; i < 5; i++) {
1603+ unsigned int c;
1604+ if (get_user(c, (unsigned int *)pc+i))
1605+ printk(KERN_CONT "???????? ");
1606+ else
1607+ printk(KERN_CONT "%08x ", c);
1608+ }
1609+ printk("\n");
1610+}
1611+#endif
1612+
1613 int fixup_exception(struct pt_regs *regs)
1614 {
1615 const struct exception_table_entry *fix;
1616@@ -192,8 +303,33 @@ good_area:
1617
1618 acc_type = parisc_acctyp(code,regs->iir);
1619
1620- if ((vma->vm_flags & acc_type) != acc_type)
1621+ if ((vma->vm_flags & acc_type) != acc_type) {
1622+
1623+#ifdef CONFIG_PAX_PAGEEXEC
1624+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1625+ (address & ~3UL) == instruction_pointer(regs))
1626+ {
1627+ up_read(&mm->mmap_sem);
1628+ switch (pax_handle_fetch_fault(regs)) {
1629+
1630+#ifdef CONFIG_PAX_EMUPLT
1631+ case 3:
1632+ return;
1633+#endif
1634+
1635+#ifdef CONFIG_PAX_EMUTRAMP
1636+ case 2:
1637+ return;
1638+#endif
1639+
1640+ }
1641+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1642+ do_group_exit(SIGKILL);
1643+ }
1644+#endif
1645+
1646 goto bad_area;
1647+ }
1648
1649 /*
1650 * If for any reason at all we couldn't handle the fault, make
1651diff -urNp linux-3.1.1/arch/powerpc/include/asm/elf.h linux-3.1.1/arch/powerpc/include/asm/elf.h
1652--- linux-3.1.1/arch/powerpc/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
1653+++ linux-3.1.1/arch/powerpc/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
1654@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
1655 the loader. We need to make sure that it is out of the way of the program
1656 that it will "exec", and that there is sufficient room for the brk. */
1657
1658-extern unsigned long randomize_et_dyn(unsigned long base);
1659-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
1660+#define ELF_ET_DYN_BASE (0x20000000)
1661+
1662+#ifdef CONFIG_PAX_ASLR
1663+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
1664+
1665+#ifdef __powerpc64__
1666+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
1667+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
1668+#else
1669+#define PAX_DELTA_MMAP_LEN 15
1670+#define PAX_DELTA_STACK_LEN 15
1671+#endif
1672+#endif
1673
1674 /*
1675 * Our registers are always unsigned longs, whether we're a 32 bit
1676@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(s
1677 (0x7ff >> (PAGE_SHIFT - 12)) : \
1678 (0x3ffff >> (PAGE_SHIFT - 12)))
1679
1680-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1681-#define arch_randomize_brk arch_randomize_brk
1682-
1683 #endif /* __KERNEL__ */
1684
1685 /*
1686diff -urNp linux-3.1.1/arch/powerpc/include/asm/kmap_types.h linux-3.1.1/arch/powerpc/include/asm/kmap_types.h
1687--- linux-3.1.1/arch/powerpc/include/asm/kmap_types.h 2011-11-11 15:19:27.000000000 -0500
1688+++ linux-3.1.1/arch/powerpc/include/asm/kmap_types.h 2011-11-16 18:39:07.000000000 -0500
1689@@ -27,6 +27,7 @@ enum km_type {
1690 KM_PPC_SYNC_PAGE,
1691 KM_PPC_SYNC_ICACHE,
1692 KM_KDB,
1693+ KM_CLEARPAGE,
1694 KM_TYPE_NR
1695 };
1696
1697diff -urNp linux-3.1.1/arch/powerpc/include/asm/mman.h linux-3.1.1/arch/powerpc/include/asm/mman.h
1698--- linux-3.1.1/arch/powerpc/include/asm/mman.h 2011-11-11 15:19:27.000000000 -0500
1699+++ linux-3.1.1/arch/powerpc/include/asm/mman.h 2011-11-16 18:39:07.000000000 -0500
1700@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm
1701 }
1702 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
1703
1704-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
1705+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
1706 {
1707 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
1708 }
1709diff -urNp linux-3.1.1/arch/powerpc/include/asm/page_64.h linux-3.1.1/arch/powerpc/include/asm/page_64.h
1710--- linux-3.1.1/arch/powerpc/include/asm/page_64.h 2011-11-11 15:19:27.000000000 -0500
1711+++ linux-3.1.1/arch/powerpc/include/asm/page_64.h 2011-11-16 18:39:07.000000000 -0500
1712@@ -155,15 +155,18 @@ do { \
1713 * stack by default, so in the absence of a PT_GNU_STACK program header
1714 * we turn execute permission off.
1715 */
1716-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1717- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1718+#define VM_STACK_DEFAULT_FLAGS32 \
1719+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1720+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1721
1722 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1723 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1724
1725+#ifndef CONFIG_PAX_PAGEEXEC
1726 #define VM_STACK_DEFAULT_FLAGS \
1727 (is_32bit_task() ? \
1728 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
1729+#endif
1730
1731 #include <asm-generic/getorder.h>
1732
1733diff -urNp linux-3.1.1/arch/powerpc/include/asm/page.h linux-3.1.1/arch/powerpc/include/asm/page.h
1734--- linux-3.1.1/arch/powerpc/include/asm/page.h 2011-11-11 15:19:27.000000000 -0500
1735+++ linux-3.1.1/arch/powerpc/include/asm/page.h 2011-11-16 18:39:07.000000000 -0500
1736@@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr;
1737 * and needs to be executable. This means the whole heap ends
1738 * up being executable.
1739 */
1740-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1741- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1742+#define VM_DATA_DEFAULT_FLAGS32 \
1743+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1744+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1745
1746 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1747 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1748@@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr;
1749 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
1750 #endif
1751
1752+#define ktla_ktva(addr) (addr)
1753+#define ktva_ktla(addr) (addr)
1754+
1755 #ifndef __ASSEMBLY__
1756
1757 #undef STRICT_MM_TYPECHECKS
1758diff -urNp linux-3.1.1/arch/powerpc/include/asm/pgtable.h linux-3.1.1/arch/powerpc/include/asm/pgtable.h
1759--- linux-3.1.1/arch/powerpc/include/asm/pgtable.h 2011-11-11 15:19:27.000000000 -0500
1760+++ linux-3.1.1/arch/powerpc/include/asm/pgtable.h 2011-11-16 18:39:07.000000000 -0500
1761@@ -2,6 +2,7 @@
1762 #define _ASM_POWERPC_PGTABLE_H
1763 #ifdef __KERNEL__
1764
1765+#include <linux/const.h>
1766 #ifndef __ASSEMBLY__
1767 #include <asm/processor.h> /* For TASK_SIZE */
1768 #include <asm/mmu.h>
1769diff -urNp linux-3.1.1/arch/powerpc/include/asm/pte-hash32.h linux-3.1.1/arch/powerpc/include/asm/pte-hash32.h
1770--- linux-3.1.1/arch/powerpc/include/asm/pte-hash32.h 2011-11-11 15:19:27.000000000 -0500
1771+++ linux-3.1.1/arch/powerpc/include/asm/pte-hash32.h 2011-11-16 18:39:07.000000000 -0500
1772@@ -21,6 +21,7 @@
1773 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
1774 #define _PAGE_USER 0x004 /* usermode access allowed */
1775 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
1776+#define _PAGE_EXEC _PAGE_GUARDED
1777 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
1778 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
1779 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
1780diff -urNp linux-3.1.1/arch/powerpc/include/asm/reg.h linux-3.1.1/arch/powerpc/include/asm/reg.h
1781--- linux-3.1.1/arch/powerpc/include/asm/reg.h 2011-11-11 15:19:27.000000000 -0500
1782+++ linux-3.1.1/arch/powerpc/include/asm/reg.h 2011-11-16 18:39:07.000000000 -0500
1783@@ -212,6 +212,7 @@
1784 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
1785 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
1786 #define DSISR_NOHPTE 0x40000000 /* no translation found */
1787+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
1788 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
1789 #define DSISR_ISSTORE 0x02000000 /* access was a store */
1790 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
1791diff -urNp linux-3.1.1/arch/powerpc/include/asm/system.h linux-3.1.1/arch/powerpc/include/asm/system.h
1792--- linux-3.1.1/arch/powerpc/include/asm/system.h 2011-11-11 15:19:27.000000000 -0500
1793+++ linux-3.1.1/arch/powerpc/include/asm/system.h 2011-11-16 18:39:07.000000000 -0500
1794@@ -530,7 +530,7 @@ __cmpxchg_local(volatile void *ptr, unsi
1795 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
1796 #endif
1797
1798-extern unsigned long arch_align_stack(unsigned long sp);
1799+#define arch_align_stack(x) ((x) & ~0xfUL)
1800
1801 /* Used in very early kernel initialization. */
1802 extern unsigned long reloc_offset(void);
1803diff -urNp linux-3.1.1/arch/powerpc/include/asm/uaccess.h linux-3.1.1/arch/powerpc/include/asm/uaccess.h
1804--- linux-3.1.1/arch/powerpc/include/asm/uaccess.h 2011-11-11 15:19:27.000000000 -0500
1805+++ linux-3.1.1/arch/powerpc/include/asm/uaccess.h 2011-11-16 18:39:07.000000000 -0500
1806@@ -13,6 +13,8 @@
1807 #define VERIFY_READ 0
1808 #define VERIFY_WRITE 1
1809
1810+extern void check_object_size(const void *ptr, unsigned long n, bool to);
1811+
1812 /*
1813 * The fs value determines whether argument validity checking should be
1814 * performed or not. If get_fs() == USER_DS, checking is performed, with
1815@@ -327,52 +329,6 @@ do { \
1816 extern unsigned long __copy_tofrom_user(void __user *to,
1817 const void __user *from, unsigned long size);
1818
1819-#ifndef __powerpc64__
1820-
1821-static inline unsigned long copy_from_user(void *to,
1822- const void __user *from, unsigned long n)
1823-{
1824- unsigned long over;
1825-
1826- if (access_ok(VERIFY_READ, from, n))
1827- return __copy_tofrom_user((__force void __user *)to, from, n);
1828- if ((unsigned long)from < TASK_SIZE) {
1829- over = (unsigned long)from + n - TASK_SIZE;
1830- return __copy_tofrom_user((__force void __user *)to, from,
1831- n - over) + over;
1832- }
1833- return n;
1834-}
1835-
1836-static inline unsigned long copy_to_user(void __user *to,
1837- const void *from, unsigned long n)
1838-{
1839- unsigned long over;
1840-
1841- if (access_ok(VERIFY_WRITE, to, n))
1842- return __copy_tofrom_user(to, (__force void __user *)from, n);
1843- if ((unsigned long)to < TASK_SIZE) {
1844- over = (unsigned long)to + n - TASK_SIZE;
1845- return __copy_tofrom_user(to, (__force void __user *)from,
1846- n - over) + over;
1847- }
1848- return n;
1849-}
1850-
1851-#else /* __powerpc64__ */
1852-
1853-#define __copy_in_user(to, from, size) \
1854- __copy_tofrom_user((to), (from), (size))
1855-
1856-extern unsigned long copy_from_user(void *to, const void __user *from,
1857- unsigned long n);
1858-extern unsigned long copy_to_user(void __user *to, const void *from,
1859- unsigned long n);
1860-extern unsigned long copy_in_user(void __user *to, const void __user *from,
1861- unsigned long n);
1862-
1863-#endif /* __powerpc64__ */
1864-
1865 static inline unsigned long __copy_from_user_inatomic(void *to,
1866 const void __user *from, unsigned long n)
1867 {
1868@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
1869 if (ret == 0)
1870 return 0;
1871 }
1872+
1873+ if (!__builtin_constant_p(n))
1874+ check_object_size(to, n, false);
1875+
1876 return __copy_tofrom_user((__force void __user *)to, from, n);
1877 }
1878
1879@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
1880 if (ret == 0)
1881 return 0;
1882 }
1883+
1884+ if (!__builtin_constant_p(n))
1885+ check_object_size(from, n, true);
1886+
1887 return __copy_tofrom_user(to, (__force const void __user *)from, n);
1888 }
1889
1890@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
1891 return __copy_to_user_inatomic(to, from, size);
1892 }
1893
1894+#ifndef __powerpc64__
1895+
1896+static inline unsigned long __must_check copy_from_user(void *to,
1897+ const void __user *from, unsigned long n)
1898+{
1899+ unsigned long over;
1900+
1901+ if ((long)n < 0)
1902+ return n;
1903+
1904+ if (access_ok(VERIFY_READ, from, n)) {
1905+ if (!__builtin_constant_p(n))
1906+ check_object_size(to, n, false);
1907+ return __copy_tofrom_user((__force void __user *)to, from, n);
1908+ }
1909+ if ((unsigned long)from < TASK_SIZE) {
1910+ over = (unsigned long)from + n - TASK_SIZE;
1911+ if (!__builtin_constant_p(n - over))
1912+ check_object_size(to, n - over, false);
1913+ return __copy_tofrom_user((__force void __user *)to, from,
1914+ n - over) + over;
1915+ }
1916+ return n;
1917+}
1918+
1919+static inline unsigned long __must_check copy_to_user(void __user *to,
1920+ const void *from, unsigned long n)
1921+{
1922+ unsigned long over;
1923+
1924+ if ((long)n < 0)
1925+ return n;
1926+
1927+ if (access_ok(VERIFY_WRITE, to, n)) {
1928+ if (!__builtin_constant_p(n))
1929+ check_object_size(from, n, true);
1930+ return __copy_tofrom_user(to, (__force void __user *)from, n);
1931+ }
1932+ if ((unsigned long)to < TASK_SIZE) {
1933+ over = (unsigned long)to + n - TASK_SIZE;
1934+ if (!__builtin_constant_p(n))
1935+ check_object_size(from, n - over, true);
1936+ return __copy_tofrom_user(to, (__force void __user *)from,
1937+ n - over) + over;
1938+ }
1939+ return n;
1940+}
1941+
1942+#else /* __powerpc64__ */
1943+
1944+#define __copy_in_user(to, from, size) \
1945+ __copy_tofrom_user((to), (from), (size))
1946+
1947+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1948+{
1949+ if ((long)n < 0 || n > INT_MAX)
1950+ return n;
1951+
1952+ if (!__builtin_constant_p(n))
1953+ check_object_size(to, n, false);
1954+
1955+ if (likely(access_ok(VERIFY_READ, from, n)))
1956+ n = __copy_from_user(to, from, n);
1957+ else
1958+ memset(to, 0, n);
1959+ return n;
1960+}
1961+
1962+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1963+{
1964+ if ((long)n < 0 || n > INT_MAX)
1965+ return n;
1966+
1967+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
1968+ if (!__builtin_constant_p(n))
1969+ check_object_size(from, n, true);
1970+ n = __copy_to_user(to, from, n);
1971+ }
1972+ return n;
1973+}
1974+
1975+extern unsigned long copy_in_user(void __user *to, const void __user *from,
1976+ unsigned long n);
1977+
1978+#endif /* __powerpc64__ */
1979+
1980 extern unsigned long __clear_user(void __user *addr, unsigned long size);
1981
1982 static inline unsigned long clear_user(void __user *addr, unsigned long size)
1983diff -urNp linux-3.1.1/arch/powerpc/kernel/exceptions-64e.S linux-3.1.1/arch/powerpc/kernel/exceptions-64e.S
1984--- linux-3.1.1/arch/powerpc/kernel/exceptions-64e.S 2011-11-11 15:19:27.000000000 -0500
1985+++ linux-3.1.1/arch/powerpc/kernel/exceptions-64e.S 2011-11-16 18:39:07.000000000 -0500
1986@@ -587,6 +587,7 @@ storage_fault_common:
1987 std r14,_DAR(r1)
1988 std r15,_DSISR(r1)
1989 addi r3,r1,STACK_FRAME_OVERHEAD
1990+ bl .save_nvgprs
1991 mr r4,r14
1992 mr r5,r15
1993 ld r14,PACA_EXGEN+EX_R14(r13)
1994@@ -596,8 +597,7 @@ storage_fault_common:
1995 cmpdi r3,0
1996 bne- 1f
1997 b .ret_from_except_lite
1998-1: bl .save_nvgprs
1999- mr r5,r3
2000+1: mr r5,r3
2001 addi r3,r1,STACK_FRAME_OVERHEAD
2002 ld r4,_DAR(r1)
2003 bl .bad_page_fault
2004diff -urNp linux-3.1.1/arch/powerpc/kernel/exceptions-64s.S linux-3.1.1/arch/powerpc/kernel/exceptions-64s.S
2005--- linux-3.1.1/arch/powerpc/kernel/exceptions-64s.S 2011-11-11 15:19:27.000000000 -0500
2006+++ linux-3.1.1/arch/powerpc/kernel/exceptions-64s.S 2011-11-16 18:39:07.000000000 -0500
2007@@ -1014,10 +1014,10 @@ handle_page_fault:
2008 11: ld r4,_DAR(r1)
2009 ld r5,_DSISR(r1)
2010 addi r3,r1,STACK_FRAME_OVERHEAD
2011+ bl .save_nvgprs
2012 bl .do_page_fault
2013 cmpdi r3,0
2014 beq+ 13f
2015- bl .save_nvgprs
2016 mr r5,r3
2017 addi r3,r1,STACK_FRAME_OVERHEAD
2018 lwz r4,_DAR(r1)
2019diff -urNp linux-3.1.1/arch/powerpc/kernel/module_32.c linux-3.1.1/arch/powerpc/kernel/module_32.c
2020--- linux-3.1.1/arch/powerpc/kernel/module_32.c 2011-11-11 15:19:27.000000000 -0500
2021+++ linux-3.1.1/arch/powerpc/kernel/module_32.c 2011-11-16 18:39:07.000000000 -0500
2022@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2023 me->arch.core_plt_section = i;
2024 }
2025 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2026- printk("Module doesn't contain .plt or .init.plt sections.\n");
2027+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2028 return -ENOEXEC;
2029 }
2030
2031@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *locati
2032
2033 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2034 /* Init, or core PLT? */
2035- if (location >= mod->module_core
2036- && location < mod->module_core + mod->core_size)
2037+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2038+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2039 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2040- else
2041+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2042+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2043 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2044+ else {
2045+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2046+ return ~0UL;
2047+ }
2048
2049 /* Find this entry, or if that fails, the next avail. entry */
2050 while (entry->jump[0]) {
2051diff -urNp linux-3.1.1/arch/powerpc/kernel/process.c linux-3.1.1/arch/powerpc/kernel/process.c
2052--- linux-3.1.1/arch/powerpc/kernel/process.c 2011-11-11 15:19:27.000000000 -0500
2053+++ linux-3.1.1/arch/powerpc/kernel/process.c 2011-11-16 18:40:08.000000000 -0500
2054@@ -682,8 +682,8 @@ void show_regs(struct pt_regs * regs)
2055 * Lookup NIP late so we have the best change of getting the
2056 * above info out without failing
2057 */
2058- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2059- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2060+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2061+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2062 #endif
2063 show_stack(current, (unsigned long *) regs->gpr[1]);
2064 if (!user_mode(regs))
2065@@ -1187,10 +1187,10 @@ void show_stack(struct task_struct *tsk,
2066 newsp = stack[0];
2067 ip = stack[STACK_FRAME_LR_SAVE];
2068 if (!firstframe || ip != lr) {
2069- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2070+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2071 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2072 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2073- printk(" (%pS)",
2074+ printk(" (%pA)",
2075 (void *)current->ret_stack[curr_frame].ret);
2076 curr_frame--;
2077 }
2078@@ -1210,7 +1210,7 @@ void show_stack(struct task_struct *tsk,
2079 struct pt_regs *regs = (struct pt_regs *)
2080 (sp + STACK_FRAME_OVERHEAD);
2081 lr = regs->link;
2082- printk("--- Exception: %lx at %pS\n LR = %pS\n",
2083+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
2084 regs->trap, (void *)regs->nip, (void *)lr);
2085 firstframe = 1;
2086 }
2087@@ -1285,58 +1285,3 @@ void thread_info_cache_init(void)
2088 }
2089
2090 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2091-
2092-unsigned long arch_align_stack(unsigned long sp)
2093-{
2094- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2095- sp -= get_random_int() & ~PAGE_MASK;
2096- return sp & ~0xf;
2097-}
2098-
2099-static inline unsigned long brk_rnd(void)
2100-{
2101- unsigned long rnd = 0;
2102-
2103- /* 8MB for 32bit, 1GB for 64bit */
2104- if (is_32bit_task())
2105- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2106- else
2107- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2108-
2109- return rnd << PAGE_SHIFT;
2110-}
2111-
2112-unsigned long arch_randomize_brk(struct mm_struct *mm)
2113-{
2114- unsigned long base = mm->brk;
2115- unsigned long ret;
2116-
2117-#ifdef CONFIG_PPC_STD_MMU_64
2118- /*
2119- * If we are using 1TB segments and we are allowed to randomise
2120- * the heap, we can put it above 1TB so it is backed by a 1TB
2121- * segment. Otherwise the heap will be in the bottom 1TB
2122- * which always uses 256MB segments and this may result in a
2123- * performance penalty.
2124- */
2125- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2126- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2127-#endif
2128-
2129- ret = PAGE_ALIGN(base + brk_rnd());
2130-
2131- if (ret < mm->brk)
2132- return mm->brk;
2133-
2134- return ret;
2135-}
2136-
2137-unsigned long randomize_et_dyn(unsigned long base)
2138-{
2139- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2140-
2141- if (ret < base)
2142- return base;
2143-
2144- return ret;
2145-}
2146diff -urNp linux-3.1.1/arch/powerpc/kernel/signal_32.c linux-3.1.1/arch/powerpc/kernel/signal_32.c
2147--- linux-3.1.1/arch/powerpc/kernel/signal_32.c 2011-11-11 15:19:27.000000000 -0500
2148+++ linux-3.1.1/arch/powerpc/kernel/signal_32.c 2011-11-16 18:39:07.000000000 -0500
2149@@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig
2150 /* Save user registers on the stack */
2151 frame = &rt_sf->uc.uc_mcontext;
2152 addr = frame;
2153- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2154+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2155 if (save_user_regs(regs, frame, 0, 1))
2156 goto badframe;
2157 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2158diff -urNp linux-3.1.1/arch/powerpc/kernel/signal_64.c linux-3.1.1/arch/powerpc/kernel/signal_64.c
2159--- linux-3.1.1/arch/powerpc/kernel/signal_64.c 2011-11-11 15:19:27.000000000 -0500
2160+++ linux-3.1.1/arch/powerpc/kernel/signal_64.c 2011-11-16 18:39:07.000000000 -0500
2161@@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct
2162 current->thread.fpscr.val = 0;
2163
2164 /* Set up to return from userspace. */
2165- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2166+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2167 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2168 } else {
2169 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2170diff -urNp linux-3.1.1/arch/powerpc/kernel/traps.c linux-3.1.1/arch/powerpc/kernel/traps.c
2171--- linux-3.1.1/arch/powerpc/kernel/traps.c 2011-11-11 15:19:27.000000000 -0500
2172+++ linux-3.1.1/arch/powerpc/kernel/traps.c 2011-11-16 18:40:08.000000000 -0500
2173@@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2174 static inline void pmac_backlight_unblank(void) { }
2175 #endif
2176
2177+extern void gr_handle_kernel_exploit(void);
2178+
2179 int die(const char *str, struct pt_regs *regs, long err)
2180 {
2181 static struct {
2182@@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs
2183 if (panic_on_oops)
2184 panic("Fatal exception");
2185
2186+ gr_handle_kernel_exploit();
2187+
2188 oops_exit();
2189 do_exit(err);
2190
2191diff -urNp linux-3.1.1/arch/powerpc/kernel/vdso.c linux-3.1.1/arch/powerpc/kernel/vdso.c
2192--- linux-3.1.1/arch/powerpc/kernel/vdso.c 2011-11-11 15:19:27.000000000 -0500
2193+++ linux-3.1.1/arch/powerpc/kernel/vdso.c 2011-11-16 18:39:07.000000000 -0500
2194@@ -36,6 +36,7 @@
2195 #include <asm/firmware.h>
2196 #include <asm/vdso.h>
2197 #include <asm/vdso_datapage.h>
2198+#include <asm/mman.h>
2199
2200 #include "setup.h"
2201
2202@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2203 vdso_base = VDSO32_MBASE;
2204 #endif
2205
2206- current->mm->context.vdso_base = 0;
2207+ current->mm->context.vdso_base = ~0UL;
2208
2209 /* vDSO has a problem and was disabled, just don't "enable" it for the
2210 * process
2211@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2212 vdso_base = get_unmapped_area(NULL, vdso_base,
2213 (vdso_pages << PAGE_SHIFT) +
2214 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2215- 0, 0);
2216+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
2217 if (IS_ERR_VALUE(vdso_base)) {
2218 rc = vdso_base;
2219 goto fail_mmapsem;
2220diff -urNp linux-3.1.1/arch/powerpc/lib/usercopy_64.c linux-3.1.1/arch/powerpc/lib/usercopy_64.c
2221--- linux-3.1.1/arch/powerpc/lib/usercopy_64.c 2011-11-11 15:19:27.000000000 -0500
2222+++ linux-3.1.1/arch/powerpc/lib/usercopy_64.c 2011-11-16 18:39:07.000000000 -0500
2223@@ -9,22 +9,6 @@
2224 #include <linux/module.h>
2225 #include <asm/uaccess.h>
2226
2227-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2228-{
2229- if (likely(access_ok(VERIFY_READ, from, n)))
2230- n = __copy_from_user(to, from, n);
2231- else
2232- memset(to, 0, n);
2233- return n;
2234-}
2235-
2236-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2237-{
2238- if (likely(access_ok(VERIFY_WRITE, to, n)))
2239- n = __copy_to_user(to, from, n);
2240- return n;
2241-}
2242-
2243 unsigned long copy_in_user(void __user *to, const void __user *from,
2244 unsigned long n)
2245 {
2246@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2247 return n;
2248 }
2249
2250-EXPORT_SYMBOL(copy_from_user);
2251-EXPORT_SYMBOL(copy_to_user);
2252 EXPORT_SYMBOL(copy_in_user);
2253
2254diff -urNp linux-3.1.1/arch/powerpc/mm/fault.c linux-3.1.1/arch/powerpc/mm/fault.c
2255--- linux-3.1.1/arch/powerpc/mm/fault.c 2011-11-11 15:19:27.000000000 -0500
2256+++ linux-3.1.1/arch/powerpc/mm/fault.c 2011-11-16 18:39:07.000000000 -0500
2257@@ -32,6 +32,10 @@
2258 #include <linux/perf_event.h>
2259 #include <linux/magic.h>
2260 #include <linux/ratelimit.h>
2261+#include <linux/slab.h>
2262+#include <linux/pagemap.h>
2263+#include <linux/compiler.h>
2264+#include <linux/unistd.h>
2265
2266 #include <asm/firmware.h>
2267 #include <asm/page.h>
2268@@ -43,6 +47,7 @@
2269 #include <asm/tlbflush.h>
2270 #include <asm/siginfo.h>
2271 #include <mm/mmu_decl.h>
2272+#include <asm/ptrace.h>
2273
2274 #ifdef CONFIG_KPROBES
2275 static inline int notify_page_fault(struct pt_regs *regs)
2276@@ -66,6 +71,33 @@ static inline int notify_page_fault(stru
2277 }
2278 #endif
2279
2280+#ifdef CONFIG_PAX_PAGEEXEC
2281+/*
2282+ * PaX: decide what to do with offenders (regs->nip = fault address)
2283+ *
2284+ * returns 1 when task should be killed
2285+ */
2286+static int pax_handle_fetch_fault(struct pt_regs *regs)
2287+{
2288+ return 1;
2289+}
2290+
2291+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2292+{
2293+ unsigned long i;
2294+
2295+ printk(KERN_ERR "PAX: bytes at PC: ");
2296+ for (i = 0; i < 5; i++) {
2297+ unsigned int c;
2298+ if (get_user(c, (unsigned int __user *)pc+i))
2299+ printk(KERN_CONT "???????? ");
2300+ else
2301+ printk(KERN_CONT "%08x ", c);
2302+ }
2303+ printk("\n");
2304+}
2305+#endif
2306+
2307 /*
2308 * Check whether the instruction at regs->nip is a store using
2309 * an update addressing form which will update r1.
2310@@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_re
2311 * indicate errors in DSISR but can validly be set in SRR1.
2312 */
2313 if (trap == 0x400)
2314- error_code &= 0x48200000;
2315+ error_code &= 0x58200000;
2316 else
2317 is_write = error_code & DSISR_ISSTORE;
2318 #else
2319@@ -259,7 +291,7 @@ good_area:
2320 * "undefined". Of those that can be set, this is the only
2321 * one which seems bad.
2322 */
2323- if (error_code & 0x10000000)
2324+ if (error_code & DSISR_GUARDED)
2325 /* Guarded storage error. */
2326 goto bad_area;
2327 #endif /* CONFIG_8xx */
2328@@ -274,7 +306,7 @@ good_area:
2329 * processors use the same I/D cache coherency mechanism
2330 * as embedded.
2331 */
2332- if (error_code & DSISR_PROTFAULT)
2333+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2334 goto bad_area;
2335 #endif /* CONFIG_PPC_STD_MMU */
2336
2337@@ -343,6 +375,23 @@ bad_area:
2338 bad_area_nosemaphore:
2339 /* User mode accesses cause a SIGSEGV */
2340 if (user_mode(regs)) {
2341+
2342+#ifdef CONFIG_PAX_PAGEEXEC
2343+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2344+#ifdef CONFIG_PPC_STD_MMU
2345+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2346+#else
2347+ if (is_exec && regs->nip == address) {
2348+#endif
2349+ switch (pax_handle_fetch_fault(regs)) {
2350+ }
2351+
2352+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2353+ do_group_exit(SIGKILL);
2354+ }
2355+ }
2356+#endif
2357+
2358 _exception(SIGSEGV, regs, code, address);
2359 return 0;
2360 }
2361diff -urNp linux-3.1.1/arch/powerpc/mm/mmap_64.c linux-3.1.1/arch/powerpc/mm/mmap_64.c
2362--- linux-3.1.1/arch/powerpc/mm/mmap_64.c 2011-11-11 15:19:27.000000000 -0500
2363+++ linux-3.1.1/arch/powerpc/mm/mmap_64.c 2011-11-16 18:39:07.000000000 -0500
2364@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2365 */
2366 if (mmap_is_legacy()) {
2367 mm->mmap_base = TASK_UNMAPPED_BASE;
2368+
2369+#ifdef CONFIG_PAX_RANDMMAP
2370+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2371+ mm->mmap_base += mm->delta_mmap;
2372+#endif
2373+
2374 mm->get_unmapped_area = arch_get_unmapped_area;
2375 mm->unmap_area = arch_unmap_area;
2376 } else {
2377 mm->mmap_base = mmap_base();
2378+
2379+#ifdef CONFIG_PAX_RANDMMAP
2380+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2381+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2382+#endif
2383+
2384 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2385 mm->unmap_area = arch_unmap_area_topdown;
2386 }
2387diff -urNp linux-3.1.1/arch/powerpc/mm/slice.c linux-3.1.1/arch/powerpc/mm/slice.c
2388--- linux-3.1.1/arch/powerpc/mm/slice.c 2011-11-11 15:19:27.000000000 -0500
2389+++ linux-3.1.1/arch/powerpc/mm/slice.c 2011-11-16 18:39:07.000000000 -0500
2390@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
2391 if ((mm->task_size - len) < addr)
2392 return 0;
2393 vma = find_vma(mm, addr);
2394- return (!vma || (addr + len) <= vma->vm_start);
2395+ return check_heap_stack_gap(vma, addr, len);
2396 }
2397
2398 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2399@@ -256,7 +256,7 @@ full_search:
2400 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2401 continue;
2402 }
2403- if (!vma || addr + len <= vma->vm_start) {
2404+ if (check_heap_stack_gap(vma, addr, len)) {
2405 /*
2406 * Remember the place where we stopped the search:
2407 */
2408@@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
2409 }
2410 }
2411
2412- addr = mm->mmap_base;
2413- while (addr > len) {
2414+ if (mm->mmap_base < len)
2415+ addr = -ENOMEM;
2416+ else
2417+ addr = mm->mmap_base - len;
2418+
2419+ while (!IS_ERR_VALUE(addr)) {
2420 /* Go down by chunk size */
2421- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2422+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
2423
2424 /* Check for hit with different page size */
2425 mask = slice_range_to_mask(addr, len);
2426@@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
2427 * return with success:
2428 */
2429 vma = find_vma(mm, addr);
2430- if (!vma || (addr + len) <= vma->vm_start) {
2431+ if (check_heap_stack_gap(vma, addr, len)) {
2432 /* remember the address as a hint for next time */
2433 if (use_cache)
2434 mm->free_area_cache = addr;
2435@@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
2436 mm->cached_hole_size = vma->vm_start - addr;
2437
2438 /* try just below the current vma->vm_start */
2439- addr = vma->vm_start;
2440+ addr = skip_heap_stack_gap(vma, len);
2441 }
2442
2443 /*
2444@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
2445 if (fixed && addr > (mm->task_size - len))
2446 return -EINVAL;
2447
2448+#ifdef CONFIG_PAX_RANDMMAP
2449+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2450+ addr = 0;
2451+#endif
2452+
2453 /* If hint, make sure it matches our alignment restrictions */
2454 if (!fixed && addr) {
2455 addr = _ALIGN_UP(addr, 1ul << pshift);
2456diff -urNp linux-3.1.1/arch/s390/include/asm/elf.h linux-3.1.1/arch/s390/include/asm/elf.h
2457--- linux-3.1.1/arch/s390/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
2458+++ linux-3.1.1/arch/s390/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
2459@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2460 the loader. We need to make sure that it is out of the way of the program
2461 that it will "exec", and that there is sufficient room for the brk. */
2462
2463-extern unsigned long randomize_et_dyn(unsigned long base);
2464-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2465+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2466+
2467+#ifdef CONFIG_PAX_ASLR
2468+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2469+
2470+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2471+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2472+#endif
2473
2474 /* This yields a mask that user programs can use to figure out what
2475 instruction set this CPU supports. */
2476@@ -211,7 +217,4 @@ struct linux_binprm;
2477 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2478 int arch_setup_additional_pages(struct linux_binprm *, int);
2479
2480-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2481-#define arch_randomize_brk arch_randomize_brk
2482-
2483 #endif
2484diff -urNp linux-3.1.1/arch/s390/include/asm/system.h linux-3.1.1/arch/s390/include/asm/system.h
2485--- linux-3.1.1/arch/s390/include/asm/system.h 2011-11-11 15:19:27.000000000 -0500
2486+++ linux-3.1.1/arch/s390/include/asm/system.h 2011-11-16 18:39:07.000000000 -0500
2487@@ -256,7 +256,7 @@ extern void (*_machine_restart)(char *co
2488 extern void (*_machine_halt)(void);
2489 extern void (*_machine_power_off)(void);
2490
2491-extern unsigned long arch_align_stack(unsigned long sp);
2492+#define arch_align_stack(x) ((x) & ~0xfUL)
2493
2494 static inline int tprot(unsigned long addr)
2495 {
2496diff -urNp linux-3.1.1/arch/s390/include/asm/uaccess.h linux-3.1.1/arch/s390/include/asm/uaccess.h
2497--- linux-3.1.1/arch/s390/include/asm/uaccess.h 2011-11-11 15:19:27.000000000 -0500
2498+++ linux-3.1.1/arch/s390/include/asm/uaccess.h 2011-11-16 18:39:07.000000000 -0500
2499@@ -235,6 +235,10 @@ static inline unsigned long __must_check
2500 copy_to_user(void __user *to, const void *from, unsigned long n)
2501 {
2502 might_fault();
2503+
2504+ if ((long)n < 0)
2505+ return n;
2506+
2507 if (access_ok(VERIFY_WRITE, to, n))
2508 n = __copy_to_user(to, from, n);
2509 return n;
2510@@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void
2511 static inline unsigned long __must_check
2512 __copy_from_user(void *to, const void __user *from, unsigned long n)
2513 {
2514+ if ((long)n < 0)
2515+ return n;
2516+
2517 if (__builtin_constant_p(n) && (n <= 256))
2518 return uaccess.copy_from_user_small(n, from, to);
2519 else
2520@@ -294,6 +301,10 @@ copy_from_user(void *to, const void __us
2521 unsigned int sz = __compiletime_object_size(to);
2522
2523 might_fault();
2524+
2525+ if ((long)n < 0)
2526+ return n;
2527+
2528 if (unlikely(sz != -1 && sz < n)) {
2529 copy_from_user_overflow();
2530 return n;
2531diff -urNp linux-3.1.1/arch/s390/kernel/module.c linux-3.1.1/arch/s390/kernel/module.c
2532--- linux-3.1.1/arch/s390/kernel/module.c 2011-11-11 15:19:27.000000000 -0500
2533+++ linux-3.1.1/arch/s390/kernel/module.c 2011-11-16 18:39:07.000000000 -0500
2534@@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
2535
2536 /* Increase core size by size of got & plt and set start
2537 offsets for got and plt. */
2538- me->core_size = ALIGN(me->core_size, 4);
2539- me->arch.got_offset = me->core_size;
2540- me->core_size += me->arch.got_size;
2541- me->arch.plt_offset = me->core_size;
2542- me->core_size += me->arch.plt_size;
2543+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
2544+ me->arch.got_offset = me->core_size_rw;
2545+ me->core_size_rw += me->arch.got_size;
2546+ me->arch.plt_offset = me->core_size_rx;
2547+ me->core_size_rx += me->arch.plt_size;
2548 return 0;
2549 }
2550
2551@@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2552 if (info->got_initialized == 0) {
2553 Elf_Addr *gotent;
2554
2555- gotent = me->module_core + me->arch.got_offset +
2556+ gotent = me->module_core_rw + me->arch.got_offset +
2557 info->got_offset;
2558 *gotent = val;
2559 info->got_initialized = 1;
2560@@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2561 else if (r_type == R_390_GOTENT ||
2562 r_type == R_390_GOTPLTENT)
2563 *(unsigned int *) loc =
2564- (val + (Elf_Addr) me->module_core - loc) >> 1;
2565+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
2566 else if (r_type == R_390_GOT64 ||
2567 r_type == R_390_GOTPLT64)
2568 *(unsigned long *) loc = val;
2569@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2570 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
2571 if (info->plt_initialized == 0) {
2572 unsigned int *ip;
2573- ip = me->module_core + me->arch.plt_offset +
2574+ ip = me->module_core_rx + me->arch.plt_offset +
2575 info->plt_offset;
2576 #ifndef CONFIG_64BIT
2577 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
2578@@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2579 val - loc + 0xffffUL < 0x1ffffeUL) ||
2580 (r_type == R_390_PLT32DBL &&
2581 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
2582- val = (Elf_Addr) me->module_core +
2583+ val = (Elf_Addr) me->module_core_rx +
2584 me->arch.plt_offset +
2585 info->plt_offset;
2586 val += rela->r_addend - loc;
2587@@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2588 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
2589 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
2590 val = val + rela->r_addend -
2591- ((Elf_Addr) me->module_core + me->arch.got_offset);
2592+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
2593 if (r_type == R_390_GOTOFF16)
2594 *(unsigned short *) loc = val;
2595 else if (r_type == R_390_GOTOFF32)
2596@@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2597 break;
2598 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
2599 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
2600- val = (Elf_Addr) me->module_core + me->arch.got_offset +
2601+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
2602 rela->r_addend - loc;
2603 if (r_type == R_390_GOTPC)
2604 *(unsigned int *) loc = val;
2605diff -urNp linux-3.1.1/arch/s390/kernel/process.c linux-3.1.1/arch/s390/kernel/process.c
2606--- linux-3.1.1/arch/s390/kernel/process.c 2011-11-11 15:19:27.000000000 -0500
2607+++ linux-3.1.1/arch/s390/kernel/process.c 2011-11-16 18:39:07.000000000 -0500
2608@@ -319,39 +319,3 @@ unsigned long get_wchan(struct task_stru
2609 }
2610 return 0;
2611 }
2612-
2613-unsigned long arch_align_stack(unsigned long sp)
2614-{
2615- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2616- sp -= get_random_int() & ~PAGE_MASK;
2617- return sp & ~0xf;
2618-}
2619-
2620-static inline unsigned long brk_rnd(void)
2621-{
2622- /* 8MB for 32bit, 1GB for 64bit */
2623- if (is_32bit_task())
2624- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
2625- else
2626- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
2627-}
2628-
2629-unsigned long arch_randomize_brk(struct mm_struct *mm)
2630-{
2631- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
2632-
2633- if (ret < mm->brk)
2634- return mm->brk;
2635- return ret;
2636-}
2637-
2638-unsigned long randomize_et_dyn(unsigned long base)
2639-{
2640- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2641-
2642- if (!(current->flags & PF_RANDOMIZE))
2643- return base;
2644- if (ret < base)
2645- return base;
2646- return ret;
2647-}
2648diff -urNp linux-3.1.1/arch/s390/kernel/setup.c linux-3.1.1/arch/s390/kernel/setup.c
2649--- linux-3.1.1/arch/s390/kernel/setup.c 2011-11-11 15:19:27.000000000 -0500
2650+++ linux-3.1.1/arch/s390/kernel/setup.c 2011-11-16 18:39:07.000000000 -0500
2651@@ -271,7 +271,7 @@ static int __init early_parse_mem(char *
2652 }
2653 early_param("mem", early_parse_mem);
2654
2655-unsigned int user_mode = HOME_SPACE_MODE;
2656+unsigned int user_mode = SECONDARY_SPACE_MODE;
2657 EXPORT_SYMBOL_GPL(user_mode);
2658
2659 static int set_amode_and_uaccess(unsigned long user_amode,
2660diff -urNp linux-3.1.1/arch/s390/mm/mmap.c linux-3.1.1/arch/s390/mm/mmap.c
2661--- linux-3.1.1/arch/s390/mm/mmap.c 2011-11-11 15:19:27.000000000 -0500
2662+++ linux-3.1.1/arch/s390/mm/mmap.c 2011-11-16 18:39:07.000000000 -0500
2663@@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_str
2664 */
2665 if (mmap_is_legacy()) {
2666 mm->mmap_base = TASK_UNMAPPED_BASE;
2667+
2668+#ifdef CONFIG_PAX_RANDMMAP
2669+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2670+ mm->mmap_base += mm->delta_mmap;
2671+#endif
2672+
2673 mm->get_unmapped_area = arch_get_unmapped_area;
2674 mm->unmap_area = arch_unmap_area;
2675 } else {
2676 mm->mmap_base = mmap_base();
2677+
2678+#ifdef CONFIG_PAX_RANDMMAP
2679+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2680+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2681+#endif
2682+
2683 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2684 mm->unmap_area = arch_unmap_area_topdown;
2685 }
2686@@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_str
2687 */
2688 if (mmap_is_legacy()) {
2689 mm->mmap_base = TASK_UNMAPPED_BASE;
2690+
2691+#ifdef CONFIG_PAX_RANDMMAP
2692+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2693+ mm->mmap_base += mm->delta_mmap;
2694+#endif
2695+
2696 mm->get_unmapped_area = s390_get_unmapped_area;
2697 mm->unmap_area = arch_unmap_area;
2698 } else {
2699 mm->mmap_base = mmap_base();
2700+
2701+#ifdef CONFIG_PAX_RANDMMAP
2702+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2703+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2704+#endif
2705+
2706 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
2707 mm->unmap_area = arch_unmap_area_topdown;
2708 }
2709diff -urNp linux-3.1.1/arch/score/include/asm/system.h linux-3.1.1/arch/score/include/asm/system.h
2710--- linux-3.1.1/arch/score/include/asm/system.h 2011-11-11 15:19:27.000000000 -0500
2711+++ linux-3.1.1/arch/score/include/asm/system.h 2011-11-16 18:39:07.000000000 -0500
2712@@ -17,7 +17,7 @@ do { \
2713 #define finish_arch_switch(prev) do {} while (0)
2714
2715 typedef void (*vi_handler_t)(void);
2716-extern unsigned long arch_align_stack(unsigned long sp);
2717+#define arch_align_stack(x) (x)
2718
2719 #define mb() barrier()
2720 #define rmb() barrier()
2721diff -urNp linux-3.1.1/arch/score/kernel/process.c linux-3.1.1/arch/score/kernel/process.c
2722--- linux-3.1.1/arch/score/kernel/process.c 2011-11-11 15:19:27.000000000 -0500
2723+++ linux-3.1.1/arch/score/kernel/process.c 2011-11-16 18:39:07.000000000 -0500
2724@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
2725
2726 return task_pt_regs(task)->cp0_epc;
2727 }
2728-
2729-unsigned long arch_align_stack(unsigned long sp)
2730-{
2731- return sp;
2732-}
2733diff -urNp linux-3.1.1/arch/sh/mm/mmap.c linux-3.1.1/arch/sh/mm/mmap.c
2734--- linux-3.1.1/arch/sh/mm/mmap.c 2011-11-11 15:19:27.000000000 -0500
2735+++ linux-3.1.1/arch/sh/mm/mmap.c 2011-11-16 18:39:07.000000000 -0500
2736@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
2737 addr = PAGE_ALIGN(addr);
2738
2739 vma = find_vma(mm, addr);
2740- if (TASK_SIZE - len >= addr &&
2741- (!vma || addr + len <= vma->vm_start))
2742+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2743 return addr;
2744 }
2745
2746@@ -106,7 +105,7 @@ full_search:
2747 }
2748 return -ENOMEM;
2749 }
2750- if (likely(!vma || addr + len <= vma->vm_start)) {
2751+ if (likely(check_heap_stack_gap(vma, addr, len))) {
2752 /*
2753 * Remember the place where we stopped the search:
2754 */
2755@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
2756 addr = PAGE_ALIGN(addr);
2757
2758 vma = find_vma(mm, addr);
2759- if (TASK_SIZE - len >= addr &&
2760- (!vma || addr + len <= vma->vm_start))
2761+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2762 return addr;
2763 }
2764
2765@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
2766 /* make sure it can fit in the remaining address space */
2767 if (likely(addr > len)) {
2768 vma = find_vma(mm, addr-len);
2769- if (!vma || addr <= vma->vm_start) {
2770+ if (check_heap_stack_gap(vma, addr - len, len)) {
2771 /* remember the address as a hint for next time */
2772 return (mm->free_area_cache = addr-len);
2773 }
2774@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
2775 if (unlikely(mm->mmap_base < len))
2776 goto bottomup;
2777
2778- addr = mm->mmap_base-len;
2779- if (do_colour_align)
2780- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2781+ addr = mm->mmap_base - len;
2782
2783 do {
2784+ if (do_colour_align)
2785+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2786 /*
2787 * Lookup failure means no vma is above this address,
2788 * else if new region fits below vma->vm_start,
2789 * return with success:
2790 */
2791 vma = find_vma(mm, addr);
2792- if (likely(!vma || addr+len <= vma->vm_start)) {
2793+ if (likely(check_heap_stack_gap(vma, addr, len))) {
2794 /* remember the address as a hint for next time */
2795 return (mm->free_area_cache = addr);
2796 }
2797@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
2798 mm->cached_hole_size = vma->vm_start - addr;
2799
2800 /* try just below the current vma->vm_start */
2801- addr = vma->vm_start-len;
2802- if (do_colour_align)
2803- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2804- } while (likely(len < vma->vm_start));
2805+ addr = skip_heap_stack_gap(vma, len);
2806+ } while (!IS_ERR_VALUE(addr));
2807
2808 bottomup:
2809 /*
2810diff -urNp linux-3.1.1/arch/sparc/include/asm/atomic_64.h linux-3.1.1/arch/sparc/include/asm/atomic_64.h
2811--- linux-3.1.1/arch/sparc/include/asm/atomic_64.h 2011-11-11 15:19:27.000000000 -0500
2812+++ linux-3.1.1/arch/sparc/include/asm/atomic_64.h 2011-11-16 18:39:07.000000000 -0500
2813@@ -14,18 +14,40 @@
2814 #define ATOMIC64_INIT(i) { (i) }
2815
2816 #define atomic_read(v) (*(volatile int *)&(v)->counter)
2817+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
2818+{
2819+ return v->counter;
2820+}
2821 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
2822+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
2823+{
2824+ return v->counter;
2825+}
2826
2827 #define atomic_set(v, i) (((v)->counter) = i)
2828+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
2829+{
2830+ v->counter = i;
2831+}
2832 #define atomic64_set(v, i) (((v)->counter) = i)
2833+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
2834+{
2835+ v->counter = i;
2836+}
2837
2838 extern void atomic_add(int, atomic_t *);
2839+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
2840 extern void atomic64_add(long, atomic64_t *);
2841+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
2842 extern void atomic_sub(int, atomic_t *);
2843+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
2844 extern void atomic64_sub(long, atomic64_t *);
2845+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
2846
2847 extern int atomic_add_ret(int, atomic_t *);
2848+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
2849 extern long atomic64_add_ret(long, atomic64_t *);
2850+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
2851 extern int atomic_sub_ret(int, atomic_t *);
2852 extern long atomic64_sub_ret(long, atomic64_t *);
2853
2854@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
2855 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
2856
2857 #define atomic_inc_return(v) atomic_add_ret(1, v)
2858+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
2859+{
2860+ return atomic_add_ret_unchecked(1, v);
2861+}
2862 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
2863+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
2864+{
2865+ return atomic64_add_ret_unchecked(1, v);
2866+}
2867
2868 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
2869 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
2870
2871 #define atomic_add_return(i, v) atomic_add_ret(i, v)
2872+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
2873+{
2874+ return atomic_add_ret_unchecked(i, v);
2875+}
2876 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
2877+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
2878+{
2879+ return atomic64_add_ret_unchecked(i, v);
2880+}
2881
2882 /*
2883 * atomic_inc_and_test - increment and test
2884@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomi
2885 * other cases.
2886 */
2887 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
2888+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
2889+{
2890+ return atomic_inc_return_unchecked(v) == 0;
2891+}
2892 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
2893
2894 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
2895@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomi
2896 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
2897
2898 #define atomic_inc(v) atomic_add(1, v)
2899+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
2900+{
2901+ atomic_add_unchecked(1, v);
2902+}
2903 #define atomic64_inc(v) atomic64_add(1, v)
2904+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
2905+{
2906+ atomic64_add_unchecked(1, v);
2907+}
2908
2909 #define atomic_dec(v) atomic_sub(1, v)
2910+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
2911+{
2912+ atomic_sub_unchecked(1, v);
2913+}
2914 #define atomic64_dec(v) atomic64_sub(1, v)
2915+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
2916+{
2917+ atomic64_sub_unchecked(1, v);
2918+}
2919
2920 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
2921 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
2922
2923 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
2924+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
2925+{
2926+ return cmpxchg(&v->counter, old, new);
2927+}
2928 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
2929+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
2930+{
2931+ return xchg(&v->counter, new);
2932+}
2933
2934 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
2935 {
2936- int c, old;
2937+ int c, old, new;
2938 c = atomic_read(v);
2939 for (;;) {
2940- if (unlikely(c == (u)))
2941+ if (unlikely(c == u))
2942 break;
2943- old = atomic_cmpxchg((v), c, c + (a));
2944+
2945+ asm volatile("addcc %2, %0, %0\n"
2946+
2947+#ifdef CONFIG_PAX_REFCOUNT
2948+ "tvs %%icc, 6\n"
2949+#endif
2950+
2951+ : "=r" (new)
2952+ : "0" (c), "ir" (a)
2953+ : "cc");
2954+
2955+ old = atomic_cmpxchg(v, c, new);
2956 if (likely(old == c))
2957 break;
2958 c = old;
2959@@ -89,20 +166,35 @@ static inline int __atomic_add_unless(at
2960 #define atomic64_cmpxchg(v, o, n) \
2961 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
2962 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
2963+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
2964+{
2965+ return xchg(&v->counter, new);
2966+}
2967
2968 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
2969 {
2970- long c, old;
2971+ long c, old, new;
2972 c = atomic64_read(v);
2973 for (;;) {
2974- if (unlikely(c == (u)))
2975+ if (unlikely(c == u))
2976 break;
2977- old = atomic64_cmpxchg((v), c, c + (a));
2978+
2979+ asm volatile("addcc %2, %0, %0\n"
2980+
2981+#ifdef CONFIG_PAX_REFCOUNT
2982+ "tvs %%xcc, 6\n"
2983+#endif
2984+
2985+ : "=r" (new)
2986+ : "0" (c), "ir" (a)
2987+ : "cc");
2988+
2989+ old = atomic64_cmpxchg(v, c, new);
2990 if (likely(old == c))
2991 break;
2992 c = old;
2993 }
2994- return c != (u);
2995+ return c != u;
2996 }
2997
2998 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
2999diff -urNp linux-3.1.1/arch/sparc/include/asm/cache.h linux-3.1.1/arch/sparc/include/asm/cache.h
3000--- linux-3.1.1/arch/sparc/include/asm/cache.h 2011-11-11 15:19:27.000000000 -0500
3001+++ linux-3.1.1/arch/sparc/include/asm/cache.h 2011-11-16 18:39:07.000000000 -0500
3002@@ -10,7 +10,7 @@
3003 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3004
3005 #define L1_CACHE_SHIFT 5
3006-#define L1_CACHE_BYTES 32
3007+#define L1_CACHE_BYTES 32UL
3008
3009 #ifdef CONFIG_SPARC32
3010 #define SMP_CACHE_BYTES_SHIFT 5
3011diff -urNp linux-3.1.1/arch/sparc/include/asm/elf_32.h linux-3.1.1/arch/sparc/include/asm/elf_32.h
3012--- linux-3.1.1/arch/sparc/include/asm/elf_32.h 2011-11-11 15:19:27.000000000 -0500
3013+++ linux-3.1.1/arch/sparc/include/asm/elf_32.h 2011-11-16 18:39:07.000000000 -0500
3014@@ -114,6 +114,13 @@ typedef struct {
3015
3016 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3017
3018+#ifdef CONFIG_PAX_ASLR
3019+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3020+
3021+#define PAX_DELTA_MMAP_LEN 16
3022+#define PAX_DELTA_STACK_LEN 16
3023+#endif
3024+
3025 /* This yields a mask that user programs can use to figure out what
3026 instruction set this cpu supports. This can NOT be done in userspace
3027 on Sparc. */
3028diff -urNp linux-3.1.1/arch/sparc/include/asm/elf_64.h linux-3.1.1/arch/sparc/include/asm/elf_64.h
3029--- linux-3.1.1/arch/sparc/include/asm/elf_64.h 2011-11-11 15:19:27.000000000 -0500
3030+++ linux-3.1.1/arch/sparc/include/asm/elf_64.h 2011-11-16 18:39:07.000000000 -0500
3031@@ -180,6 +180,13 @@ typedef struct {
3032 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3033 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3034
3035+#ifdef CONFIG_PAX_ASLR
3036+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3037+
3038+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3039+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3040+#endif
3041+
3042 extern unsigned long sparc64_elf_hwcap;
3043 #define ELF_HWCAP sparc64_elf_hwcap
3044
3045diff -urNp linux-3.1.1/arch/sparc/include/asm/pgtable_32.h linux-3.1.1/arch/sparc/include/asm/pgtable_32.h
3046--- linux-3.1.1/arch/sparc/include/asm/pgtable_32.h 2011-11-11 15:19:27.000000000 -0500
3047+++ linux-3.1.1/arch/sparc/include/asm/pgtable_32.h 2011-11-16 18:39:07.000000000 -0500
3048@@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3049 BTFIXUPDEF_INT(page_none)
3050 BTFIXUPDEF_INT(page_copy)
3051 BTFIXUPDEF_INT(page_readonly)
3052+
3053+#ifdef CONFIG_PAX_PAGEEXEC
3054+BTFIXUPDEF_INT(page_shared_noexec)
3055+BTFIXUPDEF_INT(page_copy_noexec)
3056+BTFIXUPDEF_INT(page_readonly_noexec)
3057+#endif
3058+
3059 BTFIXUPDEF_INT(page_kernel)
3060
3061 #define PMD_SHIFT SUN4C_PMD_SHIFT
3062@@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3063 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3064 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3065
3066+#ifdef CONFIG_PAX_PAGEEXEC
3067+extern pgprot_t PAGE_SHARED_NOEXEC;
3068+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3069+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3070+#else
3071+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3072+# define PAGE_COPY_NOEXEC PAGE_COPY
3073+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3074+#endif
3075+
3076 extern unsigned long page_kernel;
3077
3078 #ifdef MODULE
3079diff -urNp linux-3.1.1/arch/sparc/include/asm/pgtsrmmu.h linux-3.1.1/arch/sparc/include/asm/pgtsrmmu.h
3080--- linux-3.1.1/arch/sparc/include/asm/pgtsrmmu.h 2011-11-11 15:19:27.000000000 -0500
3081+++ linux-3.1.1/arch/sparc/include/asm/pgtsrmmu.h 2011-11-16 18:39:07.000000000 -0500
3082@@ -115,6 +115,13 @@
3083 SRMMU_EXEC | SRMMU_REF)
3084 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3085 SRMMU_EXEC | SRMMU_REF)
3086+
3087+#ifdef CONFIG_PAX_PAGEEXEC
3088+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3089+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3090+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3091+#endif
3092+
3093 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3094 SRMMU_DIRTY | SRMMU_REF)
3095
3096diff -urNp linux-3.1.1/arch/sparc/include/asm/spinlock_64.h linux-3.1.1/arch/sparc/include/asm/spinlock_64.h
3097--- linux-3.1.1/arch/sparc/include/asm/spinlock_64.h 2011-11-11 15:19:27.000000000 -0500
3098+++ linux-3.1.1/arch/sparc/include/asm/spinlock_64.h 2011-11-16 18:39:07.000000000 -0500
3099@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(
3100
3101 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3102
3103-static void inline arch_read_lock(arch_rwlock_t *lock)
3104+static inline void arch_read_lock(arch_rwlock_t *lock)
3105 {
3106 unsigned long tmp1, tmp2;
3107
3108 __asm__ __volatile__ (
3109 "1: ldsw [%2], %0\n"
3110 " brlz,pn %0, 2f\n"
3111-"4: add %0, 1, %1\n"
3112+"4: addcc %0, 1, %1\n"
3113+
3114+#ifdef CONFIG_PAX_REFCOUNT
3115+" tvs %%icc, 6\n"
3116+#endif
3117+
3118 " cas [%2], %0, %1\n"
3119 " cmp %0, %1\n"
3120 " bne,pn %%icc, 1b\n"
3121@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_r
3122 " .previous"
3123 : "=&r" (tmp1), "=&r" (tmp2)
3124 : "r" (lock)
3125- : "memory");
3126+ : "memory", "cc");
3127 }
3128
3129-static int inline arch_read_trylock(arch_rwlock_t *lock)
3130+static inline int arch_read_trylock(arch_rwlock_t *lock)
3131 {
3132 int tmp1, tmp2;
3133
3134@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch
3135 "1: ldsw [%2], %0\n"
3136 " brlz,a,pn %0, 2f\n"
3137 " mov 0, %0\n"
3138-" add %0, 1, %1\n"
3139+" addcc %0, 1, %1\n"
3140+
3141+#ifdef CONFIG_PAX_REFCOUNT
3142+" tvs %%icc, 6\n"
3143+#endif
3144+
3145 " cas [%2], %0, %1\n"
3146 " cmp %0, %1\n"
3147 " bne,pn %%icc, 1b\n"
3148@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch
3149 return tmp1;
3150 }
3151
3152-static void inline arch_read_unlock(arch_rwlock_t *lock)
3153+static inline void arch_read_unlock(arch_rwlock_t *lock)
3154 {
3155 unsigned long tmp1, tmp2;
3156
3157 __asm__ __volatile__(
3158 "1: lduw [%2], %0\n"
3159-" sub %0, 1, %1\n"
3160+" subcc %0, 1, %1\n"
3161+
3162+#ifdef CONFIG_PAX_REFCOUNT
3163+" tvs %%icc, 6\n"
3164+#endif
3165+
3166 " cas [%2], %0, %1\n"
3167 " cmp %0, %1\n"
3168 " bne,pn %%xcc, 1b\n"
3169@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch
3170 : "memory");
3171 }
3172
3173-static void inline arch_write_lock(arch_rwlock_t *lock)
3174+static inline void arch_write_lock(arch_rwlock_t *lock)
3175 {
3176 unsigned long mask, tmp1, tmp2;
3177
3178@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_
3179 : "memory");
3180 }
3181
3182-static void inline arch_write_unlock(arch_rwlock_t *lock)
3183+static inline void arch_write_unlock(arch_rwlock_t *lock)
3184 {
3185 __asm__ __volatile__(
3186 " stw %%g0, [%0]"
3187@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arc
3188 : "memory");
3189 }
3190
3191-static int inline arch_write_trylock(arch_rwlock_t *lock)
3192+static inline int arch_write_trylock(arch_rwlock_t *lock)
3193 {
3194 unsigned long mask, tmp1, tmp2, result;
3195
3196diff -urNp linux-3.1.1/arch/sparc/include/asm/thread_info_32.h linux-3.1.1/arch/sparc/include/asm/thread_info_32.h
3197--- linux-3.1.1/arch/sparc/include/asm/thread_info_32.h 2011-11-11 15:19:27.000000000 -0500
3198+++ linux-3.1.1/arch/sparc/include/asm/thread_info_32.h 2011-11-16 18:39:07.000000000 -0500
3199@@ -50,6 +50,8 @@ struct thread_info {
3200 unsigned long w_saved;
3201
3202 struct restart_block restart_block;
3203+
3204+ unsigned long lowest_stack;
3205 };
3206
3207 /*
3208diff -urNp linux-3.1.1/arch/sparc/include/asm/thread_info_64.h linux-3.1.1/arch/sparc/include/asm/thread_info_64.h
3209--- linux-3.1.1/arch/sparc/include/asm/thread_info_64.h 2011-11-11 15:19:27.000000000 -0500
3210+++ linux-3.1.1/arch/sparc/include/asm/thread_info_64.h 2011-11-16 18:39:07.000000000 -0500
3211@@ -63,6 +63,8 @@ struct thread_info {
3212 struct pt_regs *kern_una_regs;
3213 unsigned int kern_una_insn;
3214
3215+ unsigned long lowest_stack;
3216+
3217 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3218 };
3219
3220diff -urNp linux-3.1.1/arch/sparc/include/asm/uaccess_32.h linux-3.1.1/arch/sparc/include/asm/uaccess_32.h
3221--- linux-3.1.1/arch/sparc/include/asm/uaccess_32.h 2011-11-11 15:19:27.000000000 -0500
3222+++ linux-3.1.1/arch/sparc/include/asm/uaccess_32.h 2011-11-16 18:39:07.000000000 -0500
3223@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
3224
3225 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3226 {
3227- if (n && __access_ok((unsigned long) to, n))
3228+ if ((long)n < 0)
3229+ return n;
3230+
3231+ if (n && __access_ok((unsigned long) to, n)) {
3232+ if (!__builtin_constant_p(n))
3233+ check_object_size(from, n, true);
3234 return __copy_user(to, (__force void __user *) from, n);
3235- else
3236+ } else
3237 return n;
3238 }
3239
3240 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3241 {
3242+ if ((long)n < 0)
3243+ return n;
3244+
3245+ if (!__builtin_constant_p(n))
3246+ check_object_size(from, n, true);
3247+
3248 return __copy_user(to, (__force void __user *) from, n);
3249 }
3250
3251 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3252 {
3253- if (n && __access_ok((unsigned long) from, n))
3254+ if ((long)n < 0)
3255+ return n;
3256+
3257+ if (n && __access_ok((unsigned long) from, n)) {
3258+ if (!__builtin_constant_p(n))
3259+ check_object_size(to, n, false);
3260 return __copy_user((__force void __user *) to, from, n);
3261- else
3262+ } else
3263 return n;
3264 }
3265
3266 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3267 {
3268+ if ((long)n < 0)
3269+ return n;
3270+
3271 return __copy_user((__force void __user *) to, from, n);
3272 }
3273
3274diff -urNp linux-3.1.1/arch/sparc/include/asm/uaccess_64.h linux-3.1.1/arch/sparc/include/asm/uaccess_64.h
3275--- linux-3.1.1/arch/sparc/include/asm/uaccess_64.h 2011-11-11 15:19:27.000000000 -0500
3276+++ linux-3.1.1/arch/sparc/include/asm/uaccess_64.h 2011-11-16 18:39:07.000000000 -0500
3277@@ -10,6 +10,7 @@
3278 #include <linux/compiler.h>
3279 #include <linux/string.h>
3280 #include <linux/thread_info.h>
3281+#include <linux/kernel.h>
3282 #include <asm/asi.h>
3283 #include <asm/system.h>
3284 #include <asm/spitfire.h>
3285@@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixu
3286 static inline unsigned long __must_check
3287 copy_from_user(void *to, const void __user *from, unsigned long size)
3288 {
3289- unsigned long ret = ___copy_from_user(to, from, size);
3290+ unsigned long ret;
3291
3292+ if ((long)size < 0 || size > INT_MAX)
3293+ return size;
3294+
3295+ if (!__builtin_constant_p(size))
3296+ check_object_size(to, size, false);
3297+
3298+ ret = ___copy_from_user(to, from, size);
3299 if (unlikely(ret))
3300 ret = copy_from_user_fixup(to, from, size);
3301
3302@@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(
3303 static inline unsigned long __must_check
3304 copy_to_user(void __user *to, const void *from, unsigned long size)
3305 {
3306- unsigned long ret = ___copy_to_user(to, from, size);
3307+ unsigned long ret;
3308+
3309+ if ((long)size < 0 || size > INT_MAX)
3310+ return size;
3311+
3312+ if (!__builtin_constant_p(size))
3313+ check_object_size(from, size, true);
3314
3315+ ret = ___copy_to_user(to, from, size);
3316 if (unlikely(ret))
3317 ret = copy_to_user_fixup(to, from, size);
3318 return ret;
3319diff -urNp linux-3.1.1/arch/sparc/include/asm/uaccess.h linux-3.1.1/arch/sparc/include/asm/uaccess.h
3320--- linux-3.1.1/arch/sparc/include/asm/uaccess.h 2011-11-11 15:19:27.000000000 -0500
3321+++ linux-3.1.1/arch/sparc/include/asm/uaccess.h 2011-11-16 18:39:07.000000000 -0500
3322@@ -1,5 +1,13 @@
3323 #ifndef ___ASM_SPARC_UACCESS_H
3324 #define ___ASM_SPARC_UACCESS_H
3325+
3326+#ifdef __KERNEL__
3327+#ifndef __ASSEMBLY__
3328+#include <linux/types.h>
3329+extern void check_object_size(const void *ptr, unsigned long n, bool to);
3330+#endif
3331+#endif
3332+
3333 #if defined(__sparc__) && defined(__arch64__)
3334 #include <asm/uaccess_64.h>
3335 #else
3336diff -urNp linux-3.1.1/arch/sparc/kernel/Makefile linux-3.1.1/arch/sparc/kernel/Makefile
3337--- linux-3.1.1/arch/sparc/kernel/Makefile 2011-11-11 15:19:27.000000000 -0500
3338+++ linux-3.1.1/arch/sparc/kernel/Makefile 2011-11-16 18:39:07.000000000 -0500
3339@@ -3,7 +3,7 @@
3340 #
3341
3342 asflags-y := -ansi
3343-ccflags-y := -Werror
3344+#ccflags-y := -Werror
3345
3346 extra-y := head_$(BITS).o
3347 extra-y += init_task.o
3348diff -urNp linux-3.1.1/arch/sparc/kernel/process_32.c linux-3.1.1/arch/sparc/kernel/process_32.c
3349--- linux-3.1.1/arch/sparc/kernel/process_32.c 2011-11-11 15:19:27.000000000 -0500
3350+++ linux-3.1.1/arch/sparc/kernel/process_32.c 2011-11-16 18:40:08.000000000 -0500
3351@@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
3352 rw->ins[4], rw->ins[5],
3353 rw->ins[6],
3354 rw->ins[7]);
3355- printk("%pS\n", (void *) rw->ins[7]);
3356+ printk("%pA\n", (void *) rw->ins[7]);
3357 rw = (struct reg_window32 *) rw->ins[6];
3358 }
3359 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3360@@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
3361
3362 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3363 r->psr, r->pc, r->npc, r->y, print_tainted());
3364- printk("PC: <%pS>\n", (void *) r->pc);
3365+ printk("PC: <%pA>\n", (void *) r->pc);
3366 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3367 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3368 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3369 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3370 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3371 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3372- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3373+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3374
3375 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3376 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3377@@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk,
3378 rw = (struct reg_window32 *) fp;
3379 pc = rw->ins[7];
3380 printk("[%08lx : ", pc);
3381- printk("%pS ] ", (void *) pc);
3382+ printk("%pA ] ", (void *) pc);
3383 fp = rw->ins[6];
3384 } while (++count < 16);
3385 printk("\n");
3386diff -urNp linux-3.1.1/arch/sparc/kernel/process_64.c linux-3.1.1/arch/sparc/kernel/process_64.c
3387--- linux-3.1.1/arch/sparc/kernel/process_64.c 2011-11-11 15:19:27.000000000 -0500
3388+++ linux-3.1.1/arch/sparc/kernel/process_64.c 2011-11-16 18:40:08.000000000 -0500
3389@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
3390 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3391 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3392 if (regs->tstate & TSTATE_PRIV)
3393- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3394+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3395 }
3396
3397 void show_regs(struct pt_regs *regs)
3398 {
3399 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3400 regs->tpc, regs->tnpc, regs->y, print_tainted());
3401- printk("TPC: <%pS>\n", (void *) regs->tpc);
3402+ printk("TPC: <%pA>\n", (void *) regs->tpc);
3403 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3404 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3405 regs->u_regs[3]);
3406@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3407 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3408 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3409 regs->u_regs[15]);
3410- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3411+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3412 show_regwindow(regs);
3413 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3414 }
3415@@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void
3416 ((tp && tp->task) ? tp->task->pid : -1));
3417
3418 if (gp->tstate & TSTATE_PRIV) {
3419- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3420+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3421 (void *) gp->tpc,
3422 (void *) gp->o7,
3423 (void *) gp->i7,
3424diff -urNp linux-3.1.1/arch/sparc/kernel/sys_sparc_32.c linux-3.1.1/arch/sparc/kernel/sys_sparc_32.c
3425--- linux-3.1.1/arch/sparc/kernel/sys_sparc_32.c 2011-11-11 15:19:27.000000000 -0500
3426+++ linux-3.1.1/arch/sparc/kernel/sys_sparc_32.c 2011-11-16 18:39:07.000000000 -0500
3427@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(str
3428 if (ARCH_SUN4C && len > 0x20000000)
3429 return -ENOMEM;
3430 if (!addr)
3431- addr = TASK_UNMAPPED_BASE;
3432+ addr = current->mm->mmap_base;
3433
3434 if (flags & MAP_SHARED)
3435 addr = COLOUR_ALIGN(addr);
3436@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(str
3437 }
3438 if (TASK_SIZE - PAGE_SIZE - len < addr)
3439 return -ENOMEM;
3440- if (!vmm || addr + len <= vmm->vm_start)
3441+ if (check_heap_stack_gap(vmm, addr, len))
3442 return addr;
3443 addr = vmm->vm_end;
3444 if (flags & MAP_SHARED)
3445diff -urNp linux-3.1.1/arch/sparc/kernel/sys_sparc_64.c linux-3.1.1/arch/sparc/kernel/sys_sparc_64.c
3446--- linux-3.1.1/arch/sparc/kernel/sys_sparc_64.c 2011-11-11 15:19:27.000000000 -0500
3447+++ linux-3.1.1/arch/sparc/kernel/sys_sparc_64.c 2011-11-16 18:39:07.000000000 -0500
3448@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str
3449 /* We do not accept a shared mapping if it would violate
3450 * cache aliasing constraints.
3451 */
3452- if ((flags & MAP_SHARED) &&
3453+ if ((filp || (flags & MAP_SHARED)) &&
3454 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3455 return -EINVAL;
3456 return addr;
3457@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(str
3458 if (filp || (flags & MAP_SHARED))
3459 do_color_align = 1;
3460
3461+#ifdef CONFIG_PAX_RANDMMAP
3462+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3463+#endif
3464+
3465 if (addr) {
3466 if (do_color_align)
3467 addr = COLOUR_ALIGN(addr, pgoff);
3468@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(str
3469 addr = PAGE_ALIGN(addr);
3470
3471 vma = find_vma(mm, addr);
3472- if (task_size - len >= addr &&
3473- (!vma || addr + len <= vma->vm_start))
3474+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3475 return addr;
3476 }
3477
3478 if (len > mm->cached_hole_size) {
3479- start_addr = addr = mm->free_area_cache;
3480+ start_addr = addr = mm->free_area_cache;
3481 } else {
3482- start_addr = addr = TASK_UNMAPPED_BASE;
3483+ start_addr = addr = mm->mmap_base;
3484 mm->cached_hole_size = 0;
3485 }
3486
3487@@ -174,14 +177,14 @@ full_search:
3488 vma = find_vma(mm, VA_EXCLUDE_END);
3489 }
3490 if (unlikely(task_size < addr)) {
3491- if (start_addr != TASK_UNMAPPED_BASE) {
3492- start_addr = addr = TASK_UNMAPPED_BASE;
3493+ if (start_addr != mm->mmap_base) {
3494+ start_addr = addr = mm->mmap_base;
3495 mm->cached_hole_size = 0;
3496 goto full_search;
3497 }
3498 return -ENOMEM;
3499 }
3500- if (likely(!vma || addr + len <= vma->vm_start)) {
3501+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3502 /*
3503 * Remember the place where we stopped the search:
3504 */
3505@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct fi
3506 /* We do not accept a shared mapping if it would violate
3507 * cache aliasing constraints.
3508 */
3509- if ((flags & MAP_SHARED) &&
3510+ if ((filp || (flags & MAP_SHARED)) &&
3511 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3512 return -EINVAL;
3513 return addr;
3514@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct fi
3515 addr = PAGE_ALIGN(addr);
3516
3517 vma = find_vma(mm, addr);
3518- if (task_size - len >= addr &&
3519- (!vma || addr + len <= vma->vm_start))
3520+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3521 return addr;
3522 }
3523
3524@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct fi
3525 /* make sure it can fit in the remaining address space */
3526 if (likely(addr > len)) {
3527 vma = find_vma(mm, addr-len);
3528- if (!vma || addr <= vma->vm_start) {
3529+ if (check_heap_stack_gap(vma, addr - len, len)) {
3530 /* remember the address as a hint for next time */
3531 return (mm->free_area_cache = addr-len);
3532 }
3533@@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct fi
3534 if (unlikely(mm->mmap_base < len))
3535 goto bottomup;
3536
3537- addr = mm->mmap_base-len;
3538- if (do_color_align)
3539- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3540+ addr = mm->mmap_base - len;
3541
3542 do {
3543+ if (do_color_align)
3544+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3545 /*
3546 * Lookup failure means no vma is above this address,
3547 * else if new region fits below vma->vm_start,
3548 * return with success:
3549 */
3550 vma = find_vma(mm, addr);
3551- if (likely(!vma || addr+len <= vma->vm_start)) {
3552+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3553 /* remember the address as a hint for next time */
3554 return (mm->free_area_cache = addr);
3555 }
3556@@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct fi
3557 mm->cached_hole_size = vma->vm_start - addr;
3558
3559 /* try just below the current vma->vm_start */
3560- addr = vma->vm_start-len;
3561- if (do_color_align)
3562- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3563- } while (likely(len < vma->vm_start));
3564+ addr = skip_heap_stack_gap(vma, len);
3565+ } while (!IS_ERR_VALUE(addr));
3566
3567 bottomup:
3568 /*
3569@@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_str
3570 gap == RLIM_INFINITY ||
3571 sysctl_legacy_va_layout) {
3572 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
3573+
3574+#ifdef CONFIG_PAX_RANDMMAP
3575+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3576+ mm->mmap_base += mm->delta_mmap;
3577+#endif
3578+
3579 mm->get_unmapped_area = arch_get_unmapped_area;
3580 mm->unmap_area = arch_unmap_area;
3581 } else {
3582@@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_str
3583 gap = (task_size / 6 * 5);
3584
3585 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
3586+
3587+#ifdef CONFIG_PAX_RANDMMAP
3588+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3589+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3590+#endif
3591+
3592 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3593 mm->unmap_area = arch_unmap_area_topdown;
3594 }
3595diff -urNp linux-3.1.1/arch/sparc/kernel/traps_32.c linux-3.1.1/arch/sparc/kernel/traps_32.c
3596--- linux-3.1.1/arch/sparc/kernel/traps_32.c 2011-11-11 15:19:27.000000000 -0500
3597+++ linux-3.1.1/arch/sparc/kernel/traps_32.c 2011-11-16 18:40:08.000000000 -0500
3598@@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
3599 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
3600 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
3601
3602+extern void gr_handle_kernel_exploit(void);
3603+
3604 void die_if_kernel(char *str, struct pt_regs *regs)
3605 {
3606 static int die_counter;
3607@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
3608 count++ < 30 &&
3609 (((unsigned long) rw) >= PAGE_OFFSET) &&
3610 !(((unsigned long) rw) & 0x7)) {
3611- printk("Caller[%08lx]: %pS\n", rw->ins[7],
3612+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
3613 (void *) rw->ins[7]);
3614 rw = (struct reg_window32 *)rw->ins[6];
3615 }
3616 }
3617 printk("Instruction DUMP:");
3618 instruction_dump ((unsigned long *) regs->pc);
3619- if(regs->psr & PSR_PS)
3620+ if(regs->psr & PSR_PS) {
3621+ gr_handle_kernel_exploit();
3622 do_exit(SIGKILL);
3623+ }
3624 do_exit(SIGSEGV);
3625 }
3626
3627diff -urNp linux-3.1.1/arch/sparc/kernel/traps_64.c linux-3.1.1/arch/sparc/kernel/traps_64.c
3628--- linux-3.1.1/arch/sparc/kernel/traps_64.c 2011-11-11 15:19:27.000000000 -0500
3629+++ linux-3.1.1/arch/sparc/kernel/traps_64.c 2011-11-16 18:40:08.000000000 -0500
3630@@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_
3631 i + 1,
3632 p->trapstack[i].tstate, p->trapstack[i].tpc,
3633 p->trapstack[i].tnpc, p->trapstack[i].tt);
3634- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
3635+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
3636 }
3637 }
3638
3639@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long
3640
3641 lvl -= 0x100;
3642 if (regs->tstate & TSTATE_PRIV) {
3643+
3644+#ifdef CONFIG_PAX_REFCOUNT
3645+ if (lvl == 6)
3646+ pax_report_refcount_overflow(regs);
3647+#endif
3648+
3649 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
3650 die_if_kernel(buffer, regs);
3651 }
3652@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long
3653 void bad_trap_tl1(struct pt_regs *regs, long lvl)
3654 {
3655 char buffer[32];
3656-
3657+
3658 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
3659 0, lvl, SIGTRAP) == NOTIFY_STOP)
3660 return;
3661
3662+#ifdef CONFIG_PAX_REFCOUNT
3663+ if (lvl == 6)
3664+ pax_report_refcount_overflow(regs);
3665+#endif
3666+
3667 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
3668
3669 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
3670@@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt
3671 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
3672 printk("%s" "ERROR(%d): ",
3673 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
3674- printk("TPC<%pS>\n", (void *) regs->tpc);
3675+ printk("TPC<%pA>\n", (void *) regs->tpc);
3676 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
3677 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
3678 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
3679@@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type,
3680 smp_processor_id(),
3681 (type & 0x1) ? 'I' : 'D',
3682 regs->tpc);
3683- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
3684+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
3685 panic("Irrecoverable Cheetah+ parity error.");
3686 }
3687
3688@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type,
3689 smp_processor_id(),
3690 (type & 0x1) ? 'I' : 'D',
3691 regs->tpc);
3692- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
3693+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
3694 }
3695
3696 struct sun4v_error_entry {
3697@@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_r
3698
3699 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
3700 regs->tpc, tl);
3701- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
3702+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
3703 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3704- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
3705+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
3706 (void *) regs->u_regs[UREG_I7]);
3707 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
3708 "pte[%lx] error[%lx]\n",
3709@@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_r
3710
3711 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
3712 regs->tpc, tl);
3713- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
3714+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
3715 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3716- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
3717+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
3718 (void *) regs->u_regs[UREG_I7]);
3719 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
3720 "pte[%lx] error[%lx]\n",
3721@@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk,
3722 fp = (unsigned long)sf->fp + STACK_BIAS;
3723 }
3724
3725- printk(" [%016lx] %pS\n", pc, (void *) pc);
3726+ printk(" [%016lx] %pA\n", pc, (void *) pc);
3727 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3728 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
3729 int index = tsk->curr_ret_stack;
3730 if (tsk->ret_stack && index >= graph) {
3731 pc = tsk->ret_stack[index - graph].ret;
3732- printk(" [%016lx] %pS\n", pc, (void *) pc);
3733+ printk(" [%016lx] %pA\n", pc, (void *) pc);
3734 graph++;
3735 }
3736 }
3737@@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_
3738 return (struct reg_window *) (fp + STACK_BIAS);
3739 }
3740
3741+extern void gr_handle_kernel_exploit(void);
3742+
3743 void die_if_kernel(char *str, struct pt_regs *regs)
3744 {
3745 static int die_counter;
3746@@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_
3747 while (rw &&
3748 count++ < 30 &&
3749 kstack_valid(tp, (unsigned long) rw)) {
3750- printk("Caller[%016lx]: %pS\n", rw->ins[7],
3751+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
3752 (void *) rw->ins[7]);
3753
3754 rw = kernel_stack_up(rw);
3755@@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_
3756 }
3757 user_instruction_dump ((unsigned int __user *) regs->tpc);
3758 }
3759- if (regs->tstate & TSTATE_PRIV)
3760+ if (regs->tstate & TSTATE_PRIV) {
3761+ gr_handle_kernel_exploit();
3762 do_exit(SIGKILL);
3763+ }
3764 do_exit(SIGSEGV);
3765 }
3766 EXPORT_SYMBOL(die_if_kernel);
3767diff -urNp linux-3.1.1/arch/sparc/kernel/unaligned_64.c linux-3.1.1/arch/sparc/kernel/unaligned_64.c
3768--- linux-3.1.1/arch/sparc/kernel/unaligned_64.c 2011-11-11 15:19:27.000000000 -0500
3769+++ linux-3.1.1/arch/sparc/kernel/unaligned_64.c 2011-11-16 18:40:08.000000000 -0500
3770@@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs
3771 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
3772
3773 if (__ratelimit(&ratelimit)) {
3774- printk("Kernel unaligned access at TPC[%lx] %pS\n",
3775+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
3776 regs->tpc, (void *) regs->tpc);
3777 }
3778 }
3779diff -urNp linux-3.1.1/arch/sparc/lib/atomic_64.S linux-3.1.1/arch/sparc/lib/atomic_64.S
3780--- linux-3.1.1/arch/sparc/lib/atomic_64.S 2011-11-11 15:19:27.000000000 -0500
3781+++ linux-3.1.1/arch/sparc/lib/atomic_64.S 2011-11-16 18:39:07.000000000 -0500
3782@@ -18,7 +18,12 @@
3783 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
3784 BACKOFF_SETUP(%o2)
3785 1: lduw [%o1], %g1
3786- add %g1, %o0, %g7
3787+ addcc %g1, %o0, %g7
3788+
3789+#ifdef CONFIG_PAX_REFCOUNT
3790+ tvs %icc, 6
3791+#endif
3792+
3793 cas [%o1], %g1, %g7
3794 cmp %g1, %g7
3795 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3796@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
3797 2: BACKOFF_SPIN(%o2, %o3, 1b)
3798 .size atomic_add, .-atomic_add
3799
3800+ .globl atomic_add_unchecked
3801+ .type atomic_add_unchecked,#function
3802+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3803+ BACKOFF_SETUP(%o2)
3804+1: lduw [%o1], %g1
3805+ add %g1, %o0, %g7
3806+ cas [%o1], %g1, %g7
3807+ cmp %g1, %g7
3808+ bne,pn %icc, 2f
3809+ nop
3810+ retl
3811+ nop
3812+2: BACKOFF_SPIN(%o2, %o3, 1b)
3813+ .size atomic_add_unchecked, .-atomic_add_unchecked
3814+
3815 .globl atomic_sub
3816 .type atomic_sub,#function
3817 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3818 BACKOFF_SETUP(%o2)
3819 1: lduw [%o1], %g1
3820- sub %g1, %o0, %g7
3821+ subcc %g1, %o0, %g7
3822+
3823+#ifdef CONFIG_PAX_REFCOUNT
3824+ tvs %icc, 6
3825+#endif
3826+
3827 cas [%o1], %g1, %g7
3828 cmp %g1, %g7
3829 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3830@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
3831 2: BACKOFF_SPIN(%o2, %o3, 1b)
3832 .size atomic_sub, .-atomic_sub
3833
3834+ .globl atomic_sub_unchecked
3835+ .type atomic_sub_unchecked,#function
3836+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3837+ BACKOFF_SETUP(%o2)
3838+1: lduw [%o1], %g1
3839+ sub %g1, %o0, %g7
3840+ cas [%o1], %g1, %g7
3841+ cmp %g1, %g7
3842+ bne,pn %icc, 2f
3843+ nop
3844+ retl
3845+ nop
3846+2: BACKOFF_SPIN(%o2, %o3, 1b)
3847+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
3848+
3849 .globl atomic_add_ret
3850 .type atomic_add_ret,#function
3851 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3852 BACKOFF_SETUP(%o2)
3853 1: lduw [%o1], %g1
3854- add %g1, %o0, %g7
3855+ addcc %g1, %o0, %g7
3856+
3857+#ifdef CONFIG_PAX_REFCOUNT
3858+ tvs %icc, 6
3859+#endif
3860+
3861 cas [%o1], %g1, %g7
3862 cmp %g1, %g7
3863 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3864@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1
3865 2: BACKOFF_SPIN(%o2, %o3, 1b)
3866 .size atomic_add_ret, .-atomic_add_ret
3867
3868+ .globl atomic_add_ret_unchecked
3869+ .type atomic_add_ret_unchecked,#function
3870+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3871+ BACKOFF_SETUP(%o2)
3872+1: lduw [%o1], %g1
3873+ addcc %g1, %o0, %g7
3874+ cas [%o1], %g1, %g7
3875+ cmp %g1, %g7
3876+ bne,pn %icc, 2f
3877+ add %g7, %o0, %g7
3878+ sra %g7, 0, %o0
3879+ retl
3880+ nop
3881+2: BACKOFF_SPIN(%o2, %o3, 1b)
3882+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
3883+
3884 .globl atomic_sub_ret
3885 .type atomic_sub_ret,#function
3886 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
3887 BACKOFF_SETUP(%o2)
3888 1: lduw [%o1], %g1
3889- sub %g1, %o0, %g7
3890+ subcc %g1, %o0, %g7
3891+
3892+#ifdef CONFIG_PAX_REFCOUNT
3893+ tvs %icc, 6
3894+#endif
3895+
3896 cas [%o1], %g1, %g7
3897 cmp %g1, %g7
3898 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3899@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
3900 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
3901 BACKOFF_SETUP(%o2)
3902 1: ldx [%o1], %g1
3903- add %g1, %o0, %g7
3904+ addcc %g1, %o0, %g7
3905+
3906+#ifdef CONFIG_PAX_REFCOUNT
3907+ tvs %xcc, 6
3908+#endif
3909+
3910 casx [%o1], %g1, %g7
3911 cmp %g1, %g7
3912 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3913@@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 =
3914 2: BACKOFF_SPIN(%o2, %o3, 1b)
3915 .size atomic64_add, .-atomic64_add
3916
3917+ .globl atomic64_add_unchecked
3918+ .type atomic64_add_unchecked,#function
3919+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3920+ BACKOFF_SETUP(%o2)
3921+1: ldx [%o1], %g1
3922+ addcc %g1, %o0, %g7
3923+ casx [%o1], %g1, %g7
3924+ cmp %g1, %g7
3925+ bne,pn %xcc, 2f
3926+ nop
3927+ retl
3928+ nop
3929+2: BACKOFF_SPIN(%o2, %o3, 1b)
3930+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
3931+
3932 .globl atomic64_sub
3933 .type atomic64_sub,#function
3934 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3935 BACKOFF_SETUP(%o2)
3936 1: ldx [%o1], %g1
3937- sub %g1, %o0, %g7
3938+ subcc %g1, %o0, %g7
3939+
3940+#ifdef CONFIG_PAX_REFCOUNT
3941+ tvs %xcc, 6
3942+#endif
3943+
3944 casx [%o1], %g1, %g7
3945 cmp %g1, %g7
3946 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3947@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
3948 2: BACKOFF_SPIN(%o2, %o3, 1b)
3949 .size atomic64_sub, .-atomic64_sub
3950
3951+ .globl atomic64_sub_unchecked
3952+ .type atomic64_sub_unchecked,#function
3953+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3954+ BACKOFF_SETUP(%o2)
3955+1: ldx [%o1], %g1
3956+ subcc %g1, %o0, %g7
3957+ casx [%o1], %g1, %g7
3958+ cmp %g1, %g7
3959+ bne,pn %xcc, 2f
3960+ nop
3961+ retl
3962+ nop
3963+2: BACKOFF_SPIN(%o2, %o3, 1b)
3964+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
3965+
3966 .globl atomic64_add_ret
3967 .type atomic64_add_ret,#function
3968 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3969 BACKOFF_SETUP(%o2)
3970 1: ldx [%o1], %g1
3971- add %g1, %o0, %g7
3972+ addcc %g1, %o0, %g7
3973+
3974+#ifdef CONFIG_PAX_REFCOUNT
3975+ tvs %xcc, 6
3976+#endif
3977+
3978 casx [%o1], %g1, %g7
3979 cmp %g1, %g7
3980 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3981@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o
3982 2: BACKOFF_SPIN(%o2, %o3, 1b)
3983 .size atomic64_add_ret, .-atomic64_add_ret
3984
3985+ .globl atomic64_add_ret_unchecked
3986+ .type atomic64_add_ret_unchecked,#function
3987+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3988+ BACKOFF_SETUP(%o2)
3989+1: ldx [%o1], %g1
3990+ addcc %g1, %o0, %g7
3991+ casx [%o1], %g1, %g7
3992+ cmp %g1, %g7
3993+ bne,pn %xcc, 2f
3994+ add %g7, %o0, %g7
3995+ mov %g7, %o0
3996+ retl
3997+ nop
3998+2: BACKOFF_SPIN(%o2, %o3, 1b)
3999+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4000+
4001 .globl atomic64_sub_ret
4002 .type atomic64_sub_ret,#function
4003 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4004 BACKOFF_SETUP(%o2)
4005 1: ldx [%o1], %g1
4006- sub %g1, %o0, %g7
4007+ subcc %g1, %o0, %g7
4008+
4009+#ifdef CONFIG_PAX_REFCOUNT
4010+ tvs %xcc, 6
4011+#endif
4012+
4013 casx [%o1], %g1, %g7
4014 cmp %g1, %g7
4015 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4016diff -urNp linux-3.1.1/arch/sparc/lib/ksyms.c linux-3.1.1/arch/sparc/lib/ksyms.c
4017--- linux-3.1.1/arch/sparc/lib/ksyms.c 2011-11-11 15:19:27.000000000 -0500
4018+++ linux-3.1.1/arch/sparc/lib/ksyms.c 2011-11-16 18:39:07.000000000 -0500
4019@@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4020
4021 /* Atomic counter implementation. */
4022 EXPORT_SYMBOL(atomic_add);
4023+EXPORT_SYMBOL(atomic_add_unchecked);
4024 EXPORT_SYMBOL(atomic_add_ret);
4025+EXPORT_SYMBOL(atomic_add_ret_unchecked);
4026 EXPORT_SYMBOL(atomic_sub);
4027+EXPORT_SYMBOL(atomic_sub_unchecked);
4028 EXPORT_SYMBOL(atomic_sub_ret);
4029 EXPORT_SYMBOL(atomic64_add);
4030+EXPORT_SYMBOL(atomic64_add_unchecked);
4031 EXPORT_SYMBOL(atomic64_add_ret);
4032+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4033 EXPORT_SYMBOL(atomic64_sub);
4034+EXPORT_SYMBOL(atomic64_sub_unchecked);
4035 EXPORT_SYMBOL(atomic64_sub_ret);
4036
4037 /* Atomic bit operations. */
4038diff -urNp linux-3.1.1/arch/sparc/lib/Makefile linux-3.1.1/arch/sparc/lib/Makefile
4039--- linux-3.1.1/arch/sparc/lib/Makefile 2011-11-11 15:19:27.000000000 -0500
4040+++ linux-3.1.1/arch/sparc/lib/Makefile 2011-11-16 18:39:07.000000000 -0500
4041@@ -2,7 +2,7 @@
4042 #
4043
4044 asflags-y := -ansi -DST_DIV0=0x02
4045-ccflags-y := -Werror
4046+#ccflags-y := -Werror
4047
4048 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4049 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4050diff -urNp linux-3.1.1/arch/sparc/Makefile linux-3.1.1/arch/sparc/Makefile
4051--- linux-3.1.1/arch/sparc/Makefile 2011-11-11 15:19:27.000000000 -0500
4052+++ linux-3.1.1/arch/sparc/Makefile 2011-11-16 18:40:08.000000000 -0500
4053@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
4054 # Export what is needed by arch/sparc/boot/Makefile
4055 export VMLINUX_INIT VMLINUX_MAIN
4056 VMLINUX_INIT := $(head-y) $(init-y)
4057-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4058+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4059 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4060 VMLINUX_MAIN += $(drivers-y) $(net-y)
4061
4062diff -urNp linux-3.1.1/arch/sparc/mm/fault_32.c linux-3.1.1/arch/sparc/mm/fault_32.c
4063--- linux-3.1.1/arch/sparc/mm/fault_32.c 2011-11-11 15:19:27.000000000 -0500
4064+++ linux-3.1.1/arch/sparc/mm/fault_32.c 2011-11-16 18:39:07.000000000 -0500
4065@@ -22,6 +22,9 @@
4066 #include <linux/interrupt.h>
4067 #include <linux/module.h>
4068 #include <linux/kdebug.h>
4069+#include <linux/slab.h>
4070+#include <linux/pagemap.h>
4071+#include <linux/compiler.h>
4072
4073 #include <asm/system.h>
4074 #include <asm/page.h>
4075@@ -209,6 +212,268 @@ static unsigned long compute_si_addr(str
4076 return safe_compute_effective_address(regs, insn);
4077 }
4078
4079+#ifdef CONFIG_PAX_PAGEEXEC
4080+#ifdef CONFIG_PAX_DLRESOLVE
4081+static void pax_emuplt_close(struct vm_area_struct *vma)
4082+{
4083+ vma->vm_mm->call_dl_resolve = 0UL;
4084+}
4085+
4086+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4087+{
4088+ unsigned int *kaddr;
4089+
4090+ vmf->page = alloc_page(GFP_HIGHUSER);
4091+ if (!vmf->page)
4092+ return VM_FAULT_OOM;
4093+
4094+ kaddr = kmap(vmf->page);
4095+ memset(kaddr, 0, PAGE_SIZE);
4096+ kaddr[0] = 0x9DE3BFA8U; /* save */
4097+ flush_dcache_page(vmf->page);
4098+ kunmap(vmf->page);
4099+ return VM_FAULT_MAJOR;
4100+}
4101+
4102+static const struct vm_operations_struct pax_vm_ops = {
4103+ .close = pax_emuplt_close,
4104+ .fault = pax_emuplt_fault
4105+};
4106+
4107+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4108+{
4109+ int ret;
4110+
4111+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4112+ vma->vm_mm = current->mm;
4113+ vma->vm_start = addr;
4114+ vma->vm_end = addr + PAGE_SIZE;
4115+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4116+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4117+ vma->vm_ops = &pax_vm_ops;
4118+
4119+ ret = insert_vm_struct(current->mm, vma);
4120+ if (ret)
4121+ return ret;
4122+
4123+ ++current->mm->total_vm;
4124+ return 0;
4125+}
4126+#endif
4127+
4128+/*
4129+ * PaX: decide what to do with offenders (regs->pc = fault address)
4130+ *
4131+ * returns 1 when task should be killed
4132+ * 2 when patched PLT trampoline was detected
4133+ * 3 when unpatched PLT trampoline was detected
4134+ */
4135+static int pax_handle_fetch_fault(struct pt_regs *regs)
4136+{
4137+
4138+#ifdef CONFIG_PAX_EMUPLT
4139+ int err;
4140+
4141+ do { /* PaX: patched PLT emulation #1 */
4142+ unsigned int sethi1, sethi2, jmpl;
4143+
4144+ err = get_user(sethi1, (unsigned int *)regs->pc);
4145+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4146+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4147+
4148+ if (err)
4149+ break;
4150+
4151+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4152+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4153+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4154+ {
4155+ unsigned int addr;
4156+
4157+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4158+ addr = regs->u_regs[UREG_G1];
4159+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4160+ regs->pc = addr;
4161+ regs->npc = addr+4;
4162+ return 2;
4163+ }
4164+ } while (0);
4165+
4166+ { /* PaX: patched PLT emulation #2 */
4167+ unsigned int ba;
4168+
4169+ err = get_user(ba, (unsigned int *)regs->pc);
4170+
4171+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4172+ unsigned int addr;
4173+
4174+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4175+ regs->pc = addr;
4176+ regs->npc = addr+4;
4177+ return 2;
4178+ }
4179+ }
4180+
4181+ do { /* PaX: patched PLT emulation #3 */
4182+ unsigned int sethi, jmpl, nop;
4183+
4184+ err = get_user(sethi, (unsigned int *)regs->pc);
4185+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4186+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4187+
4188+ if (err)
4189+ break;
4190+
4191+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4192+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4193+ nop == 0x01000000U)
4194+ {
4195+ unsigned int addr;
4196+
4197+ addr = (sethi & 0x003FFFFFU) << 10;
4198+ regs->u_regs[UREG_G1] = addr;
4199+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4200+ regs->pc = addr;
4201+ regs->npc = addr+4;
4202+ return 2;
4203+ }
4204+ } while (0);
4205+
4206+ do { /* PaX: unpatched PLT emulation step 1 */
4207+ unsigned int sethi, ba, nop;
4208+
4209+ err = get_user(sethi, (unsigned int *)regs->pc);
4210+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
4211+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4212+
4213+ if (err)
4214+ break;
4215+
4216+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4217+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4218+ nop == 0x01000000U)
4219+ {
4220+ unsigned int addr, save, call;
4221+
4222+ if ((ba & 0xFFC00000U) == 0x30800000U)
4223+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4224+ else
4225+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4226+
4227+ err = get_user(save, (unsigned int *)addr);
4228+ err |= get_user(call, (unsigned int *)(addr+4));
4229+ err |= get_user(nop, (unsigned int *)(addr+8));
4230+ if (err)
4231+ break;
4232+
4233+#ifdef CONFIG_PAX_DLRESOLVE
4234+ if (save == 0x9DE3BFA8U &&
4235+ (call & 0xC0000000U) == 0x40000000U &&
4236+ nop == 0x01000000U)
4237+ {
4238+ struct vm_area_struct *vma;
4239+ unsigned long call_dl_resolve;
4240+
4241+ down_read(&current->mm->mmap_sem);
4242+ call_dl_resolve = current->mm->call_dl_resolve;
4243+ up_read(&current->mm->mmap_sem);
4244+ if (likely(call_dl_resolve))
4245+ goto emulate;
4246+
4247+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4248+
4249+ down_write(&current->mm->mmap_sem);
4250+ if (current->mm->call_dl_resolve) {
4251+ call_dl_resolve = current->mm->call_dl_resolve;
4252+ up_write(&current->mm->mmap_sem);
4253+ if (vma)
4254+ kmem_cache_free(vm_area_cachep, vma);
4255+ goto emulate;
4256+ }
4257+
4258+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4259+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4260+ up_write(&current->mm->mmap_sem);
4261+ if (vma)
4262+ kmem_cache_free(vm_area_cachep, vma);
4263+ return 1;
4264+ }
4265+
4266+ if (pax_insert_vma(vma, call_dl_resolve)) {
4267+ up_write(&current->mm->mmap_sem);
4268+ kmem_cache_free(vm_area_cachep, vma);
4269+ return 1;
4270+ }
4271+
4272+ current->mm->call_dl_resolve = call_dl_resolve;
4273+ up_write(&current->mm->mmap_sem);
4274+
4275+emulate:
4276+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4277+ regs->pc = call_dl_resolve;
4278+ regs->npc = addr+4;
4279+ return 3;
4280+ }
4281+#endif
4282+
4283+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4284+ if ((save & 0xFFC00000U) == 0x05000000U &&
4285+ (call & 0xFFFFE000U) == 0x85C0A000U &&
4286+ nop == 0x01000000U)
4287+ {
4288+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4289+ regs->u_regs[UREG_G2] = addr + 4;
4290+ addr = (save & 0x003FFFFFU) << 10;
4291+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4292+ regs->pc = addr;
4293+ regs->npc = addr+4;
4294+ return 3;
4295+ }
4296+ }
4297+ } while (0);
4298+
4299+ do { /* PaX: unpatched PLT emulation step 2 */
4300+ unsigned int save, call, nop;
4301+
4302+ err = get_user(save, (unsigned int *)(regs->pc-4));
4303+ err |= get_user(call, (unsigned int *)regs->pc);
4304+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
4305+ if (err)
4306+ break;
4307+
4308+ if (save == 0x9DE3BFA8U &&
4309+ (call & 0xC0000000U) == 0x40000000U &&
4310+ nop == 0x01000000U)
4311+ {
4312+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4313+
4314+ regs->u_regs[UREG_RETPC] = regs->pc;
4315+ regs->pc = dl_resolve;
4316+ regs->npc = dl_resolve+4;
4317+ return 3;
4318+ }
4319+ } while (0);
4320+#endif
4321+
4322+ return 1;
4323+}
4324+
4325+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4326+{
4327+ unsigned long i;
4328+
4329+ printk(KERN_ERR "PAX: bytes at PC: ");
4330+ for (i = 0; i < 8; i++) {
4331+ unsigned int c;
4332+ if (get_user(c, (unsigned int *)pc+i))
4333+ printk(KERN_CONT "???????? ");
4334+ else
4335+ printk(KERN_CONT "%08x ", c);
4336+ }
4337+ printk("\n");
4338+}
4339+#endif
4340+
4341 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4342 int text_fault)
4343 {
4344@@ -281,6 +546,24 @@ good_area:
4345 if(!(vma->vm_flags & VM_WRITE))
4346 goto bad_area;
4347 } else {
4348+
4349+#ifdef CONFIG_PAX_PAGEEXEC
4350+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4351+ up_read(&mm->mmap_sem);
4352+ switch (pax_handle_fetch_fault(regs)) {
4353+
4354+#ifdef CONFIG_PAX_EMUPLT
4355+ case 2:
4356+ case 3:
4357+ return;
4358+#endif
4359+
4360+ }
4361+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4362+ do_group_exit(SIGKILL);
4363+ }
4364+#endif
4365+
4366 /* Allow reads even for write-only mappings */
4367 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4368 goto bad_area;
4369diff -urNp linux-3.1.1/arch/sparc/mm/fault_64.c linux-3.1.1/arch/sparc/mm/fault_64.c
4370--- linux-3.1.1/arch/sparc/mm/fault_64.c 2011-11-11 15:19:27.000000000 -0500
4371+++ linux-3.1.1/arch/sparc/mm/fault_64.c 2011-11-16 18:40:08.000000000 -0500
4372@@ -21,6 +21,9 @@
4373 #include <linux/kprobes.h>
4374 #include <linux/kdebug.h>
4375 #include <linux/percpu.h>
4376+#include <linux/slab.h>
4377+#include <linux/pagemap.h>
4378+#include <linux/compiler.h>
4379
4380 #include <asm/page.h>
4381 #include <asm/pgtable.h>
4382@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(stru
4383 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4384 regs->tpc);
4385 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4386- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4387+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4388 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4389 dump_stack();
4390 unhandled_fault(regs->tpc, current, regs);
4391@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32b
4392 show_regs(regs);
4393 }
4394
4395+#ifdef CONFIG_PAX_PAGEEXEC
4396+#ifdef CONFIG_PAX_DLRESOLVE
4397+static void pax_emuplt_close(struct vm_area_struct *vma)
4398+{
4399+ vma->vm_mm->call_dl_resolve = 0UL;
4400+}
4401+
4402+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4403+{
4404+ unsigned int *kaddr;
4405+
4406+ vmf->page = alloc_page(GFP_HIGHUSER);
4407+ if (!vmf->page)
4408+ return VM_FAULT_OOM;
4409+
4410+ kaddr = kmap(vmf->page);
4411+ memset(kaddr, 0, PAGE_SIZE);
4412+ kaddr[0] = 0x9DE3BFA8U; /* save */
4413+ flush_dcache_page(vmf->page);
4414+ kunmap(vmf->page);
4415+ return VM_FAULT_MAJOR;
4416+}
4417+
4418+static const struct vm_operations_struct pax_vm_ops = {
4419+ .close = pax_emuplt_close,
4420+ .fault = pax_emuplt_fault
4421+};
4422+
4423+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4424+{
4425+ int ret;
4426+
4427+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4428+ vma->vm_mm = current->mm;
4429+ vma->vm_start = addr;
4430+ vma->vm_end = addr + PAGE_SIZE;
4431+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4432+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4433+ vma->vm_ops = &pax_vm_ops;
4434+
4435+ ret = insert_vm_struct(current->mm, vma);
4436+ if (ret)
4437+ return ret;
4438+
4439+ ++current->mm->total_vm;
4440+ return 0;
4441+}
4442+#endif
4443+
4444+/*
4445+ * PaX: decide what to do with offenders (regs->tpc = fault address)
4446+ *
4447+ * returns 1 when task should be killed
4448+ * 2 when patched PLT trampoline was detected
4449+ * 3 when unpatched PLT trampoline was detected
4450+ */
4451+static int pax_handle_fetch_fault(struct pt_regs *regs)
4452+{
4453+
4454+#ifdef CONFIG_PAX_EMUPLT
4455+ int err;
4456+
4457+ do { /* PaX: patched PLT emulation #1 */
4458+ unsigned int sethi1, sethi2, jmpl;
4459+
4460+ err = get_user(sethi1, (unsigned int *)regs->tpc);
4461+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4462+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4463+
4464+ if (err)
4465+ break;
4466+
4467+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4468+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4469+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4470+ {
4471+ unsigned long addr;
4472+
4473+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4474+ addr = regs->u_regs[UREG_G1];
4475+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4476+
4477+ if (test_thread_flag(TIF_32BIT))
4478+ addr &= 0xFFFFFFFFUL;
4479+
4480+ regs->tpc = addr;
4481+ regs->tnpc = addr+4;
4482+ return 2;
4483+ }
4484+ } while (0);
4485+
4486+ { /* PaX: patched PLT emulation #2 */
4487+ unsigned int ba;
4488+
4489+ err = get_user(ba, (unsigned int *)regs->tpc);
4490+
4491+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4492+ unsigned long addr;
4493+
4494+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4495+
4496+ if (test_thread_flag(TIF_32BIT))
4497+ addr &= 0xFFFFFFFFUL;
4498+
4499+ regs->tpc = addr;
4500+ regs->tnpc = addr+4;
4501+ return 2;
4502+ }
4503+ }
4504+
4505+ do { /* PaX: patched PLT emulation #3 */
4506+ unsigned int sethi, jmpl, nop;
4507+
4508+ err = get_user(sethi, (unsigned int *)regs->tpc);
4509+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
4510+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4511+
4512+ if (err)
4513+ break;
4514+
4515+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4516+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4517+ nop == 0x01000000U)
4518+ {
4519+ unsigned long addr;
4520+
4521+ addr = (sethi & 0x003FFFFFU) << 10;
4522+ regs->u_regs[UREG_G1] = addr;
4523+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4524+
4525+ if (test_thread_flag(TIF_32BIT))
4526+ addr &= 0xFFFFFFFFUL;
4527+
4528+ regs->tpc = addr;
4529+ regs->tnpc = addr+4;
4530+ return 2;
4531+ }
4532+ } while (0);
4533+
4534+ do { /* PaX: patched PLT emulation #4 */
4535+ unsigned int sethi, mov1, call, mov2;
4536+
4537+ err = get_user(sethi, (unsigned int *)regs->tpc);
4538+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
4539+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
4540+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
4541+
4542+ if (err)
4543+ break;
4544+
4545+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4546+ mov1 == 0x8210000FU &&
4547+ (call & 0xC0000000U) == 0x40000000U &&
4548+ mov2 == 0x9E100001U)
4549+ {
4550+ unsigned long addr;
4551+
4552+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
4553+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4554+
4555+ if (test_thread_flag(TIF_32BIT))
4556+ addr &= 0xFFFFFFFFUL;
4557+
4558+ regs->tpc = addr;
4559+ regs->tnpc = addr+4;
4560+ return 2;
4561+ }
4562+ } while (0);
4563+
4564+ do { /* PaX: patched PLT emulation #5 */
4565+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
4566+
4567+ err = get_user(sethi, (unsigned int *)regs->tpc);
4568+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4569+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4570+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
4571+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
4572+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
4573+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
4574+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
4575+
4576+ if (err)
4577+ break;
4578+
4579+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4580+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
4581+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4582+ (or1 & 0xFFFFE000U) == 0x82106000U &&
4583+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
4584+ sllx == 0x83287020U &&
4585+ jmpl == 0x81C04005U &&
4586+ nop == 0x01000000U)
4587+ {
4588+ unsigned long addr;
4589+
4590+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4591+ regs->u_regs[UREG_G1] <<= 32;
4592+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4593+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4594+ regs->tpc = addr;
4595+ regs->tnpc = addr+4;
4596+ return 2;
4597+ }
4598+ } while (0);
4599+
4600+ do { /* PaX: patched PLT emulation #6 */
4601+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
4602+
4603+ err = get_user(sethi, (unsigned int *)regs->tpc);
4604+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4605+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4606+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
4607+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
4608+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
4609+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
4610+
4611+ if (err)
4612+ break;
4613+
4614+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4615+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
4616+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4617+ sllx == 0x83287020U &&
4618+ (or & 0xFFFFE000U) == 0x8A116000U &&
4619+ jmpl == 0x81C04005U &&
4620+ nop == 0x01000000U)
4621+ {
4622+ unsigned long addr;
4623+
4624+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
4625+ regs->u_regs[UREG_G1] <<= 32;
4626+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
4627+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4628+ regs->tpc = addr;
4629+ regs->tnpc = addr+4;
4630+ return 2;
4631+ }
4632+ } while (0);
4633+
4634+ do { /* PaX: unpatched PLT emulation step 1 */
4635+ unsigned int sethi, ba, nop;
4636+
4637+ err = get_user(sethi, (unsigned int *)regs->tpc);
4638+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4639+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4640+
4641+ if (err)
4642+ break;
4643+
4644+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4645+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4646+ nop == 0x01000000U)
4647+ {
4648+ unsigned long addr;
4649+ unsigned int save, call;
4650+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
4651+
4652+ if ((ba & 0xFFC00000U) == 0x30800000U)
4653+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4654+ else
4655+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4656+
4657+ if (test_thread_flag(TIF_32BIT))
4658+ addr &= 0xFFFFFFFFUL;
4659+
4660+ err = get_user(save, (unsigned int *)addr);
4661+ err |= get_user(call, (unsigned int *)(addr+4));
4662+ err |= get_user(nop, (unsigned int *)(addr+8));
4663+ if (err)
4664+ break;
4665+
4666+#ifdef CONFIG_PAX_DLRESOLVE
4667+ if (save == 0x9DE3BFA8U &&
4668+ (call & 0xC0000000U) == 0x40000000U &&
4669+ nop == 0x01000000U)
4670+ {
4671+ struct vm_area_struct *vma;
4672+ unsigned long call_dl_resolve;
4673+
4674+ down_read(&current->mm->mmap_sem);
4675+ call_dl_resolve = current->mm->call_dl_resolve;
4676+ up_read(&current->mm->mmap_sem);
4677+ if (likely(call_dl_resolve))
4678+ goto emulate;
4679+
4680+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4681+
4682+ down_write(&current->mm->mmap_sem);
4683+ if (current->mm->call_dl_resolve) {
4684+ call_dl_resolve = current->mm->call_dl_resolve;
4685+ up_write(&current->mm->mmap_sem);
4686+ if (vma)
4687+ kmem_cache_free(vm_area_cachep, vma);
4688+ goto emulate;
4689+ }
4690+
4691+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4692+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4693+ up_write(&current->mm->mmap_sem);
4694+ if (vma)
4695+ kmem_cache_free(vm_area_cachep, vma);
4696+ return 1;
4697+ }
4698+
4699+ if (pax_insert_vma(vma, call_dl_resolve)) {
4700+ up_write(&current->mm->mmap_sem);
4701+ kmem_cache_free(vm_area_cachep, vma);
4702+ return 1;
4703+ }
4704+
4705+ current->mm->call_dl_resolve = call_dl_resolve;
4706+ up_write(&current->mm->mmap_sem);
4707+
4708+emulate:
4709+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4710+ regs->tpc = call_dl_resolve;
4711+ regs->tnpc = addr+4;
4712+ return 3;
4713+ }
4714+#endif
4715+
4716+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4717+ if ((save & 0xFFC00000U) == 0x05000000U &&
4718+ (call & 0xFFFFE000U) == 0x85C0A000U &&
4719+ nop == 0x01000000U)
4720+ {
4721+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4722+ regs->u_regs[UREG_G2] = addr + 4;
4723+ addr = (save & 0x003FFFFFU) << 10;
4724+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4725+
4726+ if (test_thread_flag(TIF_32BIT))
4727+ addr &= 0xFFFFFFFFUL;
4728+
4729+ regs->tpc = addr;
4730+ regs->tnpc = addr+4;
4731+ return 3;
4732+ }
4733+
4734+ /* PaX: 64-bit PLT stub */
4735+ err = get_user(sethi1, (unsigned int *)addr);
4736+ err |= get_user(sethi2, (unsigned int *)(addr+4));
4737+ err |= get_user(or1, (unsigned int *)(addr+8));
4738+ err |= get_user(or2, (unsigned int *)(addr+12));
4739+ err |= get_user(sllx, (unsigned int *)(addr+16));
4740+ err |= get_user(add, (unsigned int *)(addr+20));
4741+ err |= get_user(jmpl, (unsigned int *)(addr+24));
4742+ err |= get_user(nop, (unsigned int *)(addr+28));
4743+ if (err)
4744+ break;
4745+
4746+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
4747+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4748+ (or1 & 0xFFFFE000U) == 0x88112000U &&
4749+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
4750+ sllx == 0x89293020U &&
4751+ add == 0x8A010005U &&
4752+ jmpl == 0x89C14000U &&
4753+ nop == 0x01000000U)
4754+ {
4755+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4756+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4757+ regs->u_regs[UREG_G4] <<= 32;
4758+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4759+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
4760+ regs->u_regs[UREG_G4] = addr + 24;
4761+ addr = regs->u_regs[UREG_G5];
4762+ regs->tpc = addr;
4763+ regs->tnpc = addr+4;
4764+ return 3;
4765+ }
4766+ }
4767+ } while (0);
4768+
4769+#ifdef CONFIG_PAX_DLRESOLVE
4770+ do { /* PaX: unpatched PLT emulation step 2 */
4771+ unsigned int save, call, nop;
4772+
4773+ err = get_user(save, (unsigned int *)(regs->tpc-4));
4774+ err |= get_user(call, (unsigned int *)regs->tpc);
4775+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
4776+ if (err)
4777+ break;
4778+
4779+ if (save == 0x9DE3BFA8U &&
4780+ (call & 0xC0000000U) == 0x40000000U &&
4781+ nop == 0x01000000U)
4782+ {
4783+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4784+
4785+ if (test_thread_flag(TIF_32BIT))
4786+ dl_resolve &= 0xFFFFFFFFUL;
4787+
4788+ regs->u_regs[UREG_RETPC] = regs->tpc;
4789+ regs->tpc = dl_resolve;
4790+ regs->tnpc = dl_resolve+4;
4791+ return 3;
4792+ }
4793+ } while (0);
4794+#endif
4795+
4796+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
4797+ unsigned int sethi, ba, nop;
4798+
4799+ err = get_user(sethi, (unsigned int *)regs->tpc);
4800+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4801+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4802+
4803+ if (err)
4804+ break;
4805+
4806+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4807+ (ba & 0xFFF00000U) == 0x30600000U &&
4808+ nop == 0x01000000U)
4809+ {
4810+ unsigned long addr;
4811+
4812+ addr = (sethi & 0x003FFFFFU) << 10;
4813+ regs->u_regs[UREG_G1] = addr;
4814+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4815+
4816+ if (test_thread_flag(TIF_32BIT))
4817+ addr &= 0xFFFFFFFFUL;
4818+
4819+ regs->tpc = addr;
4820+ regs->tnpc = addr+4;
4821+ return 2;
4822+ }
4823+ } while (0);
4824+
4825+#endif
4826+
4827+ return 1;
4828+}
4829+
4830+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4831+{
4832+ unsigned long i;
4833+
4834+ printk(KERN_ERR "PAX: bytes at PC: ");
4835+ for (i = 0; i < 8; i++) {
4836+ unsigned int c;
4837+ if (get_user(c, (unsigned int *)pc+i))
4838+ printk(KERN_CONT "???????? ");
4839+ else
4840+ printk(KERN_CONT "%08x ", c);
4841+ }
4842+ printk("\n");
4843+}
4844+#endif
4845+
4846 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
4847 {
4848 struct mm_struct *mm = current->mm;
4849@@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fau
4850 if (!vma)
4851 goto bad_area;
4852
4853+#ifdef CONFIG_PAX_PAGEEXEC
4854+ /* PaX: detect ITLB misses on non-exec pages */
4855+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
4856+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
4857+ {
4858+ if (address != regs->tpc)
4859+ goto good_area;
4860+
4861+ up_read(&mm->mmap_sem);
4862+ switch (pax_handle_fetch_fault(regs)) {
4863+
4864+#ifdef CONFIG_PAX_EMUPLT
4865+ case 2:
4866+ case 3:
4867+ return;
4868+#endif
4869+
4870+ }
4871+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
4872+ do_group_exit(SIGKILL);
4873+ }
4874+#endif
4875+
4876 /* Pure DTLB misses do not tell us whether the fault causing
4877 * load/store/atomic was a write or not, it only says that there
4878 * was no match. So in such a case we (carefully) read the
4879diff -urNp linux-3.1.1/arch/sparc/mm/hugetlbpage.c linux-3.1.1/arch/sparc/mm/hugetlbpage.c
4880--- linux-3.1.1/arch/sparc/mm/hugetlbpage.c 2011-11-11 15:19:27.000000000 -0500
4881+++ linux-3.1.1/arch/sparc/mm/hugetlbpage.c 2011-11-16 18:39:07.000000000 -0500
4882@@ -68,7 +68,7 @@ full_search:
4883 }
4884 return -ENOMEM;
4885 }
4886- if (likely(!vma || addr + len <= vma->vm_start)) {
4887+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4888 /*
4889 * Remember the place where we stopped the search:
4890 */
4891@@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct
4892 /* make sure it can fit in the remaining address space */
4893 if (likely(addr > len)) {
4894 vma = find_vma(mm, addr-len);
4895- if (!vma || addr <= vma->vm_start) {
4896+ if (check_heap_stack_gap(vma, addr - len, len)) {
4897 /* remember the address as a hint for next time */
4898 return (mm->free_area_cache = addr-len);
4899 }
4900@@ -116,16 +116,17 @@ hugetlb_get_unmapped_area_topdown(struct
4901 if (unlikely(mm->mmap_base < len))
4902 goto bottomup;
4903
4904- addr = (mm->mmap_base-len) & HPAGE_MASK;
4905+ addr = mm->mmap_base - len;
4906
4907 do {
4908+ addr &= HPAGE_MASK;
4909 /*
4910 * Lookup failure means no vma is above this address,
4911 * else if new region fits below vma->vm_start,
4912 * return with success:
4913 */
4914 vma = find_vma(mm, addr);
4915- if (likely(!vma || addr+len <= vma->vm_start)) {
4916+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4917 /* remember the address as a hint for next time */
4918 return (mm->free_area_cache = addr);
4919 }
4920@@ -135,8 +136,8 @@ hugetlb_get_unmapped_area_topdown(struct
4921 mm->cached_hole_size = vma->vm_start - addr;
4922
4923 /* try just below the current vma->vm_start */
4924- addr = (vma->vm_start-len) & HPAGE_MASK;
4925- } while (likely(len < vma->vm_start));
4926+ addr = skip_heap_stack_gap(vma, len);
4927+ } while (!IS_ERR_VALUE(addr));
4928
4929 bottomup:
4930 /*
4931@@ -182,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *f
4932 if (addr) {
4933 addr = ALIGN(addr, HPAGE_SIZE);
4934 vma = find_vma(mm, addr);
4935- if (task_size - len >= addr &&
4936- (!vma || addr + len <= vma->vm_start))
4937+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4938 return addr;
4939 }
4940 if (mm->get_unmapped_area == arch_get_unmapped_area)
4941diff -urNp linux-3.1.1/arch/sparc/mm/init_32.c linux-3.1.1/arch/sparc/mm/init_32.c
4942--- linux-3.1.1/arch/sparc/mm/init_32.c 2011-11-11 15:19:27.000000000 -0500
4943+++ linux-3.1.1/arch/sparc/mm/init_32.c 2011-11-16 18:39:07.000000000 -0500
4944@@ -316,6 +316,9 @@ extern void device_scan(void);
4945 pgprot_t PAGE_SHARED __read_mostly;
4946 EXPORT_SYMBOL(PAGE_SHARED);
4947
4948+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
4949+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
4950+
4951 void __init paging_init(void)
4952 {
4953 switch(sparc_cpu_model) {
4954@@ -344,17 +347,17 @@ void __init paging_init(void)
4955
4956 /* Initialize the protection map with non-constant, MMU dependent values. */
4957 protection_map[0] = PAGE_NONE;
4958- protection_map[1] = PAGE_READONLY;
4959- protection_map[2] = PAGE_COPY;
4960- protection_map[3] = PAGE_COPY;
4961+ protection_map[1] = PAGE_READONLY_NOEXEC;
4962+ protection_map[2] = PAGE_COPY_NOEXEC;
4963+ protection_map[3] = PAGE_COPY_NOEXEC;
4964 protection_map[4] = PAGE_READONLY;
4965 protection_map[5] = PAGE_READONLY;
4966 protection_map[6] = PAGE_COPY;
4967 protection_map[7] = PAGE_COPY;
4968 protection_map[8] = PAGE_NONE;
4969- protection_map[9] = PAGE_READONLY;
4970- protection_map[10] = PAGE_SHARED;
4971- protection_map[11] = PAGE_SHARED;
4972+ protection_map[9] = PAGE_READONLY_NOEXEC;
4973+ protection_map[10] = PAGE_SHARED_NOEXEC;
4974+ protection_map[11] = PAGE_SHARED_NOEXEC;
4975 protection_map[12] = PAGE_READONLY;
4976 protection_map[13] = PAGE_READONLY;
4977 protection_map[14] = PAGE_SHARED;
4978diff -urNp linux-3.1.1/arch/sparc/mm/Makefile linux-3.1.1/arch/sparc/mm/Makefile
4979--- linux-3.1.1/arch/sparc/mm/Makefile 2011-11-11 15:19:27.000000000 -0500
4980+++ linux-3.1.1/arch/sparc/mm/Makefile 2011-11-16 18:39:07.000000000 -0500
4981@@ -2,7 +2,7 @@
4982 #
4983
4984 asflags-y := -ansi
4985-ccflags-y := -Werror
4986+#ccflags-y := -Werror
4987
4988 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
4989 obj-y += fault_$(BITS).o
4990diff -urNp linux-3.1.1/arch/sparc/mm/srmmu.c linux-3.1.1/arch/sparc/mm/srmmu.c
4991--- linux-3.1.1/arch/sparc/mm/srmmu.c 2011-11-11 15:19:27.000000000 -0500
4992+++ linux-3.1.1/arch/sparc/mm/srmmu.c 2011-11-16 18:39:07.000000000 -0500
4993@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
4994 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
4995 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
4996 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
4997+
4998+#ifdef CONFIG_PAX_PAGEEXEC
4999+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5000+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5001+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5002+#endif
5003+
5004 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5005 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5006
5007diff -urNp linux-3.1.1/arch/um/include/asm/kmap_types.h linux-3.1.1/arch/um/include/asm/kmap_types.h
5008--- linux-3.1.1/arch/um/include/asm/kmap_types.h 2011-11-11 15:19:27.000000000 -0500
5009+++ linux-3.1.1/arch/um/include/asm/kmap_types.h 2011-11-16 18:39:07.000000000 -0500
5010@@ -23,6 +23,7 @@ enum km_type {
5011 KM_IRQ1,
5012 KM_SOFTIRQ0,
5013 KM_SOFTIRQ1,
5014+ KM_CLEARPAGE,
5015 KM_TYPE_NR
5016 };
5017
5018diff -urNp linux-3.1.1/arch/um/include/asm/page.h linux-3.1.1/arch/um/include/asm/page.h
5019--- linux-3.1.1/arch/um/include/asm/page.h 2011-11-11 15:19:27.000000000 -0500
5020+++ linux-3.1.1/arch/um/include/asm/page.h 2011-11-16 18:39:07.000000000 -0500
5021@@ -14,6 +14,9 @@
5022 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5023 #define PAGE_MASK (~(PAGE_SIZE-1))
5024
5025+#define ktla_ktva(addr) (addr)
5026+#define ktva_ktla(addr) (addr)
5027+
5028 #ifndef __ASSEMBLY__
5029
5030 struct page;
5031diff -urNp linux-3.1.1/arch/um/kernel/process.c linux-3.1.1/arch/um/kernel/process.c
5032--- linux-3.1.1/arch/um/kernel/process.c 2011-11-11 15:19:27.000000000 -0500
5033+++ linux-3.1.1/arch/um/kernel/process.c 2011-11-16 18:39:07.000000000 -0500
5034@@ -404,22 +404,6 @@ int singlestepping(void * t)
5035 return 2;
5036 }
5037
5038-/*
5039- * Only x86 and x86_64 have an arch_align_stack().
5040- * All other arches have "#define arch_align_stack(x) (x)"
5041- * in their asm/system.h
5042- * As this is included in UML from asm-um/system-generic.h,
5043- * we can use it to behave as the subarch does.
5044- */
5045-#ifndef arch_align_stack
5046-unsigned long arch_align_stack(unsigned long sp)
5047-{
5048- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5049- sp -= get_random_int() % 8192;
5050- return sp & ~0xf;
5051-}
5052-#endif
5053-
5054 unsigned long get_wchan(struct task_struct *p)
5055 {
5056 unsigned long stack_page, sp, ip;
5057diff -urNp linux-3.1.1/arch/um/Makefile linux-3.1.1/arch/um/Makefile
5058--- linux-3.1.1/arch/um/Makefile 2011-11-11 15:19:27.000000000 -0500
5059+++ linux-3.1.1/arch/um/Makefile 2011-11-16 18:39:07.000000000 -0500
5060@@ -49,6 +49,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINE
5061 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
5062 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64
5063
5064+ifdef CONSTIFY_PLUGIN
5065+USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5066+endif
5067+
5068 include $(srctree)/$(ARCH_DIR)/Makefile-$(SUBARCH)
5069
5070 #This will adjust *FLAGS accordingly to the platform.
5071diff -urNp linux-3.1.1/arch/um/sys-i386/shared/sysdep/system.h linux-3.1.1/arch/um/sys-i386/shared/sysdep/system.h
5072--- linux-3.1.1/arch/um/sys-i386/shared/sysdep/system.h 2011-11-11 15:19:27.000000000 -0500
5073+++ linux-3.1.1/arch/um/sys-i386/shared/sysdep/system.h 2011-11-16 18:39:07.000000000 -0500
5074@@ -17,7 +17,7 @@
5075 # define AT_VECTOR_SIZE_ARCH 1
5076 #endif
5077
5078-extern unsigned long arch_align_stack(unsigned long sp);
5079+#define arch_align_stack(x) ((x) & ~0xfUL)
5080
5081 void default_idle(void);
5082
5083diff -urNp linux-3.1.1/arch/um/sys-i386/syscalls.c linux-3.1.1/arch/um/sys-i386/syscalls.c
5084--- linux-3.1.1/arch/um/sys-i386/syscalls.c 2011-11-11 15:19:27.000000000 -0500
5085+++ linux-3.1.1/arch/um/sys-i386/syscalls.c 2011-11-16 18:39:07.000000000 -0500
5086@@ -11,6 +11,21 @@
5087 #include "asm/uaccess.h"
5088 #include "asm/unistd.h"
5089
5090+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5091+{
5092+ unsigned long pax_task_size = TASK_SIZE;
5093+
5094+#ifdef CONFIG_PAX_SEGMEXEC
5095+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5096+ pax_task_size = SEGMEXEC_TASK_SIZE;
5097+#endif
5098+
5099+ if (len > pax_task_size || addr > pax_task_size - len)
5100+ return -EINVAL;
5101+
5102+ return 0;
5103+}
5104+
5105 /*
5106 * The prototype on i386 is:
5107 *
5108diff -urNp linux-3.1.1/arch/um/sys-x86_64/shared/sysdep/system.h linux-3.1.1/arch/um/sys-x86_64/shared/sysdep/system.h
5109--- linux-3.1.1/arch/um/sys-x86_64/shared/sysdep/system.h 2011-11-11 15:19:27.000000000 -0500
5110+++ linux-3.1.1/arch/um/sys-x86_64/shared/sysdep/system.h 2011-11-16 18:39:07.000000000 -0500
5111@@ -17,7 +17,7 @@
5112 # define AT_VECTOR_SIZE_ARCH 1
5113 #endif
5114
5115-extern unsigned long arch_align_stack(unsigned long sp);
5116+#define arch_align_stack(x) ((x) & ~0xfUL)
5117
5118 void default_idle(void);
5119
5120diff -urNp linux-3.1.1/arch/x86/boot/bitops.h linux-3.1.1/arch/x86/boot/bitops.h
5121--- linux-3.1.1/arch/x86/boot/bitops.h 2011-11-11 15:19:27.000000000 -0500
5122+++ linux-3.1.1/arch/x86/boot/bitops.h 2011-11-16 18:39:07.000000000 -0500
5123@@ -26,7 +26,7 @@ static inline int variable_test_bit(int
5124 u8 v;
5125 const u32 *p = (const u32 *)addr;
5126
5127- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5128+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5129 return v;
5130 }
5131
5132@@ -37,7 +37,7 @@ static inline int variable_test_bit(int
5133
5134 static inline void set_bit(int nr, void *addr)
5135 {
5136- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5137+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5138 }
5139
5140 #endif /* BOOT_BITOPS_H */
5141diff -urNp linux-3.1.1/arch/x86/boot/boot.h linux-3.1.1/arch/x86/boot/boot.h
5142--- linux-3.1.1/arch/x86/boot/boot.h 2011-11-11 15:19:27.000000000 -0500
5143+++ linux-3.1.1/arch/x86/boot/boot.h 2011-11-16 18:39:07.000000000 -0500
5144@@ -85,7 +85,7 @@ static inline void io_delay(void)
5145 static inline u16 ds(void)
5146 {
5147 u16 seg;
5148- asm("movw %%ds,%0" : "=rm" (seg));
5149+ asm volatile("movw %%ds,%0" : "=rm" (seg));
5150 return seg;
5151 }
5152
5153@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t
5154 static inline int memcmp(const void *s1, const void *s2, size_t len)
5155 {
5156 u8 diff;
5157- asm("repe; cmpsb; setnz %0"
5158+ asm volatile("repe; cmpsb; setnz %0"
5159 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5160 return diff;
5161 }
5162diff -urNp linux-3.1.1/arch/x86/boot/compressed/head_32.S linux-3.1.1/arch/x86/boot/compressed/head_32.S
5163--- linux-3.1.1/arch/x86/boot/compressed/head_32.S 2011-11-11 15:19:27.000000000 -0500
5164+++ linux-3.1.1/arch/x86/boot/compressed/head_32.S 2011-11-16 18:39:07.000000000 -0500
5165@@ -76,7 +76,7 @@ ENTRY(startup_32)
5166 notl %eax
5167 andl %eax, %ebx
5168 #else
5169- movl $LOAD_PHYSICAL_ADDR, %ebx
5170+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5171 #endif
5172
5173 /* Target address to relocate to for decompression */
5174@@ -162,7 +162,7 @@ relocated:
5175 * and where it was actually loaded.
5176 */
5177 movl %ebp, %ebx
5178- subl $LOAD_PHYSICAL_ADDR, %ebx
5179+ subl $____LOAD_PHYSICAL_ADDR, %ebx
5180 jz 2f /* Nothing to be done if loaded at compiled addr. */
5181 /*
5182 * Process relocations.
5183@@ -170,8 +170,7 @@ relocated:
5184
5185 1: subl $4, %edi
5186 movl (%edi), %ecx
5187- testl %ecx, %ecx
5188- jz 2f
5189+ jecxz 2f
5190 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5191 jmp 1b
5192 2:
5193diff -urNp linux-3.1.1/arch/x86/boot/compressed/head_64.S linux-3.1.1/arch/x86/boot/compressed/head_64.S
5194--- linux-3.1.1/arch/x86/boot/compressed/head_64.S 2011-11-11 15:19:27.000000000 -0500
5195+++ linux-3.1.1/arch/x86/boot/compressed/head_64.S 2011-11-16 18:39:07.000000000 -0500
5196@@ -91,7 +91,7 @@ ENTRY(startup_32)
5197 notl %eax
5198 andl %eax, %ebx
5199 #else
5200- movl $LOAD_PHYSICAL_ADDR, %ebx
5201+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5202 #endif
5203
5204 /* Target address to relocate to for decompression */
5205@@ -233,7 +233,7 @@ ENTRY(startup_64)
5206 notq %rax
5207 andq %rax, %rbp
5208 #else
5209- movq $LOAD_PHYSICAL_ADDR, %rbp
5210+ movq $____LOAD_PHYSICAL_ADDR, %rbp
5211 #endif
5212
5213 /* Target address to relocate to for decompression */
5214diff -urNp linux-3.1.1/arch/x86/boot/compressed/Makefile linux-3.1.1/arch/x86/boot/compressed/Makefile
5215--- linux-3.1.1/arch/x86/boot/compressed/Makefile 2011-11-11 15:19:27.000000000 -0500
5216+++ linux-3.1.1/arch/x86/boot/compressed/Makefile 2011-11-16 18:39:07.000000000 -0500
5217@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
5218 KBUILD_CFLAGS += $(cflags-y)
5219 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5220 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5221+ifdef CONSTIFY_PLUGIN
5222+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5223+endif
5224
5225 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5226 GCOV_PROFILE := n
5227diff -urNp linux-3.1.1/arch/x86/boot/compressed/misc.c linux-3.1.1/arch/x86/boot/compressed/misc.c
5228--- linux-3.1.1/arch/x86/boot/compressed/misc.c 2011-11-11 15:19:27.000000000 -0500
5229+++ linux-3.1.1/arch/x86/boot/compressed/misc.c 2011-11-16 18:39:07.000000000 -0500
5230@@ -310,7 +310,7 @@ static void parse_elf(void *output)
5231 case PT_LOAD:
5232 #ifdef CONFIG_RELOCATABLE
5233 dest = output;
5234- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5235+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5236 #else
5237 dest = (void *)(phdr->p_paddr);
5238 #endif
5239@@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *
5240 error("Destination address too large");
5241 #endif
5242 #ifndef CONFIG_RELOCATABLE
5243- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5244+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5245 error("Wrong destination address");
5246 #endif
5247
5248diff -urNp linux-3.1.1/arch/x86/boot/compressed/relocs.c linux-3.1.1/arch/x86/boot/compressed/relocs.c
5249--- linux-3.1.1/arch/x86/boot/compressed/relocs.c 2011-11-11 15:19:27.000000000 -0500
5250+++ linux-3.1.1/arch/x86/boot/compressed/relocs.c 2011-11-16 18:39:07.000000000 -0500
5251@@ -13,8 +13,11 @@
5252
5253 static void die(char *fmt, ...);
5254
5255+#include "../../../../include/generated/autoconf.h"
5256+
5257 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5258 static Elf32_Ehdr ehdr;
5259+static Elf32_Phdr *phdr;
5260 static unsigned long reloc_count, reloc_idx;
5261 static unsigned long *relocs;
5262
5263@@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5264 }
5265 }
5266
5267+static void read_phdrs(FILE *fp)
5268+{
5269+ unsigned int i;
5270+
5271+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5272+ if (!phdr) {
5273+ die("Unable to allocate %d program headers\n",
5274+ ehdr.e_phnum);
5275+ }
5276+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5277+ die("Seek to %d failed: %s\n",
5278+ ehdr.e_phoff, strerror(errno));
5279+ }
5280+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5281+ die("Cannot read ELF program headers: %s\n",
5282+ strerror(errno));
5283+ }
5284+ for(i = 0; i < ehdr.e_phnum; i++) {
5285+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5286+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5287+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5288+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5289+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5290+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5291+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5292+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5293+ }
5294+
5295+}
5296+
5297 static void read_shdrs(FILE *fp)
5298 {
5299- int i;
5300+ unsigned int i;
5301 Elf32_Shdr shdr;
5302
5303 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5304@@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5305
5306 static void read_strtabs(FILE *fp)
5307 {
5308- int i;
5309+ unsigned int i;
5310 for (i = 0; i < ehdr.e_shnum; i++) {
5311 struct section *sec = &secs[i];
5312 if (sec->shdr.sh_type != SHT_STRTAB) {
5313@@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5314
5315 static void read_symtabs(FILE *fp)
5316 {
5317- int i,j;
5318+ unsigned int i,j;
5319 for (i = 0; i < ehdr.e_shnum; i++) {
5320 struct section *sec = &secs[i];
5321 if (sec->shdr.sh_type != SHT_SYMTAB) {
5322@@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5323
5324 static void read_relocs(FILE *fp)
5325 {
5326- int i,j;
5327+ unsigned int i,j;
5328+ uint32_t base;
5329+
5330 for (i = 0; i < ehdr.e_shnum; i++) {
5331 struct section *sec = &secs[i];
5332 if (sec->shdr.sh_type != SHT_REL) {
5333@@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5334 die("Cannot read symbol table: %s\n",
5335 strerror(errno));
5336 }
5337+ base = 0;
5338+ for (j = 0; j < ehdr.e_phnum; j++) {
5339+ if (phdr[j].p_type != PT_LOAD )
5340+ continue;
5341+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5342+ continue;
5343+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
5344+ break;
5345+ }
5346 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
5347 Elf32_Rel *rel = &sec->reltab[j];
5348- rel->r_offset = elf32_to_cpu(rel->r_offset);
5349+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
5350 rel->r_info = elf32_to_cpu(rel->r_info);
5351 }
5352 }
5353@@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
5354
5355 static void print_absolute_symbols(void)
5356 {
5357- int i;
5358+ unsigned int i;
5359 printf("Absolute symbols\n");
5360 printf(" Num: Value Size Type Bind Visibility Name\n");
5361 for (i = 0; i < ehdr.e_shnum; i++) {
5362 struct section *sec = &secs[i];
5363 char *sym_strtab;
5364 Elf32_Sym *sh_symtab;
5365- int j;
5366+ unsigned int j;
5367
5368 if (sec->shdr.sh_type != SHT_SYMTAB) {
5369 continue;
5370@@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
5371
5372 static void print_absolute_relocs(void)
5373 {
5374- int i, printed = 0;
5375+ unsigned int i, printed = 0;
5376
5377 for (i = 0; i < ehdr.e_shnum; i++) {
5378 struct section *sec = &secs[i];
5379 struct section *sec_applies, *sec_symtab;
5380 char *sym_strtab;
5381 Elf32_Sym *sh_symtab;
5382- int j;
5383+ unsigned int j;
5384 if (sec->shdr.sh_type != SHT_REL) {
5385 continue;
5386 }
5387@@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
5388
5389 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
5390 {
5391- int i;
5392+ unsigned int i;
5393 /* Walk through the relocations */
5394 for (i = 0; i < ehdr.e_shnum; i++) {
5395 char *sym_strtab;
5396 Elf32_Sym *sh_symtab;
5397 struct section *sec_applies, *sec_symtab;
5398- int j;
5399+ unsigned int j;
5400 struct section *sec = &secs[i];
5401
5402 if (sec->shdr.sh_type != SHT_REL) {
5403@@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(El
5404 !is_rel_reloc(sym_name(sym_strtab, sym))) {
5405 continue;
5406 }
5407+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
5408+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
5409+ continue;
5410+
5411+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
5412+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
5413+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
5414+ continue;
5415+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
5416+ continue;
5417+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
5418+ continue;
5419+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
5420+ continue;
5421+#endif
5422+
5423 switch (r_type) {
5424 case R_386_NONE:
5425 case R_386_PC32:
5426@@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, co
5427
5428 static void emit_relocs(int as_text)
5429 {
5430- int i;
5431+ unsigned int i;
5432 /* Count how many relocations I have and allocate space for them. */
5433 reloc_count = 0;
5434 walk_relocs(count_reloc);
5435@@ -665,6 +725,7 @@ int main(int argc, char **argv)
5436 fname, strerror(errno));
5437 }
5438 read_ehdr(fp);
5439+ read_phdrs(fp);
5440 read_shdrs(fp);
5441 read_strtabs(fp);
5442 read_symtabs(fp);
5443diff -urNp linux-3.1.1/arch/x86/boot/cpucheck.c linux-3.1.1/arch/x86/boot/cpucheck.c
5444--- linux-3.1.1/arch/x86/boot/cpucheck.c 2011-11-11 15:19:27.000000000 -0500
5445+++ linux-3.1.1/arch/x86/boot/cpucheck.c 2011-11-16 18:39:07.000000000 -0500
5446@@ -74,7 +74,7 @@ static int has_fpu(void)
5447 u16 fcw = -1, fsw = -1;
5448 u32 cr0;
5449
5450- asm("movl %%cr0,%0" : "=r" (cr0));
5451+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
5452 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
5453 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
5454 asm volatile("movl %0,%%cr0" : : "r" (cr0));
5455@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
5456 {
5457 u32 f0, f1;
5458
5459- asm("pushfl ; "
5460+ asm volatile("pushfl ; "
5461 "pushfl ; "
5462 "popl %0 ; "
5463 "movl %0,%1 ; "
5464@@ -115,7 +115,7 @@ static void get_flags(void)
5465 set_bit(X86_FEATURE_FPU, cpu.flags);
5466
5467 if (has_eflag(X86_EFLAGS_ID)) {
5468- asm("cpuid"
5469+ asm volatile("cpuid"
5470 : "=a" (max_intel_level),
5471 "=b" (cpu_vendor[0]),
5472 "=d" (cpu_vendor[1]),
5473@@ -124,7 +124,7 @@ static void get_flags(void)
5474
5475 if (max_intel_level >= 0x00000001 &&
5476 max_intel_level <= 0x0000ffff) {
5477- asm("cpuid"
5478+ asm volatile("cpuid"
5479 : "=a" (tfms),
5480 "=c" (cpu.flags[4]),
5481 "=d" (cpu.flags[0])
5482@@ -136,7 +136,7 @@ static void get_flags(void)
5483 cpu.model += ((tfms >> 16) & 0xf) << 4;
5484 }
5485
5486- asm("cpuid"
5487+ asm volatile("cpuid"
5488 : "=a" (max_amd_level)
5489 : "a" (0x80000000)
5490 : "ebx", "ecx", "edx");
5491@@ -144,7 +144,7 @@ static void get_flags(void)
5492 if (max_amd_level >= 0x80000001 &&
5493 max_amd_level <= 0x8000ffff) {
5494 u32 eax = 0x80000001;
5495- asm("cpuid"
5496+ asm volatile("cpuid"
5497 : "+a" (eax),
5498 "=c" (cpu.flags[6]),
5499 "=d" (cpu.flags[1])
5500@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5501 u32 ecx = MSR_K7_HWCR;
5502 u32 eax, edx;
5503
5504- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5505+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5506 eax &= ~(1 << 15);
5507- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5508+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5509
5510 get_flags(); /* Make sure it really did something */
5511 err = check_flags();
5512@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5513 u32 ecx = MSR_VIA_FCR;
5514 u32 eax, edx;
5515
5516- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5517+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5518 eax |= (1<<1)|(1<<7);
5519- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5520+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5521
5522 set_bit(X86_FEATURE_CX8, cpu.flags);
5523 err = check_flags();
5524@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
5525 u32 eax, edx;
5526 u32 level = 1;
5527
5528- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5529- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5530- asm("cpuid"
5531+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5532+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5533+ asm volatile("cpuid"
5534 : "+a" (level), "=d" (cpu.flags[0])
5535 : : "ecx", "ebx");
5536- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5537+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5538
5539 err = check_flags();
5540 }
5541diff -urNp linux-3.1.1/arch/x86/boot/header.S linux-3.1.1/arch/x86/boot/header.S
5542--- linux-3.1.1/arch/x86/boot/header.S 2011-11-11 15:19:27.000000000 -0500
5543+++ linux-3.1.1/arch/x86/boot/header.S 2011-11-16 18:39:07.000000000 -0500
5544@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
5545 # single linked list of
5546 # struct setup_data
5547
5548-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
5549+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
5550
5551 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
5552 #define VO_INIT_SIZE (VO__end - VO__text)
5553diff -urNp linux-3.1.1/arch/x86/boot/Makefile linux-3.1.1/arch/x86/boot/Makefile
5554--- linux-3.1.1/arch/x86/boot/Makefile 2011-11-11 15:19:27.000000000 -0500
5555+++ linux-3.1.1/arch/x86/boot/Makefile 2011-11-16 18:39:07.000000000 -0500
5556@@ -63,6 +63,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
5557 $(call cc-option, -fno-stack-protector) \
5558 $(call cc-option, -mpreferred-stack-boundary=2)
5559 KBUILD_CFLAGS += $(call cc-option, -m32)
5560+ifdef CONSTIFY_PLUGIN
5561+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5562+endif
5563 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5564 GCOV_PROFILE := n
5565
5566diff -urNp linux-3.1.1/arch/x86/boot/memory.c linux-3.1.1/arch/x86/boot/memory.c
5567--- linux-3.1.1/arch/x86/boot/memory.c 2011-11-11 15:19:27.000000000 -0500
5568+++ linux-3.1.1/arch/x86/boot/memory.c 2011-11-16 18:39:07.000000000 -0500
5569@@ -19,7 +19,7 @@
5570
5571 static int detect_memory_e820(void)
5572 {
5573- int count = 0;
5574+ unsigned int count = 0;
5575 struct biosregs ireg, oreg;
5576 struct e820entry *desc = boot_params.e820_map;
5577 static struct e820entry buf; /* static so it is zeroed */
5578diff -urNp linux-3.1.1/arch/x86/boot/video.c linux-3.1.1/arch/x86/boot/video.c
5579--- linux-3.1.1/arch/x86/boot/video.c 2011-11-11 15:19:27.000000000 -0500
5580+++ linux-3.1.1/arch/x86/boot/video.c 2011-11-16 18:39:07.000000000 -0500
5581@@ -96,7 +96,7 @@ static void store_mode_params(void)
5582 static unsigned int get_entry(void)
5583 {
5584 char entry_buf[4];
5585- int i, len = 0;
5586+ unsigned int i, len = 0;
5587 int key;
5588 unsigned int v;
5589
5590diff -urNp linux-3.1.1/arch/x86/boot/video-vesa.c linux-3.1.1/arch/x86/boot/video-vesa.c
5591--- linux-3.1.1/arch/x86/boot/video-vesa.c 2011-11-11 15:19:27.000000000 -0500
5592+++ linux-3.1.1/arch/x86/boot/video-vesa.c 2011-11-16 18:39:07.000000000 -0500
5593@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
5594
5595 boot_params.screen_info.vesapm_seg = oreg.es;
5596 boot_params.screen_info.vesapm_off = oreg.di;
5597+ boot_params.screen_info.vesapm_size = oreg.cx;
5598 }
5599
5600 /*
5601diff -urNp linux-3.1.1/arch/x86/crypto/aes-x86_64-asm_64.S linux-3.1.1/arch/x86/crypto/aes-x86_64-asm_64.S
5602--- linux-3.1.1/arch/x86/crypto/aes-x86_64-asm_64.S 2011-11-11 15:19:27.000000000 -0500
5603+++ linux-3.1.1/arch/x86/crypto/aes-x86_64-asm_64.S 2011-11-16 18:39:07.000000000 -0500
5604@@ -8,6 +8,8 @@
5605 * including this sentence is retained in full.
5606 */
5607
5608+#include <asm/alternative-asm.h>
5609+
5610 .extern crypto_ft_tab
5611 .extern crypto_it_tab
5612 .extern crypto_fl_tab
5613@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
5614 je B192; \
5615 leaq 32(r9),r9;
5616
5617+#define ret pax_force_retaddr; ret
5618+
5619 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
5620 movq r1,r2; \
5621 movq r3,r4; \
5622diff -urNp linux-3.1.1/arch/x86/crypto/salsa20-x86_64-asm_64.S linux-3.1.1/arch/x86/crypto/salsa20-x86_64-asm_64.S
5623--- linux-3.1.1/arch/x86/crypto/salsa20-x86_64-asm_64.S 2011-11-11 15:19:27.000000000 -0500
5624+++ linux-3.1.1/arch/x86/crypto/salsa20-x86_64-asm_64.S 2011-11-16 18:39:07.000000000 -0500
5625@@ -1,3 +1,5 @@
5626+#include <asm/alternative-asm.h>
5627+
5628 # enter ECRYPT_encrypt_bytes
5629 .text
5630 .p2align 5
5631@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
5632 add %r11,%rsp
5633 mov %rdi,%rax
5634 mov %rsi,%rdx
5635+ pax_force_retaddr
5636 ret
5637 # bytesatleast65:
5638 ._bytesatleast65:
5639@@ -891,6 +894,7 @@ ECRYPT_keysetup:
5640 add %r11,%rsp
5641 mov %rdi,%rax
5642 mov %rsi,%rdx
5643+ pax_force_retaddr
5644 ret
5645 # enter ECRYPT_ivsetup
5646 .text
5647@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
5648 add %r11,%rsp
5649 mov %rdi,%rax
5650 mov %rsi,%rdx
5651+ pax_force_retaddr
5652 ret
5653diff -urNp linux-3.1.1/arch/x86/crypto/twofish-x86_64-asm_64.S linux-3.1.1/arch/x86/crypto/twofish-x86_64-asm_64.S
5654--- linux-3.1.1/arch/x86/crypto/twofish-x86_64-asm_64.S 2011-11-11 15:19:27.000000000 -0500
5655+++ linux-3.1.1/arch/x86/crypto/twofish-x86_64-asm_64.S 2011-11-16 18:39:07.000000000 -0500
5656@@ -21,6 +21,7 @@
5657 .text
5658
5659 #include <asm/asm-offsets.h>
5660+#include <asm/alternative-asm.h>
5661
5662 #define a_offset 0
5663 #define b_offset 4
5664@@ -269,6 +270,7 @@ twofish_enc_blk:
5665
5666 popq R1
5667 movq $1,%rax
5668+ pax_force_retaddr
5669 ret
5670
5671 twofish_dec_blk:
5672@@ -321,4 +323,5 @@ twofish_dec_blk:
5673
5674 popq R1
5675 movq $1,%rax
5676+ pax_force_retaddr
5677 ret
5678diff -urNp linux-3.1.1/arch/x86/ia32/ia32_aout.c linux-3.1.1/arch/x86/ia32/ia32_aout.c
5679--- linux-3.1.1/arch/x86/ia32/ia32_aout.c 2011-11-11 15:19:27.000000000 -0500
5680+++ linux-3.1.1/arch/x86/ia32/ia32_aout.c 2011-11-16 18:40:08.000000000 -0500
5681@@ -162,6 +162,8 @@ static int aout_core_dump(long signr, st
5682 unsigned long dump_start, dump_size;
5683 struct user32 dump;
5684
5685+ memset(&dump, 0, sizeof(dump));
5686+
5687 fs = get_fs();
5688 set_fs(KERNEL_DS);
5689 has_dumped = 1;
5690diff -urNp linux-3.1.1/arch/x86/ia32/ia32entry.S linux-3.1.1/arch/x86/ia32/ia32entry.S
5691--- linux-3.1.1/arch/x86/ia32/ia32entry.S 2011-11-11 15:19:27.000000000 -0500
5692+++ linux-3.1.1/arch/x86/ia32/ia32entry.S 2011-11-17 18:27:57.000000000 -0500
5693@@ -13,7 +13,9 @@
5694 #include <asm/thread_info.h>
5695 #include <asm/segment.h>
5696 #include <asm/irqflags.h>
5697+#include <asm/pgtable.h>
5698 #include <linux/linkage.h>
5699+#include <asm/alternative-asm.h>
5700
5701 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
5702 #include <linux/elf-em.h>
5703@@ -95,6 +97,29 @@ ENTRY(native_irq_enable_sysexit)
5704 ENDPROC(native_irq_enable_sysexit)
5705 #endif
5706
5707+ .macro pax_enter_kernel_user
5708+#ifdef CONFIG_PAX_MEMORY_UDEREF
5709+ call pax_enter_kernel_user
5710+#endif
5711+ .endm
5712+
5713+ .macro pax_exit_kernel_user
5714+#ifdef CONFIG_PAX_MEMORY_UDEREF
5715+ call pax_exit_kernel_user
5716+#endif
5717+#ifdef CONFIG_PAX_RANDKSTACK
5718+ pushq %rax
5719+ call pax_randomize_kstack
5720+ popq %rax
5721+#endif
5722+ .endm
5723+
5724+.macro pax_erase_kstack
5725+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
5726+ call pax_erase_kstack
5727+#endif
5728+.endm
5729+
5730 /*
5731 * 32bit SYSENTER instruction entry.
5732 *
5733@@ -121,7 +146,7 @@ ENTRY(ia32_sysenter_target)
5734 CFI_REGISTER rsp,rbp
5735 SWAPGS_UNSAFE_STACK
5736 movq PER_CPU_VAR(kernel_stack), %rsp
5737- addq $(KERNEL_STACK_OFFSET),%rsp
5738+ pax_enter_kernel_user
5739 /*
5740 * No need to follow this irqs on/off section: the syscall
5741 * disabled irqs, here we enable it straight after entry:
5742@@ -134,7 +159,8 @@ ENTRY(ia32_sysenter_target)
5743 CFI_REL_OFFSET rsp,0
5744 pushfq_cfi
5745 /*CFI_REL_OFFSET rflags,0*/
5746- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
5747+ GET_THREAD_INFO(%r10)
5748+ movl TI_sysenter_return(%r10), %r10d
5749 CFI_REGISTER rip,r10
5750 pushq_cfi $__USER32_CS
5751 /*CFI_REL_OFFSET cs,0*/
5752@@ -146,6 +172,12 @@ ENTRY(ia32_sysenter_target)
5753 SAVE_ARGS 0,1,0
5754 /* no need to do an access_ok check here because rbp has been
5755 32bit zero extended */
5756+
5757+#ifdef CONFIG_PAX_MEMORY_UDEREF
5758+ mov $PAX_USER_SHADOW_BASE,%r10
5759+ add %r10,%rbp
5760+#endif
5761+
5762 1: movl (%rbp),%ebp
5763 .section __ex_table,"a"
5764 .quad 1b,ia32_badarg
5765@@ -168,6 +200,8 @@ sysenter_dispatch:
5766 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5767 jnz sysexit_audit
5768 sysexit_from_sys_call:
5769+ pax_exit_kernel_user
5770+ pax_erase_kstack
5771 andl $~TS_COMPAT,TI_status(%r10)
5772 /* clear IF, that popfq doesn't enable interrupts early */
5773 andl $~0x200,EFLAGS-R11(%rsp)
5774@@ -194,6 +228,9 @@ sysexit_from_sys_call:
5775 movl %eax,%esi /* 2nd arg: syscall number */
5776 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
5777 call audit_syscall_entry
5778+
5779+ pax_erase_kstack
5780+
5781 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
5782 cmpq $(IA32_NR_syscalls-1),%rax
5783 ja ia32_badsys
5784@@ -246,6 +283,9 @@ sysenter_tracesys:
5785 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
5786 movq %rsp,%rdi /* &pt_regs -> arg1 */
5787 call syscall_trace_enter
5788+
5789+ pax_erase_kstack
5790+
5791 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5792 RESTORE_REST
5793 cmpq $(IA32_NR_syscalls-1),%rax
5794@@ -277,19 +317,24 @@ ENDPROC(ia32_sysenter_target)
5795 ENTRY(ia32_cstar_target)
5796 CFI_STARTPROC32 simple
5797 CFI_SIGNAL_FRAME
5798- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
5799+ CFI_DEF_CFA rsp,0
5800 CFI_REGISTER rip,rcx
5801 /*CFI_REGISTER rflags,r11*/
5802 SWAPGS_UNSAFE_STACK
5803 movl %esp,%r8d
5804 CFI_REGISTER rsp,r8
5805 movq PER_CPU_VAR(kernel_stack),%rsp
5806+
5807+#ifdef CONFIG_PAX_MEMORY_UDEREF
5808+ pax_enter_kernel_user
5809+#endif
5810+
5811 /*
5812 * No need to follow this irqs on/off section: the syscall
5813 * disabled irqs and here we enable it straight after entry:
5814 */
5815 ENABLE_INTERRUPTS(CLBR_NONE)
5816- SAVE_ARGS 8,0,0
5817+ SAVE_ARGS 8*6,0,0
5818 movl %eax,%eax /* zero extension */
5819 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
5820 movq %rcx,RIP-ARGOFFSET(%rsp)
5821@@ -305,6 +350,12 @@ ENTRY(ia32_cstar_target)
5822 /* no need to do an access_ok check here because r8 has been
5823 32bit zero extended */
5824 /* hardware stack frame is complete now */
5825+
5826+#ifdef CONFIG_PAX_MEMORY_UDEREF
5827+ mov $PAX_USER_SHADOW_BASE,%r10
5828+ add %r10,%r8
5829+#endif
5830+
5831 1: movl (%r8),%r9d
5832 .section __ex_table,"a"
5833 .quad 1b,ia32_badarg
5834@@ -327,6 +378,8 @@ cstar_dispatch:
5835 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5836 jnz sysretl_audit
5837 sysretl_from_sys_call:
5838+ pax_exit_kernel_user
5839+ pax_erase_kstack
5840 andl $~TS_COMPAT,TI_status(%r10)
5841 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
5842 movl RIP-ARGOFFSET(%rsp),%ecx
5843@@ -364,6 +417,9 @@ cstar_tracesys:
5844 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5845 movq %rsp,%rdi /* &pt_regs -> arg1 */
5846 call syscall_trace_enter
5847+
5848+ pax_erase_kstack
5849+
5850 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
5851 RESTORE_REST
5852 xchgl %ebp,%r9d
5853@@ -409,6 +465,7 @@ ENTRY(ia32_syscall)
5854 CFI_REL_OFFSET rip,RIP-RIP
5855 PARAVIRT_ADJUST_EXCEPTION_FRAME
5856 SWAPGS
5857+ pax_enter_kernel_user
5858 /*
5859 * No need to follow this irqs on/off section: the syscall
5860 * disabled irqs and here we enable it straight after entry:
5861@@ -441,6 +498,9 @@ ia32_tracesys:
5862 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5863 movq %rsp,%rdi /* &pt_regs -> arg1 */
5864 call syscall_trace_enter
5865+
5866+ pax_erase_kstack
5867+
5868 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5869 RESTORE_REST
5870 cmpq $(IA32_NR_syscalls-1),%rax
5871@@ -455,6 +515,7 @@ ia32_badsys:
5872
5873 quiet_ni_syscall:
5874 movq $-ENOSYS,%rax
5875+ pax_force_retaddr
5876 ret
5877 CFI_ENDPROC
5878
5879diff -urNp linux-3.1.1/arch/x86/ia32/ia32_signal.c linux-3.1.1/arch/x86/ia32/ia32_signal.c
5880--- linux-3.1.1/arch/x86/ia32/ia32_signal.c 2011-11-11 15:19:27.000000000 -0500
5881+++ linux-3.1.1/arch/x86/ia32/ia32_signal.c 2011-11-16 18:39:07.000000000 -0500
5882@@ -169,7 +169,7 @@ asmlinkage long sys32_sigaltstack(const
5883 }
5884 seg = get_fs();
5885 set_fs(KERNEL_DS);
5886- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
5887+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
5888 set_fs(seg);
5889 if (ret >= 0 && uoss_ptr) {
5890 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
5891@@ -370,7 +370,7 @@ static int ia32_setup_sigcontext(struct
5892 */
5893 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
5894 size_t frame_size,
5895- void **fpstate)
5896+ void __user **fpstate)
5897 {
5898 unsigned long sp;
5899
5900@@ -391,7 +391,7 @@ static void __user *get_sigframe(struct
5901
5902 if (used_math()) {
5903 sp = sp - sig_xstate_ia32_size;
5904- *fpstate = (struct _fpstate_ia32 *) sp;
5905+ *fpstate = (struct _fpstate_ia32 __user *) sp;
5906 if (save_i387_xstate_ia32(*fpstate) < 0)
5907 return (void __user *) -1L;
5908 }
5909@@ -399,7 +399,7 @@ static void __user *get_sigframe(struct
5910 sp -= frame_size;
5911 /* Align the stack pointer according to the i386 ABI,
5912 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
5913- sp = ((sp + 4) & -16ul) - 4;
5914+ sp = ((sp - 12) & -16ul) - 4;
5915 return (void __user *) sp;
5916 }
5917
5918@@ -457,7 +457,7 @@ int ia32_setup_frame(int sig, struct k_s
5919 * These are actually not used anymore, but left because some
5920 * gdb versions depend on them as a marker.
5921 */
5922- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5923+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
5924 } put_user_catch(err);
5925
5926 if (err)
5927@@ -499,7 +499,7 @@ int ia32_setup_rt_frame(int sig, struct
5928 0xb8,
5929 __NR_ia32_rt_sigreturn,
5930 0x80cd,
5931- 0,
5932+ 0
5933 };
5934
5935 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
5936@@ -529,16 +529,18 @@ int ia32_setup_rt_frame(int sig, struct
5937
5938 if (ka->sa.sa_flags & SA_RESTORER)
5939 restorer = ka->sa.sa_restorer;
5940+ else if (current->mm->context.vdso)
5941+ /* Return stub is in 32bit vsyscall page */
5942+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
5943 else
5944- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
5945- rt_sigreturn);
5946+ restorer = &frame->retcode;
5947 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
5948
5949 /*
5950 * Not actually used anymore, but left because some gdb
5951 * versions need it.
5952 */
5953- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5954+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
5955 } put_user_catch(err);
5956
5957 if (err)
5958diff -urNp linux-3.1.1/arch/x86/ia32/sys_ia32.c linux-3.1.1/arch/x86/ia32/sys_ia32.c
5959--- linux-3.1.1/arch/x86/ia32/sys_ia32.c 2011-11-11 15:19:27.000000000 -0500
5960+++ linux-3.1.1/arch/x86/ia32/sys_ia32.c 2011-11-16 18:39:07.000000000 -0500
5961@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsign
5962 */
5963 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
5964 {
5965- typeof(ubuf->st_uid) uid = 0;
5966- typeof(ubuf->st_gid) gid = 0;
5967+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
5968+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
5969 SET_UID(uid, stat->uid);
5970 SET_GID(gid, stat->gid);
5971 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
5972@@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int
5973 }
5974 set_fs(KERNEL_DS);
5975 ret = sys_rt_sigprocmask(how,
5976- set ? (sigset_t __user *)&s : NULL,
5977- oset ? (sigset_t __user *)&s : NULL,
5978+ set ? (sigset_t __force_user *)&s : NULL,
5979+ oset ? (sigset_t __force_user *)&s : NULL,
5980 sigsetsize);
5981 set_fs(old_fs);
5982 if (ret)
5983@@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int
5984 return alarm_setitimer(seconds);
5985 }
5986
5987-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
5988+asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
5989 int options)
5990 {
5991 return compat_sys_wait4(pid, stat_addr, options, NULL);
5992@@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_inter
5993 mm_segment_t old_fs = get_fs();
5994
5995 set_fs(KERNEL_DS);
5996- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
5997+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
5998 set_fs(old_fs);
5999 if (put_compat_timespec(&t, interval))
6000 return -EFAULT;
6001@@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(comp
6002 mm_segment_t old_fs = get_fs();
6003
6004 set_fs(KERNEL_DS);
6005- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
6006+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
6007 set_fs(old_fs);
6008 if (!ret) {
6009 switch (_NSIG_WORDS) {
6010@@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(in
6011 if (copy_siginfo_from_user32(&info, uinfo))
6012 return -EFAULT;
6013 set_fs(KERNEL_DS);
6014- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
6015+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
6016 set_fs(old_fs);
6017 return ret;
6018 }
6019@@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_f
6020 return -EFAULT;
6021
6022 set_fs(KERNEL_DS);
6023- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
6024+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
6025 count);
6026 set_fs(old_fs);
6027
6028diff -urNp linux-3.1.1/arch/x86/include/asm/alternative-asm.h linux-3.1.1/arch/x86/include/asm/alternative-asm.h
6029--- linux-3.1.1/arch/x86/include/asm/alternative-asm.h 2011-11-11 15:19:27.000000000 -0500
6030+++ linux-3.1.1/arch/x86/include/asm/alternative-asm.h 2011-11-16 18:39:07.000000000 -0500
6031@@ -15,6 +15,20 @@
6032 .endm
6033 #endif
6034
6035+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
6036+ .macro pax_force_retaddr rip=0
6037+ btsq $63,\rip(%rsp)
6038+ .endm
6039+ .macro pax_force_fptr ptr
6040+ btsq $63,\ptr
6041+ .endm
6042+#else
6043+ .macro pax_force_retaddr rip=0
6044+ .endm
6045+ .macro pax_force_fptr ptr
6046+ .endm
6047+#endif
6048+
6049 .macro altinstruction_entry orig alt feature orig_len alt_len
6050 .long \orig - .
6051 .long \alt - .
6052diff -urNp linux-3.1.1/arch/x86/include/asm/alternative.h linux-3.1.1/arch/x86/include/asm/alternative.h
6053--- linux-3.1.1/arch/x86/include/asm/alternative.h 2011-11-11 15:19:27.000000000 -0500
6054+++ linux-3.1.1/arch/x86/include/asm/alternative.h 2011-11-16 18:39:07.000000000 -0500
6055@@ -89,7 +89,7 @@ static inline int alternatives_text_rese
6056 ".section .discard,\"aw\",@progbits\n" \
6057 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
6058 ".previous\n" \
6059- ".section .altinstr_replacement, \"ax\"\n" \
6060+ ".section .altinstr_replacement, \"a\"\n" \
6061 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
6062 ".previous"
6063
6064diff -urNp linux-3.1.1/arch/x86/include/asm/apic.h linux-3.1.1/arch/x86/include/asm/apic.h
6065--- linux-3.1.1/arch/x86/include/asm/apic.h 2011-11-11 15:19:27.000000000 -0500
6066+++ linux-3.1.1/arch/x86/include/asm/apic.h 2011-11-16 18:39:07.000000000 -0500
6067@@ -45,7 +45,7 @@ static inline void generic_apic_probe(vo
6068
6069 #ifdef CONFIG_X86_LOCAL_APIC
6070
6071-extern unsigned int apic_verbosity;
6072+extern int apic_verbosity;
6073 extern int local_apic_timer_c2_ok;
6074
6075 extern int disable_apic;
6076diff -urNp linux-3.1.1/arch/x86/include/asm/apm.h linux-3.1.1/arch/x86/include/asm/apm.h
6077--- linux-3.1.1/arch/x86/include/asm/apm.h 2011-11-11 15:19:27.000000000 -0500
6078+++ linux-3.1.1/arch/x86/include/asm/apm.h 2011-11-16 18:39:07.000000000 -0500
6079@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
6080 __asm__ __volatile__(APM_DO_ZERO_SEGS
6081 "pushl %%edi\n\t"
6082 "pushl %%ebp\n\t"
6083- "lcall *%%cs:apm_bios_entry\n\t"
6084+ "lcall *%%ss:apm_bios_entry\n\t"
6085 "setc %%al\n\t"
6086 "popl %%ebp\n\t"
6087 "popl %%edi\n\t"
6088@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
6089 __asm__ __volatile__(APM_DO_ZERO_SEGS
6090 "pushl %%edi\n\t"
6091 "pushl %%ebp\n\t"
6092- "lcall *%%cs:apm_bios_entry\n\t"
6093+ "lcall *%%ss:apm_bios_entry\n\t"
6094 "setc %%bl\n\t"
6095 "popl %%ebp\n\t"
6096 "popl %%edi\n\t"
6097diff -urNp linux-3.1.1/arch/x86/include/asm/atomic64_32.h linux-3.1.1/arch/x86/include/asm/atomic64_32.h
6098--- linux-3.1.1/arch/x86/include/asm/atomic64_32.h 2011-11-11 15:19:27.000000000 -0500
6099+++ linux-3.1.1/arch/x86/include/asm/atomic64_32.h 2011-11-16 18:39:07.000000000 -0500
6100@@ -12,6 +12,14 @@ typedef struct {
6101 u64 __aligned(8) counter;
6102 } atomic64_t;
6103
6104+#ifdef CONFIG_PAX_REFCOUNT
6105+typedef struct {
6106+ u64 __aligned(8) counter;
6107+} atomic64_unchecked_t;
6108+#else
6109+typedef atomic64_t atomic64_unchecked_t;
6110+#endif
6111+
6112 #define ATOMIC64_INIT(val) { (val) }
6113
6114 #ifdef CONFIG_X86_CMPXCHG64
6115@@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg
6116 }
6117
6118 /**
6119+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
6120+ * @p: pointer to type atomic64_unchecked_t
6121+ * @o: expected value
6122+ * @n: new value
6123+ *
6124+ * Atomically sets @v to @n if it was equal to @o and returns
6125+ * the old value.
6126+ */
6127+
6128+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
6129+{
6130+ return cmpxchg64(&v->counter, o, n);
6131+}
6132+
6133+/**
6134 * atomic64_xchg - xchg atomic64 variable
6135 * @v: pointer to type atomic64_t
6136 * @n: value to assign
6137@@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64
6138 }
6139
6140 /**
6141+ * atomic64_set_unchecked - set atomic64 variable
6142+ * @v: pointer to type atomic64_unchecked_t
6143+ * @n: value to assign
6144+ *
6145+ * Atomically sets the value of @v to @n.
6146+ */
6147+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
6148+{
6149+ unsigned high = (unsigned)(i >> 32);
6150+ unsigned low = (unsigned)i;
6151+ asm volatile(ATOMIC64_ALTERNATIVE(set)
6152+ : "+b" (low), "+c" (high)
6153+ : "S" (v)
6154+ : "eax", "edx", "memory"
6155+ );
6156+}
6157+
6158+/**
6159 * atomic64_read - read atomic64 variable
6160 * @v: pointer to type atomic64_t
6161 *
6162@@ -93,6 +134,22 @@ static inline long long atomic64_read(at
6163 }
6164
6165 /**
6166+ * atomic64_read_unchecked - read atomic64 variable
6167+ * @v: pointer to type atomic64_unchecked_t
6168+ *
6169+ * Atomically reads the value of @v and returns it.
6170+ */
6171+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
6172+{
6173+ long long r;
6174+ asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
6175+ : "=A" (r), "+c" (v)
6176+ : : "memory"
6177+ );
6178+ return r;
6179+ }
6180+
6181+/**
6182 * atomic64_add_return - add and return
6183 * @i: integer value to add
6184 * @v: pointer to type atomic64_t
6185@@ -108,6 +165,22 @@ static inline long long atomic64_add_ret
6186 return i;
6187 }
6188
6189+/**
6190+ * atomic64_add_return_unchecked - add and return
6191+ * @i: integer value to add
6192+ * @v: pointer to type atomic64_unchecked_t
6193+ *
6194+ * Atomically adds @i to @v and returns @i + *@v
6195+ */
6196+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
6197+{
6198+ asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
6199+ : "+A" (i), "+c" (v)
6200+ : : "memory"
6201+ );
6202+ return i;
6203+}
6204+
6205 /*
6206 * Other variants with different arithmetic operators:
6207 */
6208@@ -131,6 +204,17 @@ static inline long long atomic64_inc_ret
6209 return a;
6210 }
6211
6212+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6213+{
6214+ long long a;
6215+ asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
6216+ : "=A" (a)
6217+ : "S" (v)
6218+ : "memory", "ecx"
6219+ );
6220+ return a;
6221+}
6222+
6223 static inline long long atomic64_dec_return(atomic64_t *v)
6224 {
6225 long long a;
6226@@ -159,6 +243,22 @@ static inline long long atomic64_add(lon
6227 }
6228
6229 /**
6230+ * atomic64_add_unchecked - add integer to atomic64 variable
6231+ * @i: integer value to add
6232+ * @v: pointer to type atomic64_unchecked_t
6233+ *
6234+ * Atomically adds @i to @v.
6235+ */
6236+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
6237+{
6238+ asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
6239+ : "+A" (i), "+c" (v)
6240+ : : "memory"
6241+ );
6242+ return i;
6243+}
6244+
6245+/**
6246 * atomic64_sub - subtract the atomic64 variable
6247 * @i: integer value to subtract
6248 * @v: pointer to type atomic64_t
6249diff -urNp linux-3.1.1/arch/x86/include/asm/atomic64_64.h linux-3.1.1/arch/x86/include/asm/atomic64_64.h
6250--- linux-3.1.1/arch/x86/include/asm/atomic64_64.h 2011-11-11 15:19:27.000000000 -0500
6251+++ linux-3.1.1/arch/x86/include/asm/atomic64_64.h 2011-11-16 18:39:07.000000000 -0500
6252@@ -18,7 +18,19 @@
6253 */
6254 static inline long atomic64_read(const atomic64_t *v)
6255 {
6256- return (*(volatile long *)&(v)->counter);
6257+ return (*(volatile const long *)&(v)->counter);
6258+}
6259+
6260+/**
6261+ * atomic64_read_unchecked - read atomic64 variable
6262+ * @v: pointer of type atomic64_unchecked_t
6263+ *
6264+ * Atomically reads the value of @v.
6265+ * Doesn't imply a read memory barrier.
6266+ */
6267+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6268+{
6269+ return (*(volatile const long *)&(v)->counter);
6270 }
6271
6272 /**
6273@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64
6274 }
6275
6276 /**
6277+ * atomic64_set_unchecked - set atomic64 variable
6278+ * @v: pointer to type atomic64_unchecked_t
6279+ * @i: required value
6280+ *
6281+ * Atomically sets the value of @v to @i.
6282+ */
6283+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6284+{
6285+ v->counter = i;
6286+}
6287+
6288+/**
6289 * atomic64_add - add integer to atomic64 variable
6290 * @i: integer value to add
6291 * @v: pointer to type atomic64_t
6292@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64
6293 */
6294 static inline void atomic64_add(long i, atomic64_t *v)
6295 {
6296+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
6297+
6298+#ifdef CONFIG_PAX_REFCOUNT
6299+ "jno 0f\n"
6300+ LOCK_PREFIX "subq %1,%0\n"
6301+ "int $4\n0:\n"
6302+ _ASM_EXTABLE(0b, 0b)
6303+#endif
6304+
6305+ : "=m" (v->counter)
6306+ : "er" (i), "m" (v->counter));
6307+}
6308+
6309+/**
6310+ * atomic64_add_unchecked - add integer to atomic64 variable
6311+ * @i: integer value to add
6312+ * @v: pointer to type atomic64_unchecked_t
6313+ *
6314+ * Atomically adds @i to @v.
6315+ */
6316+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6317+{
6318 asm volatile(LOCK_PREFIX "addq %1,%0"
6319 : "=m" (v->counter)
6320 : "er" (i), "m" (v->counter));
6321@@ -56,7 +102,29 @@ static inline void atomic64_add(long i,
6322 */
6323 static inline void atomic64_sub(long i, atomic64_t *v)
6324 {
6325- asm volatile(LOCK_PREFIX "subq %1,%0"
6326+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
6327+
6328+#ifdef CONFIG_PAX_REFCOUNT
6329+ "jno 0f\n"
6330+ LOCK_PREFIX "addq %1,%0\n"
6331+ "int $4\n0:\n"
6332+ _ASM_EXTABLE(0b, 0b)
6333+#endif
6334+
6335+ : "=m" (v->counter)
6336+ : "er" (i), "m" (v->counter));
6337+}
6338+
6339+/**
6340+ * atomic64_sub_unchecked - subtract the atomic64 variable
6341+ * @i: integer value to subtract
6342+ * @v: pointer to type atomic64_unchecked_t
6343+ *
6344+ * Atomically subtracts @i from @v.
6345+ */
6346+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6347+{
6348+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
6349 : "=m" (v->counter)
6350 : "er" (i), "m" (v->counter));
6351 }
6352@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(
6353 {
6354 unsigned char c;
6355
6356- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
6357+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
6358+
6359+#ifdef CONFIG_PAX_REFCOUNT
6360+ "jno 0f\n"
6361+ LOCK_PREFIX "addq %2,%0\n"
6362+ "int $4\n0:\n"
6363+ _ASM_EXTABLE(0b, 0b)
6364+#endif
6365+
6366+ "sete %1\n"
6367 : "=m" (v->counter), "=qm" (c)
6368 : "er" (i), "m" (v->counter) : "memory");
6369 return c;
6370@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(
6371 */
6372 static inline void atomic64_inc(atomic64_t *v)
6373 {
6374+ asm volatile(LOCK_PREFIX "incq %0\n"
6375+
6376+#ifdef CONFIG_PAX_REFCOUNT
6377+ "jno 0f\n"
6378+ LOCK_PREFIX "decq %0\n"
6379+ "int $4\n0:\n"
6380+ _ASM_EXTABLE(0b, 0b)
6381+#endif
6382+
6383+ : "=m" (v->counter)
6384+ : "m" (v->counter));
6385+}
6386+
6387+/**
6388+ * atomic64_inc_unchecked - increment atomic64 variable
6389+ * @v: pointer to type atomic64_unchecked_t
6390+ *
6391+ * Atomically increments @v by 1.
6392+ */
6393+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
6394+{
6395 asm volatile(LOCK_PREFIX "incq %0"
6396 : "=m" (v->counter)
6397 : "m" (v->counter));
6398@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64
6399 */
6400 static inline void atomic64_dec(atomic64_t *v)
6401 {
6402- asm volatile(LOCK_PREFIX "decq %0"
6403+ asm volatile(LOCK_PREFIX "decq %0\n"
6404+
6405+#ifdef CONFIG_PAX_REFCOUNT
6406+ "jno 0f\n"
6407+ LOCK_PREFIX "incq %0\n"
6408+ "int $4\n0:\n"
6409+ _ASM_EXTABLE(0b, 0b)
6410+#endif
6411+
6412+ : "=m" (v->counter)
6413+ : "m" (v->counter));
6414+}
6415+
6416+/**
6417+ * atomic64_dec_unchecked - decrement atomic64 variable
6418+ * @v: pointer to type atomic64_t
6419+ *
6420+ * Atomically decrements @v by 1.
6421+ */
6422+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
6423+{
6424+ asm volatile(LOCK_PREFIX "decq %0\n"
6425 : "=m" (v->counter)
6426 : "m" (v->counter));
6427 }
6428@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(
6429 {
6430 unsigned char c;
6431
6432- asm volatile(LOCK_PREFIX "decq %0; sete %1"
6433+ asm volatile(LOCK_PREFIX "decq %0\n"
6434+
6435+#ifdef CONFIG_PAX_REFCOUNT
6436+ "jno 0f\n"
6437+ LOCK_PREFIX "incq %0\n"
6438+ "int $4\n0:\n"
6439+ _ASM_EXTABLE(0b, 0b)
6440+#endif
6441+
6442+ "sete %1\n"
6443 : "=m" (v->counter), "=qm" (c)
6444 : "m" (v->counter) : "memory");
6445 return c != 0;
6446@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(
6447 {
6448 unsigned char c;
6449
6450- asm volatile(LOCK_PREFIX "incq %0; sete %1"
6451+ asm volatile(LOCK_PREFIX "incq %0\n"
6452+
6453+#ifdef CONFIG_PAX_REFCOUNT
6454+ "jno 0f\n"
6455+ LOCK_PREFIX "decq %0\n"
6456+ "int $4\n0:\n"
6457+ _ASM_EXTABLE(0b, 0b)
6458+#endif
6459+
6460+ "sete %1\n"
6461 : "=m" (v->counter), "=qm" (c)
6462 : "m" (v->counter) : "memory");
6463 return c != 0;
6464@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(
6465 {
6466 unsigned char c;
6467
6468- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
6469+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
6470+
6471+#ifdef CONFIG_PAX_REFCOUNT
6472+ "jno 0f\n"
6473+ LOCK_PREFIX "subq %2,%0\n"
6474+ "int $4\n0:\n"
6475+ _ASM_EXTABLE(0b, 0b)
6476+#endif
6477+
6478+ "sets %1\n"
6479 : "=m" (v->counter), "=qm" (c)
6480 : "er" (i), "m" (v->counter) : "memory");
6481 return c;
6482@@ -171,7 +317,31 @@ static inline int atomic64_add_negative(
6483 static inline long atomic64_add_return(long i, atomic64_t *v)
6484 {
6485 long __i = i;
6486- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
6487+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
6488+
6489+#ifdef CONFIG_PAX_REFCOUNT
6490+ "jno 0f\n"
6491+ "movq %0, %1\n"
6492+ "int $4\n0:\n"
6493+ _ASM_EXTABLE(0b, 0b)
6494+#endif
6495+
6496+ : "+r" (i), "+m" (v->counter)
6497+ : : "memory");
6498+ return i + __i;
6499+}
6500+
6501+/**
6502+ * atomic64_add_return_unchecked - add and return
6503+ * @i: integer value to add
6504+ * @v: pointer to type atomic64_unchecked_t
6505+ *
6506+ * Atomically adds @i to @v and returns @i + @v
6507+ */
6508+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6509+{
6510+ long __i = i;
6511+ asm volatile(LOCK_PREFIX "xaddq %0, %1"
6512 : "+r" (i), "+m" (v->counter)
6513 : : "memory");
6514 return i + __i;
6515@@ -183,6 +353,10 @@ static inline long atomic64_sub_return(l
6516 }
6517
6518 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
6519+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6520+{
6521+ return atomic64_add_return_unchecked(1, v);
6522+}
6523 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
6524
6525 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6526@@ -190,6 +364,11 @@ static inline long atomic64_cmpxchg(atom
6527 return cmpxchg(&v->counter, old, new);
6528 }
6529
6530+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
6531+{
6532+ return cmpxchg(&v->counter, old, new);
6533+}
6534+
6535 static inline long atomic64_xchg(atomic64_t *v, long new)
6536 {
6537 return xchg(&v->counter, new);
6538@@ -206,17 +385,30 @@ static inline long atomic64_xchg(atomic6
6539 */
6540 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
6541 {
6542- long c, old;
6543+ long c, old, new;
6544 c = atomic64_read(v);
6545 for (;;) {
6546- if (unlikely(c == (u)))
6547+ if (unlikely(c == u))
6548 break;
6549- old = atomic64_cmpxchg((v), c, c + (a));
6550+
6551+ asm volatile("add %2,%0\n"
6552+
6553+#ifdef CONFIG_PAX_REFCOUNT
6554+ "jno 0f\n"
6555+ "sub %2,%0\n"
6556+ "int $4\n0:\n"
6557+ _ASM_EXTABLE(0b, 0b)
6558+#endif
6559+
6560+ : "=r" (new)
6561+ : "0" (c), "ir" (a));
6562+
6563+ old = atomic64_cmpxchg(v, c, new);
6564 if (likely(old == c))
6565 break;
6566 c = old;
6567 }
6568- return c != (u);
6569+ return c != u;
6570 }
6571
6572 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
6573diff -urNp linux-3.1.1/arch/x86/include/asm/atomic.h linux-3.1.1/arch/x86/include/asm/atomic.h
6574--- linux-3.1.1/arch/x86/include/asm/atomic.h 2011-11-11 15:19:27.000000000 -0500
6575+++ linux-3.1.1/arch/x86/include/asm/atomic.h 2011-11-16 18:39:07.000000000 -0500
6576@@ -22,7 +22,18 @@
6577 */
6578 static inline int atomic_read(const atomic_t *v)
6579 {
6580- return (*(volatile int *)&(v)->counter);
6581+ return (*(volatile const int *)&(v)->counter);
6582+}
6583+
6584+/**
6585+ * atomic_read_unchecked - read atomic variable
6586+ * @v: pointer of type atomic_unchecked_t
6587+ *
6588+ * Atomically reads the value of @v.
6589+ */
6590+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6591+{
6592+ return (*(volatile const int *)&(v)->counter);
6593 }
6594
6595 /**
6596@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *
6597 }
6598
6599 /**
6600+ * atomic_set_unchecked - set atomic variable
6601+ * @v: pointer of type atomic_unchecked_t
6602+ * @i: required value
6603+ *
6604+ * Atomically sets the value of @v to @i.
6605+ */
6606+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6607+{
6608+ v->counter = i;
6609+}
6610+
6611+/**
6612 * atomic_add - add integer to atomic variable
6613 * @i: integer value to add
6614 * @v: pointer of type atomic_t
6615@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *
6616 */
6617 static inline void atomic_add(int i, atomic_t *v)
6618 {
6619- asm volatile(LOCK_PREFIX "addl %1,%0"
6620+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6621+
6622+#ifdef CONFIG_PAX_REFCOUNT
6623+ "jno 0f\n"
6624+ LOCK_PREFIX "subl %1,%0\n"
6625+ "int $4\n0:\n"
6626+ _ASM_EXTABLE(0b, 0b)
6627+#endif
6628+
6629+ : "+m" (v->counter)
6630+ : "ir" (i));
6631+}
6632+
6633+/**
6634+ * atomic_add_unchecked - add integer to atomic variable
6635+ * @i: integer value to add
6636+ * @v: pointer of type atomic_unchecked_t
6637+ *
6638+ * Atomically adds @i to @v.
6639+ */
6640+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6641+{
6642+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6643 : "+m" (v->counter)
6644 : "ir" (i));
6645 }
6646@@ -60,7 +105,29 @@ static inline void atomic_add(int i, ato
6647 */
6648 static inline void atomic_sub(int i, atomic_t *v)
6649 {
6650- asm volatile(LOCK_PREFIX "subl %1,%0"
6651+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
6652+
6653+#ifdef CONFIG_PAX_REFCOUNT
6654+ "jno 0f\n"
6655+ LOCK_PREFIX "addl %1,%0\n"
6656+ "int $4\n0:\n"
6657+ _ASM_EXTABLE(0b, 0b)
6658+#endif
6659+
6660+ : "+m" (v->counter)
6661+ : "ir" (i));
6662+}
6663+
6664+/**
6665+ * atomic_sub_unchecked - subtract integer from atomic variable
6666+ * @i: integer value to subtract
6667+ * @v: pointer of type atomic_unchecked_t
6668+ *
6669+ * Atomically subtracts @i from @v.
6670+ */
6671+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6672+{
6673+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
6674 : "+m" (v->counter)
6675 : "ir" (i));
6676 }
6677@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(in
6678 {
6679 unsigned char c;
6680
6681- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6682+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
6683+
6684+#ifdef CONFIG_PAX_REFCOUNT
6685+ "jno 0f\n"
6686+ LOCK_PREFIX "addl %2,%0\n"
6687+ "int $4\n0:\n"
6688+ _ASM_EXTABLE(0b, 0b)
6689+#endif
6690+
6691+ "sete %1\n"
6692 : "+m" (v->counter), "=qm" (c)
6693 : "ir" (i) : "memory");
6694 return c;
6695@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(in
6696 */
6697 static inline void atomic_inc(atomic_t *v)
6698 {
6699- asm volatile(LOCK_PREFIX "incl %0"
6700+ asm volatile(LOCK_PREFIX "incl %0\n"
6701+
6702+#ifdef CONFIG_PAX_REFCOUNT
6703+ "jno 0f\n"
6704+ LOCK_PREFIX "decl %0\n"
6705+ "int $4\n0:\n"
6706+ _ASM_EXTABLE(0b, 0b)
6707+#endif
6708+
6709+ : "+m" (v->counter));
6710+}
6711+
6712+/**
6713+ * atomic_inc_unchecked - increment atomic variable
6714+ * @v: pointer of type atomic_unchecked_t
6715+ *
6716+ * Atomically increments @v by 1.
6717+ */
6718+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6719+{
6720+ asm volatile(LOCK_PREFIX "incl %0\n"
6721 : "+m" (v->counter));
6722 }
6723
6724@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *
6725 */
6726 static inline void atomic_dec(atomic_t *v)
6727 {
6728- asm volatile(LOCK_PREFIX "decl %0"
6729+ asm volatile(LOCK_PREFIX "decl %0\n"
6730+
6731+#ifdef CONFIG_PAX_REFCOUNT
6732+ "jno 0f\n"
6733+ LOCK_PREFIX "incl %0\n"
6734+ "int $4\n0:\n"
6735+ _ASM_EXTABLE(0b, 0b)
6736+#endif
6737+
6738+ : "+m" (v->counter));
6739+}
6740+
6741+/**
6742+ * atomic_dec_unchecked - decrement atomic variable
6743+ * @v: pointer of type atomic_unchecked_t
6744+ *
6745+ * Atomically decrements @v by 1.
6746+ */
6747+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
6748+{
6749+ asm volatile(LOCK_PREFIX "decl %0\n"
6750 : "+m" (v->counter));
6751 }
6752
6753@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(at
6754 {
6755 unsigned char c;
6756
6757- asm volatile(LOCK_PREFIX "decl %0; sete %1"
6758+ asm volatile(LOCK_PREFIX "decl %0\n"
6759+
6760+#ifdef CONFIG_PAX_REFCOUNT
6761+ "jno 0f\n"
6762+ LOCK_PREFIX "incl %0\n"
6763+ "int $4\n0:\n"
6764+ _ASM_EXTABLE(0b, 0b)
6765+#endif
6766+
6767+ "sete %1\n"
6768 : "+m" (v->counter), "=qm" (c)
6769 : : "memory");
6770 return c != 0;
6771@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(at
6772 {
6773 unsigned char c;
6774
6775- asm volatile(LOCK_PREFIX "incl %0; sete %1"
6776+ asm volatile(LOCK_PREFIX "incl %0\n"
6777+
6778+#ifdef CONFIG_PAX_REFCOUNT
6779+ "jno 0f\n"
6780+ LOCK_PREFIX "decl %0\n"
6781+ "int $4\n0:\n"
6782+ _ASM_EXTABLE(0b, 0b)
6783+#endif
6784+
6785+ "sete %1\n"
6786+ : "+m" (v->counter), "=qm" (c)
6787+ : : "memory");
6788+ return c != 0;
6789+}
6790+
6791+/**
6792+ * atomic_inc_and_test_unchecked - increment and test
6793+ * @v: pointer of type atomic_unchecked_t
6794+ *
6795+ * Atomically increments @v by 1
6796+ * and returns true if the result is zero, or false for all
6797+ * other cases.
6798+ */
6799+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6800+{
6801+ unsigned char c;
6802+
6803+ asm volatile(LOCK_PREFIX "incl %0\n"
6804+ "sete %1\n"
6805 : "+m" (v->counter), "=qm" (c)
6806 : : "memory");
6807 return c != 0;
6808@@ -157,7 +310,16 @@ static inline int atomic_add_negative(in
6809 {
6810 unsigned char c;
6811
6812- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
6813+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
6814+
6815+#ifdef CONFIG_PAX_REFCOUNT
6816+ "jno 0f\n"
6817+ LOCK_PREFIX "subl %2,%0\n"
6818+ "int $4\n0:\n"
6819+ _ASM_EXTABLE(0b, 0b)
6820+#endif
6821+
6822+ "sets %1\n"
6823 : "+m" (v->counter), "=qm" (c)
6824 : "ir" (i) : "memory");
6825 return c;
6826@@ -180,6 +342,46 @@ static inline int atomic_add_return(int
6827 #endif
6828 /* Modern 486+ processor */
6829 __i = i;
6830+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
6831+
6832+#ifdef CONFIG_PAX_REFCOUNT
6833+ "jno 0f\n"
6834+ "movl %0, %1\n"
6835+ "int $4\n0:\n"
6836+ _ASM_EXTABLE(0b, 0b)
6837+#endif
6838+
6839+ : "+r" (i), "+m" (v->counter)
6840+ : : "memory");
6841+ return i + __i;
6842+
6843+#ifdef CONFIG_M386
6844+no_xadd: /* Legacy 386 processor */
6845+ local_irq_save(flags);
6846+ __i = atomic_read(v);
6847+ atomic_set(v, i + __i);
6848+ local_irq_restore(flags);
6849+ return i + __i;
6850+#endif
6851+}
6852+
6853+/**
6854+ * atomic_add_return_unchecked - add integer and return
6855+ * @v: pointer of type atomic_unchecked_t
6856+ * @i: integer value to add
6857+ *
6858+ * Atomically adds @i to @v and returns @i + @v
6859+ */
6860+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6861+{
6862+ int __i;
6863+#ifdef CONFIG_M386
6864+ unsigned long flags;
6865+ if (unlikely(boot_cpu_data.x86 <= 3))
6866+ goto no_xadd;
6867+#endif
6868+ /* Modern 486+ processor */
6869+ __i = i;
6870 asm volatile(LOCK_PREFIX "xaddl %0, %1"
6871 : "+r" (i), "+m" (v->counter)
6872 : : "memory");
6873@@ -208,6 +410,10 @@ static inline int atomic_sub_return(int
6874 }
6875
6876 #define atomic_inc_return(v) (atomic_add_return(1, v))
6877+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6878+{
6879+ return atomic_add_return_unchecked(1, v);
6880+}
6881 #define atomic_dec_return(v) (atomic_sub_return(1, v))
6882
6883 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6884@@ -215,11 +421,21 @@ static inline int atomic_cmpxchg(atomic_
6885 return cmpxchg(&v->counter, old, new);
6886 }
6887
6888+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
6889+{
6890+ return cmpxchg(&v->counter, old, new);
6891+}
6892+
6893 static inline int atomic_xchg(atomic_t *v, int new)
6894 {
6895 return xchg(&v->counter, new);
6896 }
6897
6898+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6899+{
6900+ return xchg(&v->counter, new);
6901+}
6902+
6903 /**
6904 * __atomic_add_unless - add unless the number is already a given value
6905 * @v: pointer of type atomic_t
6906@@ -231,12 +447,25 @@ static inline int atomic_xchg(atomic_t *
6907 */
6908 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
6909 {
6910- int c, old;
6911+ int c, old, new;
6912 c = atomic_read(v);
6913 for (;;) {
6914- if (unlikely(c == (u)))
6915+ if (unlikely(c == u))
6916 break;
6917- old = atomic_cmpxchg((v), c, c + (a));
6918+
6919+ asm volatile("addl %2,%0\n"
6920+
6921+#ifdef CONFIG_PAX_REFCOUNT
6922+ "jno 0f\n"
6923+ "subl %2,%0\n"
6924+ "int $4\n0:\n"
6925+ _ASM_EXTABLE(0b, 0b)
6926+#endif
6927+
6928+ : "=r" (new)
6929+ : "0" (c), "ir" (a));
6930+
6931+ old = atomic_cmpxchg(v, c, new);
6932 if (likely(old == c))
6933 break;
6934 c = old;
6935@@ -244,6 +473,48 @@ static inline int __atomic_add_unless(at
6936 return c;
6937 }
6938
6939+/**
6940+ * atomic_inc_not_zero_hint - increment if not null
6941+ * @v: pointer of type atomic_t
6942+ * @hint: probable value of the atomic before the increment
6943+ *
6944+ * This version of atomic_inc_not_zero() gives a hint of probable
6945+ * value of the atomic. This helps processor to not read the memory
6946+ * before doing the atomic read/modify/write cycle, lowering
6947+ * number of bus transactions on some arches.
6948+ *
6949+ * Returns: 0 if increment was not done, 1 otherwise.
6950+ */
6951+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
6952+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
6953+{
6954+ int val, c = hint, new;
6955+
6956+ /* sanity test, should be removed by compiler if hint is a constant */
6957+ if (!hint)
6958+ return __atomic_add_unless(v, 1, 0);
6959+
6960+ do {
6961+ asm volatile("incl %0\n"
6962+
6963+#ifdef CONFIG_PAX_REFCOUNT
6964+ "jno 0f\n"
6965+ "decl %0\n"
6966+ "int $4\n0:\n"
6967+ _ASM_EXTABLE(0b, 0b)
6968+#endif
6969+
6970+ : "=r" (new)
6971+ : "0" (c));
6972+
6973+ val = atomic_cmpxchg(v, c, new);
6974+ if (val == c)
6975+ return 1;
6976+ c = val;
6977+ } while (c);
6978+
6979+ return 0;
6980+}
6981
6982 /*
6983 * atomic_dec_if_positive - decrement by 1 if old value positive
6984diff -urNp linux-3.1.1/arch/x86/include/asm/bitops.h linux-3.1.1/arch/x86/include/asm/bitops.h
6985--- linux-3.1.1/arch/x86/include/asm/bitops.h 2011-11-11 15:19:27.000000000 -0500
6986+++ linux-3.1.1/arch/x86/include/asm/bitops.h 2011-11-16 18:39:07.000000000 -0500
6987@@ -38,7 +38,7 @@
6988 * a mask operation on a byte.
6989 */
6990 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
6991-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
6992+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
6993 #define CONST_MASK(nr) (1 << ((nr) & 7))
6994
6995 /**
6996diff -urNp linux-3.1.1/arch/x86/include/asm/boot.h linux-3.1.1/arch/x86/include/asm/boot.h
6997--- linux-3.1.1/arch/x86/include/asm/boot.h 2011-11-11 15:19:27.000000000 -0500
6998+++ linux-3.1.1/arch/x86/include/asm/boot.h 2011-11-16 18:39:07.000000000 -0500
6999@@ -11,10 +11,15 @@
7000 #include <asm/pgtable_types.h>
7001
7002 /* Physical address where kernel should be loaded. */
7003-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
7004+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
7005 + (CONFIG_PHYSICAL_ALIGN - 1)) \
7006 & ~(CONFIG_PHYSICAL_ALIGN - 1))
7007
7008+#ifndef __ASSEMBLY__
7009+extern unsigned char __LOAD_PHYSICAL_ADDR[];
7010+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
7011+#endif
7012+
7013 /* Minimum kernel alignment, as a power of two */
7014 #ifdef CONFIG_X86_64
7015 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
7016diff -urNp linux-3.1.1/arch/x86/include/asm/cacheflush.h linux-3.1.1/arch/x86/include/asm/cacheflush.h
7017--- linux-3.1.1/arch/x86/include/asm/cacheflush.h 2011-11-11 15:19:27.000000000 -0500
7018+++ linux-3.1.1/arch/x86/include/asm/cacheflush.h 2011-11-16 18:39:07.000000000 -0500
7019@@ -26,7 +26,7 @@ static inline unsigned long get_page_mem
7020 unsigned long pg_flags = pg->flags & _PGMT_MASK;
7021
7022 if (pg_flags == _PGMT_DEFAULT)
7023- return -1;
7024+ return ~0UL;
7025 else if (pg_flags == _PGMT_WC)
7026 return _PAGE_CACHE_WC;
7027 else if (pg_flags == _PGMT_UC_MINUS)
7028diff -urNp linux-3.1.1/arch/x86/include/asm/cache.h linux-3.1.1/arch/x86/include/asm/cache.h
7029--- linux-3.1.1/arch/x86/include/asm/cache.h 2011-11-11 15:19:27.000000000 -0500
7030+++ linux-3.1.1/arch/x86/include/asm/cache.h 2011-11-16 18:39:07.000000000 -0500
7031@@ -5,12 +5,13 @@
7032
7033 /* L1 cache line size */
7034 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
7035-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7036+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7037
7038 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
7039+#define __read_only __attribute__((__section__(".data..read_only")))
7040
7041 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
7042-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
7043+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
7044
7045 #ifdef CONFIG_X86_VSMP
7046 #ifdef CONFIG_SMP
7047diff -urNp linux-3.1.1/arch/x86/include/asm/checksum_32.h linux-3.1.1/arch/x86/include/asm/checksum_32.h
7048--- linux-3.1.1/arch/x86/include/asm/checksum_32.h 2011-11-11 15:19:27.000000000 -0500
7049+++ linux-3.1.1/arch/x86/include/asm/checksum_32.h 2011-11-16 18:39:07.000000000 -0500
7050@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
7051 int len, __wsum sum,
7052 int *src_err_ptr, int *dst_err_ptr);
7053
7054+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
7055+ int len, __wsum sum,
7056+ int *src_err_ptr, int *dst_err_ptr);
7057+
7058+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
7059+ int len, __wsum sum,
7060+ int *src_err_ptr, int *dst_err_ptr);
7061+
7062 /*
7063 * Note: when you get a NULL pointer exception here this means someone
7064 * passed in an incorrect kernel address to one of these functions.
7065@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
7066 int *err_ptr)
7067 {
7068 might_sleep();
7069- return csum_partial_copy_generic((__force void *)src, dst,
7070+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
7071 len, sum, err_ptr, NULL);
7072 }
7073
7074@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
7075 {
7076 might_sleep();
7077 if (access_ok(VERIFY_WRITE, dst, len))
7078- return csum_partial_copy_generic(src, (__force void *)dst,
7079+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
7080 len, sum, NULL, err_ptr);
7081
7082 if (len)
7083diff -urNp linux-3.1.1/arch/x86/include/asm/cpufeature.h linux-3.1.1/arch/x86/include/asm/cpufeature.h
7084--- linux-3.1.1/arch/x86/include/asm/cpufeature.h 2011-11-11 15:19:27.000000000 -0500
7085+++ linux-3.1.1/arch/x86/include/asm/cpufeature.h 2011-11-16 18:39:07.000000000 -0500
7086@@ -358,7 +358,7 @@ static __always_inline __pure bool __sta
7087 ".section .discard,\"aw\",@progbits\n"
7088 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
7089 ".previous\n"
7090- ".section .altinstr_replacement,\"ax\"\n"
7091+ ".section .altinstr_replacement,\"a\"\n"
7092 "3: movb $1,%0\n"
7093 "4:\n"
7094 ".previous\n"
7095diff -urNp linux-3.1.1/arch/x86/include/asm/desc_defs.h linux-3.1.1/arch/x86/include/asm/desc_defs.h
7096--- linux-3.1.1/arch/x86/include/asm/desc_defs.h 2011-11-11 15:19:27.000000000 -0500
7097+++ linux-3.1.1/arch/x86/include/asm/desc_defs.h 2011-11-16 18:39:07.000000000 -0500
7098@@ -31,6 +31,12 @@ struct desc_struct {
7099 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
7100 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
7101 };
7102+ struct {
7103+ u16 offset_low;
7104+ u16 seg;
7105+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
7106+ unsigned offset_high: 16;
7107+ } gate;
7108 };
7109 } __attribute__((packed));
7110
7111diff -urNp linux-3.1.1/arch/x86/include/asm/desc.h linux-3.1.1/arch/x86/include/asm/desc.h
7112--- linux-3.1.1/arch/x86/include/asm/desc.h 2011-11-11 15:19:27.000000000 -0500
7113+++ linux-3.1.1/arch/x86/include/asm/desc.h 2011-11-16 18:39:07.000000000 -0500
7114@@ -4,6 +4,7 @@
7115 #include <asm/desc_defs.h>
7116 #include <asm/ldt.h>
7117 #include <asm/mmu.h>
7118+#include <asm/pgtable.h>
7119
7120 #include <linux/smp.h>
7121
7122@@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_
7123
7124 desc->type = (info->read_exec_only ^ 1) << 1;
7125 desc->type |= info->contents << 2;
7126+ desc->type |= info->seg_not_present ^ 1;
7127
7128 desc->s = 1;
7129 desc->dpl = 0x3;
7130@@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_
7131 }
7132
7133 extern struct desc_ptr idt_descr;
7134-extern gate_desc idt_table[];
7135-
7136-struct gdt_page {
7137- struct desc_struct gdt[GDT_ENTRIES];
7138-} __attribute__((aligned(PAGE_SIZE)));
7139-
7140-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
7141+extern gate_desc idt_table[256];
7142
7143+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
7144 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
7145 {
7146- return per_cpu(gdt_page, cpu).gdt;
7147+ return cpu_gdt_table[cpu];
7148 }
7149
7150 #ifdef CONFIG_X86_64
7151@@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *
7152 unsigned long base, unsigned dpl, unsigned flags,
7153 unsigned short seg)
7154 {
7155- gate->a = (seg << 16) | (base & 0xffff);
7156- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
7157+ gate->gate.offset_low = base;
7158+ gate->gate.seg = seg;
7159+ gate->gate.reserved = 0;
7160+ gate->gate.type = type;
7161+ gate->gate.s = 0;
7162+ gate->gate.dpl = dpl;
7163+ gate->gate.p = 1;
7164+ gate->gate.offset_high = base >> 16;
7165 }
7166
7167 #endif
7168@@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(str
7169
7170 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
7171 {
7172+ pax_open_kernel();
7173 memcpy(&idt[entry], gate, sizeof(*gate));
7174+ pax_close_kernel();
7175 }
7176
7177 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
7178 {
7179+ pax_open_kernel();
7180 memcpy(&ldt[entry], desc, 8);
7181+ pax_close_kernel();
7182 }
7183
7184 static inline void
7185@@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struc
7186 default: size = sizeof(*gdt); break;
7187 }
7188
7189+ pax_open_kernel();
7190 memcpy(&gdt[entry], desc, size);
7191+ pax_close_kernel();
7192 }
7193
7194 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
7195@@ -207,7 +216,9 @@ static inline void native_set_ldt(const
7196
7197 static inline void native_load_tr_desc(void)
7198 {
7199+ pax_open_kernel();
7200 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
7201+ pax_close_kernel();
7202 }
7203
7204 static inline void native_load_gdt(const struct desc_ptr *dtr)
7205@@ -244,8 +255,10 @@ static inline void native_load_tls(struc
7206 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
7207 unsigned int i;
7208
7209+ pax_open_kernel();
7210 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
7211 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
7212+ pax_close_kernel();
7213 }
7214
7215 #define _LDT_empty(info) \
7216@@ -307,7 +320,7 @@ static inline void set_desc_limit(struct
7217 desc->limit = (limit >> 16) & 0xf;
7218 }
7219
7220-static inline void _set_gate(int gate, unsigned type, void *addr,
7221+static inline void _set_gate(int gate, unsigned type, const void *addr,
7222 unsigned dpl, unsigned ist, unsigned seg)
7223 {
7224 gate_desc s;
7225@@ -326,7 +339,7 @@ static inline void _set_gate(int gate, u
7226 * Pentium F0 0F bugfix can have resulted in the mapped
7227 * IDT being write-protected.
7228 */
7229-static inline void set_intr_gate(unsigned int n, void *addr)
7230+static inline void set_intr_gate(unsigned int n, const void *addr)
7231 {
7232 BUG_ON((unsigned)n > 0xFF);
7233 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
7234@@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsig
7235 /*
7236 * This routine sets up an interrupt gate at directory privilege level 3.
7237 */
7238-static inline void set_system_intr_gate(unsigned int n, void *addr)
7239+static inline void set_system_intr_gate(unsigned int n, const void *addr)
7240 {
7241 BUG_ON((unsigned)n > 0xFF);
7242 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
7243 }
7244
7245-static inline void set_system_trap_gate(unsigned int n, void *addr)
7246+static inline void set_system_trap_gate(unsigned int n, const void *addr)
7247 {
7248 BUG_ON((unsigned)n > 0xFF);
7249 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
7250 }
7251
7252-static inline void set_trap_gate(unsigned int n, void *addr)
7253+static inline void set_trap_gate(unsigned int n, const void *addr)
7254 {
7255 BUG_ON((unsigned)n > 0xFF);
7256 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
7257@@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigne
7258 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
7259 {
7260 BUG_ON((unsigned)n > 0xFF);
7261- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
7262+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
7263 }
7264
7265-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
7266+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
7267 {
7268 BUG_ON((unsigned)n > 0xFF);
7269 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
7270 }
7271
7272-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
7273+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
7274 {
7275 BUG_ON((unsigned)n > 0xFF);
7276 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
7277 }
7278
7279+#ifdef CONFIG_X86_32
7280+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
7281+{
7282+ struct desc_struct d;
7283+
7284+ if (likely(limit))
7285+ limit = (limit - 1UL) >> PAGE_SHIFT;
7286+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
7287+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
7288+}
7289+#endif
7290+
7291 #endif /* _ASM_X86_DESC_H */
7292diff -urNp linux-3.1.1/arch/x86/include/asm/e820.h linux-3.1.1/arch/x86/include/asm/e820.h
7293--- linux-3.1.1/arch/x86/include/asm/e820.h 2011-11-11 15:19:27.000000000 -0500
7294+++ linux-3.1.1/arch/x86/include/asm/e820.h 2011-11-16 18:39:07.000000000 -0500
7295@@ -69,7 +69,7 @@ struct e820map {
7296 #define ISA_START_ADDRESS 0xa0000
7297 #define ISA_END_ADDRESS 0x100000
7298
7299-#define BIOS_BEGIN 0x000a0000
7300+#define BIOS_BEGIN 0x000c0000
7301 #define BIOS_END 0x00100000
7302
7303 #define BIOS_ROM_BASE 0xffe00000
7304diff -urNp linux-3.1.1/arch/x86/include/asm/elf.h linux-3.1.1/arch/x86/include/asm/elf.h
7305--- linux-3.1.1/arch/x86/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
7306+++ linux-3.1.1/arch/x86/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
7307@@ -237,7 +237,25 @@ extern int force_personality32;
7308 the loader. We need to make sure that it is out of the way of the program
7309 that it will "exec", and that there is sufficient room for the brk. */
7310
7311+#ifdef CONFIG_PAX_SEGMEXEC
7312+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
7313+#else
7314 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
7315+#endif
7316+
7317+#ifdef CONFIG_PAX_ASLR
7318+#ifdef CONFIG_X86_32
7319+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
7320+
7321+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7322+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7323+#else
7324+#define PAX_ELF_ET_DYN_BASE 0x400000UL
7325+
7326+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7327+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7328+#endif
7329+#endif
7330
7331 /* This yields a mask that user programs can use to figure out what
7332 instruction set this CPU supports. This could be done in user space,
7333@@ -290,9 +308,7 @@ do { \
7334
7335 #define ARCH_DLINFO \
7336 do { \
7337- if (vdso_enabled) \
7338- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
7339- (unsigned long)current->mm->context.vdso); \
7340+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
7341 } while (0)
7342
7343 #define AT_SYSINFO 32
7344@@ -303,7 +319,7 @@ do { \
7345
7346 #endif /* !CONFIG_X86_32 */
7347
7348-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
7349+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
7350
7351 #define VDSO_ENTRY \
7352 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
7353@@ -317,7 +333,4 @@ extern int arch_setup_additional_pages(s
7354 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
7355 #define compat_arch_setup_additional_pages syscall32_setup_pages
7356
7357-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7358-#define arch_randomize_brk arch_randomize_brk
7359-
7360 #endif /* _ASM_X86_ELF_H */
7361diff -urNp linux-3.1.1/arch/x86/include/asm/emergency-restart.h linux-3.1.1/arch/x86/include/asm/emergency-restart.h
7362--- linux-3.1.1/arch/x86/include/asm/emergency-restart.h 2011-11-11 15:19:27.000000000 -0500
7363+++ linux-3.1.1/arch/x86/include/asm/emergency-restart.h 2011-11-16 18:39:07.000000000 -0500
7364@@ -15,6 +15,6 @@ enum reboot_type {
7365
7366 extern enum reboot_type reboot_type;
7367
7368-extern void machine_emergency_restart(void);
7369+extern void machine_emergency_restart(void) __noreturn;
7370
7371 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
7372diff -urNp linux-3.1.1/arch/x86/include/asm/futex.h linux-3.1.1/arch/x86/include/asm/futex.h
7373--- linux-3.1.1/arch/x86/include/asm/futex.h 2011-11-11 15:19:27.000000000 -0500
7374+++ linux-3.1.1/arch/x86/include/asm/futex.h 2011-11-16 18:39:07.000000000 -0500
7375@@ -12,16 +12,18 @@
7376 #include <asm/system.h>
7377
7378 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
7379+ typecheck(u32 __user *, uaddr); \
7380 asm volatile("1:\t" insn "\n" \
7381 "2:\t.section .fixup,\"ax\"\n" \
7382 "3:\tmov\t%3, %1\n" \
7383 "\tjmp\t2b\n" \
7384 "\t.previous\n" \
7385 _ASM_EXTABLE(1b, 3b) \
7386- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
7387+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
7388 : "i" (-EFAULT), "0" (oparg), "1" (0))
7389
7390 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
7391+ typecheck(u32 __user *, uaddr); \
7392 asm volatile("1:\tmovl %2, %0\n" \
7393 "\tmovl\t%0, %3\n" \
7394 "\t" insn "\n" \
7395@@ -34,7 +36,7 @@
7396 _ASM_EXTABLE(1b, 4b) \
7397 _ASM_EXTABLE(2b, 4b) \
7398 : "=&a" (oldval), "=&r" (ret), \
7399- "+m" (*uaddr), "=&r" (tem) \
7400+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
7401 : "r" (oparg), "i" (-EFAULT), "1" (0))
7402
7403 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
7404@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
7405
7406 switch (op) {
7407 case FUTEX_OP_SET:
7408- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
7409+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
7410 break;
7411 case FUTEX_OP_ADD:
7412- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
7413+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
7414 uaddr, oparg);
7415 break;
7416 case FUTEX_OP_OR:
7417@@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_i
7418 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
7419 return -EFAULT;
7420
7421- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
7422+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
7423 "2:\t.section .fixup, \"ax\"\n"
7424 "3:\tmov %3, %0\n"
7425 "\tjmp 2b\n"
7426 "\t.previous\n"
7427 _ASM_EXTABLE(1b, 3b)
7428- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
7429+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
7430 : "i" (-EFAULT), "r" (newval), "1" (oldval)
7431 : "memory"
7432 );
7433diff -urNp linux-3.1.1/arch/x86/include/asm/hw_irq.h linux-3.1.1/arch/x86/include/asm/hw_irq.h
7434--- linux-3.1.1/arch/x86/include/asm/hw_irq.h 2011-11-11 15:19:27.000000000 -0500
7435+++ linux-3.1.1/arch/x86/include/asm/hw_irq.h 2011-11-16 18:39:07.000000000 -0500
7436@@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
7437 extern void enable_IO_APIC(void);
7438
7439 /* Statistics */
7440-extern atomic_t irq_err_count;
7441-extern atomic_t irq_mis_count;
7442+extern atomic_unchecked_t irq_err_count;
7443+extern atomic_unchecked_t irq_mis_count;
7444
7445 /* EISA */
7446 extern void eisa_set_level_irq(unsigned int irq);
7447diff -urNp linux-3.1.1/arch/x86/include/asm/i387.h linux-3.1.1/arch/x86/include/asm/i387.h
7448--- linux-3.1.1/arch/x86/include/asm/i387.h 2011-11-11 15:19:27.000000000 -0500
7449+++ linux-3.1.1/arch/x86/include/asm/i387.h 2011-11-16 18:39:07.000000000 -0500
7450@@ -92,6 +92,11 @@ static inline int fxrstor_checking(struc
7451 {
7452 int err;
7453
7454+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7455+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7456+ fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
7457+#endif
7458+
7459 /* See comment in fxsave() below. */
7460 #ifdef CONFIG_AS_FXSAVEQ
7461 asm volatile("1: fxrstorq %[fx]\n\t"
7462@@ -121,6 +126,11 @@ static inline int fxsave_user(struct i38
7463 {
7464 int err;
7465
7466+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7467+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7468+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
7469+#endif
7470+
7471 /*
7472 * Clear the bytes not touched by the fxsave and reserved
7473 * for the SW usage.
7474@@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu
7475 #endif /* CONFIG_X86_64 */
7476
7477 /* We need a safe address that is cheap to find and that is already
7478- in L1 during context switch. The best choices are unfortunately
7479- different for UP and SMP */
7480-#ifdef CONFIG_SMP
7481-#define safe_address (__per_cpu_offset[0])
7482-#else
7483-#define safe_address (kstat_cpu(0).cpustat.user)
7484-#endif
7485+ in L1 during context switch. */
7486+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
7487
7488 /*
7489 * These must be called with preempt disabled
7490@@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void
7491 struct thread_info *me = current_thread_info();
7492 preempt_disable();
7493 if (me->status & TS_USEDFPU)
7494- __save_init_fpu(me->task);
7495+ __save_init_fpu(current);
7496 else
7497 clts();
7498 }
7499diff -urNp linux-3.1.1/arch/x86/include/asm/io.h linux-3.1.1/arch/x86/include/asm/io.h
7500--- linux-3.1.1/arch/x86/include/asm/io.h 2011-11-11 15:19:27.000000000 -0500
7501+++ linux-3.1.1/arch/x86/include/asm/io.h 2011-11-16 18:39:07.000000000 -0500
7502@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
7503
7504 #include <linux/vmalloc.h>
7505
7506+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
7507+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
7508+{
7509+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7510+}
7511+
7512+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
7513+{
7514+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7515+}
7516+
7517 /*
7518 * Convert a virtual cached pointer to an uncached pointer
7519 */
7520diff -urNp linux-3.1.1/arch/x86/include/asm/irqflags.h linux-3.1.1/arch/x86/include/asm/irqflags.h
7521--- linux-3.1.1/arch/x86/include/asm/irqflags.h 2011-11-11 15:19:27.000000000 -0500
7522+++ linux-3.1.1/arch/x86/include/asm/irqflags.h 2011-11-16 18:39:07.000000000 -0500
7523@@ -141,6 +141,11 @@ static inline notrace unsigned long arch
7524 sti; \
7525 sysexit
7526
7527+#define GET_CR0_INTO_RDI mov %cr0, %rdi
7528+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
7529+#define GET_CR3_INTO_RDI mov %cr3, %rdi
7530+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
7531+
7532 #else
7533 #define INTERRUPT_RETURN iret
7534 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
7535diff -urNp linux-3.1.1/arch/x86/include/asm/kprobes.h linux-3.1.1/arch/x86/include/asm/kprobes.h
7536--- linux-3.1.1/arch/x86/include/asm/kprobes.h 2011-11-11 15:19:27.000000000 -0500
7537+++ linux-3.1.1/arch/x86/include/asm/kprobes.h 2011-11-16 18:39:07.000000000 -0500
7538@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
7539 #define RELATIVEJUMP_SIZE 5
7540 #define RELATIVECALL_OPCODE 0xe8
7541 #define RELATIVE_ADDR_SIZE 4
7542-#define MAX_STACK_SIZE 64
7543-#define MIN_STACK_SIZE(ADDR) \
7544- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
7545- THREAD_SIZE - (unsigned long)(ADDR))) \
7546- ? (MAX_STACK_SIZE) \
7547- : (((unsigned long)current_thread_info()) + \
7548- THREAD_SIZE - (unsigned long)(ADDR)))
7549+#define MAX_STACK_SIZE 64UL
7550+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
7551
7552 #define flush_insn_slot(p) do { } while (0)
7553
7554diff -urNp linux-3.1.1/arch/x86/include/asm/kvm_host.h linux-3.1.1/arch/x86/include/asm/kvm_host.h
7555--- linux-3.1.1/arch/x86/include/asm/kvm_host.h 2011-11-11 15:19:27.000000000 -0500
7556+++ linux-3.1.1/arch/x86/include/asm/kvm_host.h 2011-11-16 18:39:07.000000000 -0500
7557@@ -456,7 +456,7 @@ struct kvm_arch {
7558 unsigned int n_requested_mmu_pages;
7559 unsigned int n_max_mmu_pages;
7560 unsigned int indirect_shadow_pages;
7561- atomic_t invlpg_counter;
7562+ atomic_unchecked_t invlpg_counter;
7563 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
7564 /*
7565 * Hash table of struct kvm_mmu_page.
7566@@ -636,7 +636,7 @@ struct kvm_x86_ops {
7567 enum x86_intercept_stage stage);
7568
7569 const struct trace_print_flags *exit_reasons_str;
7570-};
7571+} __do_const;
7572
7573 struct kvm_arch_async_pf {
7574 u32 token;
7575diff -urNp linux-3.1.1/arch/x86/include/asm/local.h linux-3.1.1/arch/x86/include/asm/local.h
7576--- linux-3.1.1/arch/x86/include/asm/local.h 2011-11-11 15:19:27.000000000 -0500
7577+++ linux-3.1.1/arch/x86/include/asm/local.h 2011-11-16 18:39:07.000000000 -0500
7578@@ -18,26 +18,58 @@ typedef struct {
7579
7580 static inline void local_inc(local_t *l)
7581 {
7582- asm volatile(_ASM_INC "%0"
7583+ asm volatile(_ASM_INC "%0\n"
7584+
7585+#ifdef CONFIG_PAX_REFCOUNT
7586+ "jno 0f\n"
7587+ _ASM_DEC "%0\n"
7588+ "int $4\n0:\n"
7589+ _ASM_EXTABLE(0b, 0b)
7590+#endif
7591+
7592 : "+m" (l->a.counter));
7593 }
7594
7595 static inline void local_dec(local_t *l)
7596 {
7597- asm volatile(_ASM_DEC "%0"
7598+ asm volatile(_ASM_DEC "%0\n"
7599+
7600+#ifdef CONFIG_PAX_REFCOUNT
7601+ "jno 0f\n"
7602+ _ASM_INC "%0\n"
7603+ "int $4\n0:\n"
7604+ _ASM_EXTABLE(0b, 0b)
7605+#endif
7606+
7607 : "+m" (l->a.counter));
7608 }
7609
7610 static inline void local_add(long i, local_t *l)
7611 {
7612- asm volatile(_ASM_ADD "%1,%0"
7613+ asm volatile(_ASM_ADD "%1,%0\n"
7614+
7615+#ifdef CONFIG_PAX_REFCOUNT
7616+ "jno 0f\n"
7617+ _ASM_SUB "%1,%0\n"
7618+ "int $4\n0:\n"
7619+ _ASM_EXTABLE(0b, 0b)
7620+#endif
7621+
7622 : "+m" (l->a.counter)
7623 : "ir" (i));
7624 }
7625
7626 static inline void local_sub(long i, local_t *l)
7627 {
7628- asm volatile(_ASM_SUB "%1,%0"
7629+ asm volatile(_ASM_SUB "%1,%0\n"
7630+
7631+#ifdef CONFIG_PAX_REFCOUNT
7632+ "jno 0f\n"
7633+ _ASM_ADD "%1,%0\n"
7634+ "int $4\n0:\n"
7635+ _ASM_EXTABLE(0b, 0b)
7636+#endif
7637+
7638 : "+m" (l->a.counter)
7639 : "ir" (i));
7640 }
7641@@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
7642 {
7643 unsigned char c;
7644
7645- asm volatile(_ASM_SUB "%2,%0; sete %1"
7646+ asm volatile(_ASM_SUB "%2,%0\n"
7647+
7648+#ifdef CONFIG_PAX_REFCOUNT
7649+ "jno 0f\n"
7650+ _ASM_ADD "%2,%0\n"
7651+ "int $4\n0:\n"
7652+ _ASM_EXTABLE(0b, 0b)
7653+#endif
7654+
7655+ "sete %1\n"
7656 : "+m" (l->a.counter), "=qm" (c)
7657 : "ir" (i) : "memory");
7658 return c;
7659@@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
7660 {
7661 unsigned char c;
7662
7663- asm volatile(_ASM_DEC "%0; sete %1"
7664+ asm volatile(_ASM_DEC "%0\n"
7665+
7666+#ifdef CONFIG_PAX_REFCOUNT
7667+ "jno 0f\n"
7668+ _ASM_INC "%0\n"
7669+ "int $4\n0:\n"
7670+ _ASM_EXTABLE(0b, 0b)
7671+#endif
7672+
7673+ "sete %1\n"
7674 : "+m" (l->a.counter), "=qm" (c)
7675 : : "memory");
7676 return c != 0;
7677@@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
7678 {
7679 unsigned char c;
7680
7681- asm volatile(_ASM_INC "%0; sete %1"
7682+ asm volatile(_ASM_INC "%0\n"
7683+
7684+#ifdef CONFIG_PAX_REFCOUNT
7685+ "jno 0f\n"
7686+ _ASM_DEC "%0\n"
7687+ "int $4\n0:\n"
7688+ _ASM_EXTABLE(0b, 0b)
7689+#endif
7690+
7691+ "sete %1\n"
7692 : "+m" (l->a.counter), "=qm" (c)
7693 : : "memory");
7694 return c != 0;
7695@@ -110,7 +169,16 @@ static inline int local_add_negative(lon
7696 {
7697 unsigned char c;
7698
7699- asm volatile(_ASM_ADD "%2,%0; sets %1"
7700+ asm volatile(_ASM_ADD "%2,%0\n"
7701+
7702+#ifdef CONFIG_PAX_REFCOUNT
7703+ "jno 0f\n"
7704+ _ASM_SUB "%2,%0\n"
7705+ "int $4\n0:\n"
7706+ _ASM_EXTABLE(0b, 0b)
7707+#endif
7708+
7709+ "sets %1\n"
7710 : "+m" (l->a.counter), "=qm" (c)
7711 : "ir" (i) : "memory");
7712 return c;
7713@@ -133,7 +201,15 @@ static inline long local_add_return(long
7714 #endif
7715 /* Modern 486+ processor */
7716 __i = i;
7717- asm volatile(_ASM_XADD "%0, %1;"
7718+ asm volatile(_ASM_XADD "%0, %1\n"
7719+
7720+#ifdef CONFIG_PAX_REFCOUNT
7721+ "jno 0f\n"
7722+ _ASM_MOV "%0,%1\n"
7723+ "int $4\n0:\n"
7724+ _ASM_EXTABLE(0b, 0b)
7725+#endif
7726+
7727 : "+r" (i), "+m" (l->a.counter)
7728 : : "memory");
7729 return i + __i;
7730diff -urNp linux-3.1.1/arch/x86/include/asm/mman.h linux-3.1.1/arch/x86/include/asm/mman.h
7731--- linux-3.1.1/arch/x86/include/asm/mman.h 2011-11-11 15:19:27.000000000 -0500
7732+++ linux-3.1.1/arch/x86/include/asm/mman.h 2011-11-16 18:39:07.000000000 -0500
7733@@ -5,4 +5,14 @@
7734
7735 #include <asm-generic/mman.h>
7736
7737+#ifdef __KERNEL__
7738+#ifndef __ASSEMBLY__
7739+#ifdef CONFIG_X86_32
7740+#define arch_mmap_check i386_mmap_check
7741+int i386_mmap_check(unsigned long addr, unsigned long len,
7742+ unsigned long flags);
7743+#endif
7744+#endif
7745+#endif
7746+
7747 #endif /* _ASM_X86_MMAN_H */
7748diff -urNp linux-3.1.1/arch/x86/include/asm/mmu_context.h linux-3.1.1/arch/x86/include/asm/mmu_context.h
7749--- linux-3.1.1/arch/x86/include/asm/mmu_context.h 2011-11-11 15:19:27.000000000 -0500
7750+++ linux-3.1.1/arch/x86/include/asm/mmu_context.h 2011-11-16 18:39:07.000000000 -0500
7751@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *m
7752
7753 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
7754 {
7755+
7756+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7757+ unsigned int i;
7758+ pgd_t *pgd;
7759+
7760+ pax_open_kernel();
7761+ pgd = get_cpu_pgd(smp_processor_id());
7762+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
7763+ set_pgd_batched(pgd+i, native_make_pgd(0));
7764+ pax_close_kernel();
7765+#endif
7766+
7767 #ifdef CONFIG_SMP
7768 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
7769 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
7770@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_s
7771 struct task_struct *tsk)
7772 {
7773 unsigned cpu = smp_processor_id();
7774+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7775+ int tlbstate = TLBSTATE_OK;
7776+#endif
7777
7778 if (likely(prev != next)) {
7779 #ifdef CONFIG_SMP
7780+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7781+ tlbstate = percpu_read(cpu_tlbstate.state);
7782+#endif
7783 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7784 percpu_write(cpu_tlbstate.active_mm, next);
7785 #endif
7786 cpumask_set_cpu(cpu, mm_cpumask(next));
7787
7788 /* Re-load page tables */
7789+#ifdef CONFIG_PAX_PER_CPU_PGD
7790+ pax_open_kernel();
7791+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7792+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7793+ pax_close_kernel();
7794+ load_cr3(get_cpu_pgd(cpu));
7795+#else
7796 load_cr3(next->pgd);
7797+#endif
7798
7799 /* stop flush ipis for the previous mm */
7800 cpumask_clear_cpu(cpu, mm_cpumask(prev));
7801@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_s
7802 */
7803 if (unlikely(prev->context.ldt != next->context.ldt))
7804 load_LDT_nolock(&next->context);
7805- }
7806+
7807+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7808+ if (!(__supported_pte_mask & _PAGE_NX)) {
7809+ smp_mb__before_clear_bit();
7810+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
7811+ smp_mb__after_clear_bit();
7812+ cpu_set(cpu, next->context.cpu_user_cs_mask);
7813+ }
7814+#endif
7815+
7816+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7817+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
7818+ prev->context.user_cs_limit != next->context.user_cs_limit))
7819+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7820 #ifdef CONFIG_SMP
7821+ else if (unlikely(tlbstate != TLBSTATE_OK))
7822+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7823+#endif
7824+#endif
7825+
7826+ }
7827 else {
7828+
7829+#ifdef CONFIG_PAX_PER_CPU_PGD
7830+ pax_open_kernel();
7831+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7832+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7833+ pax_close_kernel();
7834+ load_cr3(get_cpu_pgd(cpu));
7835+#endif
7836+
7837+#ifdef CONFIG_SMP
7838 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7839 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
7840
7841@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_s
7842 * tlb flush IPI delivery. We must reload CR3
7843 * to make sure to use no freed page tables.
7844 */
7845+
7846+#ifndef CONFIG_PAX_PER_CPU_PGD
7847 load_cr3(next->pgd);
7848+#endif
7849+
7850 load_LDT_nolock(&next->context);
7851+
7852+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
7853+ if (!(__supported_pte_mask & _PAGE_NX))
7854+ cpu_set(cpu, next->context.cpu_user_cs_mask);
7855+#endif
7856+
7857+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7858+#ifdef CONFIG_PAX_PAGEEXEC
7859+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
7860+#endif
7861+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7862+#endif
7863+
7864 }
7865- }
7866 #endif
7867+ }
7868 }
7869
7870 #define activate_mm(prev, next) \
7871diff -urNp linux-3.1.1/arch/x86/include/asm/mmu.h linux-3.1.1/arch/x86/include/asm/mmu.h
7872--- linux-3.1.1/arch/x86/include/asm/mmu.h 2011-11-11 15:19:27.000000000 -0500
7873+++ linux-3.1.1/arch/x86/include/asm/mmu.h 2011-11-16 18:39:07.000000000 -0500
7874@@ -9,7 +9,7 @@
7875 * we put the segment information here.
7876 */
7877 typedef struct {
7878- void *ldt;
7879+ struct desc_struct *ldt;
7880 int size;
7881
7882 #ifdef CONFIG_X86_64
7883@@ -18,7 +18,19 @@ typedef struct {
7884 #endif
7885
7886 struct mutex lock;
7887- void *vdso;
7888+ unsigned long vdso;
7889+
7890+#ifdef CONFIG_X86_32
7891+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
7892+ unsigned long user_cs_base;
7893+ unsigned long user_cs_limit;
7894+
7895+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7896+ cpumask_t cpu_user_cs_mask;
7897+#endif
7898+
7899+#endif
7900+#endif
7901 } mm_context_t;
7902
7903 #ifdef CONFIG_SMP
7904diff -urNp linux-3.1.1/arch/x86/include/asm/module.h linux-3.1.1/arch/x86/include/asm/module.h
7905--- linux-3.1.1/arch/x86/include/asm/module.h 2011-11-11 15:19:27.000000000 -0500
7906+++ linux-3.1.1/arch/x86/include/asm/module.h 2011-11-16 18:39:07.000000000 -0500
7907@@ -5,6 +5,7 @@
7908
7909 #ifdef CONFIG_X86_64
7910 /* X86_64 does not define MODULE_PROC_FAMILY */
7911+#define MODULE_PROC_FAMILY ""
7912 #elif defined CONFIG_M386
7913 #define MODULE_PROC_FAMILY "386 "
7914 #elif defined CONFIG_M486
7915@@ -59,8 +60,18 @@
7916 #error unknown processor family
7917 #endif
7918
7919-#ifdef CONFIG_X86_32
7920-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
7921+#ifdef CONFIG_PAX_KERNEXEC
7922+#define MODULE_PAX_KERNEXEC "KERNEXEC "
7923+#else
7924+#define MODULE_PAX_KERNEXEC ""
7925 #endif
7926
7927+#ifdef CONFIG_PAX_MEMORY_UDEREF
7928+#define MODULE_PAX_UDEREF "UDEREF "
7929+#else
7930+#define MODULE_PAX_UDEREF ""
7931+#endif
7932+
7933+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
7934+
7935 #endif /* _ASM_X86_MODULE_H */
7936diff -urNp linux-3.1.1/arch/x86/include/asm/page_64_types.h linux-3.1.1/arch/x86/include/asm/page_64_types.h
7937--- linux-3.1.1/arch/x86/include/asm/page_64_types.h 2011-11-11 15:19:27.000000000 -0500
7938+++ linux-3.1.1/arch/x86/include/asm/page_64_types.h 2011-11-16 18:39:07.000000000 -0500
7939@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
7940
7941 /* duplicated to the one in bootmem.h */
7942 extern unsigned long max_pfn;
7943-extern unsigned long phys_base;
7944+extern const unsigned long phys_base;
7945
7946 extern unsigned long __phys_addr(unsigned long);
7947 #define __phys_reloc_hide(x) (x)
7948diff -urNp linux-3.1.1/arch/x86/include/asm/paravirt.h linux-3.1.1/arch/x86/include/asm/paravirt.h
7949--- linux-3.1.1/arch/x86/include/asm/paravirt.h 2011-11-11 15:19:27.000000000 -0500
7950+++ linux-3.1.1/arch/x86/include/asm/paravirt.h 2011-11-16 18:39:07.000000000 -0500
7951@@ -667,6 +667,18 @@ static inline void set_pgd(pgd_t *pgdp,
7952 val);
7953 }
7954
7955+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
7956+{
7957+ pgdval_t val = native_pgd_val(pgd);
7958+
7959+ if (sizeof(pgdval_t) > sizeof(long))
7960+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
7961+ val, (u64)val >> 32);
7962+ else
7963+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
7964+ val);
7965+}
7966+
7967 static inline void pgd_clear(pgd_t *pgdp)
7968 {
7969 set_pgd(pgdp, __pgd(0));
7970@@ -748,6 +760,21 @@ static inline void __set_fixmap(unsigned
7971 pv_mmu_ops.set_fixmap(idx, phys, flags);
7972 }
7973
7974+#ifdef CONFIG_PAX_KERNEXEC
7975+static inline unsigned long pax_open_kernel(void)
7976+{
7977+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
7978+}
7979+
7980+static inline unsigned long pax_close_kernel(void)
7981+{
7982+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
7983+}
7984+#else
7985+static inline unsigned long pax_open_kernel(void) { return 0; }
7986+static inline unsigned long pax_close_kernel(void) { return 0; }
7987+#endif
7988+
7989 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
7990
7991 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
7992@@ -964,7 +991,7 @@ extern void default_banner(void);
7993
7994 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
7995 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
7996-#define PARA_INDIRECT(addr) *%cs:addr
7997+#define PARA_INDIRECT(addr) *%ss:addr
7998 #endif
7999
8000 #define INTERRUPT_RETURN \
8001@@ -1041,6 +1068,21 @@ extern void default_banner(void);
8002 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
8003 CLBR_NONE, \
8004 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
8005+
8006+#define GET_CR0_INTO_RDI \
8007+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
8008+ mov %rax,%rdi
8009+
8010+#define SET_RDI_INTO_CR0 \
8011+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
8012+
8013+#define GET_CR3_INTO_RDI \
8014+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
8015+ mov %rax,%rdi
8016+
8017+#define SET_RDI_INTO_CR3 \
8018+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
8019+
8020 #endif /* CONFIG_X86_32 */
8021
8022 #endif /* __ASSEMBLY__ */
8023diff -urNp linux-3.1.1/arch/x86/include/asm/paravirt_types.h linux-3.1.1/arch/x86/include/asm/paravirt_types.h
8024--- linux-3.1.1/arch/x86/include/asm/paravirt_types.h 2011-11-11 15:19:27.000000000 -0500
8025+++ linux-3.1.1/arch/x86/include/asm/paravirt_types.h 2011-11-16 18:39:07.000000000 -0500
8026@@ -84,20 +84,20 @@ struct pv_init_ops {
8027 */
8028 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
8029 unsigned long addr, unsigned len);
8030-};
8031+} __no_const;
8032
8033
8034 struct pv_lazy_ops {
8035 /* Set deferred update mode, used for batching operations. */
8036 void (*enter)(void);
8037 void (*leave)(void);
8038-};
8039+} __no_const;
8040
8041 struct pv_time_ops {
8042 unsigned long long (*sched_clock)(void);
8043 unsigned long long (*steal_clock)(int cpu);
8044 unsigned long (*get_tsc_khz)(void);
8045-};
8046+} __no_const;
8047
8048 struct pv_cpu_ops {
8049 /* hooks for various privileged instructions */
8050@@ -193,7 +193,7 @@ struct pv_cpu_ops {
8051
8052 void (*start_context_switch)(struct task_struct *prev);
8053 void (*end_context_switch)(struct task_struct *next);
8054-};
8055+} __no_const;
8056
8057 struct pv_irq_ops {
8058 /*
8059@@ -224,7 +224,7 @@ struct pv_apic_ops {
8060 unsigned long start_eip,
8061 unsigned long start_esp);
8062 #endif
8063-};
8064+} __no_const;
8065
8066 struct pv_mmu_ops {
8067 unsigned long (*read_cr2)(void);
8068@@ -313,6 +313,7 @@ struct pv_mmu_ops {
8069 struct paravirt_callee_save make_pud;
8070
8071 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
8072+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
8073 #endif /* PAGETABLE_LEVELS == 4 */
8074 #endif /* PAGETABLE_LEVELS >= 3 */
8075
8076@@ -324,6 +325,12 @@ struct pv_mmu_ops {
8077 an mfn. We can tell which is which from the index. */
8078 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
8079 phys_addr_t phys, pgprot_t flags);
8080+
8081+#ifdef CONFIG_PAX_KERNEXEC
8082+ unsigned long (*pax_open_kernel)(void);
8083+ unsigned long (*pax_close_kernel)(void);
8084+#endif
8085+
8086 };
8087
8088 struct arch_spinlock;
8089@@ -334,7 +341,7 @@ struct pv_lock_ops {
8090 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
8091 int (*spin_trylock)(struct arch_spinlock *lock);
8092 void (*spin_unlock)(struct arch_spinlock *lock);
8093-};
8094+} __no_const;
8095
8096 /* This contains all the paravirt structures: we get a convenient
8097 * number for each function using the offset which we use to indicate
8098diff -urNp linux-3.1.1/arch/x86/include/asm/pgalloc.h linux-3.1.1/arch/x86/include/asm/pgalloc.h
8099--- linux-3.1.1/arch/x86/include/asm/pgalloc.h 2011-11-11 15:19:27.000000000 -0500
8100+++ linux-3.1.1/arch/x86/include/asm/pgalloc.h 2011-11-16 18:39:07.000000000 -0500
8101@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
8102 pmd_t *pmd, pte_t *pte)
8103 {
8104 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
8105+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
8106+}
8107+
8108+static inline void pmd_populate_user(struct mm_struct *mm,
8109+ pmd_t *pmd, pte_t *pte)
8110+{
8111+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
8112 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
8113 }
8114
8115diff -urNp linux-3.1.1/arch/x86/include/asm/pgtable-2level.h linux-3.1.1/arch/x86/include/asm/pgtable-2level.h
8116--- linux-3.1.1/arch/x86/include/asm/pgtable-2level.h 2011-11-11 15:19:27.000000000 -0500
8117+++ linux-3.1.1/arch/x86/include/asm/pgtable-2level.h 2011-11-16 18:39:07.000000000 -0500
8118@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
8119
8120 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8121 {
8122+ pax_open_kernel();
8123 *pmdp = pmd;
8124+ pax_close_kernel();
8125 }
8126
8127 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
8128diff -urNp linux-3.1.1/arch/x86/include/asm/pgtable_32.h linux-3.1.1/arch/x86/include/asm/pgtable_32.h
8129--- linux-3.1.1/arch/x86/include/asm/pgtable_32.h 2011-11-11 15:19:27.000000000 -0500
8130+++ linux-3.1.1/arch/x86/include/asm/pgtable_32.h 2011-11-16 18:39:07.000000000 -0500
8131@@ -25,9 +25,6 @@
8132 struct mm_struct;
8133 struct vm_area_struct;
8134
8135-extern pgd_t swapper_pg_dir[1024];
8136-extern pgd_t initial_page_table[1024];
8137-
8138 static inline void pgtable_cache_init(void) { }
8139 static inline void check_pgt_cache(void) { }
8140 void paging_init(void);
8141@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, u
8142 # include <asm/pgtable-2level.h>
8143 #endif
8144
8145+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
8146+extern pgd_t initial_page_table[PTRS_PER_PGD];
8147+#ifdef CONFIG_X86_PAE
8148+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
8149+#endif
8150+
8151 #if defined(CONFIG_HIGHPTE)
8152 #define pte_offset_map(dir, address) \
8153 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
8154@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, u
8155 /* Clear a kernel PTE and flush it from the TLB */
8156 #define kpte_clear_flush(ptep, vaddr) \
8157 do { \
8158+ pax_open_kernel(); \
8159 pte_clear(&init_mm, (vaddr), (ptep)); \
8160+ pax_close_kernel(); \
8161 __flush_tlb_one((vaddr)); \
8162 } while (0)
8163
8164@@ -74,6 +79,9 @@ do { \
8165
8166 #endif /* !__ASSEMBLY__ */
8167
8168+#define HAVE_ARCH_UNMAPPED_AREA
8169+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
8170+
8171 /*
8172 * kern_addr_valid() is (1) for FLATMEM and (0) for
8173 * SPARSEMEM and DISCONTIGMEM
8174diff -urNp linux-3.1.1/arch/x86/include/asm/pgtable_32_types.h linux-3.1.1/arch/x86/include/asm/pgtable_32_types.h
8175--- linux-3.1.1/arch/x86/include/asm/pgtable_32_types.h 2011-11-11 15:19:27.000000000 -0500
8176+++ linux-3.1.1/arch/x86/include/asm/pgtable_32_types.h 2011-11-16 18:39:07.000000000 -0500
8177@@ -8,7 +8,7 @@
8178 */
8179 #ifdef CONFIG_X86_PAE
8180 # include <asm/pgtable-3level_types.h>
8181-# define PMD_SIZE (1UL << PMD_SHIFT)
8182+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
8183 # define PMD_MASK (~(PMD_SIZE - 1))
8184 #else
8185 # include <asm/pgtable-2level_types.h>
8186@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
8187 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
8188 #endif
8189
8190+#ifdef CONFIG_PAX_KERNEXEC
8191+#ifndef __ASSEMBLY__
8192+extern unsigned char MODULES_EXEC_VADDR[];
8193+extern unsigned char MODULES_EXEC_END[];
8194+#endif
8195+#include <asm/boot.h>
8196+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
8197+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
8198+#else
8199+#define ktla_ktva(addr) (addr)
8200+#define ktva_ktla(addr) (addr)
8201+#endif
8202+
8203 #define MODULES_VADDR VMALLOC_START
8204 #define MODULES_END VMALLOC_END
8205 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
8206diff -urNp linux-3.1.1/arch/x86/include/asm/pgtable-3level.h linux-3.1.1/arch/x86/include/asm/pgtable-3level.h
8207--- linux-3.1.1/arch/x86/include/asm/pgtable-3level.h 2011-11-11 15:19:27.000000000 -0500
8208+++ linux-3.1.1/arch/x86/include/asm/pgtable-3level.h 2011-11-16 18:39:07.000000000 -0500
8209@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
8210
8211 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8212 {
8213+ pax_open_kernel();
8214 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
8215+ pax_close_kernel();
8216 }
8217
8218 static inline void native_set_pud(pud_t *pudp, pud_t pud)
8219 {
8220+ pax_open_kernel();
8221 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
8222+ pax_close_kernel();
8223 }
8224
8225 /*
8226diff -urNp linux-3.1.1/arch/x86/include/asm/pgtable_64.h linux-3.1.1/arch/x86/include/asm/pgtable_64.h
8227--- linux-3.1.1/arch/x86/include/asm/pgtable_64.h 2011-11-11 15:19:27.000000000 -0500
8228+++ linux-3.1.1/arch/x86/include/asm/pgtable_64.h 2011-11-16 18:39:07.000000000 -0500
8229@@ -16,10 +16,13 @@
8230
8231 extern pud_t level3_kernel_pgt[512];
8232 extern pud_t level3_ident_pgt[512];
8233+extern pud_t level3_vmalloc_pgt[512];
8234+extern pud_t level3_vmemmap_pgt[512];
8235+extern pud_t level2_vmemmap_pgt[512];
8236 extern pmd_t level2_kernel_pgt[512];
8237 extern pmd_t level2_fixmap_pgt[512];
8238-extern pmd_t level2_ident_pgt[512];
8239-extern pgd_t init_level4_pgt[];
8240+extern pmd_t level2_ident_pgt[512*2];
8241+extern pgd_t init_level4_pgt[512];
8242
8243 #define swapper_pg_dir init_level4_pgt
8244
8245@@ -61,7 +64,9 @@ static inline void native_set_pte_atomic
8246
8247 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8248 {
8249+ pax_open_kernel();
8250 *pmdp = pmd;
8251+ pax_close_kernel();
8252 }
8253
8254 static inline void native_pmd_clear(pmd_t *pmd)
8255@@ -107,6 +112,13 @@ static inline void native_pud_clear(pud_
8256
8257 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
8258 {
8259+ pax_open_kernel();
8260+ *pgdp = pgd;
8261+ pax_close_kernel();
8262+}
8263+
8264+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
8265+{
8266 *pgdp = pgd;
8267 }
8268
8269diff -urNp linux-3.1.1/arch/x86/include/asm/pgtable_64_types.h linux-3.1.1/arch/x86/include/asm/pgtable_64_types.h
8270--- linux-3.1.1/arch/x86/include/asm/pgtable_64_types.h 2011-11-11 15:19:27.000000000 -0500
8271+++ linux-3.1.1/arch/x86/include/asm/pgtable_64_types.h 2011-11-16 18:39:07.000000000 -0500
8272@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
8273 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
8274 #define MODULES_END _AC(0xffffffffff000000, UL)
8275 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
8276+#define MODULES_EXEC_VADDR MODULES_VADDR
8277+#define MODULES_EXEC_END MODULES_END
8278+
8279+#define ktla_ktva(addr) (addr)
8280+#define ktva_ktla(addr) (addr)
8281
8282 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
8283diff -urNp linux-3.1.1/arch/x86/include/asm/pgtable.h linux-3.1.1/arch/x86/include/asm/pgtable.h
8284--- linux-3.1.1/arch/x86/include/asm/pgtable.h 2011-11-11 15:19:27.000000000 -0500
8285+++ linux-3.1.1/arch/x86/include/asm/pgtable.h 2011-11-16 18:39:07.000000000 -0500
8286@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm
8287
8288 #ifndef __PAGETABLE_PUD_FOLDED
8289 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
8290+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
8291 #define pgd_clear(pgd) native_pgd_clear(pgd)
8292 #endif
8293
8294@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm
8295
8296 #define arch_end_context_switch(prev) do {} while(0)
8297
8298+#define pax_open_kernel() native_pax_open_kernel()
8299+#define pax_close_kernel() native_pax_close_kernel()
8300 #endif /* CONFIG_PARAVIRT */
8301
8302+#define __HAVE_ARCH_PAX_OPEN_KERNEL
8303+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
8304+
8305+#ifdef CONFIG_PAX_KERNEXEC
8306+static inline unsigned long native_pax_open_kernel(void)
8307+{
8308+ unsigned long cr0;
8309+
8310+ preempt_disable();
8311+ barrier();
8312+ cr0 = read_cr0() ^ X86_CR0_WP;
8313+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
8314+ write_cr0(cr0);
8315+ return cr0 ^ X86_CR0_WP;
8316+}
8317+
8318+static inline unsigned long native_pax_close_kernel(void)
8319+{
8320+ unsigned long cr0;
8321+
8322+ cr0 = read_cr0() ^ X86_CR0_WP;
8323+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
8324+ write_cr0(cr0);
8325+ barrier();
8326+ preempt_enable_no_resched();
8327+ return cr0 ^ X86_CR0_WP;
8328+}
8329+#else
8330+static inline unsigned long native_pax_open_kernel(void) { return 0; }
8331+static inline unsigned long native_pax_close_kernel(void) { return 0; }
8332+#endif
8333+
8334 /*
8335 * The following only work if pte_present() is true.
8336 * Undefined behaviour if not..
8337 */
8338+static inline int pte_user(pte_t pte)
8339+{
8340+ return pte_val(pte) & _PAGE_USER;
8341+}
8342+
8343 static inline int pte_dirty(pte_t pte)
8344 {
8345 return pte_flags(pte) & _PAGE_DIRTY;
8346@@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t
8347 return pte_clear_flags(pte, _PAGE_RW);
8348 }
8349
8350+static inline pte_t pte_mkread(pte_t pte)
8351+{
8352+ return __pte(pte_val(pte) | _PAGE_USER);
8353+}
8354+
8355 static inline pte_t pte_mkexec(pte_t pte)
8356 {
8357- return pte_clear_flags(pte, _PAGE_NX);
8358+#ifdef CONFIG_X86_PAE
8359+ if (__supported_pte_mask & _PAGE_NX)
8360+ return pte_clear_flags(pte, _PAGE_NX);
8361+ else
8362+#endif
8363+ return pte_set_flags(pte, _PAGE_USER);
8364+}
8365+
8366+static inline pte_t pte_exprotect(pte_t pte)
8367+{
8368+#ifdef CONFIG_X86_PAE
8369+ if (__supported_pte_mask & _PAGE_NX)
8370+ return pte_set_flags(pte, _PAGE_NX);
8371+ else
8372+#endif
8373+ return pte_clear_flags(pte, _PAGE_USER);
8374 }
8375
8376 static inline pte_t pte_mkdirty(pte_t pte)
8377@@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long
8378 #endif
8379
8380 #ifndef __ASSEMBLY__
8381+
8382+#ifdef CONFIG_PAX_PER_CPU_PGD
8383+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
8384+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
8385+{
8386+ return cpu_pgd[cpu];
8387+}
8388+#endif
8389+
8390 #include <linux/mm_types.h>
8391
8392 static inline int pte_none(pte_t pte)
8393@@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *p
8394
8395 static inline int pgd_bad(pgd_t pgd)
8396 {
8397- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
8398+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
8399 }
8400
8401 static inline int pgd_none(pgd_t pgd)
8402@@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
8403 * pgd_offset() returns a (pgd_t *)
8404 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
8405 */
8406-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
8407+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
8408+
8409+#ifdef CONFIG_PAX_PER_CPU_PGD
8410+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
8411+#endif
8412+
8413 /*
8414 * a shortcut which implies the use of the kernel's pgd, instead
8415 * of a process's
8416@@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
8417 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
8418 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
8419
8420+#ifdef CONFIG_X86_32
8421+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
8422+#else
8423+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
8424+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
8425+
8426+#ifdef CONFIG_PAX_MEMORY_UDEREF
8427+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
8428+#else
8429+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
8430+#endif
8431+
8432+#endif
8433+
8434 #ifndef __ASSEMBLY__
8435
8436 extern int direct_gbpages;
8437@@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(st
8438 * dst and src can be on the same page, but the range must not overlap,
8439 * and must not cross a page boundary.
8440 */
8441-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
8442+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
8443 {
8444- memcpy(dst, src, count * sizeof(pgd_t));
8445+ pax_open_kernel();
8446+ while (count--)
8447+ *dst++ = *src++;
8448+ pax_close_kernel();
8449 }
8450
8451+#ifdef CONFIG_PAX_PER_CPU_PGD
8452+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8453+#endif
8454+
8455+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8456+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8457+#else
8458+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
8459+#endif
8460
8461 #include <asm-generic/pgtable.h>
8462 #endif /* __ASSEMBLY__ */
8463diff -urNp linux-3.1.1/arch/x86/include/asm/pgtable_types.h linux-3.1.1/arch/x86/include/asm/pgtable_types.h
8464--- linux-3.1.1/arch/x86/include/asm/pgtable_types.h 2011-11-11 15:19:27.000000000 -0500
8465+++ linux-3.1.1/arch/x86/include/asm/pgtable_types.h 2011-11-16 18:39:07.000000000 -0500
8466@@ -16,13 +16,12 @@
8467 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
8468 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
8469 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
8470-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
8471+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
8472 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
8473 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
8474 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
8475-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
8476-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
8477-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
8478+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
8479+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
8480 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
8481
8482 /* If _PAGE_BIT_PRESENT is clear, we use these: */
8483@@ -40,7 +39,6 @@
8484 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
8485 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
8486 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
8487-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
8488 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
8489 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
8490 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
8491@@ -57,8 +55,10 @@
8492
8493 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
8494 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
8495-#else
8496+#elif defined(CONFIG_KMEMCHECK)
8497 #define _PAGE_NX (_AT(pteval_t, 0))
8498+#else
8499+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
8500 #endif
8501
8502 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
8503@@ -96,6 +96,9 @@
8504 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
8505 _PAGE_ACCESSED)
8506
8507+#define PAGE_READONLY_NOEXEC PAGE_READONLY
8508+#define PAGE_SHARED_NOEXEC PAGE_SHARED
8509+
8510 #define __PAGE_KERNEL_EXEC \
8511 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
8512 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
8513@@ -106,7 +109,7 @@
8514 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
8515 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
8516 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
8517-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
8518+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
8519 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
8520 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
8521 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
8522@@ -168,8 +171,8 @@
8523 * bits are combined, this will alow user to access the high address mapped
8524 * VDSO in the presence of CONFIG_COMPAT_VDSO
8525 */
8526-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
8527-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
8528+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8529+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8530 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
8531 #endif
8532
8533@@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t p
8534 {
8535 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
8536 }
8537+#endif
8538
8539+#if PAGETABLE_LEVELS == 3
8540+#include <asm-generic/pgtable-nopud.h>
8541+#endif
8542+
8543+#if PAGETABLE_LEVELS == 2
8544+#include <asm-generic/pgtable-nopmd.h>
8545+#endif
8546+
8547+#ifndef __ASSEMBLY__
8548 #if PAGETABLE_LEVELS > 3
8549 typedef struct { pudval_t pud; } pud_t;
8550
8551@@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pu
8552 return pud.pud;
8553 }
8554 #else
8555-#include <asm-generic/pgtable-nopud.h>
8556-
8557 static inline pudval_t native_pud_val(pud_t pud)
8558 {
8559 return native_pgd_val(pud.pgd);
8560@@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pm
8561 return pmd.pmd;
8562 }
8563 #else
8564-#include <asm-generic/pgtable-nopmd.h>
8565-
8566 static inline pmdval_t native_pmd_val(pmd_t pmd)
8567 {
8568 return native_pgd_val(pmd.pud.pgd);
8569@@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
8570
8571 extern pteval_t __supported_pte_mask;
8572 extern void set_nx(void);
8573-extern int nx_enabled;
8574
8575 #define pgprot_writecombine pgprot_writecombine
8576 extern pgprot_t pgprot_writecombine(pgprot_t prot);
8577diff -urNp linux-3.1.1/arch/x86/include/asm/processor.h linux-3.1.1/arch/x86/include/asm/processor.h
8578--- linux-3.1.1/arch/x86/include/asm/processor.h 2011-11-11 15:19:27.000000000 -0500
8579+++ linux-3.1.1/arch/x86/include/asm/processor.h 2011-11-16 18:39:07.000000000 -0500
8580@@ -266,7 +266,7 @@ struct tss_struct {
8581
8582 } ____cacheline_aligned;
8583
8584-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
8585+extern struct tss_struct init_tss[NR_CPUS];
8586
8587 /*
8588 * Save the original ist values for checking stack pointers during debugging
8589@@ -858,11 +858,18 @@ static inline void spin_lock_prefetch(co
8590 */
8591 #define TASK_SIZE PAGE_OFFSET
8592 #define TASK_SIZE_MAX TASK_SIZE
8593+
8594+#ifdef CONFIG_PAX_SEGMEXEC
8595+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
8596+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
8597+#else
8598 #define STACK_TOP TASK_SIZE
8599-#define STACK_TOP_MAX STACK_TOP
8600+#endif
8601+
8602+#define STACK_TOP_MAX TASK_SIZE
8603
8604 #define INIT_THREAD { \
8605- .sp0 = sizeof(init_stack) + (long)&init_stack, \
8606+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8607 .vm86_info = NULL, \
8608 .sysenter_cs = __KERNEL_CS, \
8609 .io_bitmap_ptr = NULL, \
8610@@ -876,7 +883,7 @@ static inline void spin_lock_prefetch(co
8611 */
8612 #define INIT_TSS { \
8613 .x86_tss = { \
8614- .sp0 = sizeof(init_stack) + (long)&init_stack, \
8615+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8616 .ss0 = __KERNEL_DS, \
8617 .ss1 = __KERNEL_CS, \
8618 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
8619@@ -887,11 +894,7 @@ static inline void spin_lock_prefetch(co
8620 extern unsigned long thread_saved_pc(struct task_struct *tsk);
8621
8622 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
8623-#define KSTK_TOP(info) \
8624-({ \
8625- unsigned long *__ptr = (unsigned long *)(info); \
8626- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
8627-})
8628+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
8629
8630 /*
8631 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
8632@@ -906,7 +909,7 @@ extern unsigned long thread_saved_pc(str
8633 #define task_pt_regs(task) \
8634 ({ \
8635 struct pt_regs *__regs__; \
8636- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
8637+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
8638 __regs__ - 1; \
8639 })
8640
8641@@ -916,13 +919,13 @@ extern unsigned long thread_saved_pc(str
8642 /*
8643 * User space process size. 47bits minus one guard page.
8644 */
8645-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
8646+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
8647
8648 /* This decides where the kernel will search for a free chunk of vm
8649 * space during mmap's.
8650 */
8651 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
8652- 0xc0000000 : 0xFFFFe000)
8653+ 0xc0000000 : 0xFFFFf000)
8654
8655 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
8656 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
8657@@ -933,11 +936,11 @@ extern unsigned long thread_saved_pc(str
8658 #define STACK_TOP_MAX TASK_SIZE_MAX
8659
8660 #define INIT_THREAD { \
8661- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8662+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8663 }
8664
8665 #define INIT_TSS { \
8666- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8667+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8668 }
8669
8670 /*
8671@@ -959,6 +962,10 @@ extern void start_thread(struct pt_regs
8672 */
8673 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
8674
8675+#ifdef CONFIG_PAX_SEGMEXEC
8676+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
8677+#endif
8678+
8679 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
8680
8681 /* Get/set a process' ability to use the timestamp counter instruction */
8682diff -urNp linux-3.1.1/arch/x86/include/asm/ptrace.h linux-3.1.1/arch/x86/include/asm/ptrace.h
8683--- linux-3.1.1/arch/x86/include/asm/ptrace.h 2011-11-11 15:19:27.000000000 -0500
8684+++ linux-3.1.1/arch/x86/include/asm/ptrace.h 2011-11-16 18:39:07.000000000 -0500
8685@@ -156,28 +156,29 @@ static inline unsigned long regs_return_
8686 }
8687
8688 /*
8689- * user_mode_vm(regs) determines whether a register set came from user mode.
8690+ * user_mode(regs) determines whether a register set came from user mode.
8691 * This is true if V8086 mode was enabled OR if the register set was from
8692 * protected mode with RPL-3 CS value. This tricky test checks that with
8693 * one comparison. Many places in the kernel can bypass this full check
8694- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
8695+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
8696+ * be used.
8697 */
8698-static inline int user_mode(struct pt_regs *regs)
8699+static inline int user_mode_novm(struct pt_regs *regs)
8700 {
8701 #ifdef CONFIG_X86_32
8702 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
8703 #else
8704- return !!(regs->cs & 3);
8705+ return !!(regs->cs & SEGMENT_RPL_MASK);
8706 #endif
8707 }
8708
8709-static inline int user_mode_vm(struct pt_regs *regs)
8710+static inline int user_mode(struct pt_regs *regs)
8711 {
8712 #ifdef CONFIG_X86_32
8713 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
8714 USER_RPL;
8715 #else
8716- return user_mode(regs);
8717+ return user_mode_novm(regs);
8718 #endif
8719 }
8720
8721@@ -193,15 +194,16 @@ static inline int v8086_mode(struct pt_r
8722 #ifdef CONFIG_X86_64
8723 static inline bool user_64bit_mode(struct pt_regs *regs)
8724 {
8725+ unsigned long cs = regs->cs & 0xffff;
8726 #ifndef CONFIG_PARAVIRT
8727 /*
8728 * On non-paravirt systems, this is the only long mode CPL 3
8729 * selector. We do not allow long mode selectors in the LDT.
8730 */
8731- return regs->cs == __USER_CS;
8732+ return cs == __USER_CS;
8733 #else
8734 /* Headers are too twisted for this to go in paravirt.h. */
8735- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
8736+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
8737 #endif
8738 }
8739 #endif
8740diff -urNp linux-3.1.1/arch/x86/include/asm/reboot.h linux-3.1.1/arch/x86/include/asm/reboot.h
8741--- linux-3.1.1/arch/x86/include/asm/reboot.h 2011-11-11 15:19:27.000000000 -0500
8742+++ linux-3.1.1/arch/x86/include/asm/reboot.h 2011-11-16 18:39:07.000000000 -0500
8743@@ -6,19 +6,19 @@
8744 struct pt_regs;
8745
8746 struct machine_ops {
8747- void (*restart)(char *cmd);
8748- void (*halt)(void);
8749- void (*power_off)(void);
8750+ void (* __noreturn restart)(char *cmd);
8751+ void (* __noreturn halt)(void);
8752+ void (* __noreturn power_off)(void);
8753 void (*shutdown)(void);
8754 void (*crash_shutdown)(struct pt_regs *);
8755- void (*emergency_restart)(void);
8756-};
8757+ void (* __noreturn emergency_restart)(void);
8758+} __no_const;
8759
8760 extern struct machine_ops machine_ops;
8761
8762 void native_machine_crash_shutdown(struct pt_regs *regs);
8763 void native_machine_shutdown(void);
8764-void machine_real_restart(unsigned int type);
8765+void machine_real_restart(unsigned int type) __noreturn;
8766 /* These must match dispatch_table in reboot_32.S */
8767 #define MRR_BIOS 0
8768 #define MRR_APM 1
8769diff -urNp linux-3.1.1/arch/x86/include/asm/rwsem.h linux-3.1.1/arch/x86/include/asm/rwsem.h
8770--- linux-3.1.1/arch/x86/include/asm/rwsem.h 2011-11-11 15:19:27.000000000 -0500
8771+++ linux-3.1.1/arch/x86/include/asm/rwsem.h 2011-11-16 18:39:07.000000000 -0500
8772@@ -64,6 +64,14 @@ static inline void __down_read(struct rw
8773 {
8774 asm volatile("# beginning down_read\n\t"
8775 LOCK_PREFIX _ASM_INC "(%1)\n\t"
8776+
8777+#ifdef CONFIG_PAX_REFCOUNT
8778+ "jno 0f\n"
8779+ LOCK_PREFIX _ASM_DEC "(%1)\n"
8780+ "int $4\n0:\n"
8781+ _ASM_EXTABLE(0b, 0b)
8782+#endif
8783+
8784 /* adds 0x00000001 */
8785 " jns 1f\n"
8786 " call call_rwsem_down_read_failed\n"
8787@@ -85,6 +93,14 @@ static inline int __down_read_trylock(st
8788 "1:\n\t"
8789 " mov %1,%2\n\t"
8790 " add %3,%2\n\t"
8791+
8792+#ifdef CONFIG_PAX_REFCOUNT
8793+ "jno 0f\n"
8794+ "sub %3,%2\n"
8795+ "int $4\n0:\n"
8796+ _ASM_EXTABLE(0b, 0b)
8797+#endif
8798+
8799 " jle 2f\n\t"
8800 LOCK_PREFIX " cmpxchg %2,%0\n\t"
8801 " jnz 1b\n\t"
8802@@ -104,6 +120,14 @@ static inline void __down_write_nested(s
8803 long tmp;
8804 asm volatile("# beginning down_write\n\t"
8805 LOCK_PREFIX " xadd %1,(%2)\n\t"
8806+
8807+#ifdef CONFIG_PAX_REFCOUNT
8808+ "jno 0f\n"
8809+ "mov %1,(%2)\n"
8810+ "int $4\n0:\n"
8811+ _ASM_EXTABLE(0b, 0b)
8812+#endif
8813+
8814 /* adds 0xffff0001, returns the old value */
8815 " test %1,%1\n\t"
8816 /* was the count 0 before? */
8817@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_s
8818 long tmp;
8819 asm volatile("# beginning __up_read\n\t"
8820 LOCK_PREFIX " xadd %1,(%2)\n\t"
8821+
8822+#ifdef CONFIG_PAX_REFCOUNT
8823+ "jno 0f\n"
8824+ "mov %1,(%2)\n"
8825+ "int $4\n0:\n"
8826+ _ASM_EXTABLE(0b, 0b)
8827+#endif
8828+
8829 /* subtracts 1, returns the old value */
8830 " jns 1f\n\t"
8831 " call call_rwsem_wake\n" /* expects old value in %edx */
8832@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_
8833 long tmp;
8834 asm volatile("# beginning __up_write\n\t"
8835 LOCK_PREFIX " xadd %1,(%2)\n\t"
8836+
8837+#ifdef CONFIG_PAX_REFCOUNT
8838+ "jno 0f\n"
8839+ "mov %1,(%2)\n"
8840+ "int $4\n0:\n"
8841+ _ASM_EXTABLE(0b, 0b)
8842+#endif
8843+
8844 /* subtracts 0xffff0001, returns the old value */
8845 " jns 1f\n\t"
8846 " call call_rwsem_wake\n" /* expects old value in %edx */
8847@@ -176,6 +216,14 @@ static inline void __downgrade_write(str
8848 {
8849 asm volatile("# beginning __downgrade_write\n\t"
8850 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
8851+
8852+#ifdef CONFIG_PAX_REFCOUNT
8853+ "jno 0f\n"
8854+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
8855+ "int $4\n0:\n"
8856+ _ASM_EXTABLE(0b, 0b)
8857+#endif
8858+
8859 /*
8860 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
8861 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
8862@@ -194,7 +242,15 @@ static inline void __downgrade_write(str
8863 */
8864 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
8865 {
8866- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
8867+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
8868+
8869+#ifdef CONFIG_PAX_REFCOUNT
8870+ "jno 0f\n"
8871+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
8872+ "int $4\n0:\n"
8873+ _ASM_EXTABLE(0b, 0b)
8874+#endif
8875+
8876 : "+m" (sem->count)
8877 : "er" (delta));
8878 }
8879@@ -206,7 +262,15 @@ static inline long rwsem_atomic_update(l
8880 {
8881 long tmp = delta;
8882
8883- asm volatile(LOCK_PREFIX "xadd %0,%1"
8884+ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
8885+
8886+#ifdef CONFIG_PAX_REFCOUNT
8887+ "jno 0f\n"
8888+ "mov %0,%1\n"
8889+ "int $4\n0:\n"
8890+ _ASM_EXTABLE(0b, 0b)
8891+#endif
8892+
8893 : "+r" (tmp), "+m" (sem->count)
8894 : : "memory");
8895
8896diff -urNp linux-3.1.1/arch/x86/include/asm/segment.h linux-3.1.1/arch/x86/include/asm/segment.h
8897--- linux-3.1.1/arch/x86/include/asm/segment.h 2011-11-11 15:19:27.000000000 -0500
8898+++ linux-3.1.1/arch/x86/include/asm/segment.h 2011-11-16 18:39:07.000000000 -0500
8899@@ -64,10 +64,15 @@
8900 * 26 - ESPFIX small SS
8901 * 27 - per-cpu [ offset to per-cpu data area ]
8902 * 28 - stack_canary-20 [ for stack protector ]
8903- * 29 - unused
8904- * 30 - unused
8905+ * 29 - PCI BIOS CS
8906+ * 30 - PCI BIOS DS
8907 * 31 - TSS for double fault handler
8908 */
8909+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
8910+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
8911+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
8912+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
8913+
8914 #define GDT_ENTRY_TLS_MIN 6
8915 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
8916
8917@@ -79,6 +84,8 @@
8918
8919 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
8920
8921+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
8922+
8923 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
8924
8925 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
8926@@ -104,6 +111,12 @@
8927 #define __KERNEL_STACK_CANARY 0
8928 #endif
8929
8930+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
8931+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
8932+
8933+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
8934+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
8935+
8936 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
8937
8938 /*
8939@@ -141,7 +154,7 @@
8940 */
8941
8942 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
8943-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
8944+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
8945
8946
8947 #else
8948@@ -165,6 +178,8 @@
8949 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
8950 #define __USER32_DS __USER_DS
8951
8952+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
8953+
8954 #define GDT_ENTRY_TSS 8 /* needs two entries */
8955 #define GDT_ENTRY_LDT 10 /* needs two entries */
8956 #define GDT_ENTRY_TLS_MIN 12
8957@@ -185,6 +200,7 @@
8958 #endif
8959
8960 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
8961+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
8962 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
8963 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
8964 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
8965diff -urNp linux-3.1.1/arch/x86/include/asm/smp.h linux-3.1.1/arch/x86/include/asm/smp.h
8966--- linux-3.1.1/arch/x86/include/asm/smp.h 2011-11-11 15:19:27.000000000 -0500
8967+++ linux-3.1.1/arch/x86/include/asm/smp.h 2011-11-16 18:39:07.000000000 -0500
8968@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_
8969 /* cpus sharing the last level cache: */
8970 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
8971 DECLARE_PER_CPU(u16, cpu_llc_id);
8972-DECLARE_PER_CPU(int, cpu_number);
8973+DECLARE_PER_CPU(unsigned int, cpu_number);
8974
8975 static inline struct cpumask *cpu_sibling_mask(int cpu)
8976 {
8977@@ -77,7 +77,7 @@ struct smp_ops {
8978
8979 void (*send_call_func_ipi)(const struct cpumask *mask);
8980 void (*send_call_func_single_ipi)(int cpu);
8981-};
8982+} __no_const;
8983
8984 /* Globals due to paravirt */
8985 extern void set_cpu_sibling_map(int cpu);
8986@@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitd
8987 extern int safe_smp_processor_id(void);
8988
8989 #elif defined(CONFIG_X86_64_SMP)
8990-#define raw_smp_processor_id() (percpu_read(cpu_number))
8991-
8992-#define stack_smp_processor_id() \
8993-({ \
8994- struct thread_info *ti; \
8995- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
8996- ti->cpu; \
8997-})
8998+#define raw_smp_processor_id() (percpu_read(cpu_number))
8999+#define stack_smp_processor_id() raw_smp_processor_id()
9000 #define safe_smp_processor_id() smp_processor_id()
9001
9002 #endif
9003diff -urNp linux-3.1.1/arch/x86/include/asm/spinlock.h linux-3.1.1/arch/x86/include/asm/spinlock.h
9004--- linux-3.1.1/arch/x86/include/asm/spinlock.h 2011-11-11 15:19:27.000000000 -0500
9005+++ linux-3.1.1/arch/x86/include/asm/spinlock.h 2011-11-16 18:39:07.000000000 -0500
9006@@ -248,6 +248,14 @@ static inline int arch_write_can_lock(ar
9007 static inline void arch_read_lock(arch_rwlock_t *rw)
9008 {
9009 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
9010+
9011+#ifdef CONFIG_PAX_REFCOUNT
9012+ "jno 0f\n"
9013+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
9014+ "int $4\n0:\n"
9015+ _ASM_EXTABLE(0b, 0b)
9016+#endif
9017+
9018 "jns 1f\n"
9019 "call __read_lock_failed\n\t"
9020 "1:\n"
9021@@ -257,6 +265,14 @@ static inline void arch_read_lock(arch_r
9022 static inline void arch_write_lock(arch_rwlock_t *rw)
9023 {
9024 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
9025+
9026+#ifdef CONFIG_PAX_REFCOUNT
9027+ "jno 0f\n"
9028+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
9029+ "int $4\n0:\n"
9030+ _ASM_EXTABLE(0b, 0b)
9031+#endif
9032+
9033 "jz 1f\n"
9034 "call __write_lock_failed\n\t"
9035 "1:\n"
9036@@ -286,13 +302,29 @@ static inline int arch_write_trylock(arc
9037
9038 static inline void arch_read_unlock(arch_rwlock_t *rw)
9039 {
9040- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
9041+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
9042+
9043+#ifdef CONFIG_PAX_REFCOUNT
9044+ "jno 0f\n"
9045+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
9046+ "int $4\n0:\n"
9047+ _ASM_EXTABLE(0b, 0b)
9048+#endif
9049+
9050 :"+m" (rw->lock) : : "memory");
9051 }
9052
9053 static inline void arch_write_unlock(arch_rwlock_t *rw)
9054 {
9055- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
9056+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
9057+
9058+#ifdef CONFIG_PAX_REFCOUNT
9059+ "jno 0f\n"
9060+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
9061+ "int $4\n0:\n"
9062+ _ASM_EXTABLE(0b, 0b)
9063+#endif
9064+
9065 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
9066 }
9067
9068diff -urNp linux-3.1.1/arch/x86/include/asm/stackprotector.h linux-3.1.1/arch/x86/include/asm/stackprotector.h
9069--- linux-3.1.1/arch/x86/include/asm/stackprotector.h 2011-11-11 15:19:27.000000000 -0500
9070+++ linux-3.1.1/arch/x86/include/asm/stackprotector.h 2011-11-16 18:39:07.000000000 -0500
9071@@ -48,7 +48,7 @@
9072 * head_32 for boot CPU and setup_per_cpu_areas() for others.
9073 */
9074 #define GDT_STACK_CANARY_INIT \
9075- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
9076+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
9077
9078 /*
9079 * Initialize the stackprotector canary value.
9080@@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
9081
9082 static inline void load_stack_canary_segment(void)
9083 {
9084-#ifdef CONFIG_X86_32
9085+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
9086 asm volatile ("mov %0, %%gs" : : "r" (0));
9087 #endif
9088 }
9089diff -urNp linux-3.1.1/arch/x86/include/asm/stacktrace.h linux-3.1.1/arch/x86/include/asm/stacktrace.h
9090--- linux-3.1.1/arch/x86/include/asm/stacktrace.h 2011-11-11 15:19:27.000000000 -0500
9091+++ linux-3.1.1/arch/x86/include/asm/stacktrace.h 2011-11-16 18:39:07.000000000 -0500
9092@@ -11,28 +11,20 @@
9093
9094 extern int kstack_depth_to_print;
9095
9096-struct thread_info;
9097+struct task_struct;
9098 struct stacktrace_ops;
9099
9100-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
9101- unsigned long *stack,
9102- unsigned long bp,
9103- const struct stacktrace_ops *ops,
9104- void *data,
9105- unsigned long *end,
9106- int *graph);
9107-
9108-extern unsigned long
9109-print_context_stack(struct thread_info *tinfo,
9110- unsigned long *stack, unsigned long bp,
9111- const struct stacktrace_ops *ops, void *data,
9112- unsigned long *end, int *graph);
9113-
9114-extern unsigned long
9115-print_context_stack_bp(struct thread_info *tinfo,
9116- unsigned long *stack, unsigned long bp,
9117- const struct stacktrace_ops *ops, void *data,
9118- unsigned long *end, int *graph);
9119+typedef unsigned long walk_stack_t(struct task_struct *task,
9120+ void *stack_start,
9121+ unsigned long *stack,
9122+ unsigned long bp,
9123+ const struct stacktrace_ops *ops,
9124+ void *data,
9125+ unsigned long *end,
9126+ int *graph);
9127+
9128+extern walk_stack_t print_context_stack;
9129+extern walk_stack_t print_context_stack_bp;
9130
9131 /* Generic stack tracer with callbacks */
9132
9133@@ -40,7 +32,7 @@ struct stacktrace_ops {
9134 void (*address)(void *data, unsigned long address, int reliable);
9135 /* On negative return stop dumping */
9136 int (*stack)(void *data, char *name);
9137- walk_stack_t walk_stack;
9138+ walk_stack_t *walk_stack;
9139 };
9140
9141 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
9142diff -urNp linux-3.1.1/arch/x86/include/asm/sys_ia32.h linux-3.1.1/arch/x86/include/asm/sys_ia32.h
9143--- linux-3.1.1/arch/x86/include/asm/sys_ia32.h 2011-11-11 15:19:27.000000000 -0500
9144+++ linux-3.1.1/arch/x86/include/asm/sys_ia32.h 2011-11-16 18:39:07.000000000 -0500
9145@@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int
9146 compat_sigset_t __user *, unsigned int);
9147 asmlinkage long sys32_alarm(unsigned int);
9148
9149-asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
9150+asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
9151 asmlinkage long sys32_sysfs(int, u32, u32);
9152
9153 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
9154diff -urNp linux-3.1.1/arch/x86/include/asm/system.h linux-3.1.1/arch/x86/include/asm/system.h
9155--- linux-3.1.1/arch/x86/include/asm/system.h 2011-11-11 15:19:27.000000000 -0500
9156+++ linux-3.1.1/arch/x86/include/asm/system.h 2011-11-16 18:39:07.000000000 -0500
9157@@ -129,7 +129,7 @@ do { \
9158 "call __switch_to\n\t" \
9159 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
9160 __switch_canary \
9161- "movq %P[thread_info](%%rsi),%%r8\n\t" \
9162+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
9163 "movq %%rax,%%rdi\n\t" \
9164 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
9165 "jnz ret_from_fork\n\t" \
9166@@ -140,7 +140,7 @@ do { \
9167 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
9168 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
9169 [_tif_fork] "i" (_TIF_FORK), \
9170- [thread_info] "i" (offsetof(struct task_struct, stack)), \
9171+ [thread_info] "m" (current_tinfo), \
9172 [current_task] "m" (current_task) \
9173 __switch_canary_iparam \
9174 : "memory", "cc" __EXTRA_CLOBBER)
9175@@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
9176 {
9177 unsigned long __limit;
9178 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
9179- return __limit + 1;
9180+ return __limit;
9181 }
9182
9183 static inline void native_clts(void)
9184@@ -397,12 +397,12 @@ void enable_hlt(void);
9185
9186 void cpu_idle_wait(void);
9187
9188-extern unsigned long arch_align_stack(unsigned long sp);
9189+#define arch_align_stack(x) ((x) & ~0xfUL)
9190 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
9191
9192 void default_idle(void);
9193
9194-void stop_this_cpu(void *dummy);
9195+void stop_this_cpu(void *dummy) __noreturn;
9196
9197 /*
9198 * Force strict CPU ordering.
9199diff -urNp linux-3.1.1/arch/x86/include/asm/thread_info.h linux-3.1.1/arch/x86/include/asm/thread_info.h
9200--- linux-3.1.1/arch/x86/include/asm/thread_info.h 2011-11-11 15:19:27.000000000 -0500
9201+++ linux-3.1.1/arch/x86/include/asm/thread_info.h 2011-11-16 18:39:07.000000000 -0500
9202@@ -10,6 +10,7 @@
9203 #include <linux/compiler.h>
9204 #include <asm/page.h>
9205 #include <asm/types.h>
9206+#include <asm/percpu.h>
9207
9208 /*
9209 * low level task data that entry.S needs immediate access to
9210@@ -24,7 +25,6 @@ struct exec_domain;
9211 #include <linux/atomic.h>
9212
9213 struct thread_info {
9214- struct task_struct *task; /* main task structure */
9215 struct exec_domain *exec_domain; /* execution domain */
9216 __u32 flags; /* low level flags */
9217 __u32 status; /* thread synchronous flags */
9218@@ -34,18 +34,12 @@ struct thread_info {
9219 mm_segment_t addr_limit;
9220 struct restart_block restart_block;
9221 void __user *sysenter_return;
9222-#ifdef CONFIG_X86_32
9223- unsigned long previous_esp; /* ESP of the previous stack in
9224- case of nested (IRQ) stacks
9225- */
9226- __u8 supervisor_stack[0];
9227-#endif
9228+ unsigned long lowest_stack;
9229 int uaccess_err;
9230 };
9231
9232-#define INIT_THREAD_INFO(tsk) \
9233+#define INIT_THREAD_INFO \
9234 { \
9235- .task = &tsk, \
9236 .exec_domain = &default_exec_domain, \
9237 .flags = 0, \
9238 .cpu = 0, \
9239@@ -56,7 +50,7 @@ struct thread_info {
9240 }, \
9241 }
9242
9243-#define init_thread_info (init_thread_union.thread_info)
9244+#define init_thread_info (init_thread_union.stack)
9245 #define init_stack (init_thread_union.stack)
9246
9247 #else /* !__ASSEMBLY__ */
9248@@ -170,6 +164,23 @@ struct thread_info {
9249 ret; \
9250 })
9251
9252+#ifdef __ASSEMBLY__
9253+/* how to get the thread information struct from ASM */
9254+#define GET_THREAD_INFO(reg) \
9255+ mov PER_CPU_VAR(current_tinfo), reg
9256+
9257+/* use this one if reg already contains %esp */
9258+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
9259+#else
9260+/* how to get the thread information struct from C */
9261+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
9262+
9263+static __always_inline struct thread_info *current_thread_info(void)
9264+{
9265+ return percpu_read_stable(current_tinfo);
9266+}
9267+#endif
9268+
9269 #ifdef CONFIG_X86_32
9270
9271 #define STACK_WARN (THREAD_SIZE/8)
9272@@ -180,35 +191,13 @@ struct thread_info {
9273 */
9274 #ifndef __ASSEMBLY__
9275
9276-
9277 /* how to get the current stack pointer from C */
9278 register unsigned long current_stack_pointer asm("esp") __used;
9279
9280-/* how to get the thread information struct from C */
9281-static inline struct thread_info *current_thread_info(void)
9282-{
9283- return (struct thread_info *)
9284- (current_stack_pointer & ~(THREAD_SIZE - 1));
9285-}
9286-
9287-#else /* !__ASSEMBLY__ */
9288-
9289-/* how to get the thread information struct from ASM */
9290-#define GET_THREAD_INFO(reg) \
9291- movl $-THREAD_SIZE, reg; \
9292- andl %esp, reg
9293-
9294-/* use this one if reg already contains %esp */
9295-#define GET_THREAD_INFO_WITH_ESP(reg) \
9296- andl $-THREAD_SIZE, reg
9297-
9298 #endif
9299
9300 #else /* X86_32 */
9301
9302-#include <asm/percpu.h>
9303-#define KERNEL_STACK_OFFSET (5*8)
9304-
9305 /*
9306 * macros/functions for gaining access to the thread information structure
9307 * preempt_count needs to be 1 initially, until the scheduler is functional.
9308@@ -216,21 +205,8 @@ static inline struct thread_info *curren
9309 #ifndef __ASSEMBLY__
9310 DECLARE_PER_CPU(unsigned long, kernel_stack);
9311
9312-static inline struct thread_info *current_thread_info(void)
9313-{
9314- struct thread_info *ti;
9315- ti = (void *)(percpu_read_stable(kernel_stack) +
9316- KERNEL_STACK_OFFSET - THREAD_SIZE);
9317- return ti;
9318-}
9319-
9320-#else /* !__ASSEMBLY__ */
9321-
9322-/* how to get the thread information struct from ASM */
9323-#define GET_THREAD_INFO(reg) \
9324- movq PER_CPU_VAR(kernel_stack),reg ; \
9325- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
9326-
9327+/* how to get the current stack pointer from C */
9328+register unsigned long current_stack_pointer asm("rsp") __used;
9329 #endif
9330
9331 #endif /* !X86_32 */
9332@@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
9333 extern void free_thread_info(struct thread_info *ti);
9334 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
9335 #define arch_task_cache_init arch_task_cache_init
9336+
9337+#define __HAVE_THREAD_FUNCTIONS
9338+#define task_thread_info(task) (&(task)->tinfo)
9339+#define task_stack_page(task) ((task)->stack)
9340+#define setup_thread_stack(p, org) do {} while (0)
9341+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
9342+
9343+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
9344+extern struct task_struct *alloc_task_struct_node(int node);
9345+extern void free_task_struct(struct task_struct *);
9346+
9347 #endif
9348 #endif /* _ASM_X86_THREAD_INFO_H */
9349diff -urNp linux-3.1.1/arch/x86/include/asm/uaccess_32.h linux-3.1.1/arch/x86/include/asm/uaccess_32.h
9350--- linux-3.1.1/arch/x86/include/asm/uaccess_32.h 2011-11-11 15:19:27.000000000 -0500
9351+++ linux-3.1.1/arch/x86/include/asm/uaccess_32.h 2011-11-16 18:40:08.000000000 -0500
9352@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_u
9353 static __always_inline unsigned long __must_check
9354 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
9355 {
9356+ pax_track_stack();
9357+
9358+ if ((long)n < 0)
9359+ return n;
9360+
9361 if (__builtin_constant_p(n)) {
9362 unsigned long ret;
9363
9364@@ -61,6 +66,8 @@ __copy_to_user_inatomic(void __user *to,
9365 return ret;
9366 }
9367 }
9368+ if (!__builtin_constant_p(n))
9369+ check_object_size(from, n, true);
9370 return __copy_to_user_ll(to, from, n);
9371 }
9372
9373@@ -82,12 +89,16 @@ static __always_inline unsigned long __m
9374 __copy_to_user(void __user *to, const void *from, unsigned long n)
9375 {
9376 might_fault();
9377+
9378 return __copy_to_user_inatomic(to, from, n);
9379 }
9380
9381 static __always_inline unsigned long
9382 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
9383 {
9384+ if ((long)n < 0)
9385+ return n;
9386+
9387 /* Avoid zeroing the tail if the copy fails..
9388 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
9389 * but as the zeroing behaviour is only significant when n is not
9390@@ -137,6 +148,12 @@ static __always_inline unsigned long
9391 __copy_from_user(void *to, const void __user *from, unsigned long n)
9392 {
9393 might_fault();
9394+
9395+ pax_track_stack();
9396+
9397+ if ((long)n < 0)
9398+ return n;
9399+
9400 if (__builtin_constant_p(n)) {
9401 unsigned long ret;
9402
9403@@ -152,6 +169,8 @@ __copy_from_user(void *to, const void __
9404 return ret;
9405 }
9406 }
9407+ if (!__builtin_constant_p(n))
9408+ check_object_size(to, n, false);
9409 return __copy_from_user_ll(to, from, n);
9410 }
9411
9412@@ -159,6 +178,10 @@ static __always_inline unsigned long __c
9413 const void __user *from, unsigned long n)
9414 {
9415 might_fault();
9416+
9417+ if ((long)n < 0)
9418+ return n;
9419+
9420 if (__builtin_constant_p(n)) {
9421 unsigned long ret;
9422
9423@@ -181,15 +204,19 @@ static __always_inline unsigned long
9424 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
9425 unsigned long n)
9426 {
9427- return __copy_from_user_ll_nocache_nozero(to, from, n);
9428-}
9429+ if ((long)n < 0)
9430+ return n;
9431
9432-unsigned long __must_check copy_to_user(void __user *to,
9433- const void *from, unsigned long n);
9434-unsigned long __must_check _copy_from_user(void *to,
9435- const void __user *from,
9436- unsigned long n);
9437+ return __copy_from_user_ll_nocache_nozero(to, from, n);
9438+}
9439
9440+extern void copy_to_user_overflow(void)
9441+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9442+ __compiletime_error("copy_to_user() buffer size is not provably correct")
9443+#else
9444+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
9445+#endif
9446+;
9447
9448 extern void copy_from_user_overflow(void)
9449 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9450@@ -199,17 +226,61 @@ extern void copy_from_user_overflow(void
9451 #endif
9452 ;
9453
9454-static inline unsigned long __must_check copy_from_user(void *to,
9455- const void __user *from,
9456- unsigned long n)
9457+/**
9458+ * copy_to_user: - Copy a block of data into user space.
9459+ * @to: Destination address, in user space.
9460+ * @from: Source address, in kernel space.
9461+ * @n: Number of bytes to copy.
9462+ *
9463+ * Context: User context only. This function may sleep.
9464+ *
9465+ * Copy data from kernel space to user space.
9466+ *
9467+ * Returns number of bytes that could not be copied.
9468+ * On success, this will be zero.
9469+ */
9470+static inline unsigned long __must_check
9471+copy_to_user(void __user *to, const void *from, unsigned long n)
9472+{
9473+ int sz = __compiletime_object_size(from);
9474+
9475+ if (unlikely(sz != -1 && sz < n))
9476+ copy_to_user_overflow();
9477+ else if (access_ok(VERIFY_WRITE, to, n))
9478+ n = __copy_to_user(to, from, n);
9479+ return n;
9480+}
9481+
9482+/**
9483+ * copy_from_user: - Copy a block of data from user space.
9484+ * @to: Destination address, in kernel space.
9485+ * @from: Source address, in user space.
9486+ * @n: Number of bytes to copy.
9487+ *
9488+ * Context: User context only. This function may sleep.
9489+ *
9490+ * Copy data from user space to kernel space.
9491+ *
9492+ * Returns number of bytes that could not be copied.
9493+ * On success, this will be zero.
9494+ *
9495+ * If some data could not be copied, this function will pad the copied
9496+ * data to the requested size using zero bytes.
9497+ */
9498+static inline unsigned long __must_check
9499+copy_from_user(void *to, const void __user *from, unsigned long n)
9500 {
9501 int sz = __compiletime_object_size(to);
9502
9503- if (likely(sz == -1 || sz >= n))
9504- n = _copy_from_user(to, from, n);
9505- else
9506+ if (unlikely(sz != -1 && sz < n))
9507 copy_from_user_overflow();
9508-
9509+ else if (access_ok(VERIFY_READ, from, n))
9510+ n = __copy_from_user(to, from, n);
9511+ else if ((long)n > 0) {
9512+ if (!__builtin_constant_p(n))
9513+ check_object_size(to, n, false);
9514+ memset(to, 0, n);
9515+ }
9516 return n;
9517 }
9518
9519diff -urNp linux-3.1.1/arch/x86/include/asm/uaccess_64.h linux-3.1.1/arch/x86/include/asm/uaccess_64.h
9520--- linux-3.1.1/arch/x86/include/asm/uaccess_64.h 2011-11-11 15:19:27.000000000 -0500
9521+++ linux-3.1.1/arch/x86/include/asm/uaccess_64.h 2011-11-16 18:40:08.000000000 -0500
9522@@ -10,6 +10,9 @@
9523 #include <asm/alternative.h>
9524 #include <asm/cpufeature.h>
9525 #include <asm/page.h>
9526+#include <asm/pgtable.h>
9527+
9528+#define set_fs(x) (current_thread_info()->addr_limit = (x))
9529
9530 /*
9531 * Copy To/From Userspace
9532@@ -36,26 +39,26 @@ copy_user_generic(void *to, const void *
9533 return ret;
9534 }
9535
9536-__must_check unsigned long
9537-_copy_to_user(void __user *to, const void *from, unsigned len);
9538-__must_check unsigned long
9539-_copy_from_user(void *to, const void __user *from, unsigned len);
9540+static __always_inline __must_check unsigned long
9541+__copy_to_user(void __user *to, const void *from, unsigned len);
9542+static __always_inline __must_check unsigned long
9543+__copy_from_user(void *to, const void __user *from, unsigned len);
9544 __must_check unsigned long
9545 copy_in_user(void __user *to, const void __user *from, unsigned len);
9546
9547 static inline unsigned long __must_check copy_from_user(void *to,
9548 const void __user *from,
9549- unsigned long n)
9550+ unsigned n)
9551 {
9552- int sz = __compiletime_object_size(to);
9553-
9554 might_fault();
9555- if (likely(sz == -1 || sz >= n))
9556- n = _copy_from_user(to, from, n);
9557-#ifdef CONFIG_DEBUG_VM
9558- else
9559- WARN(1, "Buffer overflow detected!\n");
9560-#endif
9561+
9562+ if (access_ok(VERIFY_READ, from, n))
9563+ n = __copy_from_user(to, from, n);
9564+ else if ((int)n > 0) {
9565+ if (!__builtin_constant_p(n))
9566+ check_object_size(to, n, false);
9567+ memset(to, 0, n);
9568+ }
9569 return n;
9570 }
9571
9572@@ -64,110 +67,198 @@ int copy_to_user(void __user *dst, const
9573 {
9574 might_fault();
9575
9576- return _copy_to_user(dst, src, size);
9577+ if (access_ok(VERIFY_WRITE, dst, size))
9578+ size = __copy_to_user(dst, src, size);
9579+ return size;
9580 }
9581
9582 static __always_inline __must_check
9583-int __copy_from_user(void *dst, const void __user *src, unsigned size)
9584+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
9585 {
9586- int ret = 0;
9587+ int sz = __compiletime_object_size(dst);
9588+ unsigned ret = 0;
9589
9590 might_fault();
9591- if (!__builtin_constant_p(size))
9592- return copy_user_generic(dst, (__force void *)src, size);
9593+
9594+ pax_track_stack();
9595+
9596+ if ((int)size < 0)
9597+ return size;
9598+
9599+#ifdef CONFIG_PAX_MEMORY_UDEREF
9600+ if (!__access_ok(VERIFY_READ, src, size))
9601+ return size;
9602+#endif
9603+
9604+ if (unlikely(sz != -1 && sz < size)) {
9605+#ifdef CONFIG_DEBUG_VM
9606+ WARN(1, "Buffer overflow detected!\n");
9607+#endif
9608+ return size;
9609+ }
9610+
9611+ if (!__builtin_constant_p(size)) {
9612+ check_object_size(dst, size, false);
9613+
9614+#ifdef CONFIG_PAX_MEMORY_UDEREF
9615+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9616+ src += PAX_USER_SHADOW_BASE;
9617+#endif
9618+
9619+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
9620+ }
9621 switch (size) {
9622- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
9623+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
9624 ret, "b", "b", "=q", 1);
9625 return ret;
9626- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
9627+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
9628 ret, "w", "w", "=r", 2);
9629 return ret;
9630- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
9631+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
9632 ret, "l", "k", "=r", 4);
9633 return ret;
9634- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
9635+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9636 ret, "q", "", "=r", 8);
9637 return ret;
9638 case 10:
9639- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9640+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9641 ret, "q", "", "=r", 10);
9642 if (unlikely(ret))
9643 return ret;
9644 __get_user_asm(*(u16 *)(8 + (char *)dst),
9645- (u16 __user *)(8 + (char __user *)src),
9646+ (const u16 __user *)(8 + (const char __user *)src),
9647 ret, "w", "w", "=r", 2);
9648 return ret;
9649 case 16:
9650- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9651+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9652 ret, "q", "", "=r", 16);
9653 if (unlikely(ret))
9654 return ret;
9655 __get_user_asm(*(u64 *)(8 + (char *)dst),
9656- (u64 __user *)(8 + (char __user *)src),
9657+ (const u64 __user *)(8 + (const char __user *)src),
9658 ret, "q", "", "=r", 8);
9659 return ret;
9660 default:
9661- return copy_user_generic(dst, (__force void *)src, size);
9662+
9663+#ifdef CONFIG_PAX_MEMORY_UDEREF
9664+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9665+ src += PAX_USER_SHADOW_BASE;
9666+#endif
9667+
9668+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
9669 }
9670 }
9671
9672 static __always_inline __must_check
9673-int __copy_to_user(void __user *dst, const void *src, unsigned size)
9674+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
9675 {
9676- int ret = 0;
9677+ int sz = __compiletime_object_size(src);
9678+ unsigned ret = 0;
9679
9680 might_fault();
9681- if (!__builtin_constant_p(size))
9682- return copy_user_generic((__force void *)dst, src, size);
9683+
9684+ pax_track_stack();
9685+
9686+ if ((int)size < 0)
9687+ return size;
9688+
9689+#ifdef CONFIG_PAX_MEMORY_UDEREF
9690+ if (!__access_ok(VERIFY_WRITE, dst, size))
9691+ return size;
9692+#endif
9693+
9694+ if (unlikely(sz != -1 && sz < size)) {
9695+#ifdef CONFIG_DEBUG_VM
9696+ WARN(1, "Buffer overflow detected!\n");
9697+#endif
9698+ return size;
9699+ }
9700+
9701+ if (!__builtin_constant_p(size)) {
9702+ check_object_size(src, size, true);
9703+
9704+#ifdef CONFIG_PAX_MEMORY_UDEREF
9705+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9706+ dst += PAX_USER_SHADOW_BASE;
9707+#endif
9708+
9709+ return copy_user_generic((__force_kernel void *)dst, src, size);
9710+ }
9711 switch (size) {
9712- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
9713+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
9714 ret, "b", "b", "iq", 1);
9715 return ret;
9716- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
9717+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
9718 ret, "w", "w", "ir", 2);
9719 return ret;
9720- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
9721+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
9722 ret, "l", "k", "ir", 4);
9723 return ret;
9724- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
9725+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9726 ret, "q", "", "er", 8);
9727 return ret;
9728 case 10:
9729- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9730+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9731 ret, "q", "", "er", 10);
9732 if (unlikely(ret))
9733 return ret;
9734 asm("":::"memory");
9735- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
9736+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
9737 ret, "w", "w", "ir", 2);
9738 return ret;
9739 case 16:
9740- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9741+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9742 ret, "q", "", "er", 16);
9743 if (unlikely(ret))
9744 return ret;
9745 asm("":::"memory");
9746- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
9747+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
9748 ret, "q", "", "er", 8);
9749 return ret;
9750 default:
9751- return copy_user_generic((__force void *)dst, src, size);
9752+
9753+#ifdef CONFIG_PAX_MEMORY_UDEREF
9754+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9755+ dst += PAX_USER_SHADOW_BASE;
9756+#endif
9757+
9758+ return copy_user_generic((__force_kernel void *)dst, src, size);
9759 }
9760 }
9761
9762 static __always_inline __must_check
9763-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9764+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9765 {
9766- int ret = 0;
9767+ unsigned ret = 0;
9768
9769 might_fault();
9770- if (!__builtin_constant_p(size))
9771- return copy_user_generic((__force void *)dst,
9772- (__force void *)src, size);
9773+
9774+ if ((int)size < 0)
9775+ return size;
9776+
9777+#ifdef CONFIG_PAX_MEMORY_UDEREF
9778+ if (!__access_ok(VERIFY_READ, src, size))
9779+ return size;
9780+ if (!__access_ok(VERIFY_WRITE, dst, size))
9781+ return size;
9782+#endif
9783+
9784+ if (!__builtin_constant_p(size)) {
9785+
9786+#ifdef CONFIG_PAX_MEMORY_UDEREF
9787+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9788+ src += PAX_USER_SHADOW_BASE;
9789+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9790+ dst += PAX_USER_SHADOW_BASE;
9791+#endif
9792+
9793+ return copy_user_generic((__force_kernel void *)dst,
9794+ (__force_kernel const void *)src, size);
9795+ }
9796 switch (size) {
9797 case 1: {
9798 u8 tmp;
9799- __get_user_asm(tmp, (u8 __user *)src,
9800+ __get_user_asm(tmp, (const u8 __user *)src,
9801 ret, "b", "b", "=q", 1);
9802 if (likely(!ret))
9803 __put_user_asm(tmp, (u8 __user *)dst,
9804@@ -176,7 +267,7 @@ int __copy_in_user(void __user *dst, con
9805 }
9806 case 2: {
9807 u16 tmp;
9808- __get_user_asm(tmp, (u16 __user *)src,
9809+ __get_user_asm(tmp, (const u16 __user *)src,
9810 ret, "w", "w", "=r", 2);
9811 if (likely(!ret))
9812 __put_user_asm(tmp, (u16 __user *)dst,
9813@@ -186,7 +277,7 @@ int __copy_in_user(void __user *dst, con
9814
9815 case 4: {
9816 u32 tmp;
9817- __get_user_asm(tmp, (u32 __user *)src,
9818+ __get_user_asm(tmp, (const u32 __user *)src,
9819 ret, "l", "k", "=r", 4);
9820 if (likely(!ret))
9821 __put_user_asm(tmp, (u32 __user *)dst,
9822@@ -195,7 +286,7 @@ int __copy_in_user(void __user *dst, con
9823 }
9824 case 8: {
9825 u64 tmp;
9826- __get_user_asm(tmp, (u64 __user *)src,
9827+ __get_user_asm(tmp, (const u64 __user *)src,
9828 ret, "q", "", "=r", 8);
9829 if (likely(!ret))
9830 __put_user_asm(tmp, (u64 __user *)dst,
9831@@ -203,8 +294,16 @@ int __copy_in_user(void __user *dst, con
9832 return ret;
9833 }
9834 default:
9835- return copy_user_generic((__force void *)dst,
9836- (__force void *)src, size);
9837+
9838+#ifdef CONFIG_PAX_MEMORY_UDEREF
9839+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9840+ src += PAX_USER_SHADOW_BASE;
9841+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9842+ dst += PAX_USER_SHADOW_BASE;
9843+#endif
9844+
9845+ return copy_user_generic((__force_kernel void *)dst,
9846+ (__force_kernel const void *)src, size);
9847 }
9848 }
9849
9850@@ -221,33 +320,72 @@ __must_check unsigned long __clear_user(
9851 static __must_check __always_inline int
9852 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
9853 {
9854- return copy_user_generic(dst, (__force const void *)src, size);
9855+ pax_track_stack();
9856+
9857+ if ((int)size < 0)
9858+ return size;
9859+
9860+#ifdef CONFIG_PAX_MEMORY_UDEREF
9861+ if (!__access_ok(VERIFY_READ, src, size))
9862+ return size;
9863+
9864+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9865+ src += PAX_USER_SHADOW_BASE;
9866+#endif
9867+
9868+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
9869 }
9870
9871-static __must_check __always_inline int
9872+static __must_check __always_inline unsigned long
9873 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
9874 {
9875- return copy_user_generic((__force void *)dst, src, size);
9876+ if ((int)size < 0)
9877+ return size;
9878+
9879+#ifdef CONFIG_PAX_MEMORY_UDEREF
9880+ if (!__access_ok(VERIFY_WRITE, dst, size))
9881+ return size;
9882+
9883+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9884+ dst += PAX_USER_SHADOW_BASE;
9885+#endif
9886+
9887+ return copy_user_generic((__force_kernel void *)dst, src, size);
9888 }
9889
9890-extern long __copy_user_nocache(void *dst, const void __user *src,
9891+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
9892 unsigned size, int zerorest);
9893
9894-static inline int
9895-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9896+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9897 {
9898 might_sleep();
9899+
9900+ if ((int)size < 0)
9901+ return size;
9902+
9903+#ifdef CONFIG_PAX_MEMORY_UDEREF
9904+ if (!__access_ok(VERIFY_READ, src, size))
9905+ return size;
9906+#endif
9907+
9908 return __copy_user_nocache(dst, src, size, 1);
9909 }
9910
9911-static inline int
9912-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9913+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9914 unsigned size)
9915 {
9916+ if ((int)size < 0)
9917+ return size;
9918+
9919+#ifdef CONFIG_PAX_MEMORY_UDEREF
9920+ if (!__access_ok(VERIFY_READ, src, size))
9921+ return size;
9922+#endif
9923+
9924 return __copy_user_nocache(dst, src, size, 0);
9925 }
9926
9927-unsigned long
9928-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
9929+extern unsigned long
9930+copy_user_handle_tail(char __user *to, char __user *from, unsigned len, unsigned zerorest);
9931
9932 #endif /* _ASM_X86_UACCESS_64_H */
9933diff -urNp linux-3.1.1/arch/x86/include/asm/uaccess.h linux-3.1.1/arch/x86/include/asm/uaccess.h
9934--- linux-3.1.1/arch/x86/include/asm/uaccess.h 2011-11-11 15:19:27.000000000 -0500
9935+++ linux-3.1.1/arch/x86/include/asm/uaccess.h 2011-11-16 18:39:07.000000000 -0500
9936@@ -7,12 +7,15 @@
9937 #include <linux/compiler.h>
9938 #include <linux/thread_info.h>
9939 #include <linux/string.h>
9940+#include <linux/sched.h>
9941 #include <asm/asm.h>
9942 #include <asm/page.h>
9943
9944 #define VERIFY_READ 0
9945 #define VERIFY_WRITE 1
9946
9947+extern void check_object_size(const void *ptr, unsigned long n, bool to);
9948+
9949 /*
9950 * The fs value determines whether argument validity checking should be
9951 * performed or not. If get_fs() == USER_DS, checking is performed, with
9952@@ -28,7 +31,12 @@
9953
9954 #define get_ds() (KERNEL_DS)
9955 #define get_fs() (current_thread_info()->addr_limit)
9956+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9957+void __set_fs(mm_segment_t x);
9958+void set_fs(mm_segment_t x);
9959+#else
9960 #define set_fs(x) (current_thread_info()->addr_limit = (x))
9961+#endif
9962
9963 #define segment_eq(a, b) ((a).seg == (b).seg)
9964
9965@@ -76,7 +84,33 @@
9966 * checks that the pointer is in the user space range - after calling
9967 * this function, memory access functions may still return -EFAULT.
9968 */
9969-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9970+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9971+#define access_ok(type, addr, size) \
9972+({ \
9973+ long __size = size; \
9974+ unsigned long __addr = (unsigned long)addr; \
9975+ unsigned long __addr_ao = __addr & PAGE_MASK; \
9976+ unsigned long __end_ao = __addr + __size - 1; \
9977+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
9978+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
9979+ while(__addr_ao <= __end_ao) { \
9980+ char __c_ao; \
9981+ __addr_ao += PAGE_SIZE; \
9982+ if (__size > PAGE_SIZE) \
9983+ cond_resched(); \
9984+ if (__get_user(__c_ao, (char __user *)__addr)) \
9985+ break; \
9986+ if (type != VERIFY_WRITE) { \
9987+ __addr = __addr_ao; \
9988+ continue; \
9989+ } \
9990+ if (__put_user(__c_ao, (char __user *)__addr)) \
9991+ break; \
9992+ __addr = __addr_ao; \
9993+ } \
9994+ } \
9995+ __ret_ao; \
9996+})
9997
9998 /*
9999 * The exception table consists of pairs of addresses: the first is the
10000@@ -182,12 +216,20 @@ extern int __get_user_bad(void);
10001 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
10002 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
10003
10004-
10005+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10006+#define __copyuser_seg "gs;"
10007+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
10008+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
10009+#else
10010+#define __copyuser_seg
10011+#define __COPYUSER_SET_ES
10012+#define __COPYUSER_RESTORE_ES
10013+#endif
10014
10015 #ifdef CONFIG_X86_32
10016 #define __put_user_asm_u64(x, addr, err, errret) \
10017- asm volatile("1: movl %%eax,0(%2)\n" \
10018- "2: movl %%edx,4(%2)\n" \
10019+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
10020+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
10021 "3:\n" \
10022 ".section .fixup,\"ax\"\n" \
10023 "4: movl %3,%0\n" \
10024@@ -199,8 +241,8 @@ extern int __get_user_bad(void);
10025 : "A" (x), "r" (addr), "i" (errret), "0" (err))
10026
10027 #define __put_user_asm_ex_u64(x, addr) \
10028- asm volatile("1: movl %%eax,0(%1)\n" \
10029- "2: movl %%edx,4(%1)\n" \
10030+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
10031+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
10032 "3:\n" \
10033 _ASM_EXTABLE(1b, 2b - 1b) \
10034 _ASM_EXTABLE(2b, 3b - 2b) \
10035@@ -252,7 +294,7 @@ extern void __put_user_8(void);
10036 __typeof__(*(ptr)) __pu_val; \
10037 __chk_user_ptr(ptr); \
10038 might_fault(); \
10039- __pu_val = x; \
10040+ __pu_val = (x); \
10041 switch (sizeof(*(ptr))) { \
10042 case 1: \
10043 __put_user_x(1, __pu_val, ptr, __ret_pu); \
10044@@ -373,7 +415,7 @@ do { \
10045 } while (0)
10046
10047 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10048- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
10049+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
10050 "2:\n" \
10051 ".section .fixup,\"ax\"\n" \
10052 "3: mov %3,%0\n" \
10053@@ -381,7 +423,7 @@ do { \
10054 " jmp 2b\n" \
10055 ".previous\n" \
10056 _ASM_EXTABLE(1b, 3b) \
10057- : "=r" (err), ltype(x) \
10058+ : "=r" (err), ltype (x) \
10059 : "m" (__m(addr)), "i" (errret), "0" (err))
10060
10061 #define __get_user_size_ex(x, ptr, size) \
10062@@ -406,7 +448,7 @@ do { \
10063 } while (0)
10064
10065 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
10066- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
10067+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
10068 "2:\n" \
10069 _ASM_EXTABLE(1b, 2b - 1b) \
10070 : ltype(x) : "m" (__m(addr)))
10071@@ -423,13 +465,24 @@ do { \
10072 int __gu_err; \
10073 unsigned long __gu_val; \
10074 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
10075- (x) = (__force __typeof__(*(ptr)))__gu_val; \
10076+ (x) = (__typeof__(*(ptr)))__gu_val; \
10077 __gu_err; \
10078 })
10079
10080 /* FIXME: this hack is definitely wrong -AK */
10081 struct __large_struct { unsigned long buf[100]; };
10082-#define __m(x) (*(struct __large_struct __user *)(x))
10083+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10084+#define ____m(x) \
10085+({ \
10086+ unsigned long ____x = (unsigned long)(x); \
10087+ if (____x < PAX_USER_SHADOW_BASE) \
10088+ ____x += PAX_USER_SHADOW_BASE; \
10089+ (void __user *)____x; \
10090+})
10091+#else
10092+#define ____m(x) (x)
10093+#endif
10094+#define __m(x) (*(struct __large_struct __user *)____m(x))
10095
10096 /*
10097 * Tell gcc we read from memory instead of writing: this is because
10098@@ -437,7 +490,7 @@ struct __large_struct { unsigned long bu
10099 * aliasing issues.
10100 */
10101 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10102- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
10103+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
10104 "2:\n" \
10105 ".section .fixup,\"ax\"\n" \
10106 "3: mov %3,%0\n" \
10107@@ -445,10 +498,10 @@ struct __large_struct { unsigned long bu
10108 ".previous\n" \
10109 _ASM_EXTABLE(1b, 3b) \
10110 : "=r"(err) \
10111- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
10112+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
10113
10114 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
10115- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
10116+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
10117 "2:\n" \
10118 _ASM_EXTABLE(1b, 2b - 1b) \
10119 : : ltype(x), "m" (__m(addr)))
10120@@ -487,8 +540,12 @@ struct __large_struct { unsigned long bu
10121 * On error, the variable @x is set to zero.
10122 */
10123
10124+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10125+#define __get_user(x, ptr) get_user((x), (ptr))
10126+#else
10127 #define __get_user(x, ptr) \
10128 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
10129+#endif
10130
10131 /**
10132 * __put_user: - Write a simple value into user space, with less checking.
10133@@ -510,8 +567,12 @@ struct __large_struct { unsigned long bu
10134 * Returns zero on success, or -EFAULT on error.
10135 */
10136
10137+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10138+#define __put_user(x, ptr) put_user((x), (ptr))
10139+#else
10140 #define __put_user(x, ptr) \
10141 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
10142+#endif
10143
10144 #define __get_user_unaligned __get_user
10145 #define __put_user_unaligned __put_user
10146@@ -529,7 +590,7 @@ struct __large_struct { unsigned long bu
10147 #define get_user_ex(x, ptr) do { \
10148 unsigned long __gue_val; \
10149 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
10150- (x) = (__force __typeof__(*(ptr)))__gue_val; \
10151+ (x) = (__typeof__(*(ptr)))__gue_val; \
10152 } while (0)
10153
10154 #ifdef CONFIG_X86_WP_WORKS_OK
10155diff -urNp linux-3.1.1/arch/x86/include/asm/vdso.h linux-3.1.1/arch/x86/include/asm/vdso.h
10156--- linux-3.1.1/arch/x86/include/asm/vdso.h 2011-11-11 15:19:27.000000000 -0500
10157+++ linux-3.1.1/arch/x86/include/asm/vdso.h 2011-11-16 18:39:07.000000000 -0500
10158@@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
10159 #define VDSO32_SYMBOL(base, name) \
10160 ({ \
10161 extern const char VDSO32_##name[]; \
10162- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
10163+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
10164 })
10165 #endif
10166
10167diff -urNp linux-3.1.1/arch/x86/include/asm/x86_init.h linux-3.1.1/arch/x86/include/asm/x86_init.h
10168--- linux-3.1.1/arch/x86/include/asm/x86_init.h 2011-11-11 15:19:27.000000000 -0500
10169+++ linux-3.1.1/arch/x86/include/asm/x86_init.h 2011-11-16 18:39:07.000000000 -0500
10170@@ -28,7 +28,7 @@ struct x86_init_mpparse {
10171 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
10172 void (*find_smp_config)(void);
10173 void (*get_smp_config)(unsigned int early);
10174-};
10175+} __no_const;
10176
10177 /**
10178 * struct x86_init_resources - platform specific resource related ops
10179@@ -42,7 +42,7 @@ struct x86_init_resources {
10180 void (*probe_roms)(void);
10181 void (*reserve_resources)(void);
10182 char *(*memory_setup)(void);
10183-};
10184+} __no_const;
10185
10186 /**
10187 * struct x86_init_irqs - platform specific interrupt setup
10188@@ -55,7 +55,7 @@ struct x86_init_irqs {
10189 void (*pre_vector_init)(void);
10190 void (*intr_init)(void);
10191 void (*trap_init)(void);
10192-};
10193+} __no_const;
10194
10195 /**
10196 * struct x86_init_oem - oem platform specific customizing functions
10197@@ -65,7 +65,7 @@ struct x86_init_irqs {
10198 struct x86_init_oem {
10199 void (*arch_setup)(void);
10200 void (*banner)(void);
10201-};
10202+} __no_const;
10203
10204 /**
10205 * struct x86_init_mapping - platform specific initial kernel pagetable setup
10206@@ -76,7 +76,7 @@ struct x86_init_oem {
10207 */
10208 struct x86_init_mapping {
10209 void (*pagetable_reserve)(u64 start, u64 end);
10210-};
10211+} __no_const;
10212
10213 /**
10214 * struct x86_init_paging - platform specific paging functions
10215@@ -86,7 +86,7 @@ struct x86_init_mapping {
10216 struct x86_init_paging {
10217 void (*pagetable_setup_start)(pgd_t *base);
10218 void (*pagetable_setup_done)(pgd_t *base);
10219-};
10220+} __no_const;
10221
10222 /**
10223 * struct x86_init_timers - platform specific timer setup
10224@@ -101,7 +101,7 @@ struct x86_init_timers {
10225 void (*tsc_pre_init)(void);
10226 void (*timer_init)(void);
10227 void (*wallclock_init)(void);
10228-};
10229+} __no_const;
10230
10231 /**
10232 * struct x86_init_iommu - platform specific iommu setup
10233@@ -109,7 +109,7 @@ struct x86_init_timers {
10234 */
10235 struct x86_init_iommu {
10236 int (*iommu_init)(void);
10237-};
10238+} __no_const;
10239
10240 /**
10241 * struct x86_init_pci - platform specific pci init functions
10242@@ -123,7 +123,7 @@ struct x86_init_pci {
10243 int (*init)(void);
10244 void (*init_irq)(void);
10245 void (*fixup_irqs)(void);
10246-};
10247+} __no_const;
10248
10249 /**
10250 * struct x86_init_ops - functions for platform specific setup
10251@@ -139,7 +139,7 @@ struct x86_init_ops {
10252 struct x86_init_timers timers;
10253 struct x86_init_iommu iommu;
10254 struct x86_init_pci pci;
10255-};
10256+} __no_const;
10257
10258 /**
10259 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
10260@@ -147,7 +147,7 @@ struct x86_init_ops {
10261 */
10262 struct x86_cpuinit_ops {
10263 void (*setup_percpu_clockev)(void);
10264-};
10265+} __no_const;
10266
10267 /**
10268 * struct x86_platform_ops - platform specific runtime functions
10269@@ -166,7 +166,7 @@ struct x86_platform_ops {
10270 bool (*is_untracked_pat_range)(u64 start, u64 end);
10271 void (*nmi_init)(void);
10272 int (*i8042_detect)(void);
10273-};
10274+} __no_const;
10275
10276 struct pci_dev;
10277
10278@@ -174,7 +174,7 @@ struct x86_msi_ops {
10279 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
10280 void (*teardown_msi_irq)(unsigned int irq);
10281 void (*teardown_msi_irqs)(struct pci_dev *dev);
10282-};
10283+} __no_const;
10284
10285 extern struct x86_init_ops x86_init;
10286 extern struct x86_cpuinit_ops x86_cpuinit;
10287diff -urNp linux-3.1.1/arch/x86/include/asm/xsave.h linux-3.1.1/arch/x86/include/asm/xsave.h
10288--- linux-3.1.1/arch/x86/include/asm/xsave.h 2011-11-11 15:19:27.000000000 -0500
10289+++ linux-3.1.1/arch/x86/include/asm/xsave.h 2011-11-16 18:39:07.000000000 -0500
10290@@ -65,6 +65,11 @@ static inline int xsave_user(struct xsav
10291 {
10292 int err;
10293
10294+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10295+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
10296+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
10297+#endif
10298+
10299 /*
10300 * Clear the xsave header first, so that reserved fields are
10301 * initialized to zero.
10302@@ -96,10 +101,15 @@ static inline int xsave_user(struct xsav
10303 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
10304 {
10305 int err;
10306- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
10307+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
10308 u32 lmask = mask;
10309 u32 hmask = mask >> 32;
10310
10311+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10312+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
10313+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
10314+#endif
10315+
10316 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
10317 "2:\n"
10318 ".section .fixup,\"ax\"\n"
10319diff -urNp linux-3.1.1/arch/x86/Kconfig linux-3.1.1/arch/x86/Kconfig
10320--- linux-3.1.1/arch/x86/Kconfig 2011-11-11 15:19:27.000000000 -0500
10321+++ linux-3.1.1/arch/x86/Kconfig 2011-11-16 18:40:08.000000000 -0500
10322@@ -236,7 +236,7 @@ config X86_HT
10323
10324 config X86_32_LAZY_GS
10325 def_bool y
10326- depends on X86_32 && !CC_STACKPROTECTOR
10327+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10328
10329 config ARCH_HWEIGHT_CFLAGS
10330 string
10331@@ -1019,7 +1019,7 @@ choice
10332
10333 config NOHIGHMEM
10334 bool "off"
10335- depends on !X86_NUMAQ
10336+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10337 ---help---
10338 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10339 However, the address space of 32-bit x86 processors is only 4
10340@@ -1056,7 +1056,7 @@ config NOHIGHMEM
10341
10342 config HIGHMEM4G
10343 bool "4GB"
10344- depends on !X86_NUMAQ
10345+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10346 ---help---
10347 Select this if you have a 32-bit processor and between 1 and 4
10348 gigabytes of physical RAM.
10349@@ -1110,7 +1110,7 @@ config PAGE_OFFSET
10350 hex
10351 default 0xB0000000 if VMSPLIT_3G_OPT
10352 default 0x80000000 if VMSPLIT_2G
10353- default 0x78000000 if VMSPLIT_2G_OPT
10354+ default 0x70000000 if VMSPLIT_2G_OPT
10355 default 0x40000000 if VMSPLIT_1G
10356 default 0xC0000000
10357 depends on X86_32
10358@@ -1484,6 +1484,7 @@ config SECCOMP
10359
10360 config CC_STACKPROTECTOR
10361 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
10362+ depends on X86_64 || !PAX_MEMORY_UDEREF
10363 ---help---
10364 This option turns on the -fstack-protector GCC feature. This
10365 feature puts, at the beginning of functions, a canary value on
10366@@ -1541,6 +1542,7 @@ config KEXEC_JUMP
10367 config PHYSICAL_START
10368 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
10369 default "0x1000000"
10370+ range 0x400000 0x40000000
10371 ---help---
10372 This gives the physical address where the kernel is loaded.
10373
10374@@ -1604,6 +1606,7 @@ config X86_NEED_RELOCS
10375 config PHYSICAL_ALIGN
10376 hex "Alignment value to which kernel should be aligned" if X86_32
10377 default "0x1000000"
10378+ range 0x400000 0x1000000 if PAX_KERNEXEC
10379 range 0x2000 0x1000000
10380 ---help---
10381 This value puts the alignment restrictions on physical address
10382@@ -1635,9 +1638,10 @@ config HOTPLUG_CPU
10383 Say N if you want to disable CPU hotplug.
10384
10385 config COMPAT_VDSO
10386- def_bool y
10387+ def_bool n
10388 prompt "Compat VDSO support"
10389 depends on X86_32 || IA32_EMULATION
10390+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
10391 ---help---
10392 Map the 32-bit VDSO to the predictable old-style address too.
10393
10394diff -urNp linux-3.1.1/arch/x86/Kconfig.cpu linux-3.1.1/arch/x86/Kconfig.cpu
10395--- linux-3.1.1/arch/x86/Kconfig.cpu 2011-11-11 15:19:27.000000000 -0500
10396+++ linux-3.1.1/arch/x86/Kconfig.cpu 2011-11-16 18:39:07.000000000 -0500
10397@@ -341,7 +341,7 @@ config X86_PPRO_FENCE
10398
10399 config X86_F00F_BUG
10400 def_bool y
10401- depends on M586MMX || M586TSC || M586 || M486 || M386
10402+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
10403
10404 config X86_INVD_BUG
10405 def_bool y
10406@@ -365,7 +365,7 @@ config X86_POPAD_OK
10407
10408 config X86_ALIGNMENT_16
10409 def_bool y
10410- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10411+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10412
10413 config X86_INTEL_USERCOPY
10414 def_bool y
10415@@ -411,7 +411,7 @@ config X86_CMPXCHG64
10416 # generates cmov.
10417 config X86_CMOV
10418 def_bool y
10419- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10420+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10421
10422 config X86_MINIMUM_CPU_FAMILY
10423 int
10424diff -urNp linux-3.1.1/arch/x86/Kconfig.debug linux-3.1.1/arch/x86/Kconfig.debug
10425--- linux-3.1.1/arch/x86/Kconfig.debug 2011-11-11 15:19:27.000000000 -0500
10426+++ linux-3.1.1/arch/x86/Kconfig.debug 2011-11-16 18:39:07.000000000 -0500
10427@@ -81,7 +81,7 @@ config X86_PTDUMP
10428 config DEBUG_RODATA
10429 bool "Write protect kernel read-only data structures"
10430 default y
10431- depends on DEBUG_KERNEL
10432+ depends on DEBUG_KERNEL && BROKEN
10433 ---help---
10434 Mark the kernel read-only data as write-protected in the pagetables,
10435 in order to catch accidental (and incorrect) writes to such const
10436@@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
10437
10438 config DEBUG_SET_MODULE_RONX
10439 bool "Set loadable kernel module data as NX and text as RO"
10440- depends on MODULES
10441+ depends on MODULES && BROKEN
10442 ---help---
10443 This option helps catch unintended modifications to loadable
10444 kernel module's text and read-only data. It also prevents execution
10445diff -urNp linux-3.1.1/arch/x86/kernel/acpi/realmode/Makefile linux-3.1.1/arch/x86/kernel/acpi/realmode/Makefile
10446--- linux-3.1.1/arch/x86/kernel/acpi/realmode/Makefile 2011-11-11 15:19:27.000000000 -0500
10447+++ linux-3.1.1/arch/x86/kernel/acpi/realmode/Makefile 2011-11-16 18:39:07.000000000 -0500
10448@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
10449 $(call cc-option, -fno-stack-protector) \
10450 $(call cc-option, -mpreferred-stack-boundary=2)
10451 KBUILD_CFLAGS += $(call cc-option, -m32)
10452+ifdef CONSTIFY_PLUGIN
10453+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
10454+endif
10455 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10456 GCOV_PROFILE := n
10457
10458diff -urNp linux-3.1.1/arch/x86/kernel/acpi/realmode/wakeup.S linux-3.1.1/arch/x86/kernel/acpi/realmode/wakeup.S
10459--- linux-3.1.1/arch/x86/kernel/acpi/realmode/wakeup.S 2011-11-11 15:19:27.000000000 -0500
10460+++ linux-3.1.1/arch/x86/kernel/acpi/realmode/wakeup.S 2011-11-16 18:40:08.000000000 -0500
10461@@ -108,6 +108,9 @@ wakeup_code:
10462 /* Do any other stuff... */
10463
10464 #ifndef CONFIG_64BIT
10465+ /* Recheck NX bit overrides (64bit path does this in trampoline */
10466+ call verify_cpu
10467+
10468 /* This could also be done in C code... */
10469 movl pmode_cr3, %eax
10470 movl %eax, %cr3
10471@@ -131,6 +134,7 @@ wakeup_code:
10472 movl pmode_cr0, %eax
10473 movl %eax, %cr0
10474 jmp pmode_return
10475+# include "../../verify_cpu.S"
10476 #else
10477 pushw $0
10478 pushw trampoline_segment
10479diff -urNp linux-3.1.1/arch/x86/kernel/acpi/sleep.c linux-3.1.1/arch/x86/kernel/acpi/sleep.c
10480--- linux-3.1.1/arch/x86/kernel/acpi/sleep.c 2011-11-11 15:19:27.000000000 -0500
10481+++ linux-3.1.1/arch/x86/kernel/acpi/sleep.c 2011-11-16 18:39:07.000000000 -0500
10482@@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
10483 header->trampoline_segment = trampoline_address() >> 4;
10484 #ifdef CONFIG_SMP
10485 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
10486+
10487+ pax_open_kernel();
10488 early_gdt_descr.address =
10489 (unsigned long)get_cpu_gdt_table(smp_processor_id());
10490+ pax_close_kernel();
10491+
10492 initial_gs = per_cpu_offset(smp_processor_id());
10493 #endif
10494 initial_code = (unsigned long)wakeup_long64;
10495diff -urNp linux-3.1.1/arch/x86/kernel/acpi/wakeup_32.S linux-3.1.1/arch/x86/kernel/acpi/wakeup_32.S
10496--- linux-3.1.1/arch/x86/kernel/acpi/wakeup_32.S 2011-11-11 15:19:27.000000000 -0500
10497+++ linux-3.1.1/arch/x86/kernel/acpi/wakeup_32.S 2011-11-16 18:39:07.000000000 -0500
10498@@ -30,13 +30,11 @@ wakeup_pmode_return:
10499 # and restore the stack ... but you need gdt for this to work
10500 movl saved_context_esp, %esp
10501
10502- movl %cs:saved_magic, %eax
10503- cmpl $0x12345678, %eax
10504+ cmpl $0x12345678, saved_magic
10505 jne bogus_magic
10506
10507 # jump to place where we left off
10508- movl saved_eip, %eax
10509- jmp *%eax
10510+ jmp *(saved_eip)
10511
10512 bogus_magic:
10513 jmp bogus_magic
10514diff -urNp linux-3.1.1/arch/x86/kernel/alternative.c linux-3.1.1/arch/x86/kernel/alternative.c
10515--- linux-3.1.1/arch/x86/kernel/alternative.c 2011-11-11 15:19:27.000000000 -0500
10516+++ linux-3.1.1/arch/x86/kernel/alternative.c 2011-11-16 18:39:07.000000000 -0500
10517@@ -276,6 +276,13 @@ void __init_or_module apply_alternatives
10518 */
10519 for (a = start; a < end; a++) {
10520 instr = (u8 *)&a->instr_offset + a->instr_offset;
10521+
10522+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
10523+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
10524+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
10525+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
10526+#endif
10527+
10528 replacement = (u8 *)&a->repl_offset + a->repl_offset;
10529 BUG_ON(a->replacementlen > a->instrlen);
10530 BUG_ON(a->instrlen > sizeof(insnbuf));
10531@@ -307,10 +314,16 @@ static void alternatives_smp_lock(const
10532 for (poff = start; poff < end; poff++) {
10533 u8 *ptr = (u8 *)poff + *poff;
10534
10535+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
10536+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
10537+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
10538+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
10539+#endif
10540+
10541 if (!*poff || ptr < text || ptr >= text_end)
10542 continue;
10543 /* turn DS segment override prefix into lock prefix */
10544- if (*ptr == 0x3e)
10545+ if (*ktla_ktva(ptr) == 0x3e)
10546 text_poke(ptr, ((unsigned char []){0xf0}), 1);
10547 };
10548 mutex_unlock(&text_mutex);
10549@@ -328,10 +341,16 @@ static void alternatives_smp_unlock(cons
10550 for (poff = start; poff < end; poff++) {
10551 u8 *ptr = (u8 *)poff + *poff;
10552
10553+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
10554+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
10555+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
10556+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
10557+#endif
10558+
10559 if (!*poff || ptr < text || ptr >= text_end)
10560 continue;
10561 /* turn lock prefix into DS segment override prefix */
10562- if (*ptr == 0xf0)
10563+ if (*ktla_ktva(ptr) == 0xf0)
10564 text_poke(ptr, ((unsigned char []){0x3E}), 1);
10565 };
10566 mutex_unlock(&text_mutex);
10567@@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(str
10568
10569 BUG_ON(p->len > MAX_PATCH_LEN);
10570 /* prep the buffer with the original instructions */
10571- memcpy(insnbuf, p->instr, p->len);
10572+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
10573 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
10574 (unsigned long)p->instr, p->len);
10575
10576@@ -568,7 +587,7 @@ void __init alternative_instructions(voi
10577 if (smp_alt_once)
10578 free_init_pages("SMP alternatives",
10579 (unsigned long)__smp_locks,
10580- (unsigned long)__smp_locks_end);
10581+ PAGE_ALIGN((unsigned long)__smp_locks_end));
10582
10583 restart_nmi();
10584 }
10585@@ -585,13 +604,17 @@ void __init alternative_instructions(voi
10586 * instructions. And on the local CPU you need to be protected again NMI or MCE
10587 * handlers seeing an inconsistent instruction while you patch.
10588 */
10589-void *__init_or_module text_poke_early(void *addr, const void *opcode,
10590+void *__kprobes text_poke_early(void *addr, const void *opcode,
10591 size_t len)
10592 {
10593 unsigned long flags;
10594 local_irq_save(flags);
10595- memcpy(addr, opcode, len);
10596+
10597+ pax_open_kernel();
10598+ memcpy(ktla_ktva(addr), opcode, len);
10599 sync_core();
10600+ pax_close_kernel();
10601+
10602 local_irq_restore(flags);
10603 /* Could also do a CLFLUSH here to speed up CPU recovery; but
10604 that causes hangs on some VIA CPUs. */
10605@@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(v
10606 */
10607 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
10608 {
10609- unsigned long flags;
10610- char *vaddr;
10611+ unsigned char *vaddr = ktla_ktva(addr);
10612 struct page *pages[2];
10613- int i;
10614+ size_t i;
10615
10616 if (!core_kernel_text((unsigned long)addr)) {
10617- pages[0] = vmalloc_to_page(addr);
10618- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
10619+ pages[0] = vmalloc_to_page(vaddr);
10620+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
10621 } else {
10622- pages[0] = virt_to_page(addr);
10623+ pages[0] = virt_to_page(vaddr);
10624 WARN_ON(!PageReserved(pages[0]));
10625- pages[1] = virt_to_page(addr + PAGE_SIZE);
10626+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
10627 }
10628 BUG_ON(!pages[0]);
10629- local_irq_save(flags);
10630- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
10631- if (pages[1])
10632- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
10633- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
10634- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
10635- clear_fixmap(FIX_TEXT_POKE0);
10636- if (pages[1])
10637- clear_fixmap(FIX_TEXT_POKE1);
10638- local_flush_tlb();
10639- sync_core();
10640- /* Could also do a CLFLUSH here to speed up CPU recovery; but
10641- that causes hangs on some VIA CPUs. */
10642+ text_poke_early(addr, opcode, len);
10643 for (i = 0; i < len; i++)
10644- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
10645- local_irq_restore(flags);
10646+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
10647 return addr;
10648 }
10649
10650diff -urNp linux-3.1.1/arch/x86/kernel/apic/apic.c linux-3.1.1/arch/x86/kernel/apic/apic.c
10651--- linux-3.1.1/arch/x86/kernel/apic/apic.c 2011-11-11 15:19:27.000000000 -0500
10652+++ linux-3.1.1/arch/x86/kernel/apic/apic.c 2011-11-16 18:40:08.000000000 -0500
10653@@ -174,7 +174,7 @@ int first_system_vector = 0xfe;
10654 /*
10655 * Debug level, exported for io_apic.c
10656 */
10657-unsigned int apic_verbosity;
10658+int apic_verbosity;
10659
10660 int pic_mode;
10661
10662@@ -1835,7 +1835,7 @@ void smp_error_interrupt(struct pt_regs
10663 apic_write(APIC_ESR, 0);
10664 v1 = apic_read(APIC_ESR);
10665 ack_APIC_irq();
10666- atomic_inc(&irq_err_count);
10667+ atomic_inc_unchecked(&irq_err_count);
10668
10669 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
10670 smp_processor_id(), v0 , v1);
10671@@ -2209,6 +2209,8 @@ static int __cpuinit apic_cluster_num(vo
10672 u16 *bios_cpu_apicid;
10673 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
10674
10675+ pax_track_stack();
10676+
10677 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
10678 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
10679
10680diff -urNp linux-3.1.1/arch/x86/kernel/apic/io_apic.c linux-3.1.1/arch/x86/kernel/apic/io_apic.c
10681--- linux-3.1.1/arch/x86/kernel/apic/io_apic.c 2011-11-11 15:19:27.000000000 -0500
10682+++ linux-3.1.1/arch/x86/kernel/apic/io_apic.c 2011-11-16 18:39:07.000000000 -0500
10683@@ -1028,7 +1028,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
10684 }
10685 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
10686
10687-void lock_vector_lock(void)
10688+void lock_vector_lock(void) __acquires(vector_lock)
10689 {
10690 /* Used to the online set of cpus does not change
10691 * during assign_irq_vector.
10692@@ -1036,7 +1036,7 @@ void lock_vector_lock(void)
10693 raw_spin_lock(&vector_lock);
10694 }
10695
10696-void unlock_vector_lock(void)
10697+void unlock_vector_lock(void) __releases(vector_lock)
10698 {
10699 raw_spin_unlock(&vector_lock);
10700 }
10701@@ -2405,7 +2405,7 @@ static void ack_apic_edge(struct irq_dat
10702 ack_APIC_irq();
10703 }
10704
10705-atomic_t irq_mis_count;
10706+atomic_unchecked_t irq_mis_count;
10707
10708 /*
10709 * IO-APIC versions below 0x20 don't support EOI register.
10710@@ -2513,7 +2513,7 @@ static void ack_apic_level(struct irq_da
10711 * at the cpu.
10712 */
10713 if (!(v & (1 << (i & 0x1f)))) {
10714- atomic_inc(&irq_mis_count);
10715+ atomic_inc_unchecked(&irq_mis_count);
10716
10717 eoi_ioapic_irq(irq, cfg);
10718 }
10719diff -urNp linux-3.1.1/arch/x86/kernel/apm_32.c linux-3.1.1/arch/x86/kernel/apm_32.c
10720--- linux-3.1.1/arch/x86/kernel/apm_32.c 2011-11-11 15:19:27.000000000 -0500
10721+++ linux-3.1.1/arch/x86/kernel/apm_32.c 2011-11-16 18:39:07.000000000 -0500
10722@@ -413,7 +413,7 @@ static DEFINE_MUTEX(apm_mutex);
10723 * This is for buggy BIOS's that refer to (real mode) segment 0x40
10724 * even though they are called in protected mode.
10725 */
10726-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
10727+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
10728 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
10729
10730 static const char driver_version[] = "1.16ac"; /* no spaces */
10731@@ -591,7 +591,10 @@ static long __apm_bios_call(void *_call)
10732 BUG_ON(cpu != 0);
10733 gdt = get_cpu_gdt_table(cpu);
10734 save_desc_40 = gdt[0x40 / 8];
10735+
10736+ pax_open_kernel();
10737 gdt[0x40 / 8] = bad_bios_desc;
10738+ pax_close_kernel();
10739
10740 apm_irq_save(flags);
10741 APM_DO_SAVE_SEGS;
10742@@ -600,7 +603,11 @@ static long __apm_bios_call(void *_call)
10743 &call->esi);
10744 APM_DO_RESTORE_SEGS;
10745 apm_irq_restore(flags);
10746+
10747+ pax_open_kernel();
10748 gdt[0x40 / 8] = save_desc_40;
10749+ pax_close_kernel();
10750+
10751 put_cpu();
10752
10753 return call->eax & 0xff;
10754@@ -667,7 +674,10 @@ static long __apm_bios_call_simple(void
10755 BUG_ON(cpu != 0);
10756 gdt = get_cpu_gdt_table(cpu);
10757 save_desc_40 = gdt[0x40 / 8];
10758+
10759+ pax_open_kernel();
10760 gdt[0x40 / 8] = bad_bios_desc;
10761+ pax_close_kernel();
10762
10763 apm_irq_save(flags);
10764 APM_DO_SAVE_SEGS;
10765@@ -675,7 +685,11 @@ static long __apm_bios_call_simple(void
10766 &call->eax);
10767 APM_DO_RESTORE_SEGS;
10768 apm_irq_restore(flags);
10769+
10770+ pax_open_kernel();
10771 gdt[0x40 / 8] = save_desc_40;
10772+ pax_close_kernel();
10773+
10774 put_cpu();
10775 return error;
10776 }
10777@@ -2349,12 +2363,15 @@ static int __init apm_init(void)
10778 * code to that CPU.
10779 */
10780 gdt = get_cpu_gdt_table(0);
10781+
10782+ pax_open_kernel();
10783 set_desc_base(&gdt[APM_CS >> 3],
10784 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
10785 set_desc_base(&gdt[APM_CS_16 >> 3],
10786 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
10787 set_desc_base(&gdt[APM_DS >> 3],
10788 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
10789+ pax_close_kernel();
10790
10791 proc_create("apm", 0, NULL, &apm_file_ops);
10792
10793diff -urNp linux-3.1.1/arch/x86/kernel/asm-offsets_64.c linux-3.1.1/arch/x86/kernel/asm-offsets_64.c
10794--- linux-3.1.1/arch/x86/kernel/asm-offsets_64.c 2011-11-11 15:19:27.000000000 -0500
10795+++ linux-3.1.1/arch/x86/kernel/asm-offsets_64.c 2011-11-16 18:39:07.000000000 -0500
10796@@ -69,6 +69,7 @@ int main(void)
10797 BLANK();
10798 #undef ENTRY
10799
10800+ DEFINE(TSS_size, sizeof(struct tss_struct));
10801 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
10802 BLANK();
10803
10804diff -urNp linux-3.1.1/arch/x86/kernel/asm-offsets.c linux-3.1.1/arch/x86/kernel/asm-offsets.c
10805--- linux-3.1.1/arch/x86/kernel/asm-offsets.c 2011-11-11 15:19:27.000000000 -0500
10806+++ linux-3.1.1/arch/x86/kernel/asm-offsets.c 2011-11-16 18:39:07.000000000 -0500
10807@@ -33,6 +33,8 @@ void common(void) {
10808 OFFSET(TI_status, thread_info, status);
10809 OFFSET(TI_addr_limit, thread_info, addr_limit);
10810 OFFSET(TI_preempt_count, thread_info, preempt_count);
10811+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
10812+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
10813
10814 BLANK();
10815 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
10816@@ -53,8 +55,26 @@ void common(void) {
10817 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
10818 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
10819 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
10820+
10821+#ifdef CONFIG_PAX_KERNEXEC
10822+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
10823+#endif
10824+
10825+#ifdef CONFIG_PAX_MEMORY_UDEREF
10826+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
10827+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
10828+#ifdef CONFIG_X86_64
10829+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
10830+#endif
10831 #endif
10832
10833+#endif
10834+
10835+ BLANK();
10836+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
10837+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
10838+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
10839+
10840 #ifdef CONFIG_XEN
10841 BLANK();
10842 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
10843diff -urNp linux-3.1.1/arch/x86/kernel/cpu/amd.c linux-3.1.1/arch/x86/kernel/cpu/amd.c
10844--- linux-3.1.1/arch/x86/kernel/cpu/amd.c 2011-11-11 15:19:27.000000000 -0500
10845+++ linux-3.1.1/arch/x86/kernel/cpu/amd.c 2011-11-16 18:39:07.000000000 -0500
10846@@ -647,7 +647,7 @@ static unsigned int __cpuinit amd_size_c
10847 unsigned int size)
10848 {
10849 /* AMD errata T13 (order #21922) */
10850- if ((c->x86 == 6)) {
10851+ if (c->x86 == 6) {
10852 /* Duron Rev A0 */
10853 if (c->x86_model == 3 && c->x86_mask == 0)
10854 size = 64;
10855diff -urNp linux-3.1.1/arch/x86/kernel/cpu/common.c linux-3.1.1/arch/x86/kernel/cpu/common.c
10856--- linux-3.1.1/arch/x86/kernel/cpu/common.c 2011-11-11 15:19:27.000000000 -0500
10857+++ linux-3.1.1/arch/x86/kernel/cpu/common.c 2011-11-16 18:39:07.000000000 -0500
10858@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
10859
10860 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
10861
10862-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
10863-#ifdef CONFIG_X86_64
10864- /*
10865- * We need valid kernel segments for data and code in long mode too
10866- * IRET will check the segment types kkeil 2000/10/28
10867- * Also sysret mandates a special GDT layout
10868- *
10869- * TLS descriptors are currently at a different place compared to i386.
10870- * Hopefully nobody expects them at a fixed place (Wine?)
10871- */
10872- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
10873- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
10874- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
10875- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
10876- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
10877- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
10878-#else
10879- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
10880- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10881- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
10882- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
10883- /*
10884- * Segments used for calling PnP BIOS have byte granularity.
10885- * They code segments and data segments have fixed 64k limits,
10886- * the transfer segment sizes are set at run time.
10887- */
10888- /* 32-bit code */
10889- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10890- /* 16-bit code */
10891- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10892- /* 16-bit data */
10893- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
10894- /* 16-bit data */
10895- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
10896- /* 16-bit data */
10897- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
10898- /*
10899- * The APM segments have byte granularity and their bases
10900- * are set at run time. All have 64k limits.
10901- */
10902- /* 32-bit code */
10903- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10904- /* 16-bit code */
10905- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10906- /* data */
10907- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
10908-
10909- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10910- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10911- GDT_STACK_CANARY_INIT
10912-#endif
10913-} };
10914-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
10915-
10916 static int __init x86_xsave_setup(char *s)
10917 {
10918 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
10919@@ -371,7 +317,7 @@ void switch_to_new_gdt(int cpu)
10920 {
10921 struct desc_ptr gdt_descr;
10922
10923- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
10924+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
10925 gdt_descr.size = GDT_SIZE - 1;
10926 load_gdt(&gdt_descr);
10927 /* Reload the per-cpu base */
10928@@ -840,6 +786,10 @@ static void __cpuinit identify_cpu(struc
10929 /* Filter out anything that depends on CPUID levels we don't have */
10930 filter_cpuid_features(c, true);
10931
10932+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
10933+ setup_clear_cpu_cap(X86_FEATURE_SEP);
10934+#endif
10935+
10936 /* If the model name is still unset, do table lookup. */
10937 if (!c->x86_model_id[0]) {
10938 const char *p;
10939@@ -1019,6 +969,9 @@ static __init int setup_disablecpuid(cha
10940 }
10941 __setup("clearcpuid=", setup_disablecpuid);
10942
10943+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
10944+EXPORT_PER_CPU_SYMBOL(current_tinfo);
10945+
10946 #ifdef CONFIG_X86_64
10947 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
10948
10949@@ -1034,7 +987,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
10950 EXPORT_PER_CPU_SYMBOL(current_task);
10951
10952 DEFINE_PER_CPU(unsigned long, kernel_stack) =
10953- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
10954+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
10955 EXPORT_PER_CPU_SYMBOL(kernel_stack);
10956
10957 DEFINE_PER_CPU(char *, irq_stack_ptr) =
10958@@ -1099,7 +1052,7 @@ struct pt_regs * __cpuinit idle_regs(str
10959 {
10960 memset(regs, 0, sizeof(struct pt_regs));
10961 regs->fs = __KERNEL_PERCPU;
10962- regs->gs = __KERNEL_STACK_CANARY;
10963+ savesegment(gs, regs->gs);
10964
10965 return regs;
10966 }
10967@@ -1154,7 +1107,7 @@ void __cpuinit cpu_init(void)
10968 int i;
10969
10970 cpu = stack_smp_processor_id();
10971- t = &per_cpu(init_tss, cpu);
10972+ t = init_tss + cpu;
10973 oist = &per_cpu(orig_ist, cpu);
10974
10975 #ifdef CONFIG_NUMA
10976@@ -1180,7 +1133,7 @@ void __cpuinit cpu_init(void)
10977 switch_to_new_gdt(cpu);
10978 loadsegment(fs, 0);
10979
10980- load_idt((const struct desc_ptr *)&idt_descr);
10981+ load_idt(&idt_descr);
10982
10983 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
10984 syscall_init();
10985@@ -1189,7 +1142,6 @@ void __cpuinit cpu_init(void)
10986 wrmsrl(MSR_KERNEL_GS_BASE, 0);
10987 barrier();
10988
10989- x86_configure_nx();
10990 if (cpu != 0)
10991 enable_x2apic();
10992
10993@@ -1243,7 +1195,7 @@ void __cpuinit cpu_init(void)
10994 {
10995 int cpu = smp_processor_id();
10996 struct task_struct *curr = current;
10997- struct tss_struct *t = &per_cpu(init_tss, cpu);
10998+ struct tss_struct *t = init_tss + cpu;
10999 struct thread_struct *thread = &curr->thread;
11000
11001 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
11002diff -urNp linux-3.1.1/arch/x86/kernel/cpu/intel.c linux-3.1.1/arch/x86/kernel/cpu/intel.c
11003--- linux-3.1.1/arch/x86/kernel/cpu/intel.c 2011-11-11 15:19:27.000000000 -0500
11004+++ linux-3.1.1/arch/x86/kernel/cpu/intel.c 2011-11-16 18:39:07.000000000 -0500
11005@@ -172,7 +172,7 @@ static void __cpuinit trap_init_f00f_bug
11006 * Update the IDT descriptor and reload the IDT so that
11007 * it uses the read-only mapped virtual address.
11008 */
11009- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
11010+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
11011 load_idt(&idt_descr);
11012 }
11013 #endif
11014diff -urNp linux-3.1.1/arch/x86/kernel/cpu/Makefile linux-3.1.1/arch/x86/kernel/cpu/Makefile
11015--- linux-3.1.1/arch/x86/kernel/cpu/Makefile 2011-11-11 15:19:27.000000000 -0500
11016+++ linux-3.1.1/arch/x86/kernel/cpu/Makefile 2011-11-16 18:39:07.000000000 -0500
11017@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
11018 CFLAGS_REMOVE_perf_event.o = -pg
11019 endif
11020
11021-# Make sure load_percpu_segment has no stackprotector
11022-nostackp := $(call cc-option, -fno-stack-protector)
11023-CFLAGS_common.o := $(nostackp)
11024-
11025 obj-y := intel_cacheinfo.o scattered.o topology.o
11026 obj-y += proc.o capflags.o powerflags.o common.o
11027 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
11028diff -urNp linux-3.1.1/arch/x86/kernel/cpu/mcheck/mce.c linux-3.1.1/arch/x86/kernel/cpu/mcheck/mce.c
11029--- linux-3.1.1/arch/x86/kernel/cpu/mcheck/mce.c 2011-11-11 15:19:27.000000000 -0500
11030+++ linux-3.1.1/arch/x86/kernel/cpu/mcheck/mce.c 2011-11-16 18:39:07.000000000 -0500
11031@@ -42,6 +42,7 @@
11032 #include <asm/processor.h>
11033 #include <asm/mce.h>
11034 #include <asm/msr.h>
11035+#include <asm/local.h>
11036
11037 #include "mce-internal.h"
11038
11039@@ -205,7 +206,7 @@ static void print_mce(struct mce *m)
11040 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
11041 m->cs, m->ip);
11042
11043- if (m->cs == __KERNEL_CS)
11044+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
11045 print_symbol("{%s}", m->ip);
11046 pr_cont("\n");
11047 }
11048@@ -233,10 +234,10 @@ static void print_mce(struct mce *m)
11049
11050 #define PANIC_TIMEOUT 5 /* 5 seconds */
11051
11052-static atomic_t mce_paniced;
11053+static atomic_unchecked_t mce_paniced;
11054
11055 static int fake_panic;
11056-static atomic_t mce_fake_paniced;
11057+static atomic_unchecked_t mce_fake_paniced;
11058
11059 /* Panic in progress. Enable interrupts and wait for final IPI */
11060 static void wait_for_panic(void)
11061@@ -260,7 +261,7 @@ static void mce_panic(char *msg, struct
11062 /*
11063 * Make sure only one CPU runs in machine check panic
11064 */
11065- if (atomic_inc_return(&mce_paniced) > 1)
11066+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
11067 wait_for_panic();
11068 barrier();
11069
11070@@ -268,7 +269,7 @@ static void mce_panic(char *msg, struct
11071 console_verbose();
11072 } else {
11073 /* Don't log too much for fake panic */
11074- if (atomic_inc_return(&mce_fake_paniced) > 1)
11075+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
11076 return;
11077 }
11078 /* First print corrected ones that are still unlogged */
11079@@ -610,7 +611,7 @@ static int mce_timed_out(u64 *t)
11080 * might have been modified by someone else.
11081 */
11082 rmb();
11083- if (atomic_read(&mce_paniced))
11084+ if (atomic_read_unchecked(&mce_paniced))
11085 wait_for_panic();
11086 if (!monarch_timeout)
11087 goto out;
11088@@ -1429,7 +1430,7 @@ void __cpuinit mcheck_cpu_init(struct cp
11089 */
11090
11091 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
11092-static int mce_chrdev_open_count; /* #times opened */
11093+static local_t mce_chrdev_open_count; /* #times opened */
11094 static int mce_chrdev_open_exclu; /* already open exclusive? */
11095
11096 static int mce_chrdev_open(struct inode *inode, struct file *file)
11097@@ -1437,7 +1438,7 @@ static int mce_chrdev_open(struct inode
11098 spin_lock(&mce_chrdev_state_lock);
11099
11100 if (mce_chrdev_open_exclu ||
11101- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
11102+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
11103 spin_unlock(&mce_chrdev_state_lock);
11104
11105 return -EBUSY;
11106@@ -1445,7 +1446,7 @@ static int mce_chrdev_open(struct inode
11107
11108 if (file->f_flags & O_EXCL)
11109 mce_chrdev_open_exclu = 1;
11110- mce_chrdev_open_count++;
11111+ local_inc(&mce_chrdev_open_count);
11112
11113 spin_unlock(&mce_chrdev_state_lock);
11114
11115@@ -1456,7 +1457,7 @@ static int mce_chrdev_release(struct ino
11116 {
11117 spin_lock(&mce_chrdev_state_lock);
11118
11119- mce_chrdev_open_count--;
11120+ local_dec(&mce_chrdev_open_count);
11121 mce_chrdev_open_exclu = 0;
11122
11123 spin_unlock(&mce_chrdev_state_lock);
11124@@ -2147,7 +2148,7 @@ struct dentry *mce_get_debugfs_dir(void)
11125 static void mce_reset(void)
11126 {
11127 cpu_missing = 0;
11128- atomic_set(&mce_fake_paniced, 0);
11129+ atomic_set_unchecked(&mce_fake_paniced, 0);
11130 atomic_set(&mce_executing, 0);
11131 atomic_set(&mce_callin, 0);
11132 atomic_set(&global_nwo, 0);
11133diff -urNp linux-3.1.1/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-3.1.1/arch/x86/kernel/cpu/mcheck/mce-inject.c
11134--- linux-3.1.1/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-11-11 15:19:27.000000000 -0500
11135+++ linux-3.1.1/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-11-16 18:39:07.000000000 -0500
11136@@ -215,7 +215,9 @@ static int inject_init(void)
11137 if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
11138 return -ENOMEM;
11139 printk(KERN_INFO "Machine check injector initialized\n");
11140- mce_chrdev_ops.write = mce_write;
11141+ pax_open_kernel();
11142+ *(void **)&mce_chrdev_ops.write = mce_write;
11143+ pax_close_kernel();
11144 register_die_notifier(&mce_raise_nb);
11145 return 0;
11146 }
11147diff -urNp linux-3.1.1/arch/x86/kernel/cpu/mtrr/main.c linux-3.1.1/arch/x86/kernel/cpu/mtrr/main.c
11148--- linux-3.1.1/arch/x86/kernel/cpu/mtrr/main.c 2011-11-11 15:19:27.000000000 -0500
11149+++ linux-3.1.1/arch/x86/kernel/cpu/mtrr/main.c 2011-11-16 18:39:07.000000000 -0500
11150@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
11151 u64 size_or_mask, size_and_mask;
11152 static bool mtrr_aps_delayed_init;
11153
11154-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
11155+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
11156
11157 const struct mtrr_ops *mtrr_if;
11158
11159diff -urNp linux-3.1.1/arch/x86/kernel/cpu/mtrr/mtrr.h linux-3.1.1/arch/x86/kernel/cpu/mtrr/mtrr.h
11160--- linux-3.1.1/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-11-11 15:19:27.000000000 -0500
11161+++ linux-3.1.1/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-11-16 18:39:07.000000000 -0500
11162@@ -25,7 +25,7 @@ struct mtrr_ops {
11163 int (*validate_add_page)(unsigned long base, unsigned long size,
11164 unsigned int type);
11165 int (*have_wrcomb)(void);
11166-};
11167+} __do_const;
11168
11169 extern int generic_get_free_region(unsigned long base, unsigned long size,
11170 int replace_reg);
11171diff -urNp linux-3.1.1/arch/x86/kernel/cpu/perf_event.c linux-3.1.1/arch/x86/kernel/cpu/perf_event.c
11172--- linux-3.1.1/arch/x86/kernel/cpu/perf_event.c 2011-11-11 15:19:27.000000000 -0500
11173+++ linux-3.1.1/arch/x86/kernel/cpu/perf_event.c 2011-11-16 18:40:08.000000000 -0500
11174@@ -795,6 +795,8 @@ static int x86_schedule_events(struct cp
11175 int i, j, w, wmax, num = 0;
11176 struct hw_perf_event *hwc;
11177
11178+ pax_track_stack();
11179+
11180 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
11181
11182 for (i = 0; i < n; i++) {
11183@@ -1919,7 +1921,7 @@ perf_callchain_user(struct perf_callchai
11184 break;
11185
11186 perf_callchain_store(entry, frame.return_address);
11187- fp = frame.next_frame;
11188+ fp = (const void __force_user *)frame.next_frame;
11189 }
11190 }
11191
11192diff -urNp linux-3.1.1/arch/x86/kernel/crash.c linux-3.1.1/arch/x86/kernel/crash.c
11193--- linux-3.1.1/arch/x86/kernel/crash.c 2011-11-11 15:19:27.000000000 -0500
11194+++ linux-3.1.1/arch/x86/kernel/crash.c 2011-11-16 18:39:07.000000000 -0500
11195@@ -42,7 +42,7 @@ static void kdump_nmi_callback(int cpu,
11196 regs = args->regs;
11197
11198 #ifdef CONFIG_X86_32
11199- if (!user_mode_vm(regs)) {
11200+ if (!user_mode(regs)) {
11201 crash_fixup_ss_esp(&fixed_regs, regs);
11202 regs = &fixed_regs;
11203 }
11204diff -urNp linux-3.1.1/arch/x86/kernel/doublefault_32.c linux-3.1.1/arch/x86/kernel/doublefault_32.c
11205--- linux-3.1.1/arch/x86/kernel/doublefault_32.c 2011-11-11 15:19:27.000000000 -0500
11206+++ linux-3.1.1/arch/x86/kernel/doublefault_32.c 2011-11-16 18:39:07.000000000 -0500
11207@@ -11,7 +11,7 @@
11208
11209 #define DOUBLEFAULT_STACKSIZE (1024)
11210 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
11211-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
11212+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
11213
11214 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
11215
11216@@ -21,7 +21,7 @@ static void doublefault_fn(void)
11217 unsigned long gdt, tss;
11218
11219 store_gdt(&gdt_desc);
11220- gdt = gdt_desc.address;
11221+ gdt = (unsigned long)gdt_desc.address;
11222
11223 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
11224
11225@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
11226 /* 0x2 bit is always set */
11227 .flags = X86_EFLAGS_SF | 0x2,
11228 .sp = STACK_START,
11229- .es = __USER_DS,
11230+ .es = __KERNEL_DS,
11231 .cs = __KERNEL_CS,
11232 .ss = __KERNEL_DS,
11233- .ds = __USER_DS,
11234+ .ds = __KERNEL_DS,
11235 .fs = __KERNEL_PERCPU,
11236
11237 .__cr3 = __pa_nodebug(swapper_pg_dir),
11238diff -urNp linux-3.1.1/arch/x86/kernel/dumpstack_32.c linux-3.1.1/arch/x86/kernel/dumpstack_32.c
11239--- linux-3.1.1/arch/x86/kernel/dumpstack_32.c 2011-11-11 15:19:27.000000000 -0500
11240+++ linux-3.1.1/arch/x86/kernel/dumpstack_32.c 2011-11-16 18:39:07.000000000 -0500
11241@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task
11242 bp = stack_frame(task, regs);
11243
11244 for (;;) {
11245- struct thread_info *context;
11246+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11247
11248- context = (struct thread_info *)
11249- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
11250- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
11251+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11252
11253- stack = (unsigned long *)context->previous_esp;
11254- if (!stack)
11255+ if (stack_start == task_stack_page(task))
11256 break;
11257+ stack = *(unsigned long **)stack_start;
11258 if (ops->stack(data, "IRQ") < 0)
11259 break;
11260 touch_nmi_watchdog();
11261@@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs
11262 * When in-kernel, we also print out the stack and code at the
11263 * time of the fault..
11264 */
11265- if (!user_mode_vm(regs)) {
11266+ if (!user_mode(regs)) {
11267 unsigned int code_prologue = code_bytes * 43 / 64;
11268 unsigned int code_len = code_bytes;
11269 unsigned char c;
11270 u8 *ip;
11271+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
11272
11273 printk(KERN_EMERG "Stack:\n");
11274 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
11275
11276 printk(KERN_EMERG "Code: ");
11277
11278- ip = (u8 *)regs->ip - code_prologue;
11279+ ip = (u8 *)regs->ip - code_prologue + cs_base;
11280 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
11281 /* try starting at IP */
11282- ip = (u8 *)regs->ip;
11283+ ip = (u8 *)regs->ip + cs_base;
11284 code_len = code_len - code_prologue + 1;
11285 }
11286 for (i = 0; i < code_len; i++, ip++) {
11287@@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs
11288 printk(" Bad EIP value.");
11289 break;
11290 }
11291- if (ip == (u8 *)regs->ip)
11292+ if (ip == (u8 *)regs->ip + cs_base)
11293 printk("<%02x> ", c);
11294 else
11295 printk("%02x ", c);
11296@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
11297 {
11298 unsigned short ud2;
11299
11300+ ip = ktla_ktva(ip);
11301 if (ip < PAGE_OFFSET)
11302 return 0;
11303 if (probe_kernel_address((unsigned short *)ip, ud2))
11304@@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
11305
11306 return ud2 == 0x0b0f;
11307 }
11308+
11309+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11310+void pax_check_alloca(unsigned long size)
11311+{
11312+ unsigned long sp = (unsigned long)&sp, stack_left;
11313+
11314+ /* all kernel stacks are of the same size */
11315+ stack_left = sp & (THREAD_SIZE - 1);
11316+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
11317+}
11318+EXPORT_SYMBOL(pax_check_alloca);
11319+#endif
11320diff -urNp linux-3.1.1/arch/x86/kernel/dumpstack_64.c linux-3.1.1/arch/x86/kernel/dumpstack_64.c
11321--- linux-3.1.1/arch/x86/kernel/dumpstack_64.c 2011-11-11 15:19:27.000000000 -0500
11322+++ linux-3.1.1/arch/x86/kernel/dumpstack_64.c 2011-11-16 18:39:07.000000000 -0500
11323@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task
11324 unsigned long *irq_stack_end =
11325 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
11326 unsigned used = 0;
11327- struct thread_info *tinfo;
11328 int graph = 0;
11329 unsigned long dummy;
11330+ void *stack_start;
11331
11332 if (!task)
11333 task = current;
11334@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task
11335 * current stack address. If the stacks consist of nested
11336 * exceptions
11337 */
11338- tinfo = task_thread_info(task);
11339 for (;;) {
11340 char *id;
11341 unsigned long *estack_end;
11342+
11343 estack_end = in_exception_stack(cpu, (unsigned long)stack,
11344 &used, &id);
11345
11346@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task
11347 if (ops->stack(data, id) < 0)
11348 break;
11349
11350- bp = ops->walk_stack(tinfo, stack, bp, ops,
11351+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
11352 data, estack_end, &graph);
11353 ops->stack(data, "<EOE>");
11354 /*
11355@@ -172,7 +172,7 @@ void dump_trace(struct task_struct *task
11356 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
11357 if (ops->stack(data, "IRQ") < 0)
11358 break;
11359- bp = ops->walk_stack(tinfo, stack, bp,
11360+ bp = ops->walk_stack(task, irq_stack, stack, bp,
11361 ops, data, irq_stack_end, &graph);
11362 /*
11363 * We link to the next stack (which would be
11364@@ -191,7 +191,8 @@ void dump_trace(struct task_struct *task
11365 /*
11366 * This handles the process stack:
11367 */
11368- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
11369+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11370+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11371 put_cpu();
11372 }
11373 EXPORT_SYMBOL(dump_trace);
11374@@ -305,3 +306,50 @@ int is_valid_bugaddr(unsigned long ip)
11375
11376 return ud2 == 0x0b0f;
11377 }
11378+
11379+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11380+void pax_check_alloca(unsigned long size)
11381+{
11382+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
11383+ unsigned cpu, used;
11384+ char *id;
11385+
11386+ /* check the process stack first */
11387+ stack_start = (unsigned long)task_stack_page(current);
11388+ stack_end = stack_start + THREAD_SIZE;
11389+ if (likely(stack_start <= sp && sp < stack_end)) {
11390+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
11391+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
11392+ return;
11393+ }
11394+
11395+ cpu = get_cpu();
11396+
11397+ /* check the irq stacks */
11398+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
11399+ stack_start = stack_end - IRQ_STACK_SIZE;
11400+ if (stack_start <= sp && sp < stack_end) {
11401+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
11402+ put_cpu();
11403+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
11404+ return;
11405+ }
11406+
11407+ /* check the exception stacks */
11408+ used = 0;
11409+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
11410+ stack_start = stack_end - EXCEPTION_STKSZ;
11411+ if (stack_end && stack_start <= sp && sp < stack_end) {
11412+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
11413+ put_cpu();
11414+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
11415+ return;
11416+ }
11417+
11418+ put_cpu();
11419+
11420+ /* unknown stack */
11421+ BUG();
11422+}
11423+EXPORT_SYMBOL(pax_check_alloca);
11424+#endif
11425diff -urNp linux-3.1.1/arch/x86/kernel/dumpstack.c linux-3.1.1/arch/x86/kernel/dumpstack.c
11426--- linux-3.1.1/arch/x86/kernel/dumpstack.c 2011-11-11 15:19:27.000000000 -0500
11427+++ linux-3.1.1/arch/x86/kernel/dumpstack.c 2011-11-16 18:40:08.000000000 -0500
11428@@ -2,6 +2,9 @@
11429 * Copyright (C) 1991, 1992 Linus Torvalds
11430 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
11431 */
11432+#ifdef CONFIG_GRKERNSEC_HIDESYM
11433+#define __INCLUDED_BY_HIDESYM 1
11434+#endif
11435 #include <linux/kallsyms.h>
11436 #include <linux/kprobes.h>
11437 #include <linux/uaccess.h>
11438@@ -35,9 +38,8 @@ void printk_address(unsigned long addres
11439 static void
11440 print_ftrace_graph_addr(unsigned long addr, void *data,
11441 const struct stacktrace_ops *ops,
11442- struct thread_info *tinfo, int *graph)
11443+ struct task_struct *task, int *graph)
11444 {
11445- struct task_struct *task = tinfo->task;
11446 unsigned long ret_addr;
11447 int index = task->curr_ret_stack;
11448
11449@@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long ad
11450 static inline void
11451 print_ftrace_graph_addr(unsigned long addr, void *data,
11452 const struct stacktrace_ops *ops,
11453- struct thread_info *tinfo, int *graph)
11454+ struct task_struct *task, int *graph)
11455 { }
11456 #endif
11457
11458@@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long ad
11459 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
11460 */
11461
11462-static inline int valid_stack_ptr(struct thread_info *tinfo,
11463- void *p, unsigned int size, void *end)
11464+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
11465 {
11466- void *t = tinfo;
11467 if (end) {
11468 if (p < end && p >= (end-THREAD_SIZE))
11469 return 1;
11470@@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct
11471 }
11472
11473 unsigned long
11474-print_context_stack(struct thread_info *tinfo,
11475+print_context_stack(struct task_struct *task, void *stack_start,
11476 unsigned long *stack, unsigned long bp,
11477 const struct stacktrace_ops *ops, void *data,
11478 unsigned long *end, int *graph)
11479 {
11480 struct stack_frame *frame = (struct stack_frame *)bp;
11481
11482- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
11483+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
11484 unsigned long addr;
11485
11486 addr = *stack;
11487@@ -102,7 +102,7 @@ print_context_stack(struct thread_info *
11488 } else {
11489 ops->address(data, addr, 0);
11490 }
11491- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11492+ print_ftrace_graph_addr(addr, data, ops, task, graph);
11493 }
11494 stack++;
11495 }
11496@@ -111,7 +111,7 @@ print_context_stack(struct thread_info *
11497 EXPORT_SYMBOL_GPL(print_context_stack);
11498
11499 unsigned long
11500-print_context_stack_bp(struct thread_info *tinfo,
11501+print_context_stack_bp(struct task_struct *task, void *stack_start,
11502 unsigned long *stack, unsigned long bp,
11503 const struct stacktrace_ops *ops, void *data,
11504 unsigned long *end, int *graph)
11505@@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_inf
11506 struct stack_frame *frame = (struct stack_frame *)bp;
11507 unsigned long *ret_addr = &frame->return_address;
11508
11509- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
11510+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
11511 unsigned long addr = *ret_addr;
11512
11513 if (!__kernel_text_address(addr))
11514@@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_inf
11515 ops->address(data, addr, 1);
11516 frame = frame->next_frame;
11517 ret_addr = &frame->return_address;
11518- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11519+ print_ftrace_graph_addr(addr, data, ops, task, graph);
11520 }
11521
11522 return (unsigned long)frame;
11523@@ -186,7 +186,7 @@ void dump_stack(void)
11524
11525 bp = stack_frame(current, NULL);
11526 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
11527- current->pid, current->comm, print_tainted(),
11528+ task_pid_nr(current), current->comm, print_tainted(),
11529 init_utsname()->release,
11530 (int)strcspn(init_utsname()->version, " "),
11531 init_utsname()->version);
11532@@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
11533 }
11534 EXPORT_SYMBOL_GPL(oops_begin);
11535
11536+extern void gr_handle_kernel_exploit(void);
11537+
11538 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
11539 {
11540 if (regs && kexec_should_crash(current))
11541@@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long fl
11542 panic("Fatal exception in interrupt");
11543 if (panic_on_oops)
11544 panic("Fatal exception");
11545- do_exit(signr);
11546+
11547+ gr_handle_kernel_exploit();
11548+
11549+ do_group_exit(signr);
11550 }
11551
11552 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
11553@@ -269,7 +274,7 @@ int __kprobes __die(const char *str, str
11554
11555 show_registers(regs);
11556 #ifdef CONFIG_X86_32
11557- if (user_mode_vm(regs)) {
11558+ if (user_mode(regs)) {
11559 sp = regs->sp;
11560 ss = regs->ss & 0xffff;
11561 } else {
11562@@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs
11563 unsigned long flags = oops_begin();
11564 int sig = SIGSEGV;
11565
11566- if (!user_mode_vm(regs))
11567+ if (!user_mode(regs))
11568 report_bug(regs->ip, regs);
11569
11570 if (__die(str, regs, err))
11571diff -urNp linux-3.1.1/arch/x86/kernel/early_printk.c linux-3.1.1/arch/x86/kernel/early_printk.c
11572--- linux-3.1.1/arch/x86/kernel/early_printk.c 2011-11-11 15:19:27.000000000 -0500
11573+++ linux-3.1.1/arch/x86/kernel/early_printk.c 2011-11-16 18:40:08.000000000 -0500
11574@@ -7,6 +7,7 @@
11575 #include <linux/pci_regs.h>
11576 #include <linux/pci_ids.h>
11577 #include <linux/errno.h>
11578+#include <linux/sched.h>
11579 #include <asm/io.h>
11580 #include <asm/processor.h>
11581 #include <asm/fcntl.h>
11582@@ -179,6 +180,8 @@ asmlinkage void early_printk(const char
11583 int n;
11584 va_list ap;
11585
11586+ pax_track_stack();
11587+
11588 va_start(ap, fmt);
11589 n = vscnprintf(buf, sizeof(buf), fmt, ap);
11590 early_console->write(early_console, buf, n);
11591diff -urNp linux-3.1.1/arch/x86/kernel/entry_32.S linux-3.1.1/arch/x86/kernel/entry_32.S
11592--- linux-3.1.1/arch/x86/kernel/entry_32.S 2011-11-11 15:19:27.000000000 -0500
11593+++ linux-3.1.1/arch/x86/kernel/entry_32.S 2011-11-16 18:40:08.000000000 -0500
11594@@ -186,13 +186,146 @@
11595 /*CFI_REL_OFFSET gs, PT_GS*/
11596 .endm
11597 .macro SET_KERNEL_GS reg
11598+
11599+#ifdef CONFIG_CC_STACKPROTECTOR
11600 movl $(__KERNEL_STACK_CANARY), \reg
11601+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
11602+ movl $(__USER_DS), \reg
11603+#else
11604+ xorl \reg, \reg
11605+#endif
11606+
11607 movl \reg, %gs
11608 .endm
11609
11610 #endif /* CONFIG_X86_32_LAZY_GS */
11611
11612-.macro SAVE_ALL
11613+.macro pax_enter_kernel
11614+#ifdef CONFIG_PAX_KERNEXEC
11615+ call pax_enter_kernel
11616+#endif
11617+.endm
11618+
11619+.macro pax_exit_kernel
11620+#ifdef CONFIG_PAX_KERNEXEC
11621+ call pax_exit_kernel
11622+#endif
11623+.endm
11624+
11625+#ifdef CONFIG_PAX_KERNEXEC
11626+ENTRY(pax_enter_kernel)
11627+#ifdef CONFIG_PARAVIRT
11628+ pushl %eax
11629+ pushl %ecx
11630+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
11631+ mov %eax, %esi
11632+#else
11633+ mov %cr0, %esi
11634+#endif
11635+ bts $16, %esi
11636+ jnc 1f
11637+ mov %cs, %esi
11638+ cmp $__KERNEL_CS, %esi
11639+ jz 3f
11640+ ljmp $__KERNEL_CS, $3f
11641+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
11642+2:
11643+#ifdef CONFIG_PARAVIRT
11644+ mov %esi, %eax
11645+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11646+#else
11647+ mov %esi, %cr0
11648+#endif
11649+3:
11650+#ifdef CONFIG_PARAVIRT
11651+ popl %ecx
11652+ popl %eax
11653+#endif
11654+ ret
11655+ENDPROC(pax_enter_kernel)
11656+
11657+ENTRY(pax_exit_kernel)
11658+#ifdef CONFIG_PARAVIRT
11659+ pushl %eax
11660+ pushl %ecx
11661+#endif
11662+ mov %cs, %esi
11663+ cmp $__KERNEXEC_KERNEL_CS, %esi
11664+ jnz 2f
11665+#ifdef CONFIG_PARAVIRT
11666+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
11667+ mov %eax, %esi
11668+#else
11669+ mov %cr0, %esi
11670+#endif
11671+ btr $16, %esi
11672+ ljmp $__KERNEL_CS, $1f
11673+1:
11674+#ifdef CONFIG_PARAVIRT
11675+ mov %esi, %eax
11676+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
11677+#else
11678+ mov %esi, %cr0
11679+#endif
11680+2:
11681+#ifdef CONFIG_PARAVIRT
11682+ popl %ecx
11683+ popl %eax
11684+#endif
11685+ ret
11686+ENDPROC(pax_exit_kernel)
11687+#endif
11688+
11689+.macro pax_erase_kstack
11690+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11691+ call pax_erase_kstack
11692+#endif
11693+.endm
11694+
11695+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11696+/*
11697+ * ebp: thread_info
11698+ * ecx, edx: can be clobbered
11699+ */
11700+ENTRY(pax_erase_kstack)
11701+ pushl %edi
11702+ pushl %eax
11703+
11704+ mov TI_lowest_stack(%ebp), %edi
11705+ mov $-0xBEEF, %eax
11706+ std
11707+
11708+1: mov %edi, %ecx
11709+ and $THREAD_SIZE_asm - 1, %ecx
11710+ shr $2, %ecx
11711+ repne scasl
11712+ jecxz 2f
11713+
11714+ cmp $2*16, %ecx
11715+ jc 2f
11716+
11717+ mov $2*16, %ecx
11718+ repe scasl
11719+ jecxz 2f
11720+ jne 1b
11721+
11722+2: cld
11723+ mov %esp, %ecx
11724+ sub %edi, %ecx
11725+ shr $2, %ecx
11726+ rep stosl
11727+
11728+ mov TI_task_thread_sp0(%ebp), %edi
11729+ sub $128, %edi
11730+ mov %edi, TI_lowest_stack(%ebp)
11731+
11732+ popl %eax
11733+ popl %edi
11734+ ret
11735+ENDPROC(pax_erase_kstack)
11736+#endif
11737+
11738+.macro __SAVE_ALL _DS
11739 cld
11740 PUSH_GS
11741 pushl_cfi %fs
11742@@ -215,7 +348,7 @@
11743 CFI_REL_OFFSET ecx, 0
11744 pushl_cfi %ebx
11745 CFI_REL_OFFSET ebx, 0
11746- movl $(__USER_DS), %edx
11747+ movl $\_DS, %edx
11748 movl %edx, %ds
11749 movl %edx, %es
11750 movl $(__KERNEL_PERCPU), %edx
11751@@ -223,6 +356,15 @@
11752 SET_KERNEL_GS %edx
11753 .endm
11754
11755+.macro SAVE_ALL
11756+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
11757+ __SAVE_ALL __KERNEL_DS
11758+ pax_enter_kernel
11759+#else
11760+ __SAVE_ALL __USER_DS
11761+#endif
11762+.endm
11763+
11764 .macro RESTORE_INT_REGS
11765 popl_cfi %ebx
11766 CFI_RESTORE ebx
11767@@ -308,7 +450,7 @@ ENTRY(ret_from_fork)
11768 popfl_cfi
11769 jmp syscall_exit
11770 CFI_ENDPROC
11771-END(ret_from_fork)
11772+ENDPROC(ret_from_fork)
11773
11774 /*
11775 * Interrupt exit functions should be protected against kprobes
11776@@ -333,7 +475,15 @@ check_userspace:
11777 movb PT_CS(%esp), %al
11778 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
11779 cmpl $USER_RPL, %eax
11780+
11781+#ifdef CONFIG_PAX_KERNEXEC
11782+ jae resume_userspace
11783+
11784+ PAX_EXIT_KERNEL
11785+ jmp resume_kernel
11786+#else
11787 jb resume_kernel # not returning to v8086 or userspace
11788+#endif
11789
11790 ENTRY(resume_userspace)
11791 LOCKDEP_SYS_EXIT
11792@@ -345,8 +495,8 @@ ENTRY(resume_userspace)
11793 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
11794 # int/exception return?
11795 jne work_pending
11796- jmp restore_all
11797-END(ret_from_exception)
11798+ jmp restore_all_pax
11799+ENDPROC(ret_from_exception)
11800
11801 #ifdef CONFIG_PREEMPT
11802 ENTRY(resume_kernel)
11803@@ -361,7 +511,7 @@ need_resched:
11804 jz restore_all
11805 call preempt_schedule_irq
11806 jmp need_resched
11807-END(resume_kernel)
11808+ENDPROC(resume_kernel)
11809 #endif
11810 CFI_ENDPROC
11811 /*
11812@@ -395,23 +545,34 @@ sysenter_past_esp:
11813 /*CFI_REL_OFFSET cs, 0*/
11814 /*
11815 * Push current_thread_info()->sysenter_return to the stack.
11816- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
11817- * pushed above; +8 corresponds to copy_thread's esp0 setting.
11818 */
11819- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
11820+ pushl_cfi $0
11821 CFI_REL_OFFSET eip, 0
11822
11823 pushl_cfi %eax
11824 SAVE_ALL
11825+ GET_THREAD_INFO(%ebp)
11826+ movl TI_sysenter_return(%ebp),%ebp
11827+ movl %ebp,PT_EIP(%esp)
11828 ENABLE_INTERRUPTS(CLBR_NONE)
11829
11830 /*
11831 * Load the potential sixth argument from user stack.
11832 * Careful about security.
11833 */
11834+ movl PT_OLDESP(%esp),%ebp
11835+
11836+#ifdef CONFIG_PAX_MEMORY_UDEREF
11837+ mov PT_OLDSS(%esp),%ds
11838+1: movl %ds:(%ebp),%ebp
11839+ push %ss
11840+ pop %ds
11841+#else
11842 cmpl $__PAGE_OFFSET-3,%ebp
11843 jae syscall_fault
11844 1: movl (%ebp),%ebp
11845+#endif
11846+
11847 movl %ebp,PT_EBP(%esp)
11848 .section __ex_table,"a"
11849 .align 4
11850@@ -434,12 +595,24 @@ sysenter_do_call:
11851 testl $_TIF_ALLWORK_MASK, %ecx
11852 jne sysexit_audit
11853 sysenter_exit:
11854+
11855+#ifdef CONFIG_PAX_RANDKSTACK
11856+ pushl_cfi %eax
11857+ movl %esp, %eax
11858+ call pax_randomize_kstack
11859+ popl_cfi %eax
11860+#endif
11861+
11862+ pax_erase_kstack
11863+
11864 /* if something modifies registers it must also disable sysexit */
11865 movl PT_EIP(%esp), %edx
11866 movl PT_OLDESP(%esp), %ecx
11867 xorl %ebp,%ebp
11868 TRACE_IRQS_ON
11869 1: mov PT_FS(%esp), %fs
11870+2: mov PT_DS(%esp), %ds
11871+3: mov PT_ES(%esp), %es
11872 PTGS_TO_GS
11873 ENABLE_INTERRUPTS_SYSEXIT
11874
11875@@ -456,6 +629,9 @@ sysenter_audit:
11876 movl %eax,%edx /* 2nd arg: syscall number */
11877 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
11878 call audit_syscall_entry
11879+
11880+ pax_erase_kstack
11881+
11882 pushl_cfi %ebx
11883 movl PT_EAX(%esp),%eax /* reload syscall number */
11884 jmp sysenter_do_call
11885@@ -482,11 +658,17 @@ sysexit_audit:
11886
11887 CFI_ENDPROC
11888 .pushsection .fixup,"ax"
11889-2: movl $0,PT_FS(%esp)
11890+4: movl $0,PT_FS(%esp)
11891+ jmp 1b
11892+5: movl $0,PT_DS(%esp)
11893+ jmp 1b
11894+6: movl $0,PT_ES(%esp)
11895 jmp 1b
11896 .section __ex_table,"a"
11897 .align 4
11898- .long 1b,2b
11899+ .long 1b,4b
11900+ .long 2b,5b
11901+ .long 3b,6b
11902 .popsection
11903 PTGS_TO_GS_EX
11904 ENDPROC(ia32_sysenter_target)
11905@@ -519,6 +701,15 @@ syscall_exit:
11906 testl $_TIF_ALLWORK_MASK, %ecx # current->work
11907 jne syscall_exit_work
11908
11909+restore_all_pax:
11910+
11911+#ifdef CONFIG_PAX_RANDKSTACK
11912+ movl %esp, %eax
11913+ call pax_randomize_kstack
11914+#endif
11915+
11916+ pax_erase_kstack
11917+
11918 restore_all:
11919 TRACE_IRQS_IRET
11920 restore_all_notrace:
11921@@ -578,14 +769,34 @@ ldt_ss:
11922 * compensating for the offset by changing to the ESPFIX segment with
11923 * a base address that matches for the difference.
11924 */
11925-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
11926+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
11927 mov %esp, %edx /* load kernel esp */
11928 mov PT_OLDESP(%esp), %eax /* load userspace esp */
11929 mov %dx, %ax /* eax: new kernel esp */
11930 sub %eax, %edx /* offset (low word is 0) */
11931+#ifdef CONFIG_SMP
11932+ movl PER_CPU_VAR(cpu_number), %ebx
11933+ shll $PAGE_SHIFT_asm, %ebx
11934+ addl $cpu_gdt_table, %ebx
11935+#else
11936+ movl $cpu_gdt_table, %ebx
11937+#endif
11938 shr $16, %edx
11939- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
11940- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
11941+
11942+#ifdef CONFIG_PAX_KERNEXEC
11943+ mov %cr0, %esi
11944+ btr $16, %esi
11945+ mov %esi, %cr0
11946+#endif
11947+
11948+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
11949+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
11950+
11951+#ifdef CONFIG_PAX_KERNEXEC
11952+ bts $16, %esi
11953+ mov %esi, %cr0
11954+#endif
11955+
11956 pushl_cfi $__ESPFIX_SS
11957 pushl_cfi %eax /* new kernel esp */
11958 /* Disable interrupts, but do not irqtrace this section: we
11959@@ -614,34 +825,28 @@ work_resched:
11960 movl TI_flags(%ebp), %ecx
11961 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
11962 # than syscall tracing?
11963- jz restore_all
11964+ jz restore_all_pax
11965 testb $_TIF_NEED_RESCHED, %cl
11966 jnz work_resched
11967
11968 work_notifysig: # deal with pending signals and
11969 # notify-resume requests
11970+ movl %esp, %eax
11971 #ifdef CONFIG_VM86
11972 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
11973- movl %esp, %eax
11974- jne work_notifysig_v86 # returning to kernel-space or
11975+ jz 1f # returning to kernel-space or
11976 # vm86-space
11977- xorl %edx, %edx
11978- call do_notify_resume
11979- jmp resume_userspace_sig
11980
11981- ALIGN
11982-work_notifysig_v86:
11983 pushl_cfi %ecx # save ti_flags for do_notify_resume
11984 call save_v86_state # %eax contains pt_regs pointer
11985 popl_cfi %ecx
11986 movl %eax, %esp
11987-#else
11988- movl %esp, %eax
11989+1:
11990 #endif
11991 xorl %edx, %edx
11992 call do_notify_resume
11993 jmp resume_userspace_sig
11994-END(work_pending)
11995+ENDPROC(work_pending)
11996
11997 # perform syscall exit tracing
11998 ALIGN
11999@@ -649,11 +854,14 @@ syscall_trace_entry:
12000 movl $-ENOSYS,PT_EAX(%esp)
12001 movl %esp, %eax
12002 call syscall_trace_enter
12003+
12004+ pax_erase_kstack
12005+
12006 /* What it returned is what we'll actually use. */
12007 cmpl $(nr_syscalls), %eax
12008 jnae syscall_call
12009 jmp syscall_exit
12010-END(syscall_trace_entry)
12011+ENDPROC(syscall_trace_entry)
12012
12013 # perform syscall exit tracing
12014 ALIGN
12015@@ -666,20 +874,24 @@ syscall_exit_work:
12016 movl %esp, %eax
12017 call syscall_trace_leave
12018 jmp resume_userspace
12019-END(syscall_exit_work)
12020+ENDPROC(syscall_exit_work)
12021 CFI_ENDPROC
12022
12023 RING0_INT_FRAME # can't unwind into user space anyway
12024 syscall_fault:
12025+#ifdef CONFIG_PAX_MEMORY_UDEREF
12026+ push %ss
12027+ pop %ds
12028+#endif
12029 GET_THREAD_INFO(%ebp)
12030 movl $-EFAULT,PT_EAX(%esp)
12031 jmp resume_userspace
12032-END(syscall_fault)
12033+ENDPROC(syscall_fault)
12034
12035 syscall_badsys:
12036 movl $-ENOSYS,PT_EAX(%esp)
12037 jmp resume_userspace
12038-END(syscall_badsys)
12039+ENDPROC(syscall_badsys)
12040 CFI_ENDPROC
12041 /*
12042 * End of kprobes section
12043@@ -753,6 +965,36 @@ ptregs_clone:
12044 CFI_ENDPROC
12045 ENDPROC(ptregs_clone)
12046
12047+ ALIGN;
12048+ENTRY(kernel_execve)
12049+ CFI_STARTPROC
12050+ pushl_cfi %ebp
12051+ sub $PT_OLDSS+4,%esp
12052+ pushl_cfi %edi
12053+ pushl_cfi %ecx
12054+ pushl_cfi %eax
12055+ lea 3*4(%esp),%edi
12056+ mov $PT_OLDSS/4+1,%ecx
12057+ xorl %eax,%eax
12058+ rep stosl
12059+ popl_cfi %eax
12060+ popl_cfi %ecx
12061+ popl_cfi %edi
12062+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
12063+ pushl_cfi %esp
12064+ call sys_execve
12065+ add $4,%esp
12066+ CFI_ADJUST_CFA_OFFSET -4
12067+ GET_THREAD_INFO(%ebp)
12068+ test %eax,%eax
12069+ jz syscall_exit
12070+ add $PT_OLDSS+4,%esp
12071+ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
12072+ popl_cfi %ebp
12073+ ret
12074+ CFI_ENDPROC
12075+ENDPROC(kernel_execve)
12076+
12077 .macro FIXUP_ESPFIX_STACK
12078 /*
12079 * Switch back for ESPFIX stack to the normal zerobased stack
12080@@ -762,8 +1004,15 @@ ENDPROC(ptregs_clone)
12081 * normal stack and adjusts ESP with the matching offset.
12082 */
12083 /* fixup the stack */
12084- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
12085- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
12086+#ifdef CONFIG_SMP
12087+ movl PER_CPU_VAR(cpu_number), %ebx
12088+ shll $PAGE_SHIFT_asm, %ebx
12089+ addl $cpu_gdt_table, %ebx
12090+#else
12091+ movl $cpu_gdt_table, %ebx
12092+#endif
12093+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
12094+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
12095 shl $16, %eax
12096 addl %esp, %eax /* the adjusted stack pointer */
12097 pushl_cfi $__KERNEL_DS
12098@@ -816,7 +1065,7 @@ vector=vector+1
12099 .endr
12100 2: jmp common_interrupt
12101 .endr
12102-END(irq_entries_start)
12103+ENDPROC(irq_entries_start)
12104
12105 .previous
12106 END(interrupt)
12107@@ -864,7 +1113,7 @@ ENTRY(coprocessor_error)
12108 pushl_cfi $do_coprocessor_error
12109 jmp error_code
12110 CFI_ENDPROC
12111-END(coprocessor_error)
12112+ENDPROC(coprocessor_error)
12113
12114 ENTRY(simd_coprocessor_error)
12115 RING0_INT_FRAME
12116@@ -885,7 +1134,7 @@ ENTRY(simd_coprocessor_error)
12117 #endif
12118 jmp error_code
12119 CFI_ENDPROC
12120-END(simd_coprocessor_error)
12121+ENDPROC(simd_coprocessor_error)
12122
12123 ENTRY(device_not_available)
12124 RING0_INT_FRAME
12125@@ -893,7 +1142,7 @@ ENTRY(device_not_available)
12126 pushl_cfi $do_device_not_available
12127 jmp error_code
12128 CFI_ENDPROC
12129-END(device_not_available)
12130+ENDPROC(device_not_available)
12131
12132 #ifdef CONFIG_PARAVIRT
12133 ENTRY(native_iret)
12134@@ -902,12 +1151,12 @@ ENTRY(native_iret)
12135 .align 4
12136 .long native_iret, iret_exc
12137 .previous
12138-END(native_iret)
12139+ENDPROC(native_iret)
12140
12141 ENTRY(native_irq_enable_sysexit)
12142 sti
12143 sysexit
12144-END(native_irq_enable_sysexit)
12145+ENDPROC(native_irq_enable_sysexit)
12146 #endif
12147
12148 ENTRY(overflow)
12149@@ -916,7 +1165,7 @@ ENTRY(overflow)
12150 pushl_cfi $do_overflow
12151 jmp error_code
12152 CFI_ENDPROC
12153-END(overflow)
12154+ENDPROC(overflow)
12155
12156 ENTRY(bounds)
12157 RING0_INT_FRAME
12158@@ -924,7 +1173,7 @@ ENTRY(bounds)
12159 pushl_cfi $do_bounds
12160 jmp error_code
12161 CFI_ENDPROC
12162-END(bounds)
12163+ENDPROC(bounds)
12164
12165 ENTRY(invalid_op)
12166 RING0_INT_FRAME
12167@@ -932,7 +1181,7 @@ ENTRY(invalid_op)
12168 pushl_cfi $do_invalid_op
12169 jmp error_code
12170 CFI_ENDPROC
12171-END(invalid_op)
12172+ENDPROC(invalid_op)
12173
12174 ENTRY(coprocessor_segment_overrun)
12175 RING0_INT_FRAME
12176@@ -940,35 +1189,35 @@ ENTRY(coprocessor_segment_overrun)
12177 pushl_cfi $do_coprocessor_segment_overrun
12178 jmp error_code
12179 CFI_ENDPROC
12180-END(coprocessor_segment_overrun)
12181+ENDPROC(coprocessor_segment_overrun)
12182
12183 ENTRY(invalid_TSS)
12184 RING0_EC_FRAME
12185 pushl_cfi $do_invalid_TSS
12186 jmp error_code
12187 CFI_ENDPROC
12188-END(invalid_TSS)
12189+ENDPROC(invalid_TSS)
12190
12191 ENTRY(segment_not_present)
12192 RING0_EC_FRAME
12193 pushl_cfi $do_segment_not_present
12194 jmp error_code
12195 CFI_ENDPROC
12196-END(segment_not_present)
12197+ENDPROC(segment_not_present)
12198
12199 ENTRY(stack_segment)
12200 RING0_EC_FRAME
12201 pushl_cfi $do_stack_segment
12202 jmp error_code
12203 CFI_ENDPROC
12204-END(stack_segment)
12205+ENDPROC(stack_segment)
12206
12207 ENTRY(alignment_check)
12208 RING0_EC_FRAME
12209 pushl_cfi $do_alignment_check
12210 jmp error_code
12211 CFI_ENDPROC
12212-END(alignment_check)
12213+ENDPROC(alignment_check)
12214
12215 ENTRY(divide_error)
12216 RING0_INT_FRAME
12217@@ -976,7 +1225,7 @@ ENTRY(divide_error)
12218 pushl_cfi $do_divide_error
12219 jmp error_code
12220 CFI_ENDPROC
12221-END(divide_error)
12222+ENDPROC(divide_error)
12223
12224 #ifdef CONFIG_X86_MCE
12225 ENTRY(machine_check)
12226@@ -985,7 +1234,7 @@ ENTRY(machine_check)
12227 pushl_cfi machine_check_vector
12228 jmp error_code
12229 CFI_ENDPROC
12230-END(machine_check)
12231+ENDPROC(machine_check)
12232 #endif
12233
12234 ENTRY(spurious_interrupt_bug)
12235@@ -994,7 +1243,7 @@ ENTRY(spurious_interrupt_bug)
12236 pushl_cfi $do_spurious_interrupt_bug
12237 jmp error_code
12238 CFI_ENDPROC
12239-END(spurious_interrupt_bug)
12240+ENDPROC(spurious_interrupt_bug)
12241 /*
12242 * End of kprobes section
12243 */
12244@@ -1109,7 +1358,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector
12245
12246 ENTRY(mcount)
12247 ret
12248-END(mcount)
12249+ENDPROC(mcount)
12250
12251 ENTRY(ftrace_caller)
12252 cmpl $0, function_trace_stop
12253@@ -1138,7 +1387,7 @@ ftrace_graph_call:
12254 .globl ftrace_stub
12255 ftrace_stub:
12256 ret
12257-END(ftrace_caller)
12258+ENDPROC(ftrace_caller)
12259
12260 #else /* ! CONFIG_DYNAMIC_FTRACE */
12261
12262@@ -1174,7 +1423,7 @@ trace:
12263 popl %ecx
12264 popl %eax
12265 jmp ftrace_stub
12266-END(mcount)
12267+ENDPROC(mcount)
12268 #endif /* CONFIG_DYNAMIC_FTRACE */
12269 #endif /* CONFIG_FUNCTION_TRACER */
12270
12271@@ -1195,7 +1444,7 @@ ENTRY(ftrace_graph_caller)
12272 popl %ecx
12273 popl %eax
12274 ret
12275-END(ftrace_graph_caller)
12276+ENDPROC(ftrace_graph_caller)
12277
12278 .globl return_to_handler
12279 return_to_handler:
12280@@ -1209,7 +1458,6 @@ return_to_handler:
12281 jmp *%ecx
12282 #endif
12283
12284-.section .rodata,"a"
12285 #include "syscall_table_32.S"
12286
12287 syscall_table_size=(.-sys_call_table)
12288@@ -1255,15 +1503,18 @@ error_code:
12289 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
12290 REG_TO_PTGS %ecx
12291 SET_KERNEL_GS %ecx
12292- movl $(__USER_DS), %ecx
12293+ movl $(__KERNEL_DS), %ecx
12294 movl %ecx, %ds
12295 movl %ecx, %es
12296+
12297+ pax_enter_kernel
12298+
12299 TRACE_IRQS_OFF
12300 movl %esp,%eax # pt_regs pointer
12301 call *%edi
12302 jmp ret_from_exception
12303 CFI_ENDPROC
12304-END(page_fault)
12305+ENDPROC(page_fault)
12306
12307 /*
12308 * Debug traps and NMI can happen at the one SYSENTER instruction
12309@@ -1305,7 +1556,7 @@ debug_stack_correct:
12310 call do_debug
12311 jmp ret_from_exception
12312 CFI_ENDPROC
12313-END(debug)
12314+ENDPROC(debug)
12315
12316 /*
12317 * NMI is doubly nasty. It can happen _while_ we're handling
12318@@ -1342,6 +1593,9 @@ nmi_stack_correct:
12319 xorl %edx,%edx # zero error code
12320 movl %esp,%eax # pt_regs pointer
12321 call do_nmi
12322+
12323+ pax_exit_kernel
12324+
12325 jmp restore_all_notrace
12326 CFI_ENDPROC
12327
12328@@ -1378,12 +1632,15 @@ nmi_espfix_stack:
12329 FIXUP_ESPFIX_STACK # %eax == %esp
12330 xorl %edx,%edx # zero error code
12331 call do_nmi
12332+
12333+ pax_exit_kernel
12334+
12335 RESTORE_REGS
12336 lss 12+4(%esp), %esp # back to espfix stack
12337 CFI_ADJUST_CFA_OFFSET -24
12338 jmp irq_return
12339 CFI_ENDPROC
12340-END(nmi)
12341+ENDPROC(nmi)
12342
12343 ENTRY(int3)
12344 RING0_INT_FRAME
12345@@ -1395,14 +1652,14 @@ ENTRY(int3)
12346 call do_int3
12347 jmp ret_from_exception
12348 CFI_ENDPROC
12349-END(int3)
12350+ENDPROC(int3)
12351
12352 ENTRY(general_protection)
12353 RING0_EC_FRAME
12354 pushl_cfi $do_general_protection
12355 jmp error_code
12356 CFI_ENDPROC
12357-END(general_protection)
12358+ENDPROC(general_protection)
12359
12360 #ifdef CONFIG_KVM_GUEST
12361 ENTRY(async_page_fault)
12362@@ -1410,7 +1667,7 @@ ENTRY(async_page_fault)
12363 pushl_cfi $do_async_page_fault
12364 jmp error_code
12365 CFI_ENDPROC
12366-END(async_page_fault)
12367+ENDPROC(async_page_fault)
12368 #endif
12369
12370 /*
12371diff -urNp linux-3.1.1/arch/x86/kernel/entry_64.S linux-3.1.1/arch/x86/kernel/entry_64.S
12372--- linux-3.1.1/arch/x86/kernel/entry_64.S 2011-11-11 15:19:27.000000000 -0500
12373+++ linux-3.1.1/arch/x86/kernel/entry_64.S 2011-11-17 18:28:56.000000000 -0500
12374@@ -55,6 +55,8 @@
12375 #include <asm/paravirt.h>
12376 #include <asm/ftrace.h>
12377 #include <asm/percpu.h>
12378+#include <asm/pgtable.h>
12379+#include <asm/alternative-asm.h>
12380
12381 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
12382 #include <linux/elf-em.h>
12383@@ -68,8 +70,9 @@
12384 #ifdef CONFIG_FUNCTION_TRACER
12385 #ifdef CONFIG_DYNAMIC_FTRACE
12386 ENTRY(mcount)
12387+ pax_force_retaddr
12388 retq
12389-END(mcount)
12390+ENDPROC(mcount)
12391
12392 ENTRY(ftrace_caller)
12393 cmpl $0, function_trace_stop
12394@@ -92,8 +95,9 @@ GLOBAL(ftrace_graph_call)
12395 #endif
12396
12397 GLOBAL(ftrace_stub)
12398+ pax_force_retaddr
12399 retq
12400-END(ftrace_caller)
12401+ENDPROC(ftrace_caller)
12402
12403 #else /* ! CONFIG_DYNAMIC_FTRACE */
12404 ENTRY(mcount)
12405@@ -112,6 +116,7 @@ ENTRY(mcount)
12406 #endif
12407
12408 GLOBAL(ftrace_stub)
12409+ pax_force_retaddr
12410 retq
12411
12412 trace:
12413@@ -121,12 +126,13 @@ trace:
12414 movq 8(%rbp), %rsi
12415 subq $MCOUNT_INSN_SIZE, %rdi
12416
12417+ pax_force_fptr ftrace_trace_function
12418 call *ftrace_trace_function
12419
12420 MCOUNT_RESTORE_FRAME
12421
12422 jmp ftrace_stub
12423-END(mcount)
12424+ENDPROC(mcount)
12425 #endif /* CONFIG_DYNAMIC_FTRACE */
12426 #endif /* CONFIG_FUNCTION_TRACER */
12427
12428@@ -146,8 +152,9 @@ ENTRY(ftrace_graph_caller)
12429
12430 MCOUNT_RESTORE_FRAME
12431
12432+ pax_force_retaddr
12433 retq
12434-END(ftrace_graph_caller)
12435+ENDPROC(ftrace_graph_caller)
12436
12437 GLOBAL(return_to_handler)
12438 subq $24, %rsp
12439@@ -163,6 +170,7 @@ GLOBAL(return_to_handler)
12440 movq 8(%rsp), %rdx
12441 movq (%rsp), %rax
12442 addq $24, %rsp
12443+ pax_force_fptr %rdi
12444 jmp *%rdi
12445 #endif
12446
12447@@ -178,6 +186,269 @@ ENTRY(native_usergs_sysret64)
12448 ENDPROC(native_usergs_sysret64)
12449 #endif /* CONFIG_PARAVIRT */
12450
12451+ .macro ljmpq sel, off
12452+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
12453+ .byte 0x48; ljmp *1234f(%rip)
12454+ .pushsection .rodata
12455+ .align 16
12456+ 1234: .quad \off; .word \sel
12457+ .popsection
12458+#else
12459+ pushq $\sel
12460+ pushq $\off
12461+ lretq
12462+#endif
12463+ .endm
12464+
12465+ .macro pax_enter_kernel
12466+#ifdef CONFIG_PAX_KERNEXEC
12467+ call pax_enter_kernel
12468+#endif
12469+ .endm
12470+
12471+ .macro pax_exit_kernel
12472+#ifdef CONFIG_PAX_KERNEXEC
12473+ call pax_exit_kernel
12474+#endif
12475+ .endm
12476+
12477+#ifdef CONFIG_PAX_KERNEXEC
12478+ENTRY(pax_enter_kernel)
12479+ pushq %rdi
12480+
12481+#ifdef CONFIG_PARAVIRT
12482+ PV_SAVE_REGS(CLBR_RDI)
12483+#endif
12484+
12485+ GET_CR0_INTO_RDI
12486+ bts $16,%rdi
12487+ jnc 1f
12488+ mov %cs,%edi
12489+ cmp $__KERNEL_CS,%edi
12490+ jz 3f
12491+ ljmpq __KERNEL_CS,3f
12492+1: ljmpq __KERNEXEC_KERNEL_CS,2f
12493+2: SET_RDI_INTO_CR0
12494+3:
12495+
12496+#ifdef CONFIG_PARAVIRT
12497+ PV_RESTORE_REGS(CLBR_RDI)
12498+#endif
12499+
12500+ popq %rdi
12501+ pax_force_retaddr
12502+ retq
12503+ENDPROC(pax_enter_kernel)
12504+
12505+ENTRY(pax_exit_kernel)
12506+ pushq %rdi
12507+
12508+#ifdef CONFIG_PARAVIRT
12509+ PV_SAVE_REGS(CLBR_RDI)
12510+#endif
12511+
12512+ mov %cs,%rdi
12513+ cmp $__KERNEXEC_KERNEL_CS,%edi
12514+ jnz 2f
12515+ GET_CR0_INTO_RDI
12516+ btr $16,%rdi
12517+ ljmpq __KERNEL_CS,1f
12518+1: SET_RDI_INTO_CR0
12519+2:
12520+
12521+#ifdef CONFIG_PARAVIRT
12522+ PV_RESTORE_REGS(CLBR_RDI);
12523+#endif
12524+
12525+ popq %rdi
12526+ pax_force_retaddr
12527+ retq
12528+ENDPROC(pax_exit_kernel)
12529+#endif
12530+
12531+ .macro pax_enter_kernel_user
12532+#ifdef CONFIG_PAX_MEMORY_UDEREF
12533+ call pax_enter_kernel_user
12534+#endif
12535+ .endm
12536+
12537+ .macro pax_exit_kernel_user
12538+#ifdef CONFIG_PAX_MEMORY_UDEREF
12539+ call pax_exit_kernel_user
12540+#endif
12541+#ifdef CONFIG_PAX_RANDKSTACK
12542+ push %rax
12543+ call pax_randomize_kstack
12544+ pop %rax
12545+#endif
12546+ .endm
12547+
12548+#ifdef CONFIG_PAX_MEMORY_UDEREF
12549+ENTRY(pax_enter_kernel_user)
12550+ pushq %rdi
12551+ pushq %rbx
12552+
12553+#ifdef CONFIG_PARAVIRT
12554+ PV_SAVE_REGS(CLBR_RDI)
12555+#endif
12556+
12557+ GET_CR3_INTO_RDI
12558+ mov %rdi,%rbx
12559+ add $__START_KERNEL_map,%rbx
12560+ sub phys_base(%rip),%rbx
12561+
12562+#ifdef CONFIG_PARAVIRT
12563+ pushq %rdi
12564+ cmpl $0, pv_info+PARAVIRT_enabled
12565+ jz 1f
12566+ i = 0
12567+ .rept USER_PGD_PTRS
12568+ mov i*8(%rbx),%rsi
12569+ mov $0,%sil
12570+ lea i*8(%rbx),%rdi
12571+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
12572+ i = i + 1
12573+ .endr
12574+ jmp 2f
12575+1:
12576+#endif
12577+
12578+ i = 0
12579+ .rept USER_PGD_PTRS
12580+ movb $0,i*8(%rbx)
12581+ i = i + 1
12582+ .endr
12583+
12584+#ifdef CONFIG_PARAVIRT
12585+2: popq %rdi
12586+#endif
12587+ SET_RDI_INTO_CR3
12588+
12589+#ifdef CONFIG_PAX_KERNEXEC
12590+ GET_CR0_INTO_RDI
12591+ bts $16,%rdi
12592+ SET_RDI_INTO_CR0
12593+#endif
12594+
12595+#ifdef CONFIG_PARAVIRT
12596+ PV_RESTORE_REGS(CLBR_RDI)
12597+#endif
12598+
12599+ popq %rbx
12600+ popq %rdi
12601+ pax_force_retaddr
12602+ retq
12603+ENDPROC(pax_enter_kernel_user)
12604+
12605+ENTRY(pax_exit_kernel_user)
12606+ push %rdi
12607+
12608+#ifdef CONFIG_PARAVIRT
12609+ pushq %rbx
12610+ PV_SAVE_REGS(CLBR_RDI)
12611+#endif
12612+
12613+#ifdef CONFIG_PAX_KERNEXEC
12614+ GET_CR0_INTO_RDI
12615+ btr $16,%rdi
12616+ SET_RDI_INTO_CR0
12617+#endif
12618+
12619+ GET_CR3_INTO_RDI
12620+ add $__START_KERNEL_map,%rdi
12621+ sub phys_base(%rip),%rdi
12622+
12623+#ifdef CONFIG_PARAVIRT
12624+ cmpl $0, pv_info+PARAVIRT_enabled
12625+ jz 1f
12626+ mov %rdi,%rbx
12627+ i = 0
12628+ .rept USER_PGD_PTRS
12629+ mov i*8(%rbx),%rsi
12630+ mov $0x67,%sil
12631+ lea i*8(%rbx),%rdi
12632+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
12633+ i = i + 1
12634+ .endr
12635+ jmp 2f
12636+1:
12637+#endif
12638+
12639+ i = 0
12640+ .rept USER_PGD_PTRS
12641+ movb $0x67,i*8(%rdi)
12642+ i = i + 1
12643+ .endr
12644+
12645+#ifdef CONFIG_PARAVIRT
12646+2: PV_RESTORE_REGS(CLBR_RDI)
12647+ popq %rbx
12648+#endif
12649+
12650+ popq %rdi
12651+ pax_force_retaddr
12652+ retq
12653+ENDPROC(pax_exit_kernel_user)
12654+#endif
12655+
12656+.macro pax_erase_kstack
12657+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12658+ call pax_erase_kstack
12659+#endif
12660+.endm
12661+
12662+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12663+/*
12664+ * r10: thread_info
12665+ * rcx, rdx: can be clobbered
12666+ */
12667+ENTRY(pax_erase_kstack)
12668+ pushq %rdi
12669+ pushq %rax
12670+ pushq %r10
12671+
12672+ GET_THREAD_INFO(%r10)
12673+ mov TI_lowest_stack(%r10), %rdi
12674+ mov $-0xBEEF, %rax
12675+ std
12676+
12677+1: mov %edi, %ecx
12678+ and $THREAD_SIZE_asm - 1, %ecx
12679+ shr $3, %ecx
12680+ repne scasq
12681+ jecxz 2f
12682+
12683+ cmp $2*8, %ecx
12684+ jc 2f
12685+
12686+ mov $2*8, %ecx
12687+ repe scasq
12688+ jecxz 2f
12689+ jne 1b
12690+
12691+2: cld
12692+ mov %esp, %ecx
12693+ sub %edi, %ecx
12694+
12695+ cmp $THREAD_SIZE_asm, %rcx
12696+ jb 3f
12697+ ud2
12698+3:
12699+
12700+ shr $3, %ecx
12701+ rep stosq
12702+
12703+ mov TI_task_thread_sp0(%r10), %rdi
12704+ sub $256, %rdi
12705+ mov %rdi, TI_lowest_stack(%r10)
12706+
12707+ popq %r10
12708+ popq %rax
12709+ popq %rdi
12710+ pax_force_retaddr
12711+ ret
12712+ENDPROC(pax_erase_kstack)
12713+#endif
12714
12715 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
12716 #ifdef CONFIG_TRACE_IRQFLAGS
12717@@ -319,7 +590,7 @@ ENDPROC(native_usergs_sysret64)
12718 movq %rsp, %rsi
12719
12720 leaq -RBP(%rsp),%rdi /* arg1 for handler */
12721- testl $3, CS(%rdi)
12722+ testb $3, CS(%rdi)
12723 je 1f
12724 SWAPGS
12725 /*
12726@@ -350,9 +621,10 @@ ENTRY(save_rest)
12727 movq_cfi r15, R15+16
12728 movq %r11, 8(%rsp) /* return address */
12729 FIXUP_TOP_OF_STACK %r11, 16
12730+ pax_force_retaddr
12731 ret
12732 CFI_ENDPROC
12733-END(save_rest)
12734+ENDPROC(save_rest)
12735
12736 /* save complete stack frame */
12737 .pushsection .kprobes.text, "ax"
12738@@ -381,9 +653,10 @@ ENTRY(save_paranoid)
12739 js 1f /* negative -> in kernel */
12740 SWAPGS
12741 xorl %ebx,%ebx
12742-1: ret
12743+1: pax_force_retaddr
12744+ ret
12745 CFI_ENDPROC
12746-END(save_paranoid)
12747+ENDPROC(save_paranoid)
12748 .popsection
12749
12750 /*
12751@@ -405,7 +678,7 @@ ENTRY(ret_from_fork)
12752
12753 RESTORE_REST
12754
12755- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12756+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12757 je int_ret_from_sys_call
12758
12759 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
12760@@ -415,7 +688,7 @@ ENTRY(ret_from_fork)
12761 jmp ret_from_sys_call # go to the SYSRET fastpath
12762
12763 CFI_ENDPROC
12764-END(ret_from_fork)
12765+ENDPROC(ret_from_fork)
12766
12767 /*
12768 * System call entry. Up to 6 arguments in registers are supported.
12769@@ -451,7 +724,7 @@ END(ret_from_fork)
12770 ENTRY(system_call)
12771 CFI_STARTPROC simple
12772 CFI_SIGNAL_FRAME
12773- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
12774+ CFI_DEF_CFA rsp,0
12775 CFI_REGISTER rip,rcx
12776 /*CFI_REGISTER rflags,r11*/
12777 SWAPGS_UNSAFE_STACK
12778@@ -464,12 +737,13 @@ ENTRY(system_call_after_swapgs)
12779
12780 movq %rsp,PER_CPU_VAR(old_rsp)
12781 movq PER_CPU_VAR(kernel_stack),%rsp
12782+ pax_enter_kernel_user
12783 /*
12784 * No need to follow this irqs off/on section - it's straight
12785 * and short:
12786 */
12787 ENABLE_INTERRUPTS(CLBR_NONE)
12788- SAVE_ARGS 8,0
12789+ SAVE_ARGS 8*6,0
12790 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
12791 movq %rcx,RIP-ARGOFFSET(%rsp)
12792 CFI_REL_OFFSET rip,RIP-ARGOFFSET
12793@@ -498,6 +772,8 @@ sysret_check:
12794 andl %edi,%edx
12795 jnz sysret_careful
12796 CFI_REMEMBER_STATE
12797+ pax_exit_kernel_user
12798+ pax_erase_kstack
12799 /*
12800 * sysretq will re-enable interrupts:
12801 */
12802@@ -556,6 +832,9 @@ auditsys:
12803 movq %rax,%rsi /* 2nd arg: syscall number */
12804 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
12805 call audit_syscall_entry
12806+
12807+ pax_erase_kstack
12808+
12809 LOAD_ARGS 0 /* reload call-clobbered registers */
12810 jmp system_call_fastpath
12811
12812@@ -586,6 +865,9 @@ tracesys:
12813 FIXUP_TOP_OF_STACK %rdi
12814 movq %rsp,%rdi
12815 call syscall_trace_enter
12816+
12817+ pax_erase_kstack
12818+
12819 /*
12820 * Reload arg registers from stack in case ptrace changed them.
12821 * We don't reload %rax because syscall_trace_enter() returned
12822@@ -607,7 +889,7 @@ tracesys:
12823 GLOBAL(int_ret_from_sys_call)
12824 DISABLE_INTERRUPTS(CLBR_NONE)
12825 TRACE_IRQS_OFF
12826- testl $3,CS-ARGOFFSET(%rsp)
12827+ testb $3,CS-ARGOFFSET(%rsp)
12828 je retint_restore_args
12829 movl $_TIF_ALLWORK_MASK,%edi
12830 /* edi: mask to check */
12831@@ -664,7 +946,7 @@ int_restore_rest:
12832 TRACE_IRQS_OFF
12833 jmp int_with_check
12834 CFI_ENDPROC
12835-END(system_call)
12836+ENDPROC(system_call)
12837
12838 /*
12839 * Certain special system calls that need to save a complete full stack frame.
12840@@ -680,7 +962,7 @@ ENTRY(\label)
12841 call \func
12842 jmp ptregscall_common
12843 CFI_ENDPROC
12844-END(\label)
12845+ENDPROC(\label)
12846 .endm
12847
12848 PTREGSCALL stub_clone, sys_clone, %r8
12849@@ -698,9 +980,10 @@ ENTRY(ptregscall_common)
12850 movq_cfi_restore R12+8, r12
12851 movq_cfi_restore RBP+8, rbp
12852 movq_cfi_restore RBX+8, rbx
12853+ pax_force_retaddr
12854 ret $REST_SKIP /* pop extended registers */
12855 CFI_ENDPROC
12856-END(ptregscall_common)
12857+ENDPROC(ptregscall_common)
12858
12859 ENTRY(stub_execve)
12860 CFI_STARTPROC
12861@@ -715,7 +998,7 @@ ENTRY(stub_execve)
12862 RESTORE_REST
12863 jmp int_ret_from_sys_call
12864 CFI_ENDPROC
12865-END(stub_execve)
12866+ENDPROC(stub_execve)
12867
12868 /*
12869 * sigreturn is special because it needs to restore all registers on return.
12870@@ -733,7 +1016,7 @@ ENTRY(stub_rt_sigreturn)
12871 RESTORE_REST
12872 jmp int_ret_from_sys_call
12873 CFI_ENDPROC
12874-END(stub_rt_sigreturn)
12875+ENDPROC(stub_rt_sigreturn)
12876
12877 /*
12878 * Build the entry stubs and pointer table with some assembler magic.
12879@@ -768,7 +1051,7 @@ vector=vector+1
12880 2: jmp common_interrupt
12881 .endr
12882 CFI_ENDPROC
12883-END(irq_entries_start)
12884+ENDPROC(irq_entries_start)
12885
12886 .previous
12887 END(interrupt)
12888@@ -789,6 +1072,16 @@ END(interrupt)
12889 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
12890 SAVE_ARGS_IRQ
12891 PARTIAL_FRAME 0
12892+#ifdef CONFIG_PAX_MEMORY_UDEREF
12893+ testb $3, CS(%rdi)
12894+ jnz 1f
12895+ pax_enter_kernel
12896+ jmp 2f
12897+1: pax_enter_kernel_user
12898+2:
12899+#else
12900+ pax_enter_kernel
12901+#endif
12902 call \func
12903 .endm
12904
12905@@ -820,7 +1113,7 @@ ret_from_intr:
12906
12907 exit_intr:
12908 GET_THREAD_INFO(%rcx)
12909- testl $3,CS-ARGOFFSET(%rsp)
12910+ testb $3,CS-ARGOFFSET(%rsp)
12911 je retint_kernel
12912
12913 /* Interrupt came from user space */
12914@@ -842,12 +1135,16 @@ retint_swapgs: /* return to user-space
12915 * The iretq could re-enable interrupts:
12916 */
12917 DISABLE_INTERRUPTS(CLBR_ANY)
12918+ pax_exit_kernel_user
12919+ pax_erase_kstack
12920 TRACE_IRQS_IRETQ
12921 SWAPGS
12922 jmp restore_args
12923
12924 retint_restore_args: /* return to kernel space */
12925 DISABLE_INTERRUPTS(CLBR_ANY)
12926+ pax_exit_kernel
12927+ pax_force_retaddr RIP-ARGOFFSET
12928 /*
12929 * The iretq could re-enable interrupts:
12930 */
12931@@ -936,7 +1233,7 @@ ENTRY(retint_kernel)
12932 #endif
12933
12934 CFI_ENDPROC
12935-END(common_interrupt)
12936+ENDPROC(common_interrupt)
12937 /*
12938 * End of kprobes section
12939 */
12940@@ -952,7 +1249,7 @@ ENTRY(\sym)
12941 interrupt \do_sym
12942 jmp ret_from_intr
12943 CFI_ENDPROC
12944-END(\sym)
12945+ENDPROC(\sym)
12946 .endm
12947
12948 #ifdef CONFIG_SMP
12949@@ -1017,12 +1314,22 @@ ENTRY(\sym)
12950 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12951 call error_entry
12952 DEFAULT_FRAME 0
12953+#ifdef CONFIG_PAX_MEMORY_UDEREF
12954+ testb $3, CS(%rsp)
12955+ jnz 1f
12956+ pax_enter_kernel
12957+ jmp 2f
12958+1: pax_enter_kernel_user
12959+2:
12960+#else
12961+ pax_enter_kernel
12962+#endif
12963 movq %rsp,%rdi /* pt_regs pointer */
12964 xorl %esi,%esi /* no error code */
12965 call \do_sym
12966 jmp error_exit /* %ebx: no swapgs flag */
12967 CFI_ENDPROC
12968-END(\sym)
12969+ENDPROC(\sym)
12970 .endm
12971
12972 .macro paranoidzeroentry sym do_sym
12973@@ -1034,15 +1341,25 @@ ENTRY(\sym)
12974 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12975 call save_paranoid
12976 TRACE_IRQS_OFF
12977+#ifdef CONFIG_PAX_MEMORY_UDEREF
12978+ testb $3, CS(%rsp)
12979+ jnz 1f
12980+ pax_enter_kernel
12981+ jmp 2f
12982+1: pax_enter_kernel_user
12983+2:
12984+#else
12985+ pax_enter_kernel
12986+#endif
12987 movq %rsp,%rdi /* pt_regs pointer */
12988 xorl %esi,%esi /* no error code */
12989 call \do_sym
12990 jmp paranoid_exit /* %ebx: no swapgs flag */
12991 CFI_ENDPROC
12992-END(\sym)
12993+ENDPROC(\sym)
12994 .endm
12995
12996-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
12997+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
12998 .macro paranoidzeroentry_ist sym do_sym ist
12999 ENTRY(\sym)
13000 INTR_FRAME
13001@@ -1052,14 +1369,30 @@ ENTRY(\sym)
13002 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
13003 call save_paranoid
13004 TRACE_IRQS_OFF
13005+#ifdef CONFIG_PAX_MEMORY_UDEREF
13006+ testb $3, CS(%rsp)
13007+ jnz 1f
13008+ pax_enter_kernel
13009+ jmp 2f
13010+1: pax_enter_kernel_user
13011+2:
13012+#else
13013+ pax_enter_kernel
13014+#endif
13015 movq %rsp,%rdi /* pt_regs pointer */
13016 xorl %esi,%esi /* no error code */
13017+#ifdef CONFIG_SMP
13018+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
13019+ lea init_tss(%r12), %r12
13020+#else
13021+ lea init_tss(%rip), %r12
13022+#endif
13023 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
13024 call \do_sym
13025 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
13026 jmp paranoid_exit /* %ebx: no swapgs flag */
13027 CFI_ENDPROC
13028-END(\sym)
13029+ENDPROC(\sym)
13030 .endm
13031
13032 .macro errorentry sym do_sym
13033@@ -1070,13 +1403,23 @@ ENTRY(\sym)
13034 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
13035 call error_entry
13036 DEFAULT_FRAME 0
13037+#ifdef CONFIG_PAX_MEMORY_UDEREF
13038+ testb $3, CS(%rsp)
13039+ jnz 1f
13040+ pax_enter_kernel
13041+ jmp 2f
13042+1: pax_enter_kernel_user
13043+2:
13044+#else
13045+ pax_enter_kernel
13046+#endif
13047 movq %rsp,%rdi /* pt_regs pointer */
13048 movq ORIG_RAX(%rsp),%rsi /* get error code */
13049 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
13050 call \do_sym
13051 jmp error_exit /* %ebx: no swapgs flag */
13052 CFI_ENDPROC
13053-END(\sym)
13054+ENDPROC(\sym)
13055 .endm
13056
13057 /* error code is on the stack already */
13058@@ -1089,13 +1432,23 @@ ENTRY(\sym)
13059 call save_paranoid
13060 DEFAULT_FRAME 0
13061 TRACE_IRQS_OFF
13062+#ifdef CONFIG_PAX_MEMORY_UDEREF
13063+ testb $3, CS(%rsp)
13064+ jnz 1f
13065+ pax_enter_kernel
13066+ jmp 2f
13067+1: pax_enter_kernel_user
13068+2:
13069+#else
13070+ pax_enter_kernel
13071+#endif
13072 movq %rsp,%rdi /* pt_regs pointer */
13073 movq ORIG_RAX(%rsp),%rsi /* get error code */
13074 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
13075 call \do_sym
13076 jmp paranoid_exit /* %ebx: no swapgs flag */
13077 CFI_ENDPROC
13078-END(\sym)
13079+ENDPROC(\sym)
13080 .endm
13081
13082 zeroentry divide_error do_divide_error
13083@@ -1125,9 +1478,10 @@ gs_change:
13084 2: mfence /* workaround */
13085 SWAPGS
13086 popfq_cfi
13087+ pax_force_retaddr
13088 ret
13089 CFI_ENDPROC
13090-END(native_load_gs_index)
13091+ENDPROC(native_load_gs_index)
13092
13093 .section __ex_table,"a"
13094 .align 8
13095@@ -1149,13 +1503,14 @@ ENTRY(kernel_thread_helper)
13096 * Here we are in the child and the registers are set as they were
13097 * at kernel_thread() invocation in the parent.
13098 */
13099+ pax_force_fptr %rsi
13100 call *%rsi
13101 # exit
13102 mov %eax, %edi
13103 call do_exit
13104 ud2 # padding for call trace
13105 CFI_ENDPROC
13106-END(kernel_thread_helper)
13107+ENDPROC(kernel_thread_helper)
13108
13109 /*
13110 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
13111@@ -1184,9 +1539,10 @@ ENTRY(kernel_execve)
13112 je int_ret_from_sys_call
13113 RESTORE_ARGS
13114 UNFAKE_STACK_FRAME
13115+ pax_force_retaddr
13116 ret
13117 CFI_ENDPROC
13118-END(kernel_execve)
13119+ENDPROC(kernel_execve)
13120
13121 /* Call softirq on interrupt stack. Interrupts are off. */
13122 ENTRY(call_softirq)
13123@@ -1204,9 +1560,10 @@ ENTRY(call_softirq)
13124 CFI_DEF_CFA_REGISTER rsp
13125 CFI_ADJUST_CFA_OFFSET -8
13126 decl PER_CPU_VAR(irq_count)
13127+ pax_force_retaddr
13128 ret
13129 CFI_ENDPROC
13130-END(call_softirq)
13131+ENDPROC(call_softirq)
13132
13133 #ifdef CONFIG_XEN
13134 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
13135@@ -1244,7 +1601,7 @@ ENTRY(xen_do_hypervisor_callback) # do
13136 decl PER_CPU_VAR(irq_count)
13137 jmp error_exit
13138 CFI_ENDPROC
13139-END(xen_do_hypervisor_callback)
13140+ENDPROC(xen_do_hypervisor_callback)
13141
13142 /*
13143 * Hypervisor uses this for application faults while it executes.
13144@@ -1303,7 +1660,7 @@ ENTRY(xen_failsafe_callback)
13145 SAVE_ALL
13146 jmp error_exit
13147 CFI_ENDPROC
13148-END(xen_failsafe_callback)
13149+ENDPROC(xen_failsafe_callback)
13150
13151 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
13152 xen_hvm_callback_vector xen_evtchn_do_upcall
13153@@ -1352,16 +1709,31 @@ ENTRY(paranoid_exit)
13154 TRACE_IRQS_OFF
13155 testl %ebx,%ebx /* swapgs needed? */
13156 jnz paranoid_restore
13157- testl $3,CS(%rsp)
13158+ testb $3,CS(%rsp)
13159 jnz paranoid_userspace
13160+#ifdef CONFIG_PAX_MEMORY_UDEREF
13161+ pax_exit_kernel
13162+ TRACE_IRQS_IRETQ 0
13163+ SWAPGS_UNSAFE_STACK
13164+ RESTORE_ALL 8
13165+ pax_force_retaddr
13166+ jmp irq_return
13167+#endif
13168 paranoid_swapgs:
13169+#ifdef CONFIG_PAX_MEMORY_UDEREF
13170+ pax_exit_kernel_user
13171+#else
13172+ pax_exit_kernel
13173+#endif
13174 TRACE_IRQS_IRETQ 0
13175 SWAPGS_UNSAFE_STACK
13176 RESTORE_ALL 8
13177 jmp irq_return
13178 paranoid_restore:
13179+ pax_exit_kernel
13180 TRACE_IRQS_IRETQ 0
13181 RESTORE_ALL 8
13182+ pax_force_retaddr
13183 jmp irq_return
13184 paranoid_userspace:
13185 GET_THREAD_INFO(%rcx)
13186@@ -1390,7 +1762,7 @@ paranoid_schedule:
13187 TRACE_IRQS_OFF
13188 jmp paranoid_userspace
13189 CFI_ENDPROC
13190-END(paranoid_exit)
13191+ENDPROC(paranoid_exit)
13192
13193 /*
13194 * Exception entry point. This expects an error code/orig_rax on the stack.
13195@@ -1417,12 +1789,13 @@ ENTRY(error_entry)
13196 movq_cfi r14, R14+8
13197 movq_cfi r15, R15+8
13198 xorl %ebx,%ebx
13199- testl $3,CS+8(%rsp)
13200+ testb $3,CS+8(%rsp)
13201 je error_kernelspace
13202 error_swapgs:
13203 SWAPGS
13204 error_sti:
13205 TRACE_IRQS_OFF
13206+ pax_force_retaddr
13207 ret
13208
13209 /*
13210@@ -1449,7 +1822,7 @@ bstep_iret:
13211 movq %rcx,RIP+8(%rsp)
13212 jmp error_swapgs
13213 CFI_ENDPROC
13214-END(error_entry)
13215+ENDPROC(error_entry)
13216
13217
13218 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
13219@@ -1469,7 +1842,7 @@ ENTRY(error_exit)
13220 jnz retint_careful
13221 jmp retint_swapgs
13222 CFI_ENDPROC
13223-END(error_exit)
13224+ENDPROC(error_exit)
13225
13226
13227 /* runs on exception stack */
13228@@ -1481,6 +1854,16 @@ ENTRY(nmi)
13229 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
13230 call save_paranoid
13231 DEFAULT_FRAME 0
13232+#ifdef CONFIG_PAX_MEMORY_UDEREF
13233+ testb $3, CS(%rsp)
13234+ jnz 1f
13235+ pax_enter_kernel
13236+ jmp 2f
13237+1: pax_enter_kernel_user
13238+2:
13239+#else
13240+ pax_enter_kernel
13241+#endif
13242 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
13243 movq %rsp,%rdi
13244 movq $-1,%rsi
13245@@ -1491,12 +1874,28 @@ ENTRY(nmi)
13246 DISABLE_INTERRUPTS(CLBR_NONE)
13247 testl %ebx,%ebx /* swapgs needed? */
13248 jnz nmi_restore
13249- testl $3,CS(%rsp)
13250+ testb $3,CS(%rsp)
13251 jnz nmi_userspace
13252+#ifdef CONFIG_PAX_MEMORY_UDEREF
13253+ pax_exit_kernel
13254+ SWAPGS_UNSAFE_STACK
13255+ RESTORE_ALL 8
13256+ pax_force_retaddr
13257+ jmp irq_return
13258+#endif
13259 nmi_swapgs:
13260+#ifdef CONFIG_PAX_MEMORY_UDEREF
13261+ pax_exit_kernel_user
13262+#else
13263+ pax_exit_kernel
13264+#endif
13265 SWAPGS_UNSAFE_STACK
13266+ RESTORE_ALL 8
13267+ jmp irq_return
13268 nmi_restore:
13269+ pax_exit_kernel
13270 RESTORE_ALL 8
13271+ pax_force_retaddr
13272 jmp irq_return
13273 nmi_userspace:
13274 GET_THREAD_INFO(%rcx)
13275@@ -1525,14 +1924,14 @@ nmi_schedule:
13276 jmp paranoid_exit
13277 CFI_ENDPROC
13278 #endif
13279-END(nmi)
13280+ENDPROC(nmi)
13281
13282 ENTRY(ignore_sysret)
13283 CFI_STARTPROC
13284 mov $-ENOSYS,%eax
13285 sysret
13286 CFI_ENDPROC
13287-END(ignore_sysret)
13288+ENDPROC(ignore_sysret)
13289
13290 /*
13291 * End of kprobes section
13292diff -urNp linux-3.1.1/arch/x86/kernel/ftrace.c linux-3.1.1/arch/x86/kernel/ftrace.c
13293--- linux-3.1.1/arch/x86/kernel/ftrace.c 2011-11-11 15:19:27.000000000 -0500
13294+++ linux-3.1.1/arch/x86/kernel/ftrace.c 2011-11-16 18:39:07.000000000 -0500
13295@@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the
13296 static const void *mod_code_newcode; /* holds the text to write to the IP */
13297
13298 static unsigned nmi_wait_count;
13299-static atomic_t nmi_update_count = ATOMIC_INIT(0);
13300+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
13301
13302 int ftrace_arch_read_dyn_info(char *buf, int size)
13303 {
13304@@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf,
13305
13306 r = snprintf(buf, size, "%u %u",
13307 nmi_wait_count,
13308- atomic_read(&nmi_update_count));
13309+ atomic_read_unchecked(&nmi_update_count));
13310 return r;
13311 }
13312
13313@@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
13314
13315 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
13316 smp_rmb();
13317+ pax_open_kernel();
13318 ftrace_mod_code();
13319- atomic_inc(&nmi_update_count);
13320+ pax_close_kernel();
13321+ atomic_inc_unchecked(&nmi_update_count);
13322 }
13323 /* Must have previous changes seen before executions */
13324 smp_mb();
13325@@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, uns
13326 {
13327 unsigned char replaced[MCOUNT_INSN_SIZE];
13328
13329+ ip = ktla_ktva(ip);
13330+
13331 /*
13332 * Note: Due to modules and __init, code can
13333 * disappear and change, we need to protect against faulting
13334@@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_fun
13335 unsigned char old[MCOUNT_INSN_SIZE], *new;
13336 int ret;
13337
13338- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
13339+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
13340 new = ftrace_call_replace(ip, (unsigned long)func);
13341 ret = ftrace_modify_code(ip, old, new);
13342
13343@@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long
13344 {
13345 unsigned char code[MCOUNT_INSN_SIZE];
13346
13347+ ip = ktla_ktva(ip);
13348+
13349 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
13350 return -EFAULT;
13351
13352diff -urNp linux-3.1.1/arch/x86/kernel/head32.c linux-3.1.1/arch/x86/kernel/head32.c
13353--- linux-3.1.1/arch/x86/kernel/head32.c 2011-11-11 15:19:27.000000000 -0500
13354+++ linux-3.1.1/arch/x86/kernel/head32.c 2011-11-16 18:39:07.000000000 -0500
13355@@ -19,6 +19,7 @@
13356 #include <asm/io_apic.h>
13357 #include <asm/bios_ebda.h>
13358 #include <asm/tlbflush.h>
13359+#include <asm/boot.h>
13360
13361 static void __init i386_default_early_setup(void)
13362 {
13363@@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
13364 {
13365 memblock_init();
13366
13367- memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
13368+ memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
13369
13370 #ifdef CONFIG_BLK_DEV_INITRD
13371 /* Reserve INITRD */
13372diff -urNp linux-3.1.1/arch/x86/kernel/head_32.S linux-3.1.1/arch/x86/kernel/head_32.S
13373--- linux-3.1.1/arch/x86/kernel/head_32.S 2011-11-11 15:19:27.000000000 -0500
13374+++ linux-3.1.1/arch/x86/kernel/head_32.S 2011-11-16 18:39:07.000000000 -0500
13375@@ -25,6 +25,12 @@
13376 /* Physical address */
13377 #define pa(X) ((X) - __PAGE_OFFSET)
13378
13379+#ifdef CONFIG_PAX_KERNEXEC
13380+#define ta(X) (X)
13381+#else
13382+#define ta(X) ((X) - __PAGE_OFFSET)
13383+#endif
13384+
13385 /*
13386 * References to members of the new_cpu_data structure.
13387 */
13388@@ -54,11 +60,7 @@
13389 * and small than max_low_pfn, otherwise will waste some page table entries
13390 */
13391
13392-#if PTRS_PER_PMD > 1
13393-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
13394-#else
13395-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
13396-#endif
13397+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
13398
13399 /* Number of possible pages in the lowmem region */
13400 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
13401@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
13402 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
13403
13404 /*
13405+ * Real beginning of normal "text" segment
13406+ */
13407+ENTRY(stext)
13408+ENTRY(_stext)
13409+
13410+/*
13411 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
13412 * %esi points to the real-mode code as a 32-bit pointer.
13413 * CS and DS must be 4 GB flat segments, but we don't depend on
13414@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
13415 * can.
13416 */
13417 __HEAD
13418+
13419+#ifdef CONFIG_PAX_KERNEXEC
13420+ jmp startup_32
13421+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
13422+.fill PAGE_SIZE-5,1,0xcc
13423+#endif
13424+
13425 ENTRY(startup_32)
13426 movl pa(stack_start),%ecx
13427
13428@@ -105,6 +120,57 @@ ENTRY(startup_32)
13429 2:
13430 leal -__PAGE_OFFSET(%ecx),%esp
13431
13432+#ifdef CONFIG_SMP
13433+ movl $pa(cpu_gdt_table),%edi
13434+ movl $__per_cpu_load,%eax
13435+ movw %ax,__KERNEL_PERCPU + 2(%edi)
13436+ rorl $16,%eax
13437+ movb %al,__KERNEL_PERCPU + 4(%edi)
13438+ movb %ah,__KERNEL_PERCPU + 7(%edi)
13439+ movl $__per_cpu_end - 1,%eax
13440+ subl $__per_cpu_start,%eax
13441+ movw %ax,__KERNEL_PERCPU + 0(%edi)
13442+#endif
13443+
13444+#ifdef CONFIG_PAX_MEMORY_UDEREF
13445+ movl $NR_CPUS,%ecx
13446+ movl $pa(cpu_gdt_table),%edi
13447+1:
13448+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
13449+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
13450+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
13451+ addl $PAGE_SIZE_asm,%edi
13452+ loop 1b
13453+#endif
13454+
13455+#ifdef CONFIG_PAX_KERNEXEC
13456+ movl $pa(boot_gdt),%edi
13457+ movl $__LOAD_PHYSICAL_ADDR,%eax
13458+ movw %ax,__BOOT_CS + 2(%edi)
13459+ rorl $16,%eax
13460+ movb %al,__BOOT_CS + 4(%edi)
13461+ movb %ah,__BOOT_CS + 7(%edi)
13462+ rorl $16,%eax
13463+
13464+ ljmp $(__BOOT_CS),$1f
13465+1:
13466+
13467+ movl $NR_CPUS,%ecx
13468+ movl $pa(cpu_gdt_table),%edi
13469+ addl $__PAGE_OFFSET,%eax
13470+1:
13471+ movw %ax,__KERNEL_CS + 2(%edi)
13472+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
13473+ rorl $16,%eax
13474+ movb %al,__KERNEL_CS + 4(%edi)
13475+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
13476+ movb %ah,__KERNEL_CS + 7(%edi)
13477+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
13478+ rorl $16,%eax
13479+ addl $PAGE_SIZE_asm,%edi
13480+ loop 1b
13481+#endif
13482+
13483 /*
13484 * Clear BSS first so that there are no surprises...
13485 */
13486@@ -195,8 +261,11 @@ ENTRY(startup_32)
13487 movl %eax, pa(max_pfn_mapped)
13488
13489 /* Do early initialization of the fixmap area */
13490- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
13491- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
13492+#ifdef CONFIG_COMPAT_VDSO
13493+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
13494+#else
13495+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
13496+#endif
13497 #else /* Not PAE */
13498
13499 page_pde_offset = (__PAGE_OFFSET >> 20);
13500@@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
13501 movl %eax, pa(max_pfn_mapped)
13502
13503 /* Do early initialization of the fixmap area */
13504- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
13505- movl %eax,pa(initial_page_table+0xffc)
13506+#ifdef CONFIG_COMPAT_VDSO
13507+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
13508+#else
13509+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
13510+#endif
13511 #endif
13512
13513 #ifdef CONFIG_PARAVIRT
13514@@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
13515 cmpl $num_subarch_entries, %eax
13516 jae bad_subarch
13517
13518- movl pa(subarch_entries)(,%eax,4), %eax
13519- subl $__PAGE_OFFSET, %eax
13520- jmp *%eax
13521+ jmp *pa(subarch_entries)(,%eax,4)
13522
13523 bad_subarch:
13524 WEAK(lguest_entry)
13525@@ -255,10 +325,10 @@ WEAK(xen_entry)
13526 __INITDATA
13527
13528 subarch_entries:
13529- .long default_entry /* normal x86/PC */
13530- .long lguest_entry /* lguest hypervisor */
13531- .long xen_entry /* Xen hypervisor */
13532- .long default_entry /* Moorestown MID */
13533+ .long ta(default_entry) /* normal x86/PC */
13534+ .long ta(lguest_entry) /* lguest hypervisor */
13535+ .long ta(xen_entry) /* Xen hypervisor */
13536+ .long ta(default_entry) /* Moorestown MID */
13537 num_subarch_entries = (. - subarch_entries) / 4
13538 .previous
13539 #else
13540@@ -312,6 +382,7 @@ default_entry:
13541 orl %edx,%eax
13542 movl %eax,%cr4
13543
13544+#ifdef CONFIG_X86_PAE
13545 testb $X86_CR4_PAE, %al # check if PAE is enabled
13546 jz 6f
13547
13548@@ -340,6 +411,9 @@ default_entry:
13549 /* Make changes effective */
13550 wrmsr
13551
13552+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
13553+#endif
13554+
13555 6:
13556
13557 /*
13558@@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
13559 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
13560 movl %eax,%ss # after changing gdt.
13561
13562- movl $(__USER_DS),%eax # DS/ES contains default USER segment
13563+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
13564 movl %eax,%ds
13565 movl %eax,%es
13566
13567@@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
13568 */
13569 cmpb $0,ready
13570 jne 1f
13571- movl $gdt_page,%eax
13572+ movl $cpu_gdt_table,%eax
13573 movl $stack_canary,%ecx
13574+#ifdef CONFIG_SMP
13575+ addl $__per_cpu_load,%ecx
13576+#endif
13577 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
13578 shrl $16, %ecx
13579 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
13580 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
13581 1:
13582-#endif
13583 movl $(__KERNEL_STACK_CANARY),%eax
13584+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
13585+ movl $(__USER_DS),%eax
13586+#else
13587+ xorl %eax,%eax
13588+#endif
13589 movl %eax,%gs
13590
13591 xorl %eax,%eax # Clear LDT
13592@@ -558,22 +639,22 @@ early_page_fault:
13593 jmp early_fault
13594
13595 early_fault:
13596- cld
13597 #ifdef CONFIG_PRINTK
13598+ cmpl $1,%ss:early_recursion_flag
13599+ je hlt_loop
13600+ incl %ss:early_recursion_flag
13601+ cld
13602 pusha
13603 movl $(__KERNEL_DS),%eax
13604 movl %eax,%ds
13605 movl %eax,%es
13606- cmpl $2,early_recursion_flag
13607- je hlt_loop
13608- incl early_recursion_flag
13609 movl %cr2,%eax
13610 pushl %eax
13611 pushl %edx /* trapno */
13612 pushl $fault_msg
13613 call printk
13614+; call dump_stack
13615 #endif
13616- call dump_stack
13617 hlt_loop:
13618 hlt
13619 jmp hlt_loop
13620@@ -581,8 +662,11 @@ hlt_loop:
13621 /* This is the default interrupt "handler" :-) */
13622 ALIGN
13623 ignore_int:
13624- cld
13625 #ifdef CONFIG_PRINTK
13626+ cmpl $2,%ss:early_recursion_flag
13627+ je hlt_loop
13628+ incl %ss:early_recursion_flag
13629+ cld
13630 pushl %eax
13631 pushl %ecx
13632 pushl %edx
13633@@ -591,9 +675,6 @@ ignore_int:
13634 movl $(__KERNEL_DS),%eax
13635 movl %eax,%ds
13636 movl %eax,%es
13637- cmpl $2,early_recursion_flag
13638- je hlt_loop
13639- incl early_recursion_flag
13640 pushl 16(%esp)
13641 pushl 24(%esp)
13642 pushl 32(%esp)
13643@@ -622,29 +703,43 @@ ENTRY(initial_code)
13644 /*
13645 * BSS section
13646 */
13647-__PAGE_ALIGNED_BSS
13648- .align PAGE_SIZE
13649 #ifdef CONFIG_X86_PAE
13650+.section .initial_pg_pmd,"a",@progbits
13651 initial_pg_pmd:
13652 .fill 1024*KPMDS,4,0
13653 #else
13654+.section .initial_page_table,"a",@progbits
13655 ENTRY(initial_page_table)
13656 .fill 1024,4,0
13657 #endif
13658+.section .initial_pg_fixmap,"a",@progbits
13659 initial_pg_fixmap:
13660 .fill 1024,4,0
13661+.section .empty_zero_page,"a",@progbits
13662 ENTRY(empty_zero_page)
13663 .fill 4096,1,0
13664+.section .swapper_pg_dir,"a",@progbits
13665 ENTRY(swapper_pg_dir)
13666+#ifdef CONFIG_X86_PAE
13667+ .fill 4,8,0
13668+#else
13669 .fill 1024,4,0
13670+#endif
13671+
13672+/*
13673+ * The IDT has to be page-aligned to simplify the Pentium
13674+ * F0 0F bug workaround.. We have a special link segment
13675+ * for this.
13676+ */
13677+.section .idt,"a",@progbits
13678+ENTRY(idt_table)
13679+ .fill 256,8,0
13680
13681 /*
13682 * This starts the data section.
13683 */
13684 #ifdef CONFIG_X86_PAE
13685-__PAGE_ALIGNED_DATA
13686- /* Page-aligned for the benefit of paravirt? */
13687- .align PAGE_SIZE
13688+.section .initial_page_table,"a",@progbits
13689 ENTRY(initial_page_table)
13690 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
13691 # if KPMDS == 3
13692@@ -663,18 +758,27 @@ ENTRY(initial_page_table)
13693 # error "Kernel PMDs should be 1, 2 or 3"
13694 # endif
13695 .align PAGE_SIZE /* needs to be page-sized too */
13696+
13697+#ifdef CONFIG_PAX_PER_CPU_PGD
13698+ENTRY(cpu_pgd)
13699+ .rept NR_CPUS
13700+ .fill 4,8,0
13701+ .endr
13702+#endif
13703+
13704 #endif
13705
13706 .data
13707 .balign 4
13708 ENTRY(stack_start)
13709- .long init_thread_union+THREAD_SIZE
13710+ .long init_thread_union+THREAD_SIZE-8
13711+
13712+ready: .byte 0
13713
13714+.section .rodata,"a",@progbits
13715 early_recursion_flag:
13716 .long 0
13717
13718-ready: .byte 0
13719-
13720 int_msg:
13721 .asciz "Unknown interrupt or fault at: %p %p %p\n"
13722
13723@@ -707,7 +811,7 @@ fault_msg:
13724 .word 0 # 32 bit align gdt_desc.address
13725 boot_gdt_descr:
13726 .word __BOOT_DS+7
13727- .long boot_gdt - __PAGE_OFFSET
13728+ .long pa(boot_gdt)
13729
13730 .word 0 # 32-bit align idt_desc.address
13731 idt_descr:
13732@@ -718,7 +822,7 @@ idt_descr:
13733 .word 0 # 32 bit align gdt_desc.address
13734 ENTRY(early_gdt_descr)
13735 .word GDT_ENTRIES*8-1
13736- .long gdt_page /* Overwritten for secondary CPUs */
13737+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
13738
13739 /*
13740 * The boot_gdt must mirror the equivalent in setup.S and is
13741@@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
13742 .align L1_CACHE_BYTES
13743 ENTRY(boot_gdt)
13744 .fill GDT_ENTRY_BOOT_CS,8,0
13745- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
13746- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
13747+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
13748+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
13749+
13750+ .align PAGE_SIZE_asm
13751+ENTRY(cpu_gdt_table)
13752+ .rept NR_CPUS
13753+ .quad 0x0000000000000000 /* NULL descriptor */
13754+ .quad 0x0000000000000000 /* 0x0b reserved */
13755+ .quad 0x0000000000000000 /* 0x13 reserved */
13756+ .quad 0x0000000000000000 /* 0x1b reserved */
13757+
13758+#ifdef CONFIG_PAX_KERNEXEC
13759+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
13760+#else
13761+ .quad 0x0000000000000000 /* 0x20 unused */
13762+#endif
13763+
13764+ .quad 0x0000000000000000 /* 0x28 unused */
13765+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
13766+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
13767+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
13768+ .quad 0x0000000000000000 /* 0x4b reserved */
13769+ .quad 0x0000000000000000 /* 0x53 reserved */
13770+ .quad 0x0000000000000000 /* 0x5b reserved */
13771+
13772+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
13773+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
13774+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
13775+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
13776+
13777+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
13778+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
13779+
13780+ /*
13781+ * Segments used for calling PnP BIOS have byte granularity.
13782+ * The code segments and data segments have fixed 64k limits,
13783+ * the transfer segment sizes are set at run time.
13784+ */
13785+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
13786+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
13787+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
13788+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
13789+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
13790+
13791+ /*
13792+ * The APM segments have byte granularity and their bases
13793+ * are set at run time. All have 64k limits.
13794+ */
13795+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
13796+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
13797+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
13798+
13799+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
13800+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
13801+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
13802+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
13803+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
13804+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
13805+
13806+ /* Be sure this is zeroed to avoid false validations in Xen */
13807+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
13808+ .endr
13809diff -urNp linux-3.1.1/arch/x86/kernel/head_64.S linux-3.1.1/arch/x86/kernel/head_64.S
13810--- linux-3.1.1/arch/x86/kernel/head_64.S 2011-11-11 15:19:27.000000000 -0500
13811+++ linux-3.1.1/arch/x86/kernel/head_64.S 2011-11-16 18:39:07.000000000 -0500
13812@@ -19,6 +19,7 @@
13813 #include <asm/cache.h>
13814 #include <asm/processor-flags.h>
13815 #include <asm/percpu.h>
13816+#include <asm/cpufeature.h>
13817
13818 #ifdef CONFIG_PARAVIRT
13819 #include <asm/asm-offsets.h>
13820@@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
13821 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
13822 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
13823 L3_START_KERNEL = pud_index(__START_KERNEL_map)
13824+L4_VMALLOC_START = pgd_index(VMALLOC_START)
13825+L3_VMALLOC_START = pud_index(VMALLOC_START)
13826+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
13827+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
13828
13829 .text
13830 __HEAD
13831@@ -85,35 +90,22 @@ startup_64:
13832 */
13833 addq %rbp, init_level4_pgt + 0(%rip)
13834 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
13835+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
13836+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
13837 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
13838
13839 addq %rbp, level3_ident_pgt + 0(%rip)
13840+#ifndef CONFIG_XEN
13841+ addq %rbp, level3_ident_pgt + 8(%rip)
13842+#endif
13843
13844- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
13845- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
13846+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
13847
13848- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
13849+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
13850+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
13851
13852- /* Add an Identity mapping if I am above 1G */
13853- leaq _text(%rip), %rdi
13854- andq $PMD_PAGE_MASK, %rdi
13855-
13856- movq %rdi, %rax
13857- shrq $PUD_SHIFT, %rax
13858- andq $(PTRS_PER_PUD - 1), %rax
13859- jz ident_complete
13860-
13861- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
13862- leaq level3_ident_pgt(%rip), %rbx
13863- movq %rdx, 0(%rbx, %rax, 8)
13864-
13865- movq %rdi, %rax
13866- shrq $PMD_SHIFT, %rax
13867- andq $(PTRS_PER_PMD - 1), %rax
13868- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
13869- leaq level2_spare_pgt(%rip), %rbx
13870- movq %rdx, 0(%rbx, %rax, 8)
13871-ident_complete:
13872+ addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
13873+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
13874
13875 /*
13876 * Fixup the kernel text+data virtual addresses. Note that
13877@@ -160,8 +152,8 @@ ENTRY(secondary_startup_64)
13878 * after the boot processor executes this code.
13879 */
13880
13881- /* Enable PAE mode and PGE */
13882- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
13883+ /* Enable PAE mode and PSE/PGE */
13884+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
13885 movq %rax, %cr4
13886
13887 /* Setup early boot stage 4 level pagetables. */
13888@@ -183,9 +175,14 @@ ENTRY(secondary_startup_64)
13889 movl $MSR_EFER, %ecx
13890 rdmsr
13891 btsl $_EFER_SCE, %eax /* Enable System Call */
13892- btl $20,%edi /* No Execute supported? */
13893+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
13894 jnc 1f
13895 btsl $_EFER_NX, %eax
13896+ leaq init_level4_pgt(%rip), %rdi
13897+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
13898+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
13899+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
13900+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
13901 1: wrmsr /* Make changes effective */
13902
13903 /* Setup cr0 */
13904@@ -269,7 +266,7 @@ ENTRY(secondary_startup_64)
13905 bad_address:
13906 jmp bad_address
13907
13908- .section ".init.text","ax"
13909+ __INIT
13910 #ifdef CONFIG_EARLY_PRINTK
13911 .globl early_idt_handlers
13912 early_idt_handlers:
13913@@ -314,18 +311,23 @@ ENTRY(early_idt_handler)
13914 #endif /* EARLY_PRINTK */
13915 1: hlt
13916 jmp 1b
13917+ .previous
13918
13919 #ifdef CONFIG_EARLY_PRINTK
13920+ __INITDATA
13921 early_recursion_flag:
13922 .long 0
13923+ .previous
13924
13925+ .section .rodata,"a",@progbits
13926 early_idt_msg:
13927 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
13928 early_idt_ripmsg:
13929 .asciz "RIP %s\n"
13930-#endif /* CONFIG_EARLY_PRINTK */
13931 .previous
13932+#endif /* CONFIG_EARLY_PRINTK */
13933
13934+ .section .rodata,"a",@progbits
13935 #define NEXT_PAGE(name) \
13936 .balign PAGE_SIZE; \
13937 ENTRY(name)
13938@@ -338,7 +340,6 @@ ENTRY(name)
13939 i = i + 1 ; \
13940 .endr
13941
13942- .data
13943 /*
13944 * This default setting generates an ident mapping at address 0x100000
13945 * and a mapping for the kernel that precisely maps virtual address
13946@@ -349,13 +350,36 @@ NEXT_PAGE(init_level4_pgt)
13947 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13948 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
13949 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13950+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
13951+ .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
13952+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
13953+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13954 .org init_level4_pgt + L4_START_KERNEL*8, 0
13955 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
13956 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
13957
13958+#ifdef CONFIG_PAX_PER_CPU_PGD
13959+NEXT_PAGE(cpu_pgd)
13960+ .rept NR_CPUS
13961+ .fill 512,8,0
13962+ .endr
13963+#endif
13964+
13965 NEXT_PAGE(level3_ident_pgt)
13966 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13967+#ifdef CONFIG_XEN
13968 .fill 511,8,0
13969+#else
13970+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
13971+ .fill 510,8,0
13972+#endif
13973+
13974+NEXT_PAGE(level3_vmalloc_pgt)
13975+ .fill 512,8,0
13976+
13977+NEXT_PAGE(level3_vmemmap_pgt)
13978+ .fill L3_VMEMMAP_START,8,0
13979+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13980
13981 NEXT_PAGE(level3_kernel_pgt)
13982 .fill L3_START_KERNEL,8,0
13983@@ -363,20 +387,23 @@ NEXT_PAGE(level3_kernel_pgt)
13984 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
13985 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13986
13987+NEXT_PAGE(level2_vmemmap_pgt)
13988+ .fill 512,8,0
13989+
13990 NEXT_PAGE(level2_fixmap_pgt)
13991- .fill 506,8,0
13992- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13993- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
13994- .fill 5,8,0
13995+ .fill 507,8,0
13996+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
13997+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
13998+ .fill 4,8,0
13999
14000-NEXT_PAGE(level1_fixmap_pgt)
14001+NEXT_PAGE(level1_vsyscall_pgt)
14002 .fill 512,8,0
14003
14004-NEXT_PAGE(level2_ident_pgt)
14005- /* Since I easily can, map the first 1G.
14006+ /* Since I easily can, map the first 2G.
14007 * Don't set NX because code runs from these pages.
14008 */
14009- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
14010+NEXT_PAGE(level2_ident_pgt)
14011+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
14012
14013 NEXT_PAGE(level2_kernel_pgt)
14014 /*
14015@@ -389,33 +416,55 @@ NEXT_PAGE(level2_kernel_pgt)
14016 * If you want to increase this then increase MODULES_VADDR
14017 * too.)
14018 */
14019- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
14020- KERNEL_IMAGE_SIZE/PMD_SIZE)
14021-
14022-NEXT_PAGE(level2_spare_pgt)
14023- .fill 512, 8, 0
14024+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
14025
14026 #undef PMDS
14027 #undef NEXT_PAGE
14028
14029- .data
14030+ .align PAGE_SIZE
14031+ENTRY(cpu_gdt_table)
14032+ .rept NR_CPUS
14033+ .quad 0x0000000000000000 /* NULL descriptor */
14034+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
14035+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
14036+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
14037+ .quad 0x00cffb000000ffff /* __USER32_CS */
14038+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
14039+ .quad 0x00affb000000ffff /* __USER_CS */
14040+
14041+#ifdef CONFIG_PAX_KERNEXEC
14042+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
14043+#else
14044+ .quad 0x0 /* unused */
14045+#endif
14046+
14047+ .quad 0,0 /* TSS */
14048+ .quad 0,0 /* LDT */
14049+ .quad 0,0,0 /* three TLS descriptors */
14050+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
14051+ /* asm/segment.h:GDT_ENTRIES must match this */
14052+
14053+ /* zero the remaining page */
14054+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
14055+ .endr
14056+
14057 .align 16
14058 .globl early_gdt_descr
14059 early_gdt_descr:
14060 .word GDT_ENTRIES*8-1
14061 early_gdt_descr_base:
14062- .quad INIT_PER_CPU_VAR(gdt_page)
14063+ .quad cpu_gdt_table
14064
14065 ENTRY(phys_base)
14066 /* This must match the first entry in level2_kernel_pgt */
14067 .quad 0x0000000000000000
14068
14069 #include "../../x86/xen/xen-head.S"
14070-
14071- .section .bss, "aw", @nobits
14072+
14073+ .section .rodata,"a",@progbits
14074 .align L1_CACHE_BYTES
14075 ENTRY(idt_table)
14076- .skip IDT_ENTRIES * 16
14077+ .fill 512,8,0
14078
14079 __PAGE_ALIGNED_BSS
14080 .align PAGE_SIZE
14081diff -urNp linux-3.1.1/arch/x86/kernel/i386_ksyms_32.c linux-3.1.1/arch/x86/kernel/i386_ksyms_32.c
14082--- linux-3.1.1/arch/x86/kernel/i386_ksyms_32.c 2011-11-11 15:19:27.000000000 -0500
14083+++ linux-3.1.1/arch/x86/kernel/i386_ksyms_32.c 2011-11-16 18:39:07.000000000 -0500
14084@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
14085 EXPORT_SYMBOL(cmpxchg8b_emu);
14086 #endif
14087
14088+EXPORT_SYMBOL_GPL(cpu_gdt_table);
14089+
14090 /* Networking helper routines. */
14091 EXPORT_SYMBOL(csum_partial_copy_generic);
14092+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
14093+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
14094
14095 EXPORT_SYMBOL(__get_user_1);
14096 EXPORT_SYMBOL(__get_user_2);
14097@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
14098
14099 EXPORT_SYMBOL(csum_partial);
14100 EXPORT_SYMBOL(empty_zero_page);
14101+
14102+#ifdef CONFIG_PAX_KERNEXEC
14103+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
14104+#endif
14105diff -urNp linux-3.1.1/arch/x86/kernel/i8259.c linux-3.1.1/arch/x86/kernel/i8259.c
14106--- linux-3.1.1/arch/x86/kernel/i8259.c 2011-11-11 15:19:27.000000000 -0500
14107+++ linux-3.1.1/arch/x86/kernel/i8259.c 2011-11-16 18:39:07.000000000 -0500
14108@@ -210,7 +210,7 @@ spurious_8259A_irq:
14109 "spurious 8259A interrupt: IRQ%d.\n", irq);
14110 spurious_irq_mask |= irqmask;
14111 }
14112- atomic_inc(&irq_err_count);
14113+ atomic_inc_unchecked(&irq_err_count);
14114 /*
14115 * Theoretically we do not have to handle this IRQ,
14116 * but in Linux this does not cause problems and is
14117diff -urNp linux-3.1.1/arch/x86/kernel/init_task.c linux-3.1.1/arch/x86/kernel/init_task.c
14118--- linux-3.1.1/arch/x86/kernel/init_task.c 2011-11-11 15:19:27.000000000 -0500
14119+++ linux-3.1.1/arch/x86/kernel/init_task.c 2011-11-16 18:39:07.000000000 -0500
14120@@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
14121 * way process stacks are handled. This is done by having a special
14122 * "init_task" linker map entry..
14123 */
14124-union thread_union init_thread_union __init_task_data =
14125- { INIT_THREAD_INFO(init_task) };
14126+union thread_union init_thread_union __init_task_data;
14127
14128 /*
14129 * Initial task structure.
14130@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
14131 * section. Since TSS's are completely CPU-local, we want them
14132 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
14133 */
14134-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
14135-
14136+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
14137+EXPORT_SYMBOL(init_tss);
14138diff -urNp linux-3.1.1/arch/x86/kernel/ioport.c linux-3.1.1/arch/x86/kernel/ioport.c
14139--- linux-3.1.1/arch/x86/kernel/ioport.c 2011-11-11 15:19:27.000000000 -0500
14140+++ linux-3.1.1/arch/x86/kernel/ioport.c 2011-11-16 18:40:08.000000000 -0500
14141@@ -6,6 +6,7 @@
14142 #include <linux/sched.h>
14143 #include <linux/kernel.h>
14144 #include <linux/capability.h>
14145+#include <linux/security.h>
14146 #include <linux/errno.h>
14147 #include <linux/types.h>
14148 #include <linux/ioport.h>
14149@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long
14150
14151 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
14152 return -EINVAL;
14153+#ifdef CONFIG_GRKERNSEC_IO
14154+ if (turn_on && grsec_disable_privio) {
14155+ gr_handle_ioperm();
14156+ return -EPERM;
14157+ }
14158+#endif
14159 if (turn_on && !capable(CAP_SYS_RAWIO))
14160 return -EPERM;
14161
14162@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long
14163 * because the ->io_bitmap_max value must match the bitmap
14164 * contents:
14165 */
14166- tss = &per_cpu(init_tss, get_cpu());
14167+ tss = init_tss + get_cpu();
14168
14169 if (turn_on)
14170 bitmap_clear(t->io_bitmap_ptr, from, num);
14171@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct
14172 return -EINVAL;
14173 /* Trying to gain more privileges? */
14174 if (level > old) {
14175+#ifdef CONFIG_GRKERNSEC_IO
14176+ if (grsec_disable_privio) {
14177+ gr_handle_iopl();
14178+ return -EPERM;
14179+ }
14180+#endif
14181 if (!capable(CAP_SYS_RAWIO))
14182 return -EPERM;
14183 }
14184diff -urNp linux-3.1.1/arch/x86/kernel/irq_32.c linux-3.1.1/arch/x86/kernel/irq_32.c
14185--- linux-3.1.1/arch/x86/kernel/irq_32.c 2011-11-11 15:19:27.000000000 -0500
14186+++ linux-3.1.1/arch/x86/kernel/irq_32.c 2011-11-16 18:39:07.000000000 -0500
14187@@ -36,7 +36,7 @@ static int check_stack_overflow(void)
14188 __asm__ __volatile__("andl %%esp,%0" :
14189 "=r" (sp) : "0" (THREAD_SIZE - 1));
14190
14191- return sp < (sizeof(struct thread_info) + STACK_WARN);
14192+ return sp < STACK_WARN;
14193 }
14194
14195 static void print_stack_overflow(void)
14196@@ -54,8 +54,8 @@ static inline void print_stack_overflow(
14197 * per-CPU IRQ handling contexts (thread information and stack)
14198 */
14199 union irq_ctx {
14200- struct thread_info tinfo;
14201- u32 stack[THREAD_SIZE/sizeof(u32)];
14202+ unsigned long previous_esp;
14203+ u32 stack[THREAD_SIZE/sizeof(u32)];
14204 } __attribute__((aligned(THREAD_SIZE)));
14205
14206 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
14207@@ -75,10 +75,9 @@ static void call_on_stack(void *func, vo
14208 static inline int
14209 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
14210 {
14211- union irq_ctx *curctx, *irqctx;
14212+ union irq_ctx *irqctx;
14213 u32 *isp, arg1, arg2;
14214
14215- curctx = (union irq_ctx *) current_thread_info();
14216 irqctx = __this_cpu_read(hardirq_ctx);
14217
14218 /*
14219@@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struc
14220 * handler) we can't do that and just have to keep using the
14221 * current stack (which is the irq stack already after all)
14222 */
14223- if (unlikely(curctx == irqctx))
14224+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
14225 return 0;
14226
14227 /* build the stack frame on the IRQ stack */
14228- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
14229- irqctx->tinfo.task = curctx->tinfo.task;
14230- irqctx->tinfo.previous_esp = current_stack_pointer;
14231+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
14232+ irqctx->previous_esp = current_stack_pointer;
14233
14234- /*
14235- * Copy the softirq bits in preempt_count so that the
14236- * softirq checks work in the hardirq context.
14237- */
14238- irqctx->tinfo.preempt_count =
14239- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
14240- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
14241+#ifdef CONFIG_PAX_MEMORY_UDEREF
14242+ __set_fs(MAKE_MM_SEG(0));
14243+#endif
14244
14245 if (unlikely(overflow))
14246 call_on_stack(print_stack_overflow, isp);
14247@@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struc
14248 : "0" (irq), "1" (desc), "2" (isp),
14249 "D" (desc->handle_irq)
14250 : "memory", "cc", "ecx");
14251+
14252+#ifdef CONFIG_PAX_MEMORY_UDEREF
14253+ __set_fs(current_thread_info()->addr_limit);
14254+#endif
14255+
14256 return 1;
14257 }
14258
14259@@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struc
14260 */
14261 void __cpuinit irq_ctx_init(int cpu)
14262 {
14263- union irq_ctx *irqctx;
14264-
14265 if (per_cpu(hardirq_ctx, cpu))
14266 return;
14267
14268- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
14269- THREAD_FLAGS,
14270- THREAD_ORDER));
14271- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
14272- irqctx->tinfo.cpu = cpu;
14273- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
14274- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
14275-
14276- per_cpu(hardirq_ctx, cpu) = irqctx;
14277-
14278- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
14279- THREAD_FLAGS,
14280- THREAD_ORDER));
14281- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
14282- irqctx->tinfo.cpu = cpu;
14283- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
14284-
14285- per_cpu(softirq_ctx, cpu) = irqctx;
14286+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
14287+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
14288
14289 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
14290 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
14291@@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
14292 asmlinkage void do_softirq(void)
14293 {
14294 unsigned long flags;
14295- struct thread_info *curctx;
14296 union irq_ctx *irqctx;
14297 u32 *isp;
14298
14299@@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
14300 local_irq_save(flags);
14301
14302 if (local_softirq_pending()) {
14303- curctx = current_thread_info();
14304 irqctx = __this_cpu_read(softirq_ctx);
14305- irqctx->tinfo.task = curctx->task;
14306- irqctx->tinfo.previous_esp = current_stack_pointer;
14307+ irqctx->previous_esp = current_stack_pointer;
14308
14309 /* build the stack frame on the softirq stack */
14310- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
14311+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
14312+
14313+#ifdef CONFIG_PAX_MEMORY_UDEREF
14314+ __set_fs(MAKE_MM_SEG(0));
14315+#endif
14316
14317 call_on_stack(__do_softirq, isp);
14318+
14319+#ifdef CONFIG_PAX_MEMORY_UDEREF
14320+ __set_fs(current_thread_info()->addr_limit);
14321+#endif
14322+
14323 /*
14324 * Shouldn't happen, we returned above if in_interrupt():
14325 */
14326diff -urNp linux-3.1.1/arch/x86/kernel/irq.c linux-3.1.1/arch/x86/kernel/irq.c
14327--- linux-3.1.1/arch/x86/kernel/irq.c 2011-11-11 15:19:27.000000000 -0500
14328+++ linux-3.1.1/arch/x86/kernel/irq.c 2011-11-16 18:39:07.000000000 -0500
14329@@ -17,7 +17,7 @@
14330 #include <asm/mce.h>
14331 #include <asm/hw_irq.h>
14332
14333-atomic_t irq_err_count;
14334+atomic_unchecked_t irq_err_count;
14335
14336 /* Function pointer for generic interrupt vector handling */
14337 void (*x86_platform_ipi_callback)(void) = NULL;
14338@@ -116,9 +116,9 @@ int arch_show_interrupts(struct seq_file
14339 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
14340 seq_printf(p, " Machine check polls\n");
14341 #endif
14342- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
14343+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
14344 #if defined(CONFIG_X86_IO_APIC)
14345- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
14346+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
14347 #endif
14348 return 0;
14349 }
14350@@ -158,10 +158,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
14351
14352 u64 arch_irq_stat(void)
14353 {
14354- u64 sum = atomic_read(&irq_err_count);
14355+ u64 sum = atomic_read_unchecked(&irq_err_count);
14356
14357 #ifdef CONFIG_X86_IO_APIC
14358- sum += atomic_read(&irq_mis_count);
14359+ sum += atomic_read_unchecked(&irq_mis_count);
14360 #endif
14361 return sum;
14362 }
14363diff -urNp linux-3.1.1/arch/x86/kernel/kgdb.c linux-3.1.1/arch/x86/kernel/kgdb.c
14364--- linux-3.1.1/arch/x86/kernel/kgdb.c 2011-11-11 15:19:27.000000000 -0500
14365+++ linux-3.1.1/arch/x86/kernel/kgdb.c 2011-11-16 18:39:07.000000000 -0500
14366@@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem,
14367 #ifdef CONFIG_X86_32
14368 switch (regno) {
14369 case GDB_SS:
14370- if (!user_mode_vm(regs))
14371+ if (!user_mode(regs))
14372 *(unsigned long *)mem = __KERNEL_DS;
14373 break;
14374 case GDB_SP:
14375- if (!user_mode_vm(regs))
14376+ if (!user_mode(regs))
14377 *(unsigned long *)mem = kernel_stack_pointer(regs);
14378 break;
14379 case GDB_GS:
14380@@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vec
14381 case 'k':
14382 /* clear the trace bit */
14383 linux_regs->flags &= ~X86_EFLAGS_TF;
14384- atomic_set(&kgdb_cpu_doing_single_step, -1);
14385+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
14386
14387 /* set the trace bit if we're stepping */
14388 if (remcomInBuffer[0] == 's') {
14389 linux_regs->flags |= X86_EFLAGS_TF;
14390- atomic_set(&kgdb_cpu_doing_single_step,
14391+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
14392 raw_smp_processor_id());
14393 }
14394
14395@@ -534,7 +534,7 @@ static int __kgdb_notify(struct die_args
14396 return NOTIFY_DONE;
14397
14398 case DIE_DEBUG:
14399- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
14400+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
14401 if (user_mode(regs))
14402 return single_step_cont(regs, args);
14403 break;
14404diff -urNp linux-3.1.1/arch/x86/kernel/kprobes.c linux-3.1.1/arch/x86/kernel/kprobes.c
14405--- linux-3.1.1/arch/x86/kernel/kprobes.c 2011-11-11 15:19:27.000000000 -0500
14406+++ linux-3.1.1/arch/x86/kernel/kprobes.c 2011-11-16 18:39:07.000000000 -0500
14407@@ -117,8 +117,11 @@ static void __kprobes __synthesize_relat
14408 } __attribute__((packed)) *insn;
14409
14410 insn = (struct __arch_relative_insn *)from;
14411+
14412+ pax_open_kernel();
14413 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
14414 insn->op = op;
14415+ pax_close_kernel();
14416 }
14417
14418 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
14419@@ -155,7 +158,7 @@ static int __kprobes can_boost(kprobe_op
14420 kprobe_opcode_t opcode;
14421 kprobe_opcode_t *orig_opcodes = opcodes;
14422
14423- if (search_exception_tables((unsigned long)opcodes))
14424+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
14425 return 0; /* Page fault may occur on this address. */
14426
14427 retry:
14428@@ -316,7 +319,9 @@ static int __kprobes __copy_instruction(
14429 }
14430 }
14431 insn_get_length(&insn);
14432+ pax_open_kernel();
14433 memcpy(dest, insn.kaddr, insn.length);
14434+ pax_close_kernel();
14435
14436 #ifdef CONFIG_X86_64
14437 if (insn_rip_relative(&insn)) {
14438@@ -340,7 +345,9 @@ static int __kprobes __copy_instruction(
14439 (u8 *) dest;
14440 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
14441 disp = (u8 *) dest + insn_offset_displacement(&insn);
14442+ pax_open_kernel();
14443 *(s32 *) disp = (s32) newdisp;
14444+ pax_close_kernel();
14445 }
14446 #endif
14447 return insn.length;
14448@@ -354,12 +361,12 @@ static void __kprobes arch_copy_kprobe(s
14449 */
14450 __copy_instruction(p->ainsn.insn, p->addr, 0);
14451
14452- if (can_boost(p->addr))
14453+ if (can_boost(ktla_ktva(p->addr)))
14454 p->ainsn.boostable = 0;
14455 else
14456 p->ainsn.boostable = -1;
14457
14458- p->opcode = *p->addr;
14459+ p->opcode = *(ktla_ktva(p->addr));
14460 }
14461
14462 int __kprobes arch_prepare_kprobe(struct kprobe *p)
14463@@ -476,7 +483,7 @@ static void __kprobes setup_singlestep(s
14464 * nor set current_kprobe, because it doesn't use single
14465 * stepping.
14466 */
14467- regs->ip = (unsigned long)p->ainsn.insn;
14468+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
14469 preempt_enable_no_resched();
14470 return;
14471 }
14472@@ -495,7 +502,7 @@ static void __kprobes setup_singlestep(s
14473 if (p->opcode == BREAKPOINT_INSTRUCTION)
14474 regs->ip = (unsigned long)p->addr;
14475 else
14476- regs->ip = (unsigned long)p->ainsn.insn;
14477+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
14478 }
14479
14480 /*
14481@@ -574,7 +581,7 @@ static int __kprobes kprobe_handler(stru
14482 setup_singlestep(p, regs, kcb, 0);
14483 return 1;
14484 }
14485- } else if (*addr != BREAKPOINT_INSTRUCTION) {
14486+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
14487 /*
14488 * The breakpoint instruction was removed right
14489 * after we hit it. Another cpu has removed
14490@@ -682,6 +689,9 @@ static void __used __kprobes kretprobe_t
14491 " movq %rax, 152(%rsp)\n"
14492 RESTORE_REGS_STRING
14493 " popfq\n"
14494+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
14495+ " btsq $63,(%rsp)\n"
14496+#endif
14497 #else
14498 " pushf\n"
14499 SAVE_REGS_STRING
14500@@ -819,7 +829,7 @@ static void __kprobes resume_execution(s
14501 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
14502 {
14503 unsigned long *tos = stack_addr(regs);
14504- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
14505+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
14506 unsigned long orig_ip = (unsigned long)p->addr;
14507 kprobe_opcode_t *insn = p->ainsn.insn;
14508
14509@@ -1001,7 +1011,7 @@ int __kprobes kprobe_exceptions_notify(s
14510 struct die_args *args = data;
14511 int ret = NOTIFY_DONE;
14512
14513- if (args->regs && user_mode_vm(args->regs))
14514+ if (args->regs && user_mode(args->regs))
14515 return ret;
14516
14517 switch (val) {
14518@@ -1383,7 +1393,7 @@ int __kprobes arch_prepare_optimized_kpr
14519 * Verify if the address gap is in 2GB range, because this uses
14520 * a relative jump.
14521 */
14522- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
14523+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
14524 if (abs(rel) > 0x7fffffff)
14525 return -ERANGE;
14526
14527@@ -1404,11 +1414,11 @@ int __kprobes arch_prepare_optimized_kpr
14528 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
14529
14530 /* Set probe function call */
14531- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
14532+ synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
14533
14534 /* Set returning jmp instruction at the tail of out-of-line buffer */
14535 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
14536- (u8 *)op->kp.addr + op->optinsn.size);
14537+ (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
14538
14539 flush_icache_range((unsigned long) buf,
14540 (unsigned long) buf + TMPL_END_IDX +
14541@@ -1430,7 +1440,7 @@ static void __kprobes setup_optimize_kpr
14542 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
14543
14544 /* Backup instructions which will be replaced by jump address */
14545- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
14546+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
14547 RELATIVE_ADDR_SIZE);
14548
14549 insn_buf[0] = RELATIVEJUMP_OPCODE;
14550diff -urNp linux-3.1.1/arch/x86/kernel/kvm.c linux-3.1.1/arch/x86/kernel/kvm.c
14551--- linux-3.1.1/arch/x86/kernel/kvm.c 2011-11-11 15:19:27.000000000 -0500
14552+++ linux-3.1.1/arch/x86/kernel/kvm.c 2011-11-16 18:39:07.000000000 -0500
14553@@ -437,6 +437,7 @@ static void __init paravirt_ops_setup(vo
14554 pv_mmu_ops.set_pud = kvm_set_pud;
14555 #if PAGETABLE_LEVELS == 4
14556 pv_mmu_ops.set_pgd = kvm_set_pgd;
14557+ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
14558 #endif
14559 #endif
14560 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
14561diff -urNp linux-3.1.1/arch/x86/kernel/ldt.c linux-3.1.1/arch/x86/kernel/ldt.c
14562--- linux-3.1.1/arch/x86/kernel/ldt.c 2011-11-11 15:19:27.000000000 -0500
14563+++ linux-3.1.1/arch/x86/kernel/ldt.c 2011-11-16 18:39:07.000000000 -0500
14564@@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, i
14565 if (reload) {
14566 #ifdef CONFIG_SMP
14567 preempt_disable();
14568- load_LDT(pc);
14569+ load_LDT_nolock(pc);
14570 if (!cpumask_equal(mm_cpumask(current->mm),
14571 cpumask_of(smp_processor_id())))
14572 smp_call_function(flush_ldt, current->mm, 1);
14573 preempt_enable();
14574 #else
14575- load_LDT(pc);
14576+ load_LDT_nolock(pc);
14577 #endif
14578 }
14579 if (oldsize) {
14580@@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t
14581 return err;
14582
14583 for (i = 0; i < old->size; i++)
14584- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
14585+ write_ldt_entry(new->ldt, i, old->ldt + i);
14586 return 0;
14587 }
14588
14589@@ -116,6 +116,24 @@ int init_new_context(struct task_struct
14590 retval = copy_ldt(&mm->context, &old_mm->context);
14591 mutex_unlock(&old_mm->context.lock);
14592 }
14593+
14594+ if (tsk == current) {
14595+ mm->context.vdso = 0;
14596+
14597+#ifdef CONFIG_X86_32
14598+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
14599+ mm->context.user_cs_base = 0UL;
14600+ mm->context.user_cs_limit = ~0UL;
14601+
14602+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
14603+ cpus_clear(mm->context.cpu_user_cs_mask);
14604+#endif
14605+
14606+#endif
14607+#endif
14608+
14609+ }
14610+
14611 return retval;
14612 }
14613
14614@@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, u
14615 }
14616 }
14617
14618+#ifdef CONFIG_PAX_SEGMEXEC
14619+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
14620+ error = -EINVAL;
14621+ goto out_unlock;
14622+ }
14623+#endif
14624+
14625 fill_ldt(&ldt, &ldt_info);
14626 if (oldmode)
14627 ldt.avl = 0;
14628diff -urNp linux-3.1.1/arch/x86/kernel/machine_kexec_32.c linux-3.1.1/arch/x86/kernel/machine_kexec_32.c
14629--- linux-3.1.1/arch/x86/kernel/machine_kexec_32.c 2011-11-11 15:19:27.000000000 -0500
14630+++ linux-3.1.1/arch/x86/kernel/machine_kexec_32.c 2011-11-16 18:39:07.000000000 -0500
14631@@ -27,7 +27,7 @@
14632 #include <asm/cacheflush.h>
14633 #include <asm/debugreg.h>
14634
14635-static void set_idt(void *newidt, __u16 limit)
14636+static void set_idt(struct desc_struct *newidt, __u16 limit)
14637 {
14638 struct desc_ptr curidt;
14639
14640@@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16
14641 }
14642
14643
14644-static void set_gdt(void *newgdt, __u16 limit)
14645+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
14646 {
14647 struct desc_ptr curgdt;
14648
14649@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
14650 }
14651
14652 control_page = page_address(image->control_code_page);
14653- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
14654+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
14655
14656 relocate_kernel_ptr = control_page;
14657 page_list[PA_CONTROL_PAGE] = __pa(control_page);
14658diff -urNp linux-3.1.1/arch/x86/kernel/microcode_intel.c linux-3.1.1/arch/x86/kernel/microcode_intel.c
14659--- linux-3.1.1/arch/x86/kernel/microcode_intel.c 2011-11-11 15:19:27.000000000 -0500
14660+++ linux-3.1.1/arch/x86/kernel/microcode_intel.c 2011-11-16 18:39:07.000000000 -0500
14661@@ -440,13 +440,13 @@ static enum ucode_state request_microcod
14662
14663 static int get_ucode_user(void *to, const void *from, size_t n)
14664 {
14665- return copy_from_user(to, from, n);
14666+ return copy_from_user(to, (const void __force_user *)from, n);
14667 }
14668
14669 static enum ucode_state
14670 request_microcode_user(int cpu, const void __user *buf, size_t size)
14671 {
14672- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
14673+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
14674 }
14675
14676 static void microcode_fini_cpu(int cpu)
14677diff -urNp linux-3.1.1/arch/x86/kernel/module.c linux-3.1.1/arch/x86/kernel/module.c
14678--- linux-3.1.1/arch/x86/kernel/module.c 2011-11-11 15:19:27.000000000 -0500
14679+++ linux-3.1.1/arch/x86/kernel/module.c 2011-11-16 18:39:07.000000000 -0500
14680@@ -36,15 +36,60 @@
14681 #define DEBUGP(fmt...)
14682 #endif
14683
14684-void *module_alloc(unsigned long size)
14685+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
14686 {
14687 if (PAGE_ALIGN(size) > MODULES_LEN)
14688 return NULL;
14689 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
14690- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
14691+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
14692 -1, __builtin_return_address(0));
14693 }
14694
14695+void *module_alloc(unsigned long size)
14696+{
14697+
14698+#ifdef CONFIG_PAX_KERNEXEC
14699+ return __module_alloc(size, PAGE_KERNEL);
14700+#else
14701+ return __module_alloc(size, PAGE_KERNEL_EXEC);
14702+#endif
14703+
14704+}
14705+
14706+#ifdef CONFIG_PAX_KERNEXEC
14707+#ifdef CONFIG_X86_32
14708+void *module_alloc_exec(unsigned long size)
14709+{
14710+ struct vm_struct *area;
14711+
14712+ if (size == 0)
14713+ return NULL;
14714+
14715+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
14716+ return area ? area->addr : NULL;
14717+}
14718+EXPORT_SYMBOL(module_alloc_exec);
14719+
14720+void module_free_exec(struct module *mod, void *module_region)
14721+{
14722+ vunmap(module_region);
14723+}
14724+EXPORT_SYMBOL(module_free_exec);
14725+#else
14726+void module_free_exec(struct module *mod, void *module_region)
14727+{
14728+ module_free(mod, module_region);
14729+}
14730+EXPORT_SYMBOL(module_free_exec);
14731+
14732+void *module_alloc_exec(unsigned long size)
14733+{
14734+ return __module_alloc(size, PAGE_KERNEL_RX);
14735+}
14736+EXPORT_SYMBOL(module_alloc_exec);
14737+#endif
14738+#endif
14739+
14740 #ifdef CONFIG_X86_32
14741 int apply_relocate(Elf32_Shdr *sechdrs,
14742 const char *strtab,
14743@@ -55,14 +100,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
14744 unsigned int i;
14745 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
14746 Elf32_Sym *sym;
14747- uint32_t *location;
14748+ uint32_t *plocation, location;
14749
14750 DEBUGP("Applying relocate section %u to %u\n", relsec,
14751 sechdrs[relsec].sh_info);
14752 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
14753 /* This is where to make the change */
14754- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
14755- + rel[i].r_offset;
14756+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
14757+ location = (uint32_t)plocation;
14758+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
14759+ plocation = ktla_ktva((void *)plocation);
14760 /* This is the symbol it is referring to. Note that all
14761 undefined symbols have been resolved. */
14762 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
14763@@ -71,11 +118,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
14764 switch (ELF32_R_TYPE(rel[i].r_info)) {
14765 case R_386_32:
14766 /* We add the value into the location given */
14767- *location += sym->st_value;
14768+ pax_open_kernel();
14769+ *plocation += sym->st_value;
14770+ pax_close_kernel();
14771 break;
14772 case R_386_PC32:
14773 /* Add the value, subtract its postition */
14774- *location += sym->st_value - (uint32_t)location;
14775+ pax_open_kernel();
14776+ *plocation += sym->st_value - location;
14777+ pax_close_kernel();
14778 break;
14779 default:
14780 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
14781@@ -120,21 +171,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
14782 case R_X86_64_NONE:
14783 break;
14784 case R_X86_64_64:
14785+ pax_open_kernel();
14786 *(u64 *)loc = val;
14787+ pax_close_kernel();
14788 break;
14789 case R_X86_64_32:
14790+ pax_open_kernel();
14791 *(u32 *)loc = val;
14792+ pax_close_kernel();
14793 if (val != *(u32 *)loc)
14794 goto overflow;
14795 break;
14796 case R_X86_64_32S:
14797+ pax_open_kernel();
14798 *(s32 *)loc = val;
14799+ pax_close_kernel();
14800 if ((s64)val != *(s32 *)loc)
14801 goto overflow;
14802 break;
14803 case R_X86_64_PC32:
14804 val -= (u64)loc;
14805+ pax_open_kernel();
14806 *(u32 *)loc = val;
14807+ pax_close_kernel();
14808+
14809 #if 0
14810 if ((s64)val != *(s32 *)loc)
14811 goto overflow;
14812diff -urNp linux-3.1.1/arch/x86/kernel/paravirt.c linux-3.1.1/arch/x86/kernel/paravirt.c
14813--- linux-3.1.1/arch/x86/kernel/paravirt.c 2011-11-11 15:19:27.000000000 -0500
14814+++ linux-3.1.1/arch/x86/kernel/paravirt.c 2011-11-17 18:29:42.000000000 -0500
14815@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
14816 {
14817 return x;
14818 }
14819+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
14820+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
14821+#endif
14822
14823 void __init default_banner(void)
14824 {
14825@@ -133,6 +136,9 @@ static void *get_call_destination(u8 typ
14826 .pv_lock_ops = pv_lock_ops,
14827 #endif
14828 };
14829+
14830+ pax_track_stack();
14831+
14832 return *((void **)&tmpl + type);
14833 }
14834
14835@@ -145,15 +151,19 @@ unsigned paravirt_patch_default(u8 type,
14836 if (opfunc == NULL)
14837 /* If there's no function, patch it with a ud2a (BUG) */
14838 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
14839- else if (opfunc == _paravirt_nop)
14840+ else if (opfunc == (void *)_paravirt_nop)
14841 /* If the operation is a nop, then nop the callsite */
14842 ret = paravirt_patch_nop();
14843
14844 /* identity functions just return their single argument */
14845- else if (opfunc == _paravirt_ident_32)
14846+ else if (opfunc == (void *)_paravirt_ident_32)
14847 ret = paravirt_patch_ident_32(insnbuf, len);
14848- else if (opfunc == _paravirt_ident_64)
14849+ else if (opfunc == (void *)_paravirt_ident_64)
14850 ret = paravirt_patch_ident_64(insnbuf, len);
14851+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
14852+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
14853+ ret = paravirt_patch_ident_64(insnbuf, len);
14854+#endif
14855
14856 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
14857 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
14858@@ -178,7 +188,7 @@ unsigned paravirt_patch_insns(void *insn
14859 if (insn_len > len || start == NULL)
14860 insn_len = len;
14861 else
14862- memcpy(insnbuf, start, insn_len);
14863+ memcpy(insnbuf, ktla_ktva(start), insn_len);
14864
14865 return insn_len;
14866 }
14867@@ -302,7 +312,7 @@ void arch_flush_lazy_mmu_mode(void)
14868 preempt_enable();
14869 }
14870
14871-struct pv_info pv_info = {
14872+struct pv_info pv_info __read_only = {
14873 .name = "bare hardware",
14874 .paravirt_enabled = 0,
14875 .kernel_rpl = 0,
14876@@ -313,16 +323,16 @@ struct pv_info pv_info = {
14877 #endif
14878 };
14879
14880-struct pv_init_ops pv_init_ops = {
14881+struct pv_init_ops pv_init_ops __read_only = {
14882 .patch = native_patch,
14883 };
14884
14885-struct pv_time_ops pv_time_ops = {
14886+struct pv_time_ops pv_time_ops __read_only = {
14887 .sched_clock = native_sched_clock,
14888 .steal_clock = native_steal_clock,
14889 };
14890
14891-struct pv_irq_ops pv_irq_ops = {
14892+struct pv_irq_ops pv_irq_ops __read_only = {
14893 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
14894 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
14895 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
14896@@ -334,7 +344,7 @@ struct pv_irq_ops pv_irq_ops = {
14897 #endif
14898 };
14899
14900-struct pv_cpu_ops pv_cpu_ops = {
14901+struct pv_cpu_ops pv_cpu_ops __read_only = {
14902 .cpuid = native_cpuid,
14903 .get_debugreg = native_get_debugreg,
14904 .set_debugreg = native_set_debugreg,
14905@@ -395,21 +405,26 @@ struct pv_cpu_ops pv_cpu_ops = {
14906 .end_context_switch = paravirt_nop,
14907 };
14908
14909-struct pv_apic_ops pv_apic_ops = {
14910+struct pv_apic_ops pv_apic_ops __read_only = {
14911 #ifdef CONFIG_X86_LOCAL_APIC
14912 .startup_ipi_hook = paravirt_nop,
14913 #endif
14914 };
14915
14916-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
14917+#ifdef CONFIG_X86_32
14918+#ifdef CONFIG_X86_PAE
14919+/* 64-bit pagetable entries */
14920+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
14921+#else
14922 /* 32-bit pagetable entries */
14923 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
14924+#endif
14925 #else
14926 /* 64-bit pagetable entries */
14927 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
14928 #endif
14929
14930-struct pv_mmu_ops pv_mmu_ops = {
14931+struct pv_mmu_ops pv_mmu_ops __read_only = {
14932
14933 .read_cr2 = native_read_cr2,
14934 .write_cr2 = native_write_cr2,
14935@@ -459,6 +474,7 @@ struct pv_mmu_ops pv_mmu_ops = {
14936 .make_pud = PTE_IDENT,
14937
14938 .set_pgd = native_set_pgd,
14939+ .set_pgd_batched = native_set_pgd_batched,
14940 #endif
14941 #endif /* PAGETABLE_LEVELS >= 3 */
14942
14943@@ -478,6 +494,12 @@ struct pv_mmu_ops pv_mmu_ops = {
14944 },
14945
14946 .set_fixmap = native_set_fixmap,
14947+
14948+#ifdef CONFIG_PAX_KERNEXEC
14949+ .pax_open_kernel = native_pax_open_kernel,
14950+ .pax_close_kernel = native_pax_close_kernel,
14951+#endif
14952+
14953 };
14954
14955 EXPORT_SYMBOL_GPL(pv_time_ops);
14956diff -urNp linux-3.1.1/arch/x86/kernel/paravirt-spinlocks.c linux-3.1.1/arch/x86/kernel/paravirt-spinlocks.c
14957--- linux-3.1.1/arch/x86/kernel/paravirt-spinlocks.c 2011-11-11 15:19:27.000000000 -0500
14958+++ linux-3.1.1/arch/x86/kernel/paravirt-spinlocks.c 2011-11-16 18:39:07.000000000 -0500
14959@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t
14960 arch_spin_lock(lock);
14961 }
14962
14963-struct pv_lock_ops pv_lock_ops = {
14964+struct pv_lock_ops pv_lock_ops __read_only = {
14965 #ifdef CONFIG_SMP
14966 .spin_is_locked = __ticket_spin_is_locked,
14967 .spin_is_contended = __ticket_spin_is_contended,
14968diff -urNp linux-3.1.1/arch/x86/kernel/pci-iommu_table.c linux-3.1.1/arch/x86/kernel/pci-iommu_table.c
14969--- linux-3.1.1/arch/x86/kernel/pci-iommu_table.c 2011-11-11 15:19:27.000000000 -0500
14970+++ linux-3.1.1/arch/x86/kernel/pci-iommu_table.c 2011-11-16 18:40:08.000000000 -0500
14971@@ -2,7 +2,7 @@
14972 #include <asm/iommu_table.h>
14973 #include <linux/string.h>
14974 #include <linux/kallsyms.h>
14975-
14976+#include <linux/sched.h>
14977
14978 #define DEBUG 1
14979
14980@@ -51,6 +51,8 @@ void __init check_iommu_entries(struct i
14981 {
14982 struct iommu_table_entry *p, *q, *x;
14983
14984+ pax_track_stack();
14985+
14986 /* Simple cyclic dependency checker. */
14987 for (p = start; p < finish; p++) {
14988 q = find_dependents_of(start, finish, p);
14989diff -urNp linux-3.1.1/arch/x86/kernel/process_32.c linux-3.1.1/arch/x86/kernel/process_32.c
14990--- linux-3.1.1/arch/x86/kernel/process_32.c 2011-11-11 15:19:27.000000000 -0500
14991+++ linux-3.1.1/arch/x86/kernel/process_32.c 2011-11-16 18:39:07.000000000 -0500
14992@@ -66,6 +66,7 @@ asmlinkage void ret_from_fork(void) __as
14993 unsigned long thread_saved_pc(struct task_struct *tsk)
14994 {
14995 return ((unsigned long *)tsk->thread.sp)[3];
14996+//XXX return tsk->thread.eip;
14997 }
14998
14999 #ifndef CONFIG_SMP
15000@@ -128,15 +129,14 @@ void __show_regs(struct pt_regs *regs, i
15001 unsigned long sp;
15002 unsigned short ss, gs;
15003
15004- if (user_mode_vm(regs)) {
15005+ if (user_mode(regs)) {
15006 sp = regs->sp;
15007 ss = regs->ss & 0xffff;
15008- gs = get_user_gs(regs);
15009 } else {
15010 sp = kernel_stack_pointer(regs);
15011 savesegment(ss, ss);
15012- savesegment(gs, gs);
15013 }
15014+ gs = get_user_gs(regs);
15015
15016 show_regs_common();
15017
15018@@ -198,13 +198,14 @@ int copy_thread(unsigned long clone_flag
15019 struct task_struct *tsk;
15020 int err;
15021
15022- childregs = task_pt_regs(p);
15023+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
15024 *childregs = *regs;
15025 childregs->ax = 0;
15026 childregs->sp = sp;
15027
15028 p->thread.sp = (unsigned long) childregs;
15029 p->thread.sp0 = (unsigned long) (childregs+1);
15030+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
15031
15032 p->thread.ip = (unsigned long) ret_from_fork;
15033
15034@@ -294,7 +295,7 @@ __switch_to(struct task_struct *prev_p,
15035 struct thread_struct *prev = &prev_p->thread,
15036 *next = &next_p->thread;
15037 int cpu = smp_processor_id();
15038- struct tss_struct *tss = &per_cpu(init_tss, cpu);
15039+ struct tss_struct *tss = init_tss + cpu;
15040 bool preload_fpu;
15041
15042 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
15043@@ -329,6 +330,10 @@ __switch_to(struct task_struct *prev_p,
15044 */
15045 lazy_save_gs(prev->gs);
15046
15047+#ifdef CONFIG_PAX_MEMORY_UDEREF
15048+ __set_fs(task_thread_info(next_p)->addr_limit);
15049+#endif
15050+
15051 /*
15052 * Load the per-thread Thread-Local Storage descriptor.
15053 */
15054@@ -364,6 +369,9 @@ __switch_to(struct task_struct *prev_p,
15055 */
15056 arch_end_context_switch(next_p);
15057
15058+ percpu_write(current_task, next_p);
15059+ percpu_write(current_tinfo, &next_p->tinfo);
15060+
15061 if (preload_fpu)
15062 __math_state_restore();
15063
15064@@ -373,8 +381,6 @@ __switch_to(struct task_struct *prev_p,
15065 if (prev->gs | next->gs)
15066 lazy_load_gs(next->gs);
15067
15068- percpu_write(current_task, next_p);
15069-
15070 return prev_p;
15071 }
15072
15073@@ -404,4 +410,3 @@ unsigned long get_wchan(struct task_stru
15074 } while (count++ < 16);
15075 return 0;
15076 }
15077-
15078diff -urNp linux-3.1.1/arch/x86/kernel/process_64.c linux-3.1.1/arch/x86/kernel/process_64.c
15079--- linux-3.1.1/arch/x86/kernel/process_64.c 2011-11-11 15:19:27.000000000 -0500
15080+++ linux-3.1.1/arch/x86/kernel/process_64.c 2011-11-16 18:39:07.000000000 -0500
15081@@ -88,7 +88,7 @@ static void __exit_idle(void)
15082 void exit_idle(void)
15083 {
15084 /* idle loop has pid 0 */
15085- if (current->pid)
15086+ if (task_pid_nr(current))
15087 return;
15088 __exit_idle();
15089 }
15090@@ -262,8 +262,7 @@ int copy_thread(unsigned long clone_flag
15091 struct pt_regs *childregs;
15092 struct task_struct *me = current;
15093
15094- childregs = ((struct pt_regs *)
15095- (THREAD_SIZE + task_stack_page(p))) - 1;
15096+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
15097 *childregs = *regs;
15098
15099 childregs->ax = 0;
15100@@ -275,6 +274,7 @@ int copy_thread(unsigned long clone_flag
15101 p->thread.sp = (unsigned long) childregs;
15102 p->thread.sp0 = (unsigned long) (childregs+1);
15103 p->thread.usersp = me->thread.usersp;
15104+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
15105
15106 set_tsk_thread_flag(p, TIF_FORK);
15107
15108@@ -377,7 +377,7 @@ __switch_to(struct task_struct *prev_p,
15109 struct thread_struct *prev = &prev_p->thread;
15110 struct thread_struct *next = &next_p->thread;
15111 int cpu = smp_processor_id();
15112- struct tss_struct *tss = &per_cpu(init_tss, cpu);
15113+ struct tss_struct *tss = init_tss + cpu;
15114 unsigned fsindex, gsindex;
15115 bool preload_fpu;
15116
15117@@ -473,10 +473,9 @@ __switch_to(struct task_struct *prev_p,
15118 prev->usersp = percpu_read(old_rsp);
15119 percpu_write(old_rsp, next->usersp);
15120 percpu_write(current_task, next_p);
15121+ percpu_write(current_tinfo, &next_p->tinfo);
15122
15123- percpu_write(kernel_stack,
15124- (unsigned long)task_stack_page(next_p) +
15125- THREAD_SIZE - KERNEL_STACK_OFFSET);
15126+ percpu_write(kernel_stack, next->sp0);
15127
15128 /*
15129 * Now maybe reload the debug registers and handle I/O bitmaps
15130@@ -538,12 +537,11 @@ unsigned long get_wchan(struct task_stru
15131 if (!p || p == current || p->state == TASK_RUNNING)
15132 return 0;
15133 stack = (unsigned long)task_stack_page(p);
15134- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
15135+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
15136 return 0;
15137 fp = *(u64 *)(p->thread.sp);
15138 do {
15139- if (fp < (unsigned long)stack ||
15140- fp >= (unsigned long)stack+THREAD_SIZE)
15141+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
15142 return 0;
15143 ip = *(u64 *)(fp+8);
15144 if (!in_sched_functions(ip))
15145diff -urNp linux-3.1.1/arch/x86/kernel/process.c linux-3.1.1/arch/x86/kernel/process.c
15146--- linux-3.1.1/arch/x86/kernel/process.c 2011-11-11 15:19:27.000000000 -0500
15147+++ linux-3.1.1/arch/x86/kernel/process.c 2011-11-16 18:39:07.000000000 -0500
15148@@ -48,16 +48,33 @@ void free_thread_xstate(struct task_stru
15149
15150 void free_thread_info(struct thread_info *ti)
15151 {
15152- free_thread_xstate(ti->task);
15153 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
15154 }
15155
15156+static struct kmem_cache *task_struct_cachep;
15157+
15158 void arch_task_cache_init(void)
15159 {
15160- task_xstate_cachep =
15161- kmem_cache_create("task_xstate", xstate_size,
15162+ /* create a slab on which task_structs can be allocated */
15163+ task_struct_cachep =
15164+ kmem_cache_create("task_struct", sizeof(struct task_struct),
15165+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
15166+
15167+ task_xstate_cachep =
15168+ kmem_cache_create("task_xstate", xstate_size,
15169 __alignof__(union thread_xstate),
15170- SLAB_PANIC | SLAB_NOTRACK, NULL);
15171+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
15172+}
15173+
15174+struct task_struct *alloc_task_struct_node(int node)
15175+{
15176+ return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
15177+}
15178+
15179+void free_task_struct(struct task_struct *task)
15180+{
15181+ free_thread_xstate(task);
15182+ kmem_cache_free(task_struct_cachep, task);
15183 }
15184
15185 /*
15186@@ -70,7 +87,7 @@ void exit_thread(void)
15187 unsigned long *bp = t->io_bitmap_ptr;
15188
15189 if (bp) {
15190- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
15191+ struct tss_struct *tss = init_tss + get_cpu();
15192
15193 t->io_bitmap_ptr = NULL;
15194 clear_thread_flag(TIF_IO_BITMAP);
15195@@ -106,7 +123,7 @@ void show_regs_common(void)
15196
15197 printk(KERN_CONT "\n");
15198 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
15199- current->pid, current->comm, print_tainted(),
15200+ task_pid_nr(current), current->comm, print_tainted(),
15201 init_utsname()->release,
15202 (int)strcspn(init_utsname()->version, " "),
15203 init_utsname()->version);
15204@@ -120,6 +137,9 @@ void flush_thread(void)
15205 {
15206 struct task_struct *tsk = current;
15207
15208+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
15209+ loadsegment(gs, 0);
15210+#endif
15211 flush_ptrace_hw_breakpoint(tsk);
15212 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
15213 /*
15214@@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), voi
15215 regs.di = (unsigned long) arg;
15216
15217 #ifdef CONFIG_X86_32
15218- regs.ds = __USER_DS;
15219- regs.es = __USER_DS;
15220+ regs.ds = __KERNEL_DS;
15221+ regs.es = __KERNEL_DS;
15222 regs.fs = __KERNEL_PERCPU;
15223- regs.gs = __KERNEL_STACK_CANARY;
15224+ savesegment(gs, regs.gs);
15225 #else
15226 regs.ss = __KERNEL_DS;
15227 #endif
15228@@ -403,7 +423,7 @@ void default_idle(void)
15229 EXPORT_SYMBOL(default_idle);
15230 #endif
15231
15232-void stop_this_cpu(void *dummy)
15233+__noreturn void stop_this_cpu(void *dummy)
15234 {
15235 local_irq_disable();
15236 /*
15237@@ -645,16 +665,37 @@ static int __init idle_setup(char *str)
15238 }
15239 early_param("idle", idle_setup);
15240
15241-unsigned long arch_align_stack(unsigned long sp)
15242+#ifdef CONFIG_PAX_RANDKSTACK
15243+void pax_randomize_kstack(struct pt_regs *regs)
15244 {
15245- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
15246- sp -= get_random_int() % 8192;
15247- return sp & ~0xf;
15248-}
15249+ struct thread_struct *thread = &current->thread;
15250+ unsigned long time;
15251
15252-unsigned long arch_randomize_brk(struct mm_struct *mm)
15253-{
15254- unsigned long range_end = mm->brk + 0x02000000;
15255- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
15256-}
15257+ if (!randomize_va_space)
15258+ return;
15259+
15260+ if (v8086_mode(regs))
15261+ return;
15262
15263+ rdtscl(time);
15264+
15265+ /* P4 seems to return a 0 LSB, ignore it */
15266+#ifdef CONFIG_MPENTIUM4
15267+ time &= 0x3EUL;
15268+ time <<= 2;
15269+#elif defined(CONFIG_X86_64)
15270+ time &= 0xFUL;
15271+ time <<= 4;
15272+#else
15273+ time &= 0x1FUL;
15274+ time <<= 3;
15275+#endif
15276+
15277+ thread->sp0 ^= time;
15278+ load_sp0(init_tss + smp_processor_id(), thread);
15279+
15280+#ifdef CONFIG_X86_64
15281+ percpu_write(kernel_stack, thread->sp0);
15282+#endif
15283+}
15284+#endif
15285diff -urNp linux-3.1.1/arch/x86/kernel/ptrace.c linux-3.1.1/arch/x86/kernel/ptrace.c
15286--- linux-3.1.1/arch/x86/kernel/ptrace.c 2011-11-11 15:19:27.000000000 -0500
15287+++ linux-3.1.1/arch/x86/kernel/ptrace.c 2011-11-16 18:39:07.000000000 -0500
15288@@ -822,7 +822,7 @@ long arch_ptrace(struct task_struct *chi
15289 unsigned long addr, unsigned long data)
15290 {
15291 int ret;
15292- unsigned long __user *datap = (unsigned long __user *)data;
15293+ unsigned long __user *datap = (__force unsigned long __user *)data;
15294
15295 switch (request) {
15296 /* read the word at location addr in the USER area. */
15297@@ -907,14 +907,14 @@ long arch_ptrace(struct task_struct *chi
15298 if ((int) addr < 0)
15299 return -EIO;
15300 ret = do_get_thread_area(child, addr,
15301- (struct user_desc __user *)data);
15302+ (__force struct user_desc __user *) data);
15303 break;
15304
15305 case PTRACE_SET_THREAD_AREA:
15306 if ((int) addr < 0)
15307 return -EIO;
15308 ret = do_set_thread_area(child, addr,
15309- (struct user_desc __user *)data, 0);
15310+ (__force struct user_desc __user *) data, 0);
15311 break;
15312 #endif
15313
15314@@ -1331,7 +1331,7 @@ static void fill_sigtrap_info(struct tas
15315 memset(info, 0, sizeof(*info));
15316 info->si_signo = SIGTRAP;
15317 info->si_code = si_code;
15318- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
15319+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
15320 }
15321
15322 void user_single_step_siginfo(struct task_struct *tsk,
15323diff -urNp linux-3.1.1/arch/x86/kernel/pvclock.c linux-3.1.1/arch/x86/kernel/pvclock.c
15324--- linux-3.1.1/arch/x86/kernel/pvclock.c 2011-11-11 15:19:27.000000000 -0500
15325+++ linux-3.1.1/arch/x86/kernel/pvclock.c 2011-11-16 18:39:07.000000000 -0500
15326@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvc
15327 return pv_tsc_khz;
15328 }
15329
15330-static atomic64_t last_value = ATOMIC64_INIT(0);
15331+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
15332
15333 void pvclock_resume(void)
15334 {
15335- atomic64_set(&last_value, 0);
15336+ atomic64_set_unchecked(&last_value, 0);
15337 }
15338
15339 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
15340@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct
15341 * updating at the same time, and one of them could be slightly behind,
15342 * making the assumption that last_value always go forward fail to hold.
15343 */
15344- last = atomic64_read(&last_value);
15345+ last = atomic64_read_unchecked(&last_value);
15346 do {
15347 if (ret < last)
15348 return last;
15349- last = atomic64_cmpxchg(&last_value, last, ret);
15350+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
15351 } while (unlikely(last != ret));
15352
15353 return ret;
15354diff -urNp linux-3.1.1/arch/x86/kernel/reboot.c linux-3.1.1/arch/x86/kernel/reboot.c
15355--- linux-3.1.1/arch/x86/kernel/reboot.c 2011-11-11 15:19:27.000000000 -0500
15356+++ linux-3.1.1/arch/x86/kernel/reboot.c 2011-11-16 18:39:07.000000000 -0500
15357@@ -35,7 +35,7 @@ void (*pm_power_off)(void);
15358 EXPORT_SYMBOL(pm_power_off);
15359
15360 static const struct desc_ptr no_idt = {};
15361-static int reboot_mode;
15362+static unsigned short reboot_mode;
15363 enum reboot_type reboot_type = BOOT_ACPI;
15364 int reboot_force;
15365
15366@@ -315,13 +315,17 @@ core_initcall(reboot_init);
15367 extern const unsigned char machine_real_restart_asm[];
15368 extern const u64 machine_real_restart_gdt[3];
15369
15370-void machine_real_restart(unsigned int type)
15371+__noreturn void machine_real_restart(unsigned int type)
15372 {
15373 void *restart_va;
15374 unsigned long restart_pa;
15375- void (*restart_lowmem)(unsigned int);
15376+ void (* __noreturn restart_lowmem)(unsigned int);
15377 u64 *lowmem_gdt;
15378
15379+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
15380+ struct desc_struct *gdt;
15381+#endif
15382+
15383 local_irq_disable();
15384
15385 /* Write zero to CMOS register number 0x0f, which the BIOS POST
15386@@ -347,14 +351,14 @@ void machine_real_restart(unsigned int t
15387 boot)". This seems like a fairly standard thing that gets set by
15388 REBOOT.COM programs, and the previous reset routine did this
15389 too. */
15390- *((unsigned short *)0x472) = reboot_mode;
15391+ *(unsigned short *)(__va(0x472)) = reboot_mode;
15392
15393 /* Patch the GDT in the low memory trampoline */
15394 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
15395
15396 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
15397 restart_pa = virt_to_phys(restart_va);
15398- restart_lowmem = (void (*)(unsigned int))restart_pa;
15399+ restart_lowmem = (void *)restart_pa;
15400
15401 /* GDT[0]: GDT self-pointer */
15402 lowmem_gdt[0] =
15403@@ -365,7 +369,33 @@ void machine_real_restart(unsigned int t
15404 GDT_ENTRY(0x009b, restart_pa, 0xffff);
15405
15406 /* Jump to the identity-mapped low memory code */
15407+
15408+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
15409+ gdt = get_cpu_gdt_table(smp_processor_id());
15410+ pax_open_kernel();
15411+#ifdef CONFIG_PAX_MEMORY_UDEREF
15412+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
15413+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
15414+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
15415+#endif
15416+#ifdef CONFIG_PAX_KERNEXEC
15417+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
15418+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
15419+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
15420+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
15421+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
15422+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
15423+#endif
15424+ pax_close_kernel();
15425+#endif
15426+
15427+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15428+ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
15429+ unreachable();
15430+#else
15431 restart_lowmem(type);
15432+#endif
15433+
15434 }
15435 #ifdef CONFIG_APM_MODULE
15436 EXPORT_SYMBOL(machine_real_restart);
15437@@ -523,7 +553,7 @@ void __attribute__((weak)) mach_reboot_f
15438 * try to force a triple fault and then cycle between hitting the keyboard
15439 * controller and doing that
15440 */
15441-static void native_machine_emergency_restart(void)
15442+__noreturn static void native_machine_emergency_restart(void)
15443 {
15444 int i;
15445 int attempt = 0;
15446@@ -647,13 +677,13 @@ void native_machine_shutdown(void)
15447 #endif
15448 }
15449
15450-static void __machine_emergency_restart(int emergency)
15451+static __noreturn void __machine_emergency_restart(int emergency)
15452 {
15453 reboot_emergency = emergency;
15454 machine_ops.emergency_restart();
15455 }
15456
15457-static void native_machine_restart(char *__unused)
15458+static __noreturn void native_machine_restart(char *__unused)
15459 {
15460 printk("machine restart\n");
15461
15462@@ -662,7 +692,7 @@ static void native_machine_restart(char
15463 __machine_emergency_restart(0);
15464 }
15465
15466-static void native_machine_halt(void)
15467+static __noreturn void native_machine_halt(void)
15468 {
15469 /* stop other cpus and apics */
15470 machine_shutdown();
15471@@ -673,7 +703,7 @@ static void native_machine_halt(void)
15472 stop_this_cpu(NULL);
15473 }
15474
15475-static void native_machine_power_off(void)
15476+__noreturn static void native_machine_power_off(void)
15477 {
15478 if (pm_power_off) {
15479 if (!reboot_force)
15480@@ -682,6 +712,7 @@ static void native_machine_power_off(voi
15481 }
15482 /* a fallback in case there is no PM info available */
15483 tboot_shutdown(TB_SHUTDOWN_HALT);
15484+ unreachable();
15485 }
15486
15487 struct machine_ops machine_ops = {
15488diff -urNp linux-3.1.1/arch/x86/kernel/setup.c linux-3.1.1/arch/x86/kernel/setup.c
15489--- linux-3.1.1/arch/x86/kernel/setup.c 2011-11-11 15:19:27.000000000 -0500
15490+++ linux-3.1.1/arch/x86/kernel/setup.c 2011-11-16 18:39:07.000000000 -0500
15491@@ -447,7 +447,7 @@ static void __init parse_setup_data(void
15492
15493 switch (data->type) {
15494 case SETUP_E820_EXT:
15495- parse_e820_ext(data);
15496+ parse_e820_ext((struct setup_data __force_kernel *)data);
15497 break;
15498 case SETUP_DTB:
15499 add_dtb(pa_data);
15500@@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
15501 * area (640->1Mb) as ram even though it is not.
15502 * take them out.
15503 */
15504- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
15505+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
15506 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
15507 }
15508
15509@@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
15510
15511 if (!boot_params.hdr.root_flags)
15512 root_mountflags &= ~MS_RDONLY;
15513- init_mm.start_code = (unsigned long) _text;
15514- init_mm.end_code = (unsigned long) _etext;
15515+ init_mm.start_code = ktla_ktva((unsigned long) _text);
15516+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
15517 init_mm.end_data = (unsigned long) _edata;
15518 init_mm.brk = _brk_end;
15519
15520- code_resource.start = virt_to_phys(_text);
15521- code_resource.end = virt_to_phys(_etext)-1;
15522- data_resource.start = virt_to_phys(_etext);
15523+ code_resource.start = virt_to_phys(ktla_ktva(_text));
15524+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
15525+ data_resource.start = virt_to_phys(_sdata);
15526 data_resource.end = virt_to_phys(_edata)-1;
15527 bss_resource.start = virt_to_phys(&__bss_start);
15528 bss_resource.end = virt_to_phys(&__bss_stop)-1;
15529diff -urNp linux-3.1.1/arch/x86/kernel/setup_percpu.c linux-3.1.1/arch/x86/kernel/setup_percpu.c
15530--- linux-3.1.1/arch/x86/kernel/setup_percpu.c 2011-11-11 15:19:27.000000000 -0500
15531+++ linux-3.1.1/arch/x86/kernel/setup_percpu.c 2011-11-16 18:39:07.000000000 -0500
15532@@ -21,19 +21,17 @@
15533 #include <asm/cpu.h>
15534 #include <asm/stackprotector.h>
15535
15536-DEFINE_PER_CPU(int, cpu_number);
15537+#ifdef CONFIG_SMP
15538+DEFINE_PER_CPU(unsigned int, cpu_number);
15539 EXPORT_PER_CPU_SYMBOL(cpu_number);
15540+#endif
15541
15542-#ifdef CONFIG_X86_64
15543 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
15544-#else
15545-#define BOOT_PERCPU_OFFSET 0
15546-#endif
15547
15548 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
15549 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
15550
15551-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
15552+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
15553 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
15554 };
15555 EXPORT_SYMBOL(__per_cpu_offset);
15556@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(
15557 {
15558 #ifdef CONFIG_X86_32
15559 struct desc_struct gdt;
15560+ unsigned long base = per_cpu_offset(cpu);
15561
15562- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
15563- 0x2 | DESCTYPE_S, 0x8);
15564- gdt.s = 1;
15565+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
15566+ 0x83 | DESCTYPE_S, 0xC);
15567 write_gdt_entry(get_cpu_gdt_table(cpu),
15568 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
15569 #endif
15570@@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
15571 /* alrighty, percpu areas up and running */
15572 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
15573 for_each_possible_cpu(cpu) {
15574+#ifdef CONFIG_CC_STACKPROTECTOR
15575+#ifdef CONFIG_X86_32
15576+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
15577+#endif
15578+#endif
15579 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
15580 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
15581 per_cpu(cpu_number, cpu) = cpu;
15582@@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
15583 */
15584 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
15585 #endif
15586+#ifdef CONFIG_CC_STACKPROTECTOR
15587+#ifdef CONFIG_X86_32
15588+ if (!cpu)
15589+ per_cpu(stack_canary.canary, cpu) = canary;
15590+#endif
15591+#endif
15592 /*
15593 * Up to this point, the boot CPU has been using .init.data
15594 * area. Reload any changed state for the boot CPU.
15595diff -urNp linux-3.1.1/arch/x86/kernel/signal.c linux-3.1.1/arch/x86/kernel/signal.c
15596--- linux-3.1.1/arch/x86/kernel/signal.c 2011-11-11 15:19:27.000000000 -0500
15597+++ linux-3.1.1/arch/x86/kernel/signal.c 2011-11-16 19:39:49.000000000 -0500
15598@@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsi
15599 * Align the stack pointer according to the i386 ABI,
15600 * i.e. so that on function entry ((sp + 4) & 15) == 0.
15601 */
15602- sp = ((sp + 4) & -16ul) - 4;
15603+ sp = ((sp - 12) & -16ul) - 4;
15604 #else /* !CONFIG_X86_32 */
15605 sp = round_down(sp, 16) - 8;
15606 #endif
15607@@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, str
15608 * Return an always-bogus address instead so we will die with SIGSEGV.
15609 */
15610 if (onsigstack && !likely(on_sig_stack(sp)))
15611- return (void __user *)-1L;
15612+ return (__force void __user *)-1L;
15613
15614 /* save i387 state */
15615 if (used_math() && save_i387_xstate(*fpstate) < 0)
15616- return (void __user *)-1L;
15617+ return (__force void __user *)-1L;
15618
15619 return (void __user *)sp;
15620 }
15621@@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigactio
15622 }
15623
15624 if (current->mm->context.vdso)
15625- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
15626+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
15627 else
15628- restorer = &frame->retcode;
15629+ restorer = (void __user *)&frame->retcode;
15630 if (ka->sa.sa_flags & SA_RESTORER)
15631 restorer = ka->sa.sa_restorer;
15632
15633@@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigactio
15634 * reasons and because gdb uses it as a signature to notice
15635 * signal handler stack frames.
15636 */
15637- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
15638+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
15639
15640 if (err)
15641 return -EFAULT;
15642@@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, str
15643 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
15644
15645 /* Set up to return from userspace. */
15646- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
15647+ if (current->mm->context.vdso)
15648+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
15649+ else
15650+ restorer = (void __user *)&frame->retcode;
15651 if (ka->sa.sa_flags & SA_RESTORER)
15652 restorer = ka->sa.sa_restorer;
15653 put_user_ex(restorer, &frame->pretcode);
15654@@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, str
15655 * reasons and because gdb uses it as a signature to notice
15656 * signal handler stack frames.
15657 */
15658- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
15659+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
15660 } put_user_catch(err);
15661
15662 if (err)
15663@@ -762,6 +765,8 @@ static void do_signal(struct pt_regs *re
15664 siginfo_t info;
15665 int signr;
15666
15667+ pax_track_stack();
15668+
15669 /*
15670 * We want the common case to go fast, which is why we may in certain
15671 * cases get here from kernel mode. Just return without doing anything
15672@@ -769,7 +774,7 @@ static void do_signal(struct pt_regs *re
15673 * X86_32: vm86 regs switched out by assembly code before reaching
15674 * here, so testing against kernel CS suffices.
15675 */
15676- if (!user_mode(regs))
15677+ if (!user_mode_novm(regs))
15678 return;
15679
15680 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
15681diff -urNp linux-3.1.1/arch/x86/kernel/smpboot.c linux-3.1.1/arch/x86/kernel/smpboot.c
15682--- linux-3.1.1/arch/x86/kernel/smpboot.c 2011-11-11 15:19:27.000000000 -0500
15683+++ linux-3.1.1/arch/x86/kernel/smpboot.c 2011-11-16 18:39:07.000000000 -0500
15684@@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int api
15685 set_idle_for_cpu(cpu, c_idle.idle);
15686 do_rest:
15687 per_cpu(current_task, cpu) = c_idle.idle;
15688+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
15689 #ifdef CONFIG_X86_32
15690 /* Stack for startup_32 can be just as for start_secondary onwards */
15691 irq_ctx_init(cpu);
15692 #else
15693 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
15694 initial_gs = per_cpu_offset(cpu);
15695- per_cpu(kernel_stack, cpu) =
15696- (unsigned long)task_stack_page(c_idle.idle) -
15697- KERNEL_STACK_OFFSET + THREAD_SIZE;
15698+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
15699 #endif
15700+
15701+ pax_open_kernel();
15702 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
15703+ pax_close_kernel();
15704+
15705 initial_code = (unsigned long)start_secondary;
15706 stack_start = c_idle.idle->thread.sp;
15707
15708@@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int
15709
15710 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
15711
15712+#ifdef CONFIG_PAX_PER_CPU_PGD
15713+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
15714+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
15715+ KERNEL_PGD_PTRS);
15716+#endif
15717+
15718 err = do_boot_cpu(apicid, cpu);
15719 if (err) {
15720 pr_debug("do_boot_cpu failed %d\n", err);
15721diff -urNp linux-3.1.1/arch/x86/kernel/step.c linux-3.1.1/arch/x86/kernel/step.c
15722--- linux-3.1.1/arch/x86/kernel/step.c 2011-11-11 15:19:27.000000000 -0500
15723+++ linux-3.1.1/arch/x86/kernel/step.c 2011-11-16 18:39:07.000000000 -0500
15724@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
15725 struct desc_struct *desc;
15726 unsigned long base;
15727
15728- seg &= ~7UL;
15729+ seg >>= 3;
15730
15731 mutex_lock(&child->mm->context.lock);
15732- if (unlikely((seg >> 3) >= child->mm->context.size))
15733+ if (unlikely(seg >= child->mm->context.size))
15734 addr = -1L; /* bogus selector, access would fault */
15735 else {
15736 desc = child->mm->context.ldt + seg;
15737@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
15738 addr += base;
15739 }
15740 mutex_unlock(&child->mm->context.lock);
15741- }
15742+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
15743+ addr = ktla_ktva(addr);
15744
15745 return addr;
15746 }
15747@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
15748 unsigned char opcode[15];
15749 unsigned long addr = convert_ip_to_linear(child, regs);
15750
15751+ if (addr == -EINVAL)
15752+ return 0;
15753+
15754 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
15755 for (i = 0; i < copied; i++) {
15756 switch (opcode[i]) {
15757diff -urNp linux-3.1.1/arch/x86/kernel/syscall_table_32.S linux-3.1.1/arch/x86/kernel/syscall_table_32.S
15758--- linux-3.1.1/arch/x86/kernel/syscall_table_32.S 2011-11-11 15:19:27.000000000 -0500
15759+++ linux-3.1.1/arch/x86/kernel/syscall_table_32.S 2011-11-16 18:39:07.000000000 -0500
15760@@ -1,3 +1,4 @@
15761+.section .rodata,"a",@progbits
15762 ENTRY(sys_call_table)
15763 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
15764 .long sys_exit
15765diff -urNp linux-3.1.1/arch/x86/kernel/sys_i386_32.c linux-3.1.1/arch/x86/kernel/sys_i386_32.c
15766--- linux-3.1.1/arch/x86/kernel/sys_i386_32.c 2011-11-11 15:19:27.000000000 -0500
15767+++ linux-3.1.1/arch/x86/kernel/sys_i386_32.c 2011-11-16 18:39:07.000000000 -0500
15768@@ -24,17 +24,224 @@
15769
15770 #include <asm/syscalls.h>
15771
15772-/*
15773- * Do a system call from kernel instead of calling sys_execve so we
15774- * end up with proper pt_regs.
15775- */
15776-int kernel_execve(const char *filename,
15777- const char *const argv[],
15778- const char *const envp[])
15779+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
15780 {
15781- long __res;
15782- asm volatile ("int $0x80"
15783- : "=a" (__res)
15784- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
15785- return __res;
15786+ unsigned long pax_task_size = TASK_SIZE;
15787+
15788+#ifdef CONFIG_PAX_SEGMEXEC
15789+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
15790+ pax_task_size = SEGMEXEC_TASK_SIZE;
15791+#endif
15792+
15793+ if (len > pax_task_size || addr > pax_task_size - len)
15794+ return -EINVAL;
15795+
15796+ return 0;
15797+}
15798+
15799+unsigned long
15800+arch_get_unmapped_area(struct file *filp, unsigned long addr,
15801+ unsigned long len, unsigned long pgoff, unsigned long flags)
15802+{
15803+ struct mm_struct *mm = current->mm;
15804+ struct vm_area_struct *vma;
15805+ unsigned long start_addr, pax_task_size = TASK_SIZE;
15806+
15807+#ifdef CONFIG_PAX_SEGMEXEC
15808+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
15809+ pax_task_size = SEGMEXEC_TASK_SIZE;
15810+#endif
15811+
15812+ pax_task_size -= PAGE_SIZE;
15813+
15814+ if (len > pax_task_size)
15815+ return -ENOMEM;
15816+
15817+ if (flags & MAP_FIXED)
15818+ return addr;
15819+
15820+#ifdef CONFIG_PAX_RANDMMAP
15821+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15822+#endif
15823+
15824+ if (addr) {
15825+ addr = PAGE_ALIGN(addr);
15826+ if (pax_task_size - len >= addr) {
15827+ vma = find_vma(mm, addr);
15828+ if (check_heap_stack_gap(vma, addr, len))
15829+ return addr;
15830+ }
15831+ }
15832+ if (len > mm->cached_hole_size) {
15833+ start_addr = addr = mm->free_area_cache;
15834+ } else {
15835+ start_addr = addr = mm->mmap_base;
15836+ mm->cached_hole_size = 0;
15837+ }
15838+
15839+#ifdef CONFIG_PAX_PAGEEXEC
15840+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
15841+ start_addr = 0x00110000UL;
15842+
15843+#ifdef CONFIG_PAX_RANDMMAP
15844+ if (mm->pax_flags & MF_PAX_RANDMMAP)
15845+ start_addr += mm->delta_mmap & 0x03FFF000UL;
15846+#endif
15847+
15848+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
15849+ start_addr = addr = mm->mmap_base;
15850+ else
15851+ addr = start_addr;
15852+ }
15853+#endif
15854+
15855+full_search:
15856+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
15857+ /* At this point: (!vma || addr < vma->vm_end). */
15858+ if (pax_task_size - len < addr) {
15859+ /*
15860+ * Start a new search - just in case we missed
15861+ * some holes.
15862+ */
15863+ if (start_addr != mm->mmap_base) {
15864+ start_addr = addr = mm->mmap_base;
15865+ mm->cached_hole_size = 0;
15866+ goto full_search;
15867+ }
15868+ return -ENOMEM;
15869+ }
15870+ if (check_heap_stack_gap(vma, addr, len))
15871+ break;
15872+ if (addr + mm->cached_hole_size < vma->vm_start)
15873+ mm->cached_hole_size = vma->vm_start - addr;
15874+ addr = vma->vm_end;
15875+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
15876+ start_addr = addr = mm->mmap_base;
15877+ mm->cached_hole_size = 0;
15878+ goto full_search;
15879+ }
15880+ }
15881+
15882+ /*
15883+ * Remember the place where we stopped the search:
15884+ */
15885+ mm->free_area_cache = addr + len;
15886+ return addr;
15887+}
15888+
15889+unsigned long
15890+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
15891+ const unsigned long len, const unsigned long pgoff,
15892+ const unsigned long flags)
15893+{
15894+ struct vm_area_struct *vma;
15895+ struct mm_struct *mm = current->mm;
15896+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
15897+
15898+#ifdef CONFIG_PAX_SEGMEXEC
15899+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
15900+ pax_task_size = SEGMEXEC_TASK_SIZE;
15901+#endif
15902+
15903+ pax_task_size -= PAGE_SIZE;
15904+
15905+ /* requested length too big for entire address space */
15906+ if (len > pax_task_size)
15907+ return -ENOMEM;
15908+
15909+ if (flags & MAP_FIXED)
15910+ return addr;
15911+
15912+#ifdef CONFIG_PAX_PAGEEXEC
15913+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
15914+ goto bottomup;
15915+#endif
15916+
15917+#ifdef CONFIG_PAX_RANDMMAP
15918+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15919+#endif
15920+
15921+ /* requesting a specific address */
15922+ if (addr) {
15923+ addr = PAGE_ALIGN(addr);
15924+ if (pax_task_size - len >= addr) {
15925+ vma = find_vma(mm, addr);
15926+ if (check_heap_stack_gap(vma, addr, len))
15927+ return addr;
15928+ }
15929+ }
15930+
15931+ /* check if free_area_cache is useful for us */
15932+ if (len <= mm->cached_hole_size) {
15933+ mm->cached_hole_size = 0;
15934+ mm->free_area_cache = mm->mmap_base;
15935+ }
15936+
15937+ /* either no address requested or can't fit in requested address hole */
15938+ addr = mm->free_area_cache;
15939+
15940+ /* make sure it can fit in the remaining address space */
15941+ if (addr > len) {
15942+ vma = find_vma(mm, addr-len);
15943+ if (check_heap_stack_gap(vma, addr - len, len))
15944+ /* remember the address as a hint for next time */
15945+ return (mm->free_area_cache = addr-len);
15946+ }
15947+
15948+ if (mm->mmap_base < len)
15949+ goto bottomup;
15950+
15951+ addr = mm->mmap_base-len;
15952+
15953+ do {
15954+ /*
15955+ * Lookup failure means no vma is above this address,
15956+ * else if new region fits below vma->vm_start,
15957+ * return with success:
15958+ */
15959+ vma = find_vma(mm, addr);
15960+ if (check_heap_stack_gap(vma, addr, len))
15961+ /* remember the address as a hint for next time */
15962+ return (mm->free_area_cache = addr);
15963+
15964+ /* remember the largest hole we saw so far */
15965+ if (addr + mm->cached_hole_size < vma->vm_start)
15966+ mm->cached_hole_size = vma->vm_start - addr;
15967+
15968+ /* try just below the current vma->vm_start */
15969+ addr = skip_heap_stack_gap(vma, len);
15970+ } while (!IS_ERR_VALUE(addr));
15971+
15972+bottomup:
15973+ /*
15974+ * A failed mmap() very likely causes application failure,
15975+ * so fall back to the bottom-up function here. This scenario
15976+ * can happen with large stack limits and large mmap()
15977+ * allocations.
15978+ */
15979+
15980+#ifdef CONFIG_PAX_SEGMEXEC
15981+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
15982+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
15983+ else
15984+#endif
15985+
15986+ mm->mmap_base = TASK_UNMAPPED_BASE;
15987+
15988+#ifdef CONFIG_PAX_RANDMMAP
15989+ if (mm->pax_flags & MF_PAX_RANDMMAP)
15990+ mm->mmap_base += mm->delta_mmap;
15991+#endif
15992+
15993+ mm->free_area_cache = mm->mmap_base;
15994+ mm->cached_hole_size = ~0UL;
15995+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15996+ /*
15997+ * Restore the topdown base:
15998+ */
15999+ mm->mmap_base = base;
16000+ mm->free_area_cache = base;
16001+ mm->cached_hole_size = ~0UL;
16002+
16003+ return addr;
16004 }
16005diff -urNp linux-3.1.1/arch/x86/kernel/sys_x86_64.c linux-3.1.1/arch/x86/kernel/sys_x86_64.c
16006--- linux-3.1.1/arch/x86/kernel/sys_x86_64.c 2011-11-11 15:19:27.000000000 -0500
16007+++ linux-3.1.1/arch/x86/kernel/sys_x86_64.c 2011-11-16 18:39:07.000000000 -0500
16008@@ -32,8 +32,8 @@ out:
16009 return error;
16010 }
16011
16012-static void find_start_end(unsigned long flags, unsigned long *begin,
16013- unsigned long *end)
16014+static void find_start_end(struct mm_struct *mm, unsigned long flags,
16015+ unsigned long *begin, unsigned long *end)
16016 {
16017 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
16018 unsigned long new_begin;
16019@@ -52,7 +52,7 @@ static void find_start_end(unsigned long
16020 *begin = new_begin;
16021 }
16022 } else {
16023- *begin = TASK_UNMAPPED_BASE;
16024+ *begin = mm->mmap_base;
16025 *end = TASK_SIZE;
16026 }
16027 }
16028@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
16029 if (flags & MAP_FIXED)
16030 return addr;
16031
16032- find_start_end(flags, &begin, &end);
16033+ find_start_end(mm, flags, &begin, &end);
16034
16035 if (len > end)
16036 return -ENOMEM;
16037
16038+#ifdef CONFIG_PAX_RANDMMAP
16039+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16040+#endif
16041+
16042 if (addr) {
16043 addr = PAGE_ALIGN(addr);
16044 vma = find_vma(mm, addr);
16045- if (end - len >= addr &&
16046- (!vma || addr + len <= vma->vm_start))
16047+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
16048 return addr;
16049 }
16050 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
16051@@ -106,7 +109,7 @@ full_search:
16052 }
16053 return -ENOMEM;
16054 }
16055- if (!vma || addr + len <= vma->vm_start) {
16056+ if (check_heap_stack_gap(vma, addr, len)) {
16057 /*
16058 * Remember the place where we stopped the search:
16059 */
16060@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
16061 {
16062 struct vm_area_struct *vma;
16063 struct mm_struct *mm = current->mm;
16064- unsigned long addr = addr0;
16065+ unsigned long base = mm->mmap_base, addr = addr0;
16066
16067 /* requested length too big for entire address space */
16068 if (len > TASK_SIZE)
16069@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
16070 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
16071 goto bottomup;
16072
16073+#ifdef CONFIG_PAX_RANDMMAP
16074+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16075+#endif
16076+
16077 /* requesting a specific address */
16078 if (addr) {
16079 addr = PAGE_ALIGN(addr);
16080- vma = find_vma(mm, addr);
16081- if (TASK_SIZE - len >= addr &&
16082- (!vma || addr + len <= vma->vm_start))
16083- return addr;
16084+ if (TASK_SIZE - len >= addr) {
16085+ vma = find_vma(mm, addr);
16086+ if (check_heap_stack_gap(vma, addr, len))
16087+ return addr;
16088+ }
16089 }
16090
16091 /* check if free_area_cache is useful for us */
16092@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
16093 /* make sure it can fit in the remaining address space */
16094 if (addr > len) {
16095 vma = find_vma(mm, addr-len);
16096- if (!vma || addr <= vma->vm_start)
16097+ if (check_heap_stack_gap(vma, addr - len, len))
16098 /* remember the address as a hint for next time */
16099 return mm->free_area_cache = addr-len;
16100 }
16101@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
16102 * return with success:
16103 */
16104 vma = find_vma(mm, addr);
16105- if (!vma || addr+len <= vma->vm_start)
16106+ if (check_heap_stack_gap(vma, addr, len))
16107 /* remember the address as a hint for next time */
16108 return mm->free_area_cache = addr;
16109
16110@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
16111 mm->cached_hole_size = vma->vm_start - addr;
16112
16113 /* try just below the current vma->vm_start */
16114- addr = vma->vm_start-len;
16115- } while (len < vma->vm_start);
16116+ addr = skip_heap_stack_gap(vma, len);
16117+ } while (!IS_ERR_VALUE(addr));
16118
16119 bottomup:
16120 /*
16121@@ -198,13 +206,21 @@ bottomup:
16122 * can happen with large stack limits and large mmap()
16123 * allocations.
16124 */
16125+ mm->mmap_base = TASK_UNMAPPED_BASE;
16126+
16127+#ifdef CONFIG_PAX_RANDMMAP
16128+ if (mm->pax_flags & MF_PAX_RANDMMAP)
16129+ mm->mmap_base += mm->delta_mmap;
16130+#endif
16131+
16132+ mm->free_area_cache = mm->mmap_base;
16133 mm->cached_hole_size = ~0UL;
16134- mm->free_area_cache = TASK_UNMAPPED_BASE;
16135 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
16136 /*
16137 * Restore the topdown base:
16138 */
16139- mm->free_area_cache = mm->mmap_base;
16140+ mm->mmap_base = base;
16141+ mm->free_area_cache = base;
16142 mm->cached_hole_size = ~0UL;
16143
16144 return addr;
16145diff -urNp linux-3.1.1/arch/x86/kernel/tboot.c linux-3.1.1/arch/x86/kernel/tboot.c
16146--- linux-3.1.1/arch/x86/kernel/tboot.c 2011-11-11 15:19:27.000000000 -0500
16147+++ linux-3.1.1/arch/x86/kernel/tboot.c 2011-11-16 18:39:07.000000000 -0500
16148@@ -218,7 +218,7 @@ static int tboot_setup_sleep(void)
16149
16150 void tboot_shutdown(u32 shutdown_type)
16151 {
16152- void (*shutdown)(void);
16153+ void (* __noreturn shutdown)(void);
16154
16155 if (!tboot_enabled())
16156 return;
16157@@ -240,7 +240,7 @@ void tboot_shutdown(u32 shutdown_type)
16158
16159 switch_to_tboot_pt();
16160
16161- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
16162+ shutdown = (void *)tboot->shutdown_entry;
16163 shutdown();
16164
16165 /* should not reach here */
16166@@ -297,7 +297,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
16167 tboot_shutdown(acpi_shutdown_map[sleep_state]);
16168 }
16169
16170-static atomic_t ap_wfs_count;
16171+static atomic_unchecked_t ap_wfs_count;
16172
16173 static int tboot_wait_for_aps(int num_aps)
16174 {
16175@@ -321,9 +321,9 @@ static int __cpuinit tboot_cpu_callback(
16176 {
16177 switch (action) {
16178 case CPU_DYING:
16179- atomic_inc(&ap_wfs_count);
16180+ atomic_inc_unchecked(&ap_wfs_count);
16181 if (num_online_cpus() == 1)
16182- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
16183+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
16184 return NOTIFY_BAD;
16185 break;
16186 }
16187@@ -342,7 +342,7 @@ static __init int tboot_late_init(void)
16188
16189 tboot_create_trampoline();
16190
16191- atomic_set(&ap_wfs_count, 0);
16192+ atomic_set_unchecked(&ap_wfs_count, 0);
16193 register_hotcpu_notifier(&tboot_cpu_notifier);
16194 return 0;
16195 }
16196diff -urNp linux-3.1.1/arch/x86/kernel/time.c linux-3.1.1/arch/x86/kernel/time.c
16197--- linux-3.1.1/arch/x86/kernel/time.c 2011-11-11 15:19:27.000000000 -0500
16198+++ linux-3.1.1/arch/x86/kernel/time.c 2011-11-16 18:39:07.000000000 -0500
16199@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs
16200 {
16201 unsigned long pc = instruction_pointer(regs);
16202
16203- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
16204+ if (!user_mode(regs) && in_lock_functions(pc)) {
16205 #ifdef CONFIG_FRAME_POINTER
16206- return *(unsigned long *)(regs->bp + sizeof(long));
16207+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
16208 #else
16209 unsigned long *sp =
16210 (unsigned long *)kernel_stack_pointer(regs);
16211@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
16212 * or above a saved flags. Eflags has bits 22-31 zero,
16213 * kernel addresses don't.
16214 */
16215+
16216+#ifdef CONFIG_PAX_KERNEXEC
16217+ return ktla_ktva(sp[0]);
16218+#else
16219 if (sp[0] >> 22)
16220 return sp[0];
16221 if (sp[1] >> 22)
16222 return sp[1];
16223 #endif
16224+
16225+#endif
16226 }
16227 return pc;
16228 }
16229diff -urNp linux-3.1.1/arch/x86/kernel/tls.c linux-3.1.1/arch/x86/kernel/tls.c
16230--- linux-3.1.1/arch/x86/kernel/tls.c 2011-11-11 15:19:27.000000000 -0500
16231+++ linux-3.1.1/arch/x86/kernel/tls.c 2011-11-16 18:39:07.000000000 -0500
16232@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
16233 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
16234 return -EINVAL;
16235
16236+#ifdef CONFIG_PAX_SEGMEXEC
16237+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
16238+ return -EINVAL;
16239+#endif
16240+
16241 set_tls_desc(p, idx, &info, 1);
16242
16243 return 0;
16244diff -urNp linux-3.1.1/arch/x86/kernel/trampoline_32.S linux-3.1.1/arch/x86/kernel/trampoline_32.S
16245--- linux-3.1.1/arch/x86/kernel/trampoline_32.S 2011-11-11 15:19:27.000000000 -0500
16246+++ linux-3.1.1/arch/x86/kernel/trampoline_32.S 2011-11-16 18:39:07.000000000 -0500
16247@@ -32,6 +32,12 @@
16248 #include <asm/segment.h>
16249 #include <asm/page_types.h>
16250
16251+#ifdef CONFIG_PAX_KERNEXEC
16252+#define ta(X) (X)
16253+#else
16254+#define ta(X) ((X) - __PAGE_OFFSET)
16255+#endif
16256+
16257 #ifdef CONFIG_SMP
16258
16259 .section ".x86_trampoline","a"
16260@@ -62,7 +68,7 @@ r_base = .
16261 inc %ax # protected mode (PE) bit
16262 lmsw %ax # into protected mode
16263 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
16264- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
16265+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
16266
16267 # These need to be in the same 64K segment as the above;
16268 # hence we don't use the boot_gdt_descr defined in head.S
16269diff -urNp linux-3.1.1/arch/x86/kernel/trampoline_64.S linux-3.1.1/arch/x86/kernel/trampoline_64.S
16270--- linux-3.1.1/arch/x86/kernel/trampoline_64.S 2011-11-11 15:19:27.000000000 -0500
16271+++ linux-3.1.1/arch/x86/kernel/trampoline_64.S 2011-11-16 18:39:07.000000000 -0500
16272@@ -90,7 +90,7 @@ startup_32:
16273 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
16274 movl %eax, %ds
16275
16276- movl $X86_CR4_PAE, %eax
16277+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
16278 movl %eax, %cr4 # Enable PAE mode
16279
16280 # Setup trampoline 4 level pagetables
16281@@ -138,7 +138,7 @@ tidt:
16282 # so the kernel can live anywhere
16283 .balign 4
16284 tgdt:
16285- .short tgdt_end - tgdt # gdt limit
16286+ .short tgdt_end - tgdt - 1 # gdt limit
16287 .long tgdt - r_base
16288 .short 0
16289 .quad 0x00cf9b000000ffff # __KERNEL32_CS
16290diff -urNp linux-3.1.1/arch/x86/kernel/traps.c linux-3.1.1/arch/x86/kernel/traps.c
16291--- linux-3.1.1/arch/x86/kernel/traps.c 2011-11-11 15:19:27.000000000 -0500
16292+++ linux-3.1.1/arch/x86/kernel/traps.c 2011-11-16 18:39:07.000000000 -0500
16293@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
16294
16295 /* Do we ignore FPU interrupts ? */
16296 char ignore_fpu_irq;
16297-
16298-/*
16299- * The IDT has to be page-aligned to simplify the Pentium
16300- * F0 0F bug workaround.
16301- */
16302-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
16303 #endif
16304
16305 DECLARE_BITMAP(used_vectors, NR_VECTORS);
16306@@ -117,13 +111,13 @@ static inline void preempt_conditional_c
16307 }
16308
16309 static void __kprobes
16310-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
16311+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
16312 long error_code, siginfo_t *info)
16313 {
16314 struct task_struct *tsk = current;
16315
16316 #ifdef CONFIG_X86_32
16317- if (regs->flags & X86_VM_MASK) {
16318+ if (v8086_mode(regs)) {
16319 /*
16320 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
16321 * On nmi (interrupt 2), do_trap should not be called.
16322@@ -134,7 +128,7 @@ do_trap(int trapnr, int signr, char *str
16323 }
16324 #endif
16325
16326- if (!user_mode(regs))
16327+ if (!user_mode_novm(regs))
16328 goto kernel_trap;
16329
16330 #ifdef CONFIG_X86_32
16331@@ -157,7 +151,7 @@ trap_signal:
16332 printk_ratelimit()) {
16333 printk(KERN_INFO
16334 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
16335- tsk->comm, tsk->pid, str,
16336+ tsk->comm, task_pid_nr(tsk), str,
16337 regs->ip, regs->sp, error_code);
16338 print_vma_addr(" in ", regs->ip);
16339 printk("\n");
16340@@ -174,8 +168,20 @@ kernel_trap:
16341 if (!fixup_exception(regs)) {
16342 tsk->thread.error_code = error_code;
16343 tsk->thread.trap_no = trapnr;
16344+
16345+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16346+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
16347+ str = "PAX: suspicious stack segment fault";
16348+#endif
16349+
16350 die(str, regs, error_code);
16351 }
16352+
16353+#ifdef CONFIG_PAX_REFCOUNT
16354+ if (trapnr == 4)
16355+ pax_report_refcount_overflow(regs);
16356+#endif
16357+
16358 return;
16359
16360 #ifdef CONFIG_X86_32
16361@@ -264,14 +270,30 @@ do_general_protection(struct pt_regs *re
16362 conditional_sti(regs);
16363
16364 #ifdef CONFIG_X86_32
16365- if (regs->flags & X86_VM_MASK)
16366+ if (v8086_mode(regs))
16367 goto gp_in_vm86;
16368 #endif
16369
16370 tsk = current;
16371- if (!user_mode(regs))
16372+ if (!user_mode_novm(regs))
16373 goto gp_in_kernel;
16374
16375+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
16376+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
16377+ struct mm_struct *mm = tsk->mm;
16378+ unsigned long limit;
16379+
16380+ down_write(&mm->mmap_sem);
16381+ limit = mm->context.user_cs_limit;
16382+ if (limit < TASK_SIZE) {
16383+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
16384+ up_write(&mm->mmap_sem);
16385+ return;
16386+ }
16387+ up_write(&mm->mmap_sem);
16388+ }
16389+#endif
16390+
16391 tsk->thread.error_code = error_code;
16392 tsk->thread.trap_no = 13;
16393
16394@@ -304,6 +326,13 @@ gp_in_kernel:
16395 if (notify_die(DIE_GPF, "general protection fault", regs,
16396 error_code, 13, SIGSEGV) == NOTIFY_STOP)
16397 return;
16398+
16399+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16400+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
16401+ die("PAX: suspicious general protection fault", regs, error_code);
16402+ else
16403+#endif
16404+
16405 die("general protection fault", regs, error_code);
16406 }
16407
16408@@ -433,6 +462,17 @@ static notrace __kprobes void default_do
16409 dotraplinkage notrace __kprobes void
16410 do_nmi(struct pt_regs *regs, long error_code)
16411 {
16412+
16413+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16414+ if (!user_mode(regs)) {
16415+ unsigned long cs = regs->cs & 0xFFFF;
16416+ unsigned long ip = ktva_ktla(regs->ip);
16417+
16418+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
16419+ regs->ip = ip;
16420+ }
16421+#endif
16422+
16423 nmi_enter();
16424
16425 inc_irq_stat(__nmi_count);
16426@@ -569,7 +609,7 @@ dotraplinkage void __kprobes do_debug(st
16427 /* It's safe to allow irq's after DR6 has been saved */
16428 preempt_conditional_sti(regs);
16429
16430- if (regs->flags & X86_VM_MASK) {
16431+ if (v8086_mode(regs)) {
16432 handle_vm86_trap((struct kernel_vm86_regs *) regs,
16433 error_code, 1);
16434 preempt_conditional_cli(regs);
16435@@ -583,7 +623,7 @@ dotraplinkage void __kprobes do_debug(st
16436 * We already checked v86 mode above, so we can check for kernel mode
16437 * by just checking the CPL of CS.
16438 */
16439- if ((dr6 & DR_STEP) && !user_mode(regs)) {
16440+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
16441 tsk->thread.debugreg6 &= ~DR_STEP;
16442 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
16443 regs->flags &= ~X86_EFLAGS_TF;
16444@@ -612,7 +652,7 @@ void math_error(struct pt_regs *regs, in
16445 return;
16446 conditional_sti(regs);
16447
16448- if (!user_mode_vm(regs))
16449+ if (!user_mode(regs))
16450 {
16451 if (!fixup_exception(regs)) {
16452 task->thread.error_code = error_code;
16453@@ -723,7 +763,7 @@ asmlinkage void __attribute__((weak)) sm
16454 void __math_state_restore(void)
16455 {
16456 struct thread_info *thread = current_thread_info();
16457- struct task_struct *tsk = thread->task;
16458+ struct task_struct *tsk = current;
16459
16460 /*
16461 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
16462@@ -750,8 +790,7 @@ void __math_state_restore(void)
16463 */
16464 asmlinkage void math_state_restore(void)
16465 {
16466- struct thread_info *thread = current_thread_info();
16467- struct task_struct *tsk = thread->task;
16468+ struct task_struct *tsk = current;
16469
16470 if (!tsk_used_math(tsk)) {
16471 local_irq_enable();
16472diff -urNp linux-3.1.1/arch/x86/kernel/verify_cpu.S linux-3.1.1/arch/x86/kernel/verify_cpu.S
16473--- linux-3.1.1/arch/x86/kernel/verify_cpu.S 2011-11-11 15:19:27.000000000 -0500
16474+++ linux-3.1.1/arch/x86/kernel/verify_cpu.S 2011-11-16 18:40:08.000000000 -0500
16475@@ -20,6 +20,7 @@
16476 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
16477 * arch/x86/kernel/trampoline_64.S: secondary processor verification
16478 * arch/x86/kernel/head_32.S: processor startup
16479+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
16480 *
16481 * verify_cpu, returns the status of longmode and SSE in register %eax.
16482 * 0: Success 1: Failure
16483diff -urNp linux-3.1.1/arch/x86/kernel/vm86_32.c linux-3.1.1/arch/x86/kernel/vm86_32.c
16484--- linux-3.1.1/arch/x86/kernel/vm86_32.c 2011-11-11 15:19:27.000000000 -0500
16485+++ linux-3.1.1/arch/x86/kernel/vm86_32.c 2011-11-16 18:40:08.000000000 -0500
16486@@ -41,6 +41,7 @@
16487 #include <linux/ptrace.h>
16488 #include <linux/audit.h>
16489 #include <linux/stddef.h>
16490+#include <linux/grsecurity.h>
16491
16492 #include <asm/uaccess.h>
16493 #include <asm/io.h>
16494@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
16495 do_exit(SIGSEGV);
16496 }
16497
16498- tss = &per_cpu(init_tss, get_cpu());
16499+ tss = init_tss + get_cpu();
16500 current->thread.sp0 = current->thread.saved_sp0;
16501 current->thread.sysenter_cs = __KERNEL_CS;
16502 load_sp0(tss, &current->thread);
16503@@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __use
16504 struct task_struct *tsk;
16505 int tmp, ret = -EPERM;
16506
16507+#ifdef CONFIG_GRKERNSEC_VM86
16508+ if (!capable(CAP_SYS_RAWIO)) {
16509+ gr_handle_vm86();
16510+ goto out;
16511+ }
16512+#endif
16513+
16514 tsk = current;
16515 if (tsk->thread.saved_sp0)
16516 goto out;
16517@@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned
16518 int tmp, ret;
16519 struct vm86plus_struct __user *v86;
16520
16521+#ifdef CONFIG_GRKERNSEC_VM86
16522+ if (!capable(CAP_SYS_RAWIO)) {
16523+ gr_handle_vm86();
16524+ ret = -EPERM;
16525+ goto out;
16526+ }
16527+#endif
16528+
16529 tsk = current;
16530 switch (cmd) {
16531 case VM86_REQUEST_IRQ:
16532@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
16533 tsk->thread.saved_fs = info->regs32->fs;
16534 tsk->thread.saved_gs = get_user_gs(info->regs32);
16535
16536- tss = &per_cpu(init_tss, get_cpu());
16537+ tss = init_tss + get_cpu();
16538 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
16539 if (cpu_has_sep)
16540 tsk->thread.sysenter_cs = 0;
16541@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
16542 goto cannot_handle;
16543 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
16544 goto cannot_handle;
16545- intr_ptr = (unsigned long __user *) (i << 2);
16546+ intr_ptr = (__force unsigned long __user *) (i << 2);
16547 if (get_user(segoffs, intr_ptr))
16548 goto cannot_handle;
16549 if ((segoffs >> 16) == BIOSSEG)
16550diff -urNp linux-3.1.1/arch/x86/kernel/vmlinux.lds.S linux-3.1.1/arch/x86/kernel/vmlinux.lds.S
16551--- linux-3.1.1/arch/x86/kernel/vmlinux.lds.S 2011-11-11 15:19:27.000000000 -0500
16552+++ linux-3.1.1/arch/x86/kernel/vmlinux.lds.S 2011-11-16 18:39:07.000000000 -0500
16553@@ -26,6 +26,13 @@
16554 #include <asm/page_types.h>
16555 #include <asm/cache.h>
16556 #include <asm/boot.h>
16557+#include <asm/segment.h>
16558+
16559+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16560+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
16561+#else
16562+#define __KERNEL_TEXT_OFFSET 0
16563+#endif
16564
16565 #undef i386 /* in case the preprocessor is a 32bit one */
16566
16567@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
16568
16569 PHDRS {
16570 text PT_LOAD FLAGS(5); /* R_E */
16571+#ifdef CONFIG_X86_32
16572+ module PT_LOAD FLAGS(5); /* R_E */
16573+#endif
16574+#ifdef CONFIG_XEN
16575+ rodata PT_LOAD FLAGS(5); /* R_E */
16576+#else
16577+ rodata PT_LOAD FLAGS(4); /* R__ */
16578+#endif
16579 data PT_LOAD FLAGS(6); /* RW_ */
16580-#ifdef CONFIG_X86_64
16581+ init.begin PT_LOAD FLAGS(6); /* RW_ */
16582 #ifdef CONFIG_SMP
16583 percpu PT_LOAD FLAGS(6); /* RW_ */
16584 #endif
16585+ text.init PT_LOAD FLAGS(5); /* R_E */
16586+ text.exit PT_LOAD FLAGS(5); /* R_E */
16587 init PT_LOAD FLAGS(7); /* RWE */
16588-#endif
16589 note PT_NOTE FLAGS(0); /* ___ */
16590 }
16591
16592 SECTIONS
16593 {
16594 #ifdef CONFIG_X86_32
16595- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
16596- phys_startup_32 = startup_32 - LOAD_OFFSET;
16597+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
16598 #else
16599- . = __START_KERNEL;
16600- phys_startup_64 = startup_64 - LOAD_OFFSET;
16601+ . = __START_KERNEL;
16602 #endif
16603
16604 /* Text and read-only data */
16605- .text : AT(ADDR(.text) - LOAD_OFFSET) {
16606- _text = .;
16607+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
16608 /* bootstrapping code */
16609+#ifdef CONFIG_X86_32
16610+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
16611+#else
16612+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
16613+#endif
16614+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
16615+ _text = .;
16616 HEAD_TEXT
16617 #ifdef CONFIG_X86_32
16618 . = ALIGN(PAGE_SIZE);
16619@@ -108,13 +128,47 @@ SECTIONS
16620 IRQENTRY_TEXT
16621 *(.fixup)
16622 *(.gnu.warning)
16623- /* End of text section */
16624- _etext = .;
16625 } :text = 0x9090
16626
16627- NOTES :text :note
16628+ . += __KERNEL_TEXT_OFFSET;
16629+
16630+#ifdef CONFIG_X86_32
16631+ . = ALIGN(PAGE_SIZE);
16632+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
16633+
16634+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
16635+ MODULES_EXEC_VADDR = .;
16636+ BYTE(0)
16637+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
16638+ . = ALIGN(HPAGE_SIZE);
16639+ MODULES_EXEC_END = . - 1;
16640+#endif
16641+
16642+ } :module
16643+#endif
16644+
16645+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
16646+ /* End of text section */
16647+ _etext = . - __KERNEL_TEXT_OFFSET;
16648+ }
16649+
16650+#ifdef CONFIG_X86_32
16651+ . = ALIGN(PAGE_SIZE);
16652+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
16653+ *(.idt)
16654+ . = ALIGN(PAGE_SIZE);
16655+ *(.empty_zero_page)
16656+ *(.initial_pg_fixmap)
16657+ *(.initial_pg_pmd)
16658+ *(.initial_page_table)
16659+ *(.swapper_pg_dir)
16660+ } :rodata
16661+#endif
16662+
16663+ . = ALIGN(PAGE_SIZE);
16664+ NOTES :rodata :note
16665
16666- EXCEPTION_TABLE(16) :text = 0x9090
16667+ EXCEPTION_TABLE(16) :rodata
16668
16669 #if defined(CONFIG_DEBUG_RODATA)
16670 /* .text should occupy whole number of pages */
16671@@ -126,16 +180,20 @@ SECTIONS
16672
16673 /* Data */
16674 .data : AT(ADDR(.data) - LOAD_OFFSET) {
16675+
16676+#ifdef CONFIG_PAX_KERNEXEC
16677+ . = ALIGN(HPAGE_SIZE);
16678+#else
16679+ . = ALIGN(PAGE_SIZE);
16680+#endif
16681+
16682 /* Start of data section */
16683 _sdata = .;
16684
16685 /* init_task */
16686 INIT_TASK_DATA(THREAD_SIZE)
16687
16688-#ifdef CONFIG_X86_32
16689- /* 32 bit has nosave before _edata */
16690 NOSAVE_DATA
16691-#endif
16692
16693 PAGE_ALIGNED_DATA(PAGE_SIZE)
16694
16695@@ -176,12 +234,19 @@ SECTIONS
16696 #endif /* CONFIG_X86_64 */
16697
16698 /* Init code and data - will be freed after init */
16699- . = ALIGN(PAGE_SIZE);
16700 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
16701+ BYTE(0)
16702+
16703+#ifdef CONFIG_PAX_KERNEXEC
16704+ . = ALIGN(HPAGE_SIZE);
16705+#else
16706+ . = ALIGN(PAGE_SIZE);
16707+#endif
16708+
16709 __init_begin = .; /* paired with __init_end */
16710- }
16711+ } :init.begin
16712
16713-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
16714+#ifdef CONFIG_SMP
16715 /*
16716 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
16717 * output PHDR, so the next output section - .init.text - should
16718@@ -190,12 +255,27 @@ SECTIONS
16719 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
16720 #endif
16721
16722- INIT_TEXT_SECTION(PAGE_SIZE)
16723-#ifdef CONFIG_X86_64
16724- :init
16725-#endif
16726+ . = ALIGN(PAGE_SIZE);
16727+ init_begin = .;
16728+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
16729+ VMLINUX_SYMBOL(_sinittext) = .;
16730+ INIT_TEXT
16731+ VMLINUX_SYMBOL(_einittext) = .;
16732+ . = ALIGN(PAGE_SIZE);
16733+ } :text.init
16734
16735- INIT_DATA_SECTION(16)
16736+ /*
16737+ * .exit.text is discard at runtime, not link time, to deal with
16738+ * references from .altinstructions and .eh_frame
16739+ */
16740+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
16741+ EXIT_TEXT
16742+ . = ALIGN(16);
16743+ } :text.exit
16744+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
16745+
16746+ . = ALIGN(PAGE_SIZE);
16747+ INIT_DATA_SECTION(16) :init
16748
16749 /*
16750 * Code and data for a variety of lowlevel trampolines, to be
16751@@ -269,19 +349,12 @@ SECTIONS
16752 }
16753
16754 . = ALIGN(8);
16755- /*
16756- * .exit.text is discard at runtime, not link time, to deal with
16757- * references from .altinstructions and .eh_frame
16758- */
16759- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
16760- EXIT_TEXT
16761- }
16762
16763 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
16764 EXIT_DATA
16765 }
16766
16767-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
16768+#ifndef CONFIG_SMP
16769 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
16770 #endif
16771
16772@@ -300,16 +373,10 @@ SECTIONS
16773 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
16774 __smp_locks = .;
16775 *(.smp_locks)
16776- . = ALIGN(PAGE_SIZE);
16777 __smp_locks_end = .;
16778+ . = ALIGN(PAGE_SIZE);
16779 }
16780
16781-#ifdef CONFIG_X86_64
16782- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
16783- NOSAVE_DATA
16784- }
16785-#endif
16786-
16787 /* BSS */
16788 . = ALIGN(PAGE_SIZE);
16789 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
16790@@ -325,6 +392,7 @@ SECTIONS
16791 __brk_base = .;
16792 . += 64 * 1024; /* 64k alignment slop space */
16793 *(.brk_reservation) /* areas brk users have reserved */
16794+ . = ALIGN(HPAGE_SIZE);
16795 __brk_limit = .;
16796 }
16797
16798@@ -351,13 +419,12 @@ SECTIONS
16799 * for the boot processor.
16800 */
16801 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
16802-INIT_PER_CPU(gdt_page);
16803 INIT_PER_CPU(irq_stack_union);
16804
16805 /*
16806 * Build-time check on the image size:
16807 */
16808-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
16809+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
16810 "kernel image bigger than KERNEL_IMAGE_SIZE");
16811
16812 #ifdef CONFIG_SMP
16813diff -urNp linux-3.1.1/arch/x86/kernel/vsyscall_64.c linux-3.1.1/arch/x86/kernel/vsyscall_64.c
16814--- linux-3.1.1/arch/x86/kernel/vsyscall_64.c 2011-11-11 15:19:27.000000000 -0500
16815+++ linux-3.1.1/arch/x86/kernel/vsyscall_64.c 2011-11-16 18:39:07.000000000 -0500
16816@@ -56,15 +56,13 @@ DEFINE_VVAR(struct vsyscall_gtod_data, v
16817 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
16818 };
16819
16820-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
16821+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
16822
16823 static int __init vsyscall_setup(char *str)
16824 {
16825 if (str) {
16826 if (!strcmp("emulate", str))
16827 vsyscall_mode = EMULATE;
16828- else if (!strcmp("native", str))
16829- vsyscall_mode = NATIVE;
16830 else if (!strcmp("none", str))
16831 vsyscall_mode = NONE;
16832 else
16833@@ -177,7 +175,7 @@ bool emulate_vsyscall(struct pt_regs *re
16834
16835 tsk = current;
16836 if (seccomp_mode(&tsk->seccomp))
16837- do_exit(SIGKILL);
16838+ do_group_exit(SIGKILL);
16839
16840 switch (vsyscall_nr) {
16841 case 0:
16842@@ -219,8 +217,7 @@ bool emulate_vsyscall(struct pt_regs *re
16843 return true;
16844
16845 sigsegv:
16846- force_sig(SIGSEGV, current);
16847- return true;
16848+ do_group_exit(SIGKILL);
16849 }
16850
16851 /*
16852@@ -273,10 +270,7 @@ void __init map_vsyscall(void)
16853 extern char __vvar_page;
16854 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
16855
16856- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
16857- vsyscall_mode == NATIVE
16858- ? PAGE_KERNEL_VSYSCALL
16859- : PAGE_KERNEL_VVAR);
16860+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
16861 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
16862 (unsigned long)VSYSCALL_START);
16863
16864diff -urNp linux-3.1.1/arch/x86/kernel/x8664_ksyms_64.c linux-3.1.1/arch/x86/kernel/x8664_ksyms_64.c
16865--- linux-3.1.1/arch/x86/kernel/x8664_ksyms_64.c 2011-11-11 15:19:27.000000000 -0500
16866+++ linux-3.1.1/arch/x86/kernel/x8664_ksyms_64.c 2011-11-16 18:39:07.000000000 -0500
16867@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
16868 EXPORT_SYMBOL(copy_user_generic_string);
16869 EXPORT_SYMBOL(copy_user_generic_unrolled);
16870 EXPORT_SYMBOL(__copy_user_nocache);
16871-EXPORT_SYMBOL(_copy_from_user);
16872-EXPORT_SYMBOL(_copy_to_user);
16873
16874 EXPORT_SYMBOL(copy_page);
16875 EXPORT_SYMBOL(clear_page);
16876diff -urNp linux-3.1.1/arch/x86/kernel/xsave.c linux-3.1.1/arch/x86/kernel/xsave.c
16877--- linux-3.1.1/arch/x86/kernel/xsave.c 2011-11-11 15:19:27.000000000 -0500
16878+++ linux-3.1.1/arch/x86/kernel/xsave.c 2011-11-16 18:39:07.000000000 -0500
16879@@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_
16880 fx_sw_user->xstate_size > fx_sw_user->extended_size)
16881 return -EINVAL;
16882
16883- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
16884+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
16885 fx_sw_user->extended_size -
16886 FP_XSTATE_MAGIC2_SIZE));
16887 if (err)
16888@@ -267,7 +267,7 @@ fx_only:
16889 * the other extended state.
16890 */
16891 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
16892- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
16893+ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
16894 }
16895
16896 /*
16897@@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf
16898 if (use_xsave())
16899 err = restore_user_xstate(buf);
16900 else
16901- err = fxrstor_checking((__force struct i387_fxsave_struct *)
16902+ err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
16903 buf);
16904 if (unlikely(err)) {
16905 /*
16906diff -urNp linux-3.1.1/arch/x86/kvm/emulate.c linux-3.1.1/arch/x86/kvm/emulate.c
16907--- linux-3.1.1/arch/x86/kvm/emulate.c 2011-11-11 15:19:27.000000000 -0500
16908+++ linux-3.1.1/arch/x86/kvm/emulate.c 2011-11-16 18:39:07.000000000 -0500
16909@@ -96,7 +96,7 @@
16910 #define Src2ImmByte (2<<29)
16911 #define Src2One (3<<29)
16912 #define Src2Imm (4<<29)
16913-#define Src2Mask (7<<29)
16914+#define Src2Mask (7U<<29)
16915
16916 #define X2(x...) x, x
16917 #define X3(x...) X2(x), x
16918@@ -207,6 +207,7 @@ struct gprefix {
16919
16920 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
16921 do { \
16922+ unsigned long _tmp; \
16923 __asm__ __volatile__ ( \
16924 _PRE_EFLAGS("0", "4", "2") \
16925 _op _suffix " %"_x"3,%1; " \
16926@@ -220,8 +221,6 @@ struct gprefix {
16927 /* Raw emulation: instruction has two explicit operands. */
16928 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
16929 do { \
16930- unsigned long _tmp; \
16931- \
16932 switch ((_dst).bytes) { \
16933 case 2: \
16934 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
16935@@ -237,7 +236,6 @@ struct gprefix {
16936
16937 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
16938 do { \
16939- unsigned long _tmp; \
16940 switch ((_dst).bytes) { \
16941 case 1: \
16942 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
16943diff -urNp linux-3.1.1/arch/x86/kvm/lapic.c linux-3.1.1/arch/x86/kvm/lapic.c
16944--- linux-3.1.1/arch/x86/kvm/lapic.c 2011-11-11 15:19:27.000000000 -0500
16945+++ linux-3.1.1/arch/x86/kvm/lapic.c 2011-11-16 18:39:07.000000000 -0500
16946@@ -53,7 +53,7 @@
16947 #define APIC_BUS_CYCLE_NS 1
16948
16949 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
16950-#define apic_debug(fmt, arg...)
16951+#define apic_debug(fmt, arg...) do {} while (0)
16952
16953 #define APIC_LVT_NUM 6
16954 /* 14 is the version for Xeon and Pentium 8.4.8*/
16955diff -urNp linux-3.1.1/arch/x86/kvm/mmu.c linux-3.1.1/arch/x86/kvm/mmu.c
16956--- linux-3.1.1/arch/x86/kvm/mmu.c 2011-11-11 15:19:27.000000000 -0500
16957+++ linux-3.1.1/arch/x86/kvm/mmu.c 2011-11-16 18:39:07.000000000 -0500
16958@@ -3552,7 +3552,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
16959
16960 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
16961
16962- invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
16963+ invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
16964
16965 /*
16966 * Assume that the pte write on a page table of the same type
16967@@ -3584,7 +3584,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
16968 }
16969
16970 spin_lock(&vcpu->kvm->mmu_lock);
16971- if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
16972+ if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
16973 gentry = 0;
16974 kvm_mmu_free_some_pages(vcpu);
16975 ++vcpu->kvm->stat.mmu_pte_write;
16976diff -urNp linux-3.1.1/arch/x86/kvm/paging_tmpl.h linux-3.1.1/arch/x86/kvm/paging_tmpl.h
16977--- linux-3.1.1/arch/x86/kvm/paging_tmpl.h 2011-11-11 15:19:27.000000000 -0500
16978+++ linux-3.1.1/arch/x86/kvm/paging_tmpl.h 2011-11-16 19:40:44.000000000 -0500
16979@@ -197,7 +197,7 @@ retry_walk:
16980 if (unlikely(kvm_is_error_hva(host_addr)))
16981 goto error;
16982
16983- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
16984+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
16985 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
16986 goto error;
16987
16988@@ -575,6 +575,8 @@ static int FNAME(page_fault)(struct kvm_
16989 unsigned long mmu_seq;
16990 bool map_writable;
16991
16992+ pax_track_stack();
16993+
16994 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
16995
16996 if (unlikely(error_code & PFERR_RSVD_MASK))
16997@@ -701,7 +703,7 @@ static void FNAME(invlpg)(struct kvm_vcp
16998 if (need_flush)
16999 kvm_flush_remote_tlbs(vcpu->kvm);
17000
17001- atomic_inc(&vcpu->kvm->arch.invlpg_counter);
17002+ atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
17003
17004 spin_unlock(&vcpu->kvm->mmu_lock);
17005
17006diff -urNp linux-3.1.1/arch/x86/kvm/svm.c linux-3.1.1/arch/x86/kvm/svm.c
17007--- linux-3.1.1/arch/x86/kvm/svm.c 2011-11-11 15:19:27.000000000 -0500
17008+++ linux-3.1.1/arch/x86/kvm/svm.c 2011-11-16 18:39:07.000000000 -0500
17009@@ -3381,7 +3381,11 @@ static void reload_tss(struct kvm_vcpu *
17010 int cpu = raw_smp_processor_id();
17011
17012 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
17013+
17014+ pax_open_kernel();
17015 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
17016+ pax_close_kernel();
17017+
17018 load_TR_desc();
17019 }
17020
17021@@ -3759,6 +3763,10 @@ static void svm_vcpu_run(struct kvm_vcpu
17022 #endif
17023 #endif
17024
17025+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17026+ __set_fs(current_thread_info()->addr_limit);
17027+#endif
17028+
17029 reload_tss(vcpu);
17030
17031 local_irq_disable();
17032diff -urNp linux-3.1.1/arch/x86/kvm/vmx.c linux-3.1.1/arch/x86/kvm/vmx.c
17033--- linux-3.1.1/arch/x86/kvm/vmx.c 2011-11-11 15:19:27.000000000 -0500
17034+++ linux-3.1.1/arch/x86/kvm/vmx.c 2011-11-16 18:39:07.000000000 -0500
17035@@ -1251,7 +1251,11 @@ static void reload_tss(void)
17036 struct desc_struct *descs;
17037
17038 descs = (void *)gdt->address;
17039+
17040+ pax_open_kernel();
17041 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
17042+ pax_close_kernel();
17043+
17044 load_TR_desc();
17045 }
17046
17047@@ -2520,8 +2524,11 @@ static __init int hardware_setup(void)
17048 if (!cpu_has_vmx_flexpriority())
17049 flexpriority_enabled = 0;
17050
17051- if (!cpu_has_vmx_tpr_shadow())
17052- kvm_x86_ops->update_cr8_intercept = NULL;
17053+ if (!cpu_has_vmx_tpr_shadow()) {
17054+ pax_open_kernel();
17055+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
17056+ pax_close_kernel();
17057+ }
17058
17059 if (enable_ept && !cpu_has_vmx_ept_2m_page())
17060 kvm_disable_largepages();
17061@@ -3535,7 +3542,7 @@ static void vmx_set_constant_host_state(
17062 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
17063
17064 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
17065- vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
17066+ vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
17067
17068 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
17069 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
17070@@ -6021,6 +6028,12 @@ static void __noclone vmx_vcpu_run(struc
17071 "jmp .Lkvm_vmx_return \n\t"
17072 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
17073 ".Lkvm_vmx_return: "
17074+
17075+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17076+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
17077+ ".Lkvm_vmx_return2: "
17078+#endif
17079+
17080 /* Save guest registers, load host registers, keep flags */
17081 "mov %0, %c[wordsize](%%"R"sp) \n\t"
17082 "pop %0 \n\t"
17083@@ -6069,6 +6082,11 @@ static void __noclone vmx_vcpu_run(struc
17084 #endif
17085 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
17086 [wordsize]"i"(sizeof(ulong))
17087+
17088+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17089+ ,[cs]"i"(__KERNEL_CS)
17090+#endif
17091+
17092 : "cc", "memory"
17093 , R"ax", R"bx", R"di", R"si"
17094 #ifdef CONFIG_X86_64
17095@@ -6097,7 +6115,16 @@ static void __noclone vmx_vcpu_run(struc
17096 }
17097 }
17098
17099- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
17100+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
17101+
17102+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17103+ loadsegment(fs, __KERNEL_PERCPU);
17104+#endif
17105+
17106+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17107+ __set_fs(current_thread_info()->addr_limit);
17108+#endif
17109+
17110 vmx->loaded_vmcs->launched = 1;
17111
17112 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
17113diff -urNp linux-3.1.1/arch/x86/kvm/x86.c linux-3.1.1/arch/x86/kvm/x86.c
17114--- linux-3.1.1/arch/x86/kvm/x86.c 2011-11-11 15:19:27.000000000 -0500
17115+++ linux-3.1.1/arch/x86/kvm/x86.c 2011-11-16 18:39:07.000000000 -0500
17116@@ -1334,8 +1334,8 @@ static int xen_hvm_config(struct kvm_vcp
17117 {
17118 struct kvm *kvm = vcpu->kvm;
17119 int lm = is_long_mode(vcpu);
17120- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
17121- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
17122+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
17123+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
17124 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
17125 : kvm->arch.xen_hvm_config.blob_size_32;
17126 u32 page_num = data & ~PAGE_MASK;
17127@@ -2137,6 +2137,8 @@ long kvm_arch_dev_ioctl(struct file *fil
17128 if (n < msr_list.nmsrs)
17129 goto out;
17130 r = -EFAULT;
17131+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
17132+ goto out;
17133 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
17134 num_msrs_to_save * sizeof(u32)))
17135 goto out;
17136@@ -2312,15 +2314,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
17137 struct kvm_cpuid2 *cpuid,
17138 struct kvm_cpuid_entry2 __user *entries)
17139 {
17140- int r;
17141+ int r, i;
17142
17143 r = -E2BIG;
17144 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
17145 goto out;
17146 r = -EFAULT;
17147- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
17148- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
17149+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
17150 goto out;
17151+ for (i = 0; i < cpuid->nent; ++i) {
17152+ struct kvm_cpuid_entry2 cpuid_entry;
17153+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
17154+ goto out;
17155+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
17156+ }
17157 vcpu->arch.cpuid_nent = cpuid->nent;
17158 kvm_apic_set_version(vcpu);
17159 kvm_x86_ops->cpuid_update(vcpu);
17160@@ -2335,15 +2342,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
17161 struct kvm_cpuid2 *cpuid,
17162 struct kvm_cpuid_entry2 __user *entries)
17163 {
17164- int r;
17165+ int r, i;
17166
17167 r = -E2BIG;
17168 if (cpuid->nent < vcpu->arch.cpuid_nent)
17169 goto out;
17170 r = -EFAULT;
17171- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
17172- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
17173+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
17174 goto out;
17175+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
17176+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
17177+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
17178+ goto out;
17179+ }
17180 return 0;
17181
17182 out:
17183@@ -2718,7 +2729,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
17184 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
17185 struct kvm_interrupt *irq)
17186 {
17187- if (irq->irq < 0 || irq->irq >= 256)
17188+ if (irq->irq >= 256)
17189 return -EINVAL;
17190 if (irqchip_in_kernel(vcpu->kvm))
17191 return -ENXIO;
17192@@ -5089,7 +5100,7 @@ static void kvm_set_mmio_spte_mask(void)
17193 kvm_mmu_set_mmio_spte_mask(mask);
17194 }
17195
17196-int kvm_arch_init(void *opaque)
17197+int kvm_arch_init(const void *opaque)
17198 {
17199 int r;
17200 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
17201diff -urNp linux-3.1.1/arch/x86/lguest/boot.c linux-3.1.1/arch/x86/lguest/boot.c
17202--- linux-3.1.1/arch/x86/lguest/boot.c 2011-11-11 15:19:27.000000000 -0500
17203+++ linux-3.1.1/arch/x86/lguest/boot.c 2011-11-16 18:39:07.000000000 -0500
17204@@ -1184,9 +1184,10 @@ static __init int early_put_chars(u32 vt
17205 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
17206 * Launcher to reboot us.
17207 */
17208-static void lguest_restart(char *reason)
17209+static __noreturn void lguest_restart(char *reason)
17210 {
17211 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
17212+ BUG();
17213 }
17214
17215 /*G:050
17216diff -urNp linux-3.1.1/arch/x86/lib/atomic64_32.c linux-3.1.1/arch/x86/lib/atomic64_32.c
17217--- linux-3.1.1/arch/x86/lib/atomic64_32.c 2011-11-11 15:19:27.000000000 -0500
17218+++ linux-3.1.1/arch/x86/lib/atomic64_32.c 2011-11-16 18:39:07.000000000 -0500
17219@@ -8,18 +8,30 @@
17220
17221 long long atomic64_read_cx8(long long, const atomic64_t *v);
17222 EXPORT_SYMBOL(atomic64_read_cx8);
17223+long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
17224+EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
17225 long long atomic64_set_cx8(long long, const atomic64_t *v);
17226 EXPORT_SYMBOL(atomic64_set_cx8);
17227+long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
17228+EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
17229 long long atomic64_xchg_cx8(long long, unsigned high);
17230 EXPORT_SYMBOL(atomic64_xchg_cx8);
17231 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
17232 EXPORT_SYMBOL(atomic64_add_return_cx8);
17233+long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
17234+EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
17235 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
17236 EXPORT_SYMBOL(atomic64_sub_return_cx8);
17237+long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
17238+EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
17239 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
17240 EXPORT_SYMBOL(atomic64_inc_return_cx8);
17241+long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
17242+EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
17243 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
17244 EXPORT_SYMBOL(atomic64_dec_return_cx8);
17245+long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
17246+EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
17247 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
17248 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
17249 int atomic64_inc_not_zero_cx8(atomic64_t *v);
17250@@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
17251 #ifndef CONFIG_X86_CMPXCHG64
17252 long long atomic64_read_386(long long, const atomic64_t *v);
17253 EXPORT_SYMBOL(atomic64_read_386);
17254+long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
17255+EXPORT_SYMBOL(atomic64_read_unchecked_386);
17256 long long atomic64_set_386(long long, const atomic64_t *v);
17257 EXPORT_SYMBOL(atomic64_set_386);
17258+long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
17259+EXPORT_SYMBOL(atomic64_set_unchecked_386);
17260 long long atomic64_xchg_386(long long, unsigned high);
17261 EXPORT_SYMBOL(atomic64_xchg_386);
17262 long long atomic64_add_return_386(long long a, atomic64_t *v);
17263 EXPORT_SYMBOL(atomic64_add_return_386);
17264+long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
17265+EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
17266 long long atomic64_sub_return_386(long long a, atomic64_t *v);
17267 EXPORT_SYMBOL(atomic64_sub_return_386);
17268+long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
17269+EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
17270 long long atomic64_inc_return_386(long long a, atomic64_t *v);
17271 EXPORT_SYMBOL(atomic64_inc_return_386);
17272+long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
17273+EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
17274 long long atomic64_dec_return_386(long long a, atomic64_t *v);
17275 EXPORT_SYMBOL(atomic64_dec_return_386);
17276+long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
17277+EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
17278 long long atomic64_add_386(long long a, atomic64_t *v);
17279 EXPORT_SYMBOL(atomic64_add_386);
17280+long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
17281+EXPORT_SYMBOL(atomic64_add_unchecked_386);
17282 long long atomic64_sub_386(long long a, atomic64_t *v);
17283 EXPORT_SYMBOL(atomic64_sub_386);
17284+long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
17285+EXPORT_SYMBOL(atomic64_sub_unchecked_386);
17286 long long atomic64_inc_386(long long a, atomic64_t *v);
17287 EXPORT_SYMBOL(atomic64_inc_386);
17288+long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
17289+EXPORT_SYMBOL(atomic64_inc_unchecked_386);
17290 long long atomic64_dec_386(long long a, atomic64_t *v);
17291 EXPORT_SYMBOL(atomic64_dec_386);
17292+long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
17293+EXPORT_SYMBOL(atomic64_dec_unchecked_386);
17294 long long atomic64_dec_if_positive_386(atomic64_t *v);
17295 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
17296 int atomic64_inc_not_zero_386(atomic64_t *v);
17297diff -urNp linux-3.1.1/arch/x86/lib/atomic64_386_32.S linux-3.1.1/arch/x86/lib/atomic64_386_32.S
17298--- linux-3.1.1/arch/x86/lib/atomic64_386_32.S 2011-11-11 15:19:27.000000000 -0500
17299+++ linux-3.1.1/arch/x86/lib/atomic64_386_32.S 2011-11-16 18:39:07.000000000 -0500
17300@@ -48,6 +48,10 @@ BEGIN(read)
17301 movl (v), %eax
17302 movl 4(v), %edx
17303 RET_ENDP
17304+BEGIN(read_unchecked)
17305+ movl (v), %eax
17306+ movl 4(v), %edx
17307+RET_ENDP
17308 #undef v
17309
17310 #define v %esi
17311@@ -55,6 +59,10 @@ BEGIN(set)
17312 movl %ebx, (v)
17313 movl %ecx, 4(v)
17314 RET_ENDP
17315+BEGIN(set_unchecked)
17316+ movl %ebx, (v)
17317+ movl %ecx, 4(v)
17318+RET_ENDP
17319 #undef v
17320
17321 #define v %esi
17322@@ -70,6 +78,20 @@ RET_ENDP
17323 BEGIN(add)
17324 addl %eax, (v)
17325 adcl %edx, 4(v)
17326+
17327+#ifdef CONFIG_PAX_REFCOUNT
17328+ jno 0f
17329+ subl %eax, (v)
17330+ sbbl %edx, 4(v)
17331+ int $4
17332+0:
17333+ _ASM_EXTABLE(0b, 0b)
17334+#endif
17335+
17336+RET_ENDP
17337+BEGIN(add_unchecked)
17338+ addl %eax, (v)
17339+ adcl %edx, 4(v)
17340 RET_ENDP
17341 #undef v
17342
17343@@ -77,6 +99,24 @@ RET_ENDP
17344 BEGIN(add_return)
17345 addl (v), %eax
17346 adcl 4(v), %edx
17347+
17348+#ifdef CONFIG_PAX_REFCOUNT
17349+ into
17350+1234:
17351+ _ASM_EXTABLE(1234b, 2f)
17352+#endif
17353+
17354+ movl %eax, (v)
17355+ movl %edx, 4(v)
17356+
17357+#ifdef CONFIG_PAX_REFCOUNT
17358+2:
17359+#endif
17360+
17361+RET_ENDP
17362+BEGIN(add_return_unchecked)
17363+ addl (v), %eax
17364+ adcl 4(v), %edx
17365 movl %eax, (v)
17366 movl %edx, 4(v)
17367 RET_ENDP
17368@@ -86,6 +126,20 @@ RET_ENDP
17369 BEGIN(sub)
17370 subl %eax, (v)
17371 sbbl %edx, 4(v)
17372+
17373+#ifdef CONFIG_PAX_REFCOUNT
17374+ jno 0f
17375+ addl %eax, (v)
17376+ adcl %edx, 4(v)
17377+ int $4
17378+0:
17379+ _ASM_EXTABLE(0b, 0b)
17380+#endif
17381+
17382+RET_ENDP
17383+BEGIN(sub_unchecked)
17384+ subl %eax, (v)
17385+ sbbl %edx, 4(v)
17386 RET_ENDP
17387 #undef v
17388
17389@@ -96,6 +150,27 @@ BEGIN(sub_return)
17390 sbbl $0, %edx
17391 addl (v), %eax
17392 adcl 4(v), %edx
17393+
17394+#ifdef CONFIG_PAX_REFCOUNT
17395+ into
17396+1234:
17397+ _ASM_EXTABLE(1234b, 2f)
17398+#endif
17399+
17400+ movl %eax, (v)
17401+ movl %edx, 4(v)
17402+
17403+#ifdef CONFIG_PAX_REFCOUNT
17404+2:
17405+#endif
17406+
17407+RET_ENDP
17408+BEGIN(sub_return_unchecked)
17409+ negl %edx
17410+ negl %eax
17411+ sbbl $0, %edx
17412+ addl (v), %eax
17413+ adcl 4(v), %edx
17414 movl %eax, (v)
17415 movl %edx, 4(v)
17416 RET_ENDP
17417@@ -105,6 +180,20 @@ RET_ENDP
17418 BEGIN(inc)
17419 addl $1, (v)
17420 adcl $0, 4(v)
17421+
17422+#ifdef CONFIG_PAX_REFCOUNT
17423+ jno 0f
17424+ subl $1, (v)
17425+ sbbl $0, 4(v)
17426+ int $4
17427+0:
17428+ _ASM_EXTABLE(0b, 0b)
17429+#endif
17430+
17431+RET_ENDP
17432+BEGIN(inc_unchecked)
17433+ addl $1, (v)
17434+ adcl $0, 4(v)
17435 RET_ENDP
17436 #undef v
17437
17438@@ -114,6 +203,26 @@ BEGIN(inc_return)
17439 movl 4(v), %edx
17440 addl $1, %eax
17441 adcl $0, %edx
17442+
17443+#ifdef CONFIG_PAX_REFCOUNT
17444+ into
17445+1234:
17446+ _ASM_EXTABLE(1234b, 2f)
17447+#endif
17448+
17449+ movl %eax, (v)
17450+ movl %edx, 4(v)
17451+
17452+#ifdef CONFIG_PAX_REFCOUNT
17453+2:
17454+#endif
17455+
17456+RET_ENDP
17457+BEGIN(inc_return_unchecked)
17458+ movl (v), %eax
17459+ movl 4(v), %edx
17460+ addl $1, %eax
17461+ adcl $0, %edx
17462 movl %eax, (v)
17463 movl %edx, 4(v)
17464 RET_ENDP
17465@@ -123,6 +232,20 @@ RET_ENDP
17466 BEGIN(dec)
17467 subl $1, (v)
17468 sbbl $0, 4(v)
17469+
17470+#ifdef CONFIG_PAX_REFCOUNT
17471+ jno 0f
17472+ addl $1, (v)
17473+ adcl $0, 4(v)
17474+ int $4
17475+0:
17476+ _ASM_EXTABLE(0b, 0b)
17477+#endif
17478+
17479+RET_ENDP
17480+BEGIN(dec_unchecked)
17481+ subl $1, (v)
17482+ sbbl $0, 4(v)
17483 RET_ENDP
17484 #undef v
17485
17486@@ -132,6 +255,26 @@ BEGIN(dec_return)
17487 movl 4(v), %edx
17488 subl $1, %eax
17489 sbbl $0, %edx
17490+
17491+#ifdef CONFIG_PAX_REFCOUNT
17492+ into
17493+1234:
17494+ _ASM_EXTABLE(1234b, 2f)
17495+#endif
17496+
17497+ movl %eax, (v)
17498+ movl %edx, 4(v)
17499+
17500+#ifdef CONFIG_PAX_REFCOUNT
17501+2:
17502+#endif
17503+
17504+RET_ENDP
17505+BEGIN(dec_return_unchecked)
17506+ movl (v), %eax
17507+ movl 4(v), %edx
17508+ subl $1, %eax
17509+ sbbl $0, %edx
17510 movl %eax, (v)
17511 movl %edx, 4(v)
17512 RET_ENDP
17513@@ -143,6 +286,13 @@ BEGIN(add_unless)
17514 adcl %edx, %edi
17515 addl (v), %eax
17516 adcl 4(v), %edx
17517+
17518+#ifdef CONFIG_PAX_REFCOUNT
17519+ into
17520+1234:
17521+ _ASM_EXTABLE(1234b, 2f)
17522+#endif
17523+
17524 cmpl %eax, %esi
17525 je 3f
17526 1:
17527@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
17528 1:
17529 addl $1, %eax
17530 adcl $0, %edx
17531+
17532+#ifdef CONFIG_PAX_REFCOUNT
17533+ into
17534+1234:
17535+ _ASM_EXTABLE(1234b, 2f)
17536+#endif
17537+
17538 movl %eax, (v)
17539 movl %edx, 4(v)
17540 movl $1, %eax
17541@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
17542 movl 4(v), %edx
17543 subl $1, %eax
17544 sbbl $0, %edx
17545+
17546+#ifdef CONFIG_PAX_REFCOUNT
17547+ into
17548+1234:
17549+ _ASM_EXTABLE(1234b, 1f)
17550+#endif
17551+
17552 js 1f
17553 movl %eax, (v)
17554 movl %edx, 4(v)
17555diff -urNp linux-3.1.1/arch/x86/lib/atomic64_cx8_32.S linux-3.1.1/arch/x86/lib/atomic64_cx8_32.S
17556--- linux-3.1.1/arch/x86/lib/atomic64_cx8_32.S 2011-11-11 15:19:27.000000000 -0500
17557+++ linux-3.1.1/arch/x86/lib/atomic64_cx8_32.S 2011-11-16 18:39:07.000000000 -0500
17558@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
17559 CFI_STARTPROC
17560
17561 read64 %ecx
17562+ pax_force_retaddr
17563 ret
17564 CFI_ENDPROC
17565 ENDPROC(atomic64_read_cx8)
17566
17567+ENTRY(atomic64_read_unchecked_cx8)
17568+ CFI_STARTPROC
17569+
17570+ read64 %ecx
17571+ pax_force_retaddr
17572+ ret
17573+ CFI_ENDPROC
17574+ENDPROC(atomic64_read_unchecked_cx8)
17575+
17576 ENTRY(atomic64_set_cx8)
17577 CFI_STARTPROC
17578
17579@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
17580 cmpxchg8b (%esi)
17581 jne 1b
17582
17583+ pax_force_retaddr
17584 ret
17585 CFI_ENDPROC
17586 ENDPROC(atomic64_set_cx8)
17587
17588+ENTRY(atomic64_set_unchecked_cx8)
17589+ CFI_STARTPROC
17590+
17591+1:
17592+/* we don't need LOCK_PREFIX since aligned 64-bit writes
17593+ * are atomic on 586 and newer */
17594+ cmpxchg8b (%esi)
17595+ jne 1b
17596+
17597+ pax_force_retaddr
17598+ ret
17599+ CFI_ENDPROC
17600+ENDPROC(atomic64_set_unchecked_cx8)
17601+
17602 ENTRY(atomic64_xchg_cx8)
17603 CFI_STARTPROC
17604
17605@@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
17606 cmpxchg8b (%esi)
17607 jne 1b
17608
17609+ pax_force_retaddr
17610 ret
17611 CFI_ENDPROC
17612 ENDPROC(atomic64_xchg_cx8)
17613
17614-.macro addsub_return func ins insc
17615-ENTRY(atomic64_\func\()_return_cx8)
17616+.macro addsub_return func ins insc unchecked=""
17617+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
17618 CFI_STARTPROC
17619 SAVE ebp
17620 SAVE ebx
17621@@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
17622 movl %edx, %ecx
17623 \ins\()l %esi, %ebx
17624 \insc\()l %edi, %ecx
17625+
17626+.ifb \unchecked
17627+#ifdef CONFIG_PAX_REFCOUNT
17628+ into
17629+2:
17630+ _ASM_EXTABLE(2b, 3f)
17631+#endif
17632+.endif
17633+
17634 LOCK_PREFIX
17635 cmpxchg8b (%ebp)
17636 jne 1b
17637-
17638-10:
17639 movl %ebx, %eax
17640 movl %ecx, %edx
17641+
17642+.ifb \unchecked
17643+#ifdef CONFIG_PAX_REFCOUNT
17644+3:
17645+#endif
17646+.endif
17647+
17648 RESTORE edi
17649 RESTORE esi
17650 RESTORE ebx
17651 RESTORE ebp
17652+ pax_force_retaddr
17653 ret
17654 CFI_ENDPROC
17655-ENDPROC(atomic64_\func\()_return_cx8)
17656+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
17657 .endm
17658
17659 addsub_return add add adc
17660 addsub_return sub sub sbb
17661+addsub_return add add adc _unchecked
17662+addsub_return sub sub sbb _unchecked
17663
17664-.macro incdec_return func ins insc
17665-ENTRY(atomic64_\func\()_return_cx8)
17666+.macro incdec_return func ins insc unchecked
17667+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
17668 CFI_STARTPROC
17669 SAVE ebx
17670
17671@@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
17672 movl %edx, %ecx
17673 \ins\()l $1, %ebx
17674 \insc\()l $0, %ecx
17675+
17676+.ifb \unchecked
17677+#ifdef CONFIG_PAX_REFCOUNT
17678+ into
17679+2:
17680+ _ASM_EXTABLE(2b, 3f)
17681+#endif
17682+.endif
17683+
17684 LOCK_PREFIX
17685 cmpxchg8b (%esi)
17686 jne 1b
17687
17688-10:
17689 movl %ebx, %eax
17690 movl %ecx, %edx
17691+
17692+.ifb \unchecked
17693+#ifdef CONFIG_PAX_REFCOUNT
17694+3:
17695+#endif
17696+.endif
17697+
17698 RESTORE ebx
17699+ pax_force_retaddr
17700 ret
17701 CFI_ENDPROC
17702-ENDPROC(atomic64_\func\()_return_cx8)
17703+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
17704 .endm
17705
17706 incdec_return inc add adc
17707 incdec_return dec sub sbb
17708+incdec_return inc add adc _unchecked
17709+incdec_return dec sub sbb _unchecked
17710
17711 ENTRY(atomic64_dec_if_positive_cx8)
17712 CFI_STARTPROC
17713@@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
17714 movl %edx, %ecx
17715 subl $1, %ebx
17716 sbb $0, %ecx
17717+
17718+#ifdef CONFIG_PAX_REFCOUNT
17719+ into
17720+1234:
17721+ _ASM_EXTABLE(1234b, 2f)
17722+#endif
17723+
17724 js 2f
17725 LOCK_PREFIX
17726 cmpxchg8b (%esi)
17727@@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
17728 movl %ebx, %eax
17729 movl %ecx, %edx
17730 RESTORE ebx
17731+ pax_force_retaddr
17732 ret
17733 CFI_ENDPROC
17734 ENDPROC(atomic64_dec_if_positive_cx8)
17735@@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
17736 movl %edx, %ecx
17737 addl %esi, %ebx
17738 adcl %edi, %ecx
17739+
17740+#ifdef CONFIG_PAX_REFCOUNT
17741+ into
17742+1234:
17743+ _ASM_EXTABLE(1234b, 3f)
17744+#endif
17745+
17746 LOCK_PREFIX
17747 cmpxchg8b (%ebp)
17748 jne 1b
17749@@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
17750 CFI_ADJUST_CFA_OFFSET -8
17751 RESTORE ebx
17752 RESTORE ebp
17753+ pax_force_retaddr
17754 ret
17755 4:
17756 cmpl %edx, 4(%esp)
17757@@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
17758 movl %edx, %ecx
17759 addl $1, %ebx
17760 adcl $0, %ecx
17761+
17762+#ifdef CONFIG_PAX_REFCOUNT
17763+ into
17764+1234:
17765+ _ASM_EXTABLE(1234b, 3f)
17766+#endif
17767+
17768 LOCK_PREFIX
17769 cmpxchg8b (%esi)
17770 jne 1b
17771@@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
17772 movl $1, %eax
17773 3:
17774 RESTORE ebx
17775+ pax_force_retaddr
17776 ret
17777 4:
17778 testl %edx, %edx
17779diff -urNp linux-3.1.1/arch/x86/lib/checksum_32.S linux-3.1.1/arch/x86/lib/checksum_32.S
17780--- linux-3.1.1/arch/x86/lib/checksum_32.S 2011-11-11 15:19:27.000000000 -0500
17781+++ linux-3.1.1/arch/x86/lib/checksum_32.S 2011-11-16 18:39:07.000000000 -0500
17782@@ -28,7 +28,8 @@
17783 #include <linux/linkage.h>
17784 #include <asm/dwarf2.h>
17785 #include <asm/errno.h>
17786-
17787+#include <asm/segment.h>
17788+
17789 /*
17790 * computes a partial checksum, e.g. for TCP/UDP fragments
17791 */
17792@@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (
17793
17794 #define ARGBASE 16
17795 #define FP 12
17796-
17797-ENTRY(csum_partial_copy_generic)
17798+
17799+ENTRY(csum_partial_copy_generic_to_user)
17800 CFI_STARTPROC
17801+
17802+#ifdef CONFIG_PAX_MEMORY_UDEREF
17803+ pushl_cfi %gs
17804+ popl_cfi %es
17805+ jmp csum_partial_copy_generic
17806+#endif
17807+
17808+ENTRY(csum_partial_copy_generic_from_user)
17809+
17810+#ifdef CONFIG_PAX_MEMORY_UDEREF
17811+ pushl_cfi %gs
17812+ popl_cfi %ds
17813+#endif
17814+
17815+ENTRY(csum_partial_copy_generic)
17816 subl $4,%esp
17817 CFI_ADJUST_CFA_OFFSET 4
17818 pushl_cfi %edi
17819@@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
17820 jmp 4f
17821 SRC(1: movw (%esi), %bx )
17822 addl $2, %esi
17823-DST( movw %bx, (%edi) )
17824+DST( movw %bx, %es:(%edi) )
17825 addl $2, %edi
17826 addw %bx, %ax
17827 adcl $0, %eax
17828@@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
17829 SRC(1: movl (%esi), %ebx )
17830 SRC( movl 4(%esi), %edx )
17831 adcl %ebx, %eax
17832-DST( movl %ebx, (%edi) )
17833+DST( movl %ebx, %es:(%edi) )
17834 adcl %edx, %eax
17835-DST( movl %edx, 4(%edi) )
17836+DST( movl %edx, %es:4(%edi) )
17837
17838 SRC( movl 8(%esi), %ebx )
17839 SRC( movl 12(%esi), %edx )
17840 adcl %ebx, %eax
17841-DST( movl %ebx, 8(%edi) )
17842+DST( movl %ebx, %es:8(%edi) )
17843 adcl %edx, %eax
17844-DST( movl %edx, 12(%edi) )
17845+DST( movl %edx, %es:12(%edi) )
17846
17847 SRC( movl 16(%esi), %ebx )
17848 SRC( movl 20(%esi), %edx )
17849 adcl %ebx, %eax
17850-DST( movl %ebx, 16(%edi) )
17851+DST( movl %ebx, %es:16(%edi) )
17852 adcl %edx, %eax
17853-DST( movl %edx, 20(%edi) )
17854+DST( movl %edx, %es:20(%edi) )
17855
17856 SRC( movl 24(%esi), %ebx )
17857 SRC( movl 28(%esi), %edx )
17858 adcl %ebx, %eax
17859-DST( movl %ebx, 24(%edi) )
17860+DST( movl %ebx, %es:24(%edi) )
17861 adcl %edx, %eax
17862-DST( movl %edx, 28(%edi) )
17863+DST( movl %edx, %es:28(%edi) )
17864
17865 lea 32(%esi), %esi
17866 lea 32(%edi), %edi
17867@@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
17868 shrl $2, %edx # This clears CF
17869 SRC(3: movl (%esi), %ebx )
17870 adcl %ebx, %eax
17871-DST( movl %ebx, (%edi) )
17872+DST( movl %ebx, %es:(%edi) )
17873 lea 4(%esi), %esi
17874 lea 4(%edi), %edi
17875 dec %edx
17876@@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
17877 jb 5f
17878 SRC( movw (%esi), %cx )
17879 leal 2(%esi), %esi
17880-DST( movw %cx, (%edi) )
17881+DST( movw %cx, %es:(%edi) )
17882 leal 2(%edi), %edi
17883 je 6f
17884 shll $16,%ecx
17885 SRC(5: movb (%esi), %cl )
17886-DST( movb %cl, (%edi) )
17887+DST( movb %cl, %es:(%edi) )
17888 6: addl %ecx, %eax
17889 adcl $0, %eax
17890 7:
17891@@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
17892
17893 6001:
17894 movl ARGBASE+20(%esp), %ebx # src_err_ptr
17895- movl $-EFAULT, (%ebx)
17896+ movl $-EFAULT, %ss:(%ebx)
17897
17898 # zero the complete destination - computing the rest
17899 # is too much work
17900@@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
17901
17902 6002:
17903 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
17904- movl $-EFAULT,(%ebx)
17905+ movl $-EFAULT,%ss:(%ebx)
17906 jmp 5000b
17907
17908 .previous
17909
17910+ pushl_cfi %ss
17911+ popl_cfi %ds
17912+ pushl_cfi %ss
17913+ popl_cfi %es
17914 popl_cfi %ebx
17915 CFI_RESTORE ebx
17916 popl_cfi %esi
17917@@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
17918 popl_cfi %ecx # equivalent to addl $4,%esp
17919 ret
17920 CFI_ENDPROC
17921-ENDPROC(csum_partial_copy_generic)
17922+ENDPROC(csum_partial_copy_generic_to_user)
17923
17924 #else
17925
17926 /* Version for PentiumII/PPro */
17927
17928 #define ROUND1(x) \
17929+ nop; nop; nop; \
17930 SRC(movl x(%esi), %ebx ) ; \
17931 addl %ebx, %eax ; \
17932- DST(movl %ebx, x(%edi) ) ;
17933+ DST(movl %ebx, %es:x(%edi)) ;
17934
17935 #define ROUND(x) \
17936+ nop; nop; nop; \
17937 SRC(movl x(%esi), %ebx ) ; \
17938 adcl %ebx, %eax ; \
17939- DST(movl %ebx, x(%edi) ) ;
17940+ DST(movl %ebx, %es:x(%edi)) ;
17941
17942 #define ARGBASE 12
17943-
17944-ENTRY(csum_partial_copy_generic)
17945+
17946+ENTRY(csum_partial_copy_generic_to_user)
17947 CFI_STARTPROC
17948+
17949+#ifdef CONFIG_PAX_MEMORY_UDEREF
17950+ pushl_cfi %gs
17951+ popl_cfi %es
17952+ jmp csum_partial_copy_generic
17953+#endif
17954+
17955+ENTRY(csum_partial_copy_generic_from_user)
17956+
17957+#ifdef CONFIG_PAX_MEMORY_UDEREF
17958+ pushl_cfi %gs
17959+ popl_cfi %ds
17960+#endif
17961+
17962+ENTRY(csum_partial_copy_generic)
17963 pushl_cfi %ebx
17964 CFI_REL_OFFSET ebx, 0
17965 pushl_cfi %edi
17966@@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
17967 subl %ebx, %edi
17968 lea -1(%esi),%edx
17969 andl $-32,%edx
17970- lea 3f(%ebx,%ebx), %ebx
17971+ lea 3f(%ebx,%ebx,2), %ebx
17972 testl %esi, %esi
17973 jmp *%ebx
17974 1: addl $64,%esi
17975@@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
17976 jb 5f
17977 SRC( movw (%esi), %dx )
17978 leal 2(%esi), %esi
17979-DST( movw %dx, (%edi) )
17980+DST( movw %dx, %es:(%edi) )
17981 leal 2(%edi), %edi
17982 je 6f
17983 shll $16,%edx
17984 5:
17985 SRC( movb (%esi), %dl )
17986-DST( movb %dl, (%edi) )
17987+DST( movb %dl, %es:(%edi) )
17988 6: addl %edx, %eax
17989 adcl $0, %eax
17990 7:
17991 .section .fixup, "ax"
17992 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
17993- movl $-EFAULT, (%ebx)
17994+ movl $-EFAULT, %ss:(%ebx)
17995 # zero the complete destination (computing the rest is too much work)
17996 movl ARGBASE+8(%esp),%edi # dst
17997 movl ARGBASE+12(%esp),%ecx # len
17998@@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
17999 rep; stosb
18000 jmp 7b
18001 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
18002- movl $-EFAULT, (%ebx)
18003+ movl $-EFAULT, %ss:(%ebx)
18004 jmp 7b
18005 .previous
18006
18007+#ifdef CONFIG_PAX_MEMORY_UDEREF
18008+ pushl_cfi %ss
18009+ popl_cfi %ds
18010+ pushl_cfi %ss
18011+ popl_cfi %es
18012+#endif
18013+
18014 popl_cfi %esi
18015 CFI_RESTORE esi
18016 popl_cfi %edi
18017@@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
18018 CFI_RESTORE ebx
18019 ret
18020 CFI_ENDPROC
18021-ENDPROC(csum_partial_copy_generic)
18022+ENDPROC(csum_partial_copy_generic_to_user)
18023
18024 #undef ROUND
18025 #undef ROUND1
18026diff -urNp linux-3.1.1/arch/x86/lib/clear_page_64.S linux-3.1.1/arch/x86/lib/clear_page_64.S
18027--- linux-3.1.1/arch/x86/lib/clear_page_64.S 2011-11-11 15:19:27.000000000 -0500
18028+++ linux-3.1.1/arch/x86/lib/clear_page_64.S 2011-11-16 18:39:07.000000000 -0500
18029@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
18030 movl $4096/8,%ecx
18031 xorl %eax,%eax
18032 rep stosq
18033+ pax_force_retaddr
18034 ret
18035 CFI_ENDPROC
18036 ENDPROC(clear_page_c)
18037@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
18038 movl $4096,%ecx
18039 xorl %eax,%eax
18040 rep stosb
18041+ pax_force_retaddr
18042 ret
18043 CFI_ENDPROC
18044 ENDPROC(clear_page_c_e)
18045@@ -43,6 +45,7 @@ ENTRY(clear_page)
18046 leaq 64(%rdi),%rdi
18047 jnz .Lloop
18048 nop
18049+ pax_force_retaddr
18050 ret
18051 CFI_ENDPROC
18052 .Lclear_page_end:
18053@@ -58,7 +61,7 @@ ENDPROC(clear_page)
18054
18055 #include <asm/cpufeature.h>
18056
18057- .section .altinstr_replacement,"ax"
18058+ .section .altinstr_replacement,"a"
18059 1: .byte 0xeb /* jmp <disp8> */
18060 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
18061 2: .byte 0xeb /* jmp <disp8> */
18062diff -urNp linux-3.1.1/arch/x86/lib/cmpxchg16b_emu.S linux-3.1.1/arch/x86/lib/cmpxchg16b_emu.S
18063--- linux-3.1.1/arch/x86/lib/cmpxchg16b_emu.S 2011-11-11 15:19:27.000000000 -0500
18064+++ linux-3.1.1/arch/x86/lib/cmpxchg16b_emu.S 2011-11-16 18:39:07.000000000 -0500
18065@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
18066
18067 popf
18068 mov $1, %al
18069+ pax_force_retaddr
18070 ret
18071
18072 not_same:
18073 popf
18074 xor %al,%al
18075+ pax_force_retaddr
18076 ret
18077
18078 CFI_ENDPROC
18079diff -urNp linux-3.1.1/arch/x86/lib/copy_page_64.S linux-3.1.1/arch/x86/lib/copy_page_64.S
18080--- linux-3.1.1/arch/x86/lib/copy_page_64.S 2011-11-11 15:19:27.000000000 -0500
18081+++ linux-3.1.1/arch/x86/lib/copy_page_64.S 2011-11-16 18:39:07.000000000 -0500
18082@@ -9,6 +9,7 @@ copy_page_c:
18083 CFI_STARTPROC
18084 movl $4096/8,%ecx
18085 rep movsq
18086+ pax_force_retaddr
18087 ret
18088 CFI_ENDPROC
18089 ENDPROC(copy_page_c)
18090@@ -95,6 +96,7 @@ ENTRY(copy_page)
18091 CFI_RESTORE r13
18092 addq $3*8,%rsp
18093 CFI_ADJUST_CFA_OFFSET -3*8
18094+ pax_force_retaddr
18095 ret
18096 .Lcopy_page_end:
18097 CFI_ENDPROC
18098@@ -105,7 +107,7 @@ ENDPROC(copy_page)
18099
18100 #include <asm/cpufeature.h>
18101
18102- .section .altinstr_replacement,"ax"
18103+ .section .altinstr_replacement,"a"
18104 1: .byte 0xeb /* jmp <disp8> */
18105 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
18106 2:
18107diff -urNp linux-3.1.1/arch/x86/lib/copy_user_64.S linux-3.1.1/arch/x86/lib/copy_user_64.S
18108--- linux-3.1.1/arch/x86/lib/copy_user_64.S 2011-11-11 15:19:27.000000000 -0500
18109+++ linux-3.1.1/arch/x86/lib/copy_user_64.S 2011-11-16 18:39:07.000000000 -0500
18110@@ -16,6 +16,7 @@
18111 #include <asm/thread_info.h>
18112 #include <asm/cpufeature.h>
18113 #include <asm/alternative-asm.h>
18114+#include <asm/pgtable.h>
18115
18116 /*
18117 * By placing feature2 after feature1 in altinstructions section, we logically
18118@@ -29,7 +30,7 @@
18119 .byte 0xe9 /* 32bit jump */
18120 .long \orig-1f /* by default jump to orig */
18121 1:
18122- .section .altinstr_replacement,"ax"
18123+ .section .altinstr_replacement,"a"
18124 2: .byte 0xe9 /* near jump with 32bit immediate */
18125 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
18126 3: .byte 0xe9 /* near jump with 32bit immediate */
18127@@ -71,47 +72,20 @@
18128 #endif
18129 .endm
18130
18131-/* Standard copy_to_user with segment limit checking */
18132-ENTRY(_copy_to_user)
18133- CFI_STARTPROC
18134- GET_THREAD_INFO(%rax)
18135- movq %rdi,%rcx
18136- addq %rdx,%rcx
18137- jc bad_to_user
18138- cmpq TI_addr_limit(%rax),%rcx
18139- ja bad_to_user
18140- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
18141- copy_user_generic_unrolled,copy_user_generic_string, \
18142- copy_user_enhanced_fast_string
18143- CFI_ENDPROC
18144-ENDPROC(_copy_to_user)
18145-
18146-/* Standard copy_from_user with segment limit checking */
18147-ENTRY(_copy_from_user)
18148- CFI_STARTPROC
18149- GET_THREAD_INFO(%rax)
18150- movq %rsi,%rcx
18151- addq %rdx,%rcx
18152- jc bad_from_user
18153- cmpq TI_addr_limit(%rax),%rcx
18154- ja bad_from_user
18155- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
18156- copy_user_generic_unrolled,copy_user_generic_string, \
18157- copy_user_enhanced_fast_string
18158- CFI_ENDPROC
18159-ENDPROC(_copy_from_user)
18160-
18161 .section .fixup,"ax"
18162 /* must zero dest */
18163 ENTRY(bad_from_user)
18164 bad_from_user:
18165 CFI_STARTPROC
18166+ testl %edx,%edx
18167+ js bad_to_user
18168 movl %edx,%ecx
18169 xorl %eax,%eax
18170 rep
18171 stosb
18172 bad_to_user:
18173 movl %edx,%eax
18174+ pax_force_retaddr
18175 ret
18176 CFI_ENDPROC
18177 ENDPROC(bad_from_user)
18178@@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
18179 decl %ecx
18180 jnz 21b
18181 23: xor %eax,%eax
18182+ pax_force_retaddr
18183 ret
18184
18185 .section .fixup,"ax"
18186@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
18187 3: rep
18188 movsb
18189 4: xorl %eax,%eax
18190+ pax_force_retaddr
18191 ret
18192
18193 .section .fixup,"ax"
18194@@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
18195 1: rep
18196 movsb
18197 2: xorl %eax,%eax
18198+ pax_force_retaddr
18199 ret
18200
18201 .section .fixup,"ax"
18202diff -urNp linux-3.1.1/arch/x86/lib/copy_user_nocache_64.S linux-3.1.1/arch/x86/lib/copy_user_nocache_64.S
18203--- linux-3.1.1/arch/x86/lib/copy_user_nocache_64.S 2011-11-11 15:19:27.000000000 -0500
18204+++ linux-3.1.1/arch/x86/lib/copy_user_nocache_64.S 2011-11-16 18:39:07.000000000 -0500
18205@@ -8,12 +8,14 @@
18206
18207 #include <linux/linkage.h>
18208 #include <asm/dwarf2.h>
18209+#include <asm/alternative-asm.h>
18210
18211 #define FIX_ALIGNMENT 1
18212
18213 #include <asm/current.h>
18214 #include <asm/asm-offsets.h>
18215 #include <asm/thread_info.h>
18216+#include <asm/pgtable.h>
18217
18218 .macro ALIGN_DESTINATION
18219 #ifdef FIX_ALIGNMENT
18220@@ -50,6 +52,15 @@
18221 */
18222 ENTRY(__copy_user_nocache)
18223 CFI_STARTPROC
18224+
18225+#ifdef CONFIG_PAX_MEMORY_UDEREF
18226+ mov $PAX_USER_SHADOW_BASE,%rcx
18227+ cmp %rcx,%rsi
18228+ jae 1f
18229+ add %rcx,%rsi
18230+1:
18231+#endif
18232+
18233 cmpl $8,%edx
18234 jb 20f /* less then 8 bytes, go to byte copy loop */
18235 ALIGN_DESTINATION
18236@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
18237 jnz 21b
18238 23: xorl %eax,%eax
18239 sfence
18240+ pax_force_retaddr
18241 ret
18242
18243 .section .fixup,"ax"
18244diff -urNp linux-3.1.1/arch/x86/lib/csum-copy_64.S linux-3.1.1/arch/x86/lib/csum-copy_64.S
18245--- linux-3.1.1/arch/x86/lib/csum-copy_64.S 2011-11-11 15:19:27.000000000 -0500
18246+++ linux-3.1.1/arch/x86/lib/csum-copy_64.S 2011-11-16 18:39:07.000000000 -0500
18247@@ -8,6 +8,7 @@
18248 #include <linux/linkage.h>
18249 #include <asm/dwarf2.h>
18250 #include <asm/errno.h>
18251+#include <asm/alternative-asm.h>
18252
18253 /*
18254 * Checksum copy with exception handling.
18255@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
18256 CFI_RESTORE rbp
18257 addq $7*8, %rsp
18258 CFI_ADJUST_CFA_OFFSET -7*8
18259+ pax_force_retaddr
18260 ret
18261 CFI_RESTORE_STATE
18262
18263diff -urNp linux-3.1.1/arch/x86/lib/csum-wrappers_64.c linux-3.1.1/arch/x86/lib/csum-wrappers_64.c
18264--- linux-3.1.1/arch/x86/lib/csum-wrappers_64.c 2011-11-11 15:19:27.000000000 -0500
18265+++ linux-3.1.1/arch/x86/lib/csum-wrappers_64.c 2011-11-16 18:39:07.000000000 -0500
18266@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void _
18267 len -= 2;
18268 }
18269 }
18270- isum = csum_partial_copy_generic((__force const void *)src,
18271+
18272+#ifdef CONFIG_PAX_MEMORY_UDEREF
18273+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
18274+ src += PAX_USER_SHADOW_BASE;
18275+#endif
18276+
18277+ isum = csum_partial_copy_generic((const void __force_kernel *)src,
18278 dst, len, isum, errp, NULL);
18279 if (unlikely(*errp))
18280 goto out_err;
18281@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *sr
18282 }
18283
18284 *errp = 0;
18285- return csum_partial_copy_generic(src, (void __force *)dst,
18286+
18287+#ifdef CONFIG_PAX_MEMORY_UDEREF
18288+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
18289+ dst += PAX_USER_SHADOW_BASE;
18290+#endif
18291+
18292+ return csum_partial_copy_generic(src, (void __force_kernel *)dst,
18293 len, isum, NULL, errp);
18294 }
18295 EXPORT_SYMBOL(csum_partial_copy_to_user);
18296diff -urNp linux-3.1.1/arch/x86/lib/getuser.S linux-3.1.1/arch/x86/lib/getuser.S
18297--- linux-3.1.1/arch/x86/lib/getuser.S 2011-11-11 15:19:27.000000000 -0500
18298+++ linux-3.1.1/arch/x86/lib/getuser.S 2011-11-16 18:39:07.000000000 -0500
18299@@ -33,15 +33,38 @@
18300 #include <asm/asm-offsets.h>
18301 #include <asm/thread_info.h>
18302 #include <asm/asm.h>
18303+#include <asm/segment.h>
18304+#include <asm/pgtable.h>
18305+#include <asm/alternative-asm.h>
18306+
18307+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18308+#define __copyuser_seg gs;
18309+#else
18310+#define __copyuser_seg
18311+#endif
18312
18313 .text
18314 ENTRY(__get_user_1)
18315 CFI_STARTPROC
18316+
18317+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18318 GET_THREAD_INFO(%_ASM_DX)
18319 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18320 jae bad_get_user
18321-1: movzb (%_ASM_AX),%edx
18322+
18323+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18324+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18325+ cmp %_ASM_DX,%_ASM_AX
18326+ jae 1234f
18327+ add %_ASM_DX,%_ASM_AX
18328+1234:
18329+#endif
18330+
18331+#endif
18332+
18333+1: __copyuser_seg movzb (%_ASM_AX),%edx
18334 xor %eax,%eax
18335+ pax_force_retaddr
18336 ret
18337 CFI_ENDPROC
18338 ENDPROC(__get_user_1)
18339@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
18340 ENTRY(__get_user_2)
18341 CFI_STARTPROC
18342 add $1,%_ASM_AX
18343+
18344+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18345 jc bad_get_user
18346 GET_THREAD_INFO(%_ASM_DX)
18347 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18348 jae bad_get_user
18349-2: movzwl -1(%_ASM_AX),%edx
18350+
18351+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18352+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18353+ cmp %_ASM_DX,%_ASM_AX
18354+ jae 1234f
18355+ add %_ASM_DX,%_ASM_AX
18356+1234:
18357+#endif
18358+
18359+#endif
18360+
18361+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
18362 xor %eax,%eax
18363+ pax_force_retaddr
18364 ret
18365 CFI_ENDPROC
18366 ENDPROC(__get_user_2)
18367@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
18368 ENTRY(__get_user_4)
18369 CFI_STARTPROC
18370 add $3,%_ASM_AX
18371+
18372+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18373 jc bad_get_user
18374 GET_THREAD_INFO(%_ASM_DX)
18375 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18376 jae bad_get_user
18377-3: mov -3(%_ASM_AX),%edx
18378+
18379+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18380+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18381+ cmp %_ASM_DX,%_ASM_AX
18382+ jae 1234f
18383+ add %_ASM_DX,%_ASM_AX
18384+1234:
18385+#endif
18386+
18387+#endif
18388+
18389+3: __copyuser_seg mov -3(%_ASM_AX),%edx
18390 xor %eax,%eax
18391+ pax_force_retaddr
18392 ret
18393 CFI_ENDPROC
18394 ENDPROC(__get_user_4)
18395@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
18396 GET_THREAD_INFO(%_ASM_DX)
18397 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18398 jae bad_get_user
18399+
18400+#ifdef CONFIG_PAX_MEMORY_UDEREF
18401+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18402+ cmp %_ASM_DX,%_ASM_AX
18403+ jae 1234f
18404+ add %_ASM_DX,%_ASM_AX
18405+1234:
18406+#endif
18407+
18408 4: movq -7(%_ASM_AX),%_ASM_DX
18409 xor %eax,%eax
18410+ pax_force_retaddr
18411 ret
18412 CFI_ENDPROC
18413 ENDPROC(__get_user_8)
18414@@ -91,6 +152,7 @@ bad_get_user:
18415 CFI_STARTPROC
18416 xor %edx,%edx
18417 mov $(-EFAULT),%_ASM_AX
18418+ pax_force_retaddr
18419 ret
18420 CFI_ENDPROC
18421 END(bad_get_user)
18422diff -urNp linux-3.1.1/arch/x86/lib/insn.c linux-3.1.1/arch/x86/lib/insn.c
18423--- linux-3.1.1/arch/x86/lib/insn.c 2011-11-11 15:19:27.000000000 -0500
18424+++ linux-3.1.1/arch/x86/lib/insn.c 2011-11-16 18:39:07.000000000 -0500
18425@@ -21,6 +21,11 @@
18426 #include <linux/string.h>
18427 #include <asm/inat.h>
18428 #include <asm/insn.h>
18429+#ifdef __KERNEL__
18430+#include <asm/pgtable_types.h>
18431+#else
18432+#define ktla_ktva(addr) addr
18433+#endif
18434
18435 #define get_next(t, insn) \
18436 ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
18437@@ -40,8 +45,8 @@
18438 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
18439 {
18440 memset(insn, 0, sizeof(*insn));
18441- insn->kaddr = kaddr;
18442- insn->next_byte = kaddr;
18443+ insn->kaddr = ktla_ktva(kaddr);
18444+ insn->next_byte = ktla_ktva(kaddr);
18445 insn->x86_64 = x86_64 ? 1 : 0;
18446 insn->opnd_bytes = 4;
18447 if (x86_64)
18448diff -urNp linux-3.1.1/arch/x86/lib/iomap_copy_64.S linux-3.1.1/arch/x86/lib/iomap_copy_64.S
18449--- linux-3.1.1/arch/x86/lib/iomap_copy_64.S 2011-11-11 15:19:27.000000000 -0500
18450+++ linux-3.1.1/arch/x86/lib/iomap_copy_64.S 2011-11-16 18:39:07.000000000 -0500
18451@@ -17,6 +17,7 @@
18452
18453 #include <linux/linkage.h>
18454 #include <asm/dwarf2.h>
18455+#include <asm/alternative-asm.h>
18456
18457 /*
18458 * override generic version in lib/iomap_copy.c
18459@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
18460 CFI_STARTPROC
18461 movl %edx,%ecx
18462 rep movsd
18463+ pax_force_retaddr
18464 ret
18465 CFI_ENDPROC
18466 ENDPROC(__iowrite32_copy)
18467diff -urNp linux-3.1.1/arch/x86/lib/memcpy_64.S linux-3.1.1/arch/x86/lib/memcpy_64.S
18468--- linux-3.1.1/arch/x86/lib/memcpy_64.S 2011-11-11 15:19:27.000000000 -0500
18469+++ linux-3.1.1/arch/x86/lib/memcpy_64.S 2011-11-16 18:39:07.000000000 -0500
18470@@ -34,6 +34,7 @@
18471 rep movsq
18472 movl %edx, %ecx
18473 rep movsb
18474+ pax_force_retaddr
18475 ret
18476 .Lmemcpy_e:
18477 .previous
18478@@ -51,6 +52,7 @@
18479
18480 movl %edx, %ecx
18481 rep movsb
18482+ pax_force_retaddr
18483 ret
18484 .Lmemcpy_e_e:
18485 .previous
18486@@ -141,6 +143,7 @@ ENTRY(memcpy)
18487 movq %r9, 1*8(%rdi)
18488 movq %r10, -2*8(%rdi, %rdx)
18489 movq %r11, -1*8(%rdi, %rdx)
18490+ pax_force_retaddr
18491 retq
18492 .p2align 4
18493 .Lless_16bytes:
18494@@ -153,6 +156,7 @@ ENTRY(memcpy)
18495 movq -1*8(%rsi, %rdx), %r9
18496 movq %r8, 0*8(%rdi)
18497 movq %r9, -1*8(%rdi, %rdx)
18498+ pax_force_retaddr
18499 retq
18500 .p2align 4
18501 .Lless_8bytes:
18502@@ -166,6 +170,7 @@ ENTRY(memcpy)
18503 movl -4(%rsi, %rdx), %r8d
18504 movl %ecx, (%rdi)
18505 movl %r8d, -4(%rdi, %rdx)
18506+ pax_force_retaddr
18507 retq
18508 .p2align 4
18509 .Lless_3bytes:
18510@@ -183,6 +188,7 @@ ENTRY(memcpy)
18511 jnz .Lloop_1
18512
18513 .Lend:
18514+ pax_force_retaddr
18515 retq
18516 CFI_ENDPROC
18517 ENDPROC(memcpy)
18518diff -urNp linux-3.1.1/arch/x86/lib/memmove_64.S linux-3.1.1/arch/x86/lib/memmove_64.S
18519--- linux-3.1.1/arch/x86/lib/memmove_64.S 2011-11-11 15:19:27.000000000 -0500
18520+++ linux-3.1.1/arch/x86/lib/memmove_64.S 2011-11-16 18:39:07.000000000 -0500
18521@@ -202,6 +202,7 @@ ENTRY(memmove)
18522 movb (%rsi), %r11b
18523 movb %r11b, (%rdi)
18524 13:
18525+ pax_force_retaddr
18526 retq
18527 CFI_ENDPROC
18528
18529@@ -210,6 +211,7 @@ ENTRY(memmove)
18530 /* Forward moving data. */
18531 movq %rdx, %rcx
18532 rep movsb
18533+ pax_force_retaddr
18534 retq
18535 .Lmemmove_end_forward_efs:
18536 .previous
18537diff -urNp linux-3.1.1/arch/x86/lib/memset_64.S linux-3.1.1/arch/x86/lib/memset_64.S
18538--- linux-3.1.1/arch/x86/lib/memset_64.S 2011-11-11 15:19:27.000000000 -0500
18539+++ linux-3.1.1/arch/x86/lib/memset_64.S 2011-11-16 18:39:07.000000000 -0500
18540@@ -31,6 +31,7 @@
18541 movl %r8d,%ecx
18542 rep stosb
18543 movq %r9,%rax
18544+ pax_force_retaddr
18545 ret
18546 .Lmemset_e:
18547 .previous
18548@@ -53,6 +54,7 @@
18549 movl %edx,%ecx
18550 rep stosb
18551 movq %r9,%rax
18552+ pax_force_retaddr
18553 ret
18554 .Lmemset_e_e:
18555 .previous
18556@@ -121,6 +123,7 @@ ENTRY(__memset)
18557
18558 .Lende:
18559 movq %r10,%rax
18560+ pax_force_retaddr
18561 ret
18562
18563 CFI_RESTORE_STATE
18564diff -urNp linux-3.1.1/arch/x86/lib/mmx_32.c linux-3.1.1/arch/x86/lib/mmx_32.c
18565--- linux-3.1.1/arch/x86/lib/mmx_32.c 2011-11-11 15:19:27.000000000 -0500
18566+++ linux-3.1.1/arch/x86/lib/mmx_32.c 2011-11-16 18:39:07.000000000 -0500
18567@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
18568 {
18569 void *p;
18570 int i;
18571+ unsigned long cr0;
18572
18573 if (unlikely(in_interrupt()))
18574 return __memcpy(to, from, len);
18575@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
18576 kernel_fpu_begin();
18577
18578 __asm__ __volatile__ (
18579- "1: prefetch (%0)\n" /* This set is 28 bytes */
18580- " prefetch 64(%0)\n"
18581- " prefetch 128(%0)\n"
18582- " prefetch 192(%0)\n"
18583- " prefetch 256(%0)\n"
18584+ "1: prefetch (%1)\n" /* This set is 28 bytes */
18585+ " prefetch 64(%1)\n"
18586+ " prefetch 128(%1)\n"
18587+ " prefetch 192(%1)\n"
18588+ " prefetch 256(%1)\n"
18589 "2: \n"
18590 ".section .fixup, \"ax\"\n"
18591- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18592+ "3: \n"
18593+
18594+#ifdef CONFIG_PAX_KERNEXEC
18595+ " movl %%cr0, %0\n"
18596+ " movl %0, %%eax\n"
18597+ " andl $0xFFFEFFFF, %%eax\n"
18598+ " movl %%eax, %%cr0\n"
18599+#endif
18600+
18601+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18602+
18603+#ifdef CONFIG_PAX_KERNEXEC
18604+ " movl %0, %%cr0\n"
18605+#endif
18606+
18607 " jmp 2b\n"
18608 ".previous\n"
18609 _ASM_EXTABLE(1b, 3b)
18610- : : "r" (from));
18611+ : "=&r" (cr0) : "r" (from) : "ax");
18612
18613 for ( ; i > 5; i--) {
18614 __asm__ __volatile__ (
18615- "1: prefetch 320(%0)\n"
18616- "2: movq (%0), %%mm0\n"
18617- " movq 8(%0), %%mm1\n"
18618- " movq 16(%0), %%mm2\n"
18619- " movq 24(%0), %%mm3\n"
18620- " movq %%mm0, (%1)\n"
18621- " movq %%mm1, 8(%1)\n"
18622- " movq %%mm2, 16(%1)\n"
18623- " movq %%mm3, 24(%1)\n"
18624- " movq 32(%0), %%mm0\n"
18625- " movq 40(%0), %%mm1\n"
18626- " movq 48(%0), %%mm2\n"
18627- " movq 56(%0), %%mm3\n"
18628- " movq %%mm0, 32(%1)\n"
18629- " movq %%mm1, 40(%1)\n"
18630- " movq %%mm2, 48(%1)\n"
18631- " movq %%mm3, 56(%1)\n"
18632+ "1: prefetch 320(%1)\n"
18633+ "2: movq (%1), %%mm0\n"
18634+ " movq 8(%1), %%mm1\n"
18635+ " movq 16(%1), %%mm2\n"
18636+ " movq 24(%1), %%mm3\n"
18637+ " movq %%mm0, (%2)\n"
18638+ " movq %%mm1, 8(%2)\n"
18639+ " movq %%mm2, 16(%2)\n"
18640+ " movq %%mm3, 24(%2)\n"
18641+ " movq 32(%1), %%mm0\n"
18642+ " movq 40(%1), %%mm1\n"
18643+ " movq 48(%1), %%mm2\n"
18644+ " movq 56(%1), %%mm3\n"
18645+ " movq %%mm0, 32(%2)\n"
18646+ " movq %%mm1, 40(%2)\n"
18647+ " movq %%mm2, 48(%2)\n"
18648+ " movq %%mm3, 56(%2)\n"
18649 ".section .fixup, \"ax\"\n"
18650- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18651+ "3:\n"
18652+
18653+#ifdef CONFIG_PAX_KERNEXEC
18654+ " movl %%cr0, %0\n"
18655+ " movl %0, %%eax\n"
18656+ " andl $0xFFFEFFFF, %%eax\n"
18657+ " movl %%eax, %%cr0\n"
18658+#endif
18659+
18660+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18661+
18662+#ifdef CONFIG_PAX_KERNEXEC
18663+ " movl %0, %%cr0\n"
18664+#endif
18665+
18666 " jmp 2b\n"
18667 ".previous\n"
18668 _ASM_EXTABLE(1b, 3b)
18669- : : "r" (from), "r" (to) : "memory");
18670+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18671
18672 from += 64;
18673 to += 64;
18674@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
18675 static void fast_copy_page(void *to, void *from)
18676 {
18677 int i;
18678+ unsigned long cr0;
18679
18680 kernel_fpu_begin();
18681
18682@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
18683 * but that is for later. -AV
18684 */
18685 __asm__ __volatile__(
18686- "1: prefetch (%0)\n"
18687- " prefetch 64(%0)\n"
18688- " prefetch 128(%0)\n"
18689- " prefetch 192(%0)\n"
18690- " prefetch 256(%0)\n"
18691+ "1: prefetch (%1)\n"
18692+ " prefetch 64(%1)\n"
18693+ " prefetch 128(%1)\n"
18694+ " prefetch 192(%1)\n"
18695+ " prefetch 256(%1)\n"
18696 "2: \n"
18697 ".section .fixup, \"ax\"\n"
18698- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18699+ "3: \n"
18700+
18701+#ifdef CONFIG_PAX_KERNEXEC
18702+ " movl %%cr0, %0\n"
18703+ " movl %0, %%eax\n"
18704+ " andl $0xFFFEFFFF, %%eax\n"
18705+ " movl %%eax, %%cr0\n"
18706+#endif
18707+
18708+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18709+
18710+#ifdef CONFIG_PAX_KERNEXEC
18711+ " movl %0, %%cr0\n"
18712+#endif
18713+
18714 " jmp 2b\n"
18715 ".previous\n"
18716- _ASM_EXTABLE(1b, 3b) : : "r" (from));
18717+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
18718
18719 for (i = 0; i < (4096-320)/64; i++) {
18720 __asm__ __volatile__ (
18721- "1: prefetch 320(%0)\n"
18722- "2: movq (%0), %%mm0\n"
18723- " movntq %%mm0, (%1)\n"
18724- " movq 8(%0), %%mm1\n"
18725- " movntq %%mm1, 8(%1)\n"
18726- " movq 16(%0), %%mm2\n"
18727- " movntq %%mm2, 16(%1)\n"
18728- " movq 24(%0), %%mm3\n"
18729- " movntq %%mm3, 24(%1)\n"
18730- " movq 32(%0), %%mm4\n"
18731- " movntq %%mm4, 32(%1)\n"
18732- " movq 40(%0), %%mm5\n"
18733- " movntq %%mm5, 40(%1)\n"
18734- " movq 48(%0), %%mm6\n"
18735- " movntq %%mm6, 48(%1)\n"
18736- " movq 56(%0), %%mm7\n"
18737- " movntq %%mm7, 56(%1)\n"
18738+ "1: prefetch 320(%1)\n"
18739+ "2: movq (%1), %%mm0\n"
18740+ " movntq %%mm0, (%2)\n"
18741+ " movq 8(%1), %%mm1\n"
18742+ " movntq %%mm1, 8(%2)\n"
18743+ " movq 16(%1), %%mm2\n"
18744+ " movntq %%mm2, 16(%2)\n"
18745+ " movq 24(%1), %%mm3\n"
18746+ " movntq %%mm3, 24(%2)\n"
18747+ " movq 32(%1), %%mm4\n"
18748+ " movntq %%mm4, 32(%2)\n"
18749+ " movq 40(%1), %%mm5\n"
18750+ " movntq %%mm5, 40(%2)\n"
18751+ " movq 48(%1), %%mm6\n"
18752+ " movntq %%mm6, 48(%2)\n"
18753+ " movq 56(%1), %%mm7\n"
18754+ " movntq %%mm7, 56(%2)\n"
18755 ".section .fixup, \"ax\"\n"
18756- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18757+ "3:\n"
18758+
18759+#ifdef CONFIG_PAX_KERNEXEC
18760+ " movl %%cr0, %0\n"
18761+ " movl %0, %%eax\n"
18762+ " andl $0xFFFEFFFF, %%eax\n"
18763+ " movl %%eax, %%cr0\n"
18764+#endif
18765+
18766+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18767+
18768+#ifdef CONFIG_PAX_KERNEXEC
18769+ " movl %0, %%cr0\n"
18770+#endif
18771+
18772 " jmp 2b\n"
18773 ".previous\n"
18774- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
18775+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18776
18777 from += 64;
18778 to += 64;
18779@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
18780 static void fast_copy_page(void *to, void *from)
18781 {
18782 int i;
18783+ unsigned long cr0;
18784
18785 kernel_fpu_begin();
18786
18787 __asm__ __volatile__ (
18788- "1: prefetch (%0)\n"
18789- " prefetch 64(%0)\n"
18790- " prefetch 128(%0)\n"
18791- " prefetch 192(%0)\n"
18792- " prefetch 256(%0)\n"
18793+ "1: prefetch (%1)\n"
18794+ " prefetch 64(%1)\n"
18795+ " prefetch 128(%1)\n"
18796+ " prefetch 192(%1)\n"
18797+ " prefetch 256(%1)\n"
18798 "2: \n"
18799 ".section .fixup, \"ax\"\n"
18800- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18801+ "3: \n"
18802+
18803+#ifdef CONFIG_PAX_KERNEXEC
18804+ " movl %%cr0, %0\n"
18805+ " movl %0, %%eax\n"
18806+ " andl $0xFFFEFFFF, %%eax\n"
18807+ " movl %%eax, %%cr0\n"
18808+#endif
18809+
18810+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18811+
18812+#ifdef CONFIG_PAX_KERNEXEC
18813+ " movl %0, %%cr0\n"
18814+#endif
18815+
18816 " jmp 2b\n"
18817 ".previous\n"
18818- _ASM_EXTABLE(1b, 3b) : : "r" (from));
18819+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
18820
18821 for (i = 0; i < 4096/64; i++) {
18822 __asm__ __volatile__ (
18823- "1: prefetch 320(%0)\n"
18824- "2: movq (%0), %%mm0\n"
18825- " movq 8(%0), %%mm1\n"
18826- " movq 16(%0), %%mm2\n"
18827- " movq 24(%0), %%mm3\n"
18828- " movq %%mm0, (%1)\n"
18829- " movq %%mm1, 8(%1)\n"
18830- " movq %%mm2, 16(%1)\n"
18831- " movq %%mm3, 24(%1)\n"
18832- " movq 32(%0), %%mm0\n"
18833- " movq 40(%0), %%mm1\n"
18834- " movq 48(%0), %%mm2\n"
18835- " movq 56(%0), %%mm3\n"
18836- " movq %%mm0, 32(%1)\n"
18837- " movq %%mm1, 40(%1)\n"
18838- " movq %%mm2, 48(%1)\n"
18839- " movq %%mm3, 56(%1)\n"
18840+ "1: prefetch 320(%1)\n"
18841+ "2: movq (%1), %%mm0\n"
18842+ " movq 8(%1), %%mm1\n"
18843+ " movq 16(%1), %%mm2\n"
18844+ " movq 24(%1), %%mm3\n"
18845+ " movq %%mm0, (%2)\n"
18846+ " movq %%mm1, 8(%2)\n"
18847+ " movq %%mm2, 16(%2)\n"
18848+ " movq %%mm3, 24(%2)\n"
18849+ " movq 32(%1), %%mm0\n"
18850+ " movq 40(%1), %%mm1\n"
18851+ " movq 48(%1), %%mm2\n"
18852+ " movq 56(%1), %%mm3\n"
18853+ " movq %%mm0, 32(%2)\n"
18854+ " movq %%mm1, 40(%2)\n"
18855+ " movq %%mm2, 48(%2)\n"
18856+ " movq %%mm3, 56(%2)\n"
18857 ".section .fixup, \"ax\"\n"
18858- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18859+ "3:\n"
18860+
18861+#ifdef CONFIG_PAX_KERNEXEC
18862+ " movl %%cr0, %0\n"
18863+ " movl %0, %%eax\n"
18864+ " andl $0xFFFEFFFF, %%eax\n"
18865+ " movl %%eax, %%cr0\n"
18866+#endif
18867+
18868+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18869+
18870+#ifdef CONFIG_PAX_KERNEXEC
18871+ " movl %0, %%cr0\n"
18872+#endif
18873+
18874 " jmp 2b\n"
18875 ".previous\n"
18876 _ASM_EXTABLE(1b, 3b)
18877- : : "r" (from), "r" (to) : "memory");
18878+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18879
18880 from += 64;
18881 to += 64;
18882diff -urNp linux-3.1.1/arch/x86/lib/msr-reg.S linux-3.1.1/arch/x86/lib/msr-reg.S
18883--- linux-3.1.1/arch/x86/lib/msr-reg.S 2011-11-11 15:19:27.000000000 -0500
18884+++ linux-3.1.1/arch/x86/lib/msr-reg.S 2011-11-16 18:39:07.000000000 -0500
18885@@ -3,6 +3,7 @@
18886 #include <asm/dwarf2.h>
18887 #include <asm/asm.h>
18888 #include <asm/msr.h>
18889+#include <asm/alternative-asm.h>
18890
18891 #ifdef CONFIG_X86_64
18892 /*
18893@@ -37,6 +38,7 @@ ENTRY(native_\op\()_safe_regs)
18894 movl %edi, 28(%r10)
18895 popq_cfi %rbp
18896 popq_cfi %rbx
18897+ pax_force_retaddr
18898 ret
18899 3:
18900 CFI_RESTORE_STATE
18901diff -urNp linux-3.1.1/arch/x86/lib/putuser.S linux-3.1.1/arch/x86/lib/putuser.S
18902--- linux-3.1.1/arch/x86/lib/putuser.S 2011-11-11 15:19:27.000000000 -0500
18903+++ linux-3.1.1/arch/x86/lib/putuser.S 2011-11-16 18:39:07.000000000 -0500
18904@@ -15,7 +15,9 @@
18905 #include <asm/thread_info.h>
18906 #include <asm/errno.h>
18907 #include <asm/asm.h>
18908-
18909+#include <asm/segment.h>
18910+#include <asm/pgtable.h>
18911+#include <asm/alternative-asm.h>
18912
18913 /*
18914 * __put_user_X
18915@@ -29,52 +31,119 @@
18916 * as they get called from within inline assembly.
18917 */
18918
18919-#define ENTER CFI_STARTPROC ; \
18920- GET_THREAD_INFO(%_ASM_BX)
18921-#define EXIT ret ; \
18922+#define ENTER CFI_STARTPROC
18923+#define EXIT pax_force_retaddr; ret ; \
18924 CFI_ENDPROC
18925
18926+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18927+#define _DEST %_ASM_CX,%_ASM_BX
18928+#else
18929+#define _DEST %_ASM_CX
18930+#endif
18931+
18932+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18933+#define __copyuser_seg gs;
18934+#else
18935+#define __copyuser_seg
18936+#endif
18937+
18938 .text
18939 ENTRY(__put_user_1)
18940 ENTER
18941+
18942+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18943+ GET_THREAD_INFO(%_ASM_BX)
18944 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
18945 jae bad_put_user
18946-1: movb %al,(%_ASM_CX)
18947+
18948+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18949+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18950+ cmp %_ASM_BX,%_ASM_CX
18951+ jb 1234f
18952+ xor %ebx,%ebx
18953+1234:
18954+#endif
18955+
18956+#endif
18957+
18958+1: __copyuser_seg movb %al,(_DEST)
18959 xor %eax,%eax
18960 EXIT
18961 ENDPROC(__put_user_1)
18962
18963 ENTRY(__put_user_2)
18964 ENTER
18965+
18966+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18967+ GET_THREAD_INFO(%_ASM_BX)
18968 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18969 sub $1,%_ASM_BX
18970 cmp %_ASM_BX,%_ASM_CX
18971 jae bad_put_user
18972-2: movw %ax,(%_ASM_CX)
18973+
18974+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18975+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18976+ cmp %_ASM_BX,%_ASM_CX
18977+ jb 1234f
18978+ xor %ebx,%ebx
18979+1234:
18980+#endif
18981+
18982+#endif
18983+
18984+2: __copyuser_seg movw %ax,(_DEST)
18985 xor %eax,%eax
18986 EXIT
18987 ENDPROC(__put_user_2)
18988
18989 ENTRY(__put_user_4)
18990 ENTER
18991+
18992+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18993+ GET_THREAD_INFO(%_ASM_BX)
18994 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18995 sub $3,%_ASM_BX
18996 cmp %_ASM_BX,%_ASM_CX
18997 jae bad_put_user
18998-3: movl %eax,(%_ASM_CX)
18999+
19000+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19001+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19002+ cmp %_ASM_BX,%_ASM_CX
19003+ jb 1234f
19004+ xor %ebx,%ebx
19005+1234:
19006+#endif
19007+
19008+#endif
19009+
19010+3: __copyuser_seg movl %eax,(_DEST)
19011 xor %eax,%eax
19012 EXIT
19013 ENDPROC(__put_user_4)
19014
19015 ENTRY(__put_user_8)
19016 ENTER
19017+
19018+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19019+ GET_THREAD_INFO(%_ASM_BX)
19020 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19021 sub $7,%_ASM_BX
19022 cmp %_ASM_BX,%_ASM_CX
19023 jae bad_put_user
19024-4: mov %_ASM_AX,(%_ASM_CX)
19025+
19026+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19027+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19028+ cmp %_ASM_BX,%_ASM_CX
19029+ jb 1234f
19030+ xor %ebx,%ebx
19031+1234:
19032+#endif
19033+
19034+#endif
19035+
19036+4: __copyuser_seg mov %_ASM_AX,(_DEST)
19037 #ifdef CONFIG_X86_32
19038-5: movl %edx,4(%_ASM_CX)
19039+5: __copyuser_seg movl %edx,4(_DEST)
19040 #endif
19041 xor %eax,%eax
19042 EXIT
19043diff -urNp linux-3.1.1/arch/x86/lib/rwlock.S linux-3.1.1/arch/x86/lib/rwlock.S
19044--- linux-3.1.1/arch/x86/lib/rwlock.S 2011-11-11 15:19:27.000000000 -0500
19045+++ linux-3.1.1/arch/x86/lib/rwlock.S 2011-11-16 18:39:07.000000000 -0500
19046@@ -23,6 +23,7 @@ ENTRY(__write_lock_failed)
19047 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
19048 jnz 0b
19049 ENDFRAME
19050+ pax_force_retaddr
19051 ret
19052 CFI_ENDPROC
19053 END(__write_lock_failed)
19054@@ -39,6 +40,7 @@ ENTRY(__read_lock_failed)
19055 READ_LOCK_SIZE(dec) (%__lock_ptr)
19056 js 0b
19057 ENDFRAME
19058+ pax_force_retaddr
19059 ret
19060 CFI_ENDPROC
19061 END(__read_lock_failed)
19062diff -urNp linux-3.1.1/arch/x86/lib/rwsem.S linux-3.1.1/arch/x86/lib/rwsem.S
19063--- linux-3.1.1/arch/x86/lib/rwsem.S 2011-11-11 15:19:27.000000000 -0500
19064+++ linux-3.1.1/arch/x86/lib/rwsem.S 2011-11-16 18:39:07.000000000 -0500
19065@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
19066 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
19067 CFI_RESTORE __ASM_REG(dx)
19068 restore_common_regs
19069+ pax_force_retaddr
19070 ret
19071 CFI_ENDPROC
19072 ENDPROC(call_rwsem_down_read_failed)
19073@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
19074 movq %rax,%rdi
19075 call rwsem_down_write_failed
19076 restore_common_regs
19077+ pax_force_retaddr
19078 ret
19079 CFI_ENDPROC
19080 ENDPROC(call_rwsem_down_write_failed)
19081@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
19082 movq %rax,%rdi
19083 call rwsem_wake
19084 restore_common_regs
19085-1: ret
19086+1: pax_force_retaddr
19087+ ret
19088 CFI_ENDPROC
19089 ENDPROC(call_rwsem_wake)
19090
19091@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
19092 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
19093 CFI_RESTORE __ASM_REG(dx)
19094 restore_common_regs
19095+ pax_force_retaddr
19096 ret
19097 CFI_ENDPROC
19098 ENDPROC(call_rwsem_downgrade_wake)
19099diff -urNp linux-3.1.1/arch/x86/lib/thunk_64.S linux-3.1.1/arch/x86/lib/thunk_64.S
19100--- linux-3.1.1/arch/x86/lib/thunk_64.S 2011-11-11 15:19:27.000000000 -0500
19101+++ linux-3.1.1/arch/x86/lib/thunk_64.S 2011-11-16 18:39:07.000000000 -0500
19102@@ -8,6 +8,7 @@
19103 #include <linux/linkage.h>
19104 #include <asm/dwarf2.h>
19105 #include <asm/calling.h>
19106+#include <asm/alternative-asm.h>
19107
19108 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
19109 .macro THUNK name, func, put_ret_addr_in_rdi=0
19110@@ -41,5 +42,6 @@
19111 SAVE_ARGS
19112 restore:
19113 RESTORE_ARGS
19114+ pax_force_retaddr
19115 ret
19116 CFI_ENDPROC
19117diff -urNp linux-3.1.1/arch/x86/lib/usercopy_32.c linux-3.1.1/arch/x86/lib/usercopy_32.c
19118--- linux-3.1.1/arch/x86/lib/usercopy_32.c 2011-11-11 15:19:27.000000000 -0500
19119+++ linux-3.1.1/arch/x86/lib/usercopy_32.c 2011-11-16 18:39:07.000000000 -0500
19120@@ -43,7 +43,7 @@ do { \
19121 __asm__ __volatile__( \
19122 " testl %1,%1\n" \
19123 " jz 2f\n" \
19124- "0: lodsb\n" \
19125+ "0: "__copyuser_seg"lodsb\n" \
19126 " stosb\n" \
19127 " testb %%al,%%al\n" \
19128 " jz 1f\n" \
19129@@ -128,10 +128,12 @@ do { \
19130 int __d0; \
19131 might_fault(); \
19132 __asm__ __volatile__( \
19133+ __COPYUSER_SET_ES \
19134 "0: rep; stosl\n" \
19135 " movl %2,%0\n" \
19136 "1: rep; stosb\n" \
19137 "2:\n" \
19138+ __COPYUSER_RESTORE_ES \
19139 ".section .fixup,\"ax\"\n" \
19140 "3: lea 0(%2,%0,4),%0\n" \
19141 " jmp 2b\n" \
19142@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
19143 might_fault();
19144
19145 __asm__ __volatile__(
19146+ __COPYUSER_SET_ES
19147 " testl %0, %0\n"
19148 " jz 3f\n"
19149 " andl %0,%%ecx\n"
19150@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
19151 " subl %%ecx,%0\n"
19152 " addl %0,%%eax\n"
19153 "1:\n"
19154+ __COPYUSER_RESTORE_ES
19155 ".section .fixup,\"ax\"\n"
19156 "2: xorl %%eax,%%eax\n"
19157 " jmp 1b\n"
19158@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
19159
19160 #ifdef CONFIG_X86_INTEL_USERCOPY
19161 static unsigned long
19162-__copy_user_intel(void __user *to, const void *from, unsigned long size)
19163+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
19164 {
19165 int d0, d1;
19166 __asm__ __volatile__(
19167@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
19168 " .align 2,0x90\n"
19169 "3: movl 0(%4), %%eax\n"
19170 "4: movl 4(%4), %%edx\n"
19171- "5: movl %%eax, 0(%3)\n"
19172- "6: movl %%edx, 4(%3)\n"
19173+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
19174+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
19175 "7: movl 8(%4), %%eax\n"
19176 "8: movl 12(%4),%%edx\n"
19177- "9: movl %%eax, 8(%3)\n"
19178- "10: movl %%edx, 12(%3)\n"
19179+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
19180+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
19181 "11: movl 16(%4), %%eax\n"
19182 "12: movl 20(%4), %%edx\n"
19183- "13: movl %%eax, 16(%3)\n"
19184- "14: movl %%edx, 20(%3)\n"
19185+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
19186+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
19187 "15: movl 24(%4), %%eax\n"
19188 "16: movl 28(%4), %%edx\n"
19189- "17: movl %%eax, 24(%3)\n"
19190- "18: movl %%edx, 28(%3)\n"
19191+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
19192+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
19193 "19: movl 32(%4), %%eax\n"
19194 "20: movl 36(%4), %%edx\n"
19195- "21: movl %%eax, 32(%3)\n"
19196- "22: movl %%edx, 36(%3)\n"
19197+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
19198+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
19199 "23: movl 40(%4), %%eax\n"
19200 "24: movl 44(%4), %%edx\n"
19201- "25: movl %%eax, 40(%3)\n"
19202- "26: movl %%edx, 44(%3)\n"
19203+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
19204+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
19205 "27: movl 48(%4), %%eax\n"
19206 "28: movl 52(%4), %%edx\n"
19207- "29: movl %%eax, 48(%3)\n"
19208- "30: movl %%edx, 52(%3)\n"
19209+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
19210+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
19211 "31: movl 56(%4), %%eax\n"
19212 "32: movl 60(%4), %%edx\n"
19213- "33: movl %%eax, 56(%3)\n"
19214- "34: movl %%edx, 60(%3)\n"
19215+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
19216+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
19217 " addl $-64, %0\n"
19218 " addl $64, %4\n"
19219 " addl $64, %3\n"
19220@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
19221 " shrl $2, %0\n"
19222 " andl $3, %%eax\n"
19223 " cld\n"
19224+ __COPYUSER_SET_ES
19225 "99: rep; movsl\n"
19226 "36: movl %%eax, %0\n"
19227 "37: rep; movsb\n"
19228 "100:\n"
19229+ __COPYUSER_RESTORE_ES
19230+ ".section .fixup,\"ax\"\n"
19231+ "101: lea 0(%%eax,%0,4),%0\n"
19232+ " jmp 100b\n"
19233+ ".previous\n"
19234+ ".section __ex_table,\"a\"\n"
19235+ " .align 4\n"
19236+ " .long 1b,100b\n"
19237+ " .long 2b,100b\n"
19238+ " .long 3b,100b\n"
19239+ " .long 4b,100b\n"
19240+ " .long 5b,100b\n"
19241+ " .long 6b,100b\n"
19242+ " .long 7b,100b\n"
19243+ " .long 8b,100b\n"
19244+ " .long 9b,100b\n"
19245+ " .long 10b,100b\n"
19246+ " .long 11b,100b\n"
19247+ " .long 12b,100b\n"
19248+ " .long 13b,100b\n"
19249+ " .long 14b,100b\n"
19250+ " .long 15b,100b\n"
19251+ " .long 16b,100b\n"
19252+ " .long 17b,100b\n"
19253+ " .long 18b,100b\n"
19254+ " .long 19b,100b\n"
19255+ " .long 20b,100b\n"
19256+ " .long 21b,100b\n"
19257+ " .long 22b,100b\n"
19258+ " .long 23b,100b\n"
19259+ " .long 24b,100b\n"
19260+ " .long 25b,100b\n"
19261+ " .long 26b,100b\n"
19262+ " .long 27b,100b\n"
19263+ " .long 28b,100b\n"
19264+ " .long 29b,100b\n"
19265+ " .long 30b,100b\n"
19266+ " .long 31b,100b\n"
19267+ " .long 32b,100b\n"
19268+ " .long 33b,100b\n"
19269+ " .long 34b,100b\n"
19270+ " .long 35b,100b\n"
19271+ " .long 36b,100b\n"
19272+ " .long 37b,100b\n"
19273+ " .long 99b,101b\n"
19274+ ".previous"
19275+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
19276+ : "1"(to), "2"(from), "0"(size)
19277+ : "eax", "edx", "memory");
19278+ return size;
19279+}
19280+
19281+static unsigned long
19282+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
19283+{
19284+ int d0, d1;
19285+ __asm__ __volatile__(
19286+ " .align 2,0x90\n"
19287+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
19288+ " cmpl $67, %0\n"
19289+ " jbe 3f\n"
19290+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
19291+ " .align 2,0x90\n"
19292+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
19293+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
19294+ "5: movl %%eax, 0(%3)\n"
19295+ "6: movl %%edx, 4(%3)\n"
19296+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
19297+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
19298+ "9: movl %%eax, 8(%3)\n"
19299+ "10: movl %%edx, 12(%3)\n"
19300+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
19301+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
19302+ "13: movl %%eax, 16(%3)\n"
19303+ "14: movl %%edx, 20(%3)\n"
19304+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
19305+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
19306+ "17: movl %%eax, 24(%3)\n"
19307+ "18: movl %%edx, 28(%3)\n"
19308+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
19309+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
19310+ "21: movl %%eax, 32(%3)\n"
19311+ "22: movl %%edx, 36(%3)\n"
19312+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
19313+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
19314+ "25: movl %%eax, 40(%3)\n"
19315+ "26: movl %%edx, 44(%3)\n"
19316+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
19317+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
19318+ "29: movl %%eax, 48(%3)\n"
19319+ "30: movl %%edx, 52(%3)\n"
19320+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
19321+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
19322+ "33: movl %%eax, 56(%3)\n"
19323+ "34: movl %%edx, 60(%3)\n"
19324+ " addl $-64, %0\n"
19325+ " addl $64, %4\n"
19326+ " addl $64, %3\n"
19327+ " cmpl $63, %0\n"
19328+ " ja 1b\n"
19329+ "35: movl %0, %%eax\n"
19330+ " shrl $2, %0\n"
19331+ " andl $3, %%eax\n"
19332+ " cld\n"
19333+ "99: rep; "__copyuser_seg" movsl\n"
19334+ "36: movl %%eax, %0\n"
19335+ "37: rep; "__copyuser_seg" movsb\n"
19336+ "100:\n"
19337 ".section .fixup,\"ax\"\n"
19338 "101: lea 0(%%eax,%0,4),%0\n"
19339 " jmp 100b\n"
19340@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
19341 int d0, d1;
19342 __asm__ __volatile__(
19343 " .align 2,0x90\n"
19344- "0: movl 32(%4), %%eax\n"
19345+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19346 " cmpl $67, %0\n"
19347 " jbe 2f\n"
19348- "1: movl 64(%4), %%eax\n"
19349+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19350 " .align 2,0x90\n"
19351- "2: movl 0(%4), %%eax\n"
19352- "21: movl 4(%4), %%edx\n"
19353+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19354+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19355 " movl %%eax, 0(%3)\n"
19356 " movl %%edx, 4(%3)\n"
19357- "3: movl 8(%4), %%eax\n"
19358- "31: movl 12(%4),%%edx\n"
19359+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19360+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19361 " movl %%eax, 8(%3)\n"
19362 " movl %%edx, 12(%3)\n"
19363- "4: movl 16(%4), %%eax\n"
19364- "41: movl 20(%4), %%edx\n"
19365+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19366+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19367 " movl %%eax, 16(%3)\n"
19368 " movl %%edx, 20(%3)\n"
19369- "10: movl 24(%4), %%eax\n"
19370- "51: movl 28(%4), %%edx\n"
19371+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19372+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19373 " movl %%eax, 24(%3)\n"
19374 " movl %%edx, 28(%3)\n"
19375- "11: movl 32(%4), %%eax\n"
19376- "61: movl 36(%4), %%edx\n"
19377+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19378+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19379 " movl %%eax, 32(%3)\n"
19380 " movl %%edx, 36(%3)\n"
19381- "12: movl 40(%4), %%eax\n"
19382- "71: movl 44(%4), %%edx\n"
19383+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19384+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19385 " movl %%eax, 40(%3)\n"
19386 " movl %%edx, 44(%3)\n"
19387- "13: movl 48(%4), %%eax\n"
19388- "81: movl 52(%4), %%edx\n"
19389+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19390+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19391 " movl %%eax, 48(%3)\n"
19392 " movl %%edx, 52(%3)\n"
19393- "14: movl 56(%4), %%eax\n"
19394- "91: movl 60(%4), %%edx\n"
19395+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19396+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19397 " movl %%eax, 56(%3)\n"
19398 " movl %%edx, 60(%3)\n"
19399 " addl $-64, %0\n"
19400@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
19401 " shrl $2, %0\n"
19402 " andl $3, %%eax\n"
19403 " cld\n"
19404- "6: rep; movsl\n"
19405+ "6: rep; "__copyuser_seg" movsl\n"
19406 " movl %%eax,%0\n"
19407- "7: rep; movsb\n"
19408+ "7: rep; "__copyuser_seg" movsb\n"
19409 "8:\n"
19410 ".section .fixup,\"ax\"\n"
19411 "9: lea 0(%%eax,%0,4),%0\n"
19412@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
19413
19414 __asm__ __volatile__(
19415 " .align 2,0x90\n"
19416- "0: movl 32(%4), %%eax\n"
19417+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19418 " cmpl $67, %0\n"
19419 " jbe 2f\n"
19420- "1: movl 64(%4), %%eax\n"
19421+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19422 " .align 2,0x90\n"
19423- "2: movl 0(%4), %%eax\n"
19424- "21: movl 4(%4), %%edx\n"
19425+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19426+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19427 " movnti %%eax, 0(%3)\n"
19428 " movnti %%edx, 4(%3)\n"
19429- "3: movl 8(%4), %%eax\n"
19430- "31: movl 12(%4),%%edx\n"
19431+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19432+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19433 " movnti %%eax, 8(%3)\n"
19434 " movnti %%edx, 12(%3)\n"
19435- "4: movl 16(%4), %%eax\n"
19436- "41: movl 20(%4), %%edx\n"
19437+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19438+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19439 " movnti %%eax, 16(%3)\n"
19440 " movnti %%edx, 20(%3)\n"
19441- "10: movl 24(%4), %%eax\n"
19442- "51: movl 28(%4), %%edx\n"
19443+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19444+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19445 " movnti %%eax, 24(%3)\n"
19446 " movnti %%edx, 28(%3)\n"
19447- "11: movl 32(%4), %%eax\n"
19448- "61: movl 36(%4), %%edx\n"
19449+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19450+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19451 " movnti %%eax, 32(%3)\n"
19452 " movnti %%edx, 36(%3)\n"
19453- "12: movl 40(%4), %%eax\n"
19454- "71: movl 44(%4), %%edx\n"
19455+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19456+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19457 " movnti %%eax, 40(%3)\n"
19458 " movnti %%edx, 44(%3)\n"
19459- "13: movl 48(%4), %%eax\n"
19460- "81: movl 52(%4), %%edx\n"
19461+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19462+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19463 " movnti %%eax, 48(%3)\n"
19464 " movnti %%edx, 52(%3)\n"
19465- "14: movl 56(%4), %%eax\n"
19466- "91: movl 60(%4), %%edx\n"
19467+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19468+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19469 " movnti %%eax, 56(%3)\n"
19470 " movnti %%edx, 60(%3)\n"
19471 " addl $-64, %0\n"
19472@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
19473 " shrl $2, %0\n"
19474 " andl $3, %%eax\n"
19475 " cld\n"
19476- "6: rep; movsl\n"
19477+ "6: rep; "__copyuser_seg" movsl\n"
19478 " movl %%eax,%0\n"
19479- "7: rep; movsb\n"
19480+ "7: rep; "__copyuser_seg" movsb\n"
19481 "8:\n"
19482 ".section .fixup,\"ax\"\n"
19483 "9: lea 0(%%eax,%0,4),%0\n"
19484@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
19485
19486 __asm__ __volatile__(
19487 " .align 2,0x90\n"
19488- "0: movl 32(%4), %%eax\n"
19489+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19490 " cmpl $67, %0\n"
19491 " jbe 2f\n"
19492- "1: movl 64(%4), %%eax\n"
19493+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19494 " .align 2,0x90\n"
19495- "2: movl 0(%4), %%eax\n"
19496- "21: movl 4(%4), %%edx\n"
19497+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19498+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19499 " movnti %%eax, 0(%3)\n"
19500 " movnti %%edx, 4(%3)\n"
19501- "3: movl 8(%4), %%eax\n"
19502- "31: movl 12(%4),%%edx\n"
19503+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19504+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19505 " movnti %%eax, 8(%3)\n"
19506 " movnti %%edx, 12(%3)\n"
19507- "4: movl 16(%4), %%eax\n"
19508- "41: movl 20(%4), %%edx\n"
19509+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19510+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19511 " movnti %%eax, 16(%3)\n"
19512 " movnti %%edx, 20(%3)\n"
19513- "10: movl 24(%4), %%eax\n"
19514- "51: movl 28(%4), %%edx\n"
19515+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19516+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19517 " movnti %%eax, 24(%3)\n"
19518 " movnti %%edx, 28(%3)\n"
19519- "11: movl 32(%4), %%eax\n"
19520- "61: movl 36(%4), %%edx\n"
19521+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19522+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19523 " movnti %%eax, 32(%3)\n"
19524 " movnti %%edx, 36(%3)\n"
19525- "12: movl 40(%4), %%eax\n"
19526- "71: movl 44(%4), %%edx\n"
19527+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19528+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19529 " movnti %%eax, 40(%3)\n"
19530 " movnti %%edx, 44(%3)\n"
19531- "13: movl 48(%4), %%eax\n"
19532- "81: movl 52(%4), %%edx\n"
19533+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19534+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19535 " movnti %%eax, 48(%3)\n"
19536 " movnti %%edx, 52(%3)\n"
19537- "14: movl 56(%4), %%eax\n"
19538- "91: movl 60(%4), %%edx\n"
19539+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19540+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19541 " movnti %%eax, 56(%3)\n"
19542 " movnti %%edx, 60(%3)\n"
19543 " addl $-64, %0\n"
19544@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
19545 " shrl $2, %0\n"
19546 " andl $3, %%eax\n"
19547 " cld\n"
19548- "6: rep; movsl\n"
19549+ "6: rep; "__copyuser_seg" movsl\n"
19550 " movl %%eax,%0\n"
19551- "7: rep; movsb\n"
19552+ "7: rep; "__copyuser_seg" movsb\n"
19553 "8:\n"
19554 ".section .fixup,\"ax\"\n"
19555 "9: lea 0(%%eax,%0,4),%0\n"
19556@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
19557 */
19558 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
19559 unsigned long size);
19560-unsigned long __copy_user_intel(void __user *to, const void *from,
19561+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
19562+ unsigned long size);
19563+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
19564 unsigned long size);
19565 unsigned long __copy_user_zeroing_intel_nocache(void *to,
19566 const void __user *from, unsigned long size);
19567 #endif /* CONFIG_X86_INTEL_USERCOPY */
19568
19569 /* Generic arbitrary sized copy. */
19570-#define __copy_user(to, from, size) \
19571+#define __copy_user(to, from, size, prefix, set, restore) \
19572 do { \
19573 int __d0, __d1, __d2; \
19574 __asm__ __volatile__( \
19575+ set \
19576 " cmp $7,%0\n" \
19577 " jbe 1f\n" \
19578 " movl %1,%0\n" \
19579 " negl %0\n" \
19580 " andl $7,%0\n" \
19581 " subl %0,%3\n" \
19582- "4: rep; movsb\n" \
19583+ "4: rep; "prefix"movsb\n" \
19584 " movl %3,%0\n" \
19585 " shrl $2,%0\n" \
19586 " andl $3,%3\n" \
19587 " .align 2,0x90\n" \
19588- "0: rep; movsl\n" \
19589+ "0: rep; "prefix"movsl\n" \
19590 " movl %3,%0\n" \
19591- "1: rep; movsb\n" \
19592+ "1: rep; "prefix"movsb\n" \
19593 "2:\n" \
19594+ restore \
19595 ".section .fixup,\"ax\"\n" \
19596 "5: addl %3,%0\n" \
19597 " jmp 2b\n" \
19598@@ -682,14 +799,14 @@ do { \
19599 " negl %0\n" \
19600 " andl $7,%0\n" \
19601 " subl %0,%3\n" \
19602- "4: rep; movsb\n" \
19603+ "4: rep; "__copyuser_seg"movsb\n" \
19604 " movl %3,%0\n" \
19605 " shrl $2,%0\n" \
19606 " andl $3,%3\n" \
19607 " .align 2,0x90\n" \
19608- "0: rep; movsl\n" \
19609+ "0: rep; "__copyuser_seg"movsl\n" \
19610 " movl %3,%0\n" \
19611- "1: rep; movsb\n" \
19612+ "1: rep; "__copyuser_seg"movsb\n" \
19613 "2:\n" \
19614 ".section .fixup,\"ax\"\n" \
19615 "5: addl %3,%0\n" \
19616@@ -775,9 +892,9 @@ survive:
19617 }
19618 #endif
19619 if (movsl_is_ok(to, from, n))
19620- __copy_user(to, from, n);
19621+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
19622 else
19623- n = __copy_user_intel(to, from, n);
19624+ n = __generic_copy_to_user_intel(to, from, n);
19625 return n;
19626 }
19627 EXPORT_SYMBOL(__copy_to_user_ll);
19628@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
19629 unsigned long n)
19630 {
19631 if (movsl_is_ok(to, from, n))
19632- __copy_user(to, from, n);
19633+ __copy_user(to, from, n, __copyuser_seg, "", "");
19634 else
19635- n = __copy_user_intel((void __user *)to,
19636- (const void *)from, n);
19637+ n = __generic_copy_from_user_intel(to, from, n);
19638 return n;
19639 }
19640 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
19641@@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocach
19642 if (n > 64 && cpu_has_xmm2)
19643 n = __copy_user_intel_nocache(to, from, n);
19644 else
19645- __copy_user(to, from, n);
19646+ __copy_user(to, from, n, __copyuser_seg, "", "");
19647 #else
19648- __copy_user(to, from, n);
19649+ __copy_user(to, from, n, __copyuser_seg, "", "");
19650 #endif
19651 return n;
19652 }
19653 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
19654
19655-/**
19656- * copy_to_user: - Copy a block of data into user space.
19657- * @to: Destination address, in user space.
19658- * @from: Source address, in kernel space.
19659- * @n: Number of bytes to copy.
19660- *
19661- * Context: User context only. This function may sleep.
19662- *
19663- * Copy data from kernel space to user space.
19664- *
19665- * Returns number of bytes that could not be copied.
19666- * On success, this will be zero.
19667- */
19668-unsigned long
19669-copy_to_user(void __user *to, const void *from, unsigned long n)
19670+void copy_from_user_overflow(void)
19671 {
19672- if (access_ok(VERIFY_WRITE, to, n))
19673- n = __copy_to_user(to, from, n);
19674- return n;
19675+ WARN(1, "Buffer overflow detected!\n");
19676 }
19677-EXPORT_SYMBOL(copy_to_user);
19678+EXPORT_SYMBOL(copy_from_user_overflow);
19679
19680-/**
19681- * copy_from_user: - Copy a block of data from user space.
19682- * @to: Destination address, in kernel space.
19683- * @from: Source address, in user space.
19684- * @n: Number of bytes to copy.
19685- *
19686- * Context: User context only. This function may sleep.
19687- *
19688- * Copy data from user space to kernel space.
19689- *
19690- * Returns number of bytes that could not be copied.
19691- * On success, this will be zero.
19692- *
19693- * If some data could not be copied, this function will pad the copied
19694- * data to the requested size using zero bytes.
19695- */
19696-unsigned long
19697-_copy_from_user(void *to, const void __user *from, unsigned long n)
19698+void copy_to_user_overflow(void)
19699 {
19700- if (access_ok(VERIFY_READ, from, n))
19701- n = __copy_from_user(to, from, n);
19702- else
19703- memset(to, 0, n);
19704- return n;
19705+ WARN(1, "Buffer overflow detected!\n");
19706 }
19707-EXPORT_SYMBOL(_copy_from_user);
19708+EXPORT_SYMBOL(copy_to_user_overflow);
19709
19710-void copy_from_user_overflow(void)
19711+#ifdef CONFIG_PAX_MEMORY_UDEREF
19712+void __set_fs(mm_segment_t x)
19713 {
19714- WARN(1, "Buffer overflow detected!\n");
19715+ switch (x.seg) {
19716+ case 0:
19717+ loadsegment(gs, 0);
19718+ break;
19719+ case TASK_SIZE_MAX:
19720+ loadsegment(gs, __USER_DS);
19721+ break;
19722+ case -1UL:
19723+ loadsegment(gs, __KERNEL_DS);
19724+ break;
19725+ default:
19726+ BUG();
19727+ }
19728+ return;
19729 }
19730-EXPORT_SYMBOL(copy_from_user_overflow);
19731+EXPORT_SYMBOL(__set_fs);
19732+
19733+void set_fs(mm_segment_t x)
19734+{
19735+ current_thread_info()->addr_limit = x;
19736+ __set_fs(x);
19737+}
19738+EXPORT_SYMBOL(set_fs);
19739+#endif
19740diff -urNp linux-3.1.1/arch/x86/lib/usercopy_64.c linux-3.1.1/arch/x86/lib/usercopy_64.c
19741--- linux-3.1.1/arch/x86/lib/usercopy_64.c 2011-11-11 15:19:27.000000000 -0500
19742+++ linux-3.1.1/arch/x86/lib/usercopy_64.c 2011-11-16 18:39:07.000000000 -0500
19743@@ -42,6 +42,12 @@ long
19744 __strncpy_from_user(char *dst, const char __user *src, long count)
19745 {
19746 long res;
19747+
19748+#ifdef CONFIG_PAX_MEMORY_UDEREF
19749+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19750+ src += PAX_USER_SHADOW_BASE;
19751+#endif
19752+
19753 __do_strncpy_from_user(dst, src, count, res);
19754 return res;
19755 }
19756@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
19757 {
19758 long __d0;
19759 might_fault();
19760+
19761+#ifdef CONFIG_PAX_MEMORY_UDEREF
19762+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
19763+ addr += PAX_USER_SHADOW_BASE;
19764+#endif
19765+
19766 /* no memory constraint because it doesn't change any memory gcc knows
19767 about */
19768 asm volatile(
19769@@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
19770
19771 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
19772 {
19773- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
19774- return copy_user_generic((__force void *)to, (__force void *)from, len);
19775- }
19776- return len;
19777+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
19778+
19779+#ifdef CONFIG_PAX_MEMORY_UDEREF
19780+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
19781+ to += PAX_USER_SHADOW_BASE;
19782+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
19783+ from += PAX_USER_SHADOW_BASE;
19784+#endif
19785+
19786+ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
19787+ }
19788+ return len;
19789 }
19790 EXPORT_SYMBOL(copy_in_user);
19791
19792@@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
19793 * it is not necessary to optimize tail handling.
19794 */
19795 unsigned long
19796-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
19797+copy_user_handle_tail(char __user *to, char __user *from, unsigned len, unsigned zerorest)
19798 {
19799 char c;
19800 unsigned zero_len;
19801diff -urNp linux-3.1.1/arch/x86/Makefile linux-3.1.1/arch/x86/Makefile
19802--- linux-3.1.1/arch/x86/Makefile 2011-11-11 15:19:27.000000000 -0500
19803+++ linux-3.1.1/arch/x86/Makefile 2011-11-17 18:30:30.000000000 -0500
19804@@ -46,6 +46,7 @@ else
19805 UTS_MACHINE := x86_64
19806 CHECKFLAGS += -D__x86_64__ -m64
19807
19808+ biarch := $(call cc-option,-m64)
19809 KBUILD_AFLAGS += -m64
19810 KBUILD_CFLAGS += -m64
19811
19812@@ -195,3 +196,12 @@ define archhelp
19813 echo ' FDARGS="..." arguments for the booted kernel'
19814 echo ' FDINITRD=file initrd for the booted kernel'
19815 endef
19816+
19817+define OLD_LD
19818+
19819+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
19820+*** Please upgrade your binutils to 2.18 or newer
19821+endef
19822+
19823+archprepare:
19824+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
19825diff -urNp linux-3.1.1/arch/x86/mm/extable.c linux-3.1.1/arch/x86/mm/extable.c
19826--- linux-3.1.1/arch/x86/mm/extable.c 2011-11-11 15:19:27.000000000 -0500
19827+++ linux-3.1.1/arch/x86/mm/extable.c 2011-11-16 18:39:07.000000000 -0500
19828@@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs
19829 const struct exception_table_entry *fixup;
19830
19831 #ifdef CONFIG_PNPBIOS
19832- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
19833+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
19834 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
19835 extern u32 pnp_bios_is_utter_crap;
19836 pnp_bios_is_utter_crap = 1;
19837diff -urNp linux-3.1.1/arch/x86/mm/fault.c linux-3.1.1/arch/x86/mm/fault.c
19838--- linux-3.1.1/arch/x86/mm/fault.c 2011-11-11 15:19:27.000000000 -0500
19839+++ linux-3.1.1/arch/x86/mm/fault.c 2011-11-16 20:43:50.000000000 -0500
19840@@ -13,11 +13,18 @@
19841 #include <linux/perf_event.h> /* perf_sw_event */
19842 #include <linux/hugetlb.h> /* hstate_index_to_shift */
19843 #include <linux/prefetch.h> /* prefetchw */
19844+#include <linux/unistd.h>
19845+#include <linux/compiler.h>
19846
19847 #include <asm/traps.h> /* dotraplinkage, ... */
19848 #include <asm/pgalloc.h> /* pgd_*(), ... */
19849 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
19850 #include <asm/vsyscall.h>
19851+#include <asm/tlbflush.h>
19852+
19853+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19854+#include <asm/stacktrace.h>
19855+#endif
19856
19857 /*
19858 * Page fault error code bits:
19859@@ -55,7 +62,7 @@ static inline int __kprobes notify_page_
19860 int ret = 0;
19861
19862 /* kprobe_running() needs smp_processor_id() */
19863- if (kprobes_built_in() && !user_mode_vm(regs)) {
19864+ if (kprobes_built_in() && !user_mode(regs)) {
19865 preempt_disable();
19866 if (kprobe_running() && kprobe_fault_handler(regs, 14))
19867 ret = 1;
19868@@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *re
19869 return !instr_lo || (instr_lo>>1) == 1;
19870 case 0x00:
19871 /* Prefetch instruction is 0x0F0D or 0x0F18 */
19872- if (probe_kernel_address(instr, opcode))
19873+ if (user_mode(regs)) {
19874+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
19875+ return 0;
19876+ } else if (probe_kernel_address(instr, opcode))
19877 return 0;
19878
19879 *prefetch = (instr_lo == 0xF) &&
19880@@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsign
19881 while (instr < max_instr) {
19882 unsigned char opcode;
19883
19884- if (probe_kernel_address(instr, opcode))
19885+ if (user_mode(regs)) {
19886+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
19887+ break;
19888+ } else if (probe_kernel_address(instr, opcode))
19889 break;
19890
19891 instr++;
19892@@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int s
19893 force_sig_info(si_signo, &info, tsk);
19894 }
19895
19896+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19897+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
19898+#endif
19899+
19900+#ifdef CONFIG_PAX_EMUTRAMP
19901+static int pax_handle_fetch_fault(struct pt_regs *regs);
19902+#endif
19903+
19904+#ifdef CONFIG_PAX_PAGEEXEC
19905+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
19906+{
19907+ pgd_t *pgd;
19908+ pud_t *pud;
19909+ pmd_t *pmd;
19910+
19911+ pgd = pgd_offset(mm, address);
19912+ if (!pgd_present(*pgd))
19913+ return NULL;
19914+ pud = pud_offset(pgd, address);
19915+ if (!pud_present(*pud))
19916+ return NULL;
19917+ pmd = pmd_offset(pud, address);
19918+ if (!pmd_present(*pmd))
19919+ return NULL;
19920+ return pmd;
19921+}
19922+#endif
19923+
19924 DEFINE_SPINLOCK(pgd_lock);
19925 LIST_HEAD(pgd_list);
19926
19927@@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
19928 for (address = VMALLOC_START & PMD_MASK;
19929 address >= TASK_SIZE && address < FIXADDR_TOP;
19930 address += PMD_SIZE) {
19931+
19932+#ifdef CONFIG_PAX_PER_CPU_PGD
19933+ unsigned long cpu;
19934+#else
19935 struct page *page;
19936+#endif
19937
19938 spin_lock(&pgd_lock);
19939+
19940+#ifdef CONFIG_PAX_PER_CPU_PGD
19941+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
19942+ pgd_t *pgd = get_cpu_pgd(cpu);
19943+ pmd_t *ret;
19944+#else
19945 list_for_each_entry(page, &pgd_list, lru) {
19946+ pgd_t *pgd = page_address(page);
19947 spinlock_t *pgt_lock;
19948 pmd_t *ret;
19949
19950@@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
19951 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
19952
19953 spin_lock(pgt_lock);
19954- ret = vmalloc_sync_one(page_address(page), address);
19955+#endif
19956+
19957+ ret = vmalloc_sync_one(pgd, address);
19958+
19959+#ifndef CONFIG_PAX_PER_CPU_PGD
19960 spin_unlock(pgt_lock);
19961+#endif
19962
19963 if (!ret)
19964 break;
19965@@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fa
19966 * an interrupt in the middle of a task switch..
19967 */
19968 pgd_paddr = read_cr3();
19969+
19970+#ifdef CONFIG_PAX_PER_CPU_PGD
19971+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
19972+#endif
19973+
19974 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
19975 if (!pmd_k)
19976 return -1;
19977@@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fa
19978 * happen within a race in page table update. In the later
19979 * case just flush:
19980 */
19981+
19982+#ifdef CONFIG_PAX_PER_CPU_PGD
19983+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
19984+ pgd = pgd_offset_cpu(smp_processor_id(), address);
19985+#else
19986 pgd = pgd_offset(current->active_mm, address);
19987+#endif
19988+
19989 pgd_ref = pgd_offset_k(address);
19990 if (pgd_none(*pgd_ref))
19991 return -1;
19992@@ -534,7 +604,7 @@ static int is_errata93(struct pt_regs *r
19993 static int is_errata100(struct pt_regs *regs, unsigned long address)
19994 {
19995 #ifdef CONFIG_X86_64
19996- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
19997+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
19998 return 1;
19999 #endif
20000 return 0;
20001@@ -561,7 +631,7 @@ static int is_f00f_bug(struct pt_regs *r
20002 }
20003
20004 static const char nx_warning[] = KERN_CRIT
20005-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
20006+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
20007
20008 static void
20009 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
20010@@ -570,14 +640,25 @@ show_fault_oops(struct pt_regs *regs, un
20011 if (!oops_may_print())
20012 return;
20013
20014- if (error_code & PF_INSTR) {
20015+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
20016 unsigned int level;
20017
20018 pte_t *pte = lookup_address(address, &level);
20019
20020 if (pte && pte_present(*pte) && !pte_exec(*pte))
20021- printk(nx_warning, current_uid());
20022+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
20023+ }
20024+
20025+#ifdef CONFIG_PAX_KERNEXEC
20026+ if (init_mm.start_code <= address && address < init_mm.end_code) {
20027+ if (current->signal->curr_ip)
20028+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
20029+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
20030+ else
20031+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
20032+ current->comm, task_pid_nr(current), current_uid(), current_euid());
20033 }
20034+#endif
20035
20036 printk(KERN_ALERT "BUG: unable to handle kernel ");
20037 if (address < PAGE_SIZE)
20038@@ -733,6 +814,21 @@ __bad_area_nosemaphore(struct pt_regs *r
20039 }
20040 #endif
20041
20042+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20043+ if (pax_is_fetch_fault(regs, error_code, address)) {
20044+
20045+#ifdef CONFIG_PAX_EMUTRAMP
20046+ switch (pax_handle_fetch_fault(regs)) {
20047+ case 2:
20048+ return;
20049+ }
20050+#endif
20051+
20052+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
20053+ do_group_exit(SIGKILL);
20054+ }
20055+#endif
20056+
20057 if (unlikely(show_unhandled_signals))
20058 show_signal_msg(regs, error_code, address, tsk);
20059
20060@@ -829,7 +925,7 @@ do_sigbus(struct pt_regs *regs, unsigned
20061 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
20062 printk(KERN_ERR
20063 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
20064- tsk->comm, tsk->pid, address);
20065+ tsk->comm, task_pid_nr(tsk), address);
20066 code = BUS_MCEERR_AR;
20067 }
20068 #endif
20069@@ -884,6 +980,99 @@ static int spurious_fault_check(unsigned
20070 return 1;
20071 }
20072
20073+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20074+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
20075+{
20076+ pte_t *pte;
20077+ pmd_t *pmd;
20078+ spinlock_t *ptl;
20079+ unsigned char pte_mask;
20080+
20081+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
20082+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
20083+ return 0;
20084+
20085+ /* PaX: it's our fault, let's handle it if we can */
20086+
20087+ /* PaX: take a look at read faults before acquiring any locks */
20088+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
20089+ /* instruction fetch attempt from a protected page in user mode */
20090+ up_read(&mm->mmap_sem);
20091+
20092+#ifdef CONFIG_PAX_EMUTRAMP
20093+ switch (pax_handle_fetch_fault(regs)) {
20094+ case 2:
20095+ return 1;
20096+ }
20097+#endif
20098+
20099+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
20100+ do_group_exit(SIGKILL);
20101+ }
20102+
20103+ pmd = pax_get_pmd(mm, address);
20104+ if (unlikely(!pmd))
20105+ return 0;
20106+
20107+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
20108+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
20109+ pte_unmap_unlock(pte, ptl);
20110+ return 0;
20111+ }
20112+
20113+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
20114+ /* write attempt to a protected page in user mode */
20115+ pte_unmap_unlock(pte, ptl);
20116+ return 0;
20117+ }
20118+
20119+#ifdef CONFIG_SMP
20120+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
20121+#else
20122+ if (likely(address > get_limit(regs->cs)))
20123+#endif
20124+ {
20125+ set_pte(pte, pte_mkread(*pte));
20126+ __flush_tlb_one(address);
20127+ pte_unmap_unlock(pte, ptl);
20128+ up_read(&mm->mmap_sem);
20129+ return 1;
20130+ }
20131+
20132+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
20133+
20134+ /*
20135+ * PaX: fill DTLB with user rights and retry
20136+ */
20137+ __asm__ __volatile__ (
20138+ "orb %2,(%1)\n"
20139+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
20140+/*
20141+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
20142+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
20143+ * page fault when examined during a TLB load attempt. this is true not only
20144+ * for PTEs holding a non-present entry but also present entries that will
20145+ * raise a page fault (such as those set up by PaX, or the copy-on-write
20146+ * mechanism). in effect it means that we do *not* need to flush the TLBs
20147+ * for our target pages since their PTEs are simply not in the TLBs at all.
20148+
20149+ * the best thing in omitting it is that we gain around 15-20% speed in the
20150+ * fast path of the page fault handler and can get rid of tracing since we
20151+ * can no longer flush unintended entries.
20152+ */
20153+ "invlpg (%0)\n"
20154+#endif
20155+ __copyuser_seg"testb $0,(%0)\n"
20156+ "xorb %3,(%1)\n"
20157+ :
20158+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
20159+ : "memory", "cc");
20160+ pte_unmap_unlock(pte, ptl);
20161+ up_read(&mm->mmap_sem);
20162+ return 1;
20163+}
20164+#endif
20165+
20166 /*
20167 * Handle a spurious fault caused by a stale TLB entry.
20168 *
20169@@ -956,6 +1145,9 @@ int show_unhandled_signals = 1;
20170 static inline int
20171 access_error(unsigned long error_code, struct vm_area_struct *vma)
20172 {
20173+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
20174+ return 1;
20175+
20176 if (error_code & PF_WRITE) {
20177 /* write, present and write, not present: */
20178 if (unlikely(!(vma->vm_flags & VM_WRITE)))
20179@@ -989,19 +1181,33 @@ do_page_fault(struct pt_regs *regs, unsi
20180 {
20181 struct vm_area_struct *vma;
20182 struct task_struct *tsk;
20183- unsigned long address;
20184 struct mm_struct *mm;
20185 int fault;
20186 int write = error_code & PF_WRITE;
20187 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
20188 (write ? FAULT_FLAG_WRITE : 0);
20189
20190+ /* Get the faulting address: */
20191+ unsigned long address = read_cr2();
20192+
20193+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20194+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
20195+ if (!search_exception_tables(regs->ip)) {
20196+ bad_area_nosemaphore(regs, error_code, address);
20197+ return;
20198+ }
20199+ if (address < PAX_USER_SHADOW_BASE) {
20200+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
20201+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
20202+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
20203+ } else
20204+ address -= PAX_USER_SHADOW_BASE;
20205+ }
20206+#endif
20207+
20208 tsk = current;
20209 mm = tsk->mm;
20210
20211- /* Get the faulting address: */
20212- address = read_cr2();
20213-
20214 /*
20215 * Detect and handle instructions that would cause a page fault for
20216 * both a tracked kernel page and a userspace page.
20217@@ -1061,7 +1267,7 @@ do_page_fault(struct pt_regs *regs, unsi
20218 * User-mode registers count as a user access even for any
20219 * potential system fault or CPU buglet:
20220 */
20221- if (user_mode_vm(regs)) {
20222+ if (user_mode(regs)) {
20223 local_irq_enable();
20224 error_code |= PF_USER;
20225 } else {
20226@@ -1116,6 +1322,11 @@ retry:
20227 might_sleep();
20228 }
20229
20230+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20231+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
20232+ return;
20233+#endif
20234+
20235 vma = find_vma(mm, address);
20236 if (unlikely(!vma)) {
20237 bad_area(regs, error_code, address);
20238@@ -1127,18 +1338,24 @@ retry:
20239 bad_area(regs, error_code, address);
20240 return;
20241 }
20242- if (error_code & PF_USER) {
20243- /*
20244- * Accessing the stack below %sp is always a bug.
20245- * The large cushion allows instructions like enter
20246- * and pusha to work. ("enter $65535, $31" pushes
20247- * 32 pointers and then decrements %sp by 65535.)
20248- */
20249- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
20250- bad_area(regs, error_code, address);
20251- return;
20252- }
20253+ /*
20254+ * Accessing the stack below %sp is always a bug.
20255+ * The large cushion allows instructions like enter
20256+ * and pusha to work. ("enter $65535, $31" pushes
20257+ * 32 pointers and then decrements %sp by 65535.)
20258+ */
20259+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
20260+ bad_area(regs, error_code, address);
20261+ return;
20262 }
20263+
20264+#ifdef CONFIG_PAX_SEGMEXEC
20265+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
20266+ bad_area(regs, error_code, address);
20267+ return;
20268+ }
20269+#endif
20270+
20271 if (unlikely(expand_stack(vma, address))) {
20272 bad_area(regs, error_code, address);
20273 return;
20274@@ -1193,3 +1410,240 @@ good_area:
20275
20276 up_read(&mm->mmap_sem);
20277 }
20278+
20279+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20280+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
20281+{
20282+ struct mm_struct *mm = current->mm;
20283+ unsigned long ip = regs->ip;
20284+
20285+ if (v8086_mode(regs))
20286+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
20287+
20288+#ifdef CONFIG_PAX_PAGEEXEC
20289+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
20290+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
20291+ return true;
20292+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
20293+ return true;
20294+ return false;
20295+ }
20296+#endif
20297+
20298+#ifdef CONFIG_PAX_SEGMEXEC
20299+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
20300+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
20301+ return true;
20302+ return false;
20303+ }
20304+#endif
20305+
20306+ return false;
20307+}
20308+#endif
20309+
20310+#ifdef CONFIG_PAX_EMUTRAMP
20311+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
20312+{
20313+ int err;
20314+
20315+ do { /* PaX: gcc trampoline emulation #1 */
20316+ unsigned char mov1, mov2;
20317+ unsigned short jmp;
20318+ unsigned int addr1, addr2;
20319+
20320+#ifdef CONFIG_X86_64
20321+ if ((regs->ip + 11) >> 32)
20322+ break;
20323+#endif
20324+
20325+ err = get_user(mov1, (unsigned char __user *)regs->ip);
20326+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
20327+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
20328+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
20329+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
20330+
20331+ if (err)
20332+ break;
20333+
20334+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
20335+ regs->cx = addr1;
20336+ regs->ax = addr2;
20337+ regs->ip = addr2;
20338+ return 2;
20339+ }
20340+ } while (0);
20341+
20342+ do { /* PaX: gcc trampoline emulation #2 */
20343+ unsigned char mov, jmp;
20344+ unsigned int addr1, addr2;
20345+
20346+#ifdef CONFIG_X86_64
20347+ if ((regs->ip + 9) >> 32)
20348+ break;
20349+#endif
20350+
20351+ err = get_user(mov, (unsigned char __user *)regs->ip);
20352+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
20353+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
20354+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
20355+
20356+ if (err)
20357+ break;
20358+
20359+ if (mov == 0xB9 && jmp == 0xE9) {
20360+ regs->cx = addr1;
20361+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
20362+ return 2;
20363+ }
20364+ } while (0);
20365+
20366+ return 1; /* PaX in action */
20367+}
20368+
20369+#ifdef CONFIG_X86_64
20370+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
20371+{
20372+ int err;
20373+
20374+ do { /* PaX: gcc trampoline emulation #1 */
20375+ unsigned short mov1, mov2, jmp1;
20376+ unsigned char jmp2;
20377+ unsigned int addr1;
20378+ unsigned long addr2;
20379+
20380+ err = get_user(mov1, (unsigned short __user *)regs->ip);
20381+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
20382+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
20383+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
20384+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
20385+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
20386+
20387+ if (err)
20388+ break;
20389+
20390+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
20391+ regs->r11 = addr1;
20392+ regs->r10 = addr2;
20393+ regs->ip = addr1;
20394+ return 2;
20395+ }
20396+ } while (0);
20397+
20398+ do { /* PaX: gcc trampoline emulation #2 */
20399+ unsigned short mov1, mov2, jmp1;
20400+ unsigned char jmp2;
20401+ unsigned long addr1, addr2;
20402+
20403+ err = get_user(mov1, (unsigned short __user *)regs->ip);
20404+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
20405+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
20406+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
20407+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
20408+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
20409+
20410+ if (err)
20411+ break;
20412+
20413+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
20414+ regs->r11 = addr1;
20415+ regs->r10 = addr2;
20416+ regs->ip = addr1;
20417+ return 2;
20418+ }
20419+ } while (0);
20420+
20421+ return 1; /* PaX in action */
20422+}
20423+#endif
20424+
20425+/*
20426+ * PaX: decide what to do with offenders (regs->ip = fault address)
20427+ *
20428+ * returns 1 when task should be killed
20429+ * 2 when gcc trampoline was detected
20430+ */
20431+static int pax_handle_fetch_fault(struct pt_regs *regs)
20432+{
20433+ if (v8086_mode(regs))
20434+ return 1;
20435+
20436+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
20437+ return 1;
20438+
20439+#ifdef CONFIG_X86_32
20440+ return pax_handle_fetch_fault_32(regs);
20441+#else
20442+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
20443+ return pax_handle_fetch_fault_32(regs);
20444+ else
20445+ return pax_handle_fetch_fault_64(regs);
20446+#endif
20447+}
20448+#endif
20449+
20450+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20451+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
20452+{
20453+ long i;
20454+
20455+ printk(KERN_ERR "PAX: bytes at PC: ");
20456+ for (i = 0; i < 20; i++) {
20457+ unsigned char c;
20458+ if (get_user(c, (unsigned char __force_user *)pc+i))
20459+ printk(KERN_CONT "?? ");
20460+ else
20461+ printk(KERN_CONT "%02x ", c);
20462+ }
20463+ printk("\n");
20464+
20465+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
20466+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
20467+ unsigned long c;
20468+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
20469+#ifdef CONFIG_X86_32
20470+ printk(KERN_CONT "???????? ");
20471+#else
20472+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
20473+ printk(KERN_CONT "???????? ???????? ");
20474+ else
20475+ printk(KERN_CONT "???????????????? ");
20476+#endif
20477+ } else {
20478+#ifdef CONFIG_X86_64
20479+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
20480+ printk(KERN_CONT "%08x ", (unsigned int)c);
20481+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
20482+ } else
20483+#endif
20484+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
20485+ }
20486+ }
20487+ printk("\n");
20488+}
20489+#endif
20490+
20491+/**
20492+ * probe_kernel_write(): safely attempt to write to a location
20493+ * @dst: address to write to
20494+ * @src: pointer to the data that shall be written
20495+ * @size: size of the data chunk
20496+ *
20497+ * Safely write to address @dst from the buffer at @src. If a kernel fault
20498+ * happens, handle that and return -EFAULT.
20499+ */
20500+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
20501+{
20502+ long ret;
20503+ mm_segment_t old_fs = get_fs();
20504+
20505+ set_fs(KERNEL_DS);
20506+ pagefault_disable();
20507+ pax_open_kernel();
20508+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
20509+ pax_close_kernel();
20510+ pagefault_enable();
20511+ set_fs(old_fs);
20512+
20513+ return ret ? -EFAULT : 0;
20514+}
20515diff -urNp linux-3.1.1/arch/x86/mm/gup.c linux-3.1.1/arch/x86/mm/gup.c
20516--- linux-3.1.1/arch/x86/mm/gup.c 2011-11-11 15:19:27.000000000 -0500
20517+++ linux-3.1.1/arch/x86/mm/gup.c 2011-11-16 18:39:07.000000000 -0500
20518@@ -253,7 +253,7 @@ int __get_user_pages_fast(unsigned long
20519 addr = start;
20520 len = (unsigned long) nr_pages << PAGE_SHIFT;
20521 end = start + len;
20522- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
20523+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
20524 (void __user *)start, len)))
20525 return 0;
20526
20527diff -urNp linux-3.1.1/arch/x86/mm/highmem_32.c linux-3.1.1/arch/x86/mm/highmem_32.c
20528--- linux-3.1.1/arch/x86/mm/highmem_32.c 2011-11-11 15:19:27.000000000 -0500
20529+++ linux-3.1.1/arch/x86/mm/highmem_32.c 2011-11-16 18:39:07.000000000 -0500
20530@@ -44,7 +44,10 @@ void *kmap_atomic_prot(struct page *page
20531 idx = type + KM_TYPE_NR*smp_processor_id();
20532 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
20533 BUG_ON(!pte_none(*(kmap_pte-idx)));
20534+
20535+ pax_open_kernel();
20536 set_pte(kmap_pte-idx, mk_pte(page, prot));
20537+ pax_close_kernel();
20538
20539 return (void *)vaddr;
20540 }
20541diff -urNp linux-3.1.1/arch/x86/mm/hugetlbpage.c linux-3.1.1/arch/x86/mm/hugetlbpage.c
20542--- linux-3.1.1/arch/x86/mm/hugetlbpage.c 2011-11-11 15:19:27.000000000 -0500
20543+++ linux-3.1.1/arch/x86/mm/hugetlbpage.c 2011-11-16 18:39:07.000000000 -0500
20544@@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmappe
20545 struct hstate *h = hstate_file(file);
20546 struct mm_struct *mm = current->mm;
20547 struct vm_area_struct *vma;
20548- unsigned long start_addr;
20549+ unsigned long start_addr, pax_task_size = TASK_SIZE;
20550+
20551+#ifdef CONFIG_PAX_SEGMEXEC
20552+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20553+ pax_task_size = SEGMEXEC_TASK_SIZE;
20554+#endif
20555+
20556+ pax_task_size -= PAGE_SIZE;
20557
20558 if (len > mm->cached_hole_size) {
20559- start_addr = mm->free_area_cache;
20560+ start_addr = mm->free_area_cache;
20561 } else {
20562- start_addr = TASK_UNMAPPED_BASE;
20563- mm->cached_hole_size = 0;
20564+ start_addr = mm->mmap_base;
20565+ mm->cached_hole_size = 0;
20566 }
20567
20568 full_search:
20569@@ -280,26 +287,27 @@ full_search:
20570
20571 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
20572 /* At this point: (!vma || addr < vma->vm_end). */
20573- if (TASK_SIZE - len < addr) {
20574+ if (pax_task_size - len < addr) {
20575 /*
20576 * Start a new search - just in case we missed
20577 * some holes.
20578 */
20579- if (start_addr != TASK_UNMAPPED_BASE) {
20580- start_addr = TASK_UNMAPPED_BASE;
20581+ if (start_addr != mm->mmap_base) {
20582+ start_addr = mm->mmap_base;
20583 mm->cached_hole_size = 0;
20584 goto full_search;
20585 }
20586 return -ENOMEM;
20587 }
20588- if (!vma || addr + len <= vma->vm_start) {
20589- mm->free_area_cache = addr + len;
20590- return addr;
20591- }
20592+ if (check_heap_stack_gap(vma, addr, len))
20593+ break;
20594 if (addr + mm->cached_hole_size < vma->vm_start)
20595 mm->cached_hole_size = vma->vm_start - addr;
20596 addr = ALIGN(vma->vm_end, huge_page_size(h));
20597 }
20598+
20599+ mm->free_area_cache = addr + len;
20600+ return addr;
20601 }
20602
20603 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
20604@@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmappe
20605 {
20606 struct hstate *h = hstate_file(file);
20607 struct mm_struct *mm = current->mm;
20608- struct vm_area_struct *vma, *prev_vma;
20609- unsigned long base = mm->mmap_base, addr = addr0;
20610+ struct vm_area_struct *vma;
20611+ unsigned long base = mm->mmap_base, addr;
20612 unsigned long largest_hole = mm->cached_hole_size;
20613- int first_time = 1;
20614
20615 /* don't allow allocations above current base */
20616 if (mm->free_area_cache > base)
20617@@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmappe
20618 largest_hole = 0;
20619 mm->free_area_cache = base;
20620 }
20621-try_again:
20622+
20623 /* make sure it can fit in the remaining address space */
20624 if (mm->free_area_cache < len)
20625 goto fail;
20626
20627 /* either no address requested or can't fit in requested address hole */
20628- addr = (mm->free_area_cache - len) & huge_page_mask(h);
20629+ addr = (mm->free_area_cache - len);
20630 do {
20631+ addr &= huge_page_mask(h);
20632+ vma = find_vma(mm, addr);
20633 /*
20634 * Lookup failure means no vma is above this address,
20635 * i.e. return with success:
20636- */
20637- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
20638- return addr;
20639-
20640- /*
20641 * new region fits between prev_vma->vm_end and
20642 * vma->vm_start, use it:
20643 */
20644- if (addr + len <= vma->vm_start &&
20645- (!prev_vma || (addr >= prev_vma->vm_end))) {
20646+ if (check_heap_stack_gap(vma, addr, len)) {
20647 /* remember the address as a hint for next time */
20648- mm->cached_hole_size = largest_hole;
20649- return (mm->free_area_cache = addr);
20650- } else {
20651- /* pull free_area_cache down to the first hole */
20652- if (mm->free_area_cache == vma->vm_end) {
20653- mm->free_area_cache = vma->vm_start;
20654- mm->cached_hole_size = largest_hole;
20655- }
20656+ mm->cached_hole_size = largest_hole;
20657+ return (mm->free_area_cache = addr);
20658+ }
20659+ /* pull free_area_cache down to the first hole */
20660+ if (mm->free_area_cache == vma->vm_end) {
20661+ mm->free_area_cache = vma->vm_start;
20662+ mm->cached_hole_size = largest_hole;
20663 }
20664
20665 /* remember the largest hole we saw so far */
20666 if (addr + largest_hole < vma->vm_start)
20667- largest_hole = vma->vm_start - addr;
20668+ largest_hole = vma->vm_start - addr;
20669
20670 /* try just below the current vma->vm_start */
20671- addr = (vma->vm_start - len) & huge_page_mask(h);
20672- } while (len <= vma->vm_start);
20673+ addr = skip_heap_stack_gap(vma, len);
20674+ } while (!IS_ERR_VALUE(addr));
20675
20676 fail:
20677 /*
20678- * if hint left us with no space for the requested
20679- * mapping then try again:
20680- */
20681- if (first_time) {
20682- mm->free_area_cache = base;
20683- largest_hole = 0;
20684- first_time = 0;
20685- goto try_again;
20686- }
20687- /*
20688 * A failed mmap() very likely causes application failure,
20689 * so fall back to the bottom-up function here. This scenario
20690 * can happen with large stack limits and large mmap()
20691 * allocations.
20692 */
20693- mm->free_area_cache = TASK_UNMAPPED_BASE;
20694+
20695+#ifdef CONFIG_PAX_SEGMEXEC
20696+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20697+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
20698+ else
20699+#endif
20700+
20701+ mm->mmap_base = TASK_UNMAPPED_BASE;
20702+
20703+#ifdef CONFIG_PAX_RANDMMAP
20704+ if (mm->pax_flags & MF_PAX_RANDMMAP)
20705+ mm->mmap_base += mm->delta_mmap;
20706+#endif
20707+
20708+ mm->free_area_cache = mm->mmap_base;
20709 mm->cached_hole_size = ~0UL;
20710 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
20711 len, pgoff, flags);
20712@@ -386,6 +392,7 @@ fail:
20713 /*
20714 * Restore the topdown base:
20715 */
20716+ mm->mmap_base = base;
20717 mm->free_area_cache = base;
20718 mm->cached_hole_size = ~0UL;
20719
20720@@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *f
20721 struct hstate *h = hstate_file(file);
20722 struct mm_struct *mm = current->mm;
20723 struct vm_area_struct *vma;
20724+ unsigned long pax_task_size = TASK_SIZE;
20725
20726 if (len & ~huge_page_mask(h))
20727 return -EINVAL;
20728- if (len > TASK_SIZE)
20729+
20730+#ifdef CONFIG_PAX_SEGMEXEC
20731+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20732+ pax_task_size = SEGMEXEC_TASK_SIZE;
20733+#endif
20734+
20735+ pax_task_size -= PAGE_SIZE;
20736+
20737+ if (len > pax_task_size)
20738 return -ENOMEM;
20739
20740 if (flags & MAP_FIXED) {
20741@@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *f
20742 if (addr) {
20743 addr = ALIGN(addr, huge_page_size(h));
20744 vma = find_vma(mm, addr);
20745- if (TASK_SIZE - len >= addr &&
20746- (!vma || addr + len <= vma->vm_start))
20747+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
20748 return addr;
20749 }
20750 if (mm->get_unmapped_area == arch_get_unmapped_area)
20751diff -urNp linux-3.1.1/arch/x86/mm/init_32.c linux-3.1.1/arch/x86/mm/init_32.c
20752--- linux-3.1.1/arch/x86/mm/init_32.c 2011-11-11 15:19:27.000000000 -0500
20753+++ linux-3.1.1/arch/x86/mm/init_32.c 2011-11-16 18:39:07.000000000 -0500
20754@@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
20755 }
20756
20757 /*
20758- * Creates a middle page table and puts a pointer to it in the
20759- * given global directory entry. This only returns the gd entry
20760- * in non-PAE compilation mode, since the middle layer is folded.
20761- */
20762-static pmd_t * __init one_md_table_init(pgd_t *pgd)
20763-{
20764- pud_t *pud;
20765- pmd_t *pmd_table;
20766-
20767-#ifdef CONFIG_X86_PAE
20768- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
20769- if (after_bootmem)
20770- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
20771- else
20772- pmd_table = (pmd_t *)alloc_low_page();
20773- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
20774- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
20775- pud = pud_offset(pgd, 0);
20776- BUG_ON(pmd_table != pmd_offset(pud, 0));
20777-
20778- return pmd_table;
20779- }
20780-#endif
20781- pud = pud_offset(pgd, 0);
20782- pmd_table = pmd_offset(pud, 0);
20783-
20784- return pmd_table;
20785-}
20786-
20787-/*
20788 * Create a page table and place a pointer to it in a middle page
20789 * directory entry:
20790 */
20791@@ -123,13 +93,28 @@ static pte_t * __init one_page_table_ini
20792 page_table = (pte_t *)alloc_low_page();
20793
20794 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
20795+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20796+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
20797+#else
20798 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
20799+#endif
20800 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
20801 }
20802
20803 return pte_offset_kernel(pmd, 0);
20804 }
20805
20806+static pmd_t * __init one_md_table_init(pgd_t *pgd)
20807+{
20808+ pud_t *pud;
20809+ pmd_t *pmd_table;
20810+
20811+ pud = pud_offset(pgd, 0);
20812+ pmd_table = pmd_offset(pud, 0);
20813+
20814+ return pmd_table;
20815+}
20816+
20817 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
20818 {
20819 int pgd_idx = pgd_index(vaddr);
20820@@ -203,6 +188,7 @@ page_table_range_init(unsigned long star
20821 int pgd_idx, pmd_idx;
20822 unsigned long vaddr;
20823 pgd_t *pgd;
20824+ pud_t *pud;
20825 pmd_t *pmd;
20826 pte_t *pte = NULL;
20827
20828@@ -212,8 +198,13 @@ page_table_range_init(unsigned long star
20829 pgd = pgd_base + pgd_idx;
20830
20831 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
20832- pmd = one_md_table_init(pgd);
20833- pmd = pmd + pmd_index(vaddr);
20834+ pud = pud_offset(pgd, vaddr);
20835+ pmd = pmd_offset(pud, vaddr);
20836+
20837+#ifdef CONFIG_X86_PAE
20838+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
20839+#endif
20840+
20841 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
20842 pmd++, pmd_idx++) {
20843 pte = page_table_kmap_check(one_page_table_init(pmd),
20844@@ -225,11 +216,20 @@ page_table_range_init(unsigned long star
20845 }
20846 }
20847
20848-static inline int is_kernel_text(unsigned long addr)
20849+static inline int is_kernel_text(unsigned long start, unsigned long end)
20850 {
20851- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
20852- return 1;
20853- return 0;
20854+ if ((start > ktla_ktva((unsigned long)_etext) ||
20855+ end <= ktla_ktva((unsigned long)_stext)) &&
20856+ (start > ktla_ktva((unsigned long)_einittext) ||
20857+ end <= ktla_ktva((unsigned long)_sinittext)) &&
20858+
20859+#ifdef CONFIG_ACPI_SLEEP
20860+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
20861+#endif
20862+
20863+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
20864+ return 0;
20865+ return 1;
20866 }
20867
20868 /*
20869@@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned lo
20870 unsigned long last_map_addr = end;
20871 unsigned long start_pfn, end_pfn;
20872 pgd_t *pgd_base = swapper_pg_dir;
20873- int pgd_idx, pmd_idx, pte_ofs;
20874+ unsigned int pgd_idx, pmd_idx, pte_ofs;
20875 unsigned long pfn;
20876 pgd_t *pgd;
20877+ pud_t *pud;
20878 pmd_t *pmd;
20879 pte_t *pte;
20880 unsigned pages_2m, pages_4k;
20881@@ -281,8 +282,13 @@ repeat:
20882 pfn = start_pfn;
20883 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
20884 pgd = pgd_base + pgd_idx;
20885- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
20886- pmd = one_md_table_init(pgd);
20887+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
20888+ pud = pud_offset(pgd, 0);
20889+ pmd = pmd_offset(pud, 0);
20890+
20891+#ifdef CONFIG_X86_PAE
20892+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
20893+#endif
20894
20895 if (pfn >= end_pfn)
20896 continue;
20897@@ -294,14 +300,13 @@ repeat:
20898 #endif
20899 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
20900 pmd++, pmd_idx++) {
20901- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
20902+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
20903
20904 /*
20905 * Map with big pages if possible, otherwise
20906 * create normal page tables:
20907 */
20908 if (use_pse) {
20909- unsigned int addr2;
20910 pgprot_t prot = PAGE_KERNEL_LARGE;
20911 /*
20912 * first pass will use the same initial
20913@@ -311,11 +316,7 @@ repeat:
20914 __pgprot(PTE_IDENT_ATTR |
20915 _PAGE_PSE);
20916
20917- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
20918- PAGE_OFFSET + PAGE_SIZE-1;
20919-
20920- if (is_kernel_text(addr) ||
20921- is_kernel_text(addr2))
20922+ if (is_kernel_text(address, address + PMD_SIZE))
20923 prot = PAGE_KERNEL_LARGE_EXEC;
20924
20925 pages_2m++;
20926@@ -332,7 +333,7 @@ repeat:
20927 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
20928 pte += pte_ofs;
20929 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
20930- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
20931+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
20932 pgprot_t prot = PAGE_KERNEL;
20933 /*
20934 * first pass will use the same initial
20935@@ -340,7 +341,7 @@ repeat:
20936 */
20937 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
20938
20939- if (is_kernel_text(addr))
20940+ if (is_kernel_text(address, address + PAGE_SIZE))
20941 prot = PAGE_KERNEL_EXEC;
20942
20943 pages_4k++;
20944@@ -472,7 +473,7 @@ void __init native_pagetable_setup_start
20945
20946 pud = pud_offset(pgd, va);
20947 pmd = pmd_offset(pud, va);
20948- if (!pmd_present(*pmd))
20949+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
20950 break;
20951
20952 pte = pte_offset_kernel(pmd, va);
20953@@ -524,12 +525,10 @@ void __init early_ioremap_page_table_ran
20954
20955 static void __init pagetable_init(void)
20956 {
20957- pgd_t *pgd_base = swapper_pg_dir;
20958-
20959- permanent_kmaps_init(pgd_base);
20960+ permanent_kmaps_init(swapper_pg_dir);
20961 }
20962
20963-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
20964+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
20965 EXPORT_SYMBOL_GPL(__supported_pte_mask);
20966
20967 /* user-defined highmem size */
20968@@ -757,6 +756,12 @@ void __init mem_init(void)
20969
20970 pci_iommu_alloc();
20971
20972+#ifdef CONFIG_PAX_PER_CPU_PGD
20973+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
20974+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20975+ KERNEL_PGD_PTRS);
20976+#endif
20977+
20978 #ifdef CONFIG_FLATMEM
20979 BUG_ON(!mem_map);
20980 #endif
20981@@ -774,7 +779,7 @@ void __init mem_init(void)
20982 set_highmem_pages_init();
20983
20984 codesize = (unsigned long) &_etext - (unsigned long) &_text;
20985- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
20986+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
20987 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
20988
20989 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
20990@@ -815,10 +820,10 @@ void __init mem_init(void)
20991 ((unsigned long)&__init_end -
20992 (unsigned long)&__init_begin) >> 10,
20993
20994- (unsigned long)&_etext, (unsigned long)&_edata,
20995- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
20996+ (unsigned long)&_sdata, (unsigned long)&_edata,
20997+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
20998
20999- (unsigned long)&_text, (unsigned long)&_etext,
21000+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
21001 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
21002
21003 /*
21004@@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
21005 if (!kernel_set_to_readonly)
21006 return;
21007
21008+ start = ktla_ktva(start);
21009 pr_debug("Set kernel text: %lx - %lx for read write\n",
21010 start, start+size);
21011
21012@@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
21013 if (!kernel_set_to_readonly)
21014 return;
21015
21016+ start = ktla_ktva(start);
21017 pr_debug("Set kernel text: %lx - %lx for read only\n",
21018 start, start+size);
21019
21020@@ -938,6 +945,7 @@ void mark_rodata_ro(void)
21021 unsigned long start = PFN_ALIGN(_text);
21022 unsigned long size = PFN_ALIGN(_etext) - start;
21023
21024+ start = ktla_ktva(start);
21025 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
21026 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
21027 size >> 10);
21028diff -urNp linux-3.1.1/arch/x86/mm/init_64.c linux-3.1.1/arch/x86/mm/init_64.c
21029--- linux-3.1.1/arch/x86/mm/init_64.c 2011-11-11 15:19:27.000000000 -0500
21030+++ linux-3.1.1/arch/x86/mm/init_64.c 2011-11-16 18:39:07.000000000 -0500
21031@@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpa
21032 * around without checking the pgd every time.
21033 */
21034
21035-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
21036+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
21037 EXPORT_SYMBOL_GPL(__supported_pte_mask);
21038
21039 int force_personality32;
21040@@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long star
21041
21042 for (address = start; address <= end; address += PGDIR_SIZE) {
21043 const pgd_t *pgd_ref = pgd_offset_k(address);
21044+
21045+#ifdef CONFIG_PAX_PER_CPU_PGD
21046+ unsigned long cpu;
21047+#else
21048 struct page *page;
21049+#endif
21050
21051 if (pgd_none(*pgd_ref))
21052 continue;
21053
21054 spin_lock(&pgd_lock);
21055+
21056+#ifdef CONFIG_PAX_PER_CPU_PGD
21057+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
21058+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
21059+#else
21060 list_for_each_entry(page, &pgd_list, lru) {
21061 pgd_t *pgd;
21062 spinlock_t *pgt_lock;
21063@@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long star
21064 /* the pgt_lock only for Xen */
21065 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
21066 spin_lock(pgt_lock);
21067+#endif
21068
21069 if (pgd_none(*pgd))
21070 set_pgd(pgd, *pgd_ref);
21071@@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long star
21072 BUG_ON(pgd_page_vaddr(*pgd)
21073 != pgd_page_vaddr(*pgd_ref));
21074
21075+#ifndef CONFIG_PAX_PER_CPU_PGD
21076 spin_unlock(pgt_lock);
21077+#endif
21078+
21079 }
21080 spin_unlock(&pgd_lock);
21081 }
21082@@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
21083 pmd = fill_pmd(pud, vaddr);
21084 pte = fill_pte(pmd, vaddr);
21085
21086+ pax_open_kernel();
21087 set_pte(pte, new_pte);
21088+ pax_close_kernel();
21089
21090 /*
21091 * It's enough to flush this one mapping.
21092@@ -262,14 +278,12 @@ static void __init __init_extra_mapping(
21093 pgd = pgd_offset_k((unsigned long)__va(phys));
21094 if (pgd_none(*pgd)) {
21095 pud = (pud_t *) spp_getpage();
21096- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
21097- _PAGE_USER));
21098+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
21099 }
21100 pud = pud_offset(pgd, (unsigned long)__va(phys));
21101 if (pud_none(*pud)) {
21102 pmd = (pmd_t *) spp_getpage();
21103- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
21104- _PAGE_USER));
21105+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
21106 }
21107 pmd = pmd_offset(pud, phys);
21108 BUG_ON(!pmd_none(*pmd));
21109@@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsign
21110 if (pfn >= pgt_buf_top)
21111 panic("alloc_low_page: ran out of memory");
21112
21113- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
21114+ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
21115 clear_page(adr);
21116 *phys = pfn * PAGE_SIZE;
21117 return adr;
21118@@ -346,7 +360,7 @@ static __ref void *map_low_page(void *vi
21119
21120 phys = __pa(virt);
21121 left = phys & (PAGE_SIZE - 1);
21122- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
21123+ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
21124 adr = (void *)(((unsigned long)adr) | left);
21125
21126 return adr;
21127@@ -693,6 +707,12 @@ void __init mem_init(void)
21128
21129 pci_iommu_alloc();
21130
21131+#ifdef CONFIG_PAX_PER_CPU_PGD
21132+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
21133+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21134+ KERNEL_PGD_PTRS);
21135+#endif
21136+
21137 /* clear_bss() already clear the empty_zero_page */
21138
21139 reservedpages = 0;
21140@@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
21141 static struct vm_area_struct gate_vma = {
21142 .vm_start = VSYSCALL_START,
21143 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
21144- .vm_page_prot = PAGE_READONLY_EXEC,
21145- .vm_flags = VM_READ | VM_EXEC
21146+ .vm_page_prot = PAGE_READONLY,
21147+ .vm_flags = VM_READ
21148 };
21149
21150 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
21151@@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long add
21152
21153 const char *arch_vma_name(struct vm_area_struct *vma)
21154 {
21155- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
21156+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
21157 return "[vdso]";
21158 if (vma == &gate_vma)
21159 return "[vsyscall]";
21160diff -urNp linux-3.1.1/arch/x86/mm/init.c linux-3.1.1/arch/x86/mm/init.c
21161--- linux-3.1.1/arch/x86/mm/init.c 2011-11-11 15:19:27.000000000 -0500
21162+++ linux-3.1.1/arch/x86/mm/init.c 2011-11-17 18:31:28.000000000 -0500
21163@@ -31,7 +31,7 @@ int direct_gbpages
21164 static void __init find_early_table_space(unsigned long end, int use_pse,
21165 int use_gbpages)
21166 {
21167- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
21168+ unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
21169 phys_addr_t base;
21170
21171 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
21172@@ -312,8 +312,29 @@ unsigned long __init_refok init_memory_m
21173 */
21174 int devmem_is_allowed(unsigned long pagenr)
21175 {
21176- if (pagenr <= 256)
21177+#ifdef CONFIG_GRKERNSEC_KMEM
21178+ /* allow BDA */
21179+ if (!pagenr)
21180+ return 1;
21181+ /* allow EBDA */
21182+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
21183+ return 1;
21184+#else
21185+ if (!pagenr)
21186+ return 1;
21187+#ifdef CONFIG_VM86
21188+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
21189+ return 1;
21190+#endif
21191+#endif
21192+
21193+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
21194 return 1;
21195+#ifdef CONFIG_GRKERNSEC_KMEM
21196+ /* throw out everything else below 1MB */
21197+ if (pagenr <= 256)
21198+ return 0;
21199+#endif
21200 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
21201 return 0;
21202 if (!page_is_ram(pagenr))
21203@@ -372,6 +393,86 @@ void free_init_pages(char *what, unsigne
21204
21205 void free_initmem(void)
21206 {
21207+
21208+#ifdef CONFIG_PAX_KERNEXEC
21209+#ifdef CONFIG_X86_32
21210+ /* PaX: limit KERNEL_CS to actual size */
21211+ unsigned long addr, limit;
21212+ struct desc_struct d;
21213+ int cpu;
21214+
21215+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
21216+ limit = (limit - 1UL) >> PAGE_SHIFT;
21217+
21218+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
21219+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
21220+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
21221+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
21222+ }
21223+
21224+ /* PaX: make KERNEL_CS read-only */
21225+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
21226+ if (!paravirt_enabled())
21227+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
21228+/*
21229+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
21230+ pgd = pgd_offset_k(addr);
21231+ pud = pud_offset(pgd, addr);
21232+ pmd = pmd_offset(pud, addr);
21233+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21234+ }
21235+*/
21236+#ifdef CONFIG_X86_PAE
21237+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
21238+/*
21239+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
21240+ pgd = pgd_offset_k(addr);
21241+ pud = pud_offset(pgd, addr);
21242+ pmd = pmd_offset(pud, addr);
21243+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
21244+ }
21245+*/
21246+#endif
21247+
21248+#ifdef CONFIG_MODULES
21249+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
21250+#endif
21251+
21252+#else
21253+ pgd_t *pgd;
21254+ pud_t *pud;
21255+ pmd_t *pmd;
21256+ unsigned long addr, end;
21257+
21258+ /* PaX: make kernel code/rodata read-only, rest non-executable */
21259+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
21260+ pgd = pgd_offset_k(addr);
21261+ pud = pud_offset(pgd, addr);
21262+ pmd = pmd_offset(pud, addr);
21263+ if (!pmd_present(*pmd))
21264+ continue;
21265+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
21266+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21267+ else
21268+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
21269+ }
21270+
21271+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
21272+ end = addr + KERNEL_IMAGE_SIZE;
21273+ for (; addr < end; addr += PMD_SIZE) {
21274+ pgd = pgd_offset_k(addr);
21275+ pud = pud_offset(pgd, addr);
21276+ pmd = pmd_offset(pud, addr);
21277+ if (!pmd_present(*pmd))
21278+ continue;
21279+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
21280+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21281+ }
21282+#endif
21283+
21284+ flush_tlb_all();
21285+#endif
21286+
21287 free_init_pages("unused kernel memory",
21288 (unsigned long)(&__init_begin),
21289 (unsigned long)(&__init_end));
21290diff -urNp linux-3.1.1/arch/x86/mm/iomap_32.c linux-3.1.1/arch/x86/mm/iomap_32.c
21291--- linux-3.1.1/arch/x86/mm/iomap_32.c 2011-11-11 15:19:27.000000000 -0500
21292+++ linux-3.1.1/arch/x86/mm/iomap_32.c 2011-11-16 18:39:07.000000000 -0500
21293@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long
21294 type = kmap_atomic_idx_push();
21295 idx = type + KM_TYPE_NR * smp_processor_id();
21296 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
21297+
21298+ pax_open_kernel();
21299 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
21300+ pax_close_kernel();
21301+
21302 arch_flush_lazy_mmu_mode();
21303
21304 return (void *)vaddr;
21305diff -urNp linux-3.1.1/arch/x86/mm/ioremap.c linux-3.1.1/arch/x86/mm/ioremap.c
21306--- linux-3.1.1/arch/x86/mm/ioremap.c 2011-11-11 15:19:27.000000000 -0500
21307+++ linux-3.1.1/arch/x86/mm/ioremap.c 2011-11-16 18:39:07.000000000 -0500
21308@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(re
21309 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
21310 int is_ram = page_is_ram(pfn);
21311
21312- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
21313+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
21314 return NULL;
21315 WARN_ON_ONCE(is_ram);
21316 }
21317@@ -344,7 +344,7 @@ static int __init early_ioremap_debug_se
21318 early_param("early_ioremap_debug", early_ioremap_debug_setup);
21319
21320 static __initdata int after_paging_init;
21321-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
21322+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
21323
21324 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
21325 {
21326@@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
21327 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
21328
21329 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
21330- memset(bm_pte, 0, sizeof(bm_pte));
21331- pmd_populate_kernel(&init_mm, pmd, bm_pte);
21332+ pmd_populate_user(&init_mm, pmd, bm_pte);
21333
21334 /*
21335 * The boot-ioremap range spans multiple pmds, for which
21336diff -urNp linux-3.1.1/arch/x86/mm/kmemcheck/kmemcheck.c linux-3.1.1/arch/x86/mm/kmemcheck/kmemcheck.c
21337--- linux-3.1.1/arch/x86/mm/kmemcheck/kmemcheck.c 2011-11-11 15:19:27.000000000 -0500
21338+++ linux-3.1.1/arch/x86/mm/kmemcheck/kmemcheck.c 2011-11-16 18:39:07.000000000 -0500
21339@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
21340 * memory (e.g. tracked pages)? For now, we need this to avoid
21341 * invoking kmemcheck for PnP BIOS calls.
21342 */
21343- if (regs->flags & X86_VM_MASK)
21344+ if (v8086_mode(regs))
21345 return false;
21346- if (regs->cs != __KERNEL_CS)
21347+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
21348 return false;
21349
21350 pte = kmemcheck_pte_lookup(address);
21351diff -urNp linux-3.1.1/arch/x86/mm/mmap.c linux-3.1.1/arch/x86/mm/mmap.c
21352--- linux-3.1.1/arch/x86/mm/mmap.c 2011-11-11 15:19:27.000000000 -0500
21353+++ linux-3.1.1/arch/x86/mm/mmap.c 2011-11-16 18:39:07.000000000 -0500
21354@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
21355 * Leave an at least ~128 MB hole with possible stack randomization.
21356 */
21357 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
21358-#define MAX_GAP (TASK_SIZE/6*5)
21359+#define MAX_GAP (pax_task_size/6*5)
21360
21361 /*
21362 * True on X86_32 or when emulating IA32 on X86_64
21363@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
21364 return rnd << PAGE_SHIFT;
21365 }
21366
21367-static unsigned long mmap_base(void)
21368+static unsigned long mmap_base(struct mm_struct *mm)
21369 {
21370 unsigned long gap = rlimit(RLIMIT_STACK);
21371+ unsigned long pax_task_size = TASK_SIZE;
21372+
21373+#ifdef CONFIG_PAX_SEGMEXEC
21374+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21375+ pax_task_size = SEGMEXEC_TASK_SIZE;
21376+#endif
21377
21378 if (gap < MIN_GAP)
21379 gap = MIN_GAP;
21380 else if (gap > MAX_GAP)
21381 gap = MAX_GAP;
21382
21383- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
21384+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
21385 }
21386
21387 /*
21388 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
21389 * does, but not when emulating X86_32
21390 */
21391-static unsigned long mmap_legacy_base(void)
21392+static unsigned long mmap_legacy_base(struct mm_struct *mm)
21393 {
21394- if (mmap_is_ia32())
21395+ if (mmap_is_ia32()) {
21396+
21397+#ifdef CONFIG_PAX_SEGMEXEC
21398+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21399+ return SEGMEXEC_TASK_UNMAPPED_BASE;
21400+ else
21401+#endif
21402+
21403 return TASK_UNMAPPED_BASE;
21404- else
21405+ } else
21406 return TASK_UNMAPPED_BASE + mmap_rnd();
21407 }
21408
21409@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
21410 void arch_pick_mmap_layout(struct mm_struct *mm)
21411 {
21412 if (mmap_is_legacy()) {
21413- mm->mmap_base = mmap_legacy_base();
21414+ mm->mmap_base = mmap_legacy_base(mm);
21415+
21416+#ifdef CONFIG_PAX_RANDMMAP
21417+ if (mm->pax_flags & MF_PAX_RANDMMAP)
21418+ mm->mmap_base += mm->delta_mmap;
21419+#endif
21420+
21421 mm->get_unmapped_area = arch_get_unmapped_area;
21422 mm->unmap_area = arch_unmap_area;
21423 } else {
21424- mm->mmap_base = mmap_base();
21425+ mm->mmap_base = mmap_base(mm);
21426+
21427+#ifdef CONFIG_PAX_RANDMMAP
21428+ if (mm->pax_flags & MF_PAX_RANDMMAP)
21429+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
21430+#endif
21431+
21432 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
21433 mm->unmap_area = arch_unmap_area_topdown;
21434 }
21435diff -urNp linux-3.1.1/arch/x86/mm/mmio-mod.c linux-3.1.1/arch/x86/mm/mmio-mod.c
21436--- linux-3.1.1/arch/x86/mm/mmio-mod.c 2011-11-11 15:19:27.000000000 -0500
21437+++ linux-3.1.1/arch/x86/mm/mmio-mod.c 2011-11-16 18:39:07.000000000 -0500
21438@@ -195,7 +195,7 @@ static void pre(struct kmmio_probe *p, s
21439 break;
21440 default:
21441 {
21442- unsigned char *ip = (unsigned char *)instptr;
21443+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
21444 my_trace->opcode = MMIO_UNKNOWN_OP;
21445 my_trace->width = 0;
21446 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
21447@@ -235,7 +235,7 @@ static void post(struct kmmio_probe *p,
21448 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
21449 void __iomem *addr)
21450 {
21451- static atomic_t next_id;
21452+ static atomic_unchecked_t next_id;
21453 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
21454 /* These are page-unaligned. */
21455 struct mmiotrace_map map = {
21456@@ -259,7 +259,7 @@ static void ioremap_trace_core(resource_
21457 .private = trace
21458 },
21459 .phys = offset,
21460- .id = atomic_inc_return(&next_id)
21461+ .id = atomic_inc_return_unchecked(&next_id)
21462 };
21463 map.map_id = trace->id;
21464
21465diff -urNp linux-3.1.1/arch/x86/mm/pageattr.c linux-3.1.1/arch/x86/mm/pageattr.c
21466--- linux-3.1.1/arch/x86/mm/pageattr.c 2011-11-11 15:19:27.000000000 -0500
21467+++ linux-3.1.1/arch/x86/mm/pageattr.c 2011-11-16 18:39:07.000000000 -0500
21468@@ -261,7 +261,7 @@ static inline pgprot_t static_protection
21469 */
21470 #ifdef CONFIG_PCI_BIOS
21471 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
21472- pgprot_val(forbidden) |= _PAGE_NX;
21473+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
21474 #endif
21475
21476 /*
21477@@ -269,9 +269,10 @@ static inline pgprot_t static_protection
21478 * Does not cover __inittext since that is gone later on. On
21479 * 64bit we do not enforce !NX on the low mapping
21480 */
21481- if (within(address, (unsigned long)_text, (unsigned long)_etext))
21482- pgprot_val(forbidden) |= _PAGE_NX;
21483+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
21484+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
21485
21486+#ifdef CONFIG_DEBUG_RODATA
21487 /*
21488 * The .rodata section needs to be read-only. Using the pfn
21489 * catches all aliases.
21490@@ -279,6 +280,7 @@ static inline pgprot_t static_protection
21491 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
21492 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
21493 pgprot_val(forbidden) |= _PAGE_RW;
21494+#endif
21495
21496 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
21497 /*
21498@@ -317,6 +319,13 @@ static inline pgprot_t static_protection
21499 }
21500 #endif
21501
21502+#ifdef CONFIG_PAX_KERNEXEC
21503+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
21504+ pgprot_val(forbidden) |= _PAGE_RW;
21505+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
21506+ }
21507+#endif
21508+
21509 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
21510
21511 return prot;
21512@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
21513 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
21514 {
21515 /* change init_mm */
21516+ pax_open_kernel();
21517 set_pte_atomic(kpte, pte);
21518+
21519 #ifdef CONFIG_X86_32
21520 if (!SHARED_KERNEL_PMD) {
21521+
21522+#ifdef CONFIG_PAX_PER_CPU_PGD
21523+ unsigned long cpu;
21524+#else
21525 struct page *page;
21526+#endif
21527
21528+#ifdef CONFIG_PAX_PER_CPU_PGD
21529+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
21530+ pgd_t *pgd = get_cpu_pgd(cpu);
21531+#else
21532 list_for_each_entry(page, &pgd_list, lru) {
21533- pgd_t *pgd;
21534+ pgd_t *pgd = (pgd_t *)page_address(page);
21535+#endif
21536+
21537 pud_t *pud;
21538 pmd_t *pmd;
21539
21540- pgd = (pgd_t *)page_address(page) + pgd_index(address);
21541+ pgd += pgd_index(address);
21542 pud = pud_offset(pgd, address);
21543 pmd = pmd_offset(pud, address);
21544 set_pte_atomic((pte_t *)pmd, pte);
21545 }
21546 }
21547 #endif
21548+ pax_close_kernel();
21549 }
21550
21551 static int
21552diff -urNp linux-3.1.1/arch/x86/mm/pageattr-test.c linux-3.1.1/arch/x86/mm/pageattr-test.c
21553--- linux-3.1.1/arch/x86/mm/pageattr-test.c 2011-11-11 15:19:27.000000000 -0500
21554+++ linux-3.1.1/arch/x86/mm/pageattr-test.c 2011-11-16 18:39:07.000000000 -0500
21555@@ -36,7 +36,7 @@ enum {
21556
21557 static int pte_testbit(pte_t pte)
21558 {
21559- return pte_flags(pte) & _PAGE_UNUSED1;
21560+ return pte_flags(pte) & _PAGE_CPA_TEST;
21561 }
21562
21563 struct split_state {
21564diff -urNp linux-3.1.1/arch/x86/mm/pat.c linux-3.1.1/arch/x86/mm/pat.c
21565--- linux-3.1.1/arch/x86/mm/pat.c 2011-11-11 15:19:27.000000000 -0500
21566+++ linux-3.1.1/arch/x86/mm/pat.c 2011-11-16 18:39:07.000000000 -0500
21567@@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
21568
21569 if (!entry) {
21570 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
21571- current->comm, current->pid, start, end);
21572+ current->comm, task_pid_nr(current), start, end);
21573 return -EINVAL;
21574 }
21575
21576@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsig
21577 while (cursor < to) {
21578 if (!devmem_is_allowed(pfn)) {
21579 printk(KERN_INFO
21580- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
21581- current->comm, from, to);
21582+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
21583+ current->comm, from, to, cursor);
21584 return 0;
21585 }
21586 cursor += PAGE_SIZE;
21587@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, un
21588 printk(KERN_INFO
21589 "%s:%d ioremap_change_attr failed %s "
21590 "for %Lx-%Lx\n",
21591- current->comm, current->pid,
21592+ current->comm, task_pid_nr(current),
21593 cattr_name(flags),
21594 base, (unsigned long long)(base + size));
21595 return -EINVAL;
21596@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr,
21597 if (want_flags != flags) {
21598 printk(KERN_WARNING
21599 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
21600- current->comm, current->pid,
21601+ current->comm, task_pid_nr(current),
21602 cattr_name(want_flags),
21603 (unsigned long long)paddr,
21604 (unsigned long long)(paddr + size),
21605@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr,
21606 free_memtype(paddr, paddr + size);
21607 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
21608 " for %Lx-%Lx, got %s\n",
21609- current->comm, current->pid,
21610+ current->comm, task_pid_nr(current),
21611 cattr_name(want_flags),
21612 (unsigned long long)paddr,
21613 (unsigned long long)(paddr + size),
21614diff -urNp linux-3.1.1/arch/x86/mm/pf_in.c linux-3.1.1/arch/x86/mm/pf_in.c
21615--- linux-3.1.1/arch/x86/mm/pf_in.c 2011-11-11 15:19:27.000000000 -0500
21616+++ linux-3.1.1/arch/x86/mm/pf_in.c 2011-11-16 18:39:07.000000000 -0500
21617@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
21618 int i;
21619 enum reason_type rv = OTHERS;
21620
21621- p = (unsigned char *)ins_addr;
21622+ p = (unsigned char *)ktla_ktva(ins_addr);
21623 p += skip_prefix(p, &prf);
21624 p += get_opcode(p, &opcode);
21625
21626@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
21627 struct prefix_bits prf;
21628 int i;
21629
21630- p = (unsigned char *)ins_addr;
21631+ p = (unsigned char *)ktla_ktva(ins_addr);
21632 p += skip_prefix(p, &prf);
21633 p += get_opcode(p, &opcode);
21634
21635@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
21636 struct prefix_bits prf;
21637 int i;
21638
21639- p = (unsigned char *)ins_addr;
21640+ p = (unsigned char *)ktla_ktva(ins_addr);
21641 p += skip_prefix(p, &prf);
21642 p += get_opcode(p, &opcode);
21643
21644@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned l
21645 struct prefix_bits prf;
21646 int i;
21647
21648- p = (unsigned char *)ins_addr;
21649+ p = (unsigned char *)ktla_ktva(ins_addr);
21650 p += skip_prefix(p, &prf);
21651 p += get_opcode(p, &opcode);
21652 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
21653@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned l
21654 struct prefix_bits prf;
21655 int i;
21656
21657- p = (unsigned char *)ins_addr;
21658+ p = (unsigned char *)ktla_ktva(ins_addr);
21659 p += skip_prefix(p, &prf);
21660 p += get_opcode(p, &opcode);
21661 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
21662diff -urNp linux-3.1.1/arch/x86/mm/pgtable_32.c linux-3.1.1/arch/x86/mm/pgtable_32.c
21663--- linux-3.1.1/arch/x86/mm/pgtable_32.c 2011-11-11 15:19:27.000000000 -0500
21664+++ linux-3.1.1/arch/x86/mm/pgtable_32.c 2011-11-16 18:39:07.000000000 -0500
21665@@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr,
21666 return;
21667 }
21668 pte = pte_offset_kernel(pmd, vaddr);
21669+
21670+ pax_open_kernel();
21671 if (pte_val(pteval))
21672 set_pte_at(&init_mm, vaddr, pte, pteval);
21673 else
21674 pte_clear(&init_mm, vaddr, pte);
21675+ pax_close_kernel();
21676
21677 /*
21678 * It's enough to flush this one mapping.
21679diff -urNp linux-3.1.1/arch/x86/mm/pgtable.c linux-3.1.1/arch/x86/mm/pgtable.c
21680--- linux-3.1.1/arch/x86/mm/pgtable.c 2011-11-11 15:19:27.000000000 -0500
21681+++ linux-3.1.1/arch/x86/mm/pgtable.c 2011-11-16 18:39:07.000000000 -0500
21682@@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *p
21683 list_del(&page->lru);
21684 }
21685
21686-#define UNSHARED_PTRS_PER_PGD \
21687- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
21688+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21689+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
21690
21691+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
21692+{
21693+ while (count--)
21694+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
21695+}
21696+#endif
21697+
21698+#ifdef CONFIG_PAX_PER_CPU_PGD
21699+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
21700+{
21701+ while (count--)
21702+
21703+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21704+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
21705+#else
21706+ *dst++ = *src++;
21707+#endif
21708
21709+}
21710+#endif
21711+
21712+#ifdef CONFIG_X86_64
21713+#define pxd_t pud_t
21714+#define pyd_t pgd_t
21715+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
21716+#define pxd_free(mm, pud) pud_free((mm), (pud))
21717+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
21718+#define pyd_offset(mm ,address) pgd_offset((mm), (address))
21719+#define PYD_SIZE PGDIR_SIZE
21720+#else
21721+#define pxd_t pmd_t
21722+#define pyd_t pud_t
21723+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
21724+#define pxd_free(mm, pud) pmd_free((mm), (pud))
21725+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
21726+#define pyd_offset(mm ,address) pud_offset((mm), (address))
21727+#define PYD_SIZE PUD_SIZE
21728+#endif
21729+
21730+#ifdef CONFIG_PAX_PER_CPU_PGD
21731+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
21732+static inline void pgd_dtor(pgd_t *pgd) {}
21733+#else
21734 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
21735 {
21736 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
21737@@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
21738 pgd_list_del(pgd);
21739 spin_unlock(&pgd_lock);
21740 }
21741+#endif
21742
21743 /*
21744 * List of all pgd's needed for non-PAE so it can invalidate entries
21745@@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
21746 * -- wli
21747 */
21748
21749-#ifdef CONFIG_X86_PAE
21750+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
21751 /*
21752 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
21753 * updating the top-level pagetable entries to guarantee the
21754@@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
21755 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
21756 * and initialize the kernel pmds here.
21757 */
21758-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
21759+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
21760
21761 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
21762 {
21763@@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm,
21764 */
21765 flush_tlb_mm(mm);
21766 }
21767+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
21768+#define PREALLOCATED_PXDS USER_PGD_PTRS
21769 #else /* !CONFIG_X86_PAE */
21770
21771 /* No need to prepopulate any pagetable entries in non-PAE modes. */
21772-#define PREALLOCATED_PMDS 0
21773+#define PREALLOCATED_PXDS 0
21774
21775 #endif /* CONFIG_X86_PAE */
21776
21777-static void free_pmds(pmd_t *pmds[])
21778+static void free_pxds(pxd_t *pxds[])
21779 {
21780 int i;
21781
21782- for(i = 0; i < PREALLOCATED_PMDS; i++)
21783- if (pmds[i])
21784- free_page((unsigned long)pmds[i]);
21785+ for(i = 0; i < PREALLOCATED_PXDS; i++)
21786+ if (pxds[i])
21787+ free_page((unsigned long)pxds[i]);
21788 }
21789
21790-static int preallocate_pmds(pmd_t *pmds[])
21791+static int preallocate_pxds(pxd_t *pxds[])
21792 {
21793 int i;
21794 bool failed = false;
21795
21796- for(i = 0; i < PREALLOCATED_PMDS; i++) {
21797- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
21798- if (pmd == NULL)
21799+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
21800+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
21801+ if (pxd == NULL)
21802 failed = true;
21803- pmds[i] = pmd;
21804+ pxds[i] = pxd;
21805 }
21806
21807 if (failed) {
21808- free_pmds(pmds);
21809+ free_pxds(pxds);
21810 return -ENOMEM;
21811 }
21812
21813@@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[
21814 * preallocate which never got a corresponding vma will need to be
21815 * freed manually.
21816 */
21817-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
21818+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
21819 {
21820 int i;
21821
21822- for(i = 0; i < PREALLOCATED_PMDS; i++) {
21823+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
21824 pgd_t pgd = pgdp[i];
21825
21826 if (pgd_val(pgd) != 0) {
21827- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
21828+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
21829
21830- pgdp[i] = native_make_pgd(0);
21831+ set_pgd(pgdp + i, native_make_pgd(0));
21832
21833- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
21834- pmd_free(mm, pmd);
21835+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
21836+ pxd_free(mm, pxd);
21837 }
21838 }
21839 }
21840
21841-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
21842+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
21843 {
21844- pud_t *pud;
21845+ pyd_t *pyd;
21846 unsigned long addr;
21847 int i;
21848
21849- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
21850+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
21851 return;
21852
21853- pud = pud_offset(pgd, 0);
21854+#ifdef CONFIG_X86_64
21855+ pyd = pyd_offset(mm, 0L);
21856+#else
21857+ pyd = pyd_offset(pgd, 0L);
21858+#endif
21859
21860- for (addr = i = 0; i < PREALLOCATED_PMDS;
21861- i++, pud++, addr += PUD_SIZE) {
21862- pmd_t *pmd = pmds[i];
21863+ for (addr = i = 0; i < PREALLOCATED_PXDS;
21864+ i++, pyd++, addr += PYD_SIZE) {
21865+ pxd_t *pxd = pxds[i];
21866
21867 if (i >= KERNEL_PGD_BOUNDARY)
21868- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
21869- sizeof(pmd_t) * PTRS_PER_PMD);
21870+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
21871+ sizeof(pxd_t) * PTRS_PER_PMD);
21872
21873- pud_populate(mm, pud, pmd);
21874+ pyd_populate(mm, pyd, pxd);
21875 }
21876 }
21877
21878 pgd_t *pgd_alloc(struct mm_struct *mm)
21879 {
21880 pgd_t *pgd;
21881- pmd_t *pmds[PREALLOCATED_PMDS];
21882+ pxd_t *pxds[PREALLOCATED_PXDS];
21883
21884 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
21885
21886@@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
21887
21888 mm->pgd = pgd;
21889
21890- if (preallocate_pmds(pmds) != 0)
21891+ if (preallocate_pxds(pxds) != 0)
21892 goto out_free_pgd;
21893
21894 if (paravirt_pgd_alloc(mm) != 0)
21895- goto out_free_pmds;
21896+ goto out_free_pxds;
21897
21898 /*
21899 * Make sure that pre-populating the pmds is atomic with
21900@@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
21901 spin_lock(&pgd_lock);
21902
21903 pgd_ctor(mm, pgd);
21904- pgd_prepopulate_pmd(mm, pgd, pmds);
21905+ pgd_prepopulate_pxd(mm, pgd, pxds);
21906
21907 spin_unlock(&pgd_lock);
21908
21909 return pgd;
21910
21911-out_free_pmds:
21912- free_pmds(pmds);
21913+out_free_pxds:
21914+ free_pxds(pxds);
21915 out_free_pgd:
21916 free_page((unsigned long)pgd);
21917 out:
21918@@ -295,7 +344,7 @@ out:
21919
21920 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
21921 {
21922- pgd_mop_up_pmds(mm, pgd);
21923+ pgd_mop_up_pxds(mm, pgd);
21924 pgd_dtor(pgd);
21925 paravirt_pgd_free(mm, pgd);
21926 free_page((unsigned long)pgd);
21927diff -urNp linux-3.1.1/arch/x86/mm/setup_nx.c linux-3.1.1/arch/x86/mm/setup_nx.c
21928--- linux-3.1.1/arch/x86/mm/setup_nx.c 2011-11-11 15:19:27.000000000 -0500
21929+++ linux-3.1.1/arch/x86/mm/setup_nx.c 2011-11-16 18:39:07.000000000 -0500
21930@@ -5,8 +5,10 @@
21931 #include <asm/pgtable.h>
21932 #include <asm/proto.h>
21933
21934+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21935 static int disable_nx __cpuinitdata;
21936
21937+#ifndef CONFIG_PAX_PAGEEXEC
21938 /*
21939 * noexec = on|off
21940 *
21941@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str
21942 return 0;
21943 }
21944 early_param("noexec", noexec_setup);
21945+#endif
21946+
21947+#endif
21948
21949 void __cpuinit x86_configure_nx(void)
21950 {
21951+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21952 if (cpu_has_nx && !disable_nx)
21953 __supported_pte_mask |= _PAGE_NX;
21954 else
21955+#endif
21956 __supported_pte_mask &= ~_PAGE_NX;
21957 }
21958
21959diff -urNp linux-3.1.1/arch/x86/mm/tlb.c linux-3.1.1/arch/x86/mm/tlb.c
21960--- linux-3.1.1/arch/x86/mm/tlb.c 2011-11-11 15:19:27.000000000 -0500
21961+++ linux-3.1.1/arch/x86/mm/tlb.c 2011-11-16 18:39:07.000000000 -0500
21962@@ -65,7 +65,11 @@ void leave_mm(int cpu)
21963 BUG();
21964 cpumask_clear_cpu(cpu,
21965 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
21966+
21967+#ifndef CONFIG_PAX_PER_CPU_PGD
21968 load_cr3(swapper_pg_dir);
21969+#endif
21970+
21971 }
21972 EXPORT_SYMBOL_GPL(leave_mm);
21973
21974diff -urNp linux-3.1.1/arch/x86/net/bpf_jit_comp.c linux-3.1.1/arch/x86/net/bpf_jit_comp.c
21975--- linux-3.1.1/arch/x86/net/bpf_jit_comp.c 2011-11-11 15:19:27.000000000 -0500
21976+++ linux-3.1.1/arch/x86/net/bpf_jit_comp.c 2011-11-20 19:21:53.000000000 -0500
21977@@ -117,6 +117,10 @@ static inline void bpf_flush_icache(void
21978 set_fs(old_fs);
21979 }
21980
21981+struct bpf_jit_work {
21982+ struct work_struct work;
21983+ void *image;
21984+};
21985
21986 void bpf_jit_compile(struct sk_filter *fp)
21987 {
21988@@ -141,6 +145,10 @@ void bpf_jit_compile(struct sk_filter *f
21989 if (addrs == NULL)
21990 return;
21991
21992+ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
21993+ if (!fp->work)
21994+ goto out;
21995+
21996 /* Before first pass, make a rough estimation of addrs[]
21997 * each bpf instruction is translated to less than 64 bytes
21998 */
21999@@ -585,11 +593,12 @@ cond_branch: f_offset = addrs[i + filt
22000 if (image) {
22001 if (unlikely(proglen + ilen > oldproglen)) {
22002 pr_err("bpb_jit_compile fatal error\n");
22003- kfree(addrs);
22004- module_free(NULL, image);
22005- return;
22006+ module_free_exec(NULL, image);
22007+ goto out;
22008 }
22009+ pax_open_kernel();
22010 memcpy(image + proglen, temp, ilen);
22011+ pax_close_kernel();
22012 }
22013 proglen += ilen;
22014 addrs[i] = proglen;
22015@@ -609,7 +618,7 @@ cond_branch: f_offset = addrs[i + filt
22016 break;
22017 }
22018 if (proglen == oldproglen) {
22019- image = module_alloc(max_t(unsigned int,
22020+ image = module_alloc_exec(max_t(unsigned int,
22021 proglen,
22022 sizeof(struct work_struct)));
22023 if (!image)
22024@@ -631,24 +640,27 @@ cond_branch: f_offset = addrs[i + filt
22025 fp->bpf_func = (void *)image;
22026 }
22027 out:
22028+ kfree(fp->work);
22029 kfree(addrs);
22030 return;
22031 }
22032
22033 static void jit_free_defer(struct work_struct *arg)
22034 {
22035- module_free(NULL, arg);
22036+ module_free_exec(NULL, ((struct bpf_jit_work*)arg)->image);
22037+ kfree(arg);
22038 }
22039
22040 /* run from softirq, we must use a work_struct to call
22041- * module_free() from process context
22042+ * module_free_exec() from process context
22043 */
22044 void bpf_jit_free(struct sk_filter *fp)
22045 {
22046 if (fp->bpf_func != sk_run_filter) {
22047- struct work_struct *work = (struct work_struct *)fp->bpf_func;
22048+ struct work_struct *work = &fp->work->work;
22049
22050 INIT_WORK(work, jit_free_defer);
22051+ fp->work->image = fp->bpf_func;
22052 schedule_work(work);
22053 }
22054 }
22055diff -urNp linux-3.1.1/arch/x86/net/bpf_jit.S linux-3.1.1/arch/x86/net/bpf_jit.S
22056--- linux-3.1.1/arch/x86/net/bpf_jit.S 2011-11-11 15:19:27.000000000 -0500
22057+++ linux-3.1.1/arch/x86/net/bpf_jit.S 2011-11-16 18:39:07.000000000 -0500
22058@@ -9,6 +9,7 @@
22059 */
22060 #include <linux/linkage.h>
22061 #include <asm/dwarf2.h>
22062+#include <asm/alternative-asm.h>
22063
22064 /*
22065 * Calling convention :
22066@@ -35,6 +36,7 @@ sk_load_word:
22067 jle bpf_slow_path_word
22068 mov (SKBDATA,%rsi),%eax
22069 bswap %eax /* ntohl() */
22070+ pax_force_retaddr
22071 ret
22072
22073
22074@@ -53,6 +55,7 @@ sk_load_half:
22075 jle bpf_slow_path_half
22076 movzwl (SKBDATA,%rsi),%eax
22077 rol $8,%ax # ntohs()
22078+ pax_force_retaddr
22079 ret
22080
22081 sk_load_byte_ind:
22082@@ -66,6 +69,7 @@ sk_load_byte:
22083 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
22084 jle bpf_slow_path_byte
22085 movzbl (SKBDATA,%rsi),%eax
22086+ pax_force_retaddr
22087 ret
22088
22089 /**
22090@@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
22091 movzbl (SKBDATA,%rsi),%ebx
22092 and $15,%bl
22093 shl $2,%bl
22094+ pax_force_retaddr
22095 ret
22096 CFI_ENDPROC
22097 ENDPROC(sk_load_byte_msh)
22098@@ -91,6 +96,7 @@ bpf_error:
22099 xor %eax,%eax
22100 mov -8(%rbp),%rbx
22101 leaveq
22102+ pax_force_retaddr
22103 ret
22104
22105 /* rsi contains offset and can be scratched */
22106@@ -113,6 +119,7 @@ bpf_slow_path_word:
22107 js bpf_error
22108 mov -12(%rbp),%eax
22109 bswap %eax
22110+ pax_force_retaddr
22111 ret
22112
22113 bpf_slow_path_half:
22114@@ -121,12 +128,14 @@ bpf_slow_path_half:
22115 mov -12(%rbp),%ax
22116 rol $8,%ax
22117 movzwl %ax,%eax
22118+ pax_force_retaddr
22119 ret
22120
22121 bpf_slow_path_byte:
22122 bpf_slow_path_common(1)
22123 js bpf_error
22124 movzbl -12(%rbp),%eax
22125+ pax_force_retaddr
22126 ret
22127
22128 bpf_slow_path_byte_msh:
22129@@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
22130 and $15,%al
22131 shl $2,%al
22132 xchg %eax,%ebx
22133+ pax_force_retaddr
22134 ret
22135diff -urNp linux-3.1.1/arch/x86/oprofile/backtrace.c linux-3.1.1/arch/x86/oprofile/backtrace.c
22136--- linux-3.1.1/arch/x86/oprofile/backtrace.c 2011-11-11 15:19:27.000000000 -0500
22137+++ linux-3.1.1/arch/x86/oprofile/backtrace.c 2011-11-16 18:39:07.000000000 -0500
22138@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_fram
22139 struct stack_frame_ia32 *fp;
22140 unsigned long bytes;
22141
22142- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
22143+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
22144 if (bytes != sizeof(bufhead))
22145 return NULL;
22146
22147- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
22148+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
22149
22150 oprofile_add_trace(bufhead[0].return_address);
22151
22152@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_bac
22153 struct stack_frame bufhead[2];
22154 unsigned long bytes;
22155
22156- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
22157+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
22158 if (bytes != sizeof(bufhead))
22159 return NULL;
22160
22161@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const reg
22162 {
22163 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
22164
22165- if (!user_mode_vm(regs)) {
22166+ if (!user_mode(regs)) {
22167 unsigned long stack = kernel_stack_pointer(regs);
22168 if (depth)
22169 dump_trace(NULL, regs, (unsigned long *)stack, 0,
22170diff -urNp linux-3.1.1/arch/x86/pci/mrst.c linux-3.1.1/arch/x86/pci/mrst.c
22171--- linux-3.1.1/arch/x86/pci/mrst.c 2011-11-11 15:19:27.000000000 -0500
22172+++ linux-3.1.1/arch/x86/pci/mrst.c 2011-11-16 18:39:07.000000000 -0500
22173@@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
22174 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
22175 pci_mmcfg_late_init();
22176 pcibios_enable_irq = mrst_pci_irq_enable;
22177- pci_root_ops = pci_mrst_ops;
22178+ pax_open_kernel();
22179+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
22180+ pax_close_kernel();
22181 /* Continue with standard init */
22182 return 1;
22183 }
22184diff -urNp linux-3.1.1/arch/x86/pci/pcbios.c linux-3.1.1/arch/x86/pci/pcbios.c
22185--- linux-3.1.1/arch/x86/pci/pcbios.c 2011-11-11 15:19:27.000000000 -0500
22186+++ linux-3.1.1/arch/x86/pci/pcbios.c 2011-11-16 18:39:07.000000000 -0500
22187@@ -79,50 +79,93 @@ union bios32 {
22188 static struct {
22189 unsigned long address;
22190 unsigned short segment;
22191-} bios32_indirect = { 0, __KERNEL_CS };
22192+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
22193
22194 /*
22195 * Returns the entry point for the given service, NULL on error
22196 */
22197
22198-static unsigned long bios32_service(unsigned long service)
22199+static unsigned long __devinit bios32_service(unsigned long service)
22200 {
22201 unsigned char return_code; /* %al */
22202 unsigned long address; /* %ebx */
22203 unsigned long length; /* %ecx */
22204 unsigned long entry; /* %edx */
22205 unsigned long flags;
22206+ struct desc_struct d, *gdt;
22207
22208 local_irq_save(flags);
22209- __asm__("lcall *(%%edi); cld"
22210+
22211+ gdt = get_cpu_gdt_table(smp_processor_id());
22212+
22213+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
22214+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
22215+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
22216+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
22217+
22218+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
22219 : "=a" (return_code),
22220 "=b" (address),
22221 "=c" (length),
22222 "=d" (entry)
22223 : "0" (service),
22224 "1" (0),
22225- "D" (&bios32_indirect));
22226+ "D" (&bios32_indirect),
22227+ "r"(__PCIBIOS_DS)
22228+ : "memory");
22229+
22230+ pax_open_kernel();
22231+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
22232+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
22233+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
22234+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
22235+ pax_close_kernel();
22236+
22237 local_irq_restore(flags);
22238
22239 switch (return_code) {
22240- case 0:
22241- return address + entry;
22242- case 0x80: /* Not present */
22243- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
22244- return 0;
22245- default: /* Shouldn't happen */
22246- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
22247- service, return_code);
22248+ case 0: {
22249+ int cpu;
22250+ unsigned char flags;
22251+
22252+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
22253+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
22254+ printk(KERN_WARNING "bios32_service: not valid\n");
22255 return 0;
22256+ }
22257+ address = address + PAGE_OFFSET;
22258+ length += 16UL; /* some BIOSs underreport this... */
22259+ flags = 4;
22260+ if (length >= 64*1024*1024) {
22261+ length >>= PAGE_SHIFT;
22262+ flags |= 8;
22263+ }
22264+
22265+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
22266+ gdt = get_cpu_gdt_table(cpu);
22267+ pack_descriptor(&d, address, length, 0x9b, flags);
22268+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
22269+ pack_descriptor(&d, address, length, 0x93, flags);
22270+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
22271+ }
22272+ return entry;
22273+ }
22274+ case 0x80: /* Not present */
22275+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
22276+ return 0;
22277+ default: /* Shouldn't happen */
22278+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
22279+ service, return_code);
22280+ return 0;
22281 }
22282 }
22283
22284 static struct {
22285 unsigned long address;
22286 unsigned short segment;
22287-} pci_indirect = { 0, __KERNEL_CS };
22288+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
22289
22290-static int pci_bios_present;
22291+static int pci_bios_present __read_only;
22292
22293 static int __devinit check_pcibios(void)
22294 {
22295@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
22296 unsigned long flags, pcibios_entry;
22297
22298 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
22299- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
22300+ pci_indirect.address = pcibios_entry;
22301
22302 local_irq_save(flags);
22303- __asm__(
22304- "lcall *(%%edi); cld\n\t"
22305+ __asm__("movw %w6, %%ds\n\t"
22306+ "lcall *%%ss:(%%edi); cld\n\t"
22307+ "push %%ss\n\t"
22308+ "pop %%ds\n\t"
22309 "jc 1f\n\t"
22310 "xor %%ah, %%ah\n"
22311 "1:"
22312@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
22313 "=b" (ebx),
22314 "=c" (ecx)
22315 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
22316- "D" (&pci_indirect)
22317+ "D" (&pci_indirect),
22318+ "r" (__PCIBIOS_DS)
22319 : "memory");
22320 local_irq_restore(flags);
22321
22322@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int se
22323
22324 switch (len) {
22325 case 1:
22326- __asm__("lcall *(%%esi); cld\n\t"
22327+ __asm__("movw %w6, %%ds\n\t"
22328+ "lcall *%%ss:(%%esi); cld\n\t"
22329+ "push %%ss\n\t"
22330+ "pop %%ds\n\t"
22331 "jc 1f\n\t"
22332 "xor %%ah, %%ah\n"
22333 "1:"
22334@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int se
22335 : "1" (PCIBIOS_READ_CONFIG_BYTE),
22336 "b" (bx),
22337 "D" ((long)reg),
22338- "S" (&pci_indirect));
22339+ "S" (&pci_indirect),
22340+ "r" (__PCIBIOS_DS));
22341 /*
22342 * Zero-extend the result beyond 8 bits, do not trust the
22343 * BIOS having done it:
22344@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int se
22345 *value &= 0xff;
22346 break;
22347 case 2:
22348- __asm__("lcall *(%%esi); cld\n\t"
22349+ __asm__("movw %w6, %%ds\n\t"
22350+ "lcall *%%ss:(%%esi); cld\n\t"
22351+ "push %%ss\n\t"
22352+ "pop %%ds\n\t"
22353 "jc 1f\n\t"
22354 "xor %%ah, %%ah\n"
22355 "1:"
22356@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int se
22357 : "1" (PCIBIOS_READ_CONFIG_WORD),
22358 "b" (bx),
22359 "D" ((long)reg),
22360- "S" (&pci_indirect));
22361+ "S" (&pci_indirect),
22362+ "r" (__PCIBIOS_DS));
22363 /*
22364 * Zero-extend the result beyond 16 bits, do not trust the
22365 * BIOS having done it:
22366@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int se
22367 *value &= 0xffff;
22368 break;
22369 case 4:
22370- __asm__("lcall *(%%esi); cld\n\t"
22371+ __asm__("movw %w6, %%ds\n\t"
22372+ "lcall *%%ss:(%%esi); cld\n\t"
22373+ "push %%ss\n\t"
22374+ "pop %%ds\n\t"
22375 "jc 1f\n\t"
22376 "xor %%ah, %%ah\n"
22377 "1:"
22378@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int se
22379 : "1" (PCIBIOS_READ_CONFIG_DWORD),
22380 "b" (bx),
22381 "D" ((long)reg),
22382- "S" (&pci_indirect));
22383+ "S" (&pci_indirect),
22384+ "r" (__PCIBIOS_DS));
22385 break;
22386 }
22387
22388@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int s
22389
22390 switch (len) {
22391 case 1:
22392- __asm__("lcall *(%%esi); cld\n\t"
22393+ __asm__("movw %w6, %%ds\n\t"
22394+ "lcall *%%ss:(%%esi); cld\n\t"
22395+ "push %%ss\n\t"
22396+ "pop %%ds\n\t"
22397 "jc 1f\n\t"
22398 "xor %%ah, %%ah\n"
22399 "1:"
22400@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int s
22401 "c" (value),
22402 "b" (bx),
22403 "D" ((long)reg),
22404- "S" (&pci_indirect));
22405+ "S" (&pci_indirect),
22406+ "r" (__PCIBIOS_DS));
22407 break;
22408 case 2:
22409- __asm__("lcall *(%%esi); cld\n\t"
22410+ __asm__("movw %w6, %%ds\n\t"
22411+ "lcall *%%ss:(%%esi); cld\n\t"
22412+ "push %%ss\n\t"
22413+ "pop %%ds\n\t"
22414 "jc 1f\n\t"
22415 "xor %%ah, %%ah\n"
22416 "1:"
22417@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int s
22418 "c" (value),
22419 "b" (bx),
22420 "D" ((long)reg),
22421- "S" (&pci_indirect));
22422+ "S" (&pci_indirect),
22423+ "r" (__PCIBIOS_DS));
22424 break;
22425 case 4:
22426- __asm__("lcall *(%%esi); cld\n\t"
22427+ __asm__("movw %w6, %%ds\n\t"
22428+ "lcall *%%ss:(%%esi); cld\n\t"
22429+ "push %%ss\n\t"
22430+ "pop %%ds\n\t"
22431 "jc 1f\n\t"
22432 "xor %%ah, %%ah\n"
22433 "1:"
22434@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int s
22435 "c" (value),
22436 "b" (bx),
22437 "D" ((long)reg),
22438- "S" (&pci_indirect));
22439+ "S" (&pci_indirect),
22440+ "r" (__PCIBIOS_DS));
22441 break;
22442 }
22443
22444@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_i
22445
22446 DBG("PCI: Fetching IRQ routing table... ");
22447 __asm__("push %%es\n\t"
22448+ "movw %w8, %%ds\n\t"
22449 "push %%ds\n\t"
22450 "pop %%es\n\t"
22451- "lcall *(%%esi); cld\n\t"
22452+ "lcall *%%ss:(%%esi); cld\n\t"
22453 "pop %%es\n\t"
22454+ "push %%ss\n\t"
22455+ "pop %%ds\n"
22456 "jc 1f\n\t"
22457 "xor %%ah, %%ah\n"
22458 "1:"
22459@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_i
22460 "1" (0),
22461 "D" ((long) &opt),
22462 "S" (&pci_indirect),
22463- "m" (opt)
22464+ "m" (opt),
22465+ "r" (__PCIBIOS_DS)
22466 : "memory");
22467 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
22468 if (ret & 0xff00)
22469@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_d
22470 {
22471 int ret;
22472
22473- __asm__("lcall *(%%esi); cld\n\t"
22474+ __asm__("movw %w5, %%ds\n\t"
22475+ "lcall *%%ss:(%%esi); cld\n\t"
22476+ "push %%ss\n\t"
22477+ "pop %%ds\n"
22478 "jc 1f\n\t"
22479 "xor %%ah, %%ah\n"
22480 "1:"
22481@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_d
22482 : "0" (PCIBIOS_SET_PCI_HW_INT),
22483 "b" ((dev->bus->number << 8) | dev->devfn),
22484 "c" ((irq << 8) | (pin + 10)),
22485- "S" (&pci_indirect));
22486+ "S" (&pci_indirect),
22487+ "r" (__PCIBIOS_DS));
22488 return !(ret & 0xff00);
22489 }
22490 EXPORT_SYMBOL(pcibios_set_irq_routing);
22491diff -urNp linux-3.1.1/arch/x86/platform/efi/efi_32.c linux-3.1.1/arch/x86/platform/efi/efi_32.c
22492--- linux-3.1.1/arch/x86/platform/efi/efi_32.c 2011-11-11 15:19:27.000000000 -0500
22493+++ linux-3.1.1/arch/x86/platform/efi/efi_32.c 2011-11-16 18:39:07.000000000 -0500
22494@@ -38,70 +38,56 @@
22495 */
22496
22497 static unsigned long efi_rt_eflags;
22498-static pgd_t efi_bak_pg_dir_pointer[2];
22499+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
22500
22501-void efi_call_phys_prelog(void)
22502+void __init efi_call_phys_prelog(void)
22503 {
22504- unsigned long cr4;
22505- unsigned long temp;
22506 struct desc_ptr gdt_descr;
22507
22508- local_irq_save(efi_rt_eflags);
22509+#ifdef CONFIG_PAX_KERNEXEC
22510+ struct desc_struct d;
22511+#endif
22512
22513- /*
22514- * If I don't have PAE, I should just duplicate two entries in page
22515- * directory. If I have PAE, I just need to duplicate one entry in
22516- * page directory.
22517- */
22518- cr4 = read_cr4_safe();
22519+ local_irq_save(efi_rt_eflags);
22520
22521- if (cr4 & X86_CR4_PAE) {
22522- efi_bak_pg_dir_pointer[0].pgd =
22523- swapper_pg_dir[pgd_index(0)].pgd;
22524- swapper_pg_dir[0].pgd =
22525- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
22526- } else {
22527- efi_bak_pg_dir_pointer[0].pgd =
22528- swapper_pg_dir[pgd_index(0)].pgd;
22529- efi_bak_pg_dir_pointer[1].pgd =
22530- swapper_pg_dir[pgd_index(0x400000)].pgd;
22531- swapper_pg_dir[pgd_index(0)].pgd =
22532- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
22533- temp = PAGE_OFFSET + 0x400000;
22534- swapper_pg_dir[pgd_index(0x400000)].pgd =
22535- swapper_pg_dir[pgd_index(temp)].pgd;
22536- }
22537+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
22538+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22539+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
22540
22541 /*
22542 * After the lock is released, the original page table is restored.
22543 */
22544 __flush_tlb_all();
22545
22546+#ifdef CONFIG_PAX_KERNEXEC
22547+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
22548+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
22549+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
22550+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
22551+#endif
22552+
22553 gdt_descr.address = __pa(get_cpu_gdt_table(0));
22554 gdt_descr.size = GDT_SIZE - 1;
22555 load_gdt(&gdt_descr);
22556 }
22557
22558-void efi_call_phys_epilog(void)
22559+void __init efi_call_phys_epilog(void)
22560 {
22561- unsigned long cr4;
22562 struct desc_ptr gdt_descr;
22563
22564+#ifdef CONFIG_PAX_KERNEXEC
22565+ struct desc_struct d;
22566+
22567+ memset(&d, 0, sizeof d);
22568+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
22569+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
22570+#endif
22571+
22572 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
22573 gdt_descr.size = GDT_SIZE - 1;
22574 load_gdt(&gdt_descr);
22575
22576- cr4 = read_cr4_safe();
22577-
22578- if (cr4 & X86_CR4_PAE) {
22579- swapper_pg_dir[pgd_index(0)].pgd =
22580- efi_bak_pg_dir_pointer[0].pgd;
22581- } else {
22582- swapper_pg_dir[pgd_index(0)].pgd =
22583- efi_bak_pg_dir_pointer[0].pgd;
22584- swapper_pg_dir[pgd_index(0x400000)].pgd =
22585- efi_bak_pg_dir_pointer[1].pgd;
22586- }
22587+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
22588
22589 /*
22590 * After the lock is released, the original page table is restored.
22591diff -urNp linux-3.1.1/arch/x86/platform/efi/efi_stub_32.S linux-3.1.1/arch/x86/platform/efi/efi_stub_32.S
22592--- linux-3.1.1/arch/x86/platform/efi/efi_stub_32.S 2011-11-11 15:19:27.000000000 -0500
22593+++ linux-3.1.1/arch/x86/platform/efi/efi_stub_32.S 2011-11-16 18:39:07.000000000 -0500
22594@@ -6,7 +6,9 @@
22595 */
22596
22597 #include <linux/linkage.h>
22598+#include <linux/init.h>
22599 #include <asm/page_types.h>
22600+#include <asm/segment.h>
22601
22602 /*
22603 * efi_call_phys(void *, ...) is a function with variable parameters.
22604@@ -20,7 +22,7 @@
22605 * service functions will comply with gcc calling convention, too.
22606 */
22607
22608-.text
22609+__INIT
22610 ENTRY(efi_call_phys)
22611 /*
22612 * 0. The function can only be called in Linux kernel. So CS has been
22613@@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
22614 * The mapping of lower virtual memory has been created in prelog and
22615 * epilog.
22616 */
22617- movl $1f, %edx
22618- subl $__PAGE_OFFSET, %edx
22619- jmp *%edx
22620+ movl $(__KERNEXEC_EFI_DS), %edx
22621+ mov %edx, %ds
22622+ mov %edx, %es
22623+ mov %edx, %ss
22624+ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
22625 1:
22626
22627 /*
22628@@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
22629 * parameter 2, ..., param n. To make things easy, we save the return
22630 * address of efi_call_phys in a global variable.
22631 */
22632- popl %edx
22633- movl %edx, saved_return_addr
22634- /* get the function pointer into ECX*/
22635- popl %ecx
22636- movl %ecx, efi_rt_function_ptr
22637- movl $2f, %edx
22638- subl $__PAGE_OFFSET, %edx
22639- pushl %edx
22640+ popl (saved_return_addr)
22641+ popl (efi_rt_function_ptr)
22642
22643 /*
22644 * 3. Clear PG bit in %CR0.
22645@@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
22646 /*
22647 * 5. Call the physical function.
22648 */
22649- jmp *%ecx
22650+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
22651
22652-2:
22653 /*
22654 * 6. After EFI runtime service returns, control will return to
22655 * following instruction. We'd better readjust stack pointer first.
22656@@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
22657 movl %cr0, %edx
22658 orl $0x80000000, %edx
22659 movl %edx, %cr0
22660- jmp 1f
22661-1:
22662+
22663 /*
22664 * 8. Now restore the virtual mode from flat mode by
22665 * adding EIP with PAGE_OFFSET.
22666 */
22667- movl $1f, %edx
22668- jmp *%edx
22669+ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
22670 1:
22671+ movl $(__KERNEL_DS), %edx
22672+ mov %edx, %ds
22673+ mov %edx, %es
22674+ mov %edx, %ss
22675
22676 /*
22677 * 9. Balance the stack. And because EAX contain the return value,
22678 * we'd better not clobber it.
22679 */
22680- leal efi_rt_function_ptr, %edx
22681- movl (%edx), %ecx
22682- pushl %ecx
22683+ pushl (efi_rt_function_ptr)
22684
22685 /*
22686- * 10. Push the saved return address onto the stack and return.
22687+ * 10. Return to the saved return address.
22688 */
22689- leal saved_return_addr, %edx
22690- movl (%edx), %ecx
22691- pushl %ecx
22692- ret
22693+ jmpl *(saved_return_addr)
22694 ENDPROC(efi_call_phys)
22695 .previous
22696
22697-.data
22698+__INITDATA
22699 saved_return_addr:
22700 .long 0
22701 efi_rt_function_ptr:
22702diff -urNp linux-3.1.1/arch/x86/platform/efi/efi_stub_64.S linux-3.1.1/arch/x86/platform/efi/efi_stub_64.S
22703--- linux-3.1.1/arch/x86/platform/efi/efi_stub_64.S 2011-11-11 15:19:27.000000000 -0500
22704+++ linux-3.1.1/arch/x86/platform/efi/efi_stub_64.S 2011-11-16 18:39:07.000000000 -0500
22705@@ -7,6 +7,7 @@
22706 */
22707
22708 #include <linux/linkage.h>
22709+#include <asm/alternative-asm.h>
22710
22711 #define SAVE_XMM \
22712 mov %rsp, %rax; \
22713@@ -40,6 +41,7 @@ ENTRY(efi_call0)
22714 call *%rdi
22715 addq $32, %rsp
22716 RESTORE_XMM
22717+ pax_force_retaddr
22718 ret
22719 ENDPROC(efi_call0)
22720
22721@@ -50,6 +52,7 @@ ENTRY(efi_call1)
22722 call *%rdi
22723 addq $32, %rsp
22724 RESTORE_XMM
22725+ pax_force_retaddr
22726 ret
22727 ENDPROC(efi_call1)
22728
22729@@ -60,6 +63,7 @@ ENTRY(efi_call2)
22730 call *%rdi
22731 addq $32, %rsp
22732 RESTORE_XMM
22733+ pax_force_retaddr
22734 ret
22735 ENDPROC(efi_call2)
22736
22737@@ -71,6 +75,7 @@ ENTRY(efi_call3)
22738 call *%rdi
22739 addq $32, %rsp
22740 RESTORE_XMM
22741+ pax_force_retaddr
22742 ret
22743 ENDPROC(efi_call3)
22744
22745@@ -83,6 +88,7 @@ ENTRY(efi_call4)
22746 call *%rdi
22747 addq $32, %rsp
22748 RESTORE_XMM
22749+ pax_force_retaddr
22750 ret
22751 ENDPROC(efi_call4)
22752
22753@@ -96,6 +102,7 @@ ENTRY(efi_call5)
22754 call *%rdi
22755 addq $48, %rsp
22756 RESTORE_XMM
22757+ pax_force_retaddr
22758 ret
22759 ENDPROC(efi_call5)
22760
22761@@ -112,5 +119,6 @@ ENTRY(efi_call6)
22762 call *%rdi
22763 addq $48, %rsp
22764 RESTORE_XMM
22765+ pax_force_retaddr
22766 ret
22767 ENDPROC(efi_call6)
22768diff -urNp linux-3.1.1/arch/x86/platform/mrst/mrst.c linux-3.1.1/arch/x86/platform/mrst/mrst.c
22769--- linux-3.1.1/arch/x86/platform/mrst/mrst.c 2011-11-11 15:19:27.000000000 -0500
22770+++ linux-3.1.1/arch/x86/platform/mrst/mrst.c 2011-11-16 18:39:07.000000000 -0500
22771@@ -239,14 +239,16 @@ static int mrst_i8042_detect(void)
22772 }
22773
22774 /* Reboot and power off are handled by the SCU on a MID device */
22775-static void mrst_power_off(void)
22776+static __noreturn void mrst_power_off(void)
22777 {
22778 intel_scu_ipc_simple_command(0xf1, 1);
22779+ BUG();
22780 }
22781
22782-static void mrst_reboot(void)
22783+static __noreturn void mrst_reboot(void)
22784 {
22785 intel_scu_ipc_simple_command(0xf1, 0);
22786+ BUG();
22787 }
22788
22789 /*
22790diff -urNp linux-3.1.1/arch/x86/platform/uv/tlb_uv.c linux-3.1.1/arch/x86/platform/uv/tlb_uv.c
22791--- linux-3.1.1/arch/x86/platform/uv/tlb_uv.c 2011-11-11 15:19:27.000000000 -0500
22792+++ linux-3.1.1/arch/x86/platform/uv/tlb_uv.c 2011-11-16 19:39:11.000000000 -0500
22793@@ -377,6 +377,8 @@ static void reset_with_ipi(struct pnmask
22794 struct bau_control *smaster = bcp->socket_master;
22795 struct reset_args reset_args;
22796
22797+ pax_track_stack();
22798+
22799 reset_args.sender = sender;
22800 cpus_clear(*mask);
22801 /* find a single cpu for each uvhub in this distribution mask */
22802diff -urNp linux-3.1.1/arch/x86/power/cpu.c linux-3.1.1/arch/x86/power/cpu.c
22803--- linux-3.1.1/arch/x86/power/cpu.c 2011-11-11 15:19:27.000000000 -0500
22804+++ linux-3.1.1/arch/x86/power/cpu.c 2011-11-16 18:39:07.000000000 -0500
22805@@ -130,7 +130,7 @@ static void do_fpu_end(void)
22806 static void fix_processor_context(void)
22807 {
22808 int cpu = smp_processor_id();
22809- struct tss_struct *t = &per_cpu(init_tss, cpu);
22810+ struct tss_struct *t = init_tss + cpu;
22811
22812 set_tss_desc(cpu, t); /*
22813 * This just modifies memory; should not be
22814@@ -140,7 +140,9 @@ static void fix_processor_context(void)
22815 */
22816
22817 #ifdef CONFIG_X86_64
22818+ pax_open_kernel();
22819 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
22820+ pax_close_kernel();
22821
22822 syscall_init(); /* This sets MSR_*STAR and related */
22823 #endif
22824diff -urNp linux-3.1.1/arch/x86/vdso/Makefile linux-3.1.1/arch/x86/vdso/Makefile
22825--- linux-3.1.1/arch/x86/vdso/Makefile 2011-11-11 15:19:27.000000000 -0500
22826+++ linux-3.1.1/arch/x86/vdso/Makefile 2011-11-16 18:39:07.000000000 -0500
22827@@ -137,7 +137,7 @@ quiet_cmd_vdso = VDSO $@
22828 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
22829 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
22830
22831-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
22832+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
22833 GCOV_PROFILE := n
22834
22835 #
22836diff -urNp linux-3.1.1/arch/x86/vdso/vdso32-setup.c linux-3.1.1/arch/x86/vdso/vdso32-setup.c
22837--- linux-3.1.1/arch/x86/vdso/vdso32-setup.c 2011-11-11 15:19:27.000000000 -0500
22838+++ linux-3.1.1/arch/x86/vdso/vdso32-setup.c 2011-11-16 18:39:07.000000000 -0500
22839@@ -25,6 +25,7 @@
22840 #include <asm/tlbflush.h>
22841 #include <asm/vdso.h>
22842 #include <asm/proto.h>
22843+#include <asm/mman.h>
22844
22845 enum {
22846 VDSO_DISABLED = 0,
22847@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
22848 void enable_sep_cpu(void)
22849 {
22850 int cpu = get_cpu();
22851- struct tss_struct *tss = &per_cpu(init_tss, cpu);
22852+ struct tss_struct *tss = init_tss + cpu;
22853
22854 if (!boot_cpu_has(X86_FEATURE_SEP)) {
22855 put_cpu();
22856@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
22857 gate_vma.vm_start = FIXADDR_USER_START;
22858 gate_vma.vm_end = FIXADDR_USER_END;
22859 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
22860- gate_vma.vm_page_prot = __P101;
22861+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
22862 /*
22863 * Make sure the vDSO gets into every core dump.
22864 * Dumping its contents makes post-mortem fully interpretable later
22865@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
22866 if (compat)
22867 addr = VDSO_HIGH_BASE;
22868 else {
22869- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
22870+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
22871 if (IS_ERR_VALUE(addr)) {
22872 ret = addr;
22873 goto up_fail;
22874 }
22875 }
22876
22877- current->mm->context.vdso = (void *)addr;
22878+ current->mm->context.vdso = addr;
22879
22880 if (compat_uses_vma || !compat) {
22881 /*
22882@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
22883 }
22884
22885 current_thread_info()->sysenter_return =
22886- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
22887+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
22888
22889 up_fail:
22890 if (ret)
22891- current->mm->context.vdso = NULL;
22892+ current->mm->context.vdso = 0;
22893
22894 up_write(&mm->mmap_sem);
22895
22896@@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
22897
22898 const char *arch_vma_name(struct vm_area_struct *vma)
22899 {
22900- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
22901+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
22902 return "[vdso]";
22903+
22904+#ifdef CONFIG_PAX_SEGMEXEC
22905+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
22906+ return "[vdso]";
22907+#endif
22908+
22909 return NULL;
22910 }
22911
22912@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
22913 * Check to see if the corresponding task was created in compat vdso
22914 * mode.
22915 */
22916- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
22917+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
22918 return &gate_vma;
22919 return NULL;
22920 }
22921diff -urNp linux-3.1.1/arch/x86/vdso/vma.c linux-3.1.1/arch/x86/vdso/vma.c
22922--- linux-3.1.1/arch/x86/vdso/vma.c 2011-11-11 15:19:27.000000000 -0500
22923+++ linux-3.1.1/arch/x86/vdso/vma.c 2011-11-16 18:39:07.000000000 -0500
22924@@ -16,8 +16,6 @@
22925 #include <asm/vdso.h>
22926 #include <asm/page.h>
22927
22928-unsigned int __read_mostly vdso_enabled = 1;
22929-
22930 extern char vdso_start[], vdso_end[];
22931 extern unsigned short vdso_sync_cpuid;
22932
22933@@ -97,13 +95,15 @@ static unsigned long vdso_addr(unsigned
22934 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
22935 {
22936 struct mm_struct *mm = current->mm;
22937- unsigned long addr;
22938+ unsigned long addr = 0;
22939 int ret;
22940
22941- if (!vdso_enabled)
22942- return 0;
22943-
22944 down_write(&mm->mmap_sem);
22945+
22946+#ifdef CONFIG_PAX_RANDMMAP
22947+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
22948+#endif
22949+
22950 addr = vdso_addr(mm->start_stack, vdso_size);
22951 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
22952 if (IS_ERR_VALUE(addr)) {
22953@@ -111,26 +111,18 @@ int arch_setup_additional_pages(struct l
22954 goto up_fail;
22955 }
22956
22957- current->mm->context.vdso = (void *)addr;
22958+ mm->context.vdso = addr;
22959
22960 ret = install_special_mapping(mm, addr, vdso_size,
22961 VM_READ|VM_EXEC|
22962 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
22963 VM_ALWAYSDUMP,
22964 vdso_pages);
22965- if (ret) {
22966- current->mm->context.vdso = NULL;
22967- goto up_fail;
22968- }
22969+
22970+ if (ret)
22971+ mm->context.vdso = 0;
22972
22973 up_fail:
22974 up_write(&mm->mmap_sem);
22975 return ret;
22976 }
22977-
22978-static __init int vdso_setup(char *s)
22979-{
22980- vdso_enabled = simple_strtoul(s, NULL, 0);
22981- return 0;
22982-}
22983-__setup("vdso=", vdso_setup);
22984diff -urNp linux-3.1.1/arch/x86/xen/enlighten.c linux-3.1.1/arch/x86/xen/enlighten.c
22985--- linux-3.1.1/arch/x86/xen/enlighten.c 2011-11-11 15:19:27.000000000 -0500
22986+++ linux-3.1.1/arch/x86/xen/enlighten.c 2011-11-16 18:39:07.000000000 -0500
22987@@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
22988
22989 struct shared_info xen_dummy_shared_info;
22990
22991-void *xen_initial_gdt;
22992-
22993 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
22994 __read_mostly int xen_have_vector_callback;
22995 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
22996@@ -1028,7 +1026,7 @@ static const struct pv_apic_ops xen_apic
22997 #endif
22998 };
22999
23000-static void xen_reboot(int reason)
23001+static __noreturn void xen_reboot(int reason)
23002 {
23003 struct sched_shutdown r = { .reason = reason };
23004
23005@@ -1036,17 +1034,17 @@ static void xen_reboot(int reason)
23006 BUG();
23007 }
23008
23009-static void xen_restart(char *msg)
23010+static __noreturn void xen_restart(char *msg)
23011 {
23012 xen_reboot(SHUTDOWN_reboot);
23013 }
23014
23015-static void xen_emergency_restart(void)
23016+static __noreturn void xen_emergency_restart(void)
23017 {
23018 xen_reboot(SHUTDOWN_reboot);
23019 }
23020
23021-static void xen_machine_halt(void)
23022+static __noreturn void xen_machine_halt(void)
23023 {
23024 xen_reboot(SHUTDOWN_poweroff);
23025 }
23026@@ -1152,7 +1150,17 @@ asmlinkage void __init xen_start_kernel(
23027 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
23028
23029 /* Work out if we support NX */
23030- x86_configure_nx();
23031+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23032+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
23033+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
23034+ unsigned l, h;
23035+
23036+ __supported_pte_mask |= _PAGE_NX;
23037+ rdmsr(MSR_EFER, l, h);
23038+ l |= EFER_NX;
23039+ wrmsr(MSR_EFER, l, h);
23040+ }
23041+#endif
23042
23043 xen_setup_features();
23044
23045@@ -1183,13 +1191,6 @@ asmlinkage void __init xen_start_kernel(
23046
23047 machine_ops = xen_machine_ops;
23048
23049- /*
23050- * The only reliable way to retain the initial address of the
23051- * percpu gdt_page is to remember it here, so we can go and
23052- * mark it RW later, when the initial percpu area is freed.
23053- */
23054- xen_initial_gdt = &per_cpu(gdt_page, 0);
23055-
23056 xen_smp_init();
23057
23058 #ifdef CONFIG_ACPI_NUMA
23059diff -urNp linux-3.1.1/arch/x86/xen/mmu.c linux-3.1.1/arch/x86/xen/mmu.c
23060--- linux-3.1.1/arch/x86/xen/mmu.c 2011-11-11 15:19:27.000000000 -0500
23061+++ linux-3.1.1/arch/x86/xen/mmu.c 2011-11-16 18:39:07.000000000 -0500
23062@@ -1768,6 +1768,8 @@ pgd_t * __init xen_setup_kernel_pagetabl
23063 convert_pfn_mfn(init_level4_pgt);
23064 convert_pfn_mfn(level3_ident_pgt);
23065 convert_pfn_mfn(level3_kernel_pgt);
23066+ convert_pfn_mfn(level3_vmalloc_pgt);
23067+ convert_pfn_mfn(level3_vmemmap_pgt);
23068
23069 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
23070 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
23071@@ -1786,7 +1788,10 @@ pgd_t * __init xen_setup_kernel_pagetabl
23072 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
23073 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
23074 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
23075+ set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
23076+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
23077 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
23078+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
23079 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
23080 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
23081
23082@@ -2000,6 +2005,7 @@ static void __init xen_post_allocator_in
23083 pv_mmu_ops.set_pud = xen_set_pud;
23084 #if PAGETABLE_LEVELS == 4
23085 pv_mmu_ops.set_pgd = xen_set_pgd;
23086+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
23087 #endif
23088
23089 /* This will work as long as patching hasn't happened yet
23090@@ -2081,6 +2087,7 @@ static const struct pv_mmu_ops xen_mmu_o
23091 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
23092 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
23093 .set_pgd = xen_set_pgd_hyper,
23094+ .set_pgd_batched = xen_set_pgd_hyper,
23095
23096 .alloc_pud = xen_alloc_pmd_init,
23097 .release_pud = xen_release_pmd_init,
23098diff -urNp linux-3.1.1/arch/x86/xen/smp.c linux-3.1.1/arch/x86/xen/smp.c
23099--- linux-3.1.1/arch/x86/xen/smp.c 2011-11-11 15:19:27.000000000 -0500
23100+++ linux-3.1.1/arch/x86/xen/smp.c 2011-11-16 18:39:07.000000000 -0500
23101@@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_
23102 {
23103 BUG_ON(smp_processor_id() != 0);
23104 native_smp_prepare_boot_cpu();
23105-
23106- /* We've switched to the "real" per-cpu gdt, so make sure the
23107- old memory can be recycled */
23108- make_lowmem_page_readwrite(xen_initial_gdt);
23109-
23110 xen_filter_cpu_maps();
23111 xen_setup_vcpu_info_placement();
23112 }
23113@@ -275,12 +270,12 @@ cpu_initialize_context(unsigned int cpu,
23114 gdt = get_cpu_gdt_table(cpu);
23115
23116 ctxt->flags = VGCF_IN_KERNEL;
23117- ctxt->user_regs.ds = __USER_DS;
23118- ctxt->user_regs.es = __USER_DS;
23119+ ctxt->user_regs.ds = __KERNEL_DS;
23120+ ctxt->user_regs.es = __KERNEL_DS;
23121 ctxt->user_regs.ss = __KERNEL_DS;
23122 #ifdef CONFIG_X86_32
23123 ctxt->user_regs.fs = __KERNEL_PERCPU;
23124- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
23125+ savesegment(gs, ctxt->user_regs.gs);
23126 #else
23127 ctxt->gs_base_kernel = per_cpu_offset(cpu);
23128 #endif
23129@@ -331,13 +326,12 @@ static int __cpuinit xen_cpu_up(unsigned
23130 int rc;
23131
23132 per_cpu(current_task, cpu) = idle;
23133+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
23134 #ifdef CONFIG_X86_32
23135 irq_ctx_init(cpu);
23136 #else
23137 clear_tsk_thread_flag(idle, TIF_FORK);
23138- per_cpu(kernel_stack, cpu) =
23139- (unsigned long)task_stack_page(idle) -
23140- KERNEL_STACK_OFFSET + THREAD_SIZE;
23141+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
23142 #endif
23143 xen_setup_runstate_info(cpu);
23144 xen_setup_timer(cpu);
23145diff -urNp linux-3.1.1/arch/x86/xen/xen-asm_32.S linux-3.1.1/arch/x86/xen/xen-asm_32.S
23146--- linux-3.1.1/arch/x86/xen/xen-asm_32.S 2011-11-11 15:19:27.000000000 -0500
23147+++ linux-3.1.1/arch/x86/xen/xen-asm_32.S 2011-11-16 18:39:07.000000000 -0500
23148@@ -83,14 +83,14 @@ ENTRY(xen_iret)
23149 ESP_OFFSET=4 # bytes pushed onto stack
23150
23151 /*
23152- * Store vcpu_info pointer for easy access. Do it this way to
23153- * avoid having to reload %fs
23154+ * Store vcpu_info pointer for easy access.
23155 */
23156 #ifdef CONFIG_SMP
23157- GET_THREAD_INFO(%eax)
23158- movl TI_cpu(%eax), %eax
23159- movl __per_cpu_offset(,%eax,4), %eax
23160- mov xen_vcpu(%eax), %eax
23161+ push %fs
23162+ mov $(__KERNEL_PERCPU), %eax
23163+ mov %eax, %fs
23164+ mov PER_CPU_VAR(xen_vcpu), %eax
23165+ pop %fs
23166 #else
23167 movl xen_vcpu, %eax
23168 #endif
23169diff -urNp linux-3.1.1/arch/x86/xen/xen-head.S linux-3.1.1/arch/x86/xen/xen-head.S
23170--- linux-3.1.1/arch/x86/xen/xen-head.S 2011-11-11 15:19:27.000000000 -0500
23171+++ linux-3.1.1/arch/x86/xen/xen-head.S 2011-11-16 18:39:07.000000000 -0500
23172@@ -19,6 +19,17 @@ ENTRY(startup_xen)
23173 #ifdef CONFIG_X86_32
23174 mov %esi,xen_start_info
23175 mov $init_thread_union+THREAD_SIZE,%esp
23176+#ifdef CONFIG_SMP
23177+ movl $cpu_gdt_table,%edi
23178+ movl $__per_cpu_load,%eax
23179+ movw %ax,__KERNEL_PERCPU + 2(%edi)
23180+ rorl $16,%eax
23181+ movb %al,__KERNEL_PERCPU + 4(%edi)
23182+ movb %ah,__KERNEL_PERCPU + 7(%edi)
23183+ movl $__per_cpu_end - 1,%eax
23184+ subl $__per_cpu_start,%eax
23185+ movw %ax,__KERNEL_PERCPU + 0(%edi)
23186+#endif
23187 #else
23188 mov %rsi,xen_start_info
23189 mov $init_thread_union+THREAD_SIZE,%rsp
23190diff -urNp linux-3.1.1/arch/x86/xen/xen-ops.h linux-3.1.1/arch/x86/xen/xen-ops.h
23191--- linux-3.1.1/arch/x86/xen/xen-ops.h 2011-11-11 15:19:27.000000000 -0500
23192+++ linux-3.1.1/arch/x86/xen/xen-ops.h 2011-11-16 18:39:07.000000000 -0500
23193@@ -10,8 +10,6 @@
23194 extern const char xen_hypervisor_callback[];
23195 extern const char xen_failsafe_callback[];
23196
23197-extern void *xen_initial_gdt;
23198-
23199 struct trap_info;
23200 void xen_copy_trap_info(struct trap_info *traps);
23201
23202diff -urNp linux-3.1.1/block/blk-iopoll.c linux-3.1.1/block/blk-iopoll.c
23203--- linux-3.1.1/block/blk-iopoll.c 2011-11-11 15:19:27.000000000 -0500
23204+++ linux-3.1.1/block/blk-iopoll.c 2011-11-16 18:39:07.000000000 -0500
23205@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
23206 }
23207 EXPORT_SYMBOL(blk_iopoll_complete);
23208
23209-static void blk_iopoll_softirq(struct softirq_action *h)
23210+static void blk_iopoll_softirq(void)
23211 {
23212 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
23213 int rearm = 0, budget = blk_iopoll_budget;
23214diff -urNp linux-3.1.1/block/blk-map.c linux-3.1.1/block/blk-map.c
23215--- linux-3.1.1/block/blk-map.c 2011-11-11 15:19:27.000000000 -0500
23216+++ linux-3.1.1/block/blk-map.c 2011-11-16 18:39:07.000000000 -0500
23217@@ -301,7 +301,7 @@ int blk_rq_map_kern(struct request_queue
23218 if (!len || !kbuf)
23219 return -EINVAL;
23220
23221- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
23222+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
23223 if (do_copy)
23224 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
23225 else
23226diff -urNp linux-3.1.1/block/blk-softirq.c linux-3.1.1/block/blk-softirq.c
23227--- linux-3.1.1/block/blk-softirq.c 2011-11-11 15:19:27.000000000 -0500
23228+++ linux-3.1.1/block/blk-softirq.c 2011-11-16 18:39:07.000000000 -0500
23229@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
23230 * Softirq action handler - move entries to local list and loop over them
23231 * while passing them to the queue registered handler.
23232 */
23233-static void blk_done_softirq(struct softirq_action *h)
23234+static void blk_done_softirq(void)
23235 {
23236 struct list_head *cpu_list, local_list;
23237
23238diff -urNp linux-3.1.1/block/bsg.c linux-3.1.1/block/bsg.c
23239--- linux-3.1.1/block/bsg.c 2011-11-11 15:19:27.000000000 -0500
23240+++ linux-3.1.1/block/bsg.c 2011-11-16 18:39:07.000000000 -0500
23241@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
23242 struct sg_io_v4 *hdr, struct bsg_device *bd,
23243 fmode_t has_write_perm)
23244 {
23245+ unsigned char tmpcmd[sizeof(rq->__cmd)];
23246+ unsigned char *cmdptr;
23247+
23248 if (hdr->request_len > BLK_MAX_CDB) {
23249 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
23250 if (!rq->cmd)
23251 return -ENOMEM;
23252- }
23253+ cmdptr = rq->cmd;
23254+ } else
23255+ cmdptr = tmpcmd;
23256
23257- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
23258+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
23259 hdr->request_len))
23260 return -EFAULT;
23261
23262+ if (cmdptr != rq->cmd)
23263+ memcpy(rq->cmd, cmdptr, hdr->request_len);
23264+
23265 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
23266 if (blk_verify_command(rq->cmd, has_write_perm))
23267 return -EPERM;
23268diff -urNp linux-3.1.1/block/compat_ioctl.c linux-3.1.1/block/compat_ioctl.c
23269--- linux-3.1.1/block/compat_ioctl.c 2011-11-11 15:19:27.000000000 -0500
23270+++ linux-3.1.1/block/compat_ioctl.c 2011-11-16 18:39:07.000000000 -0500
23271@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_
23272 err |= __get_user(f->spec1, &uf->spec1);
23273 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
23274 err |= __get_user(name, &uf->name);
23275- f->name = compat_ptr(name);
23276+ f->name = (void __force_kernel *)compat_ptr(name);
23277 if (err) {
23278 err = -EFAULT;
23279 goto out;
23280diff -urNp linux-3.1.1/block/scsi_ioctl.c linux-3.1.1/block/scsi_ioctl.c
23281--- linux-3.1.1/block/scsi_ioctl.c 2011-11-11 15:19:27.000000000 -0500
23282+++ linux-3.1.1/block/scsi_ioctl.c 2011-11-16 18:39:07.000000000 -0500
23283@@ -222,8 +222,20 @@ EXPORT_SYMBOL(blk_verify_command);
23284 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
23285 struct sg_io_hdr *hdr, fmode_t mode)
23286 {
23287- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
23288+ unsigned char tmpcmd[sizeof(rq->__cmd)];
23289+ unsigned char *cmdptr;
23290+
23291+ if (rq->cmd != rq->__cmd)
23292+ cmdptr = rq->cmd;
23293+ else
23294+ cmdptr = tmpcmd;
23295+
23296+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
23297 return -EFAULT;
23298+
23299+ if (cmdptr != rq->cmd)
23300+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
23301+
23302 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
23303 return -EPERM;
23304
23305@@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue *
23306 int err;
23307 unsigned int in_len, out_len, bytes, opcode, cmdlen;
23308 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
23309+ unsigned char tmpcmd[sizeof(rq->__cmd)];
23310+ unsigned char *cmdptr;
23311
23312 if (!sic)
23313 return -EINVAL;
23314@@ -465,9 +479,18 @@ int sg_scsi_ioctl(struct request_queue *
23315 */
23316 err = -EFAULT;
23317 rq->cmd_len = cmdlen;
23318- if (copy_from_user(rq->cmd, sic->data, cmdlen))
23319+
23320+ if (rq->cmd != rq->__cmd)
23321+ cmdptr = rq->cmd;
23322+ else
23323+ cmdptr = tmpcmd;
23324+
23325+ if (copy_from_user(cmdptr, sic->data, cmdlen))
23326 goto error;
23327
23328+ if (rq->cmd != cmdptr)
23329+ memcpy(rq->cmd, cmdptr, cmdlen);
23330+
23331 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
23332 goto error;
23333
23334diff -urNp linux-3.1.1/crypto/cryptd.c linux-3.1.1/crypto/cryptd.c
23335--- linux-3.1.1/crypto/cryptd.c 2011-11-11 15:19:27.000000000 -0500
23336+++ linux-3.1.1/crypto/cryptd.c 2011-11-16 18:39:07.000000000 -0500
23337@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
23338
23339 struct cryptd_blkcipher_request_ctx {
23340 crypto_completion_t complete;
23341-};
23342+} __no_const;
23343
23344 struct cryptd_hash_ctx {
23345 struct crypto_shash *child;
23346@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
23347
23348 struct cryptd_aead_request_ctx {
23349 crypto_completion_t complete;
23350-};
23351+} __no_const;
23352
23353 static void cryptd_queue_worker(struct work_struct *work);
23354
23355diff -urNp linux-3.1.1/crypto/serpent.c linux-3.1.1/crypto/serpent.c
23356--- linux-3.1.1/crypto/serpent.c 2011-11-11 15:19:27.000000000 -0500
23357+++ linux-3.1.1/crypto/serpent.c 2011-11-16 18:40:10.000000000 -0500
23358@@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
23359 u32 r0,r1,r2,r3,r4;
23360 int i;
23361
23362+ pax_track_stack();
23363+
23364 /* Copy key, add padding */
23365
23366 for (i = 0; i < keylen; ++i)
23367diff -urNp linux-3.1.1/Documentation/dontdiff linux-3.1.1/Documentation/dontdiff
23368--- linux-3.1.1/Documentation/dontdiff 2011-11-11 15:19:27.000000000 -0500
23369+++ linux-3.1.1/Documentation/dontdiff 2011-11-16 18:39:07.000000000 -0500
23370@@ -5,6 +5,7 @@
23371 *.cis
23372 *.cpio
23373 *.csp
23374+*.dbg
23375 *.dsp
23376 *.dvi
23377 *.elf
23378@@ -48,9 +49,11 @@
23379 *.tab.h
23380 *.tex
23381 *.ver
23382+*.vim
23383 *.xml
23384 *.xz
23385 *_MODULES
23386+*_reg_safe.h
23387 *_vga16.c
23388 *~
23389 \#*#
23390@@ -70,6 +73,7 @@ Kerntypes
23391 Module.markers
23392 Module.symvers
23393 PENDING
23394+PERF*
23395 SCCS
23396 System.map*
23397 TAGS
23398@@ -93,19 +97,24 @@ bounds.h
23399 bsetup
23400 btfixupprep
23401 build
23402+builtin-policy.h
23403 bvmlinux
23404 bzImage*
23405 capability_names.h
23406 capflags.c
23407 classlist.h*
23408+clut_vga16.c
23409+common-cmds.h
23410 comp*.log
23411 compile.h*
23412 conf
23413 config
23414 config-*
23415 config_data.h*
23416+config.c
23417 config.mak
23418 config.mak.autogen
23419+config.tmp
23420 conmakehash
23421 consolemap_deftbl.c*
23422 cpustr.h
23423@@ -119,6 +128,7 @@ dslm
23424 elf2ecoff
23425 elfconfig.h*
23426 evergreen_reg_safe.h
23427+exception_policy.conf
23428 fixdep
23429 flask.h
23430 fore200e_mkfirm
23431@@ -126,12 +136,14 @@ fore200e_pca_fw.c*
23432 gconf
23433 gconf.glade.h
23434 gen-devlist
23435+gen-kdb_cmds.c
23436 gen_crc32table
23437 gen_init_cpio
23438 generated
23439 genheaders
23440 genksyms
23441 *_gray256.c
23442+hash
23443 hpet_example
23444 hugepage-mmap
23445 hugepage-shm
23446@@ -146,7 +158,7 @@ int32.c
23447 int4.c
23448 int8.c
23449 kallsyms
23450-kconfig
23451+kern_constants.h
23452 keywords.c
23453 ksym.c*
23454 ksym.h*
23455@@ -154,7 +166,6 @@ kxgettext
23456 lkc_defs.h
23457 lex.c
23458 lex.*.c
23459-linux
23460 logo_*.c
23461 logo_*_clut224.c
23462 logo_*_mono.c
23463@@ -166,7 +177,6 @@ machtypes.h
23464 map
23465 map_hugetlb
23466 maui_boot.h
23467-media
23468 mconf
23469 miboot*
23470 mk_elfconfig
23471@@ -174,6 +184,7 @@ mkboot
23472 mkbugboot
23473 mkcpustr
23474 mkdep
23475+mkpiggy
23476 mkprep
23477 mkregtable
23478 mktables
23479@@ -209,6 +220,7 @@ r300_reg_safe.h
23480 r420_reg_safe.h
23481 r600_reg_safe.h
23482 recordmcount
23483+regdb.c
23484 relocs
23485 rlim_names.h
23486 rn50_reg_safe.h
23487@@ -219,6 +231,7 @@ setup
23488 setup.bin
23489 setup.elf
23490 sImage
23491+slabinfo
23492 sm_tbl*
23493 split-include
23494 syscalltab.h
23495@@ -229,6 +242,7 @@ tftpboot.img
23496 timeconst.h
23497 times.h*
23498 trix_boot.h
23499+user_constants.h
23500 utsrelease.h*
23501 vdso-syms.lds
23502 vdso.lds
23503@@ -246,7 +260,9 @@ vmlinux
23504 vmlinux-*
23505 vmlinux.aout
23506 vmlinux.bin.all
23507+vmlinux.bin.bz2
23508 vmlinux.lds
23509+vmlinux.relocs
23510 vmlinuz
23511 voffset.h
23512 vsyscall.lds
23513@@ -254,9 +270,11 @@ vsyscall_32.lds
23514 wanxlfw.inc
23515 uImage
23516 unifdef
23517+utsrelease.h
23518 wakeup.bin
23519 wakeup.elf
23520 wakeup.lds
23521 zImage*
23522 zconf.hash.c
23523+zconf.lex.c
23524 zoffset.h
23525diff -urNp linux-3.1.1/Documentation/kernel-parameters.txt linux-3.1.1/Documentation/kernel-parameters.txt
23526--- linux-3.1.1/Documentation/kernel-parameters.txt 2011-11-11 15:19:27.000000000 -0500
23527+++ linux-3.1.1/Documentation/kernel-parameters.txt 2011-11-16 18:39:07.000000000 -0500
23528@@ -1898,6 +1898,13 @@ bytes respectively. Such letter suffixes
23529 the specified number of seconds. This is to be used if
23530 your oopses keep scrolling off the screen.
23531
23532+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
23533+ virtualization environments that don't cope well with the
23534+ expand down segment used by UDEREF on X86-32 or the frequent
23535+ page table updates on X86-64.
23536+
23537+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
23538+
23539 pcbit= [HW,ISDN]
23540
23541 pcd. [PARIDE]
23542diff -urNp linux-3.1.1/drivers/acpi/apei/cper.c linux-3.1.1/drivers/acpi/apei/cper.c
23543--- linux-3.1.1/drivers/acpi/apei/cper.c 2011-11-11 15:19:27.000000000 -0500
23544+++ linux-3.1.1/drivers/acpi/apei/cper.c 2011-11-16 18:39:07.000000000 -0500
23545@@ -38,12 +38,12 @@
23546 */
23547 u64 cper_next_record_id(void)
23548 {
23549- static atomic64_t seq;
23550+ static atomic64_unchecked_t seq;
23551
23552- if (!atomic64_read(&seq))
23553- atomic64_set(&seq, ((u64)get_seconds()) << 32);
23554+ if (!atomic64_read_unchecked(&seq))
23555+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
23556
23557- return atomic64_inc_return(&seq);
23558+ return atomic64_inc_return_unchecked(&seq);
23559 }
23560 EXPORT_SYMBOL_GPL(cper_next_record_id);
23561
23562diff -urNp linux-3.1.1/drivers/acpi/ec_sys.c linux-3.1.1/drivers/acpi/ec_sys.c
23563--- linux-3.1.1/drivers/acpi/ec_sys.c 2011-11-11 15:19:27.000000000 -0500
23564+++ linux-3.1.1/drivers/acpi/ec_sys.c 2011-11-16 18:39:07.000000000 -0500
23565@@ -11,6 +11,7 @@
23566 #include <linux/kernel.h>
23567 #include <linux/acpi.h>
23568 #include <linux/debugfs.h>
23569+#include <asm/uaccess.h>
23570 #include "internal.h"
23571
23572 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
23573@@ -39,7 +40,7 @@ static ssize_t acpi_ec_read_io(struct fi
23574 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
23575 */
23576 unsigned int size = EC_SPACE_SIZE;
23577- u8 *data = (u8 *) buf;
23578+ u8 data;
23579 loff_t init_off = *off;
23580 int err = 0;
23581
23582@@ -52,9 +53,11 @@ static ssize_t acpi_ec_read_io(struct fi
23583 size = count;
23584
23585 while (size) {
23586- err = ec_read(*off, &data[*off - init_off]);
23587+ err = ec_read(*off, &data);
23588 if (err)
23589 return err;
23590+ if (put_user(data, &buf[*off - init_off]))
23591+ return -EFAULT;
23592 *off += 1;
23593 size--;
23594 }
23595@@ -70,7 +73,6 @@ static ssize_t acpi_ec_write_io(struct f
23596
23597 unsigned int size = count;
23598 loff_t init_off = *off;
23599- u8 *data = (u8 *) buf;
23600 int err = 0;
23601
23602 if (*off >= EC_SPACE_SIZE)
23603@@ -81,7 +83,9 @@ static ssize_t acpi_ec_write_io(struct f
23604 }
23605
23606 while (size) {
23607- u8 byte_write = data[*off - init_off];
23608+ u8 byte_write;
23609+ if (get_user(byte_write, &buf[*off - init_off]))
23610+ return -EFAULT;
23611 err = ec_write(*off, byte_write);
23612 if (err)
23613 return err;
23614diff -urNp linux-3.1.1/drivers/acpi/proc.c linux-3.1.1/drivers/acpi/proc.c
23615--- linux-3.1.1/drivers/acpi/proc.c 2011-11-11 15:19:27.000000000 -0500
23616+++ linux-3.1.1/drivers/acpi/proc.c 2011-11-16 18:39:07.000000000 -0500
23617@@ -342,19 +342,13 @@ acpi_system_write_wakeup_device(struct f
23618 size_t count, loff_t * ppos)
23619 {
23620 struct list_head *node, *next;
23621- char strbuf[5];
23622- char str[5] = "";
23623- unsigned int len = count;
23624-
23625- if (len > 4)
23626- len = 4;
23627- if (len < 0)
23628- return -EFAULT;
23629+ char strbuf[5] = {0};
23630
23631- if (copy_from_user(strbuf, buffer, len))
23632+ if (count > 4)
23633+ count = 4;
23634+ if (copy_from_user(strbuf, buffer, count))
23635 return -EFAULT;
23636- strbuf[len] = '\0';
23637- sscanf(strbuf, "%s", str);
23638+ strbuf[count] = '\0';
23639
23640 mutex_lock(&acpi_device_lock);
23641 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
23642@@ -363,7 +357,7 @@ acpi_system_write_wakeup_device(struct f
23643 if (!dev->wakeup.flags.valid)
23644 continue;
23645
23646- if (!strncmp(dev->pnp.bus_id, str, 4)) {
23647+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
23648 if (device_can_wakeup(&dev->dev)) {
23649 bool enable = !device_may_wakeup(&dev->dev);
23650 device_set_wakeup_enable(&dev->dev, enable);
23651diff -urNp linux-3.1.1/drivers/acpi/processor_driver.c linux-3.1.1/drivers/acpi/processor_driver.c
23652--- linux-3.1.1/drivers/acpi/processor_driver.c 2011-11-11 15:19:27.000000000 -0500
23653+++ linux-3.1.1/drivers/acpi/processor_driver.c 2011-11-16 18:39:07.000000000 -0500
23654@@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(
23655 return 0;
23656 #endif
23657
23658- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
23659+ BUG_ON(pr->id >= nr_cpu_ids);
23660
23661 /*
23662 * Buggy BIOS check
23663diff -urNp linux-3.1.1/drivers/ata/libata-core.c linux-3.1.1/drivers/ata/libata-core.c
23664--- linux-3.1.1/drivers/ata/libata-core.c 2011-11-11 15:19:27.000000000 -0500
23665+++ linux-3.1.1/drivers/ata/libata-core.c 2011-11-16 18:39:07.000000000 -0500
23666@@ -4733,7 +4733,7 @@ void ata_qc_free(struct ata_queued_cmd *
23667 struct ata_port *ap;
23668 unsigned int tag;
23669
23670- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23671+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23672 ap = qc->ap;
23673
23674 qc->flags = 0;
23675@@ -4749,7 +4749,7 @@ void __ata_qc_complete(struct ata_queued
23676 struct ata_port *ap;
23677 struct ata_link *link;
23678
23679- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23680+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23681 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
23682 ap = qc->ap;
23683 link = qc->dev->link;
23684@@ -5754,6 +5754,7 @@ static void ata_finalize_port_ops(struct
23685 return;
23686
23687 spin_lock(&lock);
23688+ pax_open_kernel();
23689
23690 for (cur = ops->inherits; cur; cur = cur->inherits) {
23691 void **inherit = (void **)cur;
23692@@ -5767,8 +5768,9 @@ static void ata_finalize_port_ops(struct
23693 if (IS_ERR(*pp))
23694 *pp = NULL;
23695
23696- ops->inherits = NULL;
23697+ *(struct ata_port_operations **)&ops->inherits = NULL;
23698
23699+ pax_close_kernel();
23700 spin_unlock(&lock);
23701 }
23702
23703diff -urNp linux-3.1.1/drivers/ata/libata-eh.c linux-3.1.1/drivers/ata/libata-eh.c
23704--- linux-3.1.1/drivers/ata/libata-eh.c 2011-11-11 15:19:27.000000000 -0500
23705+++ linux-3.1.1/drivers/ata/libata-eh.c 2011-11-16 18:40:10.000000000 -0500
23706@@ -2515,6 +2515,8 @@ void ata_eh_report(struct ata_port *ap)
23707 {
23708 struct ata_link *link;
23709
23710+ pax_track_stack();
23711+
23712 ata_for_each_link(link, ap, HOST_FIRST)
23713 ata_eh_link_report(link);
23714 }
23715diff -urNp linux-3.1.1/drivers/ata/pata_arasan_cf.c linux-3.1.1/drivers/ata/pata_arasan_cf.c
23716--- linux-3.1.1/drivers/ata/pata_arasan_cf.c 2011-11-11 15:19:27.000000000 -0500
23717+++ linux-3.1.1/drivers/ata/pata_arasan_cf.c 2011-11-16 18:39:07.000000000 -0500
23718@@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(str
23719 /* Handle platform specific quirks */
23720 if (pdata->quirk) {
23721 if (pdata->quirk & CF_BROKEN_PIO) {
23722- ap->ops->set_piomode = NULL;
23723+ pax_open_kernel();
23724+ *(void **)&ap->ops->set_piomode = NULL;
23725+ pax_close_kernel();
23726 ap->pio_mask = 0;
23727 }
23728 if (pdata->quirk & CF_BROKEN_MWDMA)
23729diff -urNp linux-3.1.1/drivers/atm/adummy.c linux-3.1.1/drivers/atm/adummy.c
23730--- linux-3.1.1/drivers/atm/adummy.c 2011-11-11 15:19:27.000000000 -0500
23731+++ linux-3.1.1/drivers/atm/adummy.c 2011-11-16 18:39:07.000000000 -0500
23732@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct
23733 vcc->pop(vcc, skb);
23734 else
23735 dev_kfree_skb_any(skb);
23736- atomic_inc(&vcc->stats->tx);
23737+ atomic_inc_unchecked(&vcc->stats->tx);
23738
23739 return 0;
23740 }
23741diff -urNp linux-3.1.1/drivers/atm/ambassador.c linux-3.1.1/drivers/atm/ambassador.c
23742--- linux-3.1.1/drivers/atm/ambassador.c 2011-11-11 15:19:27.000000000 -0500
23743+++ linux-3.1.1/drivers/atm/ambassador.c 2011-11-16 18:39:07.000000000 -0500
23744@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev,
23745 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
23746
23747 // VC layer stats
23748- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
23749+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
23750
23751 // free the descriptor
23752 kfree (tx_descr);
23753@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev,
23754 dump_skb ("<<<", vc, skb);
23755
23756 // VC layer stats
23757- atomic_inc(&atm_vcc->stats->rx);
23758+ atomic_inc_unchecked(&atm_vcc->stats->rx);
23759 __net_timestamp(skb);
23760 // end of our responsibility
23761 atm_vcc->push (atm_vcc, skb);
23762@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev,
23763 } else {
23764 PRINTK (KERN_INFO, "dropped over-size frame");
23765 // should we count this?
23766- atomic_inc(&atm_vcc->stats->rx_drop);
23767+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
23768 }
23769
23770 } else {
23771@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * at
23772 }
23773
23774 if (check_area (skb->data, skb->len)) {
23775- atomic_inc(&atm_vcc->stats->tx_err);
23776+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
23777 return -ENOMEM; // ?
23778 }
23779
23780diff -urNp linux-3.1.1/drivers/atm/atmtcp.c linux-3.1.1/drivers/atm/atmtcp.c
23781--- linux-3.1.1/drivers/atm/atmtcp.c 2011-11-11 15:19:27.000000000 -0500
23782+++ linux-3.1.1/drivers/atm/atmtcp.c 2011-11-16 18:39:07.000000000 -0500
23783@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc
23784 if (vcc->pop) vcc->pop(vcc,skb);
23785 else dev_kfree_skb(skb);
23786 if (dev_data) return 0;
23787- atomic_inc(&vcc->stats->tx_err);
23788+ atomic_inc_unchecked(&vcc->stats->tx_err);
23789 return -ENOLINK;
23790 }
23791 size = skb->len+sizeof(struct atmtcp_hdr);
23792@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc
23793 if (!new_skb) {
23794 if (vcc->pop) vcc->pop(vcc,skb);
23795 else dev_kfree_skb(skb);
23796- atomic_inc(&vcc->stats->tx_err);
23797+ atomic_inc_unchecked(&vcc->stats->tx_err);
23798 return -ENOBUFS;
23799 }
23800 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
23801@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc
23802 if (vcc->pop) vcc->pop(vcc,skb);
23803 else dev_kfree_skb(skb);
23804 out_vcc->push(out_vcc,new_skb);
23805- atomic_inc(&vcc->stats->tx);
23806- atomic_inc(&out_vcc->stats->rx);
23807+ atomic_inc_unchecked(&vcc->stats->tx);
23808+ atomic_inc_unchecked(&out_vcc->stats->rx);
23809 return 0;
23810 }
23811
23812@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc
23813 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
23814 read_unlock(&vcc_sklist_lock);
23815 if (!out_vcc) {
23816- atomic_inc(&vcc->stats->tx_err);
23817+ atomic_inc_unchecked(&vcc->stats->tx_err);
23818 goto done;
23819 }
23820 skb_pull(skb,sizeof(struct atmtcp_hdr));
23821@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc
23822 __net_timestamp(new_skb);
23823 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
23824 out_vcc->push(out_vcc,new_skb);
23825- atomic_inc(&vcc->stats->tx);
23826- atomic_inc(&out_vcc->stats->rx);
23827+ atomic_inc_unchecked(&vcc->stats->tx);
23828+ atomic_inc_unchecked(&out_vcc->stats->rx);
23829 done:
23830 if (vcc->pop) vcc->pop(vcc,skb);
23831 else dev_kfree_skb(skb);
23832diff -urNp linux-3.1.1/drivers/atm/eni.c linux-3.1.1/drivers/atm/eni.c
23833--- linux-3.1.1/drivers/atm/eni.c 2011-11-11 15:19:27.000000000 -0500
23834+++ linux-3.1.1/drivers/atm/eni.c 2011-11-16 18:39:07.000000000 -0500
23835@@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
23836 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
23837 vcc->dev->number);
23838 length = 0;
23839- atomic_inc(&vcc->stats->rx_err);
23840+ atomic_inc_unchecked(&vcc->stats->rx_err);
23841 }
23842 else {
23843 length = ATM_CELL_SIZE-1; /* no HEC */
23844@@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
23845 size);
23846 }
23847 eff = length = 0;
23848- atomic_inc(&vcc->stats->rx_err);
23849+ atomic_inc_unchecked(&vcc->stats->rx_err);
23850 }
23851 else {
23852 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
23853@@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
23854 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
23855 vcc->dev->number,vcc->vci,length,size << 2,descr);
23856 length = eff = 0;
23857- atomic_inc(&vcc->stats->rx_err);
23858+ atomic_inc_unchecked(&vcc->stats->rx_err);
23859 }
23860 }
23861 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
23862@@ -771,7 +771,7 @@ rx_dequeued++;
23863 vcc->push(vcc,skb);
23864 pushed++;
23865 }
23866- atomic_inc(&vcc->stats->rx);
23867+ atomic_inc_unchecked(&vcc->stats->rx);
23868 }
23869 wake_up(&eni_dev->rx_wait);
23870 }
23871@@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *d
23872 PCI_DMA_TODEVICE);
23873 if (vcc->pop) vcc->pop(vcc,skb);
23874 else dev_kfree_skb_irq(skb);
23875- atomic_inc(&vcc->stats->tx);
23876+ atomic_inc_unchecked(&vcc->stats->tx);
23877 wake_up(&eni_dev->tx_wait);
23878 dma_complete++;
23879 }
23880@@ -1568,7 +1568,7 @@ tx_complete++;
23881 /*--------------------------------- entries ---------------------------------*/
23882
23883
23884-static const char *media_name[] __devinitdata = {
23885+static const char *media_name[] __devinitconst = {
23886 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
23887 "UTP", "05?", "06?", "07?", /* 4- 7 */
23888 "TAXI","09?", "10?", "11?", /* 8-11 */
23889diff -urNp linux-3.1.1/drivers/atm/firestream.c linux-3.1.1/drivers/atm/firestream.c
23890--- linux-3.1.1/drivers/atm/firestream.c 2011-11-11 15:19:27.000000000 -0500
23891+++ linux-3.1.1/drivers/atm/firestream.c 2011-11-16 18:39:07.000000000 -0500
23892@@ -750,7 +750,7 @@ static void process_txdone_queue (struct
23893 }
23894 }
23895
23896- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
23897+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
23898
23899 fs_dprintk (FS_DEBUG_TXMEM, "i");
23900 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
23901@@ -817,7 +817,7 @@ static void process_incoming (struct fs_
23902 #endif
23903 skb_put (skb, qe->p1 & 0xffff);
23904 ATM_SKB(skb)->vcc = atm_vcc;
23905- atomic_inc(&atm_vcc->stats->rx);
23906+ atomic_inc_unchecked(&atm_vcc->stats->rx);
23907 __net_timestamp(skb);
23908 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
23909 atm_vcc->push (atm_vcc, skb);
23910@@ -838,12 +838,12 @@ static void process_incoming (struct fs_
23911 kfree (pe);
23912 }
23913 if (atm_vcc)
23914- atomic_inc(&atm_vcc->stats->rx_drop);
23915+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
23916 break;
23917 case 0x1f: /* Reassembly abort: no buffers. */
23918 /* Silently increment error counter. */
23919 if (atm_vcc)
23920- atomic_inc(&atm_vcc->stats->rx_drop);
23921+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
23922 break;
23923 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
23924 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
23925diff -urNp linux-3.1.1/drivers/atm/fore200e.c linux-3.1.1/drivers/atm/fore200e.c
23926--- linux-3.1.1/drivers/atm/fore200e.c 2011-11-11 15:19:27.000000000 -0500
23927+++ linux-3.1.1/drivers/atm/fore200e.c 2011-11-16 18:39:07.000000000 -0500
23928@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200
23929 #endif
23930 /* check error condition */
23931 if (*entry->status & STATUS_ERROR)
23932- atomic_inc(&vcc->stats->tx_err);
23933+ atomic_inc_unchecked(&vcc->stats->tx_err);
23934 else
23935- atomic_inc(&vcc->stats->tx);
23936+ atomic_inc_unchecked(&vcc->stats->tx);
23937 }
23938 }
23939
23940@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore2
23941 if (skb == NULL) {
23942 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
23943
23944- atomic_inc(&vcc->stats->rx_drop);
23945+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23946 return -ENOMEM;
23947 }
23948
23949@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore2
23950
23951 dev_kfree_skb_any(skb);
23952
23953- atomic_inc(&vcc->stats->rx_drop);
23954+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23955 return -ENOMEM;
23956 }
23957
23958 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
23959
23960 vcc->push(vcc, skb);
23961- atomic_inc(&vcc->stats->rx);
23962+ atomic_inc_unchecked(&vcc->stats->rx);
23963
23964 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
23965
23966@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200
23967 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
23968 fore200e->atm_dev->number,
23969 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
23970- atomic_inc(&vcc->stats->rx_err);
23971+ atomic_inc_unchecked(&vcc->stats->rx_err);
23972 }
23973 }
23974
23975@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struc
23976 goto retry_here;
23977 }
23978
23979- atomic_inc(&vcc->stats->tx_err);
23980+ atomic_inc_unchecked(&vcc->stats->tx_err);
23981
23982 fore200e->tx_sat++;
23983 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
23984diff -urNp linux-3.1.1/drivers/atm/he.c linux-3.1.1/drivers/atm/he.c
23985--- linux-3.1.1/drivers/atm/he.c 2011-11-11 15:19:27.000000000 -0500
23986+++ linux-3.1.1/drivers/atm/he.c 2011-11-16 18:39:07.000000000 -0500
23987@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, i
23988
23989 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
23990 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
23991- atomic_inc(&vcc->stats->rx_drop);
23992+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23993 goto return_host_buffers;
23994 }
23995
23996@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, i
23997 RBRQ_LEN_ERR(he_dev->rbrq_head)
23998 ? "LEN_ERR" : "",
23999 vcc->vpi, vcc->vci);
24000- atomic_inc(&vcc->stats->rx_err);
24001+ atomic_inc_unchecked(&vcc->stats->rx_err);
24002 goto return_host_buffers;
24003 }
24004
24005@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, i
24006 vcc->push(vcc, skb);
24007 spin_lock(&he_dev->global_lock);
24008
24009- atomic_inc(&vcc->stats->rx);
24010+ atomic_inc_unchecked(&vcc->stats->rx);
24011
24012 return_host_buffers:
24013 ++pdus_assembled;
24014@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
24015 tpd->vcc->pop(tpd->vcc, tpd->skb);
24016 else
24017 dev_kfree_skb_any(tpd->skb);
24018- atomic_inc(&tpd->vcc->stats->tx_err);
24019+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
24020 }
24021 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
24022 return;
24023@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
24024 vcc->pop(vcc, skb);
24025 else
24026 dev_kfree_skb_any(skb);
24027- atomic_inc(&vcc->stats->tx_err);
24028+ atomic_inc_unchecked(&vcc->stats->tx_err);
24029 return -EINVAL;
24030 }
24031
24032@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
24033 vcc->pop(vcc, skb);
24034 else
24035 dev_kfree_skb_any(skb);
24036- atomic_inc(&vcc->stats->tx_err);
24037+ atomic_inc_unchecked(&vcc->stats->tx_err);
24038 return -EINVAL;
24039 }
24040 #endif
24041@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
24042 vcc->pop(vcc, skb);
24043 else
24044 dev_kfree_skb_any(skb);
24045- atomic_inc(&vcc->stats->tx_err);
24046+ atomic_inc_unchecked(&vcc->stats->tx_err);
24047 spin_unlock_irqrestore(&he_dev->global_lock, flags);
24048 return -ENOMEM;
24049 }
24050@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
24051 vcc->pop(vcc, skb);
24052 else
24053 dev_kfree_skb_any(skb);
24054- atomic_inc(&vcc->stats->tx_err);
24055+ atomic_inc_unchecked(&vcc->stats->tx_err);
24056 spin_unlock_irqrestore(&he_dev->global_lock, flags);
24057 return -ENOMEM;
24058 }
24059@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
24060 __enqueue_tpd(he_dev, tpd, cid);
24061 spin_unlock_irqrestore(&he_dev->global_lock, flags);
24062
24063- atomic_inc(&vcc->stats->tx);
24064+ atomic_inc_unchecked(&vcc->stats->tx);
24065
24066 return 0;
24067 }
24068diff -urNp linux-3.1.1/drivers/atm/horizon.c linux-3.1.1/drivers/atm/horizon.c
24069--- linux-3.1.1/drivers/atm/horizon.c 2011-11-11 15:19:27.000000000 -0500
24070+++ linux-3.1.1/drivers/atm/horizon.c 2011-11-16 18:39:07.000000000 -0500
24071@@ -1035,7 +1035,7 @@ static void rx_schedule (hrz_dev * dev,
24072 {
24073 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
24074 // VC layer stats
24075- atomic_inc(&vcc->stats->rx);
24076+ atomic_inc_unchecked(&vcc->stats->rx);
24077 __net_timestamp(skb);
24078 // end of our responsibility
24079 vcc->push (vcc, skb);
24080@@ -1187,7 +1187,7 @@ static void tx_schedule (hrz_dev * const
24081 dev->tx_iovec = NULL;
24082
24083 // VC layer stats
24084- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
24085+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
24086
24087 // free the skb
24088 hrz_kfree_skb (skb);
24089diff -urNp linux-3.1.1/drivers/atm/idt77252.c linux-3.1.1/drivers/atm/idt77252.c
24090--- linux-3.1.1/drivers/atm/idt77252.c 2011-11-11 15:19:27.000000000 -0500
24091+++ linux-3.1.1/drivers/atm/idt77252.c 2011-11-16 18:39:07.000000000 -0500
24092@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, str
24093 else
24094 dev_kfree_skb(skb);
24095
24096- atomic_inc(&vcc->stats->tx);
24097+ atomic_inc_unchecked(&vcc->stats->tx);
24098 }
24099
24100 atomic_dec(&scq->used);
24101@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, st
24102 if ((sb = dev_alloc_skb(64)) == NULL) {
24103 printk("%s: Can't allocate buffers for aal0.\n",
24104 card->name);
24105- atomic_add(i, &vcc->stats->rx_drop);
24106+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
24107 break;
24108 }
24109 if (!atm_charge(vcc, sb->truesize)) {
24110 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
24111 card->name);
24112- atomic_add(i - 1, &vcc->stats->rx_drop);
24113+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
24114 dev_kfree_skb(sb);
24115 break;
24116 }
24117@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, st
24118 ATM_SKB(sb)->vcc = vcc;
24119 __net_timestamp(sb);
24120 vcc->push(vcc, sb);
24121- atomic_inc(&vcc->stats->rx);
24122+ atomic_inc_unchecked(&vcc->stats->rx);
24123
24124 cell += ATM_CELL_PAYLOAD;
24125 }
24126@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, st
24127 "(CDC: %08x)\n",
24128 card->name, len, rpp->len, readl(SAR_REG_CDC));
24129 recycle_rx_pool_skb(card, rpp);
24130- atomic_inc(&vcc->stats->rx_err);
24131+ atomic_inc_unchecked(&vcc->stats->rx_err);
24132 return;
24133 }
24134 if (stat & SAR_RSQE_CRC) {
24135 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
24136 recycle_rx_pool_skb(card, rpp);
24137- atomic_inc(&vcc->stats->rx_err);
24138+ atomic_inc_unchecked(&vcc->stats->rx_err);
24139 return;
24140 }
24141 if (skb_queue_len(&rpp->queue) > 1) {
24142@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, st
24143 RXPRINTK("%s: Can't alloc RX skb.\n",
24144 card->name);
24145 recycle_rx_pool_skb(card, rpp);
24146- atomic_inc(&vcc->stats->rx_err);
24147+ atomic_inc_unchecked(&vcc->stats->rx_err);
24148 return;
24149 }
24150 if (!atm_charge(vcc, skb->truesize)) {
24151@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, st
24152 __net_timestamp(skb);
24153
24154 vcc->push(vcc, skb);
24155- atomic_inc(&vcc->stats->rx);
24156+ atomic_inc_unchecked(&vcc->stats->rx);
24157
24158 return;
24159 }
24160@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, st
24161 __net_timestamp(skb);
24162
24163 vcc->push(vcc, skb);
24164- atomic_inc(&vcc->stats->rx);
24165+ atomic_inc_unchecked(&vcc->stats->rx);
24166
24167 if (skb->truesize > SAR_FB_SIZE_3)
24168 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
24169@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *car
24170 if (vcc->qos.aal != ATM_AAL0) {
24171 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
24172 card->name, vpi, vci);
24173- atomic_inc(&vcc->stats->rx_drop);
24174+ atomic_inc_unchecked(&vcc->stats->rx_drop);
24175 goto drop;
24176 }
24177
24178 if ((sb = dev_alloc_skb(64)) == NULL) {
24179 printk("%s: Can't allocate buffers for AAL0.\n",
24180 card->name);
24181- atomic_inc(&vcc->stats->rx_err);
24182+ atomic_inc_unchecked(&vcc->stats->rx_err);
24183 goto drop;
24184 }
24185
24186@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *car
24187 ATM_SKB(sb)->vcc = vcc;
24188 __net_timestamp(sb);
24189 vcc->push(vcc, sb);
24190- atomic_inc(&vcc->stats->rx);
24191+ atomic_inc_unchecked(&vcc->stats->rx);
24192
24193 drop:
24194 skb_pull(queue, 64);
24195@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
24196
24197 if (vc == NULL) {
24198 printk("%s: NULL connection in send().\n", card->name);
24199- atomic_inc(&vcc->stats->tx_err);
24200+ atomic_inc_unchecked(&vcc->stats->tx_err);
24201 dev_kfree_skb(skb);
24202 return -EINVAL;
24203 }
24204 if (!test_bit(VCF_TX, &vc->flags)) {
24205 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
24206- atomic_inc(&vcc->stats->tx_err);
24207+ atomic_inc_unchecked(&vcc->stats->tx_err);
24208 dev_kfree_skb(skb);
24209 return -EINVAL;
24210 }
24211@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
24212 break;
24213 default:
24214 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
24215- atomic_inc(&vcc->stats->tx_err);
24216+ atomic_inc_unchecked(&vcc->stats->tx_err);
24217 dev_kfree_skb(skb);
24218 return -EINVAL;
24219 }
24220
24221 if (skb_shinfo(skb)->nr_frags != 0) {
24222 printk("%s: No scatter-gather yet.\n", card->name);
24223- atomic_inc(&vcc->stats->tx_err);
24224+ atomic_inc_unchecked(&vcc->stats->tx_err);
24225 dev_kfree_skb(skb);
24226 return -EINVAL;
24227 }
24228@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
24229
24230 err = queue_skb(card, vc, skb, oam);
24231 if (err) {
24232- atomic_inc(&vcc->stats->tx_err);
24233+ atomic_inc_unchecked(&vcc->stats->tx_err);
24234 dev_kfree_skb(skb);
24235 return err;
24236 }
24237@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
24238 skb = dev_alloc_skb(64);
24239 if (!skb) {
24240 printk("%s: Out of memory in send_oam().\n", card->name);
24241- atomic_inc(&vcc->stats->tx_err);
24242+ atomic_inc_unchecked(&vcc->stats->tx_err);
24243 return -ENOMEM;
24244 }
24245 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
24246diff -urNp linux-3.1.1/drivers/atm/iphase.c linux-3.1.1/drivers/atm/iphase.c
24247--- linux-3.1.1/drivers/atm/iphase.c 2011-11-11 15:19:27.000000000 -0500
24248+++ linux-3.1.1/drivers/atm/iphase.c 2011-11-16 18:39:07.000000000 -0500
24249@@ -1121,7 +1121,7 @@ static int rx_pkt(struct atm_dev *dev)
24250 status = (u_short) (buf_desc_ptr->desc_mode);
24251 if (status & (RX_CER | RX_PTE | RX_OFL))
24252 {
24253- atomic_inc(&vcc->stats->rx_err);
24254+ atomic_inc_unchecked(&vcc->stats->rx_err);
24255 IF_ERR(printk("IA: bad packet, dropping it");)
24256 if (status & RX_CER) {
24257 IF_ERR(printk(" cause: packet CRC error\n");)
24258@@ -1144,7 +1144,7 @@ static int rx_pkt(struct atm_dev *dev)
24259 len = dma_addr - buf_addr;
24260 if (len > iadev->rx_buf_sz) {
24261 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
24262- atomic_inc(&vcc->stats->rx_err);
24263+ atomic_inc_unchecked(&vcc->stats->rx_err);
24264 goto out_free_desc;
24265 }
24266
24267@@ -1294,7 +1294,7 @@ static void rx_dle_intr(struct atm_dev *
24268 ia_vcc = INPH_IA_VCC(vcc);
24269 if (ia_vcc == NULL)
24270 {
24271- atomic_inc(&vcc->stats->rx_err);
24272+ atomic_inc_unchecked(&vcc->stats->rx_err);
24273 dev_kfree_skb_any(skb);
24274 atm_return(vcc, atm_guess_pdu2truesize(len));
24275 goto INCR_DLE;
24276@@ -1306,7 +1306,7 @@ static void rx_dle_intr(struct atm_dev *
24277 if ((length > iadev->rx_buf_sz) || (length >
24278 (skb->len - sizeof(struct cpcs_trailer))))
24279 {
24280- atomic_inc(&vcc->stats->rx_err);
24281+ atomic_inc_unchecked(&vcc->stats->rx_err);
24282 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
24283 length, skb->len);)
24284 dev_kfree_skb_any(skb);
24285@@ -1322,7 +1322,7 @@ static void rx_dle_intr(struct atm_dev *
24286
24287 IF_RX(printk("rx_dle_intr: skb push");)
24288 vcc->push(vcc,skb);
24289- atomic_inc(&vcc->stats->rx);
24290+ atomic_inc_unchecked(&vcc->stats->rx);
24291 iadev->rx_pkt_cnt++;
24292 }
24293 INCR_DLE:
24294@@ -2802,15 +2802,15 @@ static int ia_ioctl(struct atm_dev *dev,
24295 {
24296 struct k_sonet_stats *stats;
24297 stats = &PRIV(_ia_dev[board])->sonet_stats;
24298- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
24299- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
24300- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
24301- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
24302- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
24303- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
24304- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
24305- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
24306- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
24307+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
24308+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
24309+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
24310+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
24311+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
24312+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
24313+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
24314+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
24315+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
24316 }
24317 ia_cmds.status = 0;
24318 break;
24319@@ -2915,7 +2915,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
24320 if ((desc == 0) || (desc > iadev->num_tx_desc))
24321 {
24322 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
24323- atomic_inc(&vcc->stats->tx);
24324+ atomic_inc_unchecked(&vcc->stats->tx);
24325 if (vcc->pop)
24326 vcc->pop(vcc, skb);
24327 else
24328@@ -3020,14 +3020,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
24329 ATM_DESC(skb) = vcc->vci;
24330 skb_queue_tail(&iadev->tx_dma_q, skb);
24331
24332- atomic_inc(&vcc->stats->tx);
24333+ atomic_inc_unchecked(&vcc->stats->tx);
24334 iadev->tx_pkt_cnt++;
24335 /* Increment transaction counter */
24336 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
24337
24338 #if 0
24339 /* add flow control logic */
24340- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
24341+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
24342 if (iavcc->vc_desc_cnt > 10) {
24343 vcc->tx_quota = vcc->tx_quota * 3 / 4;
24344 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
24345diff -urNp linux-3.1.1/drivers/atm/lanai.c linux-3.1.1/drivers/atm/lanai.c
24346--- linux-3.1.1/drivers/atm/lanai.c 2011-11-11 15:19:27.000000000 -0500
24347+++ linux-3.1.1/drivers/atm/lanai.c 2011-11-16 18:39:07.000000000 -0500
24348@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct l
24349 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
24350 lanai_endtx(lanai, lvcc);
24351 lanai_free_skb(lvcc->tx.atmvcc, skb);
24352- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
24353+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
24354 }
24355
24356 /* Try to fill the buffer - don't call unless there is backlog */
24357@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc
24358 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
24359 __net_timestamp(skb);
24360 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
24361- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
24362+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
24363 out:
24364 lvcc->rx.buf.ptr = end;
24365 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
24366@@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_d
24367 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
24368 "vcc %d\n", lanai->number, (unsigned int) s, vci);
24369 lanai->stats.service_rxnotaal5++;
24370- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
24371+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
24372 return 0;
24373 }
24374 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
24375@@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_d
24376 int bytes;
24377 read_unlock(&vcc_sklist_lock);
24378 DPRINTK("got trashed rx pdu on vci %d\n", vci);
24379- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
24380+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
24381 lvcc->stats.x.aal5.service_trash++;
24382 bytes = (SERVICE_GET_END(s) * 16) -
24383 (((unsigned long) lvcc->rx.buf.ptr) -
24384@@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_d
24385 }
24386 if (s & SERVICE_STREAM) {
24387 read_unlock(&vcc_sklist_lock);
24388- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
24389+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
24390 lvcc->stats.x.aal5.service_stream++;
24391 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
24392 "PDU on VCI %d!\n", lanai->number, vci);
24393@@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_d
24394 return 0;
24395 }
24396 DPRINTK("got rx crc error on vci %d\n", vci);
24397- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
24398+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
24399 lvcc->stats.x.aal5.service_rxcrc++;
24400 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
24401 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
24402diff -urNp linux-3.1.1/drivers/atm/nicstar.c linux-3.1.1/drivers/atm/nicstar.c
24403--- linux-3.1.1/drivers/atm/nicstar.c 2011-11-11 15:19:27.000000000 -0500
24404+++ linux-3.1.1/drivers/atm/nicstar.c 2011-11-16 18:39:07.000000000 -0500
24405@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc,
24406 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
24407 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
24408 card->index);
24409- atomic_inc(&vcc->stats->tx_err);
24410+ atomic_inc_unchecked(&vcc->stats->tx_err);
24411 dev_kfree_skb_any(skb);
24412 return -EINVAL;
24413 }
24414@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc,
24415 if (!vc->tx) {
24416 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
24417 card->index);
24418- atomic_inc(&vcc->stats->tx_err);
24419+ atomic_inc_unchecked(&vcc->stats->tx_err);
24420 dev_kfree_skb_any(skb);
24421 return -EINVAL;
24422 }
24423@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc,
24424 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
24425 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
24426 card->index);
24427- atomic_inc(&vcc->stats->tx_err);
24428+ atomic_inc_unchecked(&vcc->stats->tx_err);
24429 dev_kfree_skb_any(skb);
24430 return -EINVAL;
24431 }
24432
24433 if (skb_shinfo(skb)->nr_frags != 0) {
24434 printk("nicstar%d: No scatter-gather yet.\n", card->index);
24435- atomic_inc(&vcc->stats->tx_err);
24436+ atomic_inc_unchecked(&vcc->stats->tx_err);
24437 dev_kfree_skb_any(skb);
24438 return -EINVAL;
24439 }
24440@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc,
24441 }
24442
24443 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
24444- atomic_inc(&vcc->stats->tx_err);
24445+ atomic_inc_unchecked(&vcc->stats->tx_err);
24446 dev_kfree_skb_any(skb);
24447 return -EIO;
24448 }
24449- atomic_inc(&vcc->stats->tx);
24450+ atomic_inc_unchecked(&vcc->stats->tx);
24451
24452 return 0;
24453 }
24454@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns
24455 printk
24456 ("nicstar%d: Can't allocate buffers for aal0.\n",
24457 card->index);
24458- atomic_add(i, &vcc->stats->rx_drop);
24459+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
24460 break;
24461 }
24462 if (!atm_charge(vcc, sb->truesize)) {
24463 RXPRINTK
24464 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
24465 card->index);
24466- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
24467+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
24468 dev_kfree_skb_any(sb);
24469 break;
24470 }
24471@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns
24472 ATM_SKB(sb)->vcc = vcc;
24473 __net_timestamp(sb);
24474 vcc->push(vcc, sb);
24475- atomic_inc(&vcc->stats->rx);
24476+ atomic_inc_unchecked(&vcc->stats->rx);
24477 cell += ATM_CELL_PAYLOAD;
24478 }
24479
24480@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns
24481 if (iovb == NULL) {
24482 printk("nicstar%d: Out of iovec buffers.\n",
24483 card->index);
24484- atomic_inc(&vcc->stats->rx_drop);
24485+ atomic_inc_unchecked(&vcc->stats->rx_drop);
24486 recycle_rx_buf(card, skb);
24487 return;
24488 }
24489@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns
24490 small or large buffer itself. */
24491 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
24492 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
24493- atomic_inc(&vcc->stats->rx_err);
24494+ atomic_inc_unchecked(&vcc->stats->rx_err);
24495 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
24496 NS_MAX_IOVECS);
24497 NS_PRV_IOVCNT(iovb) = 0;
24498@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns
24499 ("nicstar%d: Expected a small buffer, and this is not one.\n",
24500 card->index);
24501 which_list(card, skb);
24502- atomic_inc(&vcc->stats->rx_err);
24503+ atomic_inc_unchecked(&vcc->stats->rx_err);
24504 recycle_rx_buf(card, skb);
24505 vc->rx_iov = NULL;
24506 recycle_iov_buf(card, iovb);
24507@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns
24508 ("nicstar%d: Expected a large buffer, and this is not one.\n",
24509 card->index);
24510 which_list(card, skb);
24511- atomic_inc(&vcc->stats->rx_err);
24512+ atomic_inc_unchecked(&vcc->stats->rx_err);
24513 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
24514 NS_PRV_IOVCNT(iovb));
24515 vc->rx_iov = NULL;
24516@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns
24517 printk(" - PDU size mismatch.\n");
24518 else
24519 printk(".\n");
24520- atomic_inc(&vcc->stats->rx_err);
24521+ atomic_inc_unchecked(&vcc->stats->rx_err);
24522 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
24523 NS_PRV_IOVCNT(iovb));
24524 vc->rx_iov = NULL;
24525@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns
24526 /* skb points to a small buffer */
24527 if (!atm_charge(vcc, skb->truesize)) {
24528 push_rxbufs(card, skb);
24529- atomic_inc(&vcc->stats->rx_drop);
24530+ atomic_inc_unchecked(&vcc->stats->rx_drop);
24531 } else {
24532 skb_put(skb, len);
24533 dequeue_sm_buf(card, skb);
24534@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns
24535 ATM_SKB(skb)->vcc = vcc;
24536 __net_timestamp(skb);
24537 vcc->push(vcc, skb);
24538- atomic_inc(&vcc->stats->rx);
24539+ atomic_inc_unchecked(&vcc->stats->rx);
24540 }
24541 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
24542 struct sk_buff *sb;
24543@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns
24544 if (len <= NS_SMBUFSIZE) {
24545 if (!atm_charge(vcc, sb->truesize)) {
24546 push_rxbufs(card, sb);
24547- atomic_inc(&vcc->stats->rx_drop);
24548+ atomic_inc_unchecked(&vcc->stats->rx_drop);
24549 } else {
24550 skb_put(sb, len);
24551 dequeue_sm_buf(card, sb);
24552@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns
24553 ATM_SKB(sb)->vcc = vcc;
24554 __net_timestamp(sb);
24555 vcc->push(vcc, sb);
24556- atomic_inc(&vcc->stats->rx);
24557+ atomic_inc_unchecked(&vcc->stats->rx);
24558 }
24559
24560 push_rxbufs(card, skb);
24561@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns
24562
24563 if (!atm_charge(vcc, skb->truesize)) {
24564 push_rxbufs(card, skb);
24565- atomic_inc(&vcc->stats->rx_drop);
24566+ atomic_inc_unchecked(&vcc->stats->rx_drop);
24567 } else {
24568 dequeue_lg_buf(card, skb);
24569 #ifdef NS_USE_DESTRUCTORS
24570@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns
24571 ATM_SKB(skb)->vcc = vcc;
24572 __net_timestamp(skb);
24573 vcc->push(vcc, skb);
24574- atomic_inc(&vcc->stats->rx);
24575+ atomic_inc_unchecked(&vcc->stats->rx);
24576 }
24577
24578 push_rxbufs(card, sb);
24579@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns
24580 printk
24581 ("nicstar%d: Out of huge buffers.\n",
24582 card->index);
24583- atomic_inc(&vcc->stats->rx_drop);
24584+ atomic_inc_unchecked(&vcc->stats->rx_drop);
24585 recycle_iovec_rx_bufs(card,
24586 (struct iovec *)
24587 iovb->data,
24588@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns
24589 card->hbpool.count++;
24590 } else
24591 dev_kfree_skb_any(hb);
24592- atomic_inc(&vcc->stats->rx_drop);
24593+ atomic_inc_unchecked(&vcc->stats->rx_drop);
24594 } else {
24595 /* Copy the small buffer to the huge buffer */
24596 sb = (struct sk_buff *)iov->iov_base;
24597@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns
24598 #endif /* NS_USE_DESTRUCTORS */
24599 __net_timestamp(hb);
24600 vcc->push(vcc, hb);
24601- atomic_inc(&vcc->stats->rx);
24602+ atomic_inc_unchecked(&vcc->stats->rx);
24603 }
24604 }
24605
24606diff -urNp linux-3.1.1/drivers/atm/solos-pci.c linux-3.1.1/drivers/atm/solos-pci.c
24607--- linux-3.1.1/drivers/atm/solos-pci.c 2011-11-11 15:19:27.000000000 -0500
24608+++ linux-3.1.1/drivers/atm/solos-pci.c 2011-11-16 18:40:10.000000000 -0500
24609@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
24610 }
24611 atm_charge(vcc, skb->truesize);
24612 vcc->push(vcc, skb);
24613- atomic_inc(&vcc->stats->rx);
24614+ atomic_inc_unchecked(&vcc->stats->rx);
24615 break;
24616
24617 case PKT_STATUS:
24618@@ -899,6 +899,8 @@ static int print_buffer(struct sk_buff *
24619 char msg[500];
24620 char item[10];
24621
24622+ pax_track_stack();
24623+
24624 len = buf->len;
24625 for (i = 0; i < len; i++){
24626 if(i % 8 == 0)
24627@@ -1008,7 +1010,7 @@ static uint32_t fpga_tx(struct solos_car
24628 vcc = SKB_CB(oldskb)->vcc;
24629
24630 if (vcc) {
24631- atomic_inc(&vcc->stats->tx);
24632+ atomic_inc_unchecked(&vcc->stats->tx);
24633 solos_pop(vcc, oldskb);
24634 } else
24635 dev_kfree_skb_irq(oldskb);
24636diff -urNp linux-3.1.1/drivers/atm/suni.c linux-3.1.1/drivers/atm/suni.c
24637--- linux-3.1.1/drivers/atm/suni.c 2011-11-11 15:19:27.000000000 -0500
24638+++ linux-3.1.1/drivers/atm/suni.c 2011-11-16 18:39:07.000000000 -0500
24639@@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
24640
24641
24642 #define ADD_LIMITED(s,v) \
24643- atomic_add((v),&stats->s); \
24644- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
24645+ atomic_add_unchecked((v),&stats->s); \
24646+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
24647
24648
24649 static void suni_hz(unsigned long from_timer)
24650diff -urNp linux-3.1.1/drivers/atm/uPD98402.c linux-3.1.1/drivers/atm/uPD98402.c
24651--- linux-3.1.1/drivers/atm/uPD98402.c 2011-11-11 15:19:27.000000000 -0500
24652+++ linux-3.1.1/drivers/atm/uPD98402.c 2011-11-16 18:39:07.000000000 -0500
24653@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *d
24654 struct sonet_stats tmp;
24655 int error = 0;
24656
24657- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
24658+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
24659 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
24660 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
24661 if (zero && !error) {
24662@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev
24663
24664
24665 #define ADD_LIMITED(s,v) \
24666- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
24667- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
24668- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
24669+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
24670+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
24671+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
24672
24673
24674 static void stat_event(struct atm_dev *dev)
24675@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev
24676 if (reason & uPD98402_INT_PFM) stat_event(dev);
24677 if (reason & uPD98402_INT_PCO) {
24678 (void) GET(PCOCR); /* clear interrupt cause */
24679- atomic_add(GET(HECCT),
24680+ atomic_add_unchecked(GET(HECCT),
24681 &PRIV(dev)->sonet_stats.uncorr_hcs);
24682 }
24683 if ((reason & uPD98402_INT_RFO) &&
24684@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev
24685 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
24686 uPD98402_INT_LOS),PIMR); /* enable them */
24687 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
24688- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
24689- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
24690- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
24691+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
24692+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
24693+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
24694 return 0;
24695 }
24696
24697diff -urNp linux-3.1.1/drivers/atm/zatm.c linux-3.1.1/drivers/atm/zatm.c
24698--- linux-3.1.1/drivers/atm/zatm.c 2011-11-11 15:19:27.000000000 -0500
24699+++ linux-3.1.1/drivers/atm/zatm.c 2011-11-16 18:39:07.000000000 -0500
24700@@ -460,7 +460,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
24701 }
24702 if (!size) {
24703 dev_kfree_skb_irq(skb);
24704- if (vcc) atomic_inc(&vcc->stats->rx_err);
24705+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
24706 continue;
24707 }
24708 if (!atm_charge(vcc,skb->truesize)) {
24709@@ -470,7 +470,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
24710 skb->len = size;
24711 ATM_SKB(skb)->vcc = vcc;
24712 vcc->push(vcc,skb);
24713- atomic_inc(&vcc->stats->rx);
24714+ atomic_inc_unchecked(&vcc->stats->rx);
24715 }
24716 zout(pos & 0xffff,MTA(mbx));
24717 #if 0 /* probably a stupid idea */
24718@@ -734,7 +734,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
24719 skb_queue_head(&zatm_vcc->backlog,skb);
24720 break;
24721 }
24722- atomic_inc(&vcc->stats->tx);
24723+ atomic_inc_unchecked(&vcc->stats->tx);
24724 wake_up(&zatm_vcc->tx_wait);
24725 }
24726
24727diff -urNp linux-3.1.1/drivers/base/devtmpfs.c linux-3.1.1/drivers/base/devtmpfs.c
24728--- linux-3.1.1/drivers/base/devtmpfs.c 2011-11-11 15:19:27.000000000 -0500
24729+++ linux-3.1.1/drivers/base/devtmpfs.c 2011-11-16 18:39:07.000000000 -0500
24730@@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
24731 if (!thread)
24732 return 0;
24733
24734- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
24735+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
24736 if (err)
24737 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
24738 else
24739diff -urNp linux-3.1.1/drivers/base/power/wakeup.c linux-3.1.1/drivers/base/power/wakeup.c
24740--- linux-3.1.1/drivers/base/power/wakeup.c 2011-11-11 15:19:27.000000000 -0500
24741+++ linux-3.1.1/drivers/base/power/wakeup.c 2011-11-16 18:39:07.000000000 -0500
24742@@ -29,14 +29,14 @@ bool events_check_enabled;
24743 * They need to be modified together atomically, so it's better to use one
24744 * atomic variable to hold them both.
24745 */
24746-static atomic_t combined_event_count = ATOMIC_INIT(0);
24747+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
24748
24749 #define IN_PROGRESS_BITS (sizeof(int) * 4)
24750 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
24751
24752 static void split_counters(unsigned int *cnt, unsigned int *inpr)
24753 {
24754- unsigned int comb = atomic_read(&combined_event_count);
24755+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
24756
24757 *cnt = (comb >> IN_PROGRESS_BITS);
24758 *inpr = comb & MAX_IN_PROGRESS;
24759@@ -350,7 +350,7 @@ static void wakeup_source_activate(struc
24760 ws->last_time = ktime_get();
24761
24762 /* Increment the counter of events in progress. */
24763- atomic_inc(&combined_event_count);
24764+ atomic_inc_unchecked(&combined_event_count);
24765 }
24766
24767 /**
24768@@ -440,7 +440,7 @@ static void wakeup_source_deactivate(str
24769 * Increment the counter of registered wakeup events and decrement the
24770 * couter of wakeup events in progress simultaneously.
24771 */
24772- atomic_add(MAX_IN_PROGRESS, &combined_event_count);
24773+ atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
24774 }
24775
24776 /**
24777diff -urNp linux-3.1.1/drivers/block/cciss.c linux-3.1.1/drivers/block/cciss.c
24778--- linux-3.1.1/drivers/block/cciss.c 2011-11-11 15:19:27.000000000 -0500
24779+++ linux-3.1.1/drivers/block/cciss.c 2011-11-16 18:40:10.000000000 -0500
24780@@ -1179,6 +1179,8 @@ static int cciss_ioctl32_passthru(struct
24781 int err;
24782 u32 cp;
24783
24784+ memset(&arg64, 0, sizeof(arg64));
24785+
24786 err = 0;
24787 err |=
24788 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
24789@@ -2986,7 +2988,7 @@ static void start_io(ctlr_info_t *h)
24790 while (!list_empty(&h->reqQ)) {
24791 c = list_entry(h->reqQ.next, CommandList_struct, list);
24792 /* can't do anything if fifo is full */
24793- if ((h->access.fifo_full(h))) {
24794+ if ((h->access->fifo_full(h))) {
24795 dev_warn(&h->pdev->dev, "fifo full\n");
24796 break;
24797 }
24798@@ -2996,7 +2998,7 @@ static void start_io(ctlr_info_t *h)
24799 h->Qdepth--;
24800
24801 /* Tell the controller execute command */
24802- h->access.submit_command(h, c);
24803+ h->access->submit_command(h, c);
24804
24805 /* Put job onto the completed Q */
24806 addQ(&h->cmpQ, c);
24807@@ -3422,17 +3424,17 @@ startio:
24808
24809 static inline unsigned long get_next_completion(ctlr_info_t *h)
24810 {
24811- return h->access.command_completed(h);
24812+ return h->access->command_completed(h);
24813 }
24814
24815 static inline int interrupt_pending(ctlr_info_t *h)
24816 {
24817- return h->access.intr_pending(h);
24818+ return h->access->intr_pending(h);
24819 }
24820
24821 static inline long interrupt_not_for_us(ctlr_info_t *h)
24822 {
24823- return ((h->access.intr_pending(h) == 0) ||
24824+ return ((h->access->intr_pending(h) == 0) ||
24825 (h->interrupts_enabled == 0));
24826 }
24827
24828@@ -3465,7 +3467,7 @@ static inline u32 next_command(ctlr_info
24829 u32 a;
24830
24831 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
24832- return h->access.command_completed(h);
24833+ return h->access->command_completed(h);
24834
24835 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
24836 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
24837@@ -4020,7 +4022,7 @@ static void __devinit cciss_put_controll
24838 trans_support & CFGTBL_Trans_use_short_tags);
24839
24840 /* Change the access methods to the performant access methods */
24841- h->access = SA5_performant_access;
24842+ h->access = &SA5_performant_access;
24843 h->transMethod = CFGTBL_Trans_Performant;
24844
24845 return;
24846@@ -4292,7 +4294,7 @@ static int __devinit cciss_pci_init(ctlr
24847 if (prod_index < 0)
24848 return -ENODEV;
24849 h->product_name = products[prod_index].product_name;
24850- h->access = *(products[prod_index].access);
24851+ h->access = products[prod_index].access;
24852
24853 if (cciss_board_disabled(h)) {
24854 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
24855@@ -5009,7 +5011,7 @@ reinit_after_soft_reset:
24856 }
24857
24858 /* make sure the board interrupts are off */
24859- h->access.set_intr_mask(h, CCISS_INTR_OFF);
24860+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
24861 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
24862 if (rc)
24863 goto clean2;
24864@@ -5061,7 +5063,7 @@ reinit_after_soft_reset:
24865 * fake ones to scoop up any residual completions.
24866 */
24867 spin_lock_irqsave(&h->lock, flags);
24868- h->access.set_intr_mask(h, CCISS_INTR_OFF);
24869+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
24870 spin_unlock_irqrestore(&h->lock, flags);
24871 free_irq(h->intr[PERF_MODE_INT], h);
24872 rc = cciss_request_irq(h, cciss_msix_discard_completions,
24873@@ -5081,9 +5083,9 @@ reinit_after_soft_reset:
24874 dev_info(&h->pdev->dev, "Board READY.\n");
24875 dev_info(&h->pdev->dev,
24876 "Waiting for stale completions to drain.\n");
24877- h->access.set_intr_mask(h, CCISS_INTR_ON);
24878+ h->access->set_intr_mask(h, CCISS_INTR_ON);
24879 msleep(10000);
24880- h->access.set_intr_mask(h, CCISS_INTR_OFF);
24881+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
24882
24883 rc = controller_reset_failed(h->cfgtable);
24884 if (rc)
24885@@ -5106,7 +5108,7 @@ reinit_after_soft_reset:
24886 cciss_scsi_setup(h);
24887
24888 /* Turn the interrupts on so we can service requests */
24889- h->access.set_intr_mask(h, CCISS_INTR_ON);
24890+ h->access->set_intr_mask(h, CCISS_INTR_ON);
24891
24892 /* Get the firmware version */
24893 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
24894@@ -5178,7 +5180,7 @@ static void cciss_shutdown(struct pci_de
24895 kfree(flush_buf);
24896 if (return_code != IO_OK)
24897 dev_warn(&h->pdev->dev, "Error flushing cache\n");
24898- h->access.set_intr_mask(h, CCISS_INTR_OFF);
24899+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
24900 free_irq(h->intr[PERF_MODE_INT], h);
24901 }
24902
24903diff -urNp linux-3.1.1/drivers/block/cciss.h linux-3.1.1/drivers/block/cciss.h
24904--- linux-3.1.1/drivers/block/cciss.h 2011-11-11 15:19:27.000000000 -0500
24905+++ linux-3.1.1/drivers/block/cciss.h 2011-11-16 18:39:07.000000000 -0500
24906@@ -100,7 +100,7 @@ struct ctlr_info
24907 /* information about each logical volume */
24908 drive_info_struct *drv[CISS_MAX_LUN];
24909
24910- struct access_method access;
24911+ struct access_method *access;
24912
24913 /* queue and queue Info */
24914 struct list_head reqQ;
24915diff -urNp linux-3.1.1/drivers/block/cpqarray.c linux-3.1.1/drivers/block/cpqarray.c
24916--- linux-3.1.1/drivers/block/cpqarray.c 2011-11-11 15:19:27.000000000 -0500
24917+++ linux-3.1.1/drivers/block/cpqarray.c 2011-11-16 18:40:10.000000000 -0500
24918@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_c
24919 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
24920 goto Enomem4;
24921 }
24922- hba[i]->access.set_intr_mask(hba[i], 0);
24923+ hba[i]->access->set_intr_mask(hba[i], 0);
24924 if (request_irq(hba[i]->intr, do_ida_intr,
24925 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
24926 {
24927@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_c
24928 add_timer(&hba[i]->timer);
24929
24930 /* Enable IRQ now that spinlock and rate limit timer are set up */
24931- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
24932+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
24933
24934 for(j=0; j<NWD; j++) {
24935 struct gendisk *disk = ida_gendisk[i][j];
24936@@ -694,7 +694,7 @@ DBGINFO(
24937 for(i=0; i<NR_PRODUCTS; i++) {
24938 if (board_id == products[i].board_id) {
24939 c->product_name = products[i].product_name;
24940- c->access = *(products[i].access);
24941+ c->access = products[i].access;
24942 break;
24943 }
24944 }
24945@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detec
24946 hba[ctlr]->intr = intr;
24947 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
24948 hba[ctlr]->product_name = products[j].product_name;
24949- hba[ctlr]->access = *(products[j].access);
24950+ hba[ctlr]->access = products[j].access;
24951 hba[ctlr]->ctlr = ctlr;
24952 hba[ctlr]->board_id = board_id;
24953 hba[ctlr]->pci_dev = NULL; /* not PCI */
24954@@ -911,6 +911,8 @@ static void do_ida_request(struct reques
24955 struct scatterlist tmp_sg[SG_MAX];
24956 int i, dir, seg;
24957
24958+ pax_track_stack();
24959+
24960 queue_next:
24961 creq = blk_peek_request(q);
24962 if (!creq)
24963@@ -980,7 +982,7 @@ static void start_io(ctlr_info_t *h)
24964
24965 while((c = h->reqQ) != NULL) {
24966 /* Can't do anything if we're busy */
24967- if (h->access.fifo_full(h) == 0)
24968+ if (h->access->fifo_full(h) == 0)
24969 return;
24970
24971 /* Get the first entry from the request Q */
24972@@ -988,7 +990,7 @@ static void start_io(ctlr_info_t *h)
24973 h->Qdepth--;
24974
24975 /* Tell the controller to do our bidding */
24976- h->access.submit_command(h, c);
24977+ h->access->submit_command(h, c);
24978
24979 /* Get onto the completion Q */
24980 addQ(&h->cmpQ, c);
24981@@ -1050,7 +1052,7 @@ static irqreturn_t do_ida_intr(int irq,
24982 unsigned long flags;
24983 __u32 a,a1;
24984
24985- istat = h->access.intr_pending(h);
24986+ istat = h->access->intr_pending(h);
24987 /* Is this interrupt for us? */
24988 if (istat == 0)
24989 return IRQ_NONE;
24990@@ -1061,7 +1063,7 @@ static irqreturn_t do_ida_intr(int irq,
24991 */
24992 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
24993 if (istat & FIFO_NOT_EMPTY) {
24994- while((a = h->access.command_completed(h))) {
24995+ while((a = h->access->command_completed(h))) {
24996 a1 = a; a &= ~3;
24997 if ((c = h->cmpQ) == NULL)
24998 {
24999@@ -1449,11 +1451,11 @@ static int sendcmd(
25000 /*
25001 * Disable interrupt
25002 */
25003- info_p->access.set_intr_mask(info_p, 0);
25004+ info_p->access->set_intr_mask(info_p, 0);
25005 /* Make sure there is room in the command FIFO */
25006 /* Actually it should be completely empty at this time. */
25007 for (i = 200000; i > 0; i--) {
25008- temp = info_p->access.fifo_full(info_p);
25009+ temp = info_p->access->fifo_full(info_p);
25010 if (temp != 0) {
25011 break;
25012 }
25013@@ -1466,7 +1468,7 @@ DBG(
25014 /*
25015 * Send the cmd
25016 */
25017- info_p->access.submit_command(info_p, c);
25018+ info_p->access->submit_command(info_p, c);
25019 complete = pollcomplete(ctlr);
25020
25021 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
25022@@ -1549,9 +1551,9 @@ static int revalidate_allvol(ctlr_info_t
25023 * we check the new geometry. Then turn interrupts back on when
25024 * we're done.
25025 */
25026- host->access.set_intr_mask(host, 0);
25027+ host->access->set_intr_mask(host, 0);
25028 getgeometry(ctlr);
25029- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
25030+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
25031
25032 for(i=0; i<NWD; i++) {
25033 struct gendisk *disk = ida_gendisk[ctlr][i];
25034@@ -1591,7 +1593,7 @@ static int pollcomplete(int ctlr)
25035 /* Wait (up to 2 seconds) for a command to complete */
25036
25037 for (i = 200000; i > 0; i--) {
25038- done = hba[ctlr]->access.command_completed(hba[ctlr]);
25039+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
25040 if (done == 0) {
25041 udelay(10); /* a short fixed delay */
25042 } else
25043diff -urNp linux-3.1.1/drivers/block/cpqarray.h linux-3.1.1/drivers/block/cpqarray.h
25044--- linux-3.1.1/drivers/block/cpqarray.h 2011-11-11 15:19:27.000000000 -0500
25045+++ linux-3.1.1/drivers/block/cpqarray.h 2011-11-16 18:39:07.000000000 -0500
25046@@ -99,7 +99,7 @@ struct ctlr_info {
25047 drv_info_t drv[NWD];
25048 struct proc_dir_entry *proc;
25049
25050- struct access_method access;
25051+ struct access_method *access;
25052
25053 cmdlist_t *reqQ;
25054 cmdlist_t *cmpQ;
25055diff -urNp linux-3.1.1/drivers/block/DAC960.c linux-3.1.1/drivers/block/DAC960.c
25056--- linux-3.1.1/drivers/block/DAC960.c 2011-11-11 15:19:27.000000000 -0500
25057+++ linux-3.1.1/drivers/block/DAC960.c 2011-11-16 18:40:10.000000000 -0500
25058@@ -1980,6 +1980,8 @@ static bool DAC960_V1_ReadDeviceConfigur
25059 unsigned long flags;
25060 int Channel, TargetID;
25061
25062+ pax_track_stack();
25063+
25064 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
25065 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
25066 sizeof(DAC960_SCSI_Inquiry_T) +
25067diff -urNp linux-3.1.1/drivers/block/drbd/drbd_int.h linux-3.1.1/drivers/block/drbd/drbd_int.h
25068--- linux-3.1.1/drivers/block/drbd/drbd_int.h 2011-11-11 15:19:27.000000000 -0500
25069+++ linux-3.1.1/drivers/block/drbd/drbd_int.h 2011-11-16 18:39:07.000000000 -0500
25070@@ -737,7 +737,7 @@ struct drbd_request;
25071 struct drbd_epoch {
25072 struct list_head list;
25073 unsigned int barrier_nr;
25074- atomic_t epoch_size; /* increased on every request added. */
25075+ atomic_unchecked_t epoch_size; /* increased on every request added. */
25076 atomic_t active; /* increased on every req. added, and dec on every finished. */
25077 unsigned long flags;
25078 };
25079@@ -1109,7 +1109,7 @@ struct drbd_conf {
25080 void *int_dig_in;
25081 void *int_dig_vv;
25082 wait_queue_head_t seq_wait;
25083- atomic_t packet_seq;
25084+ atomic_unchecked_t packet_seq;
25085 unsigned int peer_seq;
25086 spinlock_t peer_seq_lock;
25087 unsigned int minor;
25088@@ -1618,30 +1618,30 @@ static inline int drbd_setsockopt(struct
25089
25090 static inline void drbd_tcp_cork(struct socket *sock)
25091 {
25092- int __user val = 1;
25093+ int val = 1;
25094 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
25095- (char __user *)&val, sizeof(val));
25096+ (char __force_user *)&val, sizeof(val));
25097 }
25098
25099 static inline void drbd_tcp_uncork(struct socket *sock)
25100 {
25101- int __user val = 0;
25102+ int val = 0;
25103 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
25104- (char __user *)&val, sizeof(val));
25105+ (char __force_user *)&val, sizeof(val));
25106 }
25107
25108 static inline void drbd_tcp_nodelay(struct socket *sock)
25109 {
25110- int __user val = 1;
25111+ int val = 1;
25112 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
25113- (char __user *)&val, sizeof(val));
25114+ (char __force_user *)&val, sizeof(val));
25115 }
25116
25117 static inline void drbd_tcp_quickack(struct socket *sock)
25118 {
25119- int __user val = 2;
25120+ int val = 2;
25121 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
25122- (char __user *)&val, sizeof(val));
25123+ (char __force_user *)&val, sizeof(val));
25124 }
25125
25126 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
25127diff -urNp linux-3.1.1/drivers/block/drbd/drbd_main.c linux-3.1.1/drivers/block/drbd/drbd_main.c
25128--- linux-3.1.1/drivers/block/drbd/drbd_main.c 2011-11-11 15:19:27.000000000 -0500
25129+++ linux-3.1.1/drivers/block/drbd/drbd_main.c 2011-11-16 18:39:07.000000000 -0500
25130@@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_co
25131 p.sector = sector;
25132 p.block_id = block_id;
25133 p.blksize = blksize;
25134- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
25135+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
25136
25137 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
25138 return false;
25139@@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *m
25140 p.sector = cpu_to_be64(req->sector);
25141 p.block_id = (unsigned long)req;
25142 p.seq_num = cpu_to_be32(req->seq_num =
25143- atomic_add_return(1, &mdev->packet_seq));
25144+ atomic_add_return_unchecked(1, &mdev->packet_seq));
25145
25146 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
25147
25148@@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_
25149 atomic_set(&mdev->unacked_cnt, 0);
25150 atomic_set(&mdev->local_cnt, 0);
25151 atomic_set(&mdev->net_cnt, 0);
25152- atomic_set(&mdev->packet_seq, 0);
25153+ atomic_set_unchecked(&mdev->packet_seq, 0);
25154 atomic_set(&mdev->pp_in_use, 0);
25155 atomic_set(&mdev->pp_in_use_by_net, 0);
25156 atomic_set(&mdev->rs_sect_in, 0);
25157@@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf
25158 mdev->receiver.t_state);
25159
25160 /* no need to lock it, I'm the only thread alive */
25161- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
25162- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
25163+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
25164+ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
25165 mdev->al_writ_cnt =
25166 mdev->bm_writ_cnt =
25167 mdev->read_cnt =
25168diff -urNp linux-3.1.1/drivers/block/drbd/drbd_nl.c linux-3.1.1/drivers/block/drbd/drbd_nl.c
25169--- linux-3.1.1/drivers/block/drbd/drbd_nl.c 2011-11-11 15:19:27.000000000 -0500
25170+++ linux-3.1.1/drivers/block/drbd/drbd_nl.c 2011-11-16 18:39:07.000000000 -0500
25171@@ -2359,7 +2359,7 @@ static void drbd_connector_callback(stru
25172 module_put(THIS_MODULE);
25173 }
25174
25175-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
25176+static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
25177
25178 static unsigned short *
25179 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
25180@@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *
25181 cn_reply->id.idx = CN_IDX_DRBD;
25182 cn_reply->id.val = CN_VAL_DRBD;
25183
25184- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
25185+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
25186 cn_reply->ack = 0; /* not used here. */
25187 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
25188 (int)((char *)tl - (char *)reply->tag_list);
25189@@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_co
25190 cn_reply->id.idx = CN_IDX_DRBD;
25191 cn_reply->id.val = CN_VAL_DRBD;
25192
25193- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
25194+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
25195 cn_reply->ack = 0; /* not used here. */
25196 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
25197 (int)((char *)tl - (char *)reply->tag_list);
25198@@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mde
25199 cn_reply->id.idx = CN_IDX_DRBD;
25200 cn_reply->id.val = CN_VAL_DRBD;
25201
25202- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
25203+ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
25204 cn_reply->ack = 0; // not used here.
25205 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
25206 (int)((char*)tl - (char*)reply->tag_list);
25207@@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drb
25208 cn_reply->id.idx = CN_IDX_DRBD;
25209 cn_reply->id.val = CN_VAL_DRBD;
25210
25211- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
25212+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
25213 cn_reply->ack = 0; /* not used here. */
25214 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
25215 (int)((char *)tl - (char *)reply->tag_list);
25216diff -urNp linux-3.1.1/drivers/block/drbd/drbd_receiver.c linux-3.1.1/drivers/block/drbd/drbd_receiver.c
25217--- linux-3.1.1/drivers/block/drbd/drbd_receiver.c 2011-11-11 15:19:27.000000000 -0500
25218+++ linux-3.1.1/drivers/block/drbd/drbd_receiver.c 2011-11-16 18:39:07.000000000 -0500
25219@@ -894,7 +894,7 @@ retry:
25220 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
25221 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
25222
25223- atomic_set(&mdev->packet_seq, 0);
25224+ atomic_set_unchecked(&mdev->packet_seq, 0);
25225 mdev->peer_seq = 0;
25226
25227 drbd_thread_start(&mdev->asender);
25228@@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish
25229 do {
25230 next_epoch = NULL;
25231
25232- epoch_size = atomic_read(&epoch->epoch_size);
25233+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
25234
25235 switch (ev & ~EV_CLEANUP) {
25236 case EV_PUT:
25237@@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish
25238 rv = FE_DESTROYED;
25239 } else {
25240 epoch->flags = 0;
25241- atomic_set(&epoch->epoch_size, 0);
25242+ atomic_set_unchecked(&epoch->epoch_size, 0);
25243 /* atomic_set(&epoch->active, 0); is already zero */
25244 if (rv == FE_STILL_LIVE)
25245 rv = FE_RECYCLED;
25246@@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_c
25247 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
25248 drbd_flush(mdev);
25249
25250- if (atomic_read(&mdev->current_epoch->epoch_size)) {
25251+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
25252 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
25253 if (epoch)
25254 break;
25255 }
25256
25257 epoch = mdev->current_epoch;
25258- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
25259+ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
25260
25261 D_ASSERT(atomic_read(&epoch->active) == 0);
25262 D_ASSERT(epoch->flags == 0);
25263@@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_c
25264 }
25265
25266 epoch->flags = 0;
25267- atomic_set(&epoch->epoch_size, 0);
25268+ atomic_set_unchecked(&epoch->epoch_size, 0);
25269 atomic_set(&epoch->active, 0);
25270
25271 spin_lock(&mdev->epoch_lock);
25272- if (atomic_read(&mdev->current_epoch->epoch_size)) {
25273+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
25274 list_add(&epoch->list, &mdev->current_epoch->list);
25275 mdev->current_epoch = epoch;
25276 mdev->epochs++;
25277@@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf
25278 spin_unlock(&mdev->peer_seq_lock);
25279
25280 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
25281- atomic_inc(&mdev->current_epoch->epoch_size);
25282+ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
25283 return drbd_drain_block(mdev, data_size);
25284 }
25285
25286@@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf
25287
25288 spin_lock(&mdev->epoch_lock);
25289 e->epoch = mdev->current_epoch;
25290- atomic_inc(&e->epoch->epoch_size);
25291+ atomic_inc_unchecked(&e->epoch->epoch_size);
25292 atomic_inc(&e->epoch->active);
25293 spin_unlock(&mdev->epoch_lock);
25294
25295@@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_
25296 D_ASSERT(list_empty(&mdev->done_ee));
25297
25298 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
25299- atomic_set(&mdev->current_epoch->epoch_size, 0);
25300+ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
25301 D_ASSERT(list_empty(&mdev->current_epoch->list));
25302 }
25303
25304diff -urNp linux-3.1.1/drivers/block/loop.c linux-3.1.1/drivers/block/loop.c
25305--- linux-3.1.1/drivers/block/loop.c 2011-11-11 15:19:27.000000000 -0500
25306+++ linux-3.1.1/drivers/block/loop.c 2011-11-16 18:39:07.000000000 -0500
25307@@ -283,7 +283,7 @@ static int __do_lo_send_write(struct fil
25308 mm_segment_t old_fs = get_fs();
25309
25310 set_fs(get_ds());
25311- bw = file->f_op->write(file, buf, len, &pos);
25312+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
25313 set_fs(old_fs);
25314 if (likely(bw == len))
25315 return 0;
25316diff -urNp linux-3.1.1/drivers/block/nbd.c linux-3.1.1/drivers/block/nbd.c
25317--- linux-3.1.1/drivers/block/nbd.c 2011-11-11 15:19:27.000000000 -0500
25318+++ linux-3.1.1/drivers/block/nbd.c 2011-11-16 18:40:10.000000000 -0500
25319@@ -157,6 +157,8 @@ static int sock_xmit(struct nbd_device *
25320 struct kvec iov;
25321 sigset_t blocked, oldset;
25322
25323+ pax_track_stack();
25324+
25325 if (unlikely(!sock)) {
25326 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
25327 lo->disk->disk_name, (send ? "send" : "recv"));
25328@@ -572,6 +574,8 @@ static void do_nbd_request(struct reques
25329 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
25330 unsigned int cmd, unsigned long arg)
25331 {
25332+ pax_track_stack();
25333+
25334 switch (cmd) {
25335 case NBD_DISCONNECT: {
25336 struct request sreq;
25337diff -urNp linux-3.1.1/drivers/char/agp/frontend.c linux-3.1.1/drivers/char/agp/frontend.c
25338--- linux-3.1.1/drivers/char/agp/frontend.c 2011-11-11 15:19:27.000000000 -0500
25339+++ linux-3.1.1/drivers/char/agp/frontend.c 2011-11-16 18:39:07.000000000 -0500
25340@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct ag
25341 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
25342 return -EFAULT;
25343
25344- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
25345+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
25346 return -EFAULT;
25347
25348 client = agp_find_client_by_pid(reserve.pid);
25349diff -urNp linux-3.1.1/drivers/char/briq_panel.c linux-3.1.1/drivers/char/briq_panel.c
25350--- linux-3.1.1/drivers/char/briq_panel.c 2011-11-11 15:19:27.000000000 -0500
25351+++ linux-3.1.1/drivers/char/briq_panel.c 2011-11-16 18:40:10.000000000 -0500
25352@@ -9,6 +9,7 @@
25353 #include <linux/types.h>
25354 #include <linux/errno.h>
25355 #include <linux/tty.h>
25356+#include <linux/mutex.h>
25357 #include <linux/timer.h>
25358 #include <linux/kernel.h>
25359 #include <linux/wait.h>
25360@@ -34,6 +35,7 @@ static int vfd_is_open;
25361 static unsigned char vfd[40];
25362 static int vfd_cursor;
25363 static unsigned char ledpb, led;
25364+static DEFINE_MUTEX(vfd_mutex);
25365
25366 static void update_vfd(void)
25367 {
25368@@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct f
25369 if (!vfd_is_open)
25370 return -EBUSY;
25371
25372+ mutex_lock(&vfd_mutex);
25373 for (;;) {
25374 char c;
25375 if (!indx)
25376 break;
25377- if (get_user(c, buf))
25378+ if (get_user(c, buf)) {
25379+ mutex_unlock(&vfd_mutex);
25380 return -EFAULT;
25381+ }
25382 if (esc) {
25383 set_led(c);
25384 esc = 0;
25385@@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct f
25386 buf++;
25387 }
25388 update_vfd();
25389+ mutex_unlock(&vfd_mutex);
25390
25391 return len;
25392 }
25393diff -urNp linux-3.1.1/drivers/char/genrtc.c linux-3.1.1/drivers/char/genrtc.c
25394--- linux-3.1.1/drivers/char/genrtc.c 2011-11-11 15:19:27.000000000 -0500
25395+++ linux-3.1.1/drivers/char/genrtc.c 2011-11-16 18:40:10.000000000 -0500
25396@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *fi
25397 switch (cmd) {
25398
25399 case RTC_PLL_GET:
25400+ memset(&pll, 0, sizeof(pll));
25401 if (get_rtc_pll(&pll))
25402 return -EINVAL;
25403 else
25404diff -urNp linux-3.1.1/drivers/char/hpet.c linux-3.1.1/drivers/char/hpet.c
25405--- linux-3.1.1/drivers/char/hpet.c 2011-11-11 15:19:27.000000000 -0500
25406+++ linux-3.1.1/drivers/char/hpet.c 2011-11-16 18:39:07.000000000 -0500
25407@@ -572,7 +572,7 @@ static inline unsigned long hpet_time_di
25408 }
25409
25410 static int
25411-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
25412+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
25413 struct hpet_info *info)
25414 {
25415 struct hpet_timer __iomem *timer;
25416diff -urNp linux-3.1.1/drivers/char/ipmi/ipmi_msghandler.c linux-3.1.1/drivers/char/ipmi/ipmi_msghandler.c
25417--- linux-3.1.1/drivers/char/ipmi/ipmi_msghandler.c 2011-11-11 15:19:27.000000000 -0500
25418+++ linux-3.1.1/drivers/char/ipmi/ipmi_msghandler.c 2011-11-16 18:40:10.000000000 -0500
25419@@ -415,7 +415,7 @@ struct ipmi_smi {
25420 struct proc_dir_entry *proc_dir;
25421 char proc_dir_name[10];
25422
25423- atomic_t stats[IPMI_NUM_STATS];
25424+ atomic_unchecked_t stats[IPMI_NUM_STATS];
25425
25426 /*
25427 * run_to_completion duplicate of smb_info, smi_info
25428@@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
25429
25430
25431 #define ipmi_inc_stat(intf, stat) \
25432- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
25433+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
25434 #define ipmi_get_stat(intf, stat) \
25435- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
25436+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
25437
25438 static int is_lan_addr(struct ipmi_addr *addr)
25439 {
25440@@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
25441 INIT_LIST_HEAD(&intf->cmd_rcvrs);
25442 init_waitqueue_head(&intf->waitq);
25443 for (i = 0; i < IPMI_NUM_STATS; i++)
25444- atomic_set(&intf->stats[i], 0);
25445+ atomic_set_unchecked(&intf->stats[i], 0);
25446
25447 intf->proc_dir = NULL;
25448
25449@@ -4220,6 +4220,8 @@ static void send_panic_events(char *str)
25450 struct ipmi_smi_msg smi_msg;
25451 struct ipmi_recv_msg recv_msg;
25452
25453+ pax_track_stack();
25454+
25455 si = (struct ipmi_system_interface_addr *) &addr;
25456 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
25457 si->channel = IPMI_BMC_CHANNEL;
25458diff -urNp linux-3.1.1/drivers/char/ipmi/ipmi_si_intf.c linux-3.1.1/drivers/char/ipmi/ipmi_si_intf.c
25459--- linux-3.1.1/drivers/char/ipmi/ipmi_si_intf.c 2011-11-11 15:19:27.000000000 -0500
25460+++ linux-3.1.1/drivers/char/ipmi/ipmi_si_intf.c 2011-11-16 18:39:07.000000000 -0500
25461@@ -277,7 +277,7 @@ struct smi_info {
25462 unsigned char slave_addr;
25463
25464 /* Counters and things for the proc filesystem. */
25465- atomic_t stats[SI_NUM_STATS];
25466+ atomic_unchecked_t stats[SI_NUM_STATS];
25467
25468 struct task_struct *thread;
25469
25470@@ -286,9 +286,9 @@ struct smi_info {
25471 };
25472
25473 #define smi_inc_stat(smi, stat) \
25474- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
25475+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
25476 #define smi_get_stat(smi, stat) \
25477- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
25478+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
25479
25480 #define SI_MAX_PARMS 4
25481
25482@@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info
25483 atomic_set(&new_smi->req_events, 0);
25484 new_smi->run_to_completion = 0;
25485 for (i = 0; i < SI_NUM_STATS; i++)
25486- atomic_set(&new_smi->stats[i], 0);
25487+ atomic_set_unchecked(&new_smi->stats[i], 0);
25488
25489 new_smi->interrupt_disabled = 1;
25490 atomic_set(&new_smi->stop_operation, 0);
25491diff -urNp linux-3.1.1/drivers/char/Kconfig linux-3.1.1/drivers/char/Kconfig
25492--- linux-3.1.1/drivers/char/Kconfig 2011-11-11 15:19:27.000000000 -0500
25493+++ linux-3.1.1/drivers/char/Kconfig 2011-11-16 18:40:10.000000000 -0500
25494@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
25495
25496 config DEVKMEM
25497 bool "/dev/kmem virtual device support"
25498- default y
25499+ default n
25500+ depends on !GRKERNSEC_KMEM
25501 help
25502 Say Y here if you want to support the /dev/kmem device. The
25503 /dev/kmem device is rarely used, but can be used for certain
25504@@ -596,6 +597,7 @@ config DEVPORT
25505 bool
25506 depends on !M68K
25507 depends on ISA || PCI
25508+ depends on !GRKERNSEC_KMEM
25509 default y
25510
25511 source "drivers/s390/char/Kconfig"
25512diff -urNp linux-3.1.1/drivers/char/mbcs.c linux-3.1.1/drivers/char/mbcs.c
25513--- linux-3.1.1/drivers/char/mbcs.c 2011-11-11 15:19:27.000000000 -0500
25514+++ linux-3.1.1/drivers/char/mbcs.c 2011-11-16 18:39:07.000000000 -0500
25515@@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *de
25516 return 0;
25517 }
25518
25519-static const struct cx_device_id __devinitdata mbcs_id_table[] = {
25520+static const struct cx_device_id __devinitconst mbcs_id_table[] = {
25521 {
25522 .part_num = MBCS_PART_NUM,
25523 .mfg_num = MBCS_MFG_NUM,
25524diff -urNp linux-3.1.1/drivers/char/mem.c linux-3.1.1/drivers/char/mem.c
25525--- linux-3.1.1/drivers/char/mem.c 2011-11-11 15:19:27.000000000 -0500
25526+++ linux-3.1.1/drivers/char/mem.c 2011-11-17 18:31:56.000000000 -0500
25527@@ -18,6 +18,7 @@
25528 #include <linux/raw.h>
25529 #include <linux/tty.h>
25530 #include <linux/capability.h>
25531+#include <linux/security.h>
25532 #include <linux/ptrace.h>
25533 #include <linux/device.h>
25534 #include <linux/highmem.h>
25535@@ -34,6 +35,10 @@
25536 # include <linux/efi.h>
25537 #endif
25538
25539+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
25540+extern const struct file_operations grsec_fops;
25541+#endif
25542+
25543 static inline unsigned long size_inside_page(unsigned long start,
25544 unsigned long size)
25545 {
25546@@ -65,9 +70,13 @@ static inline int range_is_allowed(unsig
25547
25548 while (cursor < to) {
25549 if (!devmem_is_allowed(pfn)) {
25550+#ifdef CONFIG_GRKERNSEC_KMEM
25551+ gr_handle_mem_readwrite(from, to);
25552+#else
25553 printk(KERN_INFO
25554 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
25555 current->comm, from, to);
25556+#endif
25557 return 0;
25558 }
25559 cursor += PAGE_SIZE;
25560@@ -75,6 +84,11 @@ static inline int range_is_allowed(unsig
25561 }
25562 return 1;
25563 }
25564+#elif defined(CONFIG_GRKERNSEC_KMEM)
25565+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25566+{
25567+ return 0;
25568+}
25569 #else
25570 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25571 {
25572@@ -117,6 +131,7 @@ static ssize_t read_mem(struct file *fil
25573
25574 while (count > 0) {
25575 unsigned long remaining;
25576+ char *temp;
25577
25578 sz = size_inside_page(p, count);
25579
25580@@ -132,7 +147,23 @@ static ssize_t read_mem(struct file *fil
25581 if (!ptr)
25582 return -EFAULT;
25583
25584- remaining = copy_to_user(buf, ptr, sz);
25585+#ifdef CONFIG_PAX_USERCOPY
25586+ temp = kmalloc(sz, GFP_KERNEL);
25587+ if (!temp) {
25588+ unxlate_dev_mem_ptr(p, ptr);
25589+ return -ENOMEM;
25590+ }
25591+ memcpy(temp, ptr, sz);
25592+#else
25593+ temp = ptr;
25594+#endif
25595+
25596+ remaining = copy_to_user(buf, temp, sz);
25597+
25598+#ifdef CONFIG_PAX_USERCOPY
25599+ kfree(temp);
25600+#endif
25601+
25602 unxlate_dev_mem_ptr(p, ptr);
25603 if (remaining)
25604 return -EFAULT;
25605@@ -395,9 +426,8 @@ static ssize_t read_kmem(struct file *fi
25606 size_t count, loff_t *ppos)
25607 {
25608 unsigned long p = *ppos;
25609- ssize_t low_count, read, sz;
25610+ ssize_t low_count, read, sz, err = 0;
25611 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
25612- int err = 0;
25613
25614 read = 0;
25615 if (p < (unsigned long) high_memory) {
25616@@ -419,6 +449,8 @@ static ssize_t read_kmem(struct file *fi
25617 }
25618 #endif
25619 while (low_count > 0) {
25620+ char *temp;
25621+
25622 sz = size_inside_page(p, low_count);
25623
25624 /*
25625@@ -428,7 +460,22 @@ static ssize_t read_kmem(struct file *fi
25626 */
25627 kbuf = xlate_dev_kmem_ptr((char *)p);
25628
25629- if (copy_to_user(buf, kbuf, sz))
25630+#ifdef CONFIG_PAX_USERCOPY
25631+ temp = kmalloc(sz, GFP_KERNEL);
25632+ if (!temp)
25633+ return -ENOMEM;
25634+ memcpy(temp, kbuf, sz);
25635+#else
25636+ temp = kbuf;
25637+#endif
25638+
25639+ err = copy_to_user(buf, temp, sz);
25640+
25641+#ifdef CONFIG_PAX_USERCOPY
25642+ kfree(temp);
25643+#endif
25644+
25645+ if (err)
25646 return -EFAULT;
25647 buf += sz;
25648 p += sz;
25649@@ -866,6 +913,9 @@ static const struct memdev {
25650 #ifdef CONFIG_CRASH_DUMP
25651 [12] = { "oldmem", 0, &oldmem_fops, NULL },
25652 #endif
25653+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
25654+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
25655+#endif
25656 };
25657
25658 static int memory_open(struct inode *inode, struct file *filp)
25659diff -urNp linux-3.1.1/drivers/char/nvram.c linux-3.1.1/drivers/char/nvram.c
25660--- linux-3.1.1/drivers/char/nvram.c 2011-11-11 15:19:27.000000000 -0500
25661+++ linux-3.1.1/drivers/char/nvram.c 2011-11-16 18:39:07.000000000 -0500
25662@@ -248,7 +248,7 @@ static ssize_t nvram_read(struct file *f
25663
25664 spin_unlock_irq(&rtc_lock);
25665
25666- if (copy_to_user(buf, contents, tmp - contents))
25667+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
25668 return -EFAULT;
25669
25670 *ppos = i;
25671diff -urNp linux-3.1.1/drivers/char/random.c linux-3.1.1/drivers/char/random.c
25672--- linux-3.1.1/drivers/char/random.c 2011-11-11 15:19:27.000000000 -0500
25673+++ linux-3.1.1/drivers/char/random.c 2011-11-16 18:40:10.000000000 -0500
25674@@ -261,8 +261,13 @@
25675 /*
25676 * Configuration information
25677 */
25678+#ifdef CONFIG_GRKERNSEC_RANDNET
25679+#define INPUT_POOL_WORDS 512
25680+#define OUTPUT_POOL_WORDS 128
25681+#else
25682 #define INPUT_POOL_WORDS 128
25683 #define OUTPUT_POOL_WORDS 32
25684+#endif
25685 #define SEC_XFER_SIZE 512
25686 #define EXTRACT_SIZE 10
25687
25688@@ -300,10 +305,17 @@ static struct poolinfo {
25689 int poolwords;
25690 int tap1, tap2, tap3, tap4, tap5;
25691 } poolinfo_table[] = {
25692+#ifdef CONFIG_GRKERNSEC_RANDNET
25693+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
25694+ { 512, 411, 308, 208, 104, 1 },
25695+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
25696+ { 128, 103, 76, 51, 25, 1 },
25697+#else
25698 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
25699 { 128, 103, 76, 51, 25, 1 },
25700 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
25701 { 32, 26, 20, 14, 7, 1 },
25702+#endif
25703 #if 0
25704 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
25705 { 2048, 1638, 1231, 819, 411, 1 },
25706@@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(stru
25707
25708 extract_buf(r, tmp);
25709 i = min_t(int, nbytes, EXTRACT_SIZE);
25710- if (copy_to_user(buf, tmp, i)) {
25711+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
25712 ret = -EFAULT;
25713 break;
25714 }
25715@@ -1214,7 +1226,7 @@ EXPORT_SYMBOL(generate_random_uuid);
25716 #include <linux/sysctl.h>
25717
25718 static int min_read_thresh = 8, min_write_thresh;
25719-static int max_read_thresh = INPUT_POOL_WORDS * 32;
25720+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
25721 static int max_write_thresh = INPUT_POOL_WORDS * 32;
25722 static char sysctl_bootid[16];
25723
25724diff -urNp linux-3.1.1/drivers/char/sonypi.c linux-3.1.1/drivers/char/sonypi.c
25725--- linux-3.1.1/drivers/char/sonypi.c 2011-11-11 15:19:27.000000000 -0500
25726+++ linux-3.1.1/drivers/char/sonypi.c 2011-11-16 18:39:07.000000000 -0500
25727@@ -55,6 +55,7 @@
25728 #include <asm/uaccess.h>
25729 #include <asm/io.h>
25730 #include <asm/system.h>
25731+#include <asm/local.h>
25732
25733 #include <linux/sonypi.h>
25734
25735@@ -491,7 +492,7 @@ static struct sonypi_device {
25736 spinlock_t fifo_lock;
25737 wait_queue_head_t fifo_proc_list;
25738 struct fasync_struct *fifo_async;
25739- int open_count;
25740+ local_t open_count;
25741 int model;
25742 struct input_dev *input_jog_dev;
25743 struct input_dev *input_key_dev;
25744@@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, st
25745 static int sonypi_misc_release(struct inode *inode, struct file *file)
25746 {
25747 mutex_lock(&sonypi_device.lock);
25748- sonypi_device.open_count--;
25749+ local_dec(&sonypi_device.open_count);
25750 mutex_unlock(&sonypi_device.lock);
25751 return 0;
25752 }
25753@@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode
25754 {
25755 mutex_lock(&sonypi_device.lock);
25756 /* Flush input queue on first open */
25757- if (!sonypi_device.open_count)
25758+ if (!local_read(&sonypi_device.open_count))
25759 kfifo_reset(&sonypi_device.fifo);
25760- sonypi_device.open_count++;
25761+ local_inc(&sonypi_device.open_count);
25762 mutex_unlock(&sonypi_device.lock);
25763
25764 return 0;
25765diff -urNp linux-3.1.1/drivers/char/tpm/tpm_bios.c linux-3.1.1/drivers/char/tpm/tpm_bios.c
25766--- linux-3.1.1/drivers/char/tpm/tpm_bios.c 2011-11-11 15:19:27.000000000 -0500
25767+++ linux-3.1.1/drivers/char/tpm/tpm_bios.c 2011-11-16 18:39:07.000000000 -0500
25768@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start
25769 event = addr;
25770
25771 if ((event->event_type == 0 && event->event_size == 0) ||
25772- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
25773+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
25774 return NULL;
25775
25776 return addr;
25777@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(
25778 return NULL;
25779
25780 if ((event->event_type == 0 && event->event_size == 0) ||
25781- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
25782+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
25783 return NULL;
25784
25785 (*pos)++;
25786@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_
25787 int i;
25788
25789 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
25790- seq_putc(m, data[i]);
25791+ if (!seq_putc(m, data[i]))
25792+ return -EFAULT;
25793
25794 return 0;
25795 }
25796@@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log
25797 log->bios_event_log_end = log->bios_event_log + len;
25798
25799 virt = acpi_os_map_memory(start, len);
25800+ if (!virt) {
25801+ kfree(log->bios_event_log);
25802+ log->bios_event_log = NULL;
25803+ return -EFAULT;
25804+ }
25805
25806- memcpy(log->bios_event_log, virt, len);
25807+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
25808
25809 acpi_os_unmap_memory(virt, len);
25810 return 0;
25811diff -urNp linux-3.1.1/drivers/char/tpm/tpm.c linux-3.1.1/drivers/char/tpm/tpm.c
25812--- linux-3.1.1/drivers/char/tpm/tpm.c 2011-11-11 15:19:27.000000000 -0500
25813+++ linux-3.1.1/drivers/char/tpm/tpm.c 2011-11-16 18:40:10.000000000 -0500
25814@@ -414,7 +414,7 @@ static ssize_t tpm_transmit(struct tpm_c
25815 chip->vendor.req_complete_val)
25816 goto out_recv;
25817
25818- if ((status == chip->vendor.req_canceled)) {
25819+ if (status == chip->vendor.req_canceled) {
25820 dev_err(chip->dev, "Operation Canceled\n");
25821 rc = -ECANCELED;
25822 goto out;
25823@@ -862,6 +862,8 @@ ssize_t tpm_show_pubek(struct device *de
25824
25825 struct tpm_chip *chip = dev_get_drvdata(dev);
25826
25827+ pax_track_stack();
25828+
25829 tpm_cmd.header.in = tpm_readpubek_header;
25830 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
25831 "attempting to read the PUBEK");
25832diff -urNp linux-3.1.1/drivers/char/virtio_console.c linux-3.1.1/drivers/char/virtio_console.c
25833--- linux-3.1.1/drivers/char/virtio_console.c 2011-11-11 15:19:27.000000000 -0500
25834+++ linux-3.1.1/drivers/char/virtio_console.c 2011-11-16 18:39:07.000000000 -0500
25835@@ -555,7 +555,7 @@ static ssize_t fill_readbuf(struct port
25836 if (to_user) {
25837 ssize_t ret;
25838
25839- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
25840+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
25841 if (ret)
25842 return -EFAULT;
25843 } else {
25844@@ -654,7 +654,7 @@ static ssize_t port_fops_read(struct fil
25845 if (!port_has_data(port) && !port->host_connected)
25846 return 0;
25847
25848- return fill_readbuf(port, ubuf, count, true);
25849+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
25850 }
25851
25852 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
25853diff -urNp linux-3.1.1/drivers/crypto/hifn_795x.c linux-3.1.1/drivers/crypto/hifn_795x.c
25854--- linux-3.1.1/drivers/crypto/hifn_795x.c 2011-11-11 15:19:27.000000000 -0500
25855+++ linux-3.1.1/drivers/crypto/hifn_795x.c 2011-11-16 18:40:10.000000000 -0500
25856@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
25857 0xCA, 0x34, 0x2B, 0x2E};
25858 struct scatterlist sg;
25859
25860+ pax_track_stack();
25861+
25862 memset(src, 0, sizeof(src));
25863 memset(ctx.key, 0, sizeof(ctx.key));
25864
25865diff -urNp linux-3.1.1/drivers/crypto/padlock-aes.c linux-3.1.1/drivers/crypto/padlock-aes.c
25866--- linux-3.1.1/drivers/crypto/padlock-aes.c 2011-11-11 15:19:27.000000000 -0500
25867+++ linux-3.1.1/drivers/crypto/padlock-aes.c 2011-11-16 18:40:10.000000000 -0500
25868@@ -109,6 +109,8 @@ static int aes_set_key(struct crypto_tfm
25869 struct crypto_aes_ctx gen_aes;
25870 int cpu;
25871
25872+ pax_track_stack();
25873+
25874 if (key_len % 8) {
25875 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
25876 return -EINVAL;
25877diff -urNp linux-3.1.1/drivers/edac/amd64_edac.c linux-3.1.1/drivers/edac/amd64_edac.c
25878--- linux-3.1.1/drivers/edac/amd64_edac.c 2011-11-11 15:19:27.000000000 -0500
25879+++ linux-3.1.1/drivers/edac/amd64_edac.c 2011-11-16 18:39:07.000000000 -0500
25880@@ -2670,7 +2670,7 @@ static void __devexit amd64_remove_one_i
25881 * PCI core identifies what devices are on a system during boot, and then
25882 * inquiry this table to see if this driver is for a given device found.
25883 */
25884-static const struct pci_device_id amd64_pci_table[] __devinitdata = {
25885+static const struct pci_device_id amd64_pci_table[] __devinitconst = {
25886 {
25887 .vendor = PCI_VENDOR_ID_AMD,
25888 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
25889diff -urNp linux-3.1.1/drivers/edac/amd76x_edac.c linux-3.1.1/drivers/edac/amd76x_edac.c
25890--- linux-3.1.1/drivers/edac/amd76x_edac.c 2011-11-11 15:19:27.000000000 -0500
25891+++ linux-3.1.1/drivers/edac/amd76x_edac.c 2011-11-16 18:39:07.000000000 -0500
25892@@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(
25893 edac_mc_free(mci);
25894 }
25895
25896-static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
25897+static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
25898 {
25899 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25900 AMD762},
25901diff -urNp linux-3.1.1/drivers/edac/e752x_edac.c linux-3.1.1/drivers/edac/e752x_edac.c
25902--- linux-3.1.1/drivers/edac/e752x_edac.c 2011-11-11 15:19:27.000000000 -0500
25903+++ linux-3.1.1/drivers/edac/e752x_edac.c 2011-11-16 18:39:07.000000000 -0500
25904@@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(s
25905 edac_mc_free(mci);
25906 }
25907
25908-static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
25909+static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
25910 {
25911 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25912 E7520},
25913diff -urNp linux-3.1.1/drivers/edac/e7xxx_edac.c linux-3.1.1/drivers/edac/e7xxx_edac.c
25914--- linux-3.1.1/drivers/edac/e7xxx_edac.c 2011-11-11 15:19:27.000000000 -0500
25915+++ linux-3.1.1/drivers/edac/e7xxx_edac.c 2011-11-16 18:39:07.000000000 -0500
25916@@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(s
25917 edac_mc_free(mci);
25918 }
25919
25920-static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
25921+static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
25922 {
25923 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25924 E7205},
25925diff -urNp linux-3.1.1/drivers/edac/edac_pci_sysfs.c linux-3.1.1/drivers/edac/edac_pci_sysfs.c
25926--- linux-3.1.1/drivers/edac/edac_pci_sysfs.c 2011-11-11 15:19:27.000000000 -0500
25927+++ linux-3.1.1/drivers/edac/edac_pci_sysfs.c 2011-11-16 18:39:07.000000000 -0500
25928@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log
25929 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
25930 static int edac_pci_poll_msec = 1000; /* one second workq period */
25931
25932-static atomic_t pci_parity_count = ATOMIC_INIT(0);
25933-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
25934+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
25935+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
25936
25937 static struct kobject *edac_pci_top_main_kobj;
25938 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
25939@@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(str
25940 edac_printk(KERN_CRIT, EDAC_PCI,
25941 "Signaled System Error on %s\n",
25942 pci_name(dev));
25943- atomic_inc(&pci_nonparity_count);
25944+ atomic_inc_unchecked(&pci_nonparity_count);
25945 }
25946
25947 if (status & (PCI_STATUS_PARITY)) {
25948@@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(str
25949 "Master Data Parity Error on %s\n",
25950 pci_name(dev));
25951
25952- atomic_inc(&pci_parity_count);
25953+ atomic_inc_unchecked(&pci_parity_count);
25954 }
25955
25956 if (status & (PCI_STATUS_DETECTED_PARITY)) {
25957@@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(str
25958 "Detected Parity Error on %s\n",
25959 pci_name(dev));
25960
25961- atomic_inc(&pci_parity_count);
25962+ atomic_inc_unchecked(&pci_parity_count);
25963 }
25964 }
25965
25966@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(str
25967 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
25968 "Signaled System Error on %s\n",
25969 pci_name(dev));
25970- atomic_inc(&pci_nonparity_count);
25971+ atomic_inc_unchecked(&pci_nonparity_count);
25972 }
25973
25974 if (status & (PCI_STATUS_PARITY)) {
25975@@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(str
25976 "Master Data Parity Error on "
25977 "%s\n", pci_name(dev));
25978
25979- atomic_inc(&pci_parity_count);
25980+ atomic_inc_unchecked(&pci_parity_count);
25981 }
25982
25983 if (status & (PCI_STATUS_DETECTED_PARITY)) {
25984@@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(str
25985 "Detected Parity Error on %s\n",
25986 pci_name(dev));
25987
25988- atomic_inc(&pci_parity_count);
25989+ atomic_inc_unchecked(&pci_parity_count);
25990 }
25991 }
25992 }
25993@@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
25994 if (!check_pci_errors)
25995 return;
25996
25997- before_count = atomic_read(&pci_parity_count);
25998+ before_count = atomic_read_unchecked(&pci_parity_count);
25999
26000 /* scan all PCI devices looking for a Parity Error on devices and
26001 * bridges.
26002@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
26003 /* Only if operator has selected panic on PCI Error */
26004 if (edac_pci_get_panic_on_pe()) {
26005 /* If the count is different 'after' from 'before' */
26006- if (before_count != atomic_read(&pci_parity_count))
26007+ if (before_count != atomic_read_unchecked(&pci_parity_count))
26008 panic("EDAC: PCI Parity Error");
26009 }
26010 }
26011diff -urNp linux-3.1.1/drivers/edac/i3000_edac.c linux-3.1.1/drivers/edac/i3000_edac.c
26012--- linux-3.1.1/drivers/edac/i3000_edac.c 2011-11-11 15:19:27.000000000 -0500
26013+++ linux-3.1.1/drivers/edac/i3000_edac.c 2011-11-16 18:39:07.000000000 -0500
26014@@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(s
26015 edac_mc_free(mci);
26016 }
26017
26018-static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
26019+static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
26020 {
26021 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
26022 I3000},
26023diff -urNp linux-3.1.1/drivers/edac/i3200_edac.c linux-3.1.1/drivers/edac/i3200_edac.c
26024--- linux-3.1.1/drivers/edac/i3200_edac.c 2011-11-11 15:19:27.000000000 -0500
26025+++ linux-3.1.1/drivers/edac/i3200_edac.c 2011-11-16 18:39:07.000000000 -0500
26026@@ -456,7 +456,7 @@ static void __devexit i3200_remove_one(s
26027 edac_mc_free(mci);
26028 }
26029
26030-static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
26031+static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
26032 {
26033 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
26034 I3200},
26035diff -urNp linux-3.1.1/drivers/edac/i5000_edac.c linux-3.1.1/drivers/edac/i5000_edac.c
26036--- linux-3.1.1/drivers/edac/i5000_edac.c 2011-11-11 15:19:27.000000000 -0500
26037+++ linux-3.1.1/drivers/edac/i5000_edac.c 2011-11-16 18:39:07.000000000 -0500
26038@@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(s
26039 *
26040 * The "E500P" device is the first device supported.
26041 */
26042-static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
26043+static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
26044 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
26045 .driver_data = I5000P},
26046
26047diff -urNp linux-3.1.1/drivers/edac/i5100_edac.c linux-3.1.1/drivers/edac/i5100_edac.c
26048--- linux-3.1.1/drivers/edac/i5100_edac.c 2011-11-11 15:19:27.000000000 -0500
26049+++ linux-3.1.1/drivers/edac/i5100_edac.c 2011-11-16 18:39:07.000000000 -0500
26050@@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(s
26051 edac_mc_free(mci);
26052 }
26053
26054-static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
26055+static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
26056 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
26057 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
26058 { 0, }
26059diff -urNp linux-3.1.1/drivers/edac/i5400_edac.c linux-3.1.1/drivers/edac/i5400_edac.c
26060--- linux-3.1.1/drivers/edac/i5400_edac.c 2011-11-11 15:19:27.000000000 -0500
26061+++ linux-3.1.1/drivers/edac/i5400_edac.c 2011-11-16 18:39:07.000000000 -0500
26062@@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(s
26063 *
26064 * The "E500P" device is the first device supported.
26065 */
26066-static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
26067+static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
26068 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
26069 {0,} /* 0 terminated list. */
26070 };
26071diff -urNp linux-3.1.1/drivers/edac/i7300_edac.c linux-3.1.1/drivers/edac/i7300_edac.c
26072--- linux-3.1.1/drivers/edac/i7300_edac.c 2011-11-11 15:19:27.000000000 -0500
26073+++ linux-3.1.1/drivers/edac/i7300_edac.c 2011-11-16 18:39:07.000000000 -0500
26074@@ -1191,7 +1191,7 @@ static void __devexit i7300_remove_one(s
26075 *
26076 * Has only 8086:360c PCI ID
26077 */
26078-static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
26079+static const struct pci_device_id i7300_pci_tbl[] __devinitconst = {
26080 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
26081 {0,} /* 0 terminated list. */
26082 };
26083diff -urNp linux-3.1.1/drivers/edac/i7core_edac.c linux-3.1.1/drivers/edac/i7core_edac.c
26084--- linux-3.1.1/drivers/edac/i7core_edac.c 2011-11-11 15:19:27.000000000 -0500
26085+++ linux-3.1.1/drivers/edac/i7core_edac.c 2011-11-16 18:39:07.000000000 -0500
26086@@ -359,7 +359,7 @@ static const struct pci_id_table pci_dev
26087 /*
26088 * pci_device_id table for which devices we are looking for
26089 */
26090-static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
26091+static const struct pci_device_id i7core_pci_tbl[] __devinitconst = {
26092 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
26093 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
26094 {0,} /* 0 terminated list. */
26095diff -urNp linux-3.1.1/drivers/edac/i82443bxgx_edac.c linux-3.1.1/drivers/edac/i82443bxgx_edac.c
26096--- linux-3.1.1/drivers/edac/i82443bxgx_edac.c 2011-11-11 15:19:27.000000000 -0500
26097+++ linux-3.1.1/drivers/edac/i82443bxgx_edac.c 2011-11-16 18:39:07.000000000 -0500
26098@@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_
26099
26100 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
26101
26102-static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
26103+static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
26104 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
26105 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
26106 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
26107diff -urNp linux-3.1.1/drivers/edac/i82860_edac.c linux-3.1.1/drivers/edac/i82860_edac.c
26108--- linux-3.1.1/drivers/edac/i82860_edac.c 2011-11-11 15:19:27.000000000 -0500
26109+++ linux-3.1.1/drivers/edac/i82860_edac.c 2011-11-16 18:39:07.000000000 -0500
26110@@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(
26111 edac_mc_free(mci);
26112 }
26113
26114-static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
26115+static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
26116 {
26117 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
26118 I82860},
26119diff -urNp linux-3.1.1/drivers/edac/i82875p_edac.c linux-3.1.1/drivers/edac/i82875p_edac.c
26120--- linux-3.1.1/drivers/edac/i82875p_edac.c 2011-11-11 15:19:27.000000000 -0500
26121+++ linux-3.1.1/drivers/edac/i82875p_edac.c 2011-11-16 18:39:07.000000000 -0500
26122@@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one
26123 edac_mc_free(mci);
26124 }
26125
26126-static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
26127+static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
26128 {
26129 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
26130 I82875P},
26131diff -urNp linux-3.1.1/drivers/edac/i82975x_edac.c linux-3.1.1/drivers/edac/i82975x_edac.c
26132--- linux-3.1.1/drivers/edac/i82975x_edac.c 2011-11-11 15:19:27.000000000 -0500
26133+++ linux-3.1.1/drivers/edac/i82975x_edac.c 2011-11-16 18:39:07.000000000 -0500
26134@@ -604,7 +604,7 @@ static void __devexit i82975x_remove_one
26135 edac_mc_free(mci);
26136 }
26137
26138-static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
26139+static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
26140 {
26141 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
26142 I82975X
26143diff -urNp linux-3.1.1/drivers/edac/mce_amd.h linux-3.1.1/drivers/edac/mce_amd.h
26144--- linux-3.1.1/drivers/edac/mce_amd.h 2011-11-11 15:19:27.000000000 -0500
26145+++ linux-3.1.1/drivers/edac/mce_amd.h 2011-11-16 18:39:07.000000000 -0500
26146@@ -83,7 +83,7 @@ struct amd_decoder_ops {
26147 bool (*dc_mce)(u16, u8);
26148 bool (*ic_mce)(u16, u8);
26149 bool (*nb_mce)(u16, u8);
26150-};
26151+} __no_const;
26152
26153 void amd_report_gart_errors(bool);
26154 void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32));
26155diff -urNp linux-3.1.1/drivers/edac/r82600_edac.c linux-3.1.1/drivers/edac/r82600_edac.c
26156--- linux-3.1.1/drivers/edac/r82600_edac.c 2011-11-11 15:19:27.000000000 -0500
26157+++ linux-3.1.1/drivers/edac/r82600_edac.c 2011-11-16 18:39:07.000000000 -0500
26158@@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(
26159 edac_mc_free(mci);
26160 }
26161
26162-static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
26163+static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
26164 {
26165 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
26166 },
26167diff -urNp linux-3.1.1/drivers/edac/x38_edac.c linux-3.1.1/drivers/edac/x38_edac.c
26168--- linux-3.1.1/drivers/edac/x38_edac.c 2011-11-11 15:19:27.000000000 -0500
26169+++ linux-3.1.1/drivers/edac/x38_edac.c 2011-11-16 18:39:07.000000000 -0500
26170@@ -440,7 +440,7 @@ static void __devexit x38_remove_one(str
26171 edac_mc_free(mci);
26172 }
26173
26174-static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
26175+static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
26176 {
26177 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
26178 X38},
26179diff -urNp linux-3.1.1/drivers/firewire/core-card.c linux-3.1.1/drivers/firewire/core-card.c
26180--- linux-3.1.1/drivers/firewire/core-card.c 2011-11-11 15:19:27.000000000 -0500
26181+++ linux-3.1.1/drivers/firewire/core-card.c 2011-11-16 18:39:07.000000000 -0500
26182@@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
26183
26184 void fw_core_remove_card(struct fw_card *card)
26185 {
26186- struct fw_card_driver dummy_driver = dummy_driver_template;
26187+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
26188
26189 card->driver->update_phy_reg(card, 4,
26190 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
26191diff -urNp linux-3.1.1/drivers/firewire/core-cdev.c linux-3.1.1/drivers/firewire/core-cdev.c
26192--- linux-3.1.1/drivers/firewire/core-cdev.c 2011-11-11 15:19:27.000000000 -0500
26193+++ linux-3.1.1/drivers/firewire/core-cdev.c 2011-11-16 18:39:07.000000000 -0500
26194@@ -1331,8 +1331,7 @@ static int init_iso_resource(struct clie
26195 int ret;
26196
26197 if ((request->channels == 0 && request->bandwidth == 0) ||
26198- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
26199- request->bandwidth < 0)
26200+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
26201 return -EINVAL;
26202
26203 r = kmalloc(sizeof(*r), GFP_KERNEL);
26204diff -urNp linux-3.1.1/drivers/firewire/core.h linux-3.1.1/drivers/firewire/core.h
26205--- linux-3.1.1/drivers/firewire/core.h 2011-11-11 15:19:27.000000000 -0500
26206+++ linux-3.1.1/drivers/firewire/core.h 2011-11-16 18:39:07.000000000 -0500
26207@@ -101,6 +101,7 @@ struct fw_card_driver {
26208
26209 int (*stop_iso)(struct fw_iso_context *ctx);
26210 };
26211+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
26212
26213 void fw_card_initialize(struct fw_card *card,
26214 const struct fw_card_driver *driver, struct device *device);
26215diff -urNp linux-3.1.1/drivers/firewire/core-transaction.c linux-3.1.1/drivers/firewire/core-transaction.c
26216--- linux-3.1.1/drivers/firewire/core-transaction.c 2011-11-11 15:19:27.000000000 -0500
26217+++ linux-3.1.1/drivers/firewire/core-transaction.c 2011-11-16 18:40:10.000000000 -0500
26218@@ -37,6 +37,7 @@
26219 #include <linux/timer.h>
26220 #include <linux/types.h>
26221 #include <linux/workqueue.h>
26222+#include <linux/sched.h>
26223
26224 #include <asm/byteorder.h>
26225
26226@@ -422,6 +423,8 @@ int fw_run_transaction(struct fw_card *c
26227 struct transaction_callback_data d;
26228 struct fw_transaction t;
26229
26230+ pax_track_stack();
26231+
26232 init_timer_on_stack(&t.split_timeout_timer);
26233 init_completion(&d.done);
26234 d.payload = payload;
26235diff -urNp linux-3.1.1/drivers/firmware/dmi_scan.c linux-3.1.1/drivers/firmware/dmi_scan.c
26236--- linux-3.1.1/drivers/firmware/dmi_scan.c 2011-11-11 15:19:27.000000000 -0500
26237+++ linux-3.1.1/drivers/firmware/dmi_scan.c 2011-11-16 18:39:07.000000000 -0500
26238@@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
26239 }
26240 }
26241 else {
26242- /*
26243- * no iounmap() for that ioremap(); it would be a no-op, but
26244- * it's so early in setup that sucker gets confused into doing
26245- * what it shouldn't if we actually call it.
26246- */
26247 p = dmi_ioremap(0xF0000, 0x10000);
26248 if (p == NULL)
26249 goto error;
26250@@ -725,7 +720,7 @@ int dmi_walk(void (*decode)(const struct
26251 if (buf == NULL)
26252 return -1;
26253
26254- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
26255+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
26256
26257 iounmap(buf);
26258 return 0;
26259diff -urNp linux-3.1.1/drivers/gpio/gpio-vr41xx.c linux-3.1.1/drivers/gpio/gpio-vr41xx.c
26260--- linux-3.1.1/drivers/gpio/gpio-vr41xx.c 2011-11-11 15:19:27.000000000 -0500
26261+++ linux-3.1.1/drivers/gpio/gpio-vr41xx.c 2011-11-16 18:39:07.000000000 -0500
26262@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
26263 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
26264 maskl, pendl, maskh, pendh);
26265
26266- atomic_inc(&irq_err_count);
26267+ atomic_inc_unchecked(&irq_err_count);
26268
26269 return -EINVAL;
26270 }
26271diff -urNp linux-3.1.1/drivers/gpu/drm/drm_crtc.c linux-3.1.1/drivers/gpu/drm/drm_crtc.c
26272--- linux-3.1.1/drivers/gpu/drm/drm_crtc.c 2011-11-11 15:19:27.000000000 -0500
26273+++ linux-3.1.1/drivers/gpu/drm/drm_crtc.c 2011-11-16 18:39:07.000000000 -0500
26274@@ -1374,7 +1374,7 @@ int drm_mode_getconnector(struct drm_dev
26275 */
26276 if ((out_resp->count_modes >= mode_count) && mode_count) {
26277 copied = 0;
26278- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
26279+ mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
26280 list_for_each_entry(mode, &connector->modes, head) {
26281 drm_crtc_convert_to_umode(&u_mode, mode);
26282 if (copy_to_user(mode_ptr + copied,
26283@@ -1389,8 +1389,8 @@ int drm_mode_getconnector(struct drm_dev
26284
26285 if ((out_resp->count_props >= props_count) && props_count) {
26286 copied = 0;
26287- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
26288- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
26289+ prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
26290+ prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
26291 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
26292 if (connector->property_ids[i] != 0) {
26293 if (put_user(connector->property_ids[i],
26294@@ -1412,7 +1412,7 @@ int drm_mode_getconnector(struct drm_dev
26295
26296 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
26297 copied = 0;
26298- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
26299+ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
26300 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
26301 if (connector->encoder_ids[i] != 0) {
26302 if (put_user(connector->encoder_ids[i],
26303@@ -1571,7 +1571,7 @@ int drm_mode_setcrtc(struct drm_device *
26304 }
26305
26306 for (i = 0; i < crtc_req->count_connectors; i++) {
26307- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
26308+ set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
26309 if (get_user(out_id, &set_connectors_ptr[i])) {
26310 ret = -EFAULT;
26311 goto out;
26312@@ -1852,7 +1852,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_de
26313 fb = obj_to_fb(obj);
26314
26315 num_clips = r->num_clips;
26316- clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
26317+ clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
26318
26319 if (!num_clips != !clips_ptr) {
26320 ret = -EINVAL;
26321@@ -2272,7 +2272,7 @@ int drm_mode_getproperty_ioctl(struct dr
26322 out_resp->flags = property->flags;
26323
26324 if ((out_resp->count_values >= value_count) && value_count) {
26325- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
26326+ values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
26327 for (i = 0; i < value_count; i++) {
26328 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
26329 ret = -EFAULT;
26330@@ -2285,7 +2285,7 @@ int drm_mode_getproperty_ioctl(struct dr
26331 if (property->flags & DRM_MODE_PROP_ENUM) {
26332 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
26333 copied = 0;
26334- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
26335+ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
26336 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
26337
26338 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
26339@@ -2308,7 +2308,7 @@ int drm_mode_getproperty_ioctl(struct dr
26340 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
26341 copied = 0;
26342 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
26343- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
26344+ blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
26345
26346 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
26347 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
26348@@ -2369,7 +2369,7 @@ int drm_mode_getblob_ioctl(struct drm_de
26349 struct drm_mode_get_blob *out_resp = data;
26350 struct drm_property_blob *blob;
26351 int ret = 0;
26352- void *blob_ptr;
26353+ void __user *blob_ptr;
26354
26355 if (!drm_core_check_feature(dev, DRIVER_MODESET))
26356 return -EINVAL;
26357@@ -2383,7 +2383,7 @@ int drm_mode_getblob_ioctl(struct drm_de
26358 blob = obj_to_blob(obj);
26359
26360 if (out_resp->length == blob->length) {
26361- blob_ptr = (void *)(unsigned long)out_resp->data;
26362+ blob_ptr = (void __user *)(unsigned long)out_resp->data;
26363 if (copy_to_user(blob_ptr, blob->data, blob->length)){
26364 ret = -EFAULT;
26365 goto done;
26366diff -urNp linux-3.1.1/drivers/gpu/drm/drm_crtc_helper.c linux-3.1.1/drivers/gpu/drm/drm_crtc_helper.c
26367--- linux-3.1.1/drivers/gpu/drm/drm_crtc_helper.c 2011-11-11 15:19:27.000000000 -0500
26368+++ linux-3.1.1/drivers/gpu/drm/drm_crtc_helper.c 2011-11-16 18:40:10.000000000 -0500
26369@@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct d
26370 struct drm_crtc *tmp;
26371 int crtc_mask = 1;
26372
26373- WARN(!crtc, "checking null crtc?\n");
26374+ BUG_ON(!crtc);
26375
26376 dev = crtc->dev;
26377
26378@@ -343,6 +343,8 @@ bool drm_crtc_helper_set_mode(struct drm
26379 struct drm_encoder *encoder;
26380 bool ret = true;
26381
26382+ pax_track_stack();
26383+
26384 crtc->enabled = drm_helper_crtc_in_use(crtc);
26385 if (!crtc->enabled)
26386 return true;
26387diff -urNp linux-3.1.1/drivers/gpu/drm/drm_drv.c linux-3.1.1/drivers/gpu/drm/drm_drv.c
26388--- linux-3.1.1/drivers/gpu/drm/drm_drv.c 2011-11-11 15:19:27.000000000 -0500
26389+++ linux-3.1.1/drivers/gpu/drm/drm_drv.c 2011-11-16 18:39:07.000000000 -0500
26390@@ -307,7 +307,7 @@ module_exit(drm_core_exit);
26391 /**
26392 * Copy and IOCTL return string to user space
26393 */
26394-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
26395+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
26396 {
26397 int len;
26398
26399@@ -386,7 +386,7 @@ long drm_ioctl(struct file *filp,
26400
26401 dev = file_priv->minor->dev;
26402 atomic_inc(&dev->ioctl_count);
26403- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
26404+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
26405 ++file_priv->ioctl_count;
26406
26407 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
26408diff -urNp linux-3.1.1/drivers/gpu/drm/drm_fops.c linux-3.1.1/drivers/gpu/drm/drm_fops.c
26409--- linux-3.1.1/drivers/gpu/drm/drm_fops.c 2011-11-11 15:19:27.000000000 -0500
26410+++ linux-3.1.1/drivers/gpu/drm/drm_fops.c 2011-11-16 18:39:07.000000000 -0500
26411@@ -70,7 +70,7 @@ static int drm_setup(struct drm_device *
26412 }
26413
26414 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
26415- atomic_set(&dev->counts[i], 0);
26416+ atomic_set_unchecked(&dev->counts[i], 0);
26417
26418 dev->sigdata.lock = NULL;
26419
26420@@ -134,8 +134,8 @@ int drm_open(struct inode *inode, struct
26421
26422 retcode = drm_open_helper(inode, filp, dev);
26423 if (!retcode) {
26424- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
26425- if (!dev->open_count++)
26426+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
26427+ if (local_inc_return(&dev->open_count) == 1)
26428 retcode = drm_setup(dev);
26429 }
26430 if (!retcode) {
26431@@ -472,7 +472,7 @@ int drm_release(struct inode *inode, str
26432
26433 mutex_lock(&drm_global_mutex);
26434
26435- DRM_DEBUG("open_count = %d\n", dev->open_count);
26436+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
26437
26438 if (dev->driver->preclose)
26439 dev->driver->preclose(dev, file_priv);
26440@@ -484,7 +484,7 @@ int drm_release(struct inode *inode, str
26441 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
26442 task_pid_nr(current),
26443 (long)old_encode_dev(file_priv->minor->device),
26444- dev->open_count);
26445+ local_read(&dev->open_count));
26446
26447 /* if the master has gone away we can't do anything with the lock */
26448 if (file_priv->minor->master)
26449@@ -565,8 +565,8 @@ int drm_release(struct inode *inode, str
26450 * End inline drm_release
26451 */
26452
26453- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
26454- if (!--dev->open_count) {
26455+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
26456+ if (local_dec_and_test(&dev->open_count)) {
26457 if (atomic_read(&dev->ioctl_count)) {
26458 DRM_ERROR("Device busy: %d\n",
26459 atomic_read(&dev->ioctl_count));
26460diff -urNp linux-3.1.1/drivers/gpu/drm/drm_global.c linux-3.1.1/drivers/gpu/drm/drm_global.c
26461--- linux-3.1.1/drivers/gpu/drm/drm_global.c 2011-11-11 15:19:27.000000000 -0500
26462+++ linux-3.1.1/drivers/gpu/drm/drm_global.c 2011-11-16 18:39:07.000000000 -0500
26463@@ -36,7 +36,7 @@
26464 struct drm_global_item {
26465 struct mutex mutex;
26466 void *object;
26467- int refcount;
26468+ atomic_t refcount;
26469 };
26470
26471 static struct drm_global_item glob[DRM_GLOBAL_NUM];
26472@@ -49,7 +49,7 @@ void drm_global_init(void)
26473 struct drm_global_item *item = &glob[i];
26474 mutex_init(&item->mutex);
26475 item->object = NULL;
26476- item->refcount = 0;
26477+ atomic_set(&item->refcount, 0);
26478 }
26479 }
26480
26481@@ -59,7 +59,7 @@ void drm_global_release(void)
26482 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
26483 struct drm_global_item *item = &glob[i];
26484 BUG_ON(item->object != NULL);
26485- BUG_ON(item->refcount != 0);
26486+ BUG_ON(atomic_read(&item->refcount) != 0);
26487 }
26488 }
26489
26490@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_globa
26491 void *object;
26492
26493 mutex_lock(&item->mutex);
26494- if (item->refcount == 0) {
26495+ if (atomic_read(&item->refcount) == 0) {
26496 item->object = kzalloc(ref->size, GFP_KERNEL);
26497 if (unlikely(item->object == NULL)) {
26498 ret = -ENOMEM;
26499@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_globa
26500 goto out_err;
26501
26502 }
26503- ++item->refcount;
26504+ atomic_inc(&item->refcount);
26505 ref->object = item->object;
26506 object = item->object;
26507 mutex_unlock(&item->mutex);
26508@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_gl
26509 struct drm_global_item *item = &glob[ref->global_type];
26510
26511 mutex_lock(&item->mutex);
26512- BUG_ON(item->refcount == 0);
26513+ BUG_ON(atomic_read(&item->refcount) == 0);
26514 BUG_ON(ref->object != item->object);
26515- if (--item->refcount == 0) {
26516+ if (atomic_dec_and_test(&item->refcount)) {
26517 ref->release(ref);
26518 item->object = NULL;
26519 }
26520diff -urNp linux-3.1.1/drivers/gpu/drm/drm_info.c linux-3.1.1/drivers/gpu/drm/drm_info.c
26521--- linux-3.1.1/drivers/gpu/drm/drm_info.c 2011-11-11 15:19:27.000000000 -0500
26522+++ linux-3.1.1/drivers/gpu/drm/drm_info.c 2011-11-16 18:40:10.000000000 -0500
26523@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
26524 struct drm_local_map *map;
26525 struct drm_map_list *r_list;
26526
26527- /* Hardcoded from _DRM_FRAME_BUFFER,
26528- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
26529- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
26530- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
26531+ static const char * const types[] = {
26532+ [_DRM_FRAME_BUFFER] = "FB",
26533+ [_DRM_REGISTERS] = "REG",
26534+ [_DRM_SHM] = "SHM",
26535+ [_DRM_AGP] = "AGP",
26536+ [_DRM_SCATTER_GATHER] = "SG",
26537+ [_DRM_CONSISTENT] = "PCI",
26538+ [_DRM_GEM] = "GEM" };
26539 const char *type;
26540 int i;
26541
26542@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
26543 map = r_list->map;
26544 if (!map)
26545 continue;
26546- if (map->type < 0 || map->type > 5)
26547+ if (map->type >= ARRAY_SIZE(types))
26548 type = "??";
26549 else
26550 type = types[map->type];
26551@@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, voi
26552 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
26553 vma->vm_flags & VM_LOCKED ? 'l' : '-',
26554 vma->vm_flags & VM_IO ? 'i' : '-',
26555+#ifdef CONFIG_GRKERNSEC_HIDESYM
26556+ 0);
26557+#else
26558 vma->vm_pgoff);
26559+#endif
26560
26561 #if defined(__i386__)
26562 pgprot = pgprot_val(vma->vm_page_prot);
26563diff -urNp linux-3.1.1/drivers/gpu/drm/drm_ioc32.c linux-3.1.1/drivers/gpu/drm/drm_ioc32.c
26564--- linux-3.1.1/drivers/gpu/drm/drm_ioc32.c 2011-11-11 15:19:27.000000000 -0500
26565+++ linux-3.1.1/drivers/gpu/drm/drm_ioc32.c 2011-11-16 18:39:07.000000000 -0500
26566@@ -455,7 +455,7 @@ static int compat_drm_infobufs(struct fi
26567 request = compat_alloc_user_space(nbytes);
26568 if (!access_ok(VERIFY_WRITE, request, nbytes))
26569 return -EFAULT;
26570- list = (struct drm_buf_desc *) (request + 1);
26571+ list = (struct drm_buf_desc __user *) (request + 1);
26572
26573 if (__put_user(count, &request->count)
26574 || __put_user(list, &request->list))
26575@@ -516,7 +516,7 @@ static int compat_drm_mapbufs(struct fil
26576 request = compat_alloc_user_space(nbytes);
26577 if (!access_ok(VERIFY_WRITE, request, nbytes))
26578 return -EFAULT;
26579- list = (struct drm_buf_pub *) (request + 1);
26580+ list = (struct drm_buf_pub __user *) (request + 1);
26581
26582 if (__put_user(count, &request->count)
26583 || __put_user(list, &request->list))
26584diff -urNp linux-3.1.1/drivers/gpu/drm/drm_ioctl.c linux-3.1.1/drivers/gpu/drm/drm_ioctl.c
26585--- linux-3.1.1/drivers/gpu/drm/drm_ioctl.c 2011-11-11 15:19:27.000000000 -0500
26586+++ linux-3.1.1/drivers/gpu/drm/drm_ioctl.c 2011-11-16 18:39:07.000000000 -0500
26587@@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev,
26588 stats->data[i].value =
26589 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
26590 else
26591- stats->data[i].value = atomic_read(&dev->counts[i]);
26592+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
26593 stats->data[i].type = dev->types[i];
26594 }
26595
26596diff -urNp linux-3.1.1/drivers/gpu/drm/drm_lock.c linux-3.1.1/drivers/gpu/drm/drm_lock.c
26597--- linux-3.1.1/drivers/gpu/drm/drm_lock.c 2011-11-11 15:19:27.000000000 -0500
26598+++ linux-3.1.1/drivers/gpu/drm/drm_lock.c 2011-11-16 18:39:07.000000000 -0500
26599@@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, voi
26600 if (drm_lock_take(&master->lock, lock->context)) {
26601 master->lock.file_priv = file_priv;
26602 master->lock.lock_time = jiffies;
26603- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
26604+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
26605 break; /* Got lock */
26606 }
26607
26608@@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, v
26609 return -EINVAL;
26610 }
26611
26612- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
26613+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
26614
26615 if (drm_lock_free(&master->lock, lock->context)) {
26616 /* FIXME: Should really bail out here. */
26617diff -urNp linux-3.1.1/drivers/gpu/drm/i810/i810_dma.c linux-3.1.1/drivers/gpu/drm/i810/i810_dma.c
26618--- linux-3.1.1/drivers/gpu/drm/i810/i810_dma.c 2011-11-11 15:19:27.000000000 -0500
26619+++ linux-3.1.1/drivers/gpu/drm/i810/i810_dma.c 2011-11-16 18:39:07.000000000 -0500
26620@@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_de
26621 dma->buflist[vertex->idx],
26622 vertex->discard, vertex->used);
26623
26624- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
26625- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
26626+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
26627+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
26628 sarea_priv->last_enqueue = dev_priv->counter - 1;
26629 sarea_priv->last_dispatch = (int)hw_status[5];
26630
26631@@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device
26632 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
26633 mc->last_render);
26634
26635- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
26636- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
26637+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
26638+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
26639 sarea_priv->last_enqueue = dev_priv->counter - 1;
26640 sarea_priv->last_dispatch = (int)hw_status[5];
26641
26642diff -urNp linux-3.1.1/drivers/gpu/drm/i810/i810_drv.h linux-3.1.1/drivers/gpu/drm/i810/i810_drv.h
26643--- linux-3.1.1/drivers/gpu/drm/i810/i810_drv.h 2011-11-11 15:19:27.000000000 -0500
26644+++ linux-3.1.1/drivers/gpu/drm/i810/i810_drv.h 2011-11-16 18:39:07.000000000 -0500
26645@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
26646 int page_flipping;
26647
26648 wait_queue_head_t irq_queue;
26649- atomic_t irq_received;
26650- atomic_t irq_emitted;
26651+ atomic_unchecked_t irq_received;
26652+ atomic_unchecked_t irq_emitted;
26653
26654 int front_offset;
26655 } drm_i810_private_t;
26656diff -urNp linux-3.1.1/drivers/gpu/drm/i915/i915_debugfs.c linux-3.1.1/drivers/gpu/drm/i915/i915_debugfs.c
26657--- linux-3.1.1/drivers/gpu/drm/i915/i915_debugfs.c 2011-11-11 15:19:27.000000000 -0500
26658+++ linux-3.1.1/drivers/gpu/drm/i915/i915_debugfs.c 2011-11-16 18:39:07.000000000 -0500
26659@@ -497,7 +497,7 @@ static int i915_interrupt_info(struct se
26660 I915_READ(GTIMR));
26661 }
26662 seq_printf(m, "Interrupts received: %d\n",
26663- atomic_read(&dev_priv->irq_received));
26664+ atomic_read_unchecked(&dev_priv->irq_received));
26665 for (i = 0; i < I915_NUM_RINGS; i++) {
26666 if (IS_GEN6(dev) || IS_GEN7(dev)) {
26667 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
26668@@ -1185,7 +1185,7 @@ static int i915_opregion(struct seq_file
26669 return ret;
26670
26671 if (opregion->header)
26672- seq_write(m, opregion->header, OPREGION_SIZE);
26673+ seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
26674
26675 mutex_unlock(&dev->struct_mutex);
26676
26677diff -urNp linux-3.1.1/drivers/gpu/drm/i915/i915_dma.c linux-3.1.1/drivers/gpu/drm/i915/i915_dma.c
26678--- linux-3.1.1/drivers/gpu/drm/i915/i915_dma.c 2011-11-11 15:19:27.000000000 -0500
26679+++ linux-3.1.1/drivers/gpu/drm/i915/i915_dma.c 2011-11-16 18:39:07.000000000 -0500
26680@@ -1171,7 +1171,7 @@ static bool i915_switcheroo_can_switch(s
26681 bool can_switch;
26682
26683 spin_lock(&dev->count_lock);
26684- can_switch = (dev->open_count == 0);
26685+ can_switch = (local_read(&dev->open_count) == 0);
26686 spin_unlock(&dev->count_lock);
26687 return can_switch;
26688 }
26689diff -urNp linux-3.1.1/drivers/gpu/drm/i915/i915_drv.h linux-3.1.1/drivers/gpu/drm/i915/i915_drv.h
26690--- linux-3.1.1/drivers/gpu/drm/i915/i915_drv.h 2011-11-11 15:19:27.000000000 -0500
26691+++ linux-3.1.1/drivers/gpu/drm/i915/i915_drv.h 2011-11-16 18:39:07.000000000 -0500
26692@@ -222,7 +222,7 @@ struct drm_i915_display_funcs {
26693 /* render clock increase/decrease */
26694 /* display clock increase/decrease */
26695 /* pll clock increase/decrease */
26696-};
26697+} __no_const;
26698
26699 struct intel_device_info {
26700 u8 gen;
26701@@ -305,7 +305,7 @@ typedef struct drm_i915_private {
26702 int current_page;
26703 int page_flipping;
26704
26705- atomic_t irq_received;
26706+ atomic_unchecked_t irq_received;
26707
26708 /* protects the irq masks */
26709 spinlock_t irq_lock;
26710@@ -882,7 +882,7 @@ struct drm_i915_gem_object {
26711 * will be page flipped away on the next vblank. When it
26712 * reaches 0, dev_priv->pending_flip_queue will be woken up.
26713 */
26714- atomic_t pending_flip;
26715+ atomic_unchecked_t pending_flip;
26716 };
26717
26718 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
26719@@ -1262,7 +1262,7 @@ extern int intel_setup_gmbus(struct drm_
26720 extern void intel_teardown_gmbus(struct drm_device *dev);
26721 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
26722 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
26723-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
26724+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
26725 {
26726 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
26727 }
26728diff -urNp linux-3.1.1/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-3.1.1/drivers/gpu/drm/i915/i915_gem_execbuffer.c
26729--- linux-3.1.1/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-11-11 15:19:27.000000000 -0500
26730+++ linux-3.1.1/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-11-16 18:39:07.000000000 -0500
26731@@ -188,7 +188,7 @@ i915_gem_object_set_to_gpu_domain(struct
26732 i915_gem_clflush_object(obj);
26733
26734 if (obj->base.pending_write_domain)
26735- cd->flips |= atomic_read(&obj->pending_flip);
26736+ cd->flips |= atomic_read_unchecked(&obj->pending_flip);
26737
26738 /* The actual obj->write_domain will be updated with
26739 * pending_write_domain after we emit the accumulated flush for all
26740diff -urNp linux-3.1.1/drivers/gpu/drm/i915/i915_irq.c linux-3.1.1/drivers/gpu/drm/i915/i915_irq.c
26741--- linux-3.1.1/drivers/gpu/drm/i915/i915_irq.c 2011-11-11 15:19:27.000000000 -0500
26742+++ linux-3.1.1/drivers/gpu/drm/i915/i915_irq.c 2011-11-16 18:39:07.000000000 -0500
26743@@ -475,7 +475,7 @@ static irqreturn_t ivybridge_irq_handler
26744 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
26745 struct drm_i915_master_private *master_priv;
26746
26747- atomic_inc(&dev_priv->irq_received);
26748+ atomic_inc_unchecked(&dev_priv->irq_received);
26749
26750 /* disable master interrupt before clearing iir */
26751 de_ier = I915_READ(DEIER);
26752@@ -565,7 +565,7 @@ static irqreturn_t ironlake_irq_handler(
26753 struct drm_i915_master_private *master_priv;
26754 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
26755
26756- atomic_inc(&dev_priv->irq_received);
26757+ atomic_inc_unchecked(&dev_priv->irq_received);
26758
26759 if (IS_GEN6(dev))
26760 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
26761@@ -1228,7 +1228,7 @@ static irqreturn_t i915_driver_irq_handl
26762 int ret = IRQ_NONE, pipe;
26763 bool blc_event = false;
26764
26765- atomic_inc(&dev_priv->irq_received);
26766+ atomic_inc_unchecked(&dev_priv->irq_received);
26767
26768 iir = I915_READ(IIR);
26769
26770@@ -1740,7 +1740,7 @@ static void ironlake_irq_preinstall(stru
26771 {
26772 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
26773
26774- atomic_set(&dev_priv->irq_received, 0);
26775+ atomic_set_unchecked(&dev_priv->irq_received, 0);
26776
26777 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
26778 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
26779@@ -1904,7 +1904,7 @@ static void i915_driver_irq_preinstall(s
26780 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
26781 int pipe;
26782
26783- atomic_set(&dev_priv->irq_received, 0);
26784+ atomic_set_unchecked(&dev_priv->irq_received, 0);
26785
26786 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
26787 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
26788diff -urNp linux-3.1.1/drivers/gpu/drm/i915/intel_display.c linux-3.1.1/drivers/gpu/drm/i915/intel_display.c
26789--- linux-3.1.1/drivers/gpu/drm/i915/intel_display.c 2011-11-11 15:19:27.000000000 -0500
26790+++ linux-3.1.1/drivers/gpu/drm/i915/intel_display.c 2011-11-16 18:39:07.000000000 -0500
26791@@ -2205,7 +2205,7 @@ intel_pipe_set_base(struct drm_crtc *crt
26792
26793 wait_event(dev_priv->pending_flip_queue,
26794 atomic_read(&dev_priv->mm.wedged) ||
26795- atomic_read(&obj->pending_flip) == 0);
26796+ atomic_read_unchecked(&obj->pending_flip) == 0);
26797
26798 /* Big Hammer, we also need to ensure that any pending
26799 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
26800@@ -2824,7 +2824,7 @@ static void intel_crtc_wait_for_pending_
26801 obj = to_intel_framebuffer(crtc->fb)->obj;
26802 dev_priv = crtc->dev->dev_private;
26803 wait_event(dev_priv->pending_flip_queue,
26804- atomic_read(&obj->pending_flip) == 0);
26805+ atomic_read_unchecked(&obj->pending_flip) == 0);
26806 }
26807
26808 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
26809@@ -6644,7 +6644,7 @@ static void do_intel_finish_page_flip(st
26810
26811 atomic_clear_mask(1 << intel_crtc->plane,
26812 &obj->pending_flip.counter);
26813- if (atomic_read(&obj->pending_flip) == 0)
26814+ if (atomic_read_unchecked(&obj->pending_flip) == 0)
26815 wake_up(&dev_priv->pending_flip_queue);
26816
26817 schedule_work(&work->work);
26818@@ -6933,7 +6933,7 @@ static int intel_crtc_page_flip(struct d
26819 /* Block clients from rendering to the new back buffer until
26820 * the flip occurs and the object is no longer visible.
26821 */
26822- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
26823+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
26824
26825 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
26826 if (ret)
26827@@ -6947,7 +6947,7 @@ static int intel_crtc_page_flip(struct d
26828 return 0;
26829
26830 cleanup_pending:
26831- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
26832+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
26833 cleanup_objs:
26834 drm_gem_object_unreference(&work->old_fb_obj->base);
26835 drm_gem_object_unreference(&obj->base);
26836diff -urNp linux-3.1.1/drivers/gpu/drm/mga/mga_drv.h linux-3.1.1/drivers/gpu/drm/mga/mga_drv.h
26837--- linux-3.1.1/drivers/gpu/drm/mga/mga_drv.h 2011-11-11 15:19:27.000000000 -0500
26838+++ linux-3.1.1/drivers/gpu/drm/mga/mga_drv.h 2011-11-16 18:39:07.000000000 -0500
26839@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
26840 u32 clear_cmd;
26841 u32 maccess;
26842
26843- atomic_t vbl_received; /**< Number of vblanks received. */
26844+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
26845 wait_queue_head_t fence_queue;
26846- atomic_t last_fence_retired;
26847+ atomic_unchecked_t last_fence_retired;
26848 u32 next_fence_to_post;
26849
26850 unsigned int fb_cpp;
26851diff -urNp linux-3.1.1/drivers/gpu/drm/mga/mga_irq.c linux-3.1.1/drivers/gpu/drm/mga/mga_irq.c
26852--- linux-3.1.1/drivers/gpu/drm/mga/mga_irq.c 2011-11-11 15:19:27.000000000 -0500
26853+++ linux-3.1.1/drivers/gpu/drm/mga/mga_irq.c 2011-11-16 18:39:07.000000000 -0500
26854@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
26855 if (crtc != 0)
26856 return 0;
26857
26858- return atomic_read(&dev_priv->vbl_received);
26859+ return atomic_read_unchecked(&dev_priv->vbl_received);
26860 }
26861
26862
26863@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
26864 /* VBLANK interrupt */
26865 if (status & MGA_VLINEPEN) {
26866 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
26867- atomic_inc(&dev_priv->vbl_received);
26868+ atomic_inc_unchecked(&dev_priv->vbl_received);
26869 drm_handle_vblank(dev, 0);
26870 handled = 1;
26871 }
26872@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
26873 if ((prim_start & ~0x03) != (prim_end & ~0x03))
26874 MGA_WRITE(MGA_PRIMEND, prim_end);
26875
26876- atomic_inc(&dev_priv->last_fence_retired);
26877+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
26878 DRM_WAKEUP(&dev_priv->fence_queue);
26879 handled = 1;
26880 }
26881@@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_dev
26882 * using fences.
26883 */
26884 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
26885- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
26886+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
26887 - *sequence) <= (1 << 23)));
26888
26889 *sequence = cur_fence;
26890diff -urNp linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_bios.c linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_bios.c
26891--- linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-11-11 15:19:27.000000000 -0500
26892+++ linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-11-16 18:39:07.000000000 -0500
26893@@ -201,7 +201,7 @@ struct methods {
26894 const char desc[8];
26895 void (*loadbios)(struct drm_device *, uint8_t *);
26896 const bool rw;
26897-};
26898+} __do_const;
26899
26900 static struct methods shadow_methods[] = {
26901 { "PRAMIN", load_vbios_pramin, true },
26902@@ -5489,7 +5489,7 @@ parse_bit_displayport_tbl_entry(struct d
26903 struct bit_table {
26904 const char id;
26905 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
26906-};
26907+} __no_const;
26908
26909 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
26910
26911diff -urNp linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_drv.h linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_drv.h
26912--- linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-11-11 15:19:27.000000000 -0500
26913+++ linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-11-16 18:39:07.000000000 -0500
26914@@ -238,7 +238,7 @@ struct nouveau_channel {
26915 struct list_head pending;
26916 uint32_t sequence;
26917 uint32_t sequence_ack;
26918- atomic_t last_sequence_irq;
26919+ atomic_unchecked_t last_sequence_irq;
26920 struct nouveau_vma vma;
26921 } fence;
26922
26923@@ -319,7 +319,7 @@ struct nouveau_exec_engine {
26924 u32 handle, u16 class);
26925 void (*set_tile_region)(struct drm_device *dev, int i);
26926 void (*tlb_flush)(struct drm_device *, int engine);
26927-};
26928+} __no_const;
26929
26930 struct nouveau_instmem_engine {
26931 void *priv;
26932@@ -341,13 +341,13 @@ struct nouveau_instmem_engine {
26933 struct nouveau_mc_engine {
26934 int (*init)(struct drm_device *dev);
26935 void (*takedown)(struct drm_device *dev);
26936-};
26937+} __no_const;
26938
26939 struct nouveau_timer_engine {
26940 int (*init)(struct drm_device *dev);
26941 void (*takedown)(struct drm_device *dev);
26942 uint64_t (*read)(struct drm_device *dev);
26943-};
26944+} __no_const;
26945
26946 struct nouveau_fb_engine {
26947 int num_tiles;
26948@@ -513,7 +513,7 @@ struct nouveau_vram_engine {
26949 void (*put)(struct drm_device *, struct nouveau_mem **);
26950
26951 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
26952-};
26953+} __no_const;
26954
26955 struct nouveau_engine {
26956 struct nouveau_instmem_engine instmem;
26957@@ -660,7 +660,7 @@ struct drm_nouveau_private {
26958 struct drm_global_reference mem_global_ref;
26959 struct ttm_bo_global_ref bo_global_ref;
26960 struct ttm_bo_device bdev;
26961- atomic_t validate_sequence;
26962+ atomic_unchecked_t validate_sequence;
26963 } ttm;
26964
26965 struct {
26966diff -urNp linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_fence.c linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_fence.c
26967--- linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-11-11 15:19:27.000000000 -0500
26968+++ linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-11-16 18:39:07.000000000 -0500
26969@@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_chan
26970 if (USE_REFCNT(dev))
26971 sequence = nvchan_rd32(chan, 0x48);
26972 else
26973- sequence = atomic_read(&chan->fence.last_sequence_irq);
26974+ sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
26975
26976 if (chan->fence.sequence_ack == sequence)
26977 goto out;
26978@@ -541,7 +541,7 @@ nouveau_fence_channel_init(struct nouvea
26979
26980 INIT_LIST_HEAD(&chan->fence.pending);
26981 spin_lock_init(&chan->fence.lock);
26982- atomic_set(&chan->fence.last_sequence_irq, 0);
26983+ atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
26984 return 0;
26985 }
26986
26987diff -urNp linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_gem.c linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_gem.c
26988--- linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-11-11 15:19:27.000000000 -0500
26989+++ linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-11-16 18:39:07.000000000 -0500
26990@@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *ch
26991 int trycnt = 0;
26992 int ret, i;
26993
26994- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
26995+ sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
26996 retry:
26997 if (++trycnt > 100000) {
26998 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
26999diff -urNp linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_state.c linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_state.c
27000--- linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_state.c 2011-11-11 15:19:27.000000000 -0500
27001+++ linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_state.c 2011-11-16 18:39:07.000000000 -0500
27002@@ -496,7 +496,7 @@ static bool nouveau_switcheroo_can_switc
27003 bool can_switch;
27004
27005 spin_lock(&dev->count_lock);
27006- can_switch = (dev->open_count == 0);
27007+ can_switch = (local_read(&dev->open_count) == 0);
27008 spin_unlock(&dev->count_lock);
27009 return can_switch;
27010 }
27011diff -urNp linux-3.1.1/drivers/gpu/drm/nouveau/nv04_graph.c linux-3.1.1/drivers/gpu/drm/nouveau/nv04_graph.c
27012--- linux-3.1.1/drivers/gpu/drm/nouveau/nv04_graph.c 2011-11-11 15:19:27.000000000 -0500
27013+++ linux-3.1.1/drivers/gpu/drm/nouveau/nv04_graph.c 2011-11-16 18:39:07.000000000 -0500
27014@@ -554,7 +554,7 @@ static int
27015 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
27016 u32 class, u32 mthd, u32 data)
27017 {
27018- atomic_set(&chan->fence.last_sequence_irq, data);
27019+ atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
27020 return 0;
27021 }
27022
27023diff -urNp linux-3.1.1/drivers/gpu/drm/r128/r128_cce.c linux-3.1.1/drivers/gpu/drm/r128/r128_cce.c
27024--- linux-3.1.1/drivers/gpu/drm/r128/r128_cce.c 2011-11-11 15:19:27.000000000 -0500
27025+++ linux-3.1.1/drivers/gpu/drm/r128/r128_cce.c 2011-11-16 18:39:07.000000000 -0500
27026@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
27027
27028 /* GH: Simple idle check.
27029 */
27030- atomic_set(&dev_priv->idle_count, 0);
27031+ atomic_set_unchecked(&dev_priv->idle_count, 0);
27032
27033 /* We don't support anything other than bus-mastering ring mode,
27034 * but the ring can be in either AGP or PCI space for the ring
27035diff -urNp linux-3.1.1/drivers/gpu/drm/r128/r128_drv.h linux-3.1.1/drivers/gpu/drm/r128/r128_drv.h
27036--- linux-3.1.1/drivers/gpu/drm/r128/r128_drv.h 2011-11-11 15:19:27.000000000 -0500
27037+++ linux-3.1.1/drivers/gpu/drm/r128/r128_drv.h 2011-11-16 18:39:07.000000000 -0500
27038@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
27039 int is_pci;
27040 unsigned long cce_buffers_offset;
27041
27042- atomic_t idle_count;
27043+ atomic_unchecked_t idle_count;
27044
27045 int page_flipping;
27046 int current_page;
27047 u32 crtc_offset;
27048 u32 crtc_offset_cntl;
27049
27050- atomic_t vbl_received;
27051+ atomic_unchecked_t vbl_received;
27052
27053 u32 color_fmt;
27054 unsigned int front_offset;
27055diff -urNp linux-3.1.1/drivers/gpu/drm/r128/r128_irq.c linux-3.1.1/drivers/gpu/drm/r128/r128_irq.c
27056--- linux-3.1.1/drivers/gpu/drm/r128/r128_irq.c 2011-11-11 15:19:27.000000000 -0500
27057+++ linux-3.1.1/drivers/gpu/drm/r128/r128_irq.c 2011-11-16 18:39:07.000000000 -0500
27058@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
27059 if (crtc != 0)
27060 return 0;
27061
27062- return atomic_read(&dev_priv->vbl_received);
27063+ return atomic_read_unchecked(&dev_priv->vbl_received);
27064 }
27065
27066 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
27067@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
27068 /* VBLANK interrupt */
27069 if (status & R128_CRTC_VBLANK_INT) {
27070 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
27071- atomic_inc(&dev_priv->vbl_received);
27072+ atomic_inc_unchecked(&dev_priv->vbl_received);
27073 drm_handle_vblank(dev, 0);
27074 return IRQ_HANDLED;
27075 }
27076diff -urNp linux-3.1.1/drivers/gpu/drm/r128/r128_state.c linux-3.1.1/drivers/gpu/drm/r128/r128_state.c
27077--- linux-3.1.1/drivers/gpu/drm/r128/r128_state.c 2011-11-11 15:19:27.000000000 -0500
27078+++ linux-3.1.1/drivers/gpu/drm/r128/r128_state.c 2011-11-16 18:39:07.000000000 -0500
27079@@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_priv
27080
27081 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
27082 {
27083- if (atomic_read(&dev_priv->idle_count) == 0)
27084+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
27085 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
27086 else
27087- atomic_set(&dev_priv->idle_count, 0);
27088+ atomic_set_unchecked(&dev_priv->idle_count, 0);
27089 }
27090
27091 #endif
27092diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/atom.c linux-3.1.1/drivers/gpu/drm/radeon/atom.c
27093--- linux-3.1.1/drivers/gpu/drm/radeon/atom.c 2011-11-11 15:19:27.000000000 -0500
27094+++ linux-3.1.1/drivers/gpu/drm/radeon/atom.c 2011-11-16 19:09:42.000000000 -0500
27095@@ -1254,6 +1254,8 @@ struct atom_context *atom_parse(struct c
27096 char name[512];
27097 int i;
27098
27099+ pax_track_stack();
27100+
27101 if (!ctx)
27102 return NULL;
27103
27104diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/mkregtable.c linux-3.1.1/drivers/gpu/drm/radeon/mkregtable.c
27105--- linux-3.1.1/drivers/gpu/drm/radeon/mkregtable.c 2011-11-11 15:19:27.000000000 -0500
27106+++ linux-3.1.1/drivers/gpu/drm/radeon/mkregtable.c 2011-11-16 18:39:07.000000000 -0500
27107@@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
27108 regex_t mask_rex;
27109 regmatch_t match[4];
27110 char buf[1024];
27111- size_t end;
27112+ long end;
27113 int len;
27114 int done = 0;
27115 int r;
27116 unsigned o;
27117 struct offset *offset;
27118 char last_reg_s[10];
27119- int last_reg;
27120+ unsigned long last_reg;
27121
27122 if (regcomp
27123 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
27124diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/radeon_atombios.c linux-3.1.1/drivers/gpu/drm/radeon/radeon_atombios.c
27125--- linux-3.1.1/drivers/gpu/drm/radeon/radeon_atombios.c 2011-11-11 15:19:27.000000000 -0500
27126+++ linux-3.1.1/drivers/gpu/drm/radeon/radeon_atombios.c 2011-11-16 18:40:10.000000000 -0500
27127@@ -545,6 +545,8 @@ bool radeon_get_atom_connector_info_from
27128 struct radeon_gpio_rec gpio;
27129 struct radeon_hpd hpd;
27130
27131+ pax_track_stack();
27132+
27133 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
27134 return false;
27135
27136diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/radeon_device.c linux-3.1.1/drivers/gpu/drm/radeon/radeon_device.c
27137--- linux-3.1.1/drivers/gpu/drm/radeon/radeon_device.c 2011-11-11 15:19:27.000000000 -0500
27138+++ linux-3.1.1/drivers/gpu/drm/radeon/radeon_device.c 2011-11-16 18:39:07.000000000 -0500
27139@@ -684,7 +684,7 @@ static bool radeon_switcheroo_can_switch
27140 bool can_switch;
27141
27142 spin_lock(&dev->count_lock);
27143- can_switch = (dev->open_count == 0);
27144+ can_switch = (local_read(&dev->open_count) == 0);
27145 spin_unlock(&dev->count_lock);
27146 return can_switch;
27147 }
27148diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/radeon_display.c linux-3.1.1/drivers/gpu/drm/radeon/radeon_display.c
27149--- linux-3.1.1/drivers/gpu/drm/radeon/radeon_display.c 2011-11-11 15:19:27.000000000 -0500
27150+++ linux-3.1.1/drivers/gpu/drm/radeon/radeon_display.c 2011-11-16 18:40:10.000000000 -0500
27151@@ -925,6 +925,8 @@ void radeon_compute_pll_legacy(struct ra
27152 uint32_t post_div;
27153 u32 pll_out_min, pll_out_max;
27154
27155+ pax_track_stack();
27156+
27157 DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
27158 freq = freq * 1000;
27159
27160diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/radeon_drv.h linux-3.1.1/drivers/gpu/drm/radeon/radeon_drv.h
27161--- linux-3.1.1/drivers/gpu/drm/radeon/radeon_drv.h 2011-11-11 15:19:27.000000000 -0500
27162+++ linux-3.1.1/drivers/gpu/drm/radeon/radeon_drv.h 2011-11-16 18:39:07.000000000 -0500
27163@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
27164
27165 /* SW interrupt */
27166 wait_queue_head_t swi_queue;
27167- atomic_t swi_emitted;
27168+ atomic_unchecked_t swi_emitted;
27169 int vblank_crtc;
27170 uint32_t irq_enable_reg;
27171 uint32_t r500_disp_irq_reg;
27172diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/radeon_fence.c linux-3.1.1/drivers/gpu/drm/radeon/radeon_fence.c
27173--- linux-3.1.1/drivers/gpu/drm/radeon/radeon_fence.c 2011-11-11 15:19:27.000000000 -0500
27174+++ linux-3.1.1/drivers/gpu/drm/radeon/radeon_fence.c 2011-11-16 18:39:07.000000000 -0500
27175@@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_devi
27176 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
27177 return 0;
27178 }
27179- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
27180+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
27181 if (!rdev->cp.ready)
27182 /* FIXME: cp is not running assume everythings is done right
27183 * away
27184@@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct rade
27185 return r;
27186 }
27187 radeon_fence_write(rdev, 0);
27188- atomic_set(&rdev->fence_drv.seq, 0);
27189+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
27190 INIT_LIST_HEAD(&rdev->fence_drv.created);
27191 INIT_LIST_HEAD(&rdev->fence_drv.emited);
27192 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
27193diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/radeon.h linux-3.1.1/drivers/gpu/drm/radeon/radeon.h
27194--- linux-3.1.1/drivers/gpu/drm/radeon/radeon.h 2011-11-11 15:19:27.000000000 -0500
27195+++ linux-3.1.1/drivers/gpu/drm/radeon/radeon.h 2011-11-16 18:39:07.000000000 -0500
27196@@ -192,7 +192,7 @@ extern int sumo_get_temp(struct radeon_d
27197 */
27198 struct radeon_fence_driver {
27199 uint32_t scratch_reg;
27200- atomic_t seq;
27201+ atomic_unchecked_t seq;
27202 uint32_t last_seq;
27203 unsigned long last_jiffies;
27204 unsigned long last_timeout;
27205@@ -962,7 +962,7 @@ struct radeon_asic {
27206 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
27207 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
27208 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
27209-};
27210+} __no_const;
27211
27212 /*
27213 * Asic structures
27214diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/radeon_ioc32.c linux-3.1.1/drivers/gpu/drm/radeon/radeon_ioc32.c
27215--- linux-3.1.1/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-11-11 15:19:27.000000000 -0500
27216+++ linux-3.1.1/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-11-16 18:39:07.000000000 -0500
27217@@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(str
27218 request = compat_alloc_user_space(sizeof(*request));
27219 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
27220 || __put_user(req32.param, &request->param)
27221- || __put_user((void __user *)(unsigned long)req32.value,
27222+ || __put_user((unsigned long)req32.value,
27223 &request->value))
27224 return -EFAULT;
27225
27226diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/radeon_irq.c linux-3.1.1/drivers/gpu/drm/radeon/radeon_irq.c
27227--- linux-3.1.1/drivers/gpu/drm/radeon/radeon_irq.c 2011-11-11 15:19:27.000000000 -0500
27228+++ linux-3.1.1/drivers/gpu/drm/radeon/radeon_irq.c 2011-11-16 18:39:07.000000000 -0500
27229@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
27230 unsigned int ret;
27231 RING_LOCALS;
27232
27233- atomic_inc(&dev_priv->swi_emitted);
27234- ret = atomic_read(&dev_priv->swi_emitted);
27235+ atomic_inc_unchecked(&dev_priv->swi_emitted);
27236+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
27237
27238 BEGIN_RING(4);
27239 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
27240@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
27241 drm_radeon_private_t *dev_priv =
27242 (drm_radeon_private_t *) dev->dev_private;
27243
27244- atomic_set(&dev_priv->swi_emitted, 0);
27245+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
27246 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
27247
27248 dev->max_vblank_count = 0x001fffff;
27249diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/radeon_state.c linux-3.1.1/drivers/gpu/drm/radeon/radeon_state.c
27250--- linux-3.1.1/drivers/gpu/drm/radeon/radeon_state.c 2011-11-11 15:19:27.000000000 -0500
27251+++ linux-3.1.1/drivers/gpu/drm/radeon/radeon_state.c 2011-11-16 18:39:07.000000000 -0500
27252@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_de
27253 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
27254 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
27255
27256- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
27257+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
27258 sarea_priv->nbox * sizeof(depth_boxes[0])))
27259 return -EFAULT;
27260
27261@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm
27262 {
27263 drm_radeon_private_t *dev_priv = dev->dev_private;
27264 drm_radeon_getparam_t *param = data;
27265- int value;
27266+ int value = 0;
27267
27268 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
27269
27270diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/radeon_ttm.c linux-3.1.1/drivers/gpu/drm/radeon/radeon_ttm.c
27271--- linux-3.1.1/drivers/gpu/drm/radeon/radeon_ttm.c 2011-11-11 15:19:27.000000000 -0500
27272+++ linux-3.1.1/drivers/gpu/drm/radeon/radeon_ttm.c 2011-11-16 18:39:07.000000000 -0500
27273@@ -672,8 +672,10 @@ int radeon_mmap(struct file *filp, struc
27274 }
27275 if (unlikely(ttm_vm_ops == NULL)) {
27276 ttm_vm_ops = vma->vm_ops;
27277- radeon_ttm_vm_ops = *ttm_vm_ops;
27278- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
27279+ pax_open_kernel();
27280+ memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
27281+ *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
27282+ pax_close_kernel();
27283 }
27284 vma->vm_ops = &radeon_ttm_vm_ops;
27285 return 0;
27286diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/rs690.c linux-3.1.1/drivers/gpu/drm/radeon/rs690.c
27287--- linux-3.1.1/drivers/gpu/drm/radeon/rs690.c 2011-11-11 15:19:27.000000000 -0500
27288+++ linux-3.1.1/drivers/gpu/drm/radeon/rs690.c 2011-11-16 18:39:07.000000000 -0500
27289@@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct
27290 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
27291 rdev->pm.sideport_bandwidth.full)
27292 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
27293- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
27294+ read_delay_latency.full = dfixed_const(800 * 1000);
27295 read_delay_latency.full = dfixed_div(read_delay_latency,
27296 rdev->pm.igp_sideport_mclk);
27297+ a.full = dfixed_const(370);
27298+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
27299 } else {
27300 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
27301 rdev->pm.k8_bandwidth.full)
27302diff -urNp linux-3.1.1/drivers/gpu/drm/ttm/ttm_page_alloc.c linux-3.1.1/drivers/gpu/drm/ttm/ttm_page_alloc.c
27303--- linux-3.1.1/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-11-11 15:19:27.000000000 -0500
27304+++ linux-3.1.1/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-11-16 18:39:07.000000000 -0500
27305@@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages
27306 static int ttm_pool_mm_shrink(struct shrinker *shrink,
27307 struct shrink_control *sc)
27308 {
27309- static atomic_t start_pool = ATOMIC_INIT(0);
27310+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
27311 unsigned i;
27312- unsigned pool_offset = atomic_add_return(1, &start_pool);
27313+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
27314 struct ttm_page_pool *pool;
27315 int shrink_pages = sc->nr_to_scan;
27316
27317diff -urNp linux-3.1.1/drivers/gpu/drm/via/via_drv.h linux-3.1.1/drivers/gpu/drm/via/via_drv.h
27318--- linux-3.1.1/drivers/gpu/drm/via/via_drv.h 2011-11-11 15:19:27.000000000 -0500
27319+++ linux-3.1.1/drivers/gpu/drm/via/via_drv.h 2011-11-16 18:39:07.000000000 -0500
27320@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
27321 typedef uint32_t maskarray_t[5];
27322
27323 typedef struct drm_via_irq {
27324- atomic_t irq_received;
27325+ atomic_unchecked_t irq_received;
27326 uint32_t pending_mask;
27327 uint32_t enable_mask;
27328 wait_queue_head_t irq_queue;
27329@@ -75,7 +75,7 @@ typedef struct drm_via_private {
27330 struct timeval last_vblank;
27331 int last_vblank_valid;
27332 unsigned usec_per_vblank;
27333- atomic_t vbl_received;
27334+ atomic_unchecked_t vbl_received;
27335 drm_via_state_t hc_state;
27336 char pci_buf[VIA_PCI_BUF_SIZE];
27337 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
27338diff -urNp linux-3.1.1/drivers/gpu/drm/via/via_irq.c linux-3.1.1/drivers/gpu/drm/via/via_irq.c
27339--- linux-3.1.1/drivers/gpu/drm/via/via_irq.c 2011-11-11 15:19:27.000000000 -0500
27340+++ linux-3.1.1/drivers/gpu/drm/via/via_irq.c 2011-11-16 18:39:07.000000000 -0500
27341@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
27342 if (crtc != 0)
27343 return 0;
27344
27345- return atomic_read(&dev_priv->vbl_received);
27346+ return atomic_read_unchecked(&dev_priv->vbl_received);
27347 }
27348
27349 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
27350@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
27351
27352 status = VIA_READ(VIA_REG_INTERRUPT);
27353 if (status & VIA_IRQ_VBLANK_PENDING) {
27354- atomic_inc(&dev_priv->vbl_received);
27355- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
27356+ atomic_inc_unchecked(&dev_priv->vbl_received);
27357+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
27358 do_gettimeofday(&cur_vblank);
27359 if (dev_priv->last_vblank_valid) {
27360 dev_priv->usec_per_vblank =
27361@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
27362 dev_priv->last_vblank = cur_vblank;
27363 dev_priv->last_vblank_valid = 1;
27364 }
27365- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
27366+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
27367 DRM_DEBUG("US per vblank is: %u\n",
27368 dev_priv->usec_per_vblank);
27369 }
27370@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
27371
27372 for (i = 0; i < dev_priv->num_irqs; ++i) {
27373 if (status & cur_irq->pending_mask) {
27374- atomic_inc(&cur_irq->irq_received);
27375+ atomic_inc_unchecked(&cur_irq->irq_received);
27376 DRM_WAKEUP(&cur_irq->irq_queue);
27377 handled = 1;
27378 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
27379@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *d
27380 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
27381 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
27382 masks[irq][4]));
27383- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
27384+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
27385 } else {
27386 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
27387 (((cur_irq_sequence =
27388- atomic_read(&cur_irq->irq_received)) -
27389+ atomic_read_unchecked(&cur_irq->irq_received)) -
27390 *sequence) <= (1 << 23)));
27391 }
27392 *sequence = cur_irq_sequence;
27393@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct dr
27394 }
27395
27396 for (i = 0; i < dev_priv->num_irqs; ++i) {
27397- atomic_set(&cur_irq->irq_received, 0);
27398+ atomic_set_unchecked(&cur_irq->irq_received, 0);
27399 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
27400 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
27401 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
27402@@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev,
27403 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
27404 case VIA_IRQ_RELATIVE:
27405 irqwait->request.sequence +=
27406- atomic_read(&cur_irq->irq_received);
27407+ atomic_read_unchecked(&cur_irq->irq_received);
27408 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
27409 case VIA_IRQ_ABSOLUTE:
27410 break;
27411diff -urNp linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
27412--- linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-11-11 15:19:27.000000000 -0500
27413+++ linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-11-16 18:39:07.000000000 -0500
27414@@ -240,7 +240,7 @@ struct vmw_private {
27415 * Fencing and IRQs.
27416 */
27417
27418- atomic_t fence_seq;
27419+ atomic_unchecked_t fence_seq;
27420 wait_queue_head_t fence_queue;
27421 wait_queue_head_t fifo_queue;
27422 atomic_t fence_queue_waiters;
27423diff -urNp linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
27424--- linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c 2011-11-11 15:19:27.000000000 -0500
27425+++ linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c 2011-11-16 18:39:07.000000000 -0500
27426@@ -610,7 +610,7 @@ int vmw_execbuf_ioctl(struct drm_device
27427 struct drm_vmw_fence_rep fence_rep;
27428 struct drm_vmw_fence_rep __user *user_fence_rep;
27429 int ret;
27430- void *user_cmd;
27431+ void __user *user_cmd;
27432 void *cmd;
27433 uint32_t sequence;
27434 struct vmw_sw_context *sw_context = &dev_priv->ctx;
27435diff -urNp linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
27436--- linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-11-11 15:19:27.000000000 -0500
27437+++ linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-11-16 18:39:07.000000000 -0500
27438@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev
27439 while (!vmw_lag_lt(queue, us)) {
27440 spin_lock(&queue->lock);
27441 if (list_empty(&queue->head))
27442- sequence = atomic_read(&dev_priv->fence_seq);
27443+ sequence = atomic_read_unchecked(&dev_priv->fence_seq);
27444 else {
27445 fence = list_first_entry(&queue->head,
27446 struct vmw_fence, head);
27447diff -urNp linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
27448--- linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-11-11 15:19:27.000000000 -0500
27449+++ linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-11-16 18:39:07.000000000 -0500
27450@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *de
27451 (unsigned int) min,
27452 (unsigned int) fifo->capabilities);
27453
27454- atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
27455+ atomic_set_unchecked(&dev_priv->fence_seq, dev_priv->last_read_sequence);
27456 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
27457 vmw_fence_queue_init(&fifo->fence_queue);
27458 return vmw_fifo_send_fence(dev_priv, &dummy);
27459@@ -356,7 +356,7 @@ void *vmw_fifo_reserve(struct vmw_privat
27460 if (reserveable)
27461 iowrite32(bytes, fifo_mem +
27462 SVGA_FIFO_RESERVED);
27463- return fifo_mem + (next_cmd >> 2);
27464+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
27465 } else {
27466 need_bounce = true;
27467 }
27468@@ -476,7 +476,7 @@ int vmw_fifo_send_fence(struct vmw_priva
27469
27470 fm = vmw_fifo_reserve(dev_priv, bytes);
27471 if (unlikely(fm == NULL)) {
27472- *sequence = atomic_read(&dev_priv->fence_seq);
27473+ *sequence = atomic_read_unchecked(&dev_priv->fence_seq);
27474 ret = -ENOMEM;
27475 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
27476 false, 3*HZ);
27477@@ -484,7 +484,7 @@ int vmw_fifo_send_fence(struct vmw_priva
27478 }
27479
27480 do {
27481- *sequence = atomic_add_return(1, &dev_priv->fence_seq);
27482+ *sequence = atomic_add_return_unchecked(1, &dev_priv->fence_seq);
27483 } while (*sequence == 0);
27484
27485 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
27486diff -urNp linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
27487--- linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-11-11 15:19:27.000000000 -0500
27488+++ linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-11-16 18:39:07.000000000 -0500
27489@@ -100,7 +100,7 @@ bool vmw_fence_signaled(struct vmw_priva
27490 * emitted. Then the fence is stale and signaled.
27491 */
27492
27493- ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
27494+ ret = ((atomic_read_unchecked(&dev_priv->fence_seq) - sequence)
27495 > VMW_FENCE_WRAP);
27496
27497 return ret;
27498@@ -131,7 +131,7 @@ int vmw_fallback_wait(struct vmw_private
27499
27500 if (fifo_idle)
27501 down_read(&fifo_state->rwsem);
27502- signal_seq = atomic_read(&dev_priv->fence_seq);
27503+ signal_seq = atomic_read_unchecked(&dev_priv->fence_seq);
27504 ret = 0;
27505
27506 for (;;) {
27507diff -urNp linux-3.1.1/drivers/hid/hid-core.c linux-3.1.1/drivers/hid/hid-core.c
27508--- linux-3.1.1/drivers/hid/hid-core.c 2011-11-11 15:19:27.000000000 -0500
27509+++ linux-3.1.1/drivers/hid/hid-core.c 2011-11-16 18:39:07.000000000 -0500
27510@@ -1951,7 +1951,7 @@ static bool hid_ignore(struct hid_device
27511
27512 int hid_add_device(struct hid_device *hdev)
27513 {
27514- static atomic_t id = ATOMIC_INIT(0);
27515+ static atomic_unchecked_t id = ATOMIC_INIT(0);
27516 int ret;
27517
27518 if (WARN_ON(hdev->status & HID_STAT_ADDED))
27519@@ -1966,7 +1966,7 @@ int hid_add_device(struct hid_device *hd
27520 /* XXX hack, any other cleaner solution after the driver core
27521 * is converted to allow more than 20 bytes as the device name? */
27522 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
27523- hdev->vendor, hdev->product, atomic_inc_return(&id));
27524+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
27525
27526 hid_debug_register(hdev, dev_name(&hdev->dev));
27527 ret = device_add(&hdev->dev);
27528diff -urNp linux-3.1.1/drivers/hid/usbhid/hiddev.c linux-3.1.1/drivers/hid/usbhid/hiddev.c
27529--- linux-3.1.1/drivers/hid/usbhid/hiddev.c 2011-11-11 15:19:27.000000000 -0500
27530+++ linux-3.1.1/drivers/hid/usbhid/hiddev.c 2011-11-16 18:39:07.000000000 -0500
27531@@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *fi
27532 break;
27533
27534 case HIDIOCAPPLICATION:
27535- if (arg < 0 || arg >= hid->maxapplication)
27536+ if (arg >= hid->maxapplication)
27537 break;
27538
27539 for (i = 0; i < hid->maxcollection; i++)
27540diff -urNp linux-3.1.1/drivers/hwmon/acpi_power_meter.c linux-3.1.1/drivers/hwmon/acpi_power_meter.c
27541--- linux-3.1.1/drivers/hwmon/acpi_power_meter.c 2011-11-11 15:19:27.000000000 -0500
27542+++ linux-3.1.1/drivers/hwmon/acpi_power_meter.c 2011-11-16 18:39:07.000000000 -0500
27543@@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *d
27544 return res;
27545
27546 temp /= 1000;
27547- if (temp < 0)
27548- return -EINVAL;
27549
27550 mutex_lock(&resource->lock);
27551 resource->trip[attr->index - 7] = temp;
27552diff -urNp linux-3.1.1/drivers/hwmon/sht15.c linux-3.1.1/drivers/hwmon/sht15.c
27553--- linux-3.1.1/drivers/hwmon/sht15.c 2011-11-11 15:19:27.000000000 -0500
27554+++ linux-3.1.1/drivers/hwmon/sht15.c 2011-11-16 18:39:07.000000000 -0500
27555@@ -166,7 +166,7 @@ struct sht15_data {
27556 int supply_uV;
27557 bool supply_uV_valid;
27558 struct work_struct update_supply_work;
27559- atomic_t interrupt_handled;
27560+ atomic_unchecked_t interrupt_handled;
27561 };
27562
27563 /**
27564@@ -509,13 +509,13 @@ static int sht15_measurement(struct sht1
27565 return ret;
27566
27567 gpio_direction_input(data->pdata->gpio_data);
27568- atomic_set(&data->interrupt_handled, 0);
27569+ atomic_set_unchecked(&data->interrupt_handled, 0);
27570
27571 enable_irq(gpio_to_irq(data->pdata->gpio_data));
27572 if (gpio_get_value(data->pdata->gpio_data) == 0) {
27573 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
27574 /* Only relevant if the interrupt hasn't occurred. */
27575- if (!atomic_read(&data->interrupt_handled))
27576+ if (!atomic_read_unchecked(&data->interrupt_handled))
27577 schedule_work(&data->read_work);
27578 }
27579 ret = wait_event_timeout(data->wait_queue,
27580@@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired
27581
27582 /* First disable the interrupt */
27583 disable_irq_nosync(irq);
27584- atomic_inc(&data->interrupt_handled);
27585+ atomic_inc_unchecked(&data->interrupt_handled);
27586 /* Then schedule a reading work struct */
27587 if (data->state != SHT15_READING_NOTHING)
27588 schedule_work(&data->read_work);
27589@@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct wo
27590 * If not, then start the interrupt again - care here as could
27591 * have gone low in meantime so verify it hasn't!
27592 */
27593- atomic_set(&data->interrupt_handled, 0);
27594+ atomic_set_unchecked(&data->interrupt_handled, 0);
27595 enable_irq(gpio_to_irq(data->pdata->gpio_data));
27596 /* If still not occurred or another handler has been scheduled */
27597 if (gpio_get_value(data->pdata->gpio_data)
27598- || atomic_read(&data->interrupt_handled))
27599+ || atomic_read_unchecked(&data->interrupt_handled))
27600 return;
27601 }
27602
27603diff -urNp linux-3.1.1/drivers/i2c/busses/i2c-amd756-s4882.c linux-3.1.1/drivers/i2c/busses/i2c-amd756-s4882.c
27604--- linux-3.1.1/drivers/i2c/busses/i2c-amd756-s4882.c 2011-11-11 15:19:27.000000000 -0500
27605+++ linux-3.1.1/drivers/i2c/busses/i2c-amd756-s4882.c 2011-11-16 18:39:07.000000000 -0500
27606@@ -43,7 +43,7 @@
27607 extern struct i2c_adapter amd756_smbus;
27608
27609 static struct i2c_adapter *s4882_adapter;
27610-static struct i2c_algorithm *s4882_algo;
27611+static i2c_algorithm_no_const *s4882_algo;
27612
27613 /* Wrapper access functions for multiplexed SMBus */
27614 static DEFINE_MUTEX(amd756_lock);
27615diff -urNp linux-3.1.1/drivers/i2c/busses/i2c-nforce2-s4985.c linux-3.1.1/drivers/i2c/busses/i2c-nforce2-s4985.c
27616--- linux-3.1.1/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-11-11 15:19:27.000000000 -0500
27617+++ linux-3.1.1/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-11-16 18:39:07.000000000 -0500
27618@@ -41,7 +41,7 @@
27619 extern struct i2c_adapter *nforce2_smbus;
27620
27621 static struct i2c_adapter *s4985_adapter;
27622-static struct i2c_algorithm *s4985_algo;
27623+static i2c_algorithm_no_const *s4985_algo;
27624
27625 /* Wrapper access functions for multiplexed SMBus */
27626 static DEFINE_MUTEX(nforce2_lock);
27627diff -urNp linux-3.1.1/drivers/i2c/i2c-mux.c linux-3.1.1/drivers/i2c/i2c-mux.c
27628--- linux-3.1.1/drivers/i2c/i2c-mux.c 2011-11-11 15:19:27.000000000 -0500
27629+++ linux-3.1.1/drivers/i2c/i2c-mux.c 2011-11-16 18:39:07.000000000 -0500
27630@@ -28,7 +28,7 @@
27631 /* multiplexer per channel data */
27632 struct i2c_mux_priv {
27633 struct i2c_adapter adap;
27634- struct i2c_algorithm algo;
27635+ i2c_algorithm_no_const algo;
27636
27637 struct i2c_adapter *parent;
27638 void *mux_dev; /* the mux chip/device */
27639diff -urNp linux-3.1.1/drivers/ide/aec62xx.c linux-3.1.1/drivers/ide/aec62xx.c
27640--- linux-3.1.1/drivers/ide/aec62xx.c 2011-11-11 15:19:27.000000000 -0500
27641+++ linux-3.1.1/drivers/ide/aec62xx.c 2011-11-16 18:39:07.000000000 -0500
27642@@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_
27643 .cable_detect = atp86x_cable_detect,
27644 };
27645
27646-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
27647+static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
27648 { /* 0: AEC6210 */
27649 .name = DRV_NAME,
27650 .init_chipset = init_chipset_aec62xx,
27651diff -urNp linux-3.1.1/drivers/ide/alim15x3.c linux-3.1.1/drivers/ide/alim15x3.c
27652--- linux-3.1.1/drivers/ide/alim15x3.c 2011-11-11 15:19:27.000000000 -0500
27653+++ linux-3.1.1/drivers/ide/alim15x3.c 2011-11-16 18:39:07.000000000 -0500
27654@@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_
27655 .dma_sff_read_status = ide_dma_sff_read_status,
27656 };
27657
27658-static const struct ide_port_info ali15x3_chipset __devinitdata = {
27659+static const struct ide_port_info ali15x3_chipset __devinitconst = {
27660 .name = DRV_NAME,
27661 .init_chipset = init_chipset_ali15x3,
27662 .init_hwif = init_hwif_ali15x3,
27663diff -urNp linux-3.1.1/drivers/ide/amd74xx.c linux-3.1.1/drivers/ide/amd74xx.c
27664--- linux-3.1.1/drivers/ide/amd74xx.c 2011-11-11 15:19:27.000000000 -0500
27665+++ linux-3.1.1/drivers/ide/amd74xx.c 2011-11-16 18:39:07.000000000 -0500
27666@@ -223,7 +223,7 @@ static const struct ide_port_ops amd_por
27667 .udma_mask = udma, \
27668 }
27669
27670-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
27671+static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
27672 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
27673 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
27674 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
27675diff -urNp linux-3.1.1/drivers/ide/atiixp.c linux-3.1.1/drivers/ide/atiixp.c
27676--- linux-3.1.1/drivers/ide/atiixp.c 2011-11-11 15:19:27.000000000 -0500
27677+++ linux-3.1.1/drivers/ide/atiixp.c 2011-11-16 18:39:07.000000000 -0500
27678@@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_
27679 .cable_detect = atiixp_cable_detect,
27680 };
27681
27682-static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
27683+static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
27684 { /* 0: IXP200/300/400/700 */
27685 .name = DRV_NAME,
27686 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
27687diff -urNp linux-3.1.1/drivers/ide/cmd64x.c linux-3.1.1/drivers/ide/cmd64x.c
27688--- linux-3.1.1/drivers/ide/cmd64x.c 2011-11-11 15:19:27.000000000 -0500
27689+++ linux-3.1.1/drivers/ide/cmd64x.c 2011-11-16 18:39:07.000000000 -0500
27690@@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_r
27691 .dma_sff_read_status = ide_dma_sff_read_status,
27692 };
27693
27694-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
27695+static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
27696 { /* 0: CMD643 */
27697 .name = DRV_NAME,
27698 .init_chipset = init_chipset_cmd64x,
27699diff -urNp linux-3.1.1/drivers/ide/cs5520.c linux-3.1.1/drivers/ide/cs5520.c
27700--- linux-3.1.1/drivers/ide/cs5520.c 2011-11-11 15:19:27.000000000 -0500
27701+++ linux-3.1.1/drivers/ide/cs5520.c 2011-11-16 18:39:07.000000000 -0500
27702@@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_
27703 .set_dma_mode = cs5520_set_dma_mode,
27704 };
27705
27706-static const struct ide_port_info cyrix_chipset __devinitdata = {
27707+static const struct ide_port_info cyrix_chipset __devinitconst = {
27708 .name = DRV_NAME,
27709 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
27710 .port_ops = &cs5520_port_ops,
27711diff -urNp linux-3.1.1/drivers/ide/cs5530.c linux-3.1.1/drivers/ide/cs5530.c
27712--- linux-3.1.1/drivers/ide/cs5530.c 2011-11-11 15:19:27.000000000 -0500
27713+++ linux-3.1.1/drivers/ide/cs5530.c 2011-11-16 18:39:07.000000000 -0500
27714@@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_
27715 .udma_filter = cs5530_udma_filter,
27716 };
27717
27718-static const struct ide_port_info cs5530_chipset __devinitdata = {
27719+static const struct ide_port_info cs5530_chipset __devinitconst = {
27720 .name = DRV_NAME,
27721 .init_chipset = init_chipset_cs5530,
27722 .init_hwif = init_hwif_cs5530,
27723diff -urNp linux-3.1.1/drivers/ide/cs5535.c linux-3.1.1/drivers/ide/cs5535.c
27724--- linux-3.1.1/drivers/ide/cs5535.c 2011-11-11 15:19:27.000000000 -0500
27725+++ linux-3.1.1/drivers/ide/cs5535.c 2011-11-16 18:39:07.000000000 -0500
27726@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_
27727 .cable_detect = cs5535_cable_detect,
27728 };
27729
27730-static const struct ide_port_info cs5535_chipset __devinitdata = {
27731+static const struct ide_port_info cs5535_chipset __devinitconst = {
27732 .name = DRV_NAME,
27733 .port_ops = &cs5535_port_ops,
27734 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
27735diff -urNp linux-3.1.1/drivers/ide/cy82c693.c linux-3.1.1/drivers/ide/cy82c693.c
27736--- linux-3.1.1/drivers/ide/cy82c693.c 2011-11-11 15:19:27.000000000 -0500
27737+++ linux-3.1.1/drivers/ide/cy82c693.c 2011-11-16 18:39:07.000000000 -0500
27738@@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c69
27739 .set_dma_mode = cy82c693_set_dma_mode,
27740 };
27741
27742-static const struct ide_port_info cy82c693_chipset __devinitdata = {
27743+static const struct ide_port_info cy82c693_chipset __devinitconst = {
27744 .name = DRV_NAME,
27745 .init_iops = init_iops_cy82c693,
27746 .port_ops = &cy82c693_port_ops,
27747diff -urNp linux-3.1.1/drivers/ide/hpt366.c linux-3.1.1/drivers/ide/hpt366.c
27748--- linux-3.1.1/drivers/ide/hpt366.c 2011-11-11 15:19:27.000000000 -0500
27749+++ linux-3.1.1/drivers/ide/hpt366.c 2011-11-16 18:39:07.000000000 -0500
27750@@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings
27751 }
27752 };
27753
27754-static const struct hpt_info hpt36x __devinitdata = {
27755+static const struct hpt_info hpt36x __devinitconst = {
27756 .chip_name = "HPT36x",
27757 .chip_type = HPT36x,
27758 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
27759@@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __de
27760 .timings = &hpt36x_timings
27761 };
27762
27763-static const struct hpt_info hpt370 __devinitdata = {
27764+static const struct hpt_info hpt370 __devinitconst = {
27765 .chip_name = "HPT370",
27766 .chip_type = HPT370,
27767 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
27768@@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __de
27769 .timings = &hpt37x_timings
27770 };
27771
27772-static const struct hpt_info hpt370a __devinitdata = {
27773+static const struct hpt_info hpt370a __devinitconst = {
27774 .chip_name = "HPT370A",
27775 .chip_type = HPT370A,
27776 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
27777@@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __d
27778 .timings = &hpt37x_timings
27779 };
27780
27781-static const struct hpt_info hpt374 __devinitdata = {
27782+static const struct hpt_info hpt374 __devinitconst = {
27783 .chip_name = "HPT374",
27784 .chip_type = HPT374,
27785 .udma_mask = ATA_UDMA5,
27786@@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __de
27787 .timings = &hpt37x_timings
27788 };
27789
27790-static const struct hpt_info hpt372 __devinitdata = {
27791+static const struct hpt_info hpt372 __devinitconst = {
27792 .chip_name = "HPT372",
27793 .chip_type = HPT372,
27794 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27795@@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __de
27796 .timings = &hpt37x_timings
27797 };
27798
27799-static const struct hpt_info hpt372a __devinitdata = {
27800+static const struct hpt_info hpt372a __devinitconst = {
27801 .chip_name = "HPT372A",
27802 .chip_type = HPT372A,
27803 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27804@@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __d
27805 .timings = &hpt37x_timings
27806 };
27807
27808-static const struct hpt_info hpt302 __devinitdata = {
27809+static const struct hpt_info hpt302 __devinitconst = {
27810 .chip_name = "HPT302",
27811 .chip_type = HPT302,
27812 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27813@@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __de
27814 .timings = &hpt37x_timings
27815 };
27816
27817-static const struct hpt_info hpt371 __devinitdata = {
27818+static const struct hpt_info hpt371 __devinitconst = {
27819 .chip_name = "HPT371",
27820 .chip_type = HPT371,
27821 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27822@@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __de
27823 .timings = &hpt37x_timings
27824 };
27825
27826-static const struct hpt_info hpt372n __devinitdata = {
27827+static const struct hpt_info hpt372n __devinitconst = {
27828 .chip_name = "HPT372N",
27829 .chip_type = HPT372N,
27830 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27831@@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __d
27832 .timings = &hpt37x_timings
27833 };
27834
27835-static const struct hpt_info hpt302n __devinitdata = {
27836+static const struct hpt_info hpt302n __devinitconst = {
27837 .chip_name = "HPT302N",
27838 .chip_type = HPT302N,
27839 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27840@@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __d
27841 .timings = &hpt37x_timings
27842 };
27843
27844-static const struct hpt_info hpt371n __devinitdata = {
27845+static const struct hpt_info hpt371n __devinitconst = {
27846 .chip_name = "HPT371N",
27847 .chip_type = HPT371N,
27848 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27849@@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_d
27850 .dma_sff_read_status = ide_dma_sff_read_status,
27851 };
27852
27853-static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
27854+static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
27855 { /* 0: HPT36x */
27856 .name = DRV_NAME,
27857 .init_chipset = init_chipset_hpt366,
27858diff -urNp linux-3.1.1/drivers/ide/ide-cd.c linux-3.1.1/drivers/ide/ide-cd.c
27859--- linux-3.1.1/drivers/ide/ide-cd.c 2011-11-11 15:19:27.000000000 -0500
27860+++ linux-3.1.1/drivers/ide/ide-cd.c 2011-11-16 18:39:07.000000000 -0500
27861@@ -769,7 +769,7 @@ static void cdrom_do_block_pc(ide_drive_
27862 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
27863 if ((unsigned long)buf & alignment
27864 || blk_rq_bytes(rq) & q->dma_pad_mask
27865- || object_is_on_stack(buf))
27866+ || object_starts_on_stack(buf))
27867 drive->dma = 0;
27868 }
27869 }
27870diff -urNp linux-3.1.1/drivers/ide/ide-floppy.c linux-3.1.1/drivers/ide/ide-floppy.c
27871--- linux-3.1.1/drivers/ide/ide-floppy.c 2011-11-11 15:19:27.000000000 -0500
27872+++ linux-3.1.1/drivers/ide/ide-floppy.c 2011-11-16 18:40:10.000000000 -0500
27873@@ -379,6 +379,8 @@ static int ide_floppy_get_capacity(ide_d
27874 u8 pc_buf[256], header_len, desc_cnt;
27875 int i, rc = 1, blocks, length;
27876
27877+ pax_track_stack();
27878+
27879 ide_debug_log(IDE_DBG_FUNC, "enter");
27880
27881 drive->bios_cyl = 0;
27882diff -urNp linux-3.1.1/drivers/ide/ide-pci-generic.c linux-3.1.1/drivers/ide/ide-pci-generic.c
27883--- linux-3.1.1/drivers/ide/ide-pci-generic.c 2011-11-11 15:19:27.000000000 -0500
27884+++ linux-3.1.1/drivers/ide/ide-pci-generic.c 2011-11-16 18:39:07.000000000 -0500
27885@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell
27886 .udma_mask = ATA_UDMA6, \
27887 }
27888
27889-static const struct ide_port_info generic_chipsets[] __devinitdata = {
27890+static const struct ide_port_info generic_chipsets[] __devinitconst = {
27891 /* 0: Unknown */
27892 DECLARE_GENERIC_PCI_DEV(0),
27893
27894diff -urNp linux-3.1.1/drivers/ide/it8172.c linux-3.1.1/drivers/ide/it8172.c
27895--- linux-3.1.1/drivers/ide/it8172.c 2011-11-11 15:19:27.000000000 -0500
27896+++ linux-3.1.1/drivers/ide/it8172.c 2011-11-16 18:39:07.000000000 -0500
27897@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_
27898 .set_dma_mode = it8172_set_dma_mode,
27899 };
27900
27901-static const struct ide_port_info it8172_port_info __devinitdata = {
27902+static const struct ide_port_info it8172_port_info __devinitconst = {
27903 .name = DRV_NAME,
27904 .port_ops = &it8172_port_ops,
27905 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
27906diff -urNp linux-3.1.1/drivers/ide/it8213.c linux-3.1.1/drivers/ide/it8213.c
27907--- linux-3.1.1/drivers/ide/it8213.c 2011-11-11 15:19:27.000000000 -0500
27908+++ linux-3.1.1/drivers/ide/it8213.c 2011-11-16 18:39:07.000000000 -0500
27909@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_
27910 .cable_detect = it8213_cable_detect,
27911 };
27912
27913-static const struct ide_port_info it8213_chipset __devinitdata = {
27914+static const struct ide_port_info it8213_chipset __devinitconst = {
27915 .name = DRV_NAME,
27916 .enablebits = { {0x41, 0x80, 0x80} },
27917 .port_ops = &it8213_port_ops,
27918diff -urNp linux-3.1.1/drivers/ide/it821x.c linux-3.1.1/drivers/ide/it821x.c
27919--- linux-3.1.1/drivers/ide/it821x.c 2011-11-11 15:19:27.000000000 -0500
27920+++ linux-3.1.1/drivers/ide/it821x.c 2011-11-16 18:39:07.000000000 -0500
27921@@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_
27922 .cable_detect = it821x_cable_detect,
27923 };
27924
27925-static const struct ide_port_info it821x_chipset __devinitdata = {
27926+static const struct ide_port_info it821x_chipset __devinitconst = {
27927 .name = DRV_NAME,
27928 .init_chipset = init_chipset_it821x,
27929 .init_hwif = init_hwif_it821x,
27930diff -urNp linux-3.1.1/drivers/ide/jmicron.c linux-3.1.1/drivers/ide/jmicron.c
27931--- linux-3.1.1/drivers/ide/jmicron.c 2011-11-11 15:19:27.000000000 -0500
27932+++ linux-3.1.1/drivers/ide/jmicron.c 2011-11-16 18:39:07.000000000 -0500
27933@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron
27934 .cable_detect = jmicron_cable_detect,
27935 };
27936
27937-static const struct ide_port_info jmicron_chipset __devinitdata = {
27938+static const struct ide_port_info jmicron_chipset __devinitconst = {
27939 .name = DRV_NAME,
27940 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
27941 .port_ops = &jmicron_port_ops,
27942diff -urNp linux-3.1.1/drivers/ide/ns87415.c linux-3.1.1/drivers/ide/ns87415.c
27943--- linux-3.1.1/drivers/ide/ns87415.c 2011-11-11 15:19:27.000000000 -0500
27944+++ linux-3.1.1/drivers/ide/ns87415.c 2011-11-16 18:39:07.000000000 -0500
27945@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_
27946 .dma_sff_read_status = superio_dma_sff_read_status,
27947 };
27948
27949-static const struct ide_port_info ns87415_chipset __devinitdata = {
27950+static const struct ide_port_info ns87415_chipset __devinitconst = {
27951 .name = DRV_NAME,
27952 .init_hwif = init_hwif_ns87415,
27953 .tp_ops = &ns87415_tp_ops,
27954diff -urNp linux-3.1.1/drivers/ide/opti621.c linux-3.1.1/drivers/ide/opti621.c
27955--- linux-3.1.1/drivers/ide/opti621.c 2011-11-11 15:19:27.000000000 -0500
27956+++ linux-3.1.1/drivers/ide/opti621.c 2011-11-16 18:39:07.000000000 -0500
27957@@ -131,7 +131,7 @@ static const struct ide_port_ops opti621
27958 .set_pio_mode = opti621_set_pio_mode,
27959 };
27960
27961-static const struct ide_port_info opti621_chipset __devinitdata = {
27962+static const struct ide_port_info opti621_chipset __devinitconst = {
27963 .name = DRV_NAME,
27964 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
27965 .port_ops = &opti621_port_ops,
27966diff -urNp linux-3.1.1/drivers/ide/pdc202xx_new.c linux-3.1.1/drivers/ide/pdc202xx_new.c
27967--- linux-3.1.1/drivers/ide/pdc202xx_new.c 2011-11-11 15:19:27.000000000 -0500
27968+++ linux-3.1.1/drivers/ide/pdc202xx_new.c 2011-11-16 18:39:07.000000000 -0500
27969@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_
27970 .udma_mask = udma, \
27971 }
27972
27973-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
27974+static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
27975 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
27976 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
27977 };
27978diff -urNp linux-3.1.1/drivers/ide/pdc202xx_old.c linux-3.1.1/drivers/ide/pdc202xx_old.c
27979--- linux-3.1.1/drivers/ide/pdc202xx_old.c 2011-11-11 15:19:27.000000000 -0500
27980+++ linux-3.1.1/drivers/ide/pdc202xx_old.c 2011-11-16 18:39:07.000000000 -0500
27981@@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x
27982 .max_sectors = sectors, \
27983 }
27984
27985-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
27986+static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
27987 { /* 0: PDC20246 */
27988 .name = DRV_NAME,
27989 .init_chipset = init_chipset_pdc202xx,
27990diff -urNp linux-3.1.1/drivers/ide/piix.c linux-3.1.1/drivers/ide/piix.c
27991--- linux-3.1.1/drivers/ide/piix.c 2011-11-11 15:19:27.000000000 -0500
27992+++ linux-3.1.1/drivers/ide/piix.c 2011-11-16 18:39:07.000000000 -0500
27993@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_por
27994 .udma_mask = udma, \
27995 }
27996
27997-static const struct ide_port_info piix_pci_info[] __devinitdata = {
27998+static const struct ide_port_info piix_pci_info[] __devinitconst = {
27999 /* 0: MPIIX */
28000 { /*
28001 * MPIIX actually has only a single IDE channel mapped to
28002diff -urNp linux-3.1.1/drivers/ide/rz1000.c linux-3.1.1/drivers/ide/rz1000.c
28003--- linux-3.1.1/drivers/ide/rz1000.c 2011-11-11 15:19:27.000000000 -0500
28004+++ linux-3.1.1/drivers/ide/rz1000.c 2011-11-16 18:39:07.000000000 -0500
28005@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_read
28006 }
28007 }
28008
28009-static const struct ide_port_info rz1000_chipset __devinitdata = {
28010+static const struct ide_port_info rz1000_chipset __devinitconst = {
28011 .name = DRV_NAME,
28012 .host_flags = IDE_HFLAG_NO_DMA,
28013 };
28014diff -urNp linux-3.1.1/drivers/ide/sc1200.c linux-3.1.1/drivers/ide/sc1200.c
28015--- linux-3.1.1/drivers/ide/sc1200.c 2011-11-11 15:19:27.000000000 -0500
28016+++ linux-3.1.1/drivers/ide/sc1200.c 2011-11-16 18:39:07.000000000 -0500
28017@@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_d
28018 .dma_sff_read_status = ide_dma_sff_read_status,
28019 };
28020
28021-static const struct ide_port_info sc1200_chipset __devinitdata = {
28022+static const struct ide_port_info sc1200_chipset __devinitconst = {
28023 .name = DRV_NAME,
28024 .port_ops = &sc1200_port_ops,
28025 .dma_ops = &sc1200_dma_ops,
28026diff -urNp linux-3.1.1/drivers/ide/scc_pata.c linux-3.1.1/drivers/ide/scc_pata.c
28027--- linux-3.1.1/drivers/ide/scc_pata.c 2011-11-11 15:19:27.000000000 -0500
28028+++ linux-3.1.1/drivers/ide/scc_pata.c 2011-11-16 18:39:07.000000000 -0500
28029@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_
28030 .dma_sff_read_status = scc_dma_sff_read_status,
28031 };
28032
28033-static const struct ide_port_info scc_chipset __devinitdata = {
28034+static const struct ide_port_info scc_chipset __devinitconst = {
28035 .name = "sccIDE",
28036 .init_iops = init_iops_scc,
28037 .init_dma = scc_init_dma,
28038diff -urNp linux-3.1.1/drivers/ide/serverworks.c linux-3.1.1/drivers/ide/serverworks.c
28039--- linux-3.1.1/drivers/ide/serverworks.c 2011-11-11 15:19:27.000000000 -0500
28040+++ linux-3.1.1/drivers/ide/serverworks.c 2011-11-16 18:39:07.000000000 -0500
28041@@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_p
28042 .cable_detect = svwks_cable_detect,
28043 };
28044
28045-static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
28046+static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
28047 { /* 0: OSB4 */
28048 .name = DRV_NAME,
28049 .init_chipset = init_chipset_svwks,
28050diff -urNp linux-3.1.1/drivers/ide/setup-pci.c linux-3.1.1/drivers/ide/setup-pci.c
28051--- linux-3.1.1/drivers/ide/setup-pci.c 2011-11-11 15:19:27.000000000 -0500
28052+++ linux-3.1.1/drivers/ide/setup-pci.c 2011-11-16 18:40:10.000000000 -0500
28053@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
28054 int ret, i, n_ports = dev2 ? 4 : 2;
28055 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
28056
28057+ pax_track_stack();
28058+
28059 for (i = 0; i < n_ports / 2; i++) {
28060 ret = ide_setup_pci_controller(pdev[i], d, !i);
28061 if (ret < 0)
28062diff -urNp linux-3.1.1/drivers/ide/siimage.c linux-3.1.1/drivers/ide/siimage.c
28063--- linux-3.1.1/drivers/ide/siimage.c 2011-11-11 15:19:27.000000000 -0500
28064+++ linux-3.1.1/drivers/ide/siimage.c 2011-11-16 18:39:07.000000000 -0500
28065@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_
28066 .udma_mask = ATA_UDMA6, \
28067 }
28068
28069-static const struct ide_port_info siimage_chipsets[] __devinitdata = {
28070+static const struct ide_port_info siimage_chipsets[] __devinitconst = {
28071 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
28072 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
28073 };
28074diff -urNp linux-3.1.1/drivers/ide/sis5513.c linux-3.1.1/drivers/ide/sis5513.c
28075--- linux-3.1.1/drivers/ide/sis5513.c 2011-11-11 15:19:27.000000000 -0500
28076+++ linux-3.1.1/drivers/ide/sis5513.c 2011-11-16 18:39:07.000000000 -0500
28077@@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata
28078 .cable_detect = sis_cable_detect,
28079 };
28080
28081-static const struct ide_port_info sis5513_chipset __devinitdata = {
28082+static const struct ide_port_info sis5513_chipset __devinitconst = {
28083 .name = DRV_NAME,
28084 .init_chipset = init_chipset_sis5513,
28085 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
28086diff -urNp linux-3.1.1/drivers/ide/sl82c105.c linux-3.1.1/drivers/ide/sl82c105.c
28087--- linux-3.1.1/drivers/ide/sl82c105.c 2011-11-11 15:19:27.000000000 -0500
28088+++ linux-3.1.1/drivers/ide/sl82c105.c 2011-11-16 18:39:07.000000000 -0500
28089@@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105
28090 .dma_sff_read_status = ide_dma_sff_read_status,
28091 };
28092
28093-static const struct ide_port_info sl82c105_chipset __devinitdata = {
28094+static const struct ide_port_info sl82c105_chipset __devinitconst = {
28095 .name = DRV_NAME,
28096 .init_chipset = init_chipset_sl82c105,
28097 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
28098diff -urNp linux-3.1.1/drivers/ide/slc90e66.c linux-3.1.1/drivers/ide/slc90e66.c
28099--- linux-3.1.1/drivers/ide/slc90e66.c 2011-11-11 15:19:27.000000000 -0500
28100+++ linux-3.1.1/drivers/ide/slc90e66.c 2011-11-16 18:39:07.000000000 -0500
28101@@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e6
28102 .cable_detect = slc90e66_cable_detect,
28103 };
28104
28105-static const struct ide_port_info slc90e66_chipset __devinitdata = {
28106+static const struct ide_port_info slc90e66_chipset __devinitconst = {
28107 .name = DRV_NAME,
28108 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
28109 .port_ops = &slc90e66_port_ops,
28110diff -urNp linux-3.1.1/drivers/ide/tc86c001.c linux-3.1.1/drivers/ide/tc86c001.c
28111--- linux-3.1.1/drivers/ide/tc86c001.c 2011-11-11 15:19:27.000000000 -0500
28112+++ linux-3.1.1/drivers/ide/tc86c001.c 2011-11-16 18:39:07.000000000 -0500
28113@@ -191,7 +191,7 @@ static const struct ide_dma_ops tc86c001
28114 .dma_sff_read_status = ide_dma_sff_read_status,
28115 };
28116
28117-static const struct ide_port_info tc86c001_chipset __devinitdata = {
28118+static const struct ide_port_info tc86c001_chipset __devinitconst = {
28119 .name = DRV_NAME,
28120 .init_hwif = init_hwif_tc86c001,
28121 .port_ops = &tc86c001_port_ops,
28122diff -urNp linux-3.1.1/drivers/ide/triflex.c linux-3.1.1/drivers/ide/triflex.c
28123--- linux-3.1.1/drivers/ide/triflex.c 2011-11-11 15:19:27.000000000 -0500
28124+++ linux-3.1.1/drivers/ide/triflex.c 2011-11-16 18:39:07.000000000 -0500
28125@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex
28126 .set_dma_mode = triflex_set_mode,
28127 };
28128
28129-static const struct ide_port_info triflex_device __devinitdata = {
28130+static const struct ide_port_info triflex_device __devinitconst = {
28131 .name = DRV_NAME,
28132 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
28133 .port_ops = &triflex_port_ops,
28134diff -urNp linux-3.1.1/drivers/ide/trm290.c linux-3.1.1/drivers/ide/trm290.c
28135--- linux-3.1.1/drivers/ide/trm290.c 2011-11-11 15:19:27.000000000 -0500
28136+++ linux-3.1.1/drivers/ide/trm290.c 2011-11-16 18:39:07.000000000 -0500
28137@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops
28138 .dma_check = trm290_dma_check,
28139 };
28140
28141-static const struct ide_port_info trm290_chipset __devinitdata = {
28142+static const struct ide_port_info trm290_chipset __devinitconst = {
28143 .name = DRV_NAME,
28144 .init_hwif = init_hwif_trm290,
28145 .tp_ops = &trm290_tp_ops,
28146diff -urNp linux-3.1.1/drivers/ide/via82cxxx.c linux-3.1.1/drivers/ide/via82cxxx.c
28147--- linux-3.1.1/drivers/ide/via82cxxx.c 2011-11-11 15:19:27.000000000 -0500
28148+++ linux-3.1.1/drivers/ide/via82cxxx.c 2011-11-16 18:39:07.000000000 -0500
28149@@ -403,7 +403,7 @@ static const struct ide_port_ops via_por
28150 .cable_detect = via82cxxx_cable_detect,
28151 };
28152
28153-static const struct ide_port_info via82cxxx_chipset __devinitdata = {
28154+static const struct ide_port_info via82cxxx_chipset __devinitconst = {
28155 .name = DRV_NAME,
28156 .init_chipset = init_chipset_via82cxxx,
28157 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
28158diff -urNp linux-3.1.1/drivers/infiniband/core/cm.c linux-3.1.1/drivers/infiniband/core/cm.c
28159--- linux-3.1.1/drivers/infiniband/core/cm.c 2011-11-11 15:19:27.000000000 -0500
28160+++ linux-3.1.1/drivers/infiniband/core/cm.c 2011-11-16 18:39:07.000000000 -0500
28161@@ -113,7 +113,7 @@ static char const counter_group_names[CM
28162
28163 struct cm_counter_group {
28164 struct kobject obj;
28165- atomic_long_t counter[CM_ATTR_COUNT];
28166+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
28167 };
28168
28169 struct cm_counter_attribute {
28170@@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm
28171 struct ib_mad_send_buf *msg = NULL;
28172 int ret;
28173
28174- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28175+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28176 counter[CM_REQ_COUNTER]);
28177
28178 /* Quick state check to discard duplicate REQs. */
28179@@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm
28180 if (!cm_id_priv)
28181 return;
28182
28183- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28184+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28185 counter[CM_REP_COUNTER]);
28186 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
28187 if (ret)
28188@@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work
28189 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
28190 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
28191 spin_unlock_irq(&cm_id_priv->lock);
28192- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28193+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28194 counter[CM_RTU_COUNTER]);
28195 goto out;
28196 }
28197@@ -2115,7 +2115,7 @@ static int cm_dreq_handler(struct cm_wor
28198 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
28199 dreq_msg->local_comm_id);
28200 if (!cm_id_priv) {
28201- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28202+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28203 counter[CM_DREQ_COUNTER]);
28204 cm_issue_drep(work->port, work->mad_recv_wc);
28205 return -EINVAL;
28206@@ -2140,7 +2140,7 @@ static int cm_dreq_handler(struct cm_wor
28207 case IB_CM_MRA_REP_RCVD:
28208 break;
28209 case IB_CM_TIMEWAIT:
28210- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28211+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28212 counter[CM_DREQ_COUNTER]);
28213 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
28214 goto unlock;
28215@@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_wor
28216 cm_free_msg(msg);
28217 goto deref;
28218 case IB_CM_DREQ_RCVD:
28219- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28220+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28221 counter[CM_DREQ_COUNTER]);
28222 goto unlock;
28223 default:
28224@@ -2521,7 +2521,7 @@ static int cm_mra_handler(struct cm_work
28225 ib_modify_mad(cm_id_priv->av.port->mad_agent,
28226 cm_id_priv->msg, timeout)) {
28227 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
28228- atomic_long_inc(&work->port->
28229+ atomic_long_inc_unchecked(&work->port->
28230 counter_group[CM_RECV_DUPLICATES].
28231 counter[CM_MRA_COUNTER]);
28232 goto out;
28233@@ -2530,7 +2530,7 @@ static int cm_mra_handler(struct cm_work
28234 break;
28235 case IB_CM_MRA_REQ_RCVD:
28236 case IB_CM_MRA_REP_RCVD:
28237- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28238+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28239 counter[CM_MRA_COUNTER]);
28240 /* fall through */
28241 default:
28242@@ -2692,7 +2692,7 @@ static int cm_lap_handler(struct cm_work
28243 case IB_CM_LAP_IDLE:
28244 break;
28245 case IB_CM_MRA_LAP_SENT:
28246- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28247+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28248 counter[CM_LAP_COUNTER]);
28249 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
28250 goto unlock;
28251@@ -2708,7 +2708,7 @@ static int cm_lap_handler(struct cm_work
28252 cm_free_msg(msg);
28253 goto deref;
28254 case IB_CM_LAP_RCVD:
28255- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28256+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28257 counter[CM_LAP_COUNTER]);
28258 goto unlock;
28259 default:
28260@@ -2992,7 +2992,7 @@ static int cm_sidr_req_handler(struct cm
28261 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
28262 if (cur_cm_id_priv) {
28263 spin_unlock_irq(&cm.lock);
28264- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28265+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28266 counter[CM_SIDR_REQ_COUNTER]);
28267 goto out; /* Duplicate message. */
28268 }
28269@@ -3204,10 +3204,10 @@ static void cm_send_handler(struct ib_ma
28270 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
28271 msg->retries = 1;
28272
28273- atomic_long_add(1 + msg->retries,
28274+ atomic_long_add_unchecked(1 + msg->retries,
28275 &port->counter_group[CM_XMIT].counter[attr_index]);
28276 if (msg->retries)
28277- atomic_long_add(msg->retries,
28278+ atomic_long_add_unchecked(msg->retries,
28279 &port->counter_group[CM_XMIT_RETRIES].
28280 counter[attr_index]);
28281
28282@@ -3417,7 +3417,7 @@ static void cm_recv_handler(struct ib_ma
28283 }
28284
28285 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
28286- atomic_long_inc(&port->counter_group[CM_RECV].
28287+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
28288 counter[attr_id - CM_ATTR_ID_OFFSET]);
28289
28290 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
28291@@ -3615,7 +3615,7 @@ static ssize_t cm_show_counter(struct ko
28292 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
28293
28294 return sprintf(buf, "%ld\n",
28295- atomic_long_read(&group->counter[cm_attr->index]));
28296+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
28297 }
28298
28299 static const struct sysfs_ops cm_counter_ops = {
28300diff -urNp linux-3.1.1/drivers/infiniband/core/fmr_pool.c linux-3.1.1/drivers/infiniband/core/fmr_pool.c
28301--- linux-3.1.1/drivers/infiniband/core/fmr_pool.c 2011-11-11 15:19:27.000000000 -0500
28302+++ linux-3.1.1/drivers/infiniband/core/fmr_pool.c 2011-11-16 18:39:07.000000000 -0500
28303@@ -97,8 +97,8 @@ struct ib_fmr_pool {
28304
28305 struct task_struct *thread;
28306
28307- atomic_t req_ser;
28308- atomic_t flush_ser;
28309+ atomic_unchecked_t req_ser;
28310+ atomic_unchecked_t flush_ser;
28311
28312 wait_queue_head_t force_wait;
28313 };
28314@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
28315 struct ib_fmr_pool *pool = pool_ptr;
28316
28317 do {
28318- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
28319+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
28320 ib_fmr_batch_release(pool);
28321
28322- atomic_inc(&pool->flush_ser);
28323+ atomic_inc_unchecked(&pool->flush_ser);
28324 wake_up_interruptible(&pool->force_wait);
28325
28326 if (pool->flush_function)
28327@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
28328 }
28329
28330 set_current_state(TASK_INTERRUPTIBLE);
28331- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
28332+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
28333 !kthread_should_stop())
28334 schedule();
28335 __set_current_state(TASK_RUNNING);
28336@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
28337 pool->dirty_watermark = params->dirty_watermark;
28338 pool->dirty_len = 0;
28339 spin_lock_init(&pool->pool_lock);
28340- atomic_set(&pool->req_ser, 0);
28341- atomic_set(&pool->flush_ser, 0);
28342+ atomic_set_unchecked(&pool->req_ser, 0);
28343+ atomic_set_unchecked(&pool->flush_ser, 0);
28344 init_waitqueue_head(&pool->force_wait);
28345
28346 pool->thread = kthread_run(ib_fmr_cleanup_thread,
28347@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
28348 }
28349 spin_unlock_irq(&pool->pool_lock);
28350
28351- serial = atomic_inc_return(&pool->req_ser);
28352+ serial = atomic_inc_return_unchecked(&pool->req_ser);
28353 wake_up_process(pool->thread);
28354
28355 if (wait_event_interruptible(pool->force_wait,
28356- atomic_read(&pool->flush_ser) - serial >= 0))
28357+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
28358 return -EINTR;
28359
28360 return 0;
28361@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
28362 } else {
28363 list_add_tail(&fmr->list, &pool->dirty_list);
28364 if (++pool->dirty_len >= pool->dirty_watermark) {
28365- atomic_inc(&pool->req_ser);
28366+ atomic_inc_unchecked(&pool->req_ser);
28367 wake_up_process(pool->thread);
28368 }
28369 }
28370diff -urNp linux-3.1.1/drivers/infiniband/hw/cxgb4/mem.c linux-3.1.1/drivers/infiniband/hw/cxgb4/mem.c
28371--- linux-3.1.1/drivers/infiniband/hw/cxgb4/mem.c 2011-11-11 15:19:27.000000000 -0500
28372+++ linux-3.1.1/drivers/infiniband/hw/cxgb4/mem.c 2011-11-16 18:39:07.000000000 -0500
28373@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_r
28374 int err;
28375 struct fw_ri_tpte tpt;
28376 u32 stag_idx;
28377- static atomic_t key;
28378+ static atomic_unchecked_t key;
28379
28380 if (c4iw_fatal_error(rdev))
28381 return -EIO;
28382@@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_r
28383 &rdev->resource.tpt_fifo_lock);
28384 if (!stag_idx)
28385 return -ENOMEM;
28386- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
28387+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
28388 }
28389 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
28390 __func__, stag_state, type, pdid, stag_idx);
28391diff -urNp linux-3.1.1/drivers/infiniband/hw/ipath/ipath_fs.c linux-3.1.1/drivers/infiniband/hw/ipath/ipath_fs.c
28392--- linux-3.1.1/drivers/infiniband/hw/ipath/ipath_fs.c 2011-11-11 15:19:27.000000000 -0500
28393+++ linux-3.1.1/drivers/infiniband/hw/ipath/ipath_fs.c 2011-11-16 18:40:10.000000000 -0500
28394@@ -113,6 +113,8 @@ static ssize_t atomic_counters_read(stru
28395 struct infinipath_counters counters;
28396 struct ipath_devdata *dd;
28397
28398+ pax_track_stack();
28399+
28400 dd = file->f_path.dentry->d_inode->i_private;
28401 dd->ipath_f_read_counters(dd, &counters);
28402
28403diff -urNp linux-3.1.1/drivers/infiniband/hw/ipath/ipath_rc.c linux-3.1.1/drivers/infiniband/hw/ipath/ipath_rc.c
28404--- linux-3.1.1/drivers/infiniband/hw/ipath/ipath_rc.c 2011-11-11 15:19:27.000000000 -0500
28405+++ linux-3.1.1/drivers/infiniband/hw/ipath/ipath_rc.c 2011-11-16 18:39:07.000000000 -0500
28406@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *de
28407 struct ib_atomic_eth *ateth;
28408 struct ipath_ack_entry *e;
28409 u64 vaddr;
28410- atomic64_t *maddr;
28411+ atomic64_unchecked_t *maddr;
28412 u64 sdata;
28413 u32 rkey;
28414 u8 next;
28415@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *de
28416 IB_ACCESS_REMOTE_ATOMIC)))
28417 goto nack_acc_unlck;
28418 /* Perform atomic OP and save result. */
28419- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
28420+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
28421 sdata = be64_to_cpu(ateth->swap_data);
28422 e = &qp->s_ack_queue[qp->r_head_ack_queue];
28423 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
28424- (u64) atomic64_add_return(sdata, maddr) - sdata :
28425+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
28426 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
28427 be64_to_cpu(ateth->compare_data),
28428 sdata);
28429diff -urNp linux-3.1.1/drivers/infiniband/hw/ipath/ipath_ruc.c linux-3.1.1/drivers/infiniband/hw/ipath/ipath_ruc.c
28430--- linux-3.1.1/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-11-11 15:19:27.000000000 -0500
28431+++ linux-3.1.1/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-11-16 18:39:07.000000000 -0500
28432@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ip
28433 unsigned long flags;
28434 struct ib_wc wc;
28435 u64 sdata;
28436- atomic64_t *maddr;
28437+ atomic64_unchecked_t *maddr;
28438 enum ib_wc_status send_status;
28439
28440 /*
28441@@ -382,11 +382,11 @@ again:
28442 IB_ACCESS_REMOTE_ATOMIC)))
28443 goto acc_err;
28444 /* Perform atomic OP and save result. */
28445- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
28446+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
28447 sdata = wqe->wr.wr.atomic.compare_add;
28448 *(u64 *) sqp->s_sge.sge.vaddr =
28449 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
28450- (u64) atomic64_add_return(sdata, maddr) - sdata :
28451+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
28452 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
28453 sdata, wqe->wr.wr.atomic.swap);
28454 goto send_comp;
28455diff -urNp linux-3.1.1/drivers/infiniband/hw/nes/nes.c linux-3.1.1/drivers/infiniband/hw/nes/nes.c
28456--- linux-3.1.1/drivers/infiniband/hw/nes/nes.c 2011-11-11 15:19:27.000000000 -0500
28457+++ linux-3.1.1/drivers/infiniband/hw/nes/nes.c 2011-11-16 18:39:07.000000000 -0500
28458@@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
28459 LIST_HEAD(nes_adapter_list);
28460 static LIST_HEAD(nes_dev_list);
28461
28462-atomic_t qps_destroyed;
28463+atomic_unchecked_t qps_destroyed;
28464
28465 static unsigned int ee_flsh_adapter;
28466 static unsigned int sysfs_nonidx_addr;
28467@@ -275,7 +275,7 @@ static void nes_cqp_rem_ref_callback(str
28468 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
28469 struct nes_adapter *nesadapter = nesdev->nesadapter;
28470
28471- atomic_inc(&qps_destroyed);
28472+ atomic_inc_unchecked(&qps_destroyed);
28473
28474 /* Free the control structures */
28475
28476diff -urNp linux-3.1.1/drivers/infiniband/hw/nes/nes_cm.c linux-3.1.1/drivers/infiniband/hw/nes/nes_cm.c
28477--- linux-3.1.1/drivers/infiniband/hw/nes/nes_cm.c 2011-11-11 15:19:27.000000000 -0500
28478+++ linux-3.1.1/drivers/infiniband/hw/nes/nes_cm.c 2011-11-16 18:39:07.000000000 -0500
28479@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
28480 u32 cm_packets_retrans;
28481 u32 cm_packets_created;
28482 u32 cm_packets_received;
28483-atomic_t cm_listens_created;
28484-atomic_t cm_listens_destroyed;
28485+atomic_unchecked_t cm_listens_created;
28486+atomic_unchecked_t cm_listens_destroyed;
28487 u32 cm_backlog_drops;
28488-atomic_t cm_loopbacks;
28489-atomic_t cm_nodes_created;
28490-atomic_t cm_nodes_destroyed;
28491-atomic_t cm_accel_dropped_pkts;
28492-atomic_t cm_resets_recvd;
28493+atomic_unchecked_t cm_loopbacks;
28494+atomic_unchecked_t cm_nodes_created;
28495+atomic_unchecked_t cm_nodes_destroyed;
28496+atomic_unchecked_t cm_accel_dropped_pkts;
28497+atomic_unchecked_t cm_resets_recvd;
28498
28499 static inline int mini_cm_accelerated(struct nes_cm_core *,
28500 struct nes_cm_node *);
28501@@ -151,13 +151,13 @@ static struct nes_cm_ops nes_cm_api = {
28502
28503 static struct nes_cm_core *g_cm_core;
28504
28505-atomic_t cm_connects;
28506-atomic_t cm_accepts;
28507-atomic_t cm_disconnects;
28508-atomic_t cm_closes;
28509-atomic_t cm_connecteds;
28510-atomic_t cm_connect_reqs;
28511-atomic_t cm_rejects;
28512+atomic_unchecked_t cm_connects;
28513+atomic_unchecked_t cm_accepts;
28514+atomic_unchecked_t cm_disconnects;
28515+atomic_unchecked_t cm_closes;
28516+atomic_unchecked_t cm_connecteds;
28517+atomic_unchecked_t cm_connect_reqs;
28518+atomic_unchecked_t cm_rejects;
28519
28520
28521 /**
28522@@ -1045,7 +1045,7 @@ static int mini_cm_dec_refcnt_listen(str
28523 kfree(listener);
28524 listener = NULL;
28525 ret = 0;
28526- atomic_inc(&cm_listens_destroyed);
28527+ atomic_inc_unchecked(&cm_listens_destroyed);
28528 } else {
28529 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
28530 }
28531@@ -1240,7 +1240,7 @@ static struct nes_cm_node *make_cm_node(
28532 cm_node->rem_mac);
28533
28534 add_hte_node(cm_core, cm_node);
28535- atomic_inc(&cm_nodes_created);
28536+ atomic_inc_unchecked(&cm_nodes_created);
28537
28538 return cm_node;
28539 }
28540@@ -1298,7 +1298,7 @@ static int rem_ref_cm_node(struct nes_cm
28541 }
28542
28543 atomic_dec(&cm_core->node_cnt);
28544- atomic_inc(&cm_nodes_destroyed);
28545+ atomic_inc_unchecked(&cm_nodes_destroyed);
28546 nesqp = cm_node->nesqp;
28547 if (nesqp) {
28548 nesqp->cm_node = NULL;
28549@@ -1365,7 +1365,7 @@ static int process_options(struct nes_cm
28550
28551 static void drop_packet(struct sk_buff *skb)
28552 {
28553- atomic_inc(&cm_accel_dropped_pkts);
28554+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
28555 dev_kfree_skb_any(skb);
28556 }
28557
28558@@ -1428,7 +1428,7 @@ static void handle_rst_pkt(struct nes_cm
28559 {
28560
28561 int reset = 0; /* whether to send reset in case of err.. */
28562- atomic_inc(&cm_resets_recvd);
28563+ atomic_inc_unchecked(&cm_resets_recvd);
28564 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
28565 " refcnt=%d\n", cm_node, cm_node->state,
28566 atomic_read(&cm_node->ref_count));
28567@@ -2057,7 +2057,7 @@ static struct nes_cm_node *mini_cm_conne
28568 rem_ref_cm_node(cm_node->cm_core, cm_node);
28569 return NULL;
28570 }
28571- atomic_inc(&cm_loopbacks);
28572+ atomic_inc_unchecked(&cm_loopbacks);
28573 loopbackremotenode->loopbackpartner = cm_node;
28574 loopbackremotenode->tcp_cntxt.rcv_wscale =
28575 NES_CM_DEFAULT_RCV_WND_SCALE;
28576@@ -2332,7 +2332,7 @@ static int mini_cm_recv_pkt(struct nes_c
28577 add_ref_cm_node(cm_node);
28578 } else if (cm_node->state == NES_CM_STATE_TSA) {
28579 rem_ref_cm_node(cm_core, cm_node);
28580- atomic_inc(&cm_accel_dropped_pkts);
28581+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
28582 dev_kfree_skb_any(skb);
28583 break;
28584 }
28585@@ -2638,7 +2638,7 @@ static int nes_cm_disconn_true(struct ne
28586
28587 if ((cm_id) && (cm_id->event_handler)) {
28588 if (issue_disconn) {
28589- atomic_inc(&cm_disconnects);
28590+ atomic_inc_unchecked(&cm_disconnects);
28591 cm_event.event = IW_CM_EVENT_DISCONNECT;
28592 cm_event.status = disconn_status;
28593 cm_event.local_addr = cm_id->local_addr;
28594@@ -2660,7 +2660,7 @@ static int nes_cm_disconn_true(struct ne
28595 }
28596
28597 if (issue_close) {
28598- atomic_inc(&cm_closes);
28599+ atomic_inc_unchecked(&cm_closes);
28600 nes_disconnect(nesqp, 1);
28601
28602 cm_id->provider_data = nesqp;
28603@@ -2791,7 +2791,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
28604
28605 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
28606 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
28607- atomic_inc(&cm_accepts);
28608+ atomic_inc_unchecked(&cm_accepts);
28609
28610 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
28611 netdev_refcnt_read(nesvnic->netdev));
28612@@ -3001,7 +3001,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
28613
28614 struct nes_cm_core *cm_core;
28615
28616- atomic_inc(&cm_rejects);
28617+ atomic_inc_unchecked(&cm_rejects);
28618 cm_node = (struct nes_cm_node *) cm_id->provider_data;
28619 loopback = cm_node->loopbackpartner;
28620 cm_core = cm_node->cm_core;
28621@@ -3067,7 +3067,7 @@ int nes_connect(struct iw_cm_id *cm_id,
28622 ntohl(cm_id->local_addr.sin_addr.s_addr),
28623 ntohs(cm_id->local_addr.sin_port));
28624
28625- atomic_inc(&cm_connects);
28626+ atomic_inc_unchecked(&cm_connects);
28627 nesqp->active_conn = 1;
28628
28629 /* cache the cm_id in the qp */
28630@@ -3173,7 +3173,7 @@ int nes_create_listen(struct iw_cm_id *c
28631 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
28632 return err;
28633 }
28634- atomic_inc(&cm_listens_created);
28635+ atomic_inc_unchecked(&cm_listens_created);
28636 }
28637
28638 cm_id->add_ref(cm_id);
28639@@ -3278,7 +3278,7 @@ static void cm_event_connected(struct ne
28640 if (nesqp->destroyed) {
28641 return;
28642 }
28643- atomic_inc(&cm_connecteds);
28644+ atomic_inc_unchecked(&cm_connecteds);
28645 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
28646 " local port 0x%04X. jiffies = %lu.\n",
28647 nesqp->hwqp.qp_id,
28648@@ -3493,7 +3493,7 @@ static void cm_event_reset(struct nes_cm
28649
28650 cm_id->add_ref(cm_id);
28651 ret = cm_id->event_handler(cm_id, &cm_event);
28652- atomic_inc(&cm_closes);
28653+ atomic_inc_unchecked(&cm_closes);
28654 cm_event.event = IW_CM_EVENT_CLOSE;
28655 cm_event.status = 0;
28656 cm_event.provider_data = cm_id->provider_data;
28657@@ -3529,7 +3529,7 @@ static void cm_event_mpa_req(struct nes_
28658 return;
28659 cm_id = cm_node->cm_id;
28660
28661- atomic_inc(&cm_connect_reqs);
28662+ atomic_inc_unchecked(&cm_connect_reqs);
28663 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
28664 cm_node, cm_id, jiffies);
28665
28666@@ -3567,7 +3567,7 @@ static void cm_event_mpa_reject(struct n
28667 return;
28668 cm_id = cm_node->cm_id;
28669
28670- atomic_inc(&cm_connect_reqs);
28671+ atomic_inc_unchecked(&cm_connect_reqs);
28672 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
28673 cm_node, cm_id, jiffies);
28674
28675diff -urNp linux-3.1.1/drivers/infiniband/hw/nes/nes.h linux-3.1.1/drivers/infiniband/hw/nes/nes.h
28676--- linux-3.1.1/drivers/infiniband/hw/nes/nes.h 2011-11-11 15:19:27.000000000 -0500
28677+++ linux-3.1.1/drivers/infiniband/hw/nes/nes.h 2011-11-16 18:39:07.000000000 -0500
28678@@ -175,17 +175,17 @@ extern unsigned int nes_debug_level;
28679 extern unsigned int wqm_quanta;
28680 extern struct list_head nes_adapter_list;
28681
28682-extern atomic_t cm_connects;
28683-extern atomic_t cm_accepts;
28684-extern atomic_t cm_disconnects;
28685-extern atomic_t cm_closes;
28686-extern atomic_t cm_connecteds;
28687-extern atomic_t cm_connect_reqs;
28688-extern atomic_t cm_rejects;
28689-extern atomic_t mod_qp_timouts;
28690-extern atomic_t qps_created;
28691-extern atomic_t qps_destroyed;
28692-extern atomic_t sw_qps_destroyed;
28693+extern atomic_unchecked_t cm_connects;
28694+extern atomic_unchecked_t cm_accepts;
28695+extern atomic_unchecked_t cm_disconnects;
28696+extern atomic_unchecked_t cm_closes;
28697+extern atomic_unchecked_t cm_connecteds;
28698+extern atomic_unchecked_t cm_connect_reqs;
28699+extern atomic_unchecked_t cm_rejects;
28700+extern atomic_unchecked_t mod_qp_timouts;
28701+extern atomic_unchecked_t qps_created;
28702+extern atomic_unchecked_t qps_destroyed;
28703+extern atomic_unchecked_t sw_qps_destroyed;
28704 extern u32 mh_detected;
28705 extern u32 mh_pauses_sent;
28706 extern u32 cm_packets_sent;
28707@@ -194,14 +194,14 @@ extern u32 cm_packets_created;
28708 extern u32 cm_packets_received;
28709 extern u32 cm_packets_dropped;
28710 extern u32 cm_packets_retrans;
28711-extern atomic_t cm_listens_created;
28712-extern atomic_t cm_listens_destroyed;
28713+extern atomic_unchecked_t cm_listens_created;
28714+extern atomic_unchecked_t cm_listens_destroyed;
28715 extern u32 cm_backlog_drops;
28716-extern atomic_t cm_loopbacks;
28717-extern atomic_t cm_nodes_created;
28718-extern atomic_t cm_nodes_destroyed;
28719-extern atomic_t cm_accel_dropped_pkts;
28720-extern atomic_t cm_resets_recvd;
28721+extern atomic_unchecked_t cm_loopbacks;
28722+extern atomic_unchecked_t cm_nodes_created;
28723+extern atomic_unchecked_t cm_nodes_destroyed;
28724+extern atomic_unchecked_t cm_accel_dropped_pkts;
28725+extern atomic_unchecked_t cm_resets_recvd;
28726
28727 extern u32 int_mod_timer_init;
28728 extern u32 int_mod_cq_depth_256;
28729diff -urNp linux-3.1.1/drivers/infiniband/hw/nes/nes_nic.c linux-3.1.1/drivers/infiniband/hw/nes/nes_nic.c
28730--- linux-3.1.1/drivers/infiniband/hw/nes/nes_nic.c 2011-11-11 15:19:27.000000000 -0500
28731+++ linux-3.1.1/drivers/infiniband/hw/nes/nes_nic.c 2011-11-16 18:39:07.000000000 -0500
28732@@ -1274,31 +1274,31 @@ static void nes_netdev_get_ethtool_stats
28733 target_stat_values[++index] = mh_detected;
28734 target_stat_values[++index] = mh_pauses_sent;
28735 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
28736- target_stat_values[++index] = atomic_read(&cm_connects);
28737- target_stat_values[++index] = atomic_read(&cm_accepts);
28738- target_stat_values[++index] = atomic_read(&cm_disconnects);
28739- target_stat_values[++index] = atomic_read(&cm_connecteds);
28740- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
28741- target_stat_values[++index] = atomic_read(&cm_rejects);
28742- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
28743- target_stat_values[++index] = atomic_read(&qps_created);
28744- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
28745- target_stat_values[++index] = atomic_read(&qps_destroyed);
28746- target_stat_values[++index] = atomic_read(&cm_closes);
28747+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
28748+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
28749+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
28750+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
28751+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
28752+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
28753+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
28754+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
28755+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
28756+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
28757+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
28758 target_stat_values[++index] = cm_packets_sent;
28759 target_stat_values[++index] = cm_packets_bounced;
28760 target_stat_values[++index] = cm_packets_created;
28761 target_stat_values[++index] = cm_packets_received;
28762 target_stat_values[++index] = cm_packets_dropped;
28763 target_stat_values[++index] = cm_packets_retrans;
28764- target_stat_values[++index] = atomic_read(&cm_listens_created);
28765- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
28766+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
28767+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
28768 target_stat_values[++index] = cm_backlog_drops;
28769- target_stat_values[++index] = atomic_read(&cm_loopbacks);
28770- target_stat_values[++index] = atomic_read(&cm_nodes_created);
28771- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
28772- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
28773- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
28774+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
28775+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
28776+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
28777+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
28778+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
28779 target_stat_values[++index] = nesadapter->free_4kpbl;
28780 target_stat_values[++index] = nesadapter->free_256pbl;
28781 target_stat_values[++index] = int_mod_timer_init;
28782diff -urNp linux-3.1.1/drivers/infiniband/hw/nes/nes_verbs.c linux-3.1.1/drivers/infiniband/hw/nes/nes_verbs.c
28783--- linux-3.1.1/drivers/infiniband/hw/nes/nes_verbs.c 2011-11-11 15:19:27.000000000 -0500
28784+++ linux-3.1.1/drivers/infiniband/hw/nes/nes_verbs.c 2011-11-16 18:39:07.000000000 -0500
28785@@ -46,9 +46,9 @@
28786
28787 #include <rdma/ib_umem.h>
28788
28789-atomic_t mod_qp_timouts;
28790-atomic_t qps_created;
28791-atomic_t sw_qps_destroyed;
28792+atomic_unchecked_t mod_qp_timouts;
28793+atomic_unchecked_t qps_created;
28794+atomic_unchecked_t sw_qps_destroyed;
28795
28796 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
28797
28798@@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struc
28799 if (init_attr->create_flags)
28800 return ERR_PTR(-EINVAL);
28801
28802- atomic_inc(&qps_created);
28803+ atomic_inc_unchecked(&qps_created);
28804 switch (init_attr->qp_type) {
28805 case IB_QPT_RC:
28806 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
28807@@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *
28808 struct iw_cm_event cm_event;
28809 int ret;
28810
28811- atomic_inc(&sw_qps_destroyed);
28812+ atomic_inc_unchecked(&sw_qps_destroyed);
28813 nesqp->destroyed = 1;
28814
28815 /* Blow away the connection if it exists. */
28816diff -urNp linux-3.1.1/drivers/infiniband/hw/qib/qib.h linux-3.1.1/drivers/infiniband/hw/qib/qib.h
28817--- linux-3.1.1/drivers/infiniband/hw/qib/qib.h 2011-11-11 15:19:27.000000000 -0500
28818+++ linux-3.1.1/drivers/infiniband/hw/qib/qib.h 2011-11-16 18:39:07.000000000 -0500
28819@@ -51,6 +51,7 @@
28820 #include <linux/completion.h>
28821 #include <linux/kref.h>
28822 #include <linux/sched.h>
28823+#include <linux/slab.h>
28824
28825 #include "qib_common.h"
28826 #include "qib_verbs.h"
28827diff -urNp linux-3.1.1/drivers/input/gameport/gameport.c linux-3.1.1/drivers/input/gameport/gameport.c
28828--- linux-3.1.1/drivers/input/gameport/gameport.c 2011-11-11 15:19:27.000000000 -0500
28829+++ linux-3.1.1/drivers/input/gameport/gameport.c 2011-11-16 18:39:07.000000000 -0500
28830@@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
28831 */
28832 static void gameport_init_port(struct gameport *gameport)
28833 {
28834- static atomic_t gameport_no = ATOMIC_INIT(0);
28835+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
28836
28837 __module_get(THIS_MODULE);
28838
28839 mutex_init(&gameport->drv_mutex);
28840 device_initialize(&gameport->dev);
28841 dev_set_name(&gameport->dev, "gameport%lu",
28842- (unsigned long)atomic_inc_return(&gameport_no) - 1);
28843+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
28844 gameport->dev.bus = &gameport_bus;
28845 gameport->dev.release = gameport_release_port;
28846 if (gameport->parent)
28847diff -urNp linux-3.1.1/drivers/input/input.c linux-3.1.1/drivers/input/input.c
28848--- linux-3.1.1/drivers/input/input.c 2011-11-11 15:19:27.000000000 -0500
28849+++ linux-3.1.1/drivers/input/input.c 2011-11-16 18:39:07.000000000 -0500
28850@@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struc
28851 */
28852 int input_register_device(struct input_dev *dev)
28853 {
28854- static atomic_t input_no = ATOMIC_INIT(0);
28855+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
28856 struct input_handler *handler;
28857 const char *path;
28858 int error;
28859@@ -1851,7 +1851,7 @@ int input_register_device(struct input_d
28860 dev->setkeycode = input_default_setkeycode;
28861
28862 dev_set_name(&dev->dev, "input%ld",
28863- (unsigned long) atomic_inc_return(&input_no) - 1);
28864+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
28865
28866 error = device_add(&dev->dev);
28867 if (error)
28868diff -urNp linux-3.1.1/drivers/input/joystick/sidewinder.c linux-3.1.1/drivers/input/joystick/sidewinder.c
28869--- linux-3.1.1/drivers/input/joystick/sidewinder.c 2011-11-11 15:19:27.000000000 -0500
28870+++ linux-3.1.1/drivers/input/joystick/sidewinder.c 2011-11-16 18:40:10.000000000 -0500
28871@@ -30,6 +30,7 @@
28872 #include <linux/kernel.h>
28873 #include <linux/module.h>
28874 #include <linux/slab.h>
28875+#include <linux/sched.h>
28876 #include <linux/init.h>
28877 #include <linux/input.h>
28878 #include <linux/gameport.h>
28879@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
28880 unsigned char buf[SW_LENGTH];
28881 int i;
28882
28883+ pax_track_stack();
28884+
28885 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
28886
28887 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
28888diff -urNp linux-3.1.1/drivers/input/joystick/xpad.c linux-3.1.1/drivers/input/joystick/xpad.c
28889--- linux-3.1.1/drivers/input/joystick/xpad.c 2011-11-11 15:19:27.000000000 -0500
28890+++ linux-3.1.1/drivers/input/joystick/xpad.c 2011-11-16 18:39:07.000000000 -0500
28891@@ -710,7 +710,7 @@ static void xpad_led_set(struct led_clas
28892
28893 static int xpad_led_probe(struct usb_xpad *xpad)
28894 {
28895- static atomic_t led_seq = ATOMIC_INIT(0);
28896+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
28897 long led_no;
28898 struct xpad_led *led;
28899 struct led_classdev *led_cdev;
28900@@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpa
28901 if (!led)
28902 return -ENOMEM;
28903
28904- led_no = (long)atomic_inc_return(&led_seq) - 1;
28905+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
28906
28907 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
28908 led->xpad = xpad;
28909diff -urNp linux-3.1.1/drivers/input/mousedev.c linux-3.1.1/drivers/input/mousedev.c
28910--- linux-3.1.1/drivers/input/mousedev.c 2011-11-11 15:19:27.000000000 -0500
28911+++ linux-3.1.1/drivers/input/mousedev.c 2011-11-16 18:39:07.000000000 -0500
28912@@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file
28913
28914 spin_unlock_irq(&client->packet_lock);
28915
28916- if (copy_to_user(buffer, data, count))
28917+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
28918 return -EFAULT;
28919
28920 return count;
28921diff -urNp linux-3.1.1/drivers/input/serio/serio.c linux-3.1.1/drivers/input/serio/serio.c
28922--- linux-3.1.1/drivers/input/serio/serio.c 2011-11-11 15:19:27.000000000 -0500
28923+++ linux-3.1.1/drivers/input/serio/serio.c 2011-11-16 18:39:07.000000000 -0500
28924@@ -497,7 +497,7 @@ static void serio_release_port(struct de
28925 */
28926 static void serio_init_port(struct serio *serio)
28927 {
28928- static atomic_t serio_no = ATOMIC_INIT(0);
28929+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
28930
28931 __module_get(THIS_MODULE);
28932
28933@@ -508,7 +508,7 @@ static void serio_init_port(struct serio
28934 mutex_init(&serio->drv_mutex);
28935 device_initialize(&serio->dev);
28936 dev_set_name(&serio->dev, "serio%ld",
28937- (long)atomic_inc_return(&serio_no) - 1);
28938+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
28939 serio->dev.bus = &serio_bus;
28940 serio->dev.release = serio_release_port;
28941 serio->dev.groups = serio_device_attr_groups;
28942diff -urNp linux-3.1.1/drivers/isdn/capi/capi.c linux-3.1.1/drivers/isdn/capi/capi.c
28943--- linux-3.1.1/drivers/isdn/capi/capi.c 2011-11-11 15:19:27.000000000 -0500
28944+++ linux-3.1.1/drivers/isdn/capi/capi.c 2011-11-16 18:39:07.000000000 -0500
28945@@ -83,8 +83,8 @@ struct capiminor {
28946
28947 struct capi20_appl *ap;
28948 u32 ncci;
28949- atomic_t datahandle;
28950- atomic_t msgid;
28951+ atomic_unchecked_t datahandle;
28952+ atomic_unchecked_t msgid;
28953
28954 struct tty_port port;
28955 int ttyinstop;
28956@@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *m
28957 capimsg_setu16(s, 2, mp->ap->applid);
28958 capimsg_setu8 (s, 4, CAPI_DATA_B3);
28959 capimsg_setu8 (s, 5, CAPI_RESP);
28960- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
28961+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
28962 capimsg_setu32(s, 8, mp->ncci);
28963 capimsg_setu16(s, 12, datahandle);
28964 }
28965@@ -518,14 +518,14 @@ static void handle_minor_send(struct cap
28966 mp->outbytes -= len;
28967 spin_unlock_bh(&mp->outlock);
28968
28969- datahandle = atomic_inc_return(&mp->datahandle);
28970+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
28971 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
28972 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
28973 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
28974 capimsg_setu16(skb->data, 2, mp->ap->applid);
28975 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
28976 capimsg_setu8 (skb->data, 5, CAPI_REQ);
28977- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
28978+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
28979 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
28980 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
28981 capimsg_setu16(skb->data, 16, len); /* Data length */
28982diff -urNp linux-3.1.1/drivers/isdn/gigaset/common.c linux-3.1.1/drivers/isdn/gigaset/common.c
28983--- linux-3.1.1/drivers/isdn/gigaset/common.c 2011-11-11 15:19:27.000000000 -0500
28984+++ linux-3.1.1/drivers/isdn/gigaset/common.c 2011-11-16 18:39:07.000000000 -0500
28985@@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct
28986 cs->commands_pending = 0;
28987 cs->cur_at_seq = 0;
28988 cs->gotfwver = -1;
28989- cs->open_count = 0;
28990+ local_set(&cs->open_count, 0);
28991 cs->dev = NULL;
28992 cs->tty = NULL;
28993 cs->tty_dev = NULL;
28994diff -urNp linux-3.1.1/drivers/isdn/gigaset/gigaset.h linux-3.1.1/drivers/isdn/gigaset/gigaset.h
28995--- linux-3.1.1/drivers/isdn/gigaset/gigaset.h 2011-11-11 15:19:27.000000000 -0500
28996+++ linux-3.1.1/drivers/isdn/gigaset/gigaset.h 2011-11-16 18:39:07.000000000 -0500
28997@@ -35,6 +35,7 @@
28998 #include <linux/tty_driver.h>
28999 #include <linux/list.h>
29000 #include <linux/atomic.h>
29001+#include <asm/local.h>
29002
29003 #define GIG_VERSION {0, 5, 0, 0}
29004 #define GIG_COMPAT {0, 4, 0, 0}
29005@@ -433,7 +434,7 @@ struct cardstate {
29006 spinlock_t cmdlock;
29007 unsigned curlen, cmdbytes;
29008
29009- unsigned open_count;
29010+ local_t open_count;
29011 struct tty_struct *tty;
29012 struct tasklet_struct if_wake_tasklet;
29013 unsigned control_state;
29014diff -urNp linux-3.1.1/drivers/isdn/gigaset/interface.c linux-3.1.1/drivers/isdn/gigaset/interface.c
29015--- linux-3.1.1/drivers/isdn/gigaset/interface.c 2011-11-11 15:19:27.000000000 -0500
29016+++ linux-3.1.1/drivers/isdn/gigaset/interface.c 2011-11-16 18:39:07.000000000 -0500
29017@@ -162,9 +162,7 @@ static int if_open(struct tty_struct *tt
29018 }
29019 tty->driver_data = cs;
29020
29021- ++cs->open_count;
29022-
29023- if (cs->open_count == 1) {
29024+ if (local_inc_return(&cs->open_count) == 1) {
29025 spin_lock_irqsave(&cs->lock, flags);
29026 cs->tty = tty;
29027 spin_unlock_irqrestore(&cs->lock, flags);
29028@@ -192,10 +190,10 @@ static void if_close(struct tty_struct *
29029
29030 if (!cs->connected)
29031 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
29032- else if (!cs->open_count)
29033+ else if (!local_read(&cs->open_count))
29034 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29035 else {
29036- if (!--cs->open_count) {
29037+ if (!local_dec_return(&cs->open_count)) {
29038 spin_lock_irqsave(&cs->lock, flags);
29039 cs->tty = NULL;
29040 spin_unlock_irqrestore(&cs->lock, flags);
29041@@ -230,7 +228,7 @@ static int if_ioctl(struct tty_struct *t
29042 if (!cs->connected) {
29043 gig_dbg(DEBUG_IF, "not connected");
29044 retval = -ENODEV;
29045- } else if (!cs->open_count)
29046+ } else if (!local_read(&cs->open_count))
29047 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29048 else {
29049 retval = 0;
29050@@ -360,7 +358,7 @@ static int if_write(struct tty_struct *t
29051 retval = -ENODEV;
29052 goto done;
29053 }
29054- if (!cs->open_count) {
29055+ if (!local_read(&cs->open_count)) {
29056 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29057 retval = -ENODEV;
29058 goto done;
29059@@ -413,7 +411,7 @@ static int if_write_room(struct tty_stru
29060 if (!cs->connected) {
29061 gig_dbg(DEBUG_IF, "not connected");
29062 retval = -ENODEV;
29063- } else if (!cs->open_count)
29064+ } else if (!local_read(&cs->open_count))
29065 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29066 else if (cs->mstate != MS_LOCKED) {
29067 dev_warn(cs->dev, "can't write to unlocked device\n");
29068@@ -443,7 +441,7 @@ static int if_chars_in_buffer(struct tty
29069
29070 if (!cs->connected)
29071 gig_dbg(DEBUG_IF, "not connected");
29072- else if (!cs->open_count)
29073+ else if (!local_read(&cs->open_count))
29074 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29075 else if (cs->mstate != MS_LOCKED)
29076 dev_warn(cs->dev, "can't write to unlocked device\n");
29077@@ -471,7 +469,7 @@ static void if_throttle(struct tty_struc
29078
29079 if (!cs->connected)
29080 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
29081- else if (!cs->open_count)
29082+ else if (!local_read(&cs->open_count))
29083 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29084 else
29085 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
29086@@ -495,7 +493,7 @@ static void if_unthrottle(struct tty_str
29087
29088 if (!cs->connected)
29089 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
29090- else if (!cs->open_count)
29091+ else if (!local_read(&cs->open_count))
29092 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29093 else
29094 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
29095@@ -526,7 +524,7 @@ static void if_set_termios(struct tty_st
29096 goto out;
29097 }
29098
29099- if (!cs->open_count) {
29100+ if (!local_read(&cs->open_count)) {
29101 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29102 goto out;
29103 }
29104diff -urNp linux-3.1.1/drivers/isdn/hardware/avm/b1.c linux-3.1.1/drivers/isdn/hardware/avm/b1.c
29105--- linux-3.1.1/drivers/isdn/hardware/avm/b1.c 2011-11-11 15:19:27.000000000 -0500
29106+++ linux-3.1.1/drivers/isdn/hardware/avm/b1.c 2011-11-16 18:39:07.000000000 -0500
29107@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capilo
29108 }
29109 if (left) {
29110 if (t4file->user) {
29111- if (copy_from_user(buf, dp, left))
29112+ if (left > sizeof buf || copy_from_user(buf, dp, left))
29113 return -EFAULT;
29114 } else {
29115 memcpy(buf, dp, left);
29116@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capilo
29117 }
29118 if (left) {
29119 if (config->user) {
29120- if (copy_from_user(buf, dp, left))
29121+ if (left > sizeof buf || copy_from_user(buf, dp, left))
29122 return -EFAULT;
29123 } else {
29124 memcpy(buf, dp, left);
29125diff -urNp linux-3.1.1/drivers/isdn/hardware/eicon/capidtmf.c linux-3.1.1/drivers/isdn/hardware/eicon/capidtmf.c
29126--- linux-3.1.1/drivers/isdn/hardware/eicon/capidtmf.c 2011-11-11 15:19:27.000000000 -0500
29127+++ linux-3.1.1/drivers/isdn/hardware/eicon/capidtmf.c 2011-11-16 18:40:10.000000000 -0500
29128@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
29129 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
29130 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
29131
29132+ pax_track_stack();
29133
29134 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
29135 {
29136diff -urNp linux-3.1.1/drivers/isdn/hardware/eicon/capifunc.c linux-3.1.1/drivers/isdn/hardware/eicon/capifunc.c
29137--- linux-3.1.1/drivers/isdn/hardware/eicon/capifunc.c 2011-11-11 15:19:27.000000000 -0500
29138+++ linux-3.1.1/drivers/isdn/hardware/eicon/capifunc.c 2011-11-16 18:40:10.000000000 -0500
29139@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
29140 IDI_SYNC_REQ req;
29141 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29142
29143+ pax_track_stack();
29144+
29145 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29146
29147 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29148diff -urNp linux-3.1.1/drivers/isdn/hardware/eicon/diddfunc.c linux-3.1.1/drivers/isdn/hardware/eicon/diddfunc.c
29149--- linux-3.1.1/drivers/isdn/hardware/eicon/diddfunc.c 2011-11-11 15:19:27.000000000 -0500
29150+++ linux-3.1.1/drivers/isdn/hardware/eicon/diddfunc.c 2011-11-16 18:40:10.000000000 -0500
29151@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
29152 IDI_SYNC_REQ req;
29153 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29154
29155+ pax_track_stack();
29156+
29157 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29158
29159 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29160diff -urNp linux-3.1.1/drivers/isdn/hardware/eicon/divasfunc.c linux-3.1.1/drivers/isdn/hardware/eicon/divasfunc.c
29161--- linux-3.1.1/drivers/isdn/hardware/eicon/divasfunc.c 2011-11-11 15:19:27.000000000 -0500
29162+++ linux-3.1.1/drivers/isdn/hardware/eicon/divasfunc.c 2011-11-16 18:40:10.000000000 -0500
29163@@ -160,6 +160,8 @@ static int DIVA_INIT_FUNCTION connect_di
29164 IDI_SYNC_REQ req;
29165 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29166
29167+ pax_track_stack();
29168+
29169 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29170
29171 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29172diff -urNp linux-3.1.1/drivers/isdn/hardware/eicon/divasync.h linux-3.1.1/drivers/isdn/hardware/eicon/divasync.h
29173--- linux-3.1.1/drivers/isdn/hardware/eicon/divasync.h 2011-11-11 15:19:27.000000000 -0500
29174+++ linux-3.1.1/drivers/isdn/hardware/eicon/divasync.h 2011-11-16 18:39:07.000000000 -0500
29175@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
29176 } diva_didd_add_adapter_t;
29177 typedef struct _diva_didd_remove_adapter {
29178 IDI_CALL p_request;
29179-} diva_didd_remove_adapter_t;
29180+} __no_const diva_didd_remove_adapter_t;
29181 typedef struct _diva_didd_read_adapter_array {
29182 void * buffer;
29183 dword length;
29184diff -urNp linux-3.1.1/drivers/isdn/hardware/eicon/idifunc.c linux-3.1.1/drivers/isdn/hardware/eicon/idifunc.c
29185--- linux-3.1.1/drivers/isdn/hardware/eicon/idifunc.c 2011-11-11 15:19:27.000000000 -0500
29186+++ linux-3.1.1/drivers/isdn/hardware/eicon/idifunc.c 2011-11-16 18:40:10.000000000 -0500
29187@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
29188 IDI_SYNC_REQ req;
29189 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29190
29191+ pax_track_stack();
29192+
29193 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29194
29195 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29196diff -urNp linux-3.1.1/drivers/isdn/hardware/eicon/message.c linux-3.1.1/drivers/isdn/hardware/eicon/message.c
29197--- linux-3.1.1/drivers/isdn/hardware/eicon/message.c 2011-11-11 15:19:27.000000000 -0500
29198+++ linux-3.1.1/drivers/isdn/hardware/eicon/message.c 2011-11-16 18:40:10.000000000 -0500
29199@@ -4886,6 +4886,8 @@ static void sig_ind(PLCI *plci)
29200 dword d;
29201 word w;
29202
29203+ pax_track_stack();
29204+
29205 a = plci->adapter;
29206 Id = ((word)plci->Id<<8)|a->Id;
29207 PUT_WORD(&SS_Ind[4],0x0000);
29208@@ -7480,6 +7482,8 @@ static word add_b1(PLCI *plci, API_PARSE
29209 word j, n, w;
29210 dword d;
29211
29212+ pax_track_stack();
29213+
29214
29215 for(i=0;i<8;i++) bp_parms[i].length = 0;
29216 for(i=0;i<2;i++) global_config[i].length = 0;
29217@@ -7954,6 +7958,8 @@ static word add_b23(PLCI *plci, API_PARS
29218 const byte llc3[] = {4,3,2,2,6,6,0};
29219 const byte header[] = {0,2,3,3,0,0,0};
29220
29221+ pax_track_stack();
29222+
29223 for(i=0;i<8;i++) bp_parms[i].length = 0;
29224 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
29225 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
29226@@ -14741,6 +14747,8 @@ static void group_optimization(DIVA_CAPI
29227 word appl_number_group_type[MAX_APPL];
29228 PLCI *auxplci;
29229
29230+ pax_track_stack();
29231+
29232 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
29233
29234 if(!a->group_optimization_enabled)
29235diff -urNp linux-3.1.1/drivers/isdn/hardware/eicon/mntfunc.c linux-3.1.1/drivers/isdn/hardware/eicon/mntfunc.c
29236--- linux-3.1.1/drivers/isdn/hardware/eicon/mntfunc.c 2011-11-11 15:19:27.000000000 -0500
29237+++ linux-3.1.1/drivers/isdn/hardware/eicon/mntfunc.c 2011-11-16 18:40:10.000000000 -0500
29238@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
29239 IDI_SYNC_REQ req;
29240 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29241
29242+ pax_track_stack();
29243+
29244 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29245
29246 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29247diff -urNp linux-3.1.1/drivers/isdn/hardware/eicon/xdi_adapter.h linux-3.1.1/drivers/isdn/hardware/eicon/xdi_adapter.h
29248--- linux-3.1.1/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-11-11 15:19:27.000000000 -0500
29249+++ linux-3.1.1/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-11-16 18:39:07.000000000 -0500
29250@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
29251 typedef struct _diva_os_idi_adapter_interface {
29252 diva_init_card_proc_t cleanup_adapter_proc;
29253 diva_cmd_card_proc_t cmd_proc;
29254-} diva_os_idi_adapter_interface_t;
29255+} __no_const diva_os_idi_adapter_interface_t;
29256
29257 typedef struct _diva_os_xdi_adapter {
29258 struct list_head link;
29259diff -urNp linux-3.1.1/drivers/isdn/i4l/isdn_common.c linux-3.1.1/drivers/isdn/i4l/isdn_common.c
29260--- linux-3.1.1/drivers/isdn/i4l/isdn_common.c 2011-11-11 15:19:27.000000000 -0500
29261+++ linux-3.1.1/drivers/isdn/i4l/isdn_common.c 2011-11-16 18:40:10.000000000 -0500
29262@@ -1286,6 +1286,8 @@ isdn_ioctl(struct file *file, uint cmd,
29263 } iocpar;
29264 void __user *argp = (void __user *)arg;
29265
29266+ pax_track_stack();
29267+
29268 #define name iocpar.name
29269 #define bname iocpar.bname
29270 #define iocts iocpar.iocts
29271diff -urNp linux-3.1.1/drivers/isdn/icn/icn.c linux-3.1.1/drivers/isdn/icn/icn.c
29272--- linux-3.1.1/drivers/isdn/icn/icn.c 2011-11-11 15:19:27.000000000 -0500
29273+++ linux-3.1.1/drivers/isdn/icn/icn.c 2011-11-16 18:39:07.000000000 -0500
29274@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len
29275 if (count > len)
29276 count = len;
29277 if (user) {
29278- if (copy_from_user(msg, buf, count))
29279+ if (count > sizeof msg || copy_from_user(msg, buf, count))
29280 return -EFAULT;
29281 } else
29282 memcpy(msg, buf, count);
29283diff -urNp linux-3.1.1/drivers/lguest/core.c linux-3.1.1/drivers/lguest/core.c
29284--- linux-3.1.1/drivers/lguest/core.c 2011-11-11 15:19:27.000000000 -0500
29285+++ linux-3.1.1/drivers/lguest/core.c 2011-11-16 18:39:07.000000000 -0500
29286@@ -92,9 +92,17 @@ static __init int map_switcher(void)
29287 * it's worked so far. The end address needs +1 because __get_vm_area
29288 * allocates an extra guard page, so we need space for that.
29289 */
29290+
29291+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
29292+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
29293+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
29294+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
29295+#else
29296 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
29297 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
29298 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
29299+#endif
29300+
29301 if (!switcher_vma) {
29302 err = -ENOMEM;
29303 printk("lguest: could not map switcher pages high\n");
29304@@ -119,7 +127,7 @@ static __init int map_switcher(void)
29305 * Now the Switcher is mapped at the right address, we can't fail!
29306 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
29307 */
29308- memcpy(switcher_vma->addr, start_switcher_text,
29309+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
29310 end_switcher_text - start_switcher_text);
29311
29312 printk(KERN_INFO "lguest: mapped switcher at %p\n",
29313diff -urNp linux-3.1.1/drivers/lguest/x86/core.c linux-3.1.1/drivers/lguest/x86/core.c
29314--- linux-3.1.1/drivers/lguest/x86/core.c 2011-11-11 15:19:27.000000000 -0500
29315+++ linux-3.1.1/drivers/lguest/x86/core.c 2011-11-16 18:39:07.000000000 -0500
29316@@ -59,7 +59,7 @@ static struct {
29317 /* Offset from where switcher.S was compiled to where we've copied it */
29318 static unsigned long switcher_offset(void)
29319 {
29320- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
29321+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
29322 }
29323
29324 /* This cpu's struct lguest_pages. */
29325@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
29326 * These copies are pretty cheap, so we do them unconditionally: */
29327 /* Save the current Host top-level page directory.
29328 */
29329+
29330+#ifdef CONFIG_PAX_PER_CPU_PGD
29331+ pages->state.host_cr3 = read_cr3();
29332+#else
29333 pages->state.host_cr3 = __pa(current->mm->pgd);
29334+#endif
29335+
29336 /*
29337 * Set up the Guest's page tables to see this CPU's pages (and no
29338 * other CPU's pages).
29339@@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
29340 * compiled-in switcher code and the high-mapped copy we just made.
29341 */
29342 for (i = 0; i < IDT_ENTRIES; i++)
29343- default_idt_entries[i] += switcher_offset();
29344+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
29345
29346 /*
29347 * Set up the Switcher's per-cpu areas.
29348@@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
29349 * it will be undisturbed when we switch. To change %cs and jump we
29350 * need this structure to feed to Intel's "lcall" instruction.
29351 */
29352- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
29353+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
29354 lguest_entry.segment = LGUEST_CS;
29355
29356 /*
29357diff -urNp linux-3.1.1/drivers/lguest/x86/switcher_32.S linux-3.1.1/drivers/lguest/x86/switcher_32.S
29358--- linux-3.1.1/drivers/lguest/x86/switcher_32.S 2011-11-11 15:19:27.000000000 -0500
29359+++ linux-3.1.1/drivers/lguest/x86/switcher_32.S 2011-11-16 18:39:07.000000000 -0500
29360@@ -87,6 +87,7 @@
29361 #include <asm/page.h>
29362 #include <asm/segment.h>
29363 #include <asm/lguest.h>
29364+#include <asm/processor-flags.h>
29365
29366 // We mark the start of the code to copy
29367 // It's placed in .text tho it's never run here
29368@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
29369 // Changes type when we load it: damn Intel!
29370 // For after we switch over our page tables
29371 // That entry will be read-only: we'd crash.
29372+
29373+#ifdef CONFIG_PAX_KERNEXEC
29374+ mov %cr0, %edx
29375+ xor $X86_CR0_WP, %edx
29376+ mov %edx, %cr0
29377+#endif
29378+
29379 movl $(GDT_ENTRY_TSS*8), %edx
29380 ltr %dx
29381
29382@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
29383 // Let's clear it again for our return.
29384 // The GDT descriptor of the Host
29385 // Points to the table after two "size" bytes
29386- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
29387+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
29388 // Clear "used" from type field (byte 5, bit 2)
29389- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
29390+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
29391+
29392+#ifdef CONFIG_PAX_KERNEXEC
29393+ mov %cr0, %eax
29394+ xor $X86_CR0_WP, %eax
29395+ mov %eax, %cr0
29396+#endif
29397
29398 // Once our page table's switched, the Guest is live!
29399 // The Host fades as we run this final step.
29400@@ -295,13 +309,12 @@ deliver_to_host:
29401 // I consulted gcc, and it gave
29402 // These instructions, which I gladly credit:
29403 leal (%edx,%ebx,8), %eax
29404- movzwl (%eax),%edx
29405- movl 4(%eax), %eax
29406- xorw %ax, %ax
29407- orl %eax, %edx
29408+ movl 4(%eax), %edx
29409+ movw (%eax), %dx
29410 // Now the address of the handler's in %edx
29411 // We call it now: its "iret" drops us home.
29412- jmp *%edx
29413+ ljmp $__KERNEL_CS, $1f
29414+1: jmp *%edx
29415
29416 // Every interrupt can come to us here
29417 // But we must truly tell each apart.
29418diff -urNp linux-3.1.1/drivers/macintosh/macio_asic.c linux-3.1.1/drivers/macintosh/macio_asic.c
29419--- linux-3.1.1/drivers/macintosh/macio_asic.c 2011-11-11 15:19:27.000000000 -0500
29420+++ linux-3.1.1/drivers/macintosh/macio_asic.c 2011-11-16 18:39:07.000000000 -0500
29421@@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(s
29422 * MacIO is matched against any Apple ID, it's probe() function
29423 * will then decide wether it applies or not
29424 */
29425-static const struct pci_device_id __devinitdata pci_ids [] = { {
29426+static const struct pci_device_id __devinitconst pci_ids [] = { {
29427 .vendor = PCI_VENDOR_ID_APPLE,
29428 .device = PCI_ANY_ID,
29429 .subvendor = PCI_ANY_ID,
29430diff -urNp linux-3.1.1/drivers/md/dm.c linux-3.1.1/drivers/md/dm.c
29431--- linux-3.1.1/drivers/md/dm.c 2011-11-11 15:19:27.000000000 -0500
29432+++ linux-3.1.1/drivers/md/dm.c 2011-11-16 18:39:07.000000000 -0500
29433@@ -165,9 +165,9 @@ struct mapped_device {
29434 /*
29435 * Event handling.
29436 */
29437- atomic_t event_nr;
29438+ atomic_unchecked_t event_nr;
29439 wait_queue_head_t eventq;
29440- atomic_t uevent_seq;
29441+ atomic_unchecked_t uevent_seq;
29442 struct list_head uevent_list;
29443 spinlock_t uevent_lock; /* Protect access to uevent_list */
29444
29445@@ -1843,8 +1843,8 @@ static struct mapped_device *alloc_dev(i
29446 rwlock_init(&md->map_lock);
29447 atomic_set(&md->holders, 1);
29448 atomic_set(&md->open_count, 0);
29449- atomic_set(&md->event_nr, 0);
29450- atomic_set(&md->uevent_seq, 0);
29451+ atomic_set_unchecked(&md->event_nr, 0);
29452+ atomic_set_unchecked(&md->uevent_seq, 0);
29453 INIT_LIST_HEAD(&md->uevent_list);
29454 spin_lock_init(&md->uevent_lock);
29455
29456@@ -1978,7 +1978,7 @@ static void event_callback(void *context
29457
29458 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
29459
29460- atomic_inc(&md->event_nr);
29461+ atomic_inc_unchecked(&md->event_nr);
29462 wake_up(&md->eventq);
29463 }
29464
29465@@ -2614,18 +2614,18 @@ int dm_kobject_uevent(struct mapped_devi
29466
29467 uint32_t dm_next_uevent_seq(struct mapped_device *md)
29468 {
29469- return atomic_add_return(1, &md->uevent_seq);
29470+ return atomic_add_return_unchecked(1, &md->uevent_seq);
29471 }
29472
29473 uint32_t dm_get_event_nr(struct mapped_device *md)
29474 {
29475- return atomic_read(&md->event_nr);
29476+ return atomic_read_unchecked(&md->event_nr);
29477 }
29478
29479 int dm_wait_event(struct mapped_device *md, int event_nr)
29480 {
29481 return wait_event_interruptible(md->eventq,
29482- (event_nr != atomic_read(&md->event_nr)));
29483+ (event_nr != atomic_read_unchecked(&md->event_nr)));
29484 }
29485
29486 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
29487diff -urNp linux-3.1.1/drivers/md/dm-ioctl.c linux-3.1.1/drivers/md/dm-ioctl.c
29488--- linux-3.1.1/drivers/md/dm-ioctl.c 2011-11-11 15:19:27.000000000 -0500
29489+++ linux-3.1.1/drivers/md/dm-ioctl.c 2011-11-16 18:39:07.000000000 -0500
29490@@ -1578,7 +1578,7 @@ static int validate_params(uint cmd, str
29491 cmd == DM_LIST_VERSIONS_CMD)
29492 return 0;
29493
29494- if ((cmd == DM_DEV_CREATE_CMD)) {
29495+ if (cmd == DM_DEV_CREATE_CMD) {
29496 if (!*param->name) {
29497 DMWARN("name not supplied when creating device");
29498 return -EINVAL;
29499diff -urNp linux-3.1.1/drivers/md/dm-raid1.c linux-3.1.1/drivers/md/dm-raid1.c
29500--- linux-3.1.1/drivers/md/dm-raid1.c 2011-11-11 15:19:27.000000000 -0500
29501+++ linux-3.1.1/drivers/md/dm-raid1.c 2011-11-16 18:39:07.000000000 -0500
29502@@ -40,7 +40,7 @@ enum dm_raid1_error {
29503
29504 struct mirror {
29505 struct mirror_set *ms;
29506- atomic_t error_count;
29507+ atomic_unchecked_t error_count;
29508 unsigned long error_type;
29509 struct dm_dev *dev;
29510 sector_t offset;
29511@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(s
29512 struct mirror *m;
29513
29514 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
29515- if (!atomic_read(&m->error_count))
29516+ if (!atomic_read_unchecked(&m->error_count))
29517 return m;
29518
29519 return NULL;
29520@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m
29521 * simple way to tell if a device has encountered
29522 * errors.
29523 */
29524- atomic_inc(&m->error_count);
29525+ atomic_inc_unchecked(&m->error_count);
29526
29527 if (test_and_set_bit(error_type, &m->error_type))
29528 return;
29529@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(stru
29530 struct mirror *m = get_default_mirror(ms);
29531
29532 do {
29533- if (likely(!atomic_read(&m->error_count)))
29534+ if (likely(!atomic_read_unchecked(&m->error_count)))
29535 return m;
29536
29537 if (m-- == ms->mirror)
29538@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
29539 {
29540 struct mirror *default_mirror = get_default_mirror(m->ms);
29541
29542- return !atomic_read(&default_mirror->error_count);
29543+ return !atomic_read_unchecked(&default_mirror->error_count);
29544 }
29545
29546 static int mirror_available(struct mirror_set *ms, struct bio *bio)
29547@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *
29548 */
29549 if (likely(region_in_sync(ms, region, 1)))
29550 m = choose_mirror(ms, bio->bi_sector);
29551- else if (m && atomic_read(&m->error_count))
29552+ else if (m && atomic_read_unchecked(&m->error_count))
29553 m = NULL;
29554
29555 if (likely(m))
29556@@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set
29557 }
29558
29559 ms->mirror[mirror].ms = ms;
29560- atomic_set(&(ms->mirror[mirror].error_count), 0);
29561+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
29562 ms->mirror[mirror].error_type = 0;
29563 ms->mirror[mirror].offset = offset;
29564
29565@@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_targ
29566 */
29567 static char device_status_char(struct mirror *m)
29568 {
29569- if (!atomic_read(&(m->error_count)))
29570+ if (!atomic_read_unchecked(&(m->error_count)))
29571 return 'A';
29572
29573 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
29574diff -urNp linux-3.1.1/drivers/md/dm-stripe.c linux-3.1.1/drivers/md/dm-stripe.c
29575--- linux-3.1.1/drivers/md/dm-stripe.c 2011-11-11 15:19:27.000000000 -0500
29576+++ linux-3.1.1/drivers/md/dm-stripe.c 2011-11-16 18:39:07.000000000 -0500
29577@@ -20,7 +20,7 @@ struct stripe {
29578 struct dm_dev *dev;
29579 sector_t physical_start;
29580
29581- atomic_t error_count;
29582+ atomic_unchecked_t error_count;
29583 };
29584
29585 struct stripe_c {
29586@@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *
29587 kfree(sc);
29588 return r;
29589 }
29590- atomic_set(&(sc->stripe[i].error_count), 0);
29591+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
29592 }
29593
29594 ti->private = sc;
29595@@ -314,7 +314,7 @@ static int stripe_status(struct dm_targe
29596 DMEMIT("%d ", sc->stripes);
29597 for (i = 0; i < sc->stripes; i++) {
29598 DMEMIT("%s ", sc->stripe[i].dev->name);
29599- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
29600+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
29601 'D' : 'A';
29602 }
29603 buffer[i] = '\0';
29604@@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_targe
29605 */
29606 for (i = 0; i < sc->stripes; i++)
29607 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
29608- atomic_inc(&(sc->stripe[i].error_count));
29609- if (atomic_read(&(sc->stripe[i].error_count)) <
29610+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
29611+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
29612 DM_IO_ERROR_THRESHOLD)
29613 schedule_work(&sc->trigger_event);
29614 }
29615diff -urNp linux-3.1.1/drivers/md/dm-table.c linux-3.1.1/drivers/md/dm-table.c
29616--- linux-3.1.1/drivers/md/dm-table.c 2011-11-11 15:19:27.000000000 -0500
29617+++ linux-3.1.1/drivers/md/dm-table.c 2011-11-16 18:39:07.000000000 -0500
29618@@ -389,7 +389,7 @@ static int device_area_is_invalid(struct
29619 if (!dev_size)
29620 return 0;
29621
29622- if ((start >= dev_size) || (start + len > dev_size)) {
29623+ if ((start >= dev_size) || (len > dev_size - start)) {
29624 DMWARN("%s: %s too small for target: "
29625 "start=%llu, len=%llu, dev_size=%llu",
29626 dm_device_name(ti->table->md), bdevname(bdev, b),
29627diff -urNp linux-3.1.1/drivers/md/md.c linux-3.1.1/drivers/md/md.c
29628--- linux-3.1.1/drivers/md/md.c 2011-11-11 15:19:27.000000000 -0500
29629+++ linux-3.1.1/drivers/md/md.c 2011-11-16 18:39:07.000000000 -0500
29630@@ -280,10 +280,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
29631 * start build, activate spare
29632 */
29633 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
29634-static atomic_t md_event_count;
29635+static atomic_unchecked_t md_event_count;
29636 void md_new_event(mddev_t *mddev)
29637 {
29638- atomic_inc(&md_event_count);
29639+ atomic_inc_unchecked(&md_event_count);
29640 wake_up(&md_event_waiters);
29641 }
29642 EXPORT_SYMBOL_GPL(md_new_event);
29643@@ -293,7 +293,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
29644 */
29645 static void md_new_event_inintr(mddev_t *mddev)
29646 {
29647- atomic_inc(&md_event_count);
29648+ atomic_inc_unchecked(&md_event_count);
29649 wake_up(&md_event_waiters);
29650 }
29651
29652@@ -1531,7 +1531,7 @@ static int super_1_load(mdk_rdev_t *rdev
29653
29654 rdev->preferred_minor = 0xffff;
29655 rdev->data_offset = le64_to_cpu(sb->data_offset);
29656- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
29657+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
29658
29659 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
29660 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
29661@@ -1748,7 +1748,7 @@ static void super_1_sync(mddev_t *mddev,
29662 else
29663 sb->resync_offset = cpu_to_le64(0);
29664
29665- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
29666+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
29667
29668 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
29669 sb->size = cpu_to_le64(mddev->dev_sectors);
29670@@ -2643,7 +2643,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
29671 static ssize_t
29672 errors_show(mdk_rdev_t *rdev, char *page)
29673 {
29674- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
29675+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
29676 }
29677
29678 static ssize_t
29679@@ -2652,7 +2652,7 @@ errors_store(mdk_rdev_t *rdev, const cha
29680 char *e;
29681 unsigned long n = simple_strtoul(buf, &e, 10);
29682 if (*buf && (*e == 0 || *e == '\n')) {
29683- atomic_set(&rdev->corrected_errors, n);
29684+ atomic_set_unchecked(&rdev->corrected_errors, n);
29685 return len;
29686 }
29687 return -EINVAL;
29688@@ -3042,8 +3042,8 @@ int md_rdev_init(mdk_rdev_t *rdev)
29689 rdev->sb_loaded = 0;
29690 rdev->bb_page = NULL;
29691 atomic_set(&rdev->nr_pending, 0);
29692- atomic_set(&rdev->read_errors, 0);
29693- atomic_set(&rdev->corrected_errors, 0);
29694+ atomic_set_unchecked(&rdev->read_errors, 0);
29695+ atomic_set_unchecked(&rdev->corrected_errors, 0);
29696
29697 INIT_LIST_HEAD(&rdev->same_set);
29698 init_waitqueue_head(&rdev->blocked_wait);
29699@@ -6667,7 +6667,7 @@ static int md_seq_show(struct seq_file *
29700
29701 spin_unlock(&pers_lock);
29702 seq_printf(seq, "\n");
29703- seq->poll_event = atomic_read(&md_event_count);
29704+ seq->poll_event = atomic_read_unchecked(&md_event_count);
29705 return 0;
29706 }
29707 if (v == (void*)2) {
29708@@ -6756,7 +6756,7 @@ static int md_seq_show(struct seq_file *
29709 chunk_kb ? "KB" : "B");
29710 if (bitmap->file) {
29711 seq_printf(seq, ", file: ");
29712- seq_path(seq, &bitmap->file->f_path, " \t\n");
29713+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
29714 }
29715
29716 seq_printf(seq, "\n");
29717@@ -6787,7 +6787,7 @@ static int md_seq_open(struct inode *ino
29718 return error;
29719
29720 seq = file->private_data;
29721- seq->poll_event = atomic_read(&md_event_count);
29722+ seq->poll_event = atomic_read_unchecked(&md_event_count);
29723 return error;
29724 }
29725
29726@@ -6801,7 +6801,7 @@ static unsigned int mdstat_poll(struct f
29727 /* always allow read */
29728 mask = POLLIN | POLLRDNORM;
29729
29730- if (seq->poll_event != atomic_read(&md_event_count))
29731+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
29732 mask |= POLLERR | POLLPRI;
29733 return mask;
29734 }
29735@@ -6845,7 +6845,7 @@ static int is_mddev_idle(mddev_t *mddev,
29736 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
29737 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
29738 (int)part_stat_read(&disk->part0, sectors[1]) -
29739- atomic_read(&disk->sync_io);
29740+ atomic_read_unchecked(&disk->sync_io);
29741 /* sync IO will cause sync_io to increase before the disk_stats
29742 * as sync_io is counted when a request starts, and
29743 * disk_stats is counted when it completes.
29744diff -urNp linux-3.1.1/drivers/md/md.h linux-3.1.1/drivers/md/md.h
29745--- linux-3.1.1/drivers/md/md.h 2011-11-11 15:19:27.000000000 -0500
29746+++ linux-3.1.1/drivers/md/md.h 2011-11-16 18:39:07.000000000 -0500
29747@@ -124,13 +124,13 @@ struct mdk_rdev_s
29748 * only maintained for arrays that
29749 * support hot removal
29750 */
29751- atomic_t read_errors; /* number of consecutive read errors that
29752+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
29753 * we have tried to ignore.
29754 */
29755 struct timespec last_read_error; /* monotonic time since our
29756 * last read error
29757 */
29758- atomic_t corrected_errors; /* number of corrected read errors,
29759+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
29760 * for reporting to userspace and storing
29761 * in superblock.
29762 */
29763@@ -415,7 +415,7 @@ static inline void rdev_dec_pending(mdk_
29764
29765 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
29766 {
29767- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
29768+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
29769 }
29770
29771 struct mdk_personality
29772diff -urNp linux-3.1.1/drivers/md/raid10.c linux-3.1.1/drivers/md/raid10.c
29773--- linux-3.1.1/drivers/md/raid10.c 2011-11-11 15:19:27.000000000 -0500
29774+++ linux-3.1.1/drivers/md/raid10.c 2011-11-16 18:39:07.000000000 -0500
29775@@ -1423,7 +1423,7 @@ static void end_sync_read(struct bio *bi
29776 /* The write handler will notice the lack of
29777 * R10BIO_Uptodate and record any errors etc
29778 */
29779- atomic_add(r10_bio->sectors,
29780+ atomic_add_unchecked(r10_bio->sectors,
29781 &conf->mirrors[d].rdev->corrected_errors);
29782
29783 /* for reconstruct, we always reschedule after a read.
29784@@ -1723,7 +1723,7 @@ static void check_decay_read_errors(mdde
29785 {
29786 struct timespec cur_time_mon;
29787 unsigned long hours_since_last;
29788- unsigned int read_errors = atomic_read(&rdev->read_errors);
29789+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
29790
29791 ktime_get_ts(&cur_time_mon);
29792
29793@@ -1745,9 +1745,9 @@ static void check_decay_read_errors(mdde
29794 * overflowing the shift of read_errors by hours_since_last.
29795 */
29796 if (hours_since_last >= 8 * sizeof(read_errors))
29797- atomic_set(&rdev->read_errors, 0);
29798+ atomic_set_unchecked(&rdev->read_errors, 0);
29799 else
29800- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
29801+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
29802 }
29803
29804 static int r10_sync_page_io(mdk_rdev_t *rdev, sector_t sector,
29805@@ -1797,8 +1797,8 @@ static void fix_read_error(conf_t *conf,
29806 return;
29807
29808 check_decay_read_errors(mddev, rdev);
29809- atomic_inc(&rdev->read_errors);
29810- if (atomic_read(&rdev->read_errors) > max_read_errors) {
29811+ atomic_inc_unchecked(&rdev->read_errors);
29812+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
29813 char b[BDEVNAME_SIZE];
29814 bdevname(rdev->bdev, b);
29815
29816@@ -1806,7 +1806,7 @@ static void fix_read_error(conf_t *conf,
29817 "md/raid10:%s: %s: Raid device exceeded "
29818 "read_error threshold [cur %d:max %d]\n",
29819 mdname(mddev), b,
29820- atomic_read(&rdev->read_errors), max_read_errors);
29821+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
29822 printk(KERN_NOTICE
29823 "md/raid10:%s: %s: Failing raid device\n",
29824 mdname(mddev), b);
29825@@ -1951,7 +1951,7 @@ static void fix_read_error(conf_t *conf,
29826 (unsigned long long)(
29827 sect + rdev->data_offset),
29828 bdevname(rdev->bdev, b));
29829- atomic_add(s, &rdev->corrected_errors);
29830+ atomic_add_unchecked(s, &rdev->corrected_errors);
29831 }
29832
29833 rdev_dec_pending(rdev, mddev);
29834diff -urNp linux-3.1.1/drivers/md/raid1.c linux-3.1.1/drivers/md/raid1.c
29835--- linux-3.1.1/drivers/md/raid1.c 2011-11-11 15:19:27.000000000 -0500
29836+++ linux-3.1.1/drivers/md/raid1.c 2011-11-16 18:39:07.000000000 -0500
29837@@ -1541,7 +1541,7 @@ static int fix_sync_read_error(r1bio_t *
29838 if (r1_sync_page_io(rdev, sect, s,
29839 bio->bi_io_vec[idx].bv_page,
29840 READ) != 0)
29841- atomic_add(s, &rdev->corrected_errors);
29842+ atomic_add_unchecked(s, &rdev->corrected_errors);
29843 }
29844 sectors -= s;
29845 sect += s;
29846@@ -1754,7 +1754,7 @@ static void fix_read_error(conf_t *conf,
29847 test_bit(In_sync, &rdev->flags)) {
29848 if (r1_sync_page_io(rdev, sect, s,
29849 conf->tmppage, READ)) {
29850- atomic_add(s, &rdev->corrected_errors);
29851+ atomic_add_unchecked(s, &rdev->corrected_errors);
29852 printk(KERN_INFO
29853 "md/raid1:%s: read error corrected "
29854 "(%d sectors at %llu on %s)\n",
29855diff -urNp linux-3.1.1/drivers/md/raid5.c linux-3.1.1/drivers/md/raid5.c
29856--- linux-3.1.1/drivers/md/raid5.c 2011-11-11 15:19:27.000000000 -0500
29857+++ linux-3.1.1/drivers/md/raid5.c 2011-11-16 18:40:10.000000000 -0500
29858@@ -1616,19 +1616,19 @@ static void raid5_end_read_request(struc
29859 (unsigned long long)(sh->sector
29860 + rdev->data_offset),
29861 bdevname(rdev->bdev, b));
29862- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
29863+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
29864 clear_bit(R5_ReadError, &sh->dev[i].flags);
29865 clear_bit(R5_ReWrite, &sh->dev[i].flags);
29866 }
29867- if (atomic_read(&conf->disks[i].rdev->read_errors))
29868- atomic_set(&conf->disks[i].rdev->read_errors, 0);
29869+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
29870+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
29871 } else {
29872 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
29873 int retry = 0;
29874 rdev = conf->disks[i].rdev;
29875
29876 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
29877- atomic_inc(&rdev->read_errors);
29878+ atomic_inc_unchecked(&rdev->read_errors);
29879 if (conf->mddev->degraded >= conf->max_degraded)
29880 printk_ratelimited(
29881 KERN_WARNING
29882@@ -1648,7 +1648,7 @@ static void raid5_end_read_request(struc
29883 (unsigned long long)(sh->sector
29884 + rdev->data_offset),
29885 bdn);
29886- else if (atomic_read(&rdev->read_errors)
29887+ else if (atomic_read_unchecked(&rdev->read_errors)
29888 > conf->max_nr_stripes)
29889 printk(KERN_WARNING
29890 "md/raid:%s: Too many read errors, failing device %s.\n",
29891@@ -1978,6 +1978,7 @@ static sector_t compute_blocknr(struct s
29892 sector_t r_sector;
29893 struct stripe_head sh2;
29894
29895+ pax_track_stack();
29896
29897 chunk_offset = sector_div(new_sector, sectors_per_chunk);
29898 stripe = new_sector;
29899diff -urNp linux-3.1.1/drivers/media/common/saa7146_hlp.c linux-3.1.1/drivers/media/common/saa7146_hlp.c
29900--- linux-3.1.1/drivers/media/common/saa7146_hlp.c 2011-11-11 15:19:27.000000000 -0500
29901+++ linux-3.1.1/drivers/media/common/saa7146_hlp.c 2011-11-16 18:40:10.000000000 -0500
29902@@ -353,6 +353,8 @@ static void calculate_clipping_registers
29903
29904 int x[32], y[32], w[32], h[32];
29905
29906+ pax_track_stack();
29907+
29908 /* clear out memory */
29909 memset(&line_list[0], 0x00, sizeof(u32)*32);
29910 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
29911diff -urNp linux-3.1.1/drivers/media/dvb/ddbridge/ddbridge-core.c linux-3.1.1/drivers/media/dvb/ddbridge/ddbridge-core.c
29912--- linux-3.1.1/drivers/media/dvb/ddbridge/ddbridge-core.c 2011-11-11 15:19:27.000000000 -0500
29913+++ linux-3.1.1/drivers/media/dvb/ddbridge/ddbridge-core.c 2011-11-16 18:39:07.000000000 -0500
29914@@ -1675,7 +1675,7 @@ static struct ddb_info ddb_v6 = {
29915 .subvendor = _subvend, .subdevice = _subdev, \
29916 .driver_data = (unsigned long)&_driverdata }
29917
29918-static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
29919+static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
29920 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
29921 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
29922 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
29923diff -urNp linux-3.1.1/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-3.1.1/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
29924--- linux-3.1.1/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-11-11 15:19:27.000000000 -0500
29925+++ linux-3.1.1/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-11-16 18:40:10.000000000 -0500
29926@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
29927 u8 buf[HOST_LINK_BUF_SIZE];
29928 int i;
29929
29930+ pax_track_stack();
29931+
29932 dprintk("%s\n", __func__);
29933
29934 /* check if we have space for a link buf in the rx_buffer */
29935@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
29936 unsigned long timeout;
29937 int written;
29938
29939+ pax_track_stack();
29940+
29941 dprintk("%s\n", __func__);
29942
29943 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
29944diff -urNp linux-3.1.1/drivers/media/dvb/dvb-core/dvb_demux.h linux-3.1.1/drivers/media/dvb/dvb-core/dvb_demux.h
29945--- linux-3.1.1/drivers/media/dvb/dvb-core/dvb_demux.h 2011-11-11 15:19:27.000000000 -0500
29946+++ linux-3.1.1/drivers/media/dvb/dvb-core/dvb_demux.h 2011-11-17 18:34:32.000000000 -0500
29947@@ -73,7 +73,7 @@ struct dvb_demux_feed {
29948 union {
29949 dmx_ts_cb ts;
29950 dmx_section_cb sec;
29951- } cb;
29952+ } __no_const cb;
29953
29954 struct dvb_demux *demux;
29955 void *priv;
29956diff -urNp linux-3.1.1/drivers/media/dvb/dvb-core/dvbdev.c linux-3.1.1/drivers/media/dvb/dvb-core/dvbdev.c
29957--- linux-3.1.1/drivers/media/dvb/dvb-core/dvbdev.c 2011-11-11 15:19:27.000000000 -0500
29958+++ linux-3.1.1/drivers/media/dvb/dvb-core/dvbdev.c 2011-11-16 18:39:07.000000000 -0500
29959@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapt
29960 const struct dvb_device *template, void *priv, int type)
29961 {
29962 struct dvb_device *dvbdev;
29963- struct file_operations *dvbdevfops;
29964+ file_operations_no_const *dvbdevfops;
29965 struct device *clsdev;
29966 int minor;
29967 int id;
29968diff -urNp linux-3.1.1/drivers/media/dvb/dvb-usb/cxusb.c linux-3.1.1/drivers/media/dvb/dvb-usb/cxusb.c
29969--- linux-3.1.1/drivers/media/dvb/dvb-usb/cxusb.c 2011-11-11 15:19:27.000000000 -0500
29970+++ linux-3.1.1/drivers/media/dvb/dvb-usb/cxusb.c 2011-11-16 18:39:07.000000000 -0500
29971@@ -1059,7 +1059,7 @@ static struct dib0070_config dib7070p_di
29972 struct dib0700_adapter_state {
29973 int (*set_param_save) (struct dvb_frontend *,
29974 struct dvb_frontend_parameters *);
29975-};
29976+} __no_const;
29977
29978 static int dib7070_set_param_override(struct dvb_frontend *fe,
29979 struct dvb_frontend_parameters *fep)
29980diff -urNp linux-3.1.1/drivers/media/dvb/dvb-usb/dib0700_core.c linux-3.1.1/drivers/media/dvb/dvb-usb/dib0700_core.c
29981--- linux-3.1.1/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-11-11 15:19:27.000000000 -0500
29982+++ linux-3.1.1/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-11-16 18:40:10.000000000 -0500
29983@@ -478,6 +478,8 @@ int dib0700_download_firmware(struct usb
29984 if (!buf)
29985 return -ENOMEM;
29986
29987+ pax_track_stack();
29988+
29989 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
29990 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",
29991 hx.addr, hx.len, hx.chk);
29992diff -urNp linux-3.1.1/drivers/media/dvb/dvb-usb/dw2102.c linux-3.1.1/drivers/media/dvb/dvb-usb/dw2102.c
29993--- linux-3.1.1/drivers/media/dvb/dvb-usb/dw2102.c 2011-11-11 15:19:27.000000000 -0500
29994+++ linux-3.1.1/drivers/media/dvb/dvb-usb/dw2102.c 2011-11-16 18:39:07.000000000 -0500
29995@@ -95,7 +95,7 @@ struct su3000_state {
29996
29997 struct s6x0_state {
29998 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
29999-};
30000+} __no_const;
30001
30002 /* debug */
30003 static int dvb_usb_dw2102_debug;
30004diff -urNp linux-3.1.1/drivers/media/dvb/dvb-usb/lmedm04.c linux-3.1.1/drivers/media/dvb/dvb-usb/lmedm04.c
30005--- linux-3.1.1/drivers/media/dvb/dvb-usb/lmedm04.c 2011-11-11 15:19:27.000000000 -0500
30006+++ linux-3.1.1/drivers/media/dvb/dvb-usb/lmedm04.c 2011-11-16 18:40:10.000000000 -0500
30007@@ -742,6 +742,7 @@ static int lme2510_download_firmware(str
30008 usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
30009 0x06, 0x80, 0x0200, 0x00, data, 0x0109, 1000);
30010
30011+ pax_track_stack();
30012
30013 data[0] = 0x8a;
30014 len_in = 1;
30015@@ -764,6 +765,8 @@ static void lme_coldreset(struct usb_dev
30016 int ret = 0, len_in;
30017 u8 data[512] = {0};
30018
30019+ pax_track_stack();
30020+
30021 data[0] = 0x0a;
30022 len_in = 1;
30023 info("FRM Firmware Cold Reset");
30024diff -urNp linux-3.1.1/drivers/media/dvb/frontends/dib3000.h linux-3.1.1/drivers/media/dvb/frontends/dib3000.h
30025--- linux-3.1.1/drivers/media/dvb/frontends/dib3000.h 2011-11-11 15:19:27.000000000 -0500
30026+++ linux-3.1.1/drivers/media/dvb/frontends/dib3000.h 2011-11-17 18:38:05.000000000 -0500
30027@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
30028 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
30029 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
30030 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
30031-};
30032+} __no_const;
30033
30034 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
30035 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
30036diff -urNp linux-3.1.1/drivers/media/dvb/frontends/mb86a16.c linux-3.1.1/drivers/media/dvb/frontends/mb86a16.c
30037--- linux-3.1.1/drivers/media/dvb/frontends/mb86a16.c 2011-11-11 15:19:27.000000000 -0500
30038+++ linux-3.1.1/drivers/media/dvb/frontends/mb86a16.c 2011-11-16 18:40:10.000000000 -0500
30039@@ -1060,6 +1060,8 @@ static int mb86a16_set_fe(struct mb86a16
30040 int ret = -1;
30041 int sync;
30042
30043+ pax_track_stack();
30044+
30045 dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate);
30046
30047 fcp = 3000;
30048diff -urNp linux-3.1.1/drivers/media/dvb/frontends/or51211.c linux-3.1.1/drivers/media/dvb/frontends/or51211.c
30049--- linux-3.1.1/drivers/media/dvb/frontends/or51211.c 2011-11-11 15:19:27.000000000 -0500
30050+++ linux-3.1.1/drivers/media/dvb/frontends/or51211.c 2011-11-16 18:40:10.000000000 -0500
30051@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
30052 u8 tudata[585];
30053 int i;
30054
30055+ pax_track_stack();
30056+
30057 dprintk("Firmware is %zd bytes\n",fw->size);
30058
30059 /* Get eprom data */
30060diff -urNp linux-3.1.1/drivers/media/dvb/ngene/ngene-cards.c linux-3.1.1/drivers/media/dvb/ngene/ngene-cards.c
30061--- linux-3.1.1/drivers/media/dvb/ngene/ngene-cards.c 2011-11-11 15:19:27.000000000 -0500
30062+++ linux-3.1.1/drivers/media/dvb/ngene/ngene-cards.c 2011-11-16 18:39:07.000000000 -0500
30063@@ -477,7 +477,7 @@ static struct ngene_info ngene_info_m780
30064
30065 /****************************************************************************/
30066
30067-static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
30068+static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
30069 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
30070 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
30071 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
30072diff -urNp linux-3.1.1/drivers/media/radio/radio-cadet.c linux-3.1.1/drivers/media/radio/radio-cadet.c
30073--- linux-3.1.1/drivers/media/radio/radio-cadet.c 2011-11-11 15:19:27.000000000 -0500
30074+++ linux-3.1.1/drivers/media/radio/radio-cadet.c 2011-11-16 18:39:07.000000000 -0500
30075@@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *f
30076 unsigned char readbuf[RDS_BUFFER];
30077 int i = 0;
30078
30079+ if (count > RDS_BUFFER)
30080+ return -EFAULT;
30081 mutex_lock(&dev->lock);
30082 if (dev->rdsstat == 0) {
30083 dev->rdsstat = 1;
30084diff -urNp linux-3.1.1/drivers/media/video/au0828/au0828.h linux-3.1.1/drivers/media/video/au0828/au0828.h
30085--- linux-3.1.1/drivers/media/video/au0828/au0828.h 2011-11-11 15:19:27.000000000 -0500
30086+++ linux-3.1.1/drivers/media/video/au0828/au0828.h 2011-11-16 18:39:07.000000000 -0500
30087@@ -191,7 +191,7 @@ struct au0828_dev {
30088
30089 /* I2C */
30090 struct i2c_adapter i2c_adap;
30091- struct i2c_algorithm i2c_algo;
30092+ i2c_algorithm_no_const i2c_algo;
30093 struct i2c_client i2c_client;
30094 u32 i2c_rc;
30095
30096diff -urNp linux-3.1.1/drivers/media/video/cx18/cx18-driver.c linux-3.1.1/drivers/media/video/cx18/cx18-driver.c
30097--- linux-3.1.1/drivers/media/video/cx18/cx18-driver.c 2011-11-11 15:19:27.000000000 -0500
30098+++ linux-3.1.1/drivers/media/video/cx18/cx18-driver.c 2011-11-16 18:40:10.000000000 -0500
30099@@ -327,6 +327,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
30100 struct i2c_client c;
30101 u8 eedata[256];
30102
30103+ pax_track_stack();
30104+
30105 memset(&c, 0, sizeof(c));
30106 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
30107 c.adapter = &cx->i2c_adap[0];
30108diff -urNp linux-3.1.1/drivers/media/video/cx23885/cx23885-input.c linux-3.1.1/drivers/media/video/cx23885/cx23885-input.c
30109--- linux-3.1.1/drivers/media/video/cx23885/cx23885-input.c 2011-11-11 15:19:27.000000000 -0500
30110+++ linux-3.1.1/drivers/media/video/cx23885/cx23885-input.c 2011-11-16 18:40:10.000000000 -0500
30111@@ -53,6 +53,8 @@ static void cx23885_input_process_measur
30112 bool handle = false;
30113 struct ir_raw_event ir_core_event[64];
30114
30115+ pax_track_stack();
30116+
30117 do {
30118 num = 0;
30119 v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) ir_core_event,
30120diff -urNp linux-3.1.1/drivers/media/video/cx88/cx88-alsa.c linux-3.1.1/drivers/media/video/cx88/cx88-alsa.c
30121--- linux-3.1.1/drivers/media/video/cx88/cx88-alsa.c 2011-11-11 15:19:27.000000000 -0500
30122+++ linux-3.1.1/drivers/media/video/cx88/cx88-alsa.c 2011-11-16 18:39:07.000000000 -0500
30123@@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_
30124 * Only boards with eeprom and byte 1 at eeprom=1 have it
30125 */
30126
30127-static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
30128+static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
30129 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
30130 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
30131 {0, }
30132diff -urNp linux-3.1.1/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-3.1.1/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
30133--- linux-3.1.1/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-11-11 15:19:27.000000000 -0500
30134+++ linux-3.1.1/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-11-16 18:40:10.000000000 -0500
30135@@ -120,6 +120,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
30136 u8 *eeprom;
30137 struct tveeprom tvdata;
30138
30139+ pax_track_stack();
30140+
30141 memset(&tvdata,0,sizeof(tvdata));
30142
30143 eeprom = pvr2_eeprom_fetch(hdw);
30144diff -urNp linux-3.1.1/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h linux-3.1.1/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
30145--- linux-3.1.1/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h 2011-11-11 15:19:27.000000000 -0500
30146+++ linux-3.1.1/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h 2011-11-16 18:39:07.000000000 -0500
30147@@ -196,7 +196,7 @@ struct pvr2_hdw {
30148
30149 /* I2C stuff */
30150 struct i2c_adapter i2c_adap;
30151- struct i2c_algorithm i2c_algo;
30152+ i2c_algorithm_no_const i2c_algo;
30153 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
30154 int i2c_cx25840_hack_state;
30155 int i2c_linked;
30156diff -urNp linux-3.1.1/drivers/media/video/saa7134/saa6752hs.c linux-3.1.1/drivers/media/video/saa7134/saa6752hs.c
30157--- linux-3.1.1/drivers/media/video/saa7134/saa6752hs.c 2011-11-11 15:19:27.000000000 -0500
30158+++ linux-3.1.1/drivers/media/video/saa7134/saa6752hs.c 2011-11-16 18:40:10.000000000 -0500
30159@@ -682,6 +682,8 @@ static int saa6752hs_init(struct v4l2_su
30160 unsigned char localPAT[256];
30161 unsigned char localPMT[256];
30162
30163+ pax_track_stack();
30164+
30165 /* Set video format - must be done first as it resets other settings */
30166 set_reg8(client, 0x41, h->video_format);
30167
30168diff -urNp linux-3.1.1/drivers/media/video/saa7164/saa7164-cmd.c linux-3.1.1/drivers/media/video/saa7164/saa7164-cmd.c
30169--- linux-3.1.1/drivers/media/video/saa7164/saa7164-cmd.c 2011-11-11 15:19:27.000000000 -0500
30170+++ linux-3.1.1/drivers/media/video/saa7164/saa7164-cmd.c 2011-11-16 18:40:10.000000000 -0500
30171@@ -88,6 +88,8 @@ int saa7164_irq_dequeue(struct saa7164_d
30172 u8 tmp[512];
30173 dprintk(DBGLVL_CMD, "%s()\n", __func__);
30174
30175+ pax_track_stack();
30176+
30177 /* While any outstand message on the bus exists... */
30178 do {
30179
30180@@ -141,6 +143,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
30181 u8 tmp[512];
30182 dprintk(DBGLVL_CMD, "%s()\n", __func__);
30183
30184+ pax_track_stack();
30185+
30186 while (loop) {
30187
30188 struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
30189diff -urNp linux-3.1.1/drivers/media/video/timblogiw.c linux-3.1.1/drivers/media/video/timblogiw.c
30190--- linux-3.1.1/drivers/media/video/timblogiw.c 2011-11-11 15:19:27.000000000 -0500
30191+++ linux-3.1.1/drivers/media/video/timblogiw.c 2011-11-17 18:36:32.000000000 -0500
30192@@ -744,7 +744,7 @@ static int timblogiw_mmap(struct file *f
30193
30194 /* Platform device functions */
30195
30196-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
30197+static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
30198 .vidioc_querycap = timblogiw_querycap,
30199 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
30200 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
30201@@ -766,7 +766,7 @@ static __devinitconst struct v4l2_ioctl_
30202 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
30203 };
30204
30205-static __devinitconst struct v4l2_file_operations timblogiw_fops = {
30206+static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
30207 .owner = THIS_MODULE,
30208 .open = timblogiw_open,
30209 .release = timblogiw_close,
30210diff -urNp linux-3.1.1/drivers/media/video/usbvision/usbvision-core.c linux-3.1.1/drivers/media/video/usbvision/usbvision-core.c
30211--- linux-3.1.1/drivers/media/video/usbvision/usbvision-core.c 2011-11-11 15:19:27.000000000 -0500
30212+++ linux-3.1.1/drivers/media/video/usbvision/usbvision-core.c 2011-11-16 18:40:10.000000000 -0500
30213@@ -707,6 +707,8 @@ static enum parse_state usbvision_parse_
30214 unsigned char rv, gv, bv;
30215 static unsigned char *Y, *U, *V;
30216
30217+ pax_track_stack();
30218+
30219 frame = usbvision->cur_frame;
30220 image_size = frame->frmwidth * frame->frmheight;
30221 if ((frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
30222diff -urNp linux-3.1.1/drivers/media/video/videobuf-dma-sg.c linux-3.1.1/drivers/media/video/videobuf-dma-sg.c
30223--- linux-3.1.1/drivers/media/video/videobuf-dma-sg.c 2011-11-11 15:19:27.000000000 -0500
30224+++ linux-3.1.1/drivers/media/video/videobuf-dma-sg.c 2011-11-16 18:40:10.000000000 -0500
30225@@ -607,6 +607,8 @@ void *videobuf_sg_alloc(size_t size)
30226 {
30227 struct videobuf_queue q;
30228
30229+ pax_track_stack();
30230+
30231 /* Required to make generic handler to call __videobuf_alloc */
30232 q.int_ops = &sg_ops;
30233
30234diff -urNp linux-3.1.1/drivers/message/fusion/mptbase.c linux-3.1.1/drivers/message/fusion/mptbase.c
30235--- linux-3.1.1/drivers/message/fusion/mptbase.c 2011-11-11 15:19:27.000000000 -0500
30236+++ linux-3.1.1/drivers/message/fusion/mptbase.c 2011-11-16 18:40:10.000000000 -0500
30237@@ -6681,8 +6681,13 @@ static int mpt_iocinfo_proc_show(struct
30238 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
30239 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
30240
30241+#ifdef CONFIG_GRKERNSEC_HIDESYM
30242+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
30243+#else
30244 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
30245 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
30246+#endif
30247+
30248 /*
30249 * Rounding UP to nearest 4-kB boundary here...
30250 */
30251diff -urNp linux-3.1.1/drivers/message/fusion/mptsas.c linux-3.1.1/drivers/message/fusion/mptsas.c
30252--- linux-3.1.1/drivers/message/fusion/mptsas.c 2011-11-11 15:19:27.000000000 -0500
30253+++ linux-3.1.1/drivers/message/fusion/mptsas.c 2011-11-16 18:39:07.000000000 -0500
30254@@ -439,6 +439,23 @@ mptsas_is_end_device(struct mptsas_devin
30255 return 0;
30256 }
30257
30258+static inline void
30259+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
30260+{
30261+ if (phy_info->port_details) {
30262+ phy_info->port_details->rphy = rphy;
30263+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
30264+ ioc->name, rphy));
30265+ }
30266+
30267+ if (rphy) {
30268+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
30269+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
30270+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
30271+ ioc->name, rphy, rphy->dev.release));
30272+ }
30273+}
30274+
30275 /* no mutex */
30276 static void
30277 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
30278@@ -477,23 +494,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
30279 return NULL;
30280 }
30281
30282-static inline void
30283-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
30284-{
30285- if (phy_info->port_details) {
30286- phy_info->port_details->rphy = rphy;
30287- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
30288- ioc->name, rphy));
30289- }
30290-
30291- if (rphy) {
30292- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
30293- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
30294- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
30295- ioc->name, rphy, rphy->dev.release));
30296- }
30297-}
30298-
30299 static inline struct sas_port *
30300 mptsas_get_port(struct mptsas_phyinfo *phy_info)
30301 {
30302diff -urNp linux-3.1.1/drivers/message/fusion/mptscsih.c linux-3.1.1/drivers/message/fusion/mptscsih.c
30303--- linux-3.1.1/drivers/message/fusion/mptscsih.c 2011-11-11 15:19:27.000000000 -0500
30304+++ linux-3.1.1/drivers/message/fusion/mptscsih.c 2011-11-16 18:39:07.000000000 -0500
30305@@ -1268,15 +1268,16 @@ mptscsih_info(struct Scsi_Host *SChost)
30306
30307 h = shost_priv(SChost);
30308
30309- if (h) {
30310- if (h->info_kbuf == NULL)
30311- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
30312- return h->info_kbuf;
30313- h->info_kbuf[0] = '\0';
30314+ if (!h)
30315+ return NULL;
30316
30317- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
30318- h->info_kbuf[size-1] = '\0';
30319- }
30320+ if (h->info_kbuf == NULL)
30321+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
30322+ return h->info_kbuf;
30323+ h->info_kbuf[0] = '\0';
30324+
30325+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
30326+ h->info_kbuf[size-1] = '\0';
30327
30328 return h->info_kbuf;
30329 }
30330diff -urNp linux-3.1.1/drivers/message/i2o/i2o_config.c linux-3.1.1/drivers/message/i2o/i2o_config.c
30331--- linux-3.1.1/drivers/message/i2o/i2o_config.c 2011-11-11 15:19:27.000000000 -0500
30332+++ linux-3.1.1/drivers/message/i2o/i2o_config.c 2011-11-16 18:40:10.000000000 -0500
30333@@ -781,6 +781,8 @@ static int i2o_cfg_passthru(unsigned lon
30334 struct i2o_message *msg;
30335 unsigned int iop;
30336
30337+ pax_track_stack();
30338+
30339 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
30340 return -EFAULT;
30341
30342diff -urNp linux-3.1.1/drivers/message/i2o/i2o_proc.c linux-3.1.1/drivers/message/i2o/i2o_proc.c
30343--- linux-3.1.1/drivers/message/i2o/i2o_proc.c 2011-11-11 15:19:27.000000000 -0500
30344+++ linux-3.1.1/drivers/message/i2o/i2o_proc.c 2011-11-16 18:39:07.000000000 -0500
30345@@ -255,13 +255,6 @@ static char *scsi_devices[] = {
30346 "Array Controller Device"
30347 };
30348
30349-static char *chtostr(u8 * chars, int n)
30350-{
30351- char tmp[256];
30352- tmp[0] = 0;
30353- return strncat(tmp, (char *)chars, n);
30354-}
30355-
30356 static int i2o_report_query_status(struct seq_file *seq, int block_status,
30357 char *group)
30358 {
30359@@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct
30360
30361 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
30362 seq_printf(seq, "%-#8x", ddm_table.module_id);
30363- seq_printf(seq, "%-29s",
30364- chtostr(ddm_table.module_name_version, 28));
30365+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
30366 seq_printf(seq, "%9d ", ddm_table.data_size);
30367 seq_printf(seq, "%8d", ddm_table.code_size);
30368
30369@@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(s
30370
30371 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
30372 seq_printf(seq, "%-#8x", dst->module_id);
30373- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
30374- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
30375+ seq_printf(seq, "%-.28s", dst->module_name_version);
30376+ seq_printf(seq, "%-.8s", dst->date);
30377 seq_printf(seq, "%8d ", dst->module_size);
30378 seq_printf(seq, "%8d ", dst->mpb_size);
30379 seq_printf(seq, "0x%04x", dst->module_flags);
30380@@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(str
30381 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
30382 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
30383 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
30384- seq_printf(seq, "Vendor info : %s\n",
30385- chtostr((u8 *) (work32 + 2), 16));
30386- seq_printf(seq, "Product info : %s\n",
30387- chtostr((u8 *) (work32 + 6), 16));
30388- seq_printf(seq, "Description : %s\n",
30389- chtostr((u8 *) (work32 + 10), 16));
30390- seq_printf(seq, "Product rev. : %s\n",
30391- chtostr((u8 *) (work32 + 14), 8));
30392+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
30393+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
30394+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
30395+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
30396
30397 seq_printf(seq, "Serial number : ");
30398 print_serial_number(seq, (u8 *) (work32 + 16),
30399@@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(str
30400 }
30401
30402 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
30403- seq_printf(seq, "Module name : %s\n",
30404- chtostr(result.module_name, 24));
30405- seq_printf(seq, "Module revision : %s\n",
30406- chtostr(result.module_rev, 8));
30407+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
30408+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
30409
30410 seq_printf(seq, "Serial number : ");
30411 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
30412@@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq
30413 return 0;
30414 }
30415
30416- seq_printf(seq, "Device name : %s\n",
30417- chtostr(result.device_name, 64));
30418- seq_printf(seq, "Service name : %s\n",
30419- chtostr(result.service_name, 64));
30420- seq_printf(seq, "Physical name : %s\n",
30421- chtostr(result.physical_location, 64));
30422- seq_printf(seq, "Instance number : %s\n",
30423- chtostr(result.instance_number, 4));
30424+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
30425+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
30426+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
30427+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
30428
30429 return 0;
30430 }
30431diff -urNp linux-3.1.1/drivers/message/i2o/iop.c linux-3.1.1/drivers/message/i2o/iop.c
30432--- linux-3.1.1/drivers/message/i2o/iop.c 2011-11-11 15:19:27.000000000 -0500
30433+++ linux-3.1.1/drivers/message/i2o/iop.c 2011-11-16 18:39:07.000000000 -0500
30434@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
30435
30436 spin_lock_irqsave(&c->context_list_lock, flags);
30437
30438- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
30439- atomic_inc(&c->context_list_counter);
30440+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
30441+ atomic_inc_unchecked(&c->context_list_counter);
30442
30443- entry->context = atomic_read(&c->context_list_counter);
30444+ entry->context = atomic_read_unchecked(&c->context_list_counter);
30445
30446 list_add(&entry->list, &c->context_list);
30447
30448@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(voi
30449
30450 #if BITS_PER_LONG == 64
30451 spin_lock_init(&c->context_list_lock);
30452- atomic_set(&c->context_list_counter, 0);
30453+ atomic_set_unchecked(&c->context_list_counter, 0);
30454 INIT_LIST_HEAD(&c->context_list);
30455 #endif
30456
30457diff -urNp linux-3.1.1/drivers/mfd/ab3100-core.c linux-3.1.1/drivers/mfd/ab3100-core.c
30458--- linux-3.1.1/drivers/mfd/ab3100-core.c 2011-11-11 15:19:27.000000000 -0500
30459+++ linux-3.1.1/drivers/mfd/ab3100-core.c 2011-11-16 18:39:07.000000000 -0500
30460@@ -809,7 +809,7 @@ struct ab_family_id {
30461 char *name;
30462 };
30463
30464-static const struct ab_family_id ids[] __devinitdata = {
30465+static const struct ab_family_id ids[] __devinitconst = {
30466 /* AB3100 */
30467 {
30468 .id = 0xc0,
30469diff -urNp linux-3.1.1/drivers/mfd/abx500-core.c linux-3.1.1/drivers/mfd/abx500-core.c
30470--- linux-3.1.1/drivers/mfd/abx500-core.c 2011-11-11 15:19:27.000000000 -0500
30471+++ linux-3.1.1/drivers/mfd/abx500-core.c 2011-11-16 18:39:07.000000000 -0500
30472@@ -14,7 +14,7 @@ static LIST_HEAD(abx500_list);
30473
30474 struct abx500_device_entry {
30475 struct list_head list;
30476- struct abx500_ops ops;
30477+ abx500_ops_no_const ops;
30478 struct device *dev;
30479 };
30480
30481diff -urNp linux-3.1.1/drivers/mfd/janz-cmodio.c linux-3.1.1/drivers/mfd/janz-cmodio.c
30482--- linux-3.1.1/drivers/mfd/janz-cmodio.c 2011-11-11 15:19:27.000000000 -0500
30483+++ linux-3.1.1/drivers/mfd/janz-cmodio.c 2011-11-16 18:39:07.000000000 -0500
30484@@ -13,6 +13,7 @@
30485
30486 #include <linux/kernel.h>
30487 #include <linux/module.h>
30488+#include <linux/slab.h>
30489 #include <linux/init.h>
30490 #include <linux/pci.h>
30491 #include <linux/interrupt.h>
30492diff -urNp linux-3.1.1/drivers/mfd/wm8350-i2c.c linux-3.1.1/drivers/mfd/wm8350-i2c.c
30493--- linux-3.1.1/drivers/mfd/wm8350-i2c.c 2011-11-11 15:19:27.000000000 -0500
30494+++ linux-3.1.1/drivers/mfd/wm8350-i2c.c 2011-11-16 18:40:10.000000000 -0500
30495@@ -44,6 +44,8 @@ static int wm8350_i2c_write_device(struc
30496 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
30497 int ret;
30498
30499+ pax_track_stack();
30500+
30501 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
30502 return -EINVAL;
30503
30504diff -urNp linux-3.1.1/drivers/misc/lis3lv02d/lis3lv02d.c linux-3.1.1/drivers/misc/lis3lv02d/lis3lv02d.c
30505--- linux-3.1.1/drivers/misc/lis3lv02d/lis3lv02d.c 2011-11-11 15:19:27.000000000 -0500
30506+++ linux-3.1.1/drivers/misc/lis3lv02d/lis3lv02d.c 2011-11-16 18:39:07.000000000 -0500
30507@@ -437,7 +437,7 @@ static irqreturn_t lis302dl_interrupt(in
30508 * the lid is closed. This leads to interrupts as soon as a little move
30509 * is done.
30510 */
30511- atomic_inc(&lis3_dev.count);
30512+ atomic_inc_unchecked(&lis3_dev.count);
30513
30514 wake_up_interruptible(&lis3_dev.misc_wait);
30515 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
30516@@ -520,7 +520,7 @@ static int lis3lv02d_misc_open(struct in
30517 if (lis3_dev.pm_dev)
30518 pm_runtime_get_sync(lis3_dev.pm_dev);
30519
30520- atomic_set(&lis3_dev.count, 0);
30521+ atomic_set_unchecked(&lis3_dev.count, 0);
30522 return 0;
30523 }
30524
30525@@ -547,7 +547,7 @@ static ssize_t lis3lv02d_misc_read(struc
30526 add_wait_queue(&lis3_dev.misc_wait, &wait);
30527 while (true) {
30528 set_current_state(TASK_INTERRUPTIBLE);
30529- data = atomic_xchg(&lis3_dev.count, 0);
30530+ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
30531 if (data)
30532 break;
30533
30534@@ -585,7 +585,7 @@ out:
30535 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
30536 {
30537 poll_wait(file, &lis3_dev.misc_wait, wait);
30538- if (atomic_read(&lis3_dev.count))
30539+ if (atomic_read_unchecked(&lis3_dev.count))
30540 return POLLIN | POLLRDNORM;
30541 return 0;
30542 }
30543diff -urNp linux-3.1.1/drivers/misc/lis3lv02d/lis3lv02d.h linux-3.1.1/drivers/misc/lis3lv02d/lis3lv02d.h
30544--- linux-3.1.1/drivers/misc/lis3lv02d/lis3lv02d.h 2011-11-11 15:19:27.000000000 -0500
30545+++ linux-3.1.1/drivers/misc/lis3lv02d/lis3lv02d.h 2011-11-16 18:39:07.000000000 -0500
30546@@ -265,7 +265,7 @@ struct lis3lv02d {
30547 struct input_polled_dev *idev; /* input device */
30548 struct platform_device *pdev; /* platform device */
30549 struct regulator_bulk_data regulators[2];
30550- atomic_t count; /* interrupt count after last read */
30551+ atomic_unchecked_t count; /* interrupt count after last read */
30552 union axis_conversion ac; /* hw -> logical axis */
30553 int mapped_btns[3];
30554
30555diff -urNp linux-3.1.1/drivers/misc/sgi-gru/gruhandles.c linux-3.1.1/drivers/misc/sgi-gru/gruhandles.c
30556--- linux-3.1.1/drivers/misc/sgi-gru/gruhandles.c 2011-11-11 15:19:27.000000000 -0500
30557+++ linux-3.1.1/drivers/misc/sgi-gru/gruhandles.c 2011-11-16 18:39:07.000000000 -0500
30558@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op
30559 unsigned long nsec;
30560
30561 nsec = CLKS2NSEC(clks);
30562- atomic_long_inc(&mcs_op_statistics[op].count);
30563- atomic_long_add(nsec, &mcs_op_statistics[op].total);
30564+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
30565+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
30566 if (mcs_op_statistics[op].max < nsec)
30567 mcs_op_statistics[op].max = nsec;
30568 }
30569diff -urNp linux-3.1.1/drivers/misc/sgi-gru/gruprocfs.c linux-3.1.1/drivers/misc/sgi-gru/gruprocfs.c
30570--- linux-3.1.1/drivers/misc/sgi-gru/gruprocfs.c 2011-11-11 15:19:27.000000000 -0500
30571+++ linux-3.1.1/drivers/misc/sgi-gru/gruprocfs.c 2011-11-16 18:39:07.000000000 -0500
30572@@ -32,9 +32,9 @@
30573
30574 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
30575
30576-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
30577+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
30578 {
30579- unsigned long val = atomic_long_read(v);
30580+ unsigned long val = atomic_long_read_unchecked(v);
30581
30582 seq_printf(s, "%16lu %s\n", val, id);
30583 }
30584@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct se
30585
30586 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
30587 for (op = 0; op < mcsop_last; op++) {
30588- count = atomic_long_read(&mcs_op_statistics[op].count);
30589- total = atomic_long_read(&mcs_op_statistics[op].total);
30590+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
30591+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
30592 max = mcs_op_statistics[op].max;
30593 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
30594 count ? total / count : 0, max);
30595diff -urNp linux-3.1.1/drivers/misc/sgi-gru/grutables.h linux-3.1.1/drivers/misc/sgi-gru/grutables.h
30596--- linux-3.1.1/drivers/misc/sgi-gru/grutables.h 2011-11-11 15:19:27.000000000 -0500
30597+++ linux-3.1.1/drivers/misc/sgi-gru/grutables.h 2011-11-16 18:39:07.000000000 -0500
30598@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
30599 * GRU statistics.
30600 */
30601 struct gru_stats_s {
30602- atomic_long_t vdata_alloc;
30603- atomic_long_t vdata_free;
30604- atomic_long_t gts_alloc;
30605- atomic_long_t gts_free;
30606- atomic_long_t gms_alloc;
30607- atomic_long_t gms_free;
30608- atomic_long_t gts_double_allocate;
30609- atomic_long_t assign_context;
30610- atomic_long_t assign_context_failed;
30611- atomic_long_t free_context;
30612- atomic_long_t load_user_context;
30613- atomic_long_t load_kernel_context;
30614- atomic_long_t lock_kernel_context;
30615- atomic_long_t unlock_kernel_context;
30616- atomic_long_t steal_user_context;
30617- atomic_long_t steal_kernel_context;
30618- atomic_long_t steal_context_failed;
30619- atomic_long_t nopfn;
30620- atomic_long_t asid_new;
30621- atomic_long_t asid_next;
30622- atomic_long_t asid_wrap;
30623- atomic_long_t asid_reuse;
30624- atomic_long_t intr;
30625- atomic_long_t intr_cbr;
30626- atomic_long_t intr_tfh;
30627- atomic_long_t intr_spurious;
30628- atomic_long_t intr_mm_lock_failed;
30629- atomic_long_t call_os;
30630- atomic_long_t call_os_wait_queue;
30631- atomic_long_t user_flush_tlb;
30632- atomic_long_t user_unload_context;
30633- atomic_long_t user_exception;
30634- atomic_long_t set_context_option;
30635- atomic_long_t check_context_retarget_intr;
30636- atomic_long_t check_context_unload;
30637- atomic_long_t tlb_dropin;
30638- atomic_long_t tlb_preload_page;
30639- atomic_long_t tlb_dropin_fail_no_asid;
30640- atomic_long_t tlb_dropin_fail_upm;
30641- atomic_long_t tlb_dropin_fail_invalid;
30642- atomic_long_t tlb_dropin_fail_range_active;
30643- atomic_long_t tlb_dropin_fail_idle;
30644- atomic_long_t tlb_dropin_fail_fmm;
30645- atomic_long_t tlb_dropin_fail_no_exception;
30646- atomic_long_t tfh_stale_on_fault;
30647- atomic_long_t mmu_invalidate_range;
30648- atomic_long_t mmu_invalidate_page;
30649- atomic_long_t flush_tlb;
30650- atomic_long_t flush_tlb_gru;
30651- atomic_long_t flush_tlb_gru_tgh;
30652- atomic_long_t flush_tlb_gru_zero_asid;
30653-
30654- atomic_long_t copy_gpa;
30655- atomic_long_t read_gpa;
30656-
30657- atomic_long_t mesq_receive;
30658- atomic_long_t mesq_receive_none;
30659- atomic_long_t mesq_send;
30660- atomic_long_t mesq_send_failed;
30661- atomic_long_t mesq_noop;
30662- atomic_long_t mesq_send_unexpected_error;
30663- atomic_long_t mesq_send_lb_overflow;
30664- atomic_long_t mesq_send_qlimit_reached;
30665- atomic_long_t mesq_send_amo_nacked;
30666- atomic_long_t mesq_send_put_nacked;
30667- atomic_long_t mesq_page_overflow;
30668- atomic_long_t mesq_qf_locked;
30669- atomic_long_t mesq_qf_noop_not_full;
30670- atomic_long_t mesq_qf_switch_head_failed;
30671- atomic_long_t mesq_qf_unexpected_error;
30672- atomic_long_t mesq_noop_unexpected_error;
30673- atomic_long_t mesq_noop_lb_overflow;
30674- atomic_long_t mesq_noop_qlimit_reached;
30675- atomic_long_t mesq_noop_amo_nacked;
30676- atomic_long_t mesq_noop_put_nacked;
30677- atomic_long_t mesq_noop_page_overflow;
30678+ atomic_long_unchecked_t vdata_alloc;
30679+ atomic_long_unchecked_t vdata_free;
30680+ atomic_long_unchecked_t gts_alloc;
30681+ atomic_long_unchecked_t gts_free;
30682+ atomic_long_unchecked_t gms_alloc;
30683+ atomic_long_unchecked_t gms_free;
30684+ atomic_long_unchecked_t gts_double_allocate;
30685+ atomic_long_unchecked_t assign_context;
30686+ atomic_long_unchecked_t assign_context_failed;
30687+ atomic_long_unchecked_t free_context;
30688+ atomic_long_unchecked_t load_user_context;
30689+ atomic_long_unchecked_t load_kernel_context;
30690+ atomic_long_unchecked_t lock_kernel_context;
30691+ atomic_long_unchecked_t unlock_kernel_context;
30692+ atomic_long_unchecked_t steal_user_context;
30693+ atomic_long_unchecked_t steal_kernel_context;
30694+ atomic_long_unchecked_t steal_context_failed;
30695+ atomic_long_unchecked_t nopfn;
30696+ atomic_long_unchecked_t asid_new;
30697+ atomic_long_unchecked_t asid_next;
30698+ atomic_long_unchecked_t asid_wrap;
30699+ atomic_long_unchecked_t asid_reuse;
30700+ atomic_long_unchecked_t intr;
30701+ atomic_long_unchecked_t intr_cbr;
30702+ atomic_long_unchecked_t intr_tfh;
30703+ atomic_long_unchecked_t intr_spurious;
30704+ atomic_long_unchecked_t intr_mm_lock_failed;
30705+ atomic_long_unchecked_t call_os;
30706+ atomic_long_unchecked_t call_os_wait_queue;
30707+ atomic_long_unchecked_t user_flush_tlb;
30708+ atomic_long_unchecked_t user_unload_context;
30709+ atomic_long_unchecked_t user_exception;
30710+ atomic_long_unchecked_t set_context_option;
30711+ atomic_long_unchecked_t check_context_retarget_intr;
30712+ atomic_long_unchecked_t check_context_unload;
30713+ atomic_long_unchecked_t tlb_dropin;
30714+ atomic_long_unchecked_t tlb_preload_page;
30715+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
30716+ atomic_long_unchecked_t tlb_dropin_fail_upm;
30717+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
30718+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
30719+ atomic_long_unchecked_t tlb_dropin_fail_idle;
30720+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
30721+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
30722+ atomic_long_unchecked_t tfh_stale_on_fault;
30723+ atomic_long_unchecked_t mmu_invalidate_range;
30724+ atomic_long_unchecked_t mmu_invalidate_page;
30725+ atomic_long_unchecked_t flush_tlb;
30726+ atomic_long_unchecked_t flush_tlb_gru;
30727+ atomic_long_unchecked_t flush_tlb_gru_tgh;
30728+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
30729+
30730+ atomic_long_unchecked_t copy_gpa;
30731+ atomic_long_unchecked_t read_gpa;
30732+
30733+ atomic_long_unchecked_t mesq_receive;
30734+ atomic_long_unchecked_t mesq_receive_none;
30735+ atomic_long_unchecked_t mesq_send;
30736+ atomic_long_unchecked_t mesq_send_failed;
30737+ atomic_long_unchecked_t mesq_noop;
30738+ atomic_long_unchecked_t mesq_send_unexpected_error;
30739+ atomic_long_unchecked_t mesq_send_lb_overflow;
30740+ atomic_long_unchecked_t mesq_send_qlimit_reached;
30741+ atomic_long_unchecked_t mesq_send_amo_nacked;
30742+ atomic_long_unchecked_t mesq_send_put_nacked;
30743+ atomic_long_unchecked_t mesq_page_overflow;
30744+ atomic_long_unchecked_t mesq_qf_locked;
30745+ atomic_long_unchecked_t mesq_qf_noop_not_full;
30746+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
30747+ atomic_long_unchecked_t mesq_qf_unexpected_error;
30748+ atomic_long_unchecked_t mesq_noop_unexpected_error;
30749+ atomic_long_unchecked_t mesq_noop_lb_overflow;
30750+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
30751+ atomic_long_unchecked_t mesq_noop_amo_nacked;
30752+ atomic_long_unchecked_t mesq_noop_put_nacked;
30753+ atomic_long_unchecked_t mesq_noop_page_overflow;
30754
30755 };
30756
30757@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start
30758 tghop_invalidate, mcsop_last};
30759
30760 struct mcs_op_statistic {
30761- atomic_long_t count;
30762- atomic_long_t total;
30763+ atomic_long_unchecked_t count;
30764+ atomic_long_unchecked_t total;
30765 unsigned long max;
30766 };
30767
30768@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_st
30769
30770 #define STAT(id) do { \
30771 if (gru_options & OPT_STATS) \
30772- atomic_long_inc(&gru_stats.id); \
30773+ atomic_long_inc_unchecked(&gru_stats.id); \
30774 } while (0)
30775
30776 #ifdef CONFIG_SGI_GRU_DEBUG
30777diff -urNp linux-3.1.1/drivers/misc/sgi-xp/xpc.h linux-3.1.1/drivers/misc/sgi-xp/xpc.h
30778--- linux-3.1.1/drivers/misc/sgi-xp/xpc.h 2011-11-11 15:19:27.000000000 -0500
30779+++ linux-3.1.1/drivers/misc/sgi-xp/xpc.h 2011-11-16 18:39:07.000000000 -0500
30780@@ -835,6 +835,7 @@ struct xpc_arch_operations {
30781 void (*received_payload) (struct xpc_channel *, void *);
30782 void (*notify_senders_of_disconnect) (struct xpc_channel *);
30783 };
30784+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
30785
30786 /* struct xpc_partition act_state values (for XPC HB) */
30787
30788@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_regis
30789 /* found in xpc_main.c */
30790 extern struct device *xpc_part;
30791 extern struct device *xpc_chan;
30792-extern struct xpc_arch_operations xpc_arch_ops;
30793+extern xpc_arch_operations_no_const xpc_arch_ops;
30794 extern int xpc_disengage_timelimit;
30795 extern int xpc_disengage_timedout;
30796 extern int xpc_activate_IRQ_rcvd;
30797diff -urNp linux-3.1.1/drivers/misc/sgi-xp/xpc_main.c linux-3.1.1/drivers/misc/sgi-xp/xpc_main.c
30798--- linux-3.1.1/drivers/misc/sgi-xp/xpc_main.c 2011-11-11 15:19:27.000000000 -0500
30799+++ linux-3.1.1/drivers/misc/sgi-xp/xpc_main.c 2011-11-16 18:39:07.000000000 -0500
30800@@ -162,7 +162,7 @@ static struct notifier_block xpc_die_not
30801 .notifier_call = xpc_system_die,
30802 };
30803
30804-struct xpc_arch_operations xpc_arch_ops;
30805+xpc_arch_operations_no_const xpc_arch_ops;
30806
30807 /*
30808 * Timer function to enforce the timelimit on the partition disengage.
30809diff -urNp linux-3.1.1/drivers/misc/sgi-xp/xp.h linux-3.1.1/drivers/misc/sgi-xp/xp.h
30810--- linux-3.1.1/drivers/misc/sgi-xp/xp.h 2011-11-11 15:19:27.000000000 -0500
30811+++ linux-3.1.1/drivers/misc/sgi-xp/xp.h 2011-11-16 18:39:07.000000000 -0500
30812@@ -289,7 +289,7 @@ struct xpc_interface {
30813 xpc_notify_func, void *);
30814 void (*received) (short, int, void *);
30815 enum xp_retval (*partid_to_nasids) (short, void *);
30816-};
30817+} __no_const;
30818
30819 extern struct xpc_interface xpc_interface;
30820
30821diff -urNp linux-3.1.1/drivers/mmc/host/sdhci-pci.c linux-3.1.1/drivers/mmc/host/sdhci-pci.c
30822--- linux-3.1.1/drivers/mmc/host/sdhci-pci.c 2011-11-11 15:19:27.000000000 -0500
30823+++ linux-3.1.1/drivers/mmc/host/sdhci-pci.c 2011-11-16 18:39:07.000000000 -0500
30824@@ -542,7 +542,7 @@ static const struct sdhci_pci_fixes sdhc
30825 .probe = via_probe,
30826 };
30827
30828-static const struct pci_device_id pci_ids[] __devinitdata = {
30829+static const struct pci_device_id pci_ids[] __devinitconst = {
30830 {
30831 .vendor = PCI_VENDOR_ID_RICOH,
30832 .device = PCI_DEVICE_ID_RICOH_R5C822,
30833diff -urNp linux-3.1.1/drivers/mtd/chips/cfi_cmdset_0001.c linux-3.1.1/drivers/mtd/chips/cfi_cmdset_0001.c
30834--- linux-3.1.1/drivers/mtd/chips/cfi_cmdset_0001.c 2011-11-11 15:19:27.000000000 -0500
30835+++ linux-3.1.1/drivers/mtd/chips/cfi_cmdset_0001.c 2011-11-16 18:40:10.000000000 -0500
30836@@ -757,6 +757,8 @@ static int chip_ready (struct map_info *
30837 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
30838 unsigned long timeo = jiffies + HZ;
30839
30840+ pax_track_stack();
30841+
30842 /* Prevent setting state FL_SYNCING for chip in suspended state. */
30843 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
30844 goto sleep;
30845@@ -1653,6 +1655,8 @@ static int __xipram do_write_buffer(stru
30846 unsigned long initial_adr;
30847 int initial_len = len;
30848
30849+ pax_track_stack();
30850+
30851 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
30852 adr += chip->start;
30853 initial_adr = adr;
30854@@ -1871,6 +1875,8 @@ static int __xipram do_erase_oneblock(st
30855 int retries = 3;
30856 int ret;
30857
30858+ pax_track_stack();
30859+
30860 adr += chip->start;
30861
30862 retry:
30863diff -urNp linux-3.1.1/drivers/mtd/chips/cfi_cmdset_0020.c linux-3.1.1/drivers/mtd/chips/cfi_cmdset_0020.c
30864--- linux-3.1.1/drivers/mtd/chips/cfi_cmdset_0020.c 2011-11-11 15:19:27.000000000 -0500
30865+++ linux-3.1.1/drivers/mtd/chips/cfi_cmdset_0020.c 2011-11-16 18:40:10.000000000 -0500
30866@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
30867 unsigned long cmd_addr;
30868 struct cfi_private *cfi = map->fldrv_priv;
30869
30870+ pax_track_stack();
30871+
30872 adr += chip->start;
30873
30874 /* Ensure cmd read/writes are aligned. */
30875@@ -429,6 +431,8 @@ static inline int do_write_buffer(struct
30876 DECLARE_WAITQUEUE(wait, current);
30877 int wbufsize, z;
30878
30879+ pax_track_stack();
30880+
30881 /* M58LW064A requires bus alignment for buffer wriets -- saw */
30882 if (adr & (map_bankwidth(map)-1))
30883 return -EINVAL;
30884@@ -743,6 +747,8 @@ static inline int do_erase_oneblock(stru
30885 DECLARE_WAITQUEUE(wait, current);
30886 int ret = 0;
30887
30888+ pax_track_stack();
30889+
30890 adr += chip->start;
30891
30892 /* Let's determine this according to the interleave only once */
30893@@ -1048,6 +1054,8 @@ static inline int do_lock_oneblock(struc
30894 unsigned long timeo = jiffies + HZ;
30895 DECLARE_WAITQUEUE(wait, current);
30896
30897+ pax_track_stack();
30898+
30899 adr += chip->start;
30900
30901 /* Let's determine this according to the interleave only once */
30902@@ -1197,6 +1205,8 @@ static inline int do_unlock_oneblock(str
30903 unsigned long timeo = jiffies + HZ;
30904 DECLARE_WAITQUEUE(wait, current);
30905
30906+ pax_track_stack();
30907+
30908 adr += chip->start;
30909
30910 /* Let's determine this according to the interleave only once */
30911diff -urNp linux-3.1.1/drivers/mtd/devices/doc2000.c linux-3.1.1/drivers/mtd/devices/doc2000.c
30912--- linux-3.1.1/drivers/mtd/devices/doc2000.c 2011-11-11 15:19:27.000000000 -0500
30913+++ linux-3.1.1/drivers/mtd/devices/doc2000.c 2011-11-16 18:39:07.000000000 -0500
30914@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
30915
30916 /* The ECC will not be calculated correctly if less than 512 is written */
30917 /* DBB-
30918- if (len != 0x200 && eccbuf)
30919+ if (len != 0x200)
30920 printk(KERN_WARNING
30921 "ECC needs a full sector write (adr: %lx size %lx)\n",
30922 (long) to, (long) len);
30923diff -urNp linux-3.1.1/drivers/mtd/devices/doc2001.c linux-3.1.1/drivers/mtd/devices/doc2001.c
30924--- linux-3.1.1/drivers/mtd/devices/doc2001.c 2011-11-11 15:19:27.000000000 -0500
30925+++ linux-3.1.1/drivers/mtd/devices/doc2001.c 2011-11-16 18:39:07.000000000 -0500
30926@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
30927 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
30928
30929 /* Don't allow read past end of device */
30930- if (from >= this->totlen)
30931+ if (from >= this->totlen || !len)
30932 return -EINVAL;
30933
30934 /* Don't allow a single read to cross a 512-byte block boundary */
30935diff -urNp linux-3.1.1/drivers/mtd/ftl.c linux-3.1.1/drivers/mtd/ftl.c
30936--- linux-3.1.1/drivers/mtd/ftl.c 2011-11-11 15:19:27.000000000 -0500
30937+++ linux-3.1.1/drivers/mtd/ftl.c 2011-11-16 18:40:10.000000000 -0500
30938@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
30939 loff_t offset;
30940 uint16_t srcunitswap = cpu_to_le16(srcunit);
30941
30942+ pax_track_stack();
30943+
30944 eun = &part->EUNInfo[srcunit];
30945 xfer = &part->XferInfo[xferunit];
30946 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
30947diff -urNp linux-3.1.1/drivers/mtd/inftlcore.c linux-3.1.1/drivers/mtd/inftlcore.c
30948--- linux-3.1.1/drivers/mtd/inftlcore.c 2011-11-11 15:19:27.000000000 -0500
30949+++ linux-3.1.1/drivers/mtd/inftlcore.c 2011-11-16 18:40:10.000000000 -0500
30950@@ -259,6 +259,8 @@ static u16 INFTL_foldchain(struct INFTLr
30951 struct inftl_oob oob;
30952 size_t retlen;
30953
30954+ pax_track_stack();
30955+
30956 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
30957 "pending=%d)\n", inftl, thisVUC, pendingblock);
30958
30959diff -urNp linux-3.1.1/drivers/mtd/inftlmount.c linux-3.1.1/drivers/mtd/inftlmount.c
30960--- linux-3.1.1/drivers/mtd/inftlmount.c 2011-11-11 15:19:27.000000000 -0500
30961+++ linux-3.1.1/drivers/mtd/inftlmount.c 2011-11-16 18:40:10.000000000 -0500
30962@@ -53,6 +53,8 @@ static int find_boot_record(struct INFTL
30963 struct INFTLPartition *ip;
30964 size_t retlen;
30965
30966+ pax_track_stack();
30967+
30968 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
30969
30970 /*
30971diff -urNp linux-3.1.1/drivers/mtd/lpddr/qinfo_probe.c linux-3.1.1/drivers/mtd/lpddr/qinfo_probe.c
30972--- linux-3.1.1/drivers/mtd/lpddr/qinfo_probe.c 2011-11-11 15:19:27.000000000 -0500
30973+++ linux-3.1.1/drivers/mtd/lpddr/qinfo_probe.c 2011-11-16 18:40:10.000000000 -0500
30974@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
30975 {
30976 map_word pfow_val[4];
30977
30978+ pax_track_stack();
30979+
30980 /* Check identification string */
30981 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
30982 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
30983diff -urNp linux-3.1.1/drivers/mtd/mtdchar.c linux-3.1.1/drivers/mtd/mtdchar.c
30984--- linux-3.1.1/drivers/mtd/mtdchar.c 2011-11-11 15:19:27.000000000 -0500
30985+++ linux-3.1.1/drivers/mtd/mtdchar.c 2011-11-16 18:40:10.000000000 -0500
30986@@ -554,6 +554,8 @@ static int mtd_ioctl(struct file *file,
30987 u_long size;
30988 struct mtd_info_user info;
30989
30990+ pax_track_stack();
30991+
30992 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
30993
30994 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
30995diff -urNp linux-3.1.1/drivers/mtd/nand/denali.c linux-3.1.1/drivers/mtd/nand/denali.c
30996--- linux-3.1.1/drivers/mtd/nand/denali.c 2011-11-11 15:19:27.000000000 -0500
30997+++ linux-3.1.1/drivers/mtd/nand/denali.c 2011-11-16 18:39:07.000000000 -0500
30998@@ -26,6 +26,7 @@
30999 #include <linux/pci.h>
31000 #include <linux/mtd/mtd.h>
31001 #include <linux/module.h>
31002+#include <linux/slab.h>
31003
31004 #include "denali.h"
31005
31006diff -urNp linux-3.1.1/drivers/mtd/nftlcore.c linux-3.1.1/drivers/mtd/nftlcore.c
31007--- linux-3.1.1/drivers/mtd/nftlcore.c 2011-11-11 15:19:27.000000000 -0500
31008+++ linux-3.1.1/drivers/mtd/nftlcore.c 2011-11-16 18:40:10.000000000 -0500
31009@@ -264,6 +264,8 @@ static u16 NFTL_foldchain (struct NFTLre
31010 int inplace = 1;
31011 size_t retlen;
31012
31013+ pax_track_stack();
31014+
31015 memset(BlockMap, 0xff, sizeof(BlockMap));
31016 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
31017
31018diff -urNp linux-3.1.1/drivers/mtd/nftlmount.c linux-3.1.1/drivers/mtd/nftlmount.c
31019--- linux-3.1.1/drivers/mtd/nftlmount.c 2011-11-11 15:19:27.000000000 -0500
31020+++ linux-3.1.1/drivers/mtd/nftlmount.c 2011-11-16 18:40:10.000000000 -0500
31021@@ -24,6 +24,7 @@
31022 #include <asm/errno.h>
31023 #include <linux/delay.h>
31024 #include <linux/slab.h>
31025+#include <linux/sched.h>
31026 #include <linux/mtd/mtd.h>
31027 #include <linux/mtd/nand.h>
31028 #include <linux/mtd/nftl.h>
31029@@ -45,6 +46,8 @@ static int find_boot_record(struct NFTLr
31030 struct mtd_info *mtd = nftl->mbd.mtd;
31031 unsigned int i;
31032
31033+ pax_track_stack();
31034+
31035 /* Assume logical EraseSize == physical erasesize for starting the scan.
31036 We'll sort it out later if we find a MediaHeader which says otherwise */
31037 /* Actually, we won't. The new DiskOnChip driver has already scanned
31038diff -urNp linux-3.1.1/drivers/mtd/ubi/build.c linux-3.1.1/drivers/mtd/ubi/build.c
31039--- linux-3.1.1/drivers/mtd/ubi/build.c 2011-11-11 15:19:27.000000000 -0500
31040+++ linux-3.1.1/drivers/mtd/ubi/build.c 2011-11-16 18:39:07.000000000 -0500
31041@@ -1311,7 +1311,7 @@ module_exit(ubi_exit);
31042 static int __init bytes_str_to_int(const char *str)
31043 {
31044 char *endp;
31045- unsigned long result;
31046+ unsigned long result, scale = 1;
31047
31048 result = simple_strtoul(str, &endp, 0);
31049 if (str == endp || result >= INT_MAX) {
31050@@ -1322,11 +1322,11 @@ static int __init bytes_str_to_int(const
31051
31052 switch (*endp) {
31053 case 'G':
31054- result *= 1024;
31055+ scale *= 1024;
31056 case 'M':
31057- result *= 1024;
31058+ scale *= 1024;
31059 case 'K':
31060- result *= 1024;
31061+ scale *= 1024;
31062 if (endp[1] == 'i' && endp[2] == 'B')
31063 endp += 2;
31064 case '\0':
31065@@ -1337,7 +1337,13 @@ static int __init bytes_str_to_int(const
31066 return -EINVAL;
31067 }
31068
31069- return result;
31070+ if ((intoverflow_t)result*scale >= INT_MAX) {
31071+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
31072+ str);
31073+ return -EINVAL;
31074+ }
31075+
31076+ return result*scale;
31077 }
31078
31079 /**
31080diff -urNp linux-3.1.1/drivers/net/atlx/atl2.c linux-3.1.1/drivers/net/atlx/atl2.c
31081--- linux-3.1.1/drivers/net/atlx/atl2.c 2011-11-11 15:19:27.000000000 -0500
31082+++ linux-3.1.1/drivers/net/atlx/atl2.c 2011-11-16 18:39:07.000000000 -0500
31083@@ -2857,7 +2857,7 @@ static void atl2_force_ps(struct atl2_hw
31084 */
31085
31086 #define ATL2_PARAM(X, desc) \
31087- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
31088+ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
31089 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
31090 MODULE_PARM_DESC(X, desc);
31091 #else
31092diff -urNp linux-3.1.1/drivers/net/bna/bfa_ioc_ct.c linux-3.1.1/drivers/net/bna/bfa_ioc_ct.c
31093--- linux-3.1.1/drivers/net/bna/bfa_ioc_ct.c 2011-11-11 15:19:27.000000000 -0500
31094+++ linux-3.1.1/drivers/net/bna/bfa_ioc_ct.c 2011-11-16 18:39:07.000000000 -0500
31095@@ -48,7 +48,21 @@ static void bfa_ioc_ct_sync_ack(struct b
31096 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
31097 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
31098
31099-static struct bfa_ioc_hwif nw_hwif_ct;
31100+static struct bfa_ioc_hwif nw_hwif_ct = {
31101+ .ioc_pll_init = bfa_ioc_ct_pll_init,
31102+ .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
31103+ .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
31104+ .ioc_reg_init = bfa_ioc_ct_reg_init,
31105+ .ioc_map_port = bfa_ioc_ct_map_port,
31106+ .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
31107+ .ioc_notify_fail = bfa_ioc_ct_notify_fail,
31108+ .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
31109+ .ioc_sync_start = bfa_ioc_ct_sync_start,
31110+ .ioc_sync_join = bfa_ioc_ct_sync_join,
31111+ .ioc_sync_leave = bfa_ioc_ct_sync_leave,
31112+ .ioc_sync_ack = bfa_ioc_ct_sync_ack,
31113+ .ioc_sync_complete = bfa_ioc_ct_sync_complete
31114+};
31115
31116 /**
31117 * Called from bfa_ioc_attach() to map asic specific calls.
31118@@ -56,20 +70,6 @@ static struct bfa_ioc_hwif nw_hwif_ct;
31119 void
31120 bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
31121 {
31122- nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
31123- nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
31124- nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
31125- nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
31126- nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
31127- nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
31128- nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
31129- nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
31130- nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
31131- nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
31132- nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
31133- nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
31134- nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
31135-
31136 ioc->ioc_hwif = &nw_hwif_ct;
31137 }
31138
31139diff -urNp linux-3.1.1/drivers/net/bna/bnad.c linux-3.1.1/drivers/net/bna/bnad.c
31140--- linux-3.1.1/drivers/net/bna/bnad.c 2011-11-11 15:19:27.000000000 -0500
31141+++ linux-3.1.1/drivers/net/bna/bnad.c 2011-11-16 18:39:07.000000000 -0500
31142@@ -1673,7 +1673,14 @@ bnad_setup_tx(struct bnad *bnad, uint tx
31143 struct bna_intr_info *intr_info =
31144 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
31145 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
31146- struct bna_tx_event_cbfn tx_cbfn;
31147+ static struct bna_tx_event_cbfn tx_cbfn = {
31148+ /* Initialize the tx event handlers */
31149+ .tcb_setup_cbfn = bnad_cb_tcb_setup,
31150+ .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
31151+ .tx_stall_cbfn = bnad_cb_tx_stall,
31152+ .tx_resume_cbfn = bnad_cb_tx_resume,
31153+ .tx_cleanup_cbfn = bnad_cb_tx_cleanup
31154+ };
31155 struct bna_tx *tx;
31156 unsigned long flags;
31157
31158@@ -1682,13 +1689,6 @@ bnad_setup_tx(struct bnad *bnad, uint tx
31159 tx_config->txq_depth = bnad->txq_depth;
31160 tx_config->tx_type = BNA_TX_T_REGULAR;
31161
31162- /* Initialize the tx event handlers */
31163- tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
31164- tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
31165- tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
31166- tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
31167- tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
31168-
31169 /* Get BNA's resource requirement for one tx object */
31170 spin_lock_irqsave(&bnad->bna_lock, flags);
31171 bna_tx_res_req(bnad->num_txq_per_tx,
31172@@ -1819,21 +1819,21 @@ bnad_setup_rx(struct bnad *bnad, uint rx
31173 struct bna_intr_info *intr_info =
31174 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
31175 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
31176- struct bna_rx_event_cbfn rx_cbfn;
31177+ static struct bna_rx_event_cbfn rx_cbfn = {
31178+ /* Initialize the Rx event handlers */
31179+ .rcb_setup_cbfn = bnad_cb_rcb_setup,
31180+ .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
31181+ .ccb_setup_cbfn = bnad_cb_ccb_setup,
31182+ .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
31183+ .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
31184+ .rx_post_cbfn = bnad_cb_rx_post
31185+ };
31186 struct bna_rx *rx;
31187 unsigned long flags;
31188
31189 /* Initialize the Rx object configuration */
31190 bnad_init_rx_config(bnad, rx_config);
31191
31192- /* Initialize the Rx event handlers */
31193- rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
31194- rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
31195- rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
31196- rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
31197- rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
31198- rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
31199-
31200 /* Get BNA's resource requirement for one Rx object */
31201 spin_lock_irqsave(&bnad->bna_lock, flags);
31202 bna_rx_res_req(rx_config, res_info);
31203diff -urNp linux-3.1.1/drivers/net/bnx2.c linux-3.1.1/drivers/net/bnx2.c
31204--- linux-3.1.1/drivers/net/bnx2.c 2011-11-11 15:19:27.000000000 -0500
31205+++ linux-3.1.1/drivers/net/bnx2.c 2011-11-16 18:40:11.000000000 -0500
31206@@ -5877,6 +5877,8 @@ bnx2_test_nvram(struct bnx2 *bp)
31207 int rc = 0;
31208 u32 magic, csum;
31209
31210+ pax_track_stack();
31211+
31212 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
31213 goto test_nvram_done;
31214
31215diff -urNp linux-3.1.1/drivers/net/bnx2x/bnx2x_ethtool.c linux-3.1.1/drivers/net/bnx2x/bnx2x_ethtool.c
31216--- linux-3.1.1/drivers/net/bnx2x/bnx2x_ethtool.c 2011-11-11 15:19:27.000000000 -0500
31217+++ linux-3.1.1/drivers/net/bnx2x/bnx2x_ethtool.c 2011-11-16 18:40:11.000000000 -0500
31218@@ -1943,6 +1943,8 @@ static int bnx2x_test_nvram(struct bnx2x
31219 int i, rc;
31220 u32 magic, crc;
31221
31222+ pax_track_stack();
31223+
31224 if (BP_NOMCP(bp))
31225 return 0;
31226
31227diff -urNp linux-3.1.1/drivers/net/bnx2x/bnx2x_sp.h linux-3.1.1/drivers/net/bnx2x/bnx2x_sp.h
31228--- linux-3.1.1/drivers/net/bnx2x/bnx2x_sp.h 2011-11-11 15:19:27.000000000 -0500
31229+++ linux-3.1.1/drivers/net/bnx2x/bnx2x_sp.h 2011-11-16 18:39:07.000000000 -0500
31230@@ -449,7 +449,7 @@ struct bnx2x_rx_mode_obj {
31231
31232 int (*wait_comp)(struct bnx2x *bp,
31233 struct bnx2x_rx_mode_ramrod_params *p);
31234-};
31235+} __no_const;
31236
31237 /********************** Set multicast group ***********************************/
31238
31239diff -urNp linux-3.1.1/drivers/net/cxgb3/l2t.h linux-3.1.1/drivers/net/cxgb3/l2t.h
31240--- linux-3.1.1/drivers/net/cxgb3/l2t.h 2011-11-11 15:19:27.000000000 -0500
31241+++ linux-3.1.1/drivers/net/cxgb3/l2t.h 2011-11-16 18:39:07.000000000 -0500
31242@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)
31243 */
31244 struct l2t_skb_cb {
31245 arp_failure_handler_func arp_failure_handler;
31246-};
31247+} __no_const;
31248
31249 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
31250
31251diff -urNp linux-3.1.1/drivers/net/cxgb4/cxgb4_main.c linux-3.1.1/drivers/net/cxgb4/cxgb4_main.c
31252--- linux-3.1.1/drivers/net/cxgb4/cxgb4_main.c 2011-11-11 15:19:27.000000000 -0500
31253+++ linux-3.1.1/drivers/net/cxgb4/cxgb4_main.c 2011-11-16 18:40:22.000000000 -0500
31254@@ -3396,6 +3396,8 @@ static int __devinit enable_msix(struct
31255 unsigned int nchan = adap->params.nports;
31256 struct msix_entry entries[MAX_INGQ + 1];
31257
31258+ pax_track_stack();
31259+
31260 for (i = 0; i < ARRAY_SIZE(entries); ++i)
31261 entries[i].entry = i;
31262
31263diff -urNp linux-3.1.1/drivers/net/cxgb4/t4_hw.c linux-3.1.1/drivers/net/cxgb4/t4_hw.c
31264--- linux-3.1.1/drivers/net/cxgb4/t4_hw.c 2011-11-11 15:19:27.000000000 -0500
31265+++ linux-3.1.1/drivers/net/cxgb4/t4_hw.c 2011-11-16 18:40:22.000000000 -0500
31266@@ -362,6 +362,8 @@ static int get_vpd_params(struct adapter
31267 u8 vpd[VPD_LEN], csum;
31268 unsigned int vpdr_len, kw_offset, id_len;
31269
31270+ pax_track_stack();
31271+
31272 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
31273 if (ret < 0)
31274 return ret;
31275diff -urNp linux-3.1.1/drivers/net/e1000e/82571.c linux-3.1.1/drivers/net/e1000e/82571.c
31276--- linux-3.1.1/drivers/net/e1000e/82571.c 2011-11-11 15:19:27.000000000 -0500
31277+++ linux-3.1.1/drivers/net/e1000e/82571.c 2011-11-16 18:39:07.000000000 -0500
31278@@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(s
31279 {
31280 struct e1000_hw *hw = &adapter->hw;
31281 struct e1000_mac_info *mac = &hw->mac;
31282- struct e1000_mac_operations *func = &mac->ops;
31283+ e1000_mac_operations_no_const *func = &mac->ops;
31284 u32 swsm = 0;
31285 u32 swsm2 = 0;
31286 bool force_clear_smbi = false;
31287diff -urNp linux-3.1.1/drivers/net/e1000e/es2lan.c linux-3.1.1/drivers/net/e1000e/es2lan.c
31288--- linux-3.1.1/drivers/net/e1000e/es2lan.c 2011-11-11 15:19:27.000000000 -0500
31289+++ linux-3.1.1/drivers/net/e1000e/es2lan.c 2011-11-16 18:39:07.000000000 -0500
31290@@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es
31291 {
31292 struct e1000_hw *hw = &adapter->hw;
31293 struct e1000_mac_info *mac = &hw->mac;
31294- struct e1000_mac_operations *func = &mac->ops;
31295+ e1000_mac_operations_no_const *func = &mac->ops;
31296
31297 /* Set media type */
31298 switch (adapter->pdev->device) {
31299diff -urNp linux-3.1.1/drivers/net/e1000e/hw.h linux-3.1.1/drivers/net/e1000e/hw.h
31300--- linux-3.1.1/drivers/net/e1000e/hw.h 2011-11-11 15:19:27.000000000 -0500
31301+++ linux-3.1.1/drivers/net/e1000e/hw.h 2011-11-16 18:39:07.000000000 -0500
31302@@ -778,6 +778,7 @@ struct e1000_mac_operations {
31303 void (*write_vfta)(struct e1000_hw *, u32, u32);
31304 s32 (*read_mac_addr)(struct e1000_hw *);
31305 };
31306+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
31307
31308 /*
31309 * When to use various PHY register access functions:
31310@@ -818,6 +819,7 @@ struct e1000_phy_operations {
31311 void (*power_up)(struct e1000_hw *);
31312 void (*power_down)(struct e1000_hw *);
31313 };
31314+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
31315
31316 /* Function pointers for the NVM. */
31317 struct e1000_nvm_operations {
31318@@ -829,9 +831,10 @@ struct e1000_nvm_operations {
31319 s32 (*validate)(struct e1000_hw *);
31320 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
31321 };
31322+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
31323
31324 struct e1000_mac_info {
31325- struct e1000_mac_operations ops;
31326+ e1000_mac_operations_no_const ops;
31327 u8 addr[ETH_ALEN];
31328 u8 perm_addr[ETH_ALEN];
31329
31330@@ -872,7 +875,7 @@ struct e1000_mac_info {
31331 };
31332
31333 struct e1000_phy_info {
31334- struct e1000_phy_operations ops;
31335+ e1000_phy_operations_no_const ops;
31336
31337 enum e1000_phy_type type;
31338
31339@@ -906,7 +909,7 @@ struct e1000_phy_info {
31340 };
31341
31342 struct e1000_nvm_info {
31343- struct e1000_nvm_operations ops;
31344+ e1000_nvm_operations_no_const ops;
31345
31346 enum e1000_nvm_type type;
31347 enum e1000_nvm_override override;
31348diff -urNp linux-3.1.1/drivers/net/fealnx.c linux-3.1.1/drivers/net/fealnx.c
31349--- linux-3.1.1/drivers/net/fealnx.c 2011-11-11 15:19:27.000000000 -0500
31350+++ linux-3.1.1/drivers/net/fealnx.c 2011-11-16 18:39:07.000000000 -0500
31351@@ -150,7 +150,7 @@ struct chip_info {
31352 int flags;
31353 };
31354
31355-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
31356+static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
31357 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
31358 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
31359 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
31360diff -urNp linux-3.1.1/drivers/net/hamradio/6pack.c linux-3.1.1/drivers/net/hamradio/6pack.c
31361--- linux-3.1.1/drivers/net/hamradio/6pack.c 2011-11-11 15:19:27.000000000 -0500
31362+++ linux-3.1.1/drivers/net/hamradio/6pack.c 2011-11-16 18:40:22.000000000 -0500
31363@@ -463,6 +463,8 @@ static void sixpack_receive_buf(struct t
31364 unsigned char buf[512];
31365 int count1;
31366
31367+ pax_track_stack();
31368+
31369 if (!count)
31370 return;
31371
31372diff -urNp linux-3.1.1/drivers/net/igb/e1000_hw.h linux-3.1.1/drivers/net/igb/e1000_hw.h
31373--- linux-3.1.1/drivers/net/igb/e1000_hw.h 2011-11-11 15:19:27.000000000 -0500
31374+++ linux-3.1.1/drivers/net/igb/e1000_hw.h 2011-11-16 18:39:07.000000000 -0500
31375@@ -314,6 +314,7 @@ struct e1000_mac_operations {
31376 s32 (*read_mac_addr)(struct e1000_hw *);
31377 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
31378 };
31379+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
31380
31381 struct e1000_phy_operations {
31382 s32 (*acquire)(struct e1000_hw *);
31383@@ -330,6 +331,7 @@ struct e1000_phy_operations {
31384 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
31385 s32 (*write_reg)(struct e1000_hw *, u32, u16);
31386 };
31387+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
31388
31389 struct e1000_nvm_operations {
31390 s32 (*acquire)(struct e1000_hw *);
31391@@ -339,6 +341,7 @@ struct e1000_nvm_operations {
31392 s32 (*update)(struct e1000_hw *);
31393 s32 (*validate)(struct e1000_hw *);
31394 };
31395+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
31396
31397 struct e1000_info {
31398 s32 (*get_invariants)(struct e1000_hw *);
31399@@ -350,7 +353,7 @@ struct e1000_info {
31400 extern const struct e1000_info e1000_82575_info;
31401
31402 struct e1000_mac_info {
31403- struct e1000_mac_operations ops;
31404+ e1000_mac_operations_no_const ops;
31405
31406 u8 addr[6];
31407 u8 perm_addr[6];
31408@@ -388,7 +391,7 @@ struct e1000_mac_info {
31409 };
31410
31411 struct e1000_phy_info {
31412- struct e1000_phy_operations ops;
31413+ e1000_phy_operations_no_const ops;
31414
31415 enum e1000_phy_type type;
31416
31417@@ -423,7 +426,7 @@ struct e1000_phy_info {
31418 };
31419
31420 struct e1000_nvm_info {
31421- struct e1000_nvm_operations ops;
31422+ e1000_nvm_operations_no_const ops;
31423 enum e1000_nvm_type type;
31424 enum e1000_nvm_override override;
31425
31426@@ -468,6 +471,7 @@ struct e1000_mbx_operations {
31427 s32 (*check_for_ack)(struct e1000_hw *, u16);
31428 s32 (*check_for_rst)(struct e1000_hw *, u16);
31429 };
31430+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
31431
31432 struct e1000_mbx_stats {
31433 u32 msgs_tx;
31434@@ -479,7 +483,7 @@ struct e1000_mbx_stats {
31435 };
31436
31437 struct e1000_mbx_info {
31438- struct e1000_mbx_operations ops;
31439+ e1000_mbx_operations_no_const ops;
31440 struct e1000_mbx_stats stats;
31441 u32 timeout;
31442 u32 usec_delay;
31443diff -urNp linux-3.1.1/drivers/net/igbvf/vf.h linux-3.1.1/drivers/net/igbvf/vf.h
31444--- linux-3.1.1/drivers/net/igbvf/vf.h 2011-11-11 15:19:27.000000000 -0500
31445+++ linux-3.1.1/drivers/net/igbvf/vf.h 2011-11-16 18:39:07.000000000 -0500
31446@@ -189,9 +189,10 @@ struct e1000_mac_operations {
31447 s32 (*read_mac_addr)(struct e1000_hw *);
31448 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
31449 };
31450+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
31451
31452 struct e1000_mac_info {
31453- struct e1000_mac_operations ops;
31454+ e1000_mac_operations_no_const ops;
31455 u8 addr[6];
31456 u8 perm_addr[6];
31457
31458@@ -213,6 +214,7 @@ struct e1000_mbx_operations {
31459 s32 (*check_for_ack)(struct e1000_hw *);
31460 s32 (*check_for_rst)(struct e1000_hw *);
31461 };
31462+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
31463
31464 struct e1000_mbx_stats {
31465 u32 msgs_tx;
31466@@ -224,7 +226,7 @@ struct e1000_mbx_stats {
31467 };
31468
31469 struct e1000_mbx_info {
31470- struct e1000_mbx_operations ops;
31471+ e1000_mbx_operations_no_const ops;
31472 struct e1000_mbx_stats stats;
31473 u32 timeout;
31474 u32 usec_delay;
31475diff -urNp linux-3.1.1/drivers/net/ixgb/ixgb_main.c linux-3.1.1/drivers/net/ixgb/ixgb_main.c
31476--- linux-3.1.1/drivers/net/ixgb/ixgb_main.c 2011-11-11 15:19:27.000000000 -0500
31477+++ linux-3.1.1/drivers/net/ixgb/ixgb_main.c 2011-11-16 18:40:22.000000000 -0500
31478@@ -1070,6 +1070,8 @@ ixgb_set_multi(struct net_device *netdev
31479 u32 rctl;
31480 int i;
31481
31482+ pax_track_stack();
31483+
31484 /* Check for Promiscuous and All Multicast modes */
31485
31486 rctl = IXGB_READ_REG(hw, RCTL);
31487diff -urNp linux-3.1.1/drivers/net/ixgb/ixgb_param.c linux-3.1.1/drivers/net/ixgb/ixgb_param.c
31488--- linux-3.1.1/drivers/net/ixgb/ixgb_param.c 2011-11-11 15:19:27.000000000 -0500
31489+++ linux-3.1.1/drivers/net/ixgb/ixgb_param.c 2011-11-16 18:40:22.000000000 -0500
31490@@ -261,6 +261,9 @@ void __devinit
31491 ixgb_check_options(struct ixgb_adapter *adapter)
31492 {
31493 int bd = adapter->bd_number;
31494+
31495+ pax_track_stack();
31496+
31497 if (bd >= IXGB_MAX_NIC) {
31498 pr_notice("Warning: no configuration for board #%i\n", bd);
31499 pr_notice("Using defaults for all values\n");
31500diff -urNp linux-3.1.1/drivers/net/ixgbe/ixgbe_type.h linux-3.1.1/drivers/net/ixgbe/ixgbe_type.h
31501--- linux-3.1.1/drivers/net/ixgbe/ixgbe_type.h 2011-11-11 15:19:27.000000000 -0500
31502+++ linux-3.1.1/drivers/net/ixgbe/ixgbe_type.h 2011-11-16 18:39:07.000000000 -0500
31503@@ -2642,6 +2642,7 @@ struct ixgbe_eeprom_operations {
31504 s32 (*update_checksum)(struct ixgbe_hw *);
31505 u16 (*calc_checksum)(struct ixgbe_hw *);
31506 };
31507+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
31508
31509 struct ixgbe_mac_operations {
31510 s32 (*init_hw)(struct ixgbe_hw *);
31511@@ -2703,6 +2704,7 @@ struct ixgbe_mac_operations {
31512 /* Manageability interface */
31513 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
31514 };
31515+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
31516
31517 struct ixgbe_phy_operations {
31518 s32 (*identify)(struct ixgbe_hw *);
31519@@ -2722,9 +2724,10 @@ struct ixgbe_phy_operations {
31520 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
31521 s32 (*check_overtemp)(struct ixgbe_hw *);
31522 };
31523+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
31524
31525 struct ixgbe_eeprom_info {
31526- struct ixgbe_eeprom_operations ops;
31527+ ixgbe_eeprom_operations_no_const ops;
31528 enum ixgbe_eeprom_type type;
31529 u32 semaphore_delay;
31530 u16 word_size;
31531@@ -2734,7 +2737,7 @@ struct ixgbe_eeprom_info {
31532
31533 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
31534 struct ixgbe_mac_info {
31535- struct ixgbe_mac_operations ops;
31536+ ixgbe_mac_operations_no_const ops;
31537 enum ixgbe_mac_type type;
31538 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
31539 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
31540@@ -2762,7 +2765,7 @@ struct ixgbe_mac_info {
31541 };
31542
31543 struct ixgbe_phy_info {
31544- struct ixgbe_phy_operations ops;
31545+ ixgbe_phy_operations_no_const ops;
31546 struct mdio_if_info mdio;
31547 enum ixgbe_phy_type type;
31548 u32 id;
31549@@ -2790,6 +2793,7 @@ struct ixgbe_mbx_operations {
31550 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
31551 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
31552 };
31553+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
31554
31555 struct ixgbe_mbx_stats {
31556 u32 msgs_tx;
31557@@ -2801,7 +2805,7 @@ struct ixgbe_mbx_stats {
31558 };
31559
31560 struct ixgbe_mbx_info {
31561- struct ixgbe_mbx_operations ops;
31562+ ixgbe_mbx_operations_no_const ops;
31563 struct ixgbe_mbx_stats stats;
31564 u32 timeout;
31565 u32 usec_delay;
31566diff -urNp linux-3.1.1/drivers/net/ixgbevf/vf.h linux-3.1.1/drivers/net/ixgbevf/vf.h
31567--- linux-3.1.1/drivers/net/ixgbevf/vf.h 2011-11-11 15:19:27.000000000 -0500
31568+++ linux-3.1.1/drivers/net/ixgbevf/vf.h 2011-11-16 18:39:07.000000000 -0500
31569@@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
31570 s32 (*clear_vfta)(struct ixgbe_hw *);
31571 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
31572 };
31573+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
31574
31575 enum ixgbe_mac_type {
31576 ixgbe_mac_unknown = 0,
31577@@ -79,7 +80,7 @@ enum ixgbe_mac_type {
31578 };
31579
31580 struct ixgbe_mac_info {
31581- struct ixgbe_mac_operations ops;
31582+ ixgbe_mac_operations_no_const ops;
31583 u8 addr[6];
31584 u8 perm_addr[6];
31585
31586@@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
31587 s32 (*check_for_ack)(struct ixgbe_hw *);
31588 s32 (*check_for_rst)(struct ixgbe_hw *);
31589 };
31590+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
31591
31592 struct ixgbe_mbx_stats {
31593 u32 msgs_tx;
31594@@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
31595 };
31596
31597 struct ixgbe_mbx_info {
31598- struct ixgbe_mbx_operations ops;
31599+ ixgbe_mbx_operations_no_const ops;
31600 struct ixgbe_mbx_stats stats;
31601 u32 timeout;
31602 u32 udelay;
31603diff -urNp linux-3.1.1/drivers/net/ksz884x.c linux-3.1.1/drivers/net/ksz884x.c
31604--- linux-3.1.1/drivers/net/ksz884x.c 2011-11-11 15:19:27.000000000 -0500
31605+++ linux-3.1.1/drivers/net/ksz884x.c 2011-11-16 18:40:22.000000000 -0500
31606@@ -6533,6 +6533,8 @@ static void netdev_get_ethtool_stats(str
31607 int rc;
31608 u64 counter[TOTAL_PORT_COUNTER_NUM];
31609
31610+ pax_track_stack();
31611+
31612 mutex_lock(&hw_priv->lock);
31613 n = SWITCH_PORT_NUM;
31614 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
31615diff -urNp linux-3.1.1/drivers/net/mlx4/main.c linux-3.1.1/drivers/net/mlx4/main.c
31616--- linux-3.1.1/drivers/net/mlx4/main.c 2011-11-11 15:19:27.000000000 -0500
31617+++ linux-3.1.1/drivers/net/mlx4/main.c 2011-11-16 18:40:22.000000000 -0500
31618@@ -40,6 +40,7 @@
31619 #include <linux/dma-mapping.h>
31620 #include <linux/slab.h>
31621 #include <linux/io-mapping.h>
31622+#include <linux/sched.h>
31623
31624 #include <linux/mlx4/device.h>
31625 #include <linux/mlx4/doorbell.h>
31626@@ -762,6 +763,8 @@ static int mlx4_init_hca(struct mlx4_dev
31627 u64 icm_size;
31628 int err;
31629
31630+ pax_track_stack();
31631+
31632 err = mlx4_QUERY_FW(dev);
31633 if (err) {
31634 if (err == -EACCES)
31635diff -urNp linux-3.1.1/drivers/net/niu.c linux-3.1.1/drivers/net/niu.c
31636--- linux-3.1.1/drivers/net/niu.c 2011-11-11 15:19:27.000000000 -0500
31637+++ linux-3.1.1/drivers/net/niu.c 2011-11-16 18:40:22.000000000 -0500
31638@@ -9061,6 +9061,8 @@ static void __devinit niu_try_msix(struc
31639 int i, num_irqs, err;
31640 u8 first_ldg;
31641
31642+ pax_track_stack();
31643+
31644 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
31645 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
31646 ldg_num_map[i] = first_ldg + i;
31647diff -urNp linux-3.1.1/drivers/net/pcnet32.c linux-3.1.1/drivers/net/pcnet32.c
31648--- linux-3.1.1/drivers/net/pcnet32.c 2011-11-11 15:19:27.000000000 -0500
31649+++ linux-3.1.1/drivers/net/pcnet32.c 2011-11-16 18:39:07.000000000 -0500
31650@@ -270,7 +270,7 @@ struct pcnet32_private {
31651 struct sk_buff **rx_skbuff;
31652 dma_addr_t *tx_dma_addr;
31653 dma_addr_t *rx_dma_addr;
31654- struct pcnet32_access a;
31655+ struct pcnet32_access *a;
31656 spinlock_t lock; /* Guard lock */
31657 unsigned int cur_rx, cur_tx; /* The next free ring entry */
31658 unsigned int rx_ring_size; /* current rx ring size */
31659@@ -460,9 +460,9 @@ static void pcnet32_netif_start(struct n
31660 u16 val;
31661
31662 netif_wake_queue(dev);
31663- val = lp->a.read_csr(ioaddr, CSR3);
31664+ val = lp->a->read_csr(ioaddr, CSR3);
31665 val &= 0x00ff;
31666- lp->a.write_csr(ioaddr, CSR3, val);
31667+ lp->a->write_csr(ioaddr, CSR3, val);
31668 napi_enable(&lp->napi);
31669 }
31670
31671@@ -730,7 +730,7 @@ static u32 pcnet32_get_link(struct net_d
31672 r = mii_link_ok(&lp->mii_if);
31673 } else if (lp->chip_version >= PCNET32_79C970A) {
31674 ulong ioaddr = dev->base_addr; /* card base I/O address */
31675- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
31676+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
31677 } else { /* can not detect link on really old chips */
31678 r = 1;
31679 }
31680@@ -792,7 +792,7 @@ static int pcnet32_set_ringparam(struct
31681 pcnet32_netif_stop(dev);
31682
31683 spin_lock_irqsave(&lp->lock, flags);
31684- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
31685+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
31686
31687 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
31688
31689@@ -868,7 +868,7 @@ static void pcnet32_ethtool_test(struct
31690 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
31691 {
31692 struct pcnet32_private *lp = netdev_priv(dev);
31693- struct pcnet32_access *a = &lp->a; /* access to registers */
31694+ struct pcnet32_access *a = lp->a; /* access to registers */
31695 ulong ioaddr = dev->base_addr; /* card base I/O address */
31696 struct sk_buff *skb; /* sk buff */
31697 int x, i; /* counters */
31698@@ -888,21 +888,21 @@ static int pcnet32_loopback_test(struct
31699 pcnet32_netif_stop(dev);
31700
31701 spin_lock_irqsave(&lp->lock, flags);
31702- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
31703+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
31704
31705 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
31706
31707 /* Reset the PCNET32 */
31708- lp->a.reset(ioaddr);
31709- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31710+ lp->a->reset(ioaddr);
31711+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31712
31713 /* switch pcnet32 to 32bit mode */
31714- lp->a.write_bcr(ioaddr, 20, 2);
31715+ lp->a->write_bcr(ioaddr, 20, 2);
31716
31717 /* purge & init rings but don't actually restart */
31718 pcnet32_restart(dev, 0x0000);
31719
31720- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
31721+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
31722
31723 /* Initialize Transmit buffers. */
31724 size = data_len + 15;
31725@@ -947,10 +947,10 @@ static int pcnet32_loopback_test(struct
31726
31727 /* set int loopback in CSR15 */
31728 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
31729- lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
31730+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
31731
31732 teststatus = cpu_to_le16(0x8000);
31733- lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
31734+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
31735
31736 /* Check status of descriptors */
31737 for (x = 0; x < numbuffs; x++) {
31738@@ -969,7 +969,7 @@ static int pcnet32_loopback_test(struct
31739 }
31740 }
31741
31742- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
31743+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
31744 wmb();
31745 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
31746 netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
31747@@ -1015,7 +1015,7 @@ clean_up:
31748 pcnet32_restart(dev, CSR0_NORMAL);
31749 } else {
31750 pcnet32_purge_rx_ring(dev);
31751- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
31752+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
31753 }
31754 spin_unlock_irqrestore(&lp->lock, flags);
31755
31756@@ -1026,7 +1026,7 @@ static int pcnet32_set_phys_id(struct ne
31757 enum ethtool_phys_id_state state)
31758 {
31759 struct pcnet32_private *lp = netdev_priv(dev);
31760- struct pcnet32_access *a = &lp->a;
31761+ struct pcnet32_access *a = lp->a;
31762 ulong ioaddr = dev->base_addr;
31763 unsigned long flags;
31764 int i;
31765@@ -1067,7 +1067,7 @@ static int pcnet32_suspend(struct net_de
31766 {
31767 int csr5;
31768 struct pcnet32_private *lp = netdev_priv(dev);
31769- struct pcnet32_access *a = &lp->a;
31770+ struct pcnet32_access *a = lp->a;
31771 ulong ioaddr = dev->base_addr;
31772 int ticks;
31773
31774@@ -1324,8 +1324,8 @@ static int pcnet32_poll(struct napi_stru
31775 spin_lock_irqsave(&lp->lock, flags);
31776 if (pcnet32_tx(dev)) {
31777 /* reset the chip to clear the error condition, then restart */
31778- lp->a.reset(ioaddr);
31779- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31780+ lp->a->reset(ioaddr);
31781+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31782 pcnet32_restart(dev, CSR0_START);
31783 netif_wake_queue(dev);
31784 }
31785@@ -1337,12 +1337,12 @@ static int pcnet32_poll(struct napi_stru
31786 __napi_complete(napi);
31787
31788 /* clear interrupt masks */
31789- val = lp->a.read_csr(ioaddr, CSR3);
31790+ val = lp->a->read_csr(ioaddr, CSR3);
31791 val &= 0x00ff;
31792- lp->a.write_csr(ioaddr, CSR3, val);
31793+ lp->a->write_csr(ioaddr, CSR3, val);
31794
31795 /* Set interrupt enable. */
31796- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
31797+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
31798
31799 spin_unlock_irqrestore(&lp->lock, flags);
31800 }
31801@@ -1365,7 +1365,7 @@ static void pcnet32_get_regs(struct net_
31802 int i, csr0;
31803 u16 *buff = ptr;
31804 struct pcnet32_private *lp = netdev_priv(dev);
31805- struct pcnet32_access *a = &lp->a;
31806+ struct pcnet32_access *a = lp->a;
31807 ulong ioaddr = dev->base_addr;
31808 unsigned long flags;
31809
31810@@ -1401,9 +1401,9 @@ static void pcnet32_get_regs(struct net_
31811 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
31812 if (lp->phymask & (1 << j)) {
31813 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
31814- lp->a.write_bcr(ioaddr, 33,
31815+ lp->a->write_bcr(ioaddr, 33,
31816 (j << 5) | i);
31817- *buff++ = lp->a.read_bcr(ioaddr, 34);
31818+ *buff++ = lp->a->read_bcr(ioaddr, 34);
31819 }
31820 }
31821 }
31822@@ -1785,7 +1785,7 @@ pcnet32_probe1(unsigned long ioaddr, int
31823 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
31824 lp->options |= PCNET32_PORT_FD;
31825
31826- lp->a = *a;
31827+ lp->a = a;
31828
31829 /* prior to register_netdev, dev->name is not yet correct */
31830 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
31831@@ -1844,7 +1844,7 @@ pcnet32_probe1(unsigned long ioaddr, int
31832 if (lp->mii) {
31833 /* lp->phycount and lp->phymask are set to 0 by memset above */
31834
31835- lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
31836+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
31837 /* scan for PHYs */
31838 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
31839 unsigned short id1, id2;
31840@@ -1864,7 +1864,7 @@ pcnet32_probe1(unsigned long ioaddr, int
31841 pr_info("Found PHY %04x:%04x at address %d\n",
31842 id1, id2, i);
31843 }
31844- lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
31845+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
31846 if (lp->phycount > 1)
31847 lp->options |= PCNET32_PORT_MII;
31848 }
31849@@ -2020,10 +2020,10 @@ static int pcnet32_open(struct net_devic
31850 }
31851
31852 /* Reset the PCNET32 */
31853- lp->a.reset(ioaddr);
31854+ lp->a->reset(ioaddr);
31855
31856 /* switch pcnet32 to 32bit mode */
31857- lp->a.write_bcr(ioaddr, 20, 2);
31858+ lp->a->write_bcr(ioaddr, 20, 2);
31859
31860 netif_printk(lp, ifup, KERN_DEBUG, dev,
31861 "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
31862@@ -2032,14 +2032,14 @@ static int pcnet32_open(struct net_devic
31863 (u32) (lp->init_dma_addr));
31864
31865 /* set/reset autoselect bit */
31866- val = lp->a.read_bcr(ioaddr, 2) & ~2;
31867+ val = lp->a->read_bcr(ioaddr, 2) & ~2;
31868 if (lp->options & PCNET32_PORT_ASEL)
31869 val |= 2;
31870- lp->a.write_bcr(ioaddr, 2, val);
31871+ lp->a->write_bcr(ioaddr, 2, val);
31872
31873 /* handle full duplex setting */
31874 if (lp->mii_if.full_duplex) {
31875- val = lp->a.read_bcr(ioaddr, 9) & ~3;
31876+ val = lp->a->read_bcr(ioaddr, 9) & ~3;
31877 if (lp->options & PCNET32_PORT_FD) {
31878 val |= 1;
31879 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
31880@@ -2049,14 +2049,14 @@ static int pcnet32_open(struct net_devic
31881 if (lp->chip_version == 0x2627)
31882 val |= 3;
31883 }
31884- lp->a.write_bcr(ioaddr, 9, val);
31885+ lp->a->write_bcr(ioaddr, 9, val);
31886 }
31887
31888 /* set/reset GPSI bit in test register */
31889- val = lp->a.read_csr(ioaddr, 124) & ~0x10;
31890+ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
31891 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
31892 val |= 0x10;
31893- lp->a.write_csr(ioaddr, 124, val);
31894+ lp->a->write_csr(ioaddr, 124, val);
31895
31896 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
31897 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
31898@@ -2075,24 +2075,24 @@ static int pcnet32_open(struct net_devic
31899 * duplex, and/or enable auto negotiation, and clear DANAS
31900 */
31901 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
31902- lp->a.write_bcr(ioaddr, 32,
31903- lp->a.read_bcr(ioaddr, 32) | 0x0080);
31904+ lp->a->write_bcr(ioaddr, 32,
31905+ lp->a->read_bcr(ioaddr, 32) | 0x0080);
31906 /* disable Auto Negotiation, set 10Mpbs, HD */
31907- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
31908+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
31909 if (lp->options & PCNET32_PORT_FD)
31910 val |= 0x10;
31911 if (lp->options & PCNET32_PORT_100)
31912 val |= 0x08;
31913- lp->a.write_bcr(ioaddr, 32, val);
31914+ lp->a->write_bcr(ioaddr, 32, val);
31915 } else {
31916 if (lp->options & PCNET32_PORT_ASEL) {
31917- lp->a.write_bcr(ioaddr, 32,
31918- lp->a.read_bcr(ioaddr,
31919+ lp->a->write_bcr(ioaddr, 32,
31920+ lp->a->read_bcr(ioaddr,
31921 32) | 0x0080);
31922 /* enable auto negotiate, setup, disable fd */
31923- val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
31924+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
31925 val |= 0x20;
31926- lp->a.write_bcr(ioaddr, 32, val);
31927+ lp->a->write_bcr(ioaddr, 32, val);
31928 }
31929 }
31930 } else {
31931@@ -2105,10 +2105,10 @@ static int pcnet32_open(struct net_devic
31932 * There is really no good other way to handle multiple PHYs
31933 * other than turning off all automatics
31934 */
31935- val = lp->a.read_bcr(ioaddr, 2);
31936- lp->a.write_bcr(ioaddr, 2, val & ~2);
31937- val = lp->a.read_bcr(ioaddr, 32);
31938- lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
31939+ val = lp->a->read_bcr(ioaddr, 2);
31940+ lp->a->write_bcr(ioaddr, 2, val & ~2);
31941+ val = lp->a->read_bcr(ioaddr, 32);
31942+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
31943
31944 if (!(lp->options & PCNET32_PORT_ASEL)) {
31945 /* setup ecmd */
31946@@ -2118,7 +2118,7 @@ static int pcnet32_open(struct net_devic
31947 ethtool_cmd_speed_set(&ecmd,
31948 (lp->options & PCNET32_PORT_100) ?
31949 SPEED_100 : SPEED_10);
31950- bcr9 = lp->a.read_bcr(ioaddr, 9);
31951+ bcr9 = lp->a->read_bcr(ioaddr, 9);
31952
31953 if (lp->options & PCNET32_PORT_FD) {
31954 ecmd.duplex = DUPLEX_FULL;
31955@@ -2127,7 +2127,7 @@ static int pcnet32_open(struct net_devic
31956 ecmd.duplex = DUPLEX_HALF;
31957 bcr9 |= ~(1 << 0);
31958 }
31959- lp->a.write_bcr(ioaddr, 9, bcr9);
31960+ lp->a->write_bcr(ioaddr, 9, bcr9);
31961 }
31962
31963 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
31964@@ -2158,9 +2158,9 @@ static int pcnet32_open(struct net_devic
31965
31966 #ifdef DO_DXSUFLO
31967 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
31968- val = lp->a.read_csr(ioaddr, CSR3);
31969+ val = lp->a->read_csr(ioaddr, CSR3);
31970 val |= 0x40;
31971- lp->a.write_csr(ioaddr, CSR3, val);
31972+ lp->a->write_csr(ioaddr, CSR3, val);
31973 }
31974 #endif
31975
31976@@ -2176,11 +2176,11 @@ static int pcnet32_open(struct net_devic
31977 napi_enable(&lp->napi);
31978
31979 /* Re-initialize the PCNET32, and start it when done. */
31980- lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
31981- lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
31982+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
31983+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
31984
31985- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31986- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
31987+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31988+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
31989
31990 netif_start_queue(dev);
31991
31992@@ -2192,19 +2192,19 @@ static int pcnet32_open(struct net_devic
31993
31994 i = 0;
31995 while (i++ < 100)
31996- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
31997+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
31998 break;
31999 /*
32000 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
32001 * reports that doing so triggers a bug in the '974.
32002 */
32003- lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
32004+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
32005
32006 netif_printk(lp, ifup, KERN_DEBUG, dev,
32007 "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
32008 i,
32009 (u32) (lp->init_dma_addr),
32010- lp->a.read_csr(ioaddr, CSR0));
32011+ lp->a->read_csr(ioaddr, CSR0));
32012
32013 spin_unlock_irqrestore(&lp->lock, flags);
32014
32015@@ -2218,7 +2218,7 @@ err_free_ring:
32016 * Switch back to 16bit mode to avoid problems with dumb
32017 * DOS packet driver after a warm reboot
32018 */
32019- lp->a.write_bcr(ioaddr, 20, 4);
32020+ lp->a->write_bcr(ioaddr, 20, 4);
32021
32022 err_free_irq:
32023 spin_unlock_irqrestore(&lp->lock, flags);
32024@@ -2323,7 +2323,7 @@ static void pcnet32_restart(struct net_d
32025
32026 /* wait for stop */
32027 for (i = 0; i < 100; i++)
32028- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
32029+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
32030 break;
32031
32032 if (i >= 100)
32033@@ -2335,13 +2335,13 @@ static void pcnet32_restart(struct net_d
32034 return;
32035
32036 /* ReInit Ring */
32037- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
32038+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
32039 i = 0;
32040 while (i++ < 1000)
32041- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
32042+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
32043 break;
32044
32045- lp->a.write_csr(ioaddr, CSR0, csr0_bits);
32046+ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
32047 }
32048
32049 static void pcnet32_tx_timeout(struct net_device *dev)
32050@@ -2353,8 +2353,8 @@ static void pcnet32_tx_timeout(struct ne
32051 /* Transmitter timeout, serious problems. */
32052 if (pcnet32_debug & NETIF_MSG_DRV)
32053 pr_err("%s: transmit timed out, status %4.4x, resetting\n",
32054- dev->name, lp->a.read_csr(ioaddr, CSR0));
32055- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
32056+ dev->name, lp->a->read_csr(ioaddr, CSR0));
32057+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
32058 dev->stats.tx_errors++;
32059 if (netif_msg_tx_err(lp)) {
32060 int i;
32061@@ -2397,7 +2397,7 @@ static netdev_tx_t pcnet32_start_xmit(st
32062
32063 netif_printk(lp, tx_queued, KERN_DEBUG, dev,
32064 "%s() called, csr0 %4.4x\n",
32065- __func__, lp->a.read_csr(ioaddr, CSR0));
32066+ __func__, lp->a->read_csr(ioaddr, CSR0));
32067
32068 /* Default status -- will not enable Successful-TxDone
32069 * interrupt when that option is available to us.
32070@@ -2427,7 +2427,7 @@ static netdev_tx_t pcnet32_start_xmit(st
32071 dev->stats.tx_bytes += skb->len;
32072
32073 /* Trigger an immediate send poll. */
32074- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
32075+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
32076
32077 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
32078 lp->tx_full = 1;
32079@@ -2452,16 +2452,16 @@ pcnet32_interrupt(int irq, void *dev_id)
32080
32081 spin_lock(&lp->lock);
32082
32083- csr0 = lp->a.read_csr(ioaddr, CSR0);
32084+ csr0 = lp->a->read_csr(ioaddr, CSR0);
32085 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
32086 if (csr0 == 0xffff)
32087 break; /* PCMCIA remove happened */
32088 /* Acknowledge all of the current interrupt sources ASAP. */
32089- lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
32090+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
32091
32092 netif_printk(lp, intr, KERN_DEBUG, dev,
32093 "interrupt csr0=%#2.2x new csr=%#2.2x\n",
32094- csr0, lp->a.read_csr(ioaddr, CSR0));
32095+ csr0, lp->a->read_csr(ioaddr, CSR0));
32096
32097 /* Log misc errors. */
32098 if (csr0 & 0x4000)
32099@@ -2488,19 +2488,19 @@ pcnet32_interrupt(int irq, void *dev_id)
32100 if (napi_schedule_prep(&lp->napi)) {
32101 u16 val;
32102 /* set interrupt masks */
32103- val = lp->a.read_csr(ioaddr, CSR3);
32104+ val = lp->a->read_csr(ioaddr, CSR3);
32105 val |= 0x5f00;
32106- lp->a.write_csr(ioaddr, CSR3, val);
32107+ lp->a->write_csr(ioaddr, CSR3, val);
32108
32109 __napi_schedule(&lp->napi);
32110 break;
32111 }
32112- csr0 = lp->a.read_csr(ioaddr, CSR0);
32113+ csr0 = lp->a->read_csr(ioaddr, CSR0);
32114 }
32115
32116 netif_printk(lp, intr, KERN_DEBUG, dev,
32117 "exiting interrupt, csr0=%#4.4x\n",
32118- lp->a.read_csr(ioaddr, CSR0));
32119+ lp->a->read_csr(ioaddr, CSR0));
32120
32121 spin_unlock(&lp->lock);
32122
32123@@ -2520,20 +2520,20 @@ static int pcnet32_close(struct net_devi
32124
32125 spin_lock_irqsave(&lp->lock, flags);
32126
32127- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
32128+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
32129
32130 netif_printk(lp, ifdown, KERN_DEBUG, dev,
32131 "Shutting down ethercard, status was %2.2x\n",
32132- lp->a.read_csr(ioaddr, CSR0));
32133+ lp->a->read_csr(ioaddr, CSR0));
32134
32135 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
32136- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
32137+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
32138
32139 /*
32140 * Switch back to 16bit mode to avoid problems with dumb
32141 * DOS packet driver after a warm reboot
32142 */
32143- lp->a.write_bcr(ioaddr, 20, 4);
32144+ lp->a->write_bcr(ioaddr, 20, 4);
32145
32146 spin_unlock_irqrestore(&lp->lock, flags);
32147
32148@@ -2556,7 +2556,7 @@ static struct net_device_stats *pcnet32_
32149 unsigned long flags;
32150
32151 spin_lock_irqsave(&lp->lock, flags);
32152- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
32153+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
32154 spin_unlock_irqrestore(&lp->lock, flags);
32155
32156 return &dev->stats;
32157@@ -2577,10 +2577,10 @@ static void pcnet32_load_multicast(struc
32158 if (dev->flags & IFF_ALLMULTI) {
32159 ib->filter[0] = cpu_to_le32(~0U);
32160 ib->filter[1] = cpu_to_le32(~0U);
32161- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
32162- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
32163- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
32164- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
32165+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
32166+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
32167+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
32168+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
32169 return;
32170 }
32171 /* clear the multicast filter */
32172@@ -2594,7 +2594,7 @@ static void pcnet32_load_multicast(struc
32173 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
32174 }
32175 for (i = 0; i < 4; i++)
32176- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
32177+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
32178 le16_to_cpu(mcast_table[i]));
32179 }
32180
32181@@ -2609,28 +2609,28 @@ static void pcnet32_set_multicast_list(s
32182
32183 spin_lock_irqsave(&lp->lock, flags);
32184 suspended = pcnet32_suspend(dev, &flags, 0);
32185- csr15 = lp->a.read_csr(ioaddr, CSR15);
32186+ csr15 = lp->a->read_csr(ioaddr, CSR15);
32187 if (dev->flags & IFF_PROMISC) {
32188 /* Log any net taps. */
32189 netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
32190 lp->init_block->mode =
32191 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
32192 7);
32193- lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
32194+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
32195 } else {
32196 lp->init_block->mode =
32197 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
32198- lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
32199+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
32200 pcnet32_load_multicast(dev);
32201 }
32202
32203 if (suspended) {
32204 int csr5;
32205 /* clear SUSPEND (SPND) - CSR5 bit 0 */
32206- csr5 = lp->a.read_csr(ioaddr, CSR5);
32207- lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
32208+ csr5 = lp->a->read_csr(ioaddr, CSR5);
32209+ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
32210 } else {
32211- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
32212+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
32213 pcnet32_restart(dev, CSR0_NORMAL);
32214 netif_wake_queue(dev);
32215 }
32216@@ -2648,8 +2648,8 @@ static int mdio_read(struct net_device *
32217 if (!lp->mii)
32218 return 0;
32219
32220- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
32221- val_out = lp->a.read_bcr(ioaddr, 34);
32222+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
32223+ val_out = lp->a->read_bcr(ioaddr, 34);
32224
32225 return val_out;
32226 }
32227@@ -2663,8 +2663,8 @@ static void mdio_write(struct net_device
32228 if (!lp->mii)
32229 return;
32230
32231- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
32232- lp->a.write_bcr(ioaddr, 34, val);
32233+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
32234+ lp->a->write_bcr(ioaddr, 34, val);
32235 }
32236
32237 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
32238@@ -2741,7 +2741,7 @@ static void pcnet32_check_media(struct n
32239 curr_link = mii_link_ok(&lp->mii_if);
32240 } else {
32241 ulong ioaddr = dev->base_addr; /* card base I/O address */
32242- curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
32243+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
32244 }
32245 if (!curr_link) {
32246 if (prev_link || verbose) {
32247@@ -2764,13 +2764,13 @@ static void pcnet32_check_media(struct n
32248 (ecmd.duplex == DUPLEX_FULL)
32249 ? "full" : "half");
32250 }
32251- bcr9 = lp->a.read_bcr(dev->base_addr, 9);
32252+ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
32253 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
32254 if (lp->mii_if.full_duplex)
32255 bcr9 |= (1 << 0);
32256 else
32257 bcr9 &= ~(1 << 0);
32258- lp->a.write_bcr(dev->base_addr, 9, bcr9);
32259+ lp->a->write_bcr(dev->base_addr, 9, bcr9);
32260 }
32261 } else {
32262 netif_info(lp, link, dev, "link up\n");
32263diff -urNp linux-3.1.1/drivers/net/ppp_generic.c linux-3.1.1/drivers/net/ppp_generic.c
32264--- linux-3.1.1/drivers/net/ppp_generic.c 2011-11-11 15:19:27.000000000 -0500
32265+++ linux-3.1.1/drivers/net/ppp_generic.c 2011-11-16 18:39:07.000000000 -0500
32266@@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, st
32267 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
32268 struct ppp_stats stats;
32269 struct ppp_comp_stats cstats;
32270- char *vers;
32271
32272 switch (cmd) {
32273 case SIOCGPPPSTATS:
32274@@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, st
32275 break;
32276
32277 case SIOCGPPPVER:
32278- vers = PPP_VERSION;
32279- if (copy_to_user(addr, vers, strlen(vers) + 1))
32280+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
32281 break;
32282 err = 0;
32283 break;
32284diff -urNp linux-3.1.1/drivers/net/r8169.c linux-3.1.1/drivers/net/r8169.c
32285--- linux-3.1.1/drivers/net/r8169.c 2011-11-11 15:19:27.000000000 -0500
32286+++ linux-3.1.1/drivers/net/r8169.c 2011-11-16 18:39:07.000000000 -0500
32287@@ -663,12 +663,12 @@ struct rtl8169_private {
32288 struct mdio_ops {
32289 void (*write)(void __iomem *, int, int);
32290 int (*read)(void __iomem *, int);
32291- } mdio_ops;
32292+ } __no_const mdio_ops;
32293
32294 struct pll_power_ops {
32295 void (*down)(struct rtl8169_private *);
32296 void (*up)(struct rtl8169_private *);
32297- } pll_power_ops;
32298+ } __no_const pll_power_ops;
32299
32300 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
32301 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
32302diff -urNp linux-3.1.1/drivers/net/sis190.c linux-3.1.1/drivers/net/sis190.c
32303--- linux-3.1.1/drivers/net/sis190.c 2011-11-11 15:19:27.000000000 -0500
32304+++ linux-3.1.1/drivers/net/sis190.c 2011-11-16 18:39:07.000000000 -0500
32305@@ -1624,7 +1624,7 @@ static int __devinit sis190_get_mac_addr
32306 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
32307 struct net_device *dev)
32308 {
32309- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
32310+ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
32311 struct sis190_private *tp = netdev_priv(dev);
32312 struct pci_dev *isa_bridge;
32313 u8 reg, tmp8;
32314diff -urNp linux-3.1.1/drivers/net/sundance.c linux-3.1.1/drivers/net/sundance.c
32315--- linux-3.1.1/drivers/net/sundance.c 2011-11-11 15:19:27.000000000 -0500
32316+++ linux-3.1.1/drivers/net/sundance.c 2011-11-16 18:39:07.000000000 -0500
32317@@ -218,7 +218,7 @@ enum {
32318 struct pci_id_info {
32319 const char *name;
32320 };
32321-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
32322+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
32323 {"D-Link DFE-550TX FAST Ethernet Adapter"},
32324 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
32325 {"D-Link DFE-580TX 4 port Server Adapter"},
32326diff -urNp linux-3.1.1/drivers/net/tg3.h linux-3.1.1/drivers/net/tg3.h
32327--- linux-3.1.1/drivers/net/tg3.h 2011-11-11 15:19:27.000000000 -0500
32328+++ linux-3.1.1/drivers/net/tg3.h 2011-11-16 18:39:07.000000000 -0500
32329@@ -134,6 +134,7 @@
32330 #define CHIPREV_ID_5750_A0 0x4000
32331 #define CHIPREV_ID_5750_A1 0x4001
32332 #define CHIPREV_ID_5750_A3 0x4003
32333+#define CHIPREV_ID_5750_C1 0x4201
32334 #define CHIPREV_ID_5750_C2 0x4202
32335 #define CHIPREV_ID_5752_A0_HW 0x5000
32336 #define CHIPREV_ID_5752_A0 0x6000
32337diff -urNp linux-3.1.1/drivers/net/tokenring/abyss.c linux-3.1.1/drivers/net/tokenring/abyss.c
32338--- linux-3.1.1/drivers/net/tokenring/abyss.c 2011-11-11 15:19:27.000000000 -0500
32339+++ linux-3.1.1/drivers/net/tokenring/abyss.c 2011-11-16 18:39:07.000000000 -0500
32340@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
32341
32342 static int __init abyss_init (void)
32343 {
32344- abyss_netdev_ops = tms380tr_netdev_ops;
32345+ pax_open_kernel();
32346+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32347
32348- abyss_netdev_ops.ndo_open = abyss_open;
32349- abyss_netdev_ops.ndo_stop = abyss_close;
32350+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
32351+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
32352+ pax_close_kernel();
32353
32354 return pci_register_driver(&abyss_driver);
32355 }
32356diff -urNp linux-3.1.1/drivers/net/tokenring/madgemc.c linux-3.1.1/drivers/net/tokenring/madgemc.c
32357--- linux-3.1.1/drivers/net/tokenring/madgemc.c 2011-11-11 15:19:27.000000000 -0500
32358+++ linux-3.1.1/drivers/net/tokenring/madgemc.c 2011-11-16 18:39:07.000000000 -0500
32359@@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver
32360
32361 static int __init madgemc_init (void)
32362 {
32363- madgemc_netdev_ops = tms380tr_netdev_ops;
32364- madgemc_netdev_ops.ndo_open = madgemc_open;
32365- madgemc_netdev_ops.ndo_stop = madgemc_close;
32366+ pax_open_kernel();
32367+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32368+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
32369+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
32370+ pax_close_kernel();
32371
32372 return mca_register_driver (&madgemc_driver);
32373 }
32374diff -urNp linux-3.1.1/drivers/net/tokenring/proteon.c linux-3.1.1/drivers/net/tokenring/proteon.c
32375--- linux-3.1.1/drivers/net/tokenring/proteon.c 2011-11-11 15:19:27.000000000 -0500
32376+++ linux-3.1.1/drivers/net/tokenring/proteon.c 2011-11-16 18:39:07.000000000 -0500
32377@@ -353,9 +353,11 @@ static int __init proteon_init(void)
32378 struct platform_device *pdev;
32379 int i, num = 0, err = 0;
32380
32381- proteon_netdev_ops = tms380tr_netdev_ops;
32382- proteon_netdev_ops.ndo_open = proteon_open;
32383- proteon_netdev_ops.ndo_stop = tms380tr_close;
32384+ pax_open_kernel();
32385+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32386+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
32387+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
32388+ pax_close_kernel();
32389
32390 err = platform_driver_register(&proteon_driver);
32391 if (err)
32392diff -urNp linux-3.1.1/drivers/net/tokenring/skisa.c linux-3.1.1/drivers/net/tokenring/skisa.c
32393--- linux-3.1.1/drivers/net/tokenring/skisa.c 2011-11-11 15:19:27.000000000 -0500
32394+++ linux-3.1.1/drivers/net/tokenring/skisa.c 2011-11-16 18:39:07.000000000 -0500
32395@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
32396 struct platform_device *pdev;
32397 int i, num = 0, err = 0;
32398
32399- sk_isa_netdev_ops = tms380tr_netdev_ops;
32400- sk_isa_netdev_ops.ndo_open = sk_isa_open;
32401- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
32402+ pax_open_kernel();
32403+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32404+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
32405+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
32406+ pax_close_kernel();
32407
32408 err = platform_driver_register(&sk_isa_driver);
32409 if (err)
32410diff -urNp linux-3.1.1/drivers/net/tulip/de2104x.c linux-3.1.1/drivers/net/tulip/de2104x.c
32411--- linux-3.1.1/drivers/net/tulip/de2104x.c 2011-11-11 15:19:27.000000000 -0500
32412+++ linux-3.1.1/drivers/net/tulip/de2104x.c 2011-11-16 18:40:22.000000000 -0500
32413@@ -1795,6 +1795,8 @@ static void __devinit de21041_get_srom_i
32414 struct de_srom_info_leaf *il;
32415 void *bufp;
32416
32417+ pax_track_stack();
32418+
32419 /* download entire eeprom */
32420 for (i = 0; i < DE_EEPROM_WORDS; i++)
32421 ((__le16 *)ee_data)[i] =
32422diff -urNp linux-3.1.1/drivers/net/tulip/de4x5.c linux-3.1.1/drivers/net/tulip/de4x5.c
32423--- linux-3.1.1/drivers/net/tulip/de4x5.c 2011-11-11 15:19:27.000000000 -0500
32424+++ linux-3.1.1/drivers/net/tulip/de4x5.c 2011-11-16 18:39:07.000000000 -0500
32425@@ -5397,7 +5397,7 @@ de4x5_ioctl(struct net_device *dev, stru
32426 for (i=0; i<ETH_ALEN; i++) {
32427 tmp.addr[i] = dev->dev_addr[i];
32428 }
32429- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32430+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32431 break;
32432
32433 case DE4X5_SET_HWADDR: /* Set the hardware address */
32434@@ -5437,7 +5437,7 @@ de4x5_ioctl(struct net_device *dev, stru
32435 spin_lock_irqsave(&lp->lock, flags);
32436 memcpy(&statbuf, &lp->pktStats, ioc->len);
32437 spin_unlock_irqrestore(&lp->lock, flags);
32438- if (copy_to_user(ioc->data, &statbuf, ioc->len))
32439+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
32440 return -EFAULT;
32441 break;
32442 }
32443diff -urNp linux-3.1.1/drivers/net/tulip/eeprom.c linux-3.1.1/drivers/net/tulip/eeprom.c
32444--- linux-3.1.1/drivers/net/tulip/eeprom.c 2011-11-11 15:19:27.000000000 -0500
32445+++ linux-3.1.1/drivers/net/tulip/eeprom.c 2011-11-16 18:39:07.000000000 -0500
32446@@ -81,7 +81,7 @@ static struct eeprom_fixup eeprom_fixups
32447 {NULL}};
32448
32449
32450-static const char *block_name[] __devinitdata = {
32451+static const char *block_name[] __devinitconst = {
32452 "21140 non-MII",
32453 "21140 MII PHY",
32454 "21142 Serial PHY",
32455diff -urNp linux-3.1.1/drivers/net/tulip/winbond-840.c linux-3.1.1/drivers/net/tulip/winbond-840.c
32456--- linux-3.1.1/drivers/net/tulip/winbond-840.c 2011-11-11 15:19:27.000000000 -0500
32457+++ linux-3.1.1/drivers/net/tulip/winbond-840.c 2011-11-16 18:39:07.000000000 -0500
32458@@ -236,7 +236,7 @@ struct pci_id_info {
32459 int drv_flags; /* Driver use, intended as capability flags. */
32460 };
32461
32462-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
32463+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
32464 { /* Sometime a Level-One switch card. */
32465 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
32466 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
32467diff -urNp linux-3.1.1/drivers/net/usb/hso.c linux-3.1.1/drivers/net/usb/hso.c
32468--- linux-3.1.1/drivers/net/usb/hso.c 2011-11-11 15:19:27.000000000 -0500
32469+++ linux-3.1.1/drivers/net/usb/hso.c 2011-11-16 18:39:07.000000000 -0500
32470@@ -71,7 +71,7 @@
32471 #include <asm/byteorder.h>
32472 #include <linux/serial_core.h>
32473 #include <linux/serial.h>
32474-
32475+#include <asm/local.h>
32476
32477 #define MOD_AUTHOR "Option Wireless"
32478 #define MOD_DESCRIPTION "USB High Speed Option driver"
32479@@ -257,7 +257,7 @@ struct hso_serial {
32480
32481 /* from usb_serial_port */
32482 struct tty_struct *tty;
32483- int open_count;
32484+ local_t open_count;
32485 spinlock_t serial_lock;
32486
32487 int (*write_data) (struct hso_serial *serial);
32488@@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_
32489 struct urb *urb;
32490
32491 urb = serial->rx_urb[0];
32492- if (serial->open_count > 0) {
32493+ if (local_read(&serial->open_count) > 0) {
32494 count = put_rxbuf_data(urb, serial);
32495 if (count == -1)
32496 return;
32497@@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_cal
32498 DUMP1(urb->transfer_buffer, urb->actual_length);
32499
32500 /* Anyone listening? */
32501- if (serial->open_count == 0)
32502+ if (local_read(&serial->open_count) == 0)
32503 return;
32504
32505 if (status == 0) {
32506@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
32507 spin_unlock_irq(&serial->serial_lock);
32508
32509 /* check for port already opened, if not set the termios */
32510- serial->open_count++;
32511- if (serial->open_count == 1) {
32512+ if (local_inc_return(&serial->open_count) == 1) {
32513 serial->rx_state = RX_IDLE;
32514 /* Force default termio settings */
32515 _hso_serial_set_termios(tty, NULL);
32516@@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_st
32517 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
32518 if (result) {
32519 hso_stop_serial_device(serial->parent);
32520- serial->open_count--;
32521+ local_dec(&serial->open_count);
32522 kref_put(&serial->parent->ref, hso_serial_ref_free);
32523 }
32524 } else {
32525@@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_
32526
32527 /* reset the rts and dtr */
32528 /* do the actual close */
32529- serial->open_count--;
32530+ local_dec(&serial->open_count);
32531
32532- if (serial->open_count <= 0) {
32533- serial->open_count = 0;
32534+ if (local_read(&serial->open_count) <= 0) {
32535+ local_set(&serial->open_count, 0);
32536 spin_lock_irq(&serial->serial_lock);
32537 if (serial->tty == tty) {
32538 serial->tty->driver_data = NULL;
32539@@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struc
32540
32541 /* the actual setup */
32542 spin_lock_irqsave(&serial->serial_lock, flags);
32543- if (serial->open_count)
32544+ if (local_read(&serial->open_count))
32545 _hso_serial_set_termios(tty, old);
32546 else
32547 tty->termios = old;
32548@@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *ur
32549 D1("Pending read interrupt on port %d\n", i);
32550 spin_lock(&serial->serial_lock);
32551 if (serial->rx_state == RX_IDLE &&
32552- serial->open_count > 0) {
32553+ local_read(&serial->open_count) > 0) {
32554 /* Setup and send a ctrl req read on
32555 * port i */
32556 if (!serial->rx_urb_filled[0]) {
32557@@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interfa
32558 /* Start all serial ports */
32559 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
32560 if (serial_table[i] && (serial_table[i]->interface == iface)) {
32561- if (dev2ser(serial_table[i])->open_count) {
32562+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
32563 result =
32564 hso_start_serial_device(serial_table[i], GFP_NOIO);
32565 hso_kick_transmit(dev2ser(serial_table[i]));
32566diff -urNp linux-3.1.1/drivers/net/vmxnet3/vmxnet3_ethtool.c linux-3.1.1/drivers/net/vmxnet3/vmxnet3_ethtool.c
32567--- linux-3.1.1/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-11-11 15:19:27.000000000 -0500
32568+++ linux-3.1.1/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-11-16 18:39:07.000000000 -0500
32569@@ -601,8 +601,7 @@ vmxnet3_set_rss_indir(struct net_device
32570 * Return with error code if any of the queue indices
32571 * is out of range
32572 */
32573- if (p->ring_index[i] < 0 ||
32574- p->ring_index[i] >= adapter->num_rx_queues)
32575+ if (p->ring_index[i] >= adapter->num_rx_queues)
32576 return -EINVAL;
32577 }
32578
32579diff -urNp linux-3.1.1/drivers/net/vxge/vxge-config.h linux-3.1.1/drivers/net/vxge/vxge-config.h
32580--- linux-3.1.1/drivers/net/vxge/vxge-config.h 2011-11-11 15:19:27.000000000 -0500
32581+++ linux-3.1.1/drivers/net/vxge/vxge-config.h 2011-11-16 18:39:07.000000000 -0500
32582@@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
32583 void (*link_down)(struct __vxge_hw_device *devh);
32584 void (*crit_err)(struct __vxge_hw_device *devh,
32585 enum vxge_hw_event type, u64 ext_data);
32586-};
32587+} __no_const;
32588
32589 /*
32590 * struct __vxge_hw_blockpool_entry - Block private data structure
32591diff -urNp linux-3.1.1/drivers/net/vxge/vxge-main.c linux-3.1.1/drivers/net/vxge/vxge-main.c
32592--- linux-3.1.1/drivers/net/vxge/vxge-main.c 2011-11-11 15:19:27.000000000 -0500
32593+++ linux-3.1.1/drivers/net/vxge/vxge-main.c 2011-11-16 18:40:22.000000000 -0500
32594@@ -100,6 +100,8 @@ static inline void VXGE_COMPLETE_VPATH_T
32595 struct sk_buff *completed[NR_SKB_COMPLETED];
32596 int more;
32597
32598+ pax_track_stack();
32599+
32600 do {
32601 more = 0;
32602 skb_ptr = completed;
32603@@ -1915,6 +1917,8 @@ static enum vxge_hw_status vxge_rth_conf
32604 u8 mtable[256] = {0}; /* CPU to vpath mapping */
32605 int index;
32606
32607+ pax_track_stack();
32608+
32609 /*
32610 * Filling
32611 * - itable with bucket numbers
32612diff -urNp linux-3.1.1/drivers/net/vxge/vxge-traffic.h linux-3.1.1/drivers/net/vxge/vxge-traffic.h
32613--- linux-3.1.1/drivers/net/vxge/vxge-traffic.h 2011-11-11 15:19:27.000000000 -0500
32614+++ linux-3.1.1/drivers/net/vxge/vxge-traffic.h 2011-11-16 18:39:07.000000000 -0500
32615@@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
32616 struct vxge_hw_mempool_dma *dma_object,
32617 u32 index,
32618 u32 is_last);
32619-};
32620+} __no_const;
32621
32622 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
32623 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
32624diff -urNp linux-3.1.1/drivers/net/wan/hdlc_x25.c linux-3.1.1/drivers/net/wan/hdlc_x25.c
32625--- linux-3.1.1/drivers/net/wan/hdlc_x25.c 2011-11-11 15:19:27.000000000 -0500
32626+++ linux-3.1.1/drivers/net/wan/hdlc_x25.c 2011-11-16 18:39:07.000000000 -0500
32627@@ -134,16 +134,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
32628
32629 static int x25_open(struct net_device *dev)
32630 {
32631- struct lapb_register_struct cb;
32632+ static struct lapb_register_struct cb = {
32633+ .connect_confirmation = x25_connected,
32634+ .connect_indication = x25_connected,
32635+ .disconnect_confirmation = x25_disconnected,
32636+ .disconnect_indication = x25_disconnected,
32637+ .data_indication = x25_data_indication,
32638+ .data_transmit = x25_data_transmit
32639+ };
32640 int result;
32641
32642- cb.connect_confirmation = x25_connected;
32643- cb.connect_indication = x25_connected;
32644- cb.disconnect_confirmation = x25_disconnected;
32645- cb.disconnect_indication = x25_disconnected;
32646- cb.data_indication = x25_data_indication;
32647- cb.data_transmit = x25_data_transmit;
32648-
32649 result = lapb_register(dev, &cb);
32650 if (result != LAPB_OK)
32651 return result;
32652diff -urNp linux-3.1.1/drivers/net/wimax/i2400m/usb-fw.c linux-3.1.1/drivers/net/wimax/i2400m/usb-fw.c
32653--- linux-3.1.1/drivers/net/wimax/i2400m/usb-fw.c 2011-11-11 15:19:27.000000000 -0500
32654+++ linux-3.1.1/drivers/net/wimax/i2400m/usb-fw.c 2011-11-16 18:40:22.000000000 -0500
32655@@ -287,6 +287,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
32656 int do_autopm = 1;
32657 DECLARE_COMPLETION_ONSTACK(notif_completion);
32658
32659+ pax_track_stack();
32660+
32661 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
32662 i2400m, ack, ack_size);
32663 BUG_ON(_ack == i2400m->bm_ack_buf);
32664diff -urNp linux-3.1.1/drivers/net/wireless/airo.c linux-3.1.1/drivers/net/wireless/airo.c
32665--- linux-3.1.1/drivers/net/wireless/airo.c 2011-11-11 15:19:27.000000000 -0500
32666+++ linux-3.1.1/drivers/net/wireless/airo.c 2011-11-16 18:40:22.000000000 -0500
32667@@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
32668 BSSListElement * loop_net;
32669 BSSListElement * tmp_net;
32670
32671+ pax_track_stack();
32672+
32673 /* Blow away current list of scan results */
32674 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
32675 list_move_tail (&loop_net->list, &ai->network_free_list);
32676@@ -3794,6 +3796,8 @@ static u16 setup_card(struct airo_info *
32677 WepKeyRid wkr;
32678 int rc;
32679
32680+ pax_track_stack();
32681+
32682 memset( &mySsid, 0, sizeof( mySsid ) );
32683 kfree (ai->flash);
32684 ai->flash = NULL;
32685@@ -4753,6 +4757,8 @@ static int proc_stats_rid_open( struct i
32686 __le32 *vals = stats.vals;
32687 int len;
32688
32689+ pax_track_stack();
32690+
32691 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
32692 return -ENOMEM;
32693 data = file->private_data;
32694@@ -5476,6 +5482,8 @@ static int proc_BSSList_open( struct ino
32695 /* If doLoseSync is not 1, we won't do a Lose Sync */
32696 int doLoseSync = -1;
32697
32698+ pax_track_stack();
32699+
32700 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
32701 return -ENOMEM;
32702 data = file->private_data;
32703@@ -7181,6 +7189,8 @@ static int airo_get_aplist(struct net_de
32704 int i;
32705 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
32706
32707+ pax_track_stack();
32708+
32709 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
32710 if (!qual)
32711 return -ENOMEM;
32712@@ -7741,6 +7751,8 @@ static void airo_read_wireless_stats(str
32713 CapabilityRid cap_rid;
32714 __le32 *vals = stats_rid.vals;
32715
32716+ pax_track_stack();
32717+
32718 /* Get stats out of the card */
32719 clear_bit(JOB_WSTATS, &local->jobs);
32720 if (local->power.event) {
32721diff -urNp linux-3.1.1/drivers/net/wireless/ath/ath5k/debug.c linux-3.1.1/drivers/net/wireless/ath/ath5k/debug.c
32722--- linux-3.1.1/drivers/net/wireless/ath/ath5k/debug.c 2011-11-11 15:19:27.000000000 -0500
32723+++ linux-3.1.1/drivers/net/wireless/ath/ath5k/debug.c 2011-11-16 19:08:21.000000000 -0500
32724@@ -203,6 +203,8 @@ static ssize_t read_file_beacon(struct f
32725 unsigned int v;
32726 u64 tsf;
32727
32728+ pax_track_stack();
32729+
32730 v = ath5k_hw_reg_read(ah, AR5K_BEACON);
32731 len += snprintf(buf + len, sizeof(buf) - len,
32732 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
32733@@ -321,6 +323,8 @@ static ssize_t read_file_debug(struct fi
32734 unsigned int len = 0;
32735 unsigned int i;
32736
32737+ pax_track_stack();
32738+
32739 len += snprintf(buf + len, sizeof(buf) - len,
32740 "DEBUG LEVEL: 0x%08x\n\n", ah->debug.level);
32741
32742@@ -492,6 +496,8 @@ static ssize_t read_file_misc(struct fil
32743 unsigned int len = 0;
32744 u32 filt = ath5k_hw_get_rx_filter(ah);
32745
32746+ pax_track_stack();
32747+
32748 len += snprintf(buf + len, sizeof(buf) - len, "bssid-mask: %pM\n",
32749 ah->bssidmask);
32750 len += snprintf(buf + len, sizeof(buf) - len, "filter-flags: 0x%x ",
32751@@ -548,6 +554,8 @@ static ssize_t read_file_frameerrors(str
32752 unsigned int len = 0;
32753 int i;
32754
32755+ pax_track_stack();
32756+
32757 len += snprintf(buf + len, sizeof(buf) - len,
32758 "RX\n---------------------\n");
32759 len += snprintf(buf + len, sizeof(buf) - len, "CRC\t%u\t(%u%%)\n",
32760@@ -665,6 +673,8 @@ static ssize_t read_file_ani(struct file
32761 char buf[700];
32762 unsigned int len = 0;
32763
32764+ pax_track_stack();
32765+
32766 len += snprintf(buf + len, sizeof(buf) - len,
32767 "HW has PHY error counters:\t%s\n",
32768 ah->ah_capabilities.cap_has_phyerr_counters ?
32769@@ -829,6 +839,8 @@ static ssize_t read_file_queue(struct fi
32770 struct ath5k_buf *bf, *bf0;
32771 int i, n;
32772
32773+ pax_track_stack();
32774+
32775 len += snprintf(buf + len, sizeof(buf) - len,
32776 "available txbuffers: %d\n", ah->txbuf_len);
32777
32778diff -urNp linux-3.1.1/drivers/net/wireless/ath/ath9k/ar9003_calib.c linux-3.1.1/drivers/net/wireless/ath/ath9k/ar9003_calib.c
32779--- linux-3.1.1/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-11-11 15:19:27.000000000 -0500
32780+++ linux-3.1.1/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-11-16 18:40:22.000000000 -0500
32781@@ -758,6 +758,8 @@ static void ar9003_hw_tx_iq_cal_post_pro
32782 int i, im, j;
32783 int nmeasurement;
32784
32785+ pax_track_stack();
32786+
32787 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
32788 if (ah->txchainmask & (1 << i))
32789 num_chains++;
32790diff -urNp linux-3.1.1/drivers/net/wireless/ath/ath9k/ar9003_paprd.c linux-3.1.1/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
32791--- linux-3.1.1/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-11-11 15:19:27.000000000 -0500
32792+++ linux-3.1.1/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-11-16 18:40:22.000000000 -0500
32793@@ -406,6 +406,8 @@ static bool create_pa_curve(u32 *data_L,
32794 int theta_low_bin = 0;
32795 int i;
32796
32797+ pax_track_stack();
32798+
32799 /* disregard any bin that contains <= 16 samples */
32800 thresh_accum_cnt = 16;
32801 scale_factor = 5;
32802diff -urNp linux-3.1.1/drivers/net/wireless/ath/ath9k/debug.c linux-3.1.1/drivers/net/wireless/ath/ath9k/debug.c
32803--- linux-3.1.1/drivers/net/wireless/ath/ath9k/debug.c 2011-11-11 15:19:27.000000000 -0500
32804+++ linux-3.1.1/drivers/net/wireless/ath/ath9k/debug.c 2011-11-16 18:40:22.000000000 -0500
32805@@ -387,6 +387,8 @@ static ssize_t read_file_interrupt(struc
32806 char buf[512];
32807 unsigned int len = 0;
32808
32809+ pax_track_stack();
32810+
32811 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
32812 len += snprintf(buf + len, sizeof(buf) - len,
32813 "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
32814@@ -477,6 +479,8 @@ static ssize_t read_file_wiphy(struct fi
32815 u8 addr[ETH_ALEN];
32816 u32 tmp;
32817
32818+ pax_track_stack();
32819+
32820 len += snprintf(buf + len, sizeof(buf) - len,
32821 "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
32822 wiphy_name(sc->hw->wiphy),
32823diff -urNp linux-3.1.1/drivers/net/wireless/ath/ath9k/htc_drv_debug.c linux-3.1.1/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
32824--- linux-3.1.1/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-11-11 15:19:27.000000000 -0500
32825+++ linux-3.1.1/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-11-16 18:40:22.000000000 -0500
32826@@ -31,6 +31,8 @@ static ssize_t read_file_tgt_int_stats(s
32827 unsigned int len = 0;
32828 int ret = 0;
32829
32830+ pax_track_stack();
32831+
32832 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
32833
32834 ath9k_htc_ps_wakeup(priv);
32835@@ -89,6 +91,8 @@ static ssize_t read_file_tgt_tx_stats(st
32836 unsigned int len = 0;
32837 int ret = 0;
32838
32839+ pax_track_stack();
32840+
32841 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
32842
32843 ath9k_htc_ps_wakeup(priv);
32844@@ -159,6 +163,8 @@ static ssize_t read_file_tgt_rx_stats(st
32845 unsigned int len = 0;
32846 int ret = 0;
32847
32848+ pax_track_stack();
32849+
32850 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
32851
32852 ath9k_htc_ps_wakeup(priv);
32853@@ -203,6 +209,8 @@ static ssize_t read_file_xmit(struct fil
32854 char buf[512];
32855 unsigned int len = 0;
32856
32857+ pax_track_stack();
32858+
32859 len += snprintf(buf + len, sizeof(buf) - len,
32860 "%20s : %10u\n", "Buffers queued",
32861 priv->debug.tx_stats.buf_queued);
32862@@ -376,6 +384,8 @@ static ssize_t read_file_slot(struct fil
32863 char buf[512];
32864 unsigned int len = 0;
32865
32866+ pax_track_stack();
32867+
32868 spin_lock_bh(&priv->tx.tx_lock);
32869
32870 len += snprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
32871@@ -411,6 +421,8 @@ static ssize_t read_file_queue(struct fi
32872 char buf[512];
32873 unsigned int len = 0;
32874
32875+ pax_track_stack();
32876+
32877 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
32878 "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
32879
32880diff -urNp linux-3.1.1/drivers/net/wireless/ath/ath9k/hw.h linux-3.1.1/drivers/net/wireless/ath/ath9k/hw.h
32881--- linux-3.1.1/drivers/net/wireless/ath/ath9k/hw.h 2011-11-11 15:19:27.000000000 -0500
32882+++ linux-3.1.1/drivers/net/wireless/ath/ath9k/hw.h 2011-11-16 18:39:07.000000000 -0500
32883@@ -588,7 +588,7 @@ struct ath_hw_private_ops {
32884
32885 /* ANI */
32886 void (*ani_cache_ini_regs)(struct ath_hw *ah);
32887-};
32888+} __no_const;
32889
32890 /**
32891 * struct ath_hw_ops - callbacks used by hardware code and driver code
32892@@ -639,7 +639,7 @@ struct ath_hw_ops {
32893 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
32894 struct ath_hw_antcomb_conf *antconf);
32895
32896-};
32897+} __no_const;
32898
32899 struct ath_nf_limits {
32900 s16 max;
32901@@ -652,7 +652,7 @@ struct ath_nf_limits {
32902 #define AH_UNPLUGGED 0x2 /* The card has been physically removed. */
32903
32904 struct ath_hw {
32905- struct ath_ops reg_ops;
32906+ ath_ops_no_const reg_ops;
32907
32908 struct ieee80211_hw *hw;
32909 struct ath_common common;
32910diff -urNp linux-3.1.1/drivers/net/wireless/ath/ath.h linux-3.1.1/drivers/net/wireless/ath/ath.h
32911--- linux-3.1.1/drivers/net/wireless/ath/ath.h 2011-11-11 15:19:27.000000000 -0500
32912+++ linux-3.1.1/drivers/net/wireless/ath/ath.h 2011-11-16 18:39:07.000000000 -0500
32913@@ -121,6 +121,7 @@ struct ath_ops {
32914 void (*write_flush) (void *);
32915 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
32916 };
32917+typedef struct ath_ops __no_const ath_ops_no_const;
32918
32919 struct ath_common;
32920 struct ath_bus_ops;
32921diff -urNp linux-3.1.1/drivers/net/wireless/ipw2x00/ipw2100.c linux-3.1.1/drivers/net/wireless/ipw2x00/ipw2100.c
32922--- linux-3.1.1/drivers/net/wireless/ipw2x00/ipw2100.c 2011-11-11 15:19:27.000000000 -0500
32923+++ linux-3.1.1/drivers/net/wireless/ipw2x00/ipw2100.c 2011-11-16 18:40:22.000000000 -0500
32924@@ -2102,6 +2102,8 @@ static int ipw2100_set_essid(struct ipw2
32925 int err;
32926 DECLARE_SSID_BUF(ssid);
32927
32928+ pax_track_stack();
32929+
32930 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
32931
32932 if (ssid_len)
32933@@ -5451,6 +5453,8 @@ static int ipw2100_set_key(struct ipw210
32934 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
32935 int err;
32936
32937+ pax_track_stack();
32938+
32939 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
32940 idx, keylen, len);
32941
32942diff -urNp linux-3.1.1/drivers/net/wireless/ipw2x00/libipw_rx.c linux-3.1.1/drivers/net/wireless/ipw2x00/libipw_rx.c
32943--- linux-3.1.1/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-11-11 15:19:27.000000000 -0500
32944+++ linux-3.1.1/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-11-16 18:40:22.000000000 -0500
32945@@ -1565,6 +1565,8 @@ static void libipw_process_probe_respons
32946 unsigned long flags;
32947 DECLARE_SSID_BUF(ssid);
32948
32949+ pax_track_stack();
32950+
32951 LIBIPW_DEBUG_SCAN("'%s' (%pM"
32952 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
32953 print_ssid(ssid, info_element->data, info_element->len),
32954diff -urNp linux-3.1.1/drivers/net/wireless/iwlegacy/iwl3945-base.c linux-3.1.1/drivers/net/wireless/iwlegacy/iwl3945-base.c
32955--- linux-3.1.1/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-11-11 15:19:27.000000000 -0500
32956+++ linux-3.1.1/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-11-16 18:39:07.000000000 -0500
32957@@ -3687,7 +3687,9 @@ static int iwl3945_pci_probe(struct pci_
32958 */
32959 if (iwl3945_mod_params.disable_hw_scan) {
32960 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
32961- iwl3945_hw_ops.hw_scan = NULL;
32962+ pax_open_kernel();
32963+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
32964+ pax_close_kernel();
32965 }
32966
32967 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
32968diff -urNp linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
32969--- linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-11-11 15:19:27.000000000 -0500
32970+++ linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-11-16 18:40:22.000000000 -0500
32971@@ -920,6 +920,8 @@ static void rs_tx_status(void *priv_r, s
32972 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
32973 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
32974
32975+ pax_track_stack();
32976+
32977 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
32978
32979 /* Treat uninitialized rate scaling data same as non-existing. */
32980@@ -2931,6 +2933,8 @@ static void rs_fill_link_cmd(struct iwl_
32981 container_of(lq_sta, struct iwl_station_priv, lq_sta);
32982 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
32983
32984+ pax_track_stack();
32985+
32986 /* Override starting rate (index 0) if needed for debug purposes */
32987 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
32988
32989diff -urNp linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-debugfs.c
32990--- linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-11-11 15:19:27.000000000 -0500
32991+++ linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-11-16 18:40:22.000000000 -0500
32992@@ -561,6 +561,8 @@ static ssize_t iwl_dbgfs_status_read(str
32993 int pos = 0;
32994 const size_t bufsz = sizeof(buf);
32995
32996+ pax_track_stack();
32997+
32998 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
32999 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
33000 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
33001@@ -693,6 +695,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
33002 char buf[256 * NUM_IWL_RXON_CTX];
33003 const size_t bufsz = sizeof(buf);
33004
33005+ pax_track_stack();
33006+
33007 for_each_context(priv, ctx) {
33008 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
33009 ctx->ctxid);
33010diff -urNp linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-debug.h linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-debug.h
33011--- linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-11-11 15:19:27.000000000 -0500
33012+++ linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-11-16 18:39:07.000000000 -0500
33013@@ -68,8 +68,8 @@ do {
33014 } while (0)
33015
33016 #else
33017-#define IWL_DEBUG(__priv, level, fmt, args...)
33018-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
33019+#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
33020+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
33021 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
33022 const void *p, u32 len)
33023 {}
33024diff -urNp linux-3.1.1/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-3.1.1/drivers/net/wireless/iwmc3200wifi/debugfs.c
33025--- linux-3.1.1/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-11-11 15:19:27.000000000 -0500
33026+++ linux-3.1.1/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-11-16 18:40:22.000000000 -0500
33027@@ -327,6 +327,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
33028 int buf_len = 512;
33029 size_t len = 0;
33030
33031+ pax_track_stack();
33032+
33033 if (*ppos != 0)
33034 return 0;
33035 if (count < sizeof(buf))
33036diff -urNp linux-3.1.1/drivers/net/wireless/mac80211_hwsim.c linux-3.1.1/drivers/net/wireless/mac80211_hwsim.c
33037--- linux-3.1.1/drivers/net/wireless/mac80211_hwsim.c 2011-11-11 15:19:27.000000000 -0500
33038+++ linux-3.1.1/drivers/net/wireless/mac80211_hwsim.c 2011-11-16 18:39:07.000000000 -0500
33039@@ -1670,9 +1670,11 @@ static int __init init_mac80211_hwsim(vo
33040 return -EINVAL;
33041
33042 if (fake_hw_scan) {
33043- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
33044- mac80211_hwsim_ops.sw_scan_start = NULL;
33045- mac80211_hwsim_ops.sw_scan_complete = NULL;
33046+ pax_open_kernel();
33047+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
33048+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
33049+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
33050+ pax_close_kernel();
33051 }
33052
33053 spin_lock_init(&hwsim_radio_lock);
33054diff -urNp linux-3.1.1/drivers/net/wireless/mwifiex/main.h linux-3.1.1/drivers/net/wireless/mwifiex/main.h
33055--- linux-3.1.1/drivers/net/wireless/mwifiex/main.h 2011-11-11 15:19:27.000000000 -0500
33056+++ linux-3.1.1/drivers/net/wireless/mwifiex/main.h 2011-11-16 18:39:07.000000000 -0500
33057@@ -560,7 +560,7 @@ struct mwifiex_if_ops {
33058
33059 void (*update_mp_end_port) (struct mwifiex_adapter *, u16);
33060 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
33061-};
33062+} __no_const;
33063
33064 struct mwifiex_adapter {
33065 struct mwifiex_private *priv[MWIFIEX_MAX_BSS_NUM];
33066diff -urNp linux-3.1.1/drivers/net/wireless/rndis_wlan.c linux-3.1.1/drivers/net/wireless/rndis_wlan.c
33067--- linux-3.1.1/drivers/net/wireless/rndis_wlan.c 2011-11-11 15:19:27.000000000 -0500
33068+++ linux-3.1.1/drivers/net/wireless/rndis_wlan.c 2011-11-16 18:39:07.000000000 -0500
33069@@ -1277,7 +1277,7 @@ static int set_rts_threshold(struct usbn
33070
33071 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
33072
33073- if (rts_threshold < 0 || rts_threshold > 2347)
33074+ if (rts_threshold > 2347)
33075 rts_threshold = 2347;
33076
33077 tmp = cpu_to_le32(rts_threshold);
33078diff -urNp linux-3.1.1/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c linux-3.1.1/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
33079--- linux-3.1.1/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-11-11 15:19:27.000000000 -0500
33080+++ linux-3.1.1/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-11-16 18:40:22.000000000 -0500
33081@@ -837,6 +837,8 @@ bool _rtl92c_phy_sw_chnl_step_by_step(st
33082 u8 rfpath;
33083 u8 num_total_rfpath = rtlphy->num_total_rfpath;
33084
33085+ pax_track_stack();
33086+
33087 precommoncmdcnt = 0;
33088 _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
33089 MAX_PRECMD_CNT,
33090diff -urNp linux-3.1.1/drivers/net/wireless/wl1251/wl1251.h linux-3.1.1/drivers/net/wireless/wl1251/wl1251.h
33091--- linux-3.1.1/drivers/net/wireless/wl1251/wl1251.h 2011-11-11 15:19:27.000000000 -0500
33092+++ linux-3.1.1/drivers/net/wireless/wl1251/wl1251.h 2011-11-16 18:39:07.000000000 -0500
33093@@ -266,7 +266,7 @@ struct wl1251_if_operations {
33094 void (*reset)(struct wl1251 *wl);
33095 void (*enable_irq)(struct wl1251 *wl);
33096 void (*disable_irq)(struct wl1251 *wl);
33097-};
33098+} __no_const;
33099
33100 struct wl1251 {
33101 struct ieee80211_hw *hw;
33102diff -urNp linux-3.1.1/drivers/net/wireless/wl12xx/spi.c linux-3.1.1/drivers/net/wireless/wl12xx/spi.c
33103--- linux-3.1.1/drivers/net/wireless/wl12xx/spi.c 2011-11-11 15:19:27.000000000 -0500
33104+++ linux-3.1.1/drivers/net/wireless/wl12xx/spi.c 2011-11-16 18:40:22.000000000 -0500
33105@@ -281,6 +281,8 @@ static void wl1271_spi_raw_write(struct
33106 u32 chunk_len;
33107 int i;
33108
33109+ pax_track_stack();
33110+
33111 WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);
33112
33113 spi_message_init(&m);
33114diff -urNp linux-3.1.1/drivers/oprofile/buffer_sync.c linux-3.1.1/drivers/oprofile/buffer_sync.c
33115--- linux-3.1.1/drivers/oprofile/buffer_sync.c 2011-11-11 15:19:27.000000000 -0500
33116+++ linux-3.1.1/drivers/oprofile/buffer_sync.c 2011-11-16 18:39:07.000000000 -0500
33117@@ -343,7 +343,7 @@ static void add_data(struct op_entry *en
33118 if (cookie == NO_COOKIE)
33119 offset = pc;
33120 if (cookie == INVALID_COOKIE) {
33121- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33122+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33123 offset = pc;
33124 }
33125 if (cookie != last_cookie) {
33126@@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct
33127 /* add userspace sample */
33128
33129 if (!mm) {
33130- atomic_inc(&oprofile_stats.sample_lost_no_mm);
33131+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
33132 return 0;
33133 }
33134
33135 cookie = lookup_dcookie(mm, s->eip, &offset);
33136
33137 if (cookie == INVALID_COOKIE) {
33138- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33139+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33140 return 0;
33141 }
33142
33143@@ -563,7 +563,7 @@ void sync_buffer(int cpu)
33144 /* ignore backtraces if failed to add a sample */
33145 if (state == sb_bt_start) {
33146 state = sb_bt_ignore;
33147- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
33148+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
33149 }
33150 }
33151 release_mm(mm);
33152diff -urNp linux-3.1.1/drivers/oprofile/event_buffer.c linux-3.1.1/drivers/oprofile/event_buffer.c
33153--- linux-3.1.1/drivers/oprofile/event_buffer.c 2011-11-11 15:19:27.000000000 -0500
33154+++ linux-3.1.1/drivers/oprofile/event_buffer.c 2011-11-16 18:39:07.000000000 -0500
33155@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
33156 }
33157
33158 if (buffer_pos == buffer_size) {
33159- atomic_inc(&oprofile_stats.event_lost_overflow);
33160+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
33161 return;
33162 }
33163
33164diff -urNp linux-3.1.1/drivers/oprofile/oprof.c linux-3.1.1/drivers/oprofile/oprof.c
33165--- linux-3.1.1/drivers/oprofile/oprof.c 2011-11-11 15:19:27.000000000 -0500
33166+++ linux-3.1.1/drivers/oprofile/oprof.c 2011-11-16 18:39:07.000000000 -0500
33167@@ -110,7 +110,7 @@ static void switch_worker(struct work_st
33168 if (oprofile_ops.switch_events())
33169 return;
33170
33171- atomic_inc(&oprofile_stats.multiplex_counter);
33172+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
33173 start_switch_worker();
33174 }
33175
33176diff -urNp linux-3.1.1/drivers/oprofile/oprofilefs.c linux-3.1.1/drivers/oprofile/oprofilefs.c
33177--- linux-3.1.1/drivers/oprofile/oprofilefs.c 2011-11-11 15:19:27.000000000 -0500
33178+++ linux-3.1.1/drivers/oprofile/oprofilefs.c 2011-11-16 18:39:07.000000000 -0500
33179@@ -186,7 +186,7 @@ static const struct file_operations atom
33180
33181
33182 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
33183- char const *name, atomic_t *val)
33184+ char const *name, atomic_unchecked_t *val)
33185 {
33186 return __oprofilefs_create_file(sb, root, name,
33187 &atomic_ro_fops, 0444, val);
33188diff -urNp linux-3.1.1/drivers/oprofile/oprofile_stats.c linux-3.1.1/drivers/oprofile/oprofile_stats.c
33189--- linux-3.1.1/drivers/oprofile/oprofile_stats.c 2011-11-11 15:19:27.000000000 -0500
33190+++ linux-3.1.1/drivers/oprofile/oprofile_stats.c 2011-11-16 18:39:07.000000000 -0500
33191@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
33192 cpu_buf->sample_invalid_eip = 0;
33193 }
33194
33195- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
33196- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
33197- atomic_set(&oprofile_stats.event_lost_overflow, 0);
33198- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
33199- atomic_set(&oprofile_stats.multiplex_counter, 0);
33200+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
33201+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
33202+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
33203+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
33204+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
33205 }
33206
33207
33208diff -urNp linux-3.1.1/drivers/oprofile/oprofile_stats.h linux-3.1.1/drivers/oprofile/oprofile_stats.h
33209--- linux-3.1.1/drivers/oprofile/oprofile_stats.h 2011-11-11 15:19:27.000000000 -0500
33210+++ linux-3.1.1/drivers/oprofile/oprofile_stats.h 2011-11-16 18:39:07.000000000 -0500
33211@@ -13,11 +13,11 @@
33212 #include <linux/atomic.h>
33213
33214 struct oprofile_stat_struct {
33215- atomic_t sample_lost_no_mm;
33216- atomic_t sample_lost_no_mapping;
33217- atomic_t bt_lost_no_mapping;
33218- atomic_t event_lost_overflow;
33219- atomic_t multiplex_counter;
33220+ atomic_unchecked_t sample_lost_no_mm;
33221+ atomic_unchecked_t sample_lost_no_mapping;
33222+ atomic_unchecked_t bt_lost_no_mapping;
33223+ atomic_unchecked_t event_lost_overflow;
33224+ atomic_unchecked_t multiplex_counter;
33225 };
33226
33227 extern struct oprofile_stat_struct oprofile_stats;
33228diff -urNp linux-3.1.1/drivers/parport/procfs.c linux-3.1.1/drivers/parport/procfs.c
33229--- linux-3.1.1/drivers/parport/procfs.c 2011-11-11 15:19:27.000000000 -0500
33230+++ linux-3.1.1/drivers/parport/procfs.c 2011-11-16 18:39:07.000000000 -0500
33231@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
33232
33233 *ppos += len;
33234
33235- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
33236+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
33237 }
33238
33239 #ifdef CONFIG_PARPORT_1284
33240@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
33241
33242 *ppos += len;
33243
33244- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
33245+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
33246 }
33247 #endif /* IEEE1284.3 support. */
33248
33249diff -urNp linux-3.1.1/drivers/pci/hotplug/cpci_hotplug.h linux-3.1.1/drivers/pci/hotplug/cpci_hotplug.h
33250--- linux-3.1.1/drivers/pci/hotplug/cpci_hotplug.h 2011-11-11 15:19:27.000000000 -0500
33251+++ linux-3.1.1/drivers/pci/hotplug/cpci_hotplug.h 2011-11-16 18:39:07.000000000 -0500
33252@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
33253 int (*hardware_test) (struct slot* slot, u32 value);
33254 u8 (*get_power) (struct slot* slot);
33255 int (*set_power) (struct slot* slot, int value);
33256-};
33257+} __no_const;
33258
33259 struct cpci_hp_controller {
33260 unsigned int irq;
33261diff -urNp linux-3.1.1/drivers/pci/hotplug/cpqphp_nvram.c linux-3.1.1/drivers/pci/hotplug/cpqphp_nvram.c
33262--- linux-3.1.1/drivers/pci/hotplug/cpqphp_nvram.c 2011-11-11 15:19:27.000000000 -0500
33263+++ linux-3.1.1/drivers/pci/hotplug/cpqphp_nvram.c 2011-11-16 18:39:07.000000000 -0500
33264@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
33265
33266 void compaq_nvram_init (void __iomem *rom_start)
33267 {
33268+
33269+#ifndef CONFIG_PAX_KERNEXEC
33270 if (rom_start) {
33271 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
33272 }
33273+#endif
33274+
33275 dbg("int15 entry = %p\n", compaq_int15_entry_point);
33276
33277 /* initialize our int15 lock */
33278diff -urNp linux-3.1.1/drivers/pci/pcie/aspm.c linux-3.1.1/drivers/pci/pcie/aspm.c
33279--- linux-3.1.1/drivers/pci/pcie/aspm.c 2011-11-11 15:19:27.000000000 -0500
33280+++ linux-3.1.1/drivers/pci/pcie/aspm.c 2011-11-16 18:39:07.000000000 -0500
33281@@ -27,9 +27,9 @@
33282 #define MODULE_PARAM_PREFIX "pcie_aspm."
33283
33284 /* Note: those are not register definitions */
33285-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
33286-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
33287-#define ASPM_STATE_L1 (4) /* L1 state */
33288+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
33289+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
33290+#define ASPM_STATE_L1 (4U) /* L1 state */
33291 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
33292 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
33293
33294diff -urNp linux-3.1.1/drivers/pci/probe.c linux-3.1.1/drivers/pci/probe.c
33295--- linux-3.1.1/drivers/pci/probe.c 2011-11-11 15:19:27.000000000 -0500
33296+++ linux-3.1.1/drivers/pci/probe.c 2011-11-16 18:39:07.000000000 -0500
33297@@ -136,7 +136,7 @@ int __pci_read_base(struct pci_dev *dev,
33298 u32 l, sz, mask;
33299 u16 orig_cmd;
33300
33301- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
33302+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
33303
33304 if (!dev->mmio_always_on) {
33305 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
33306diff -urNp linux-3.1.1/drivers/pci/proc.c linux-3.1.1/drivers/pci/proc.c
33307--- linux-3.1.1/drivers/pci/proc.c 2011-11-11 15:19:27.000000000 -0500
33308+++ linux-3.1.1/drivers/pci/proc.c 2011-11-16 18:40:22.000000000 -0500
33309@@ -476,7 +476,16 @@ static const struct file_operations proc
33310 static int __init pci_proc_init(void)
33311 {
33312 struct pci_dev *dev = NULL;
33313+
33314+#ifdef CONFIG_GRKERNSEC_PROC_ADD
33315+#ifdef CONFIG_GRKERNSEC_PROC_USER
33316+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
33317+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
33318+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
33319+#endif
33320+#else
33321 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
33322+#endif
33323 proc_create("devices", 0, proc_bus_pci_dir,
33324 &proc_bus_pci_dev_operations);
33325 proc_initialized = 1;
33326diff -urNp linux-3.1.1/drivers/pci/xen-pcifront.c linux-3.1.1/drivers/pci/xen-pcifront.c
33327--- linux-3.1.1/drivers/pci/xen-pcifront.c 2011-11-11 15:19:27.000000000 -0500
33328+++ linux-3.1.1/drivers/pci/xen-pcifront.c 2011-11-16 18:40:22.000000000 -0500
33329@@ -187,6 +187,8 @@ static int pcifront_bus_read(struct pci_
33330 struct pcifront_sd *sd = bus->sysdata;
33331 struct pcifront_device *pdev = pcifront_get_pdev(sd);
33332
33333+ pax_track_stack();
33334+
33335 if (verbose_request)
33336 dev_info(&pdev->xdev->dev,
33337 "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
33338@@ -226,6 +228,8 @@ static int pcifront_bus_write(struct pci
33339 struct pcifront_sd *sd = bus->sysdata;
33340 struct pcifront_device *pdev = pcifront_get_pdev(sd);
33341
33342+ pax_track_stack();
33343+
33344 if (verbose_request)
33345 dev_info(&pdev->xdev->dev,
33346 "write dev=%04x:%02x:%02x.%01x - "
33347@@ -258,6 +262,8 @@ static int pci_frontend_enable_msix(stru
33348 struct pcifront_device *pdev = pcifront_get_pdev(sd);
33349 struct msi_desc *entry;
33350
33351+ pax_track_stack();
33352+
33353 if (nvec > SH_INFO_MAX_VEC) {
33354 dev_err(&dev->dev, "too much vector for pci frontend: %x."
33355 " Increase SH_INFO_MAX_VEC.\n", nvec);
33356@@ -309,6 +315,8 @@ static void pci_frontend_disable_msix(st
33357 struct pcifront_sd *sd = dev->bus->sysdata;
33358 struct pcifront_device *pdev = pcifront_get_pdev(sd);
33359
33360+ pax_track_stack();
33361+
33362 err = do_pci_op(pdev, &op);
33363
33364 /* What should do for error ? */
33365@@ -328,6 +336,8 @@ static int pci_frontend_enable_msi(struc
33366 struct pcifront_sd *sd = dev->bus->sysdata;
33367 struct pcifront_device *pdev = pcifront_get_pdev(sd);
33368
33369+ pax_track_stack();
33370+
33371 err = do_pci_op(pdev, &op);
33372 if (likely(!err)) {
33373 vector[0] = op.value;
33374diff -urNp linux-3.1.1/drivers/platform/x86/thinkpad_acpi.c linux-3.1.1/drivers/platform/x86/thinkpad_acpi.c
33375--- linux-3.1.1/drivers/platform/x86/thinkpad_acpi.c 2011-11-11 15:19:27.000000000 -0500
33376+++ linux-3.1.1/drivers/platform/x86/thinkpad_acpi.c 2011-11-16 18:39:07.000000000 -0500
33377@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
33378 return 0;
33379 }
33380
33381-void static hotkey_mask_warn_incomplete_mask(void)
33382+static void hotkey_mask_warn_incomplete_mask(void)
33383 {
33384 /* log only what the user can fix... */
33385 const u32 wantedmask = hotkey_driver_mask &
33386diff -urNp linux-3.1.1/drivers/pnp/pnpbios/bioscalls.c linux-3.1.1/drivers/pnp/pnpbios/bioscalls.c
33387--- linux-3.1.1/drivers/pnp/pnpbios/bioscalls.c 2011-11-11 15:19:27.000000000 -0500
33388+++ linux-3.1.1/drivers/pnp/pnpbios/bioscalls.c 2011-11-16 18:39:07.000000000 -0500
33389@@ -59,7 +59,7 @@ do { \
33390 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
33391 } while(0)
33392
33393-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
33394+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
33395 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
33396
33397 /*
33398@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func
33399
33400 cpu = get_cpu();
33401 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
33402+
33403+ pax_open_kernel();
33404 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
33405+ pax_close_kernel();
33406
33407 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
33408 spin_lock_irqsave(&pnp_bios_lock, flags);
33409@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func
33410 :"memory");
33411 spin_unlock_irqrestore(&pnp_bios_lock, flags);
33412
33413+ pax_open_kernel();
33414 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
33415+ pax_close_kernel();
33416+
33417 put_cpu();
33418
33419 /* If we get here and this is set then the PnP BIOS faulted on us. */
33420@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 n
33421 return status;
33422 }
33423
33424-void pnpbios_calls_init(union pnp_bios_install_struct *header)
33425+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
33426 {
33427 int i;
33428
33429@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_i
33430 pnp_bios_callpoint.offset = header->fields.pm16offset;
33431 pnp_bios_callpoint.segment = PNP_CS16;
33432
33433+ pax_open_kernel();
33434+
33435 for_each_possible_cpu(i) {
33436 struct desc_struct *gdt = get_cpu_gdt_table(i);
33437 if (!gdt)
33438@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_i
33439 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
33440 (unsigned long)__va(header->fields.pm16dseg));
33441 }
33442+
33443+ pax_close_kernel();
33444 }
33445diff -urNp linux-3.1.1/drivers/pnp/resource.c linux-3.1.1/drivers/pnp/resource.c
33446--- linux-3.1.1/drivers/pnp/resource.c 2011-11-11 15:19:27.000000000 -0500
33447+++ linux-3.1.1/drivers/pnp/resource.c 2011-11-16 18:39:07.000000000 -0500
33448@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
33449 return 1;
33450
33451 /* check if the resource is valid */
33452- if (*irq < 0 || *irq > 15)
33453+ if (*irq > 15)
33454 return 0;
33455
33456 /* check if the resource is reserved */
33457@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
33458 return 1;
33459
33460 /* check if the resource is valid */
33461- if (*dma < 0 || *dma == 4 || *dma > 7)
33462+ if (*dma == 4 || *dma > 7)
33463 return 0;
33464
33465 /* check if the resource is reserved */
33466diff -urNp linux-3.1.1/drivers/power/bq27x00_battery.c linux-3.1.1/drivers/power/bq27x00_battery.c
33467--- linux-3.1.1/drivers/power/bq27x00_battery.c 2011-11-11 15:19:27.000000000 -0500
33468+++ linux-3.1.1/drivers/power/bq27x00_battery.c 2011-11-16 18:39:07.000000000 -0500
33469@@ -67,7 +67,7 @@
33470 struct bq27x00_device_info;
33471 struct bq27x00_access_methods {
33472 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
33473-};
33474+} __no_const;
33475
33476 enum bq27x00_chip { BQ27000, BQ27500 };
33477
33478diff -urNp linux-3.1.1/drivers/regulator/max8660.c linux-3.1.1/drivers/regulator/max8660.c
33479--- linux-3.1.1/drivers/regulator/max8660.c 2011-11-11 15:19:27.000000000 -0500
33480+++ linux-3.1.1/drivers/regulator/max8660.c 2011-11-16 18:39:07.000000000 -0500
33481@@ -383,8 +383,10 @@ static int __devinit max8660_probe(struc
33482 max8660->shadow_regs[MAX8660_OVER1] = 5;
33483 } else {
33484 /* Otherwise devices can be toggled via software */
33485- max8660_dcdc_ops.enable = max8660_dcdc_enable;
33486- max8660_dcdc_ops.disable = max8660_dcdc_disable;
33487+ pax_open_kernel();
33488+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
33489+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
33490+ pax_close_kernel();
33491 }
33492
33493 /*
33494diff -urNp linux-3.1.1/drivers/regulator/mc13892-regulator.c linux-3.1.1/drivers/regulator/mc13892-regulator.c
33495--- linux-3.1.1/drivers/regulator/mc13892-regulator.c 2011-11-11 15:19:27.000000000 -0500
33496+++ linux-3.1.1/drivers/regulator/mc13892-regulator.c 2011-11-16 18:39:07.000000000 -0500
33497@@ -564,10 +564,12 @@ static int __devinit mc13892_regulator_p
33498 }
33499 mc13xxx_unlock(mc13892);
33500
33501- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
33502+ pax_open_kernel();
33503+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
33504 = mc13892_vcam_set_mode;
33505- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
33506+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
33507 = mc13892_vcam_get_mode;
33508+ pax_close_kernel();
33509 for (i = 0; i < pdata->num_regulators; i++) {
33510 init_data = &pdata->regulators[i];
33511 priv->regulators[i] = regulator_register(
33512diff -urNp linux-3.1.1/drivers/rtc/rtc-dev.c linux-3.1.1/drivers/rtc/rtc-dev.c
33513--- linux-3.1.1/drivers/rtc/rtc-dev.c 2011-11-11 15:19:27.000000000 -0500
33514+++ linux-3.1.1/drivers/rtc/rtc-dev.c 2011-11-16 18:40:22.000000000 -0500
33515@@ -14,6 +14,7 @@
33516 #include <linux/module.h>
33517 #include <linux/rtc.h>
33518 #include <linux/sched.h>
33519+#include <linux/grsecurity.h>
33520 #include "rtc-core.h"
33521
33522 static dev_t rtc_devt;
33523@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *f
33524 if (copy_from_user(&tm, uarg, sizeof(tm)))
33525 return -EFAULT;
33526
33527+ gr_log_timechange();
33528+
33529 return rtc_set_time(rtc, &tm);
33530
33531 case RTC_PIE_ON:
33532diff -urNp linux-3.1.1/drivers/scsi/aacraid/aacraid.h linux-3.1.1/drivers/scsi/aacraid/aacraid.h
33533--- linux-3.1.1/drivers/scsi/aacraid/aacraid.h 2011-11-11 15:19:27.000000000 -0500
33534+++ linux-3.1.1/drivers/scsi/aacraid/aacraid.h 2011-11-16 18:39:07.000000000 -0500
33535@@ -492,7 +492,7 @@ struct adapter_ops
33536 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
33537 /* Administrative operations */
33538 int (*adapter_comm)(struct aac_dev * dev, int comm);
33539-};
33540+} __no_const;
33541
33542 /*
33543 * Define which interrupt handler needs to be installed
33544diff -urNp linux-3.1.1/drivers/scsi/aacraid/commctrl.c linux-3.1.1/drivers/scsi/aacraid/commctrl.c
33545--- linux-3.1.1/drivers/scsi/aacraid/commctrl.c 2011-11-11 15:19:27.000000000 -0500
33546+++ linux-3.1.1/drivers/scsi/aacraid/commctrl.c 2011-11-16 18:40:22.000000000 -0500
33547@@ -482,6 +482,7 @@ static int aac_send_raw_srb(struct aac_d
33548 u32 actual_fibsize64, actual_fibsize = 0;
33549 int i;
33550
33551+ pax_track_stack();
33552
33553 if (dev->in_reset) {
33554 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
33555diff -urNp linux-3.1.1/drivers/scsi/aacraid/linit.c linux-3.1.1/drivers/scsi/aacraid/linit.c
33556--- linux-3.1.1/drivers/scsi/aacraid/linit.c 2011-11-11 15:19:27.000000000 -0500
33557+++ linux-3.1.1/drivers/scsi/aacraid/linit.c 2011-11-16 18:39:07.000000000 -0500
33558@@ -92,7 +92,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_
33559 #elif defined(__devinitconst)
33560 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
33561 #else
33562-static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
33563+static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
33564 #endif
33565 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
33566 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
33567diff -urNp linux-3.1.1/drivers/scsi/aic94xx/aic94xx_init.c linux-3.1.1/drivers/scsi/aic94xx/aic94xx_init.c
33568--- linux-3.1.1/drivers/scsi/aic94xx/aic94xx_init.c 2011-11-11 15:19:27.000000000 -0500
33569+++ linux-3.1.1/drivers/scsi/aic94xx/aic94xx_init.c 2011-11-16 18:39:07.000000000 -0500
33570@@ -1012,7 +1012,7 @@ static struct sas_domain_function_templa
33571 .lldd_control_phy = asd_control_phy,
33572 };
33573
33574-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
33575+static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
33576 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
33577 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
33578 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
33579diff -urNp linux-3.1.1/drivers/scsi/bfa/bfad.c linux-3.1.1/drivers/scsi/bfa/bfad.c
33580--- linux-3.1.1/drivers/scsi/bfa/bfad.c 2011-11-11 15:19:27.000000000 -0500
33581+++ linux-3.1.1/drivers/scsi/bfa/bfad.c 2011-11-16 19:01:15.000000000 -0500
33582@@ -1019,6 +1019,8 @@ bfad_start_ops(struct bfad_s *bfad) {
33583 struct bfad_vport_s *vport, *vport_new;
33584 struct bfa_fcs_driver_info_s driver_info;
33585
33586+ pax_track_stack();
33587+
33588 /* Limit min/max. xfer size to [64k-32MB] */
33589 if (max_xfer_size < BFAD_MIN_SECTORS >> 1)
33590 max_xfer_size = BFAD_MIN_SECTORS >> 1;
33591diff -urNp linux-3.1.1/drivers/scsi/bfa/bfa_fcpim.c linux-3.1.1/drivers/scsi/bfa/bfa_fcpim.c
33592--- linux-3.1.1/drivers/scsi/bfa/bfa_fcpim.c 2011-11-11 15:19:27.000000000 -0500
33593+++ linux-3.1.1/drivers/scsi/bfa/bfa_fcpim.c 2011-11-16 18:39:07.000000000 -0500
33594@@ -4179,7 +4179,7 @@ bfa_itn_create(struct bfa_s *bfa, struct
33595 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
33596 {
33597 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
33598- struct bfa_itn_s *itn;
33599+ bfa_itn_s_no_const *itn;
33600
33601 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
33602 itn->isr = isr;
33603diff -urNp linux-3.1.1/drivers/scsi/bfa/bfa_fcpim.h linux-3.1.1/drivers/scsi/bfa/bfa_fcpim.h
33604--- linux-3.1.1/drivers/scsi/bfa/bfa_fcpim.h 2011-11-11 15:19:27.000000000 -0500
33605+++ linux-3.1.1/drivers/scsi/bfa/bfa_fcpim.h 2011-11-16 18:39:07.000000000 -0500
33606@@ -37,6 +37,7 @@ struct bfa_iotag_s {
33607 struct bfa_itn_s {
33608 bfa_isr_func_t isr;
33609 };
33610+typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
33611
33612 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
33613 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
33614@@ -149,7 +150,7 @@ struct bfa_fcp_mod_s {
33615 struct list_head iotag_tio_free_q; /* free IO resources */
33616 struct list_head iotag_unused_q; /* unused IO resources*/
33617 struct bfa_iotag_s *iotag_arr;
33618- struct bfa_itn_s *itn_arr;
33619+ bfa_itn_s_no_const *itn_arr;
33620 int num_ioim_reqs;
33621 int num_fwtio_reqs;
33622 int num_itns;
33623diff -urNp linux-3.1.1/drivers/scsi/bfa/bfa_fcs_lport.c linux-3.1.1/drivers/scsi/bfa/bfa_fcs_lport.c
33624--- linux-3.1.1/drivers/scsi/bfa/bfa_fcs_lport.c 2011-11-11 15:19:27.000000000 -0500
33625+++ linux-3.1.1/drivers/scsi/bfa/bfa_fcs_lport.c 2011-11-16 18:40:22.000000000 -0500
33626@@ -1700,6 +1700,8 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struc
33627 u16 len, count;
33628 u16 templen;
33629
33630+ pax_track_stack();
33631+
33632 /*
33633 * get hba attributes
33634 */
33635@@ -1977,6 +1979,8 @@ bfa_fcs_lport_fdmi_build_portattr_block(
33636 u8 count = 0;
33637 u16 templen;
33638
33639+ pax_track_stack();
33640+
33641 /*
33642 * get port attributes
33643 */
33644diff -urNp linux-3.1.1/drivers/scsi/bfa/bfa_fcs_rport.c linux-3.1.1/drivers/scsi/bfa/bfa_fcs_rport.c
33645--- linux-3.1.1/drivers/scsi/bfa/bfa_fcs_rport.c 2011-11-11 15:19:27.000000000 -0500
33646+++ linux-3.1.1/drivers/scsi/bfa/bfa_fcs_rport.c 2011-11-16 18:40:22.000000000 -0500
33647@@ -1871,6 +1871,8 @@ bfa_fcs_rport_process_rpsc(struct bfa_fc
33648 struct fc_rpsc_speed_info_s speeds;
33649 struct bfa_port_attr_s pport_attr;
33650
33651+ pax_track_stack();
33652+
33653 bfa_trc(port->fcs, rx_fchs->s_id);
33654 bfa_trc(port->fcs, rx_fchs->d_id);
33655
33656diff -urNp linux-3.1.1/drivers/scsi/bfa/bfa.h linux-3.1.1/drivers/scsi/bfa/bfa.h
33657--- linux-3.1.1/drivers/scsi/bfa/bfa.h 2011-11-11 15:19:27.000000000 -0500
33658+++ linux-3.1.1/drivers/scsi/bfa/bfa.h 2011-11-16 18:39:07.000000000 -0500
33659@@ -196,7 +196,7 @@ struct bfa_hwif_s {
33660 u32 *end);
33661 int cpe_vec_q0;
33662 int rme_vec_q0;
33663-};
33664+} __no_const;
33665 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
33666
33667 struct bfa_faa_cbfn_s {
33668diff -urNp linux-3.1.1/drivers/scsi/bfa/bfa_ioc.h linux-3.1.1/drivers/scsi/bfa/bfa_ioc.h
33669--- linux-3.1.1/drivers/scsi/bfa/bfa_ioc.h 2011-11-11 15:19:27.000000000 -0500
33670+++ linux-3.1.1/drivers/scsi/bfa/bfa_ioc.h 2011-11-16 18:39:07.000000000 -0500
33671@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
33672 bfa_ioc_disable_cbfn_t disable_cbfn;
33673 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
33674 bfa_ioc_reset_cbfn_t reset_cbfn;
33675-};
33676+} __no_const;
33677
33678 /*
33679 * IOC event notification mechanism.
33680@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
33681 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
33682 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
33683 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
33684-};
33685+} __no_const;
33686
33687 /*
33688 * Queue element to wait for room in request queue. FIFO order is
33689diff -urNp linux-3.1.1/drivers/scsi/BusLogic.c linux-3.1.1/drivers/scsi/BusLogic.c
33690--- linux-3.1.1/drivers/scsi/BusLogic.c 2011-11-11 15:19:27.000000000 -0500
33691+++ linux-3.1.1/drivers/scsi/BusLogic.c 2011-11-16 18:40:22.000000000 -0500
33692@@ -962,6 +962,8 @@ static int __init BusLogic_InitializeFla
33693 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
33694 *PrototypeHostAdapter)
33695 {
33696+ pax_track_stack();
33697+
33698 /*
33699 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
33700 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
33701diff -urNp linux-3.1.1/drivers/scsi/dpt_i2o.c linux-3.1.1/drivers/scsi/dpt_i2o.c
33702--- linux-3.1.1/drivers/scsi/dpt_i2o.c 2011-11-11 15:19:27.000000000 -0500
33703+++ linux-3.1.1/drivers/scsi/dpt_i2o.c 2011-11-16 18:40:22.000000000 -0500
33704@@ -1811,6 +1811,8 @@ static int adpt_i2o_passthru(adpt_hba* p
33705 dma_addr_t addr;
33706 ulong flags = 0;
33707
33708+ pax_track_stack();
33709+
33710 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
33711 // get user msg size in u32s
33712 if(get_user(size, &user_msg[0])){
33713@@ -2317,6 +2319,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
33714 s32 rcode;
33715 dma_addr_t addr;
33716
33717+ pax_track_stack();
33718+
33719 memset(msg, 0 , sizeof(msg));
33720 len = scsi_bufflen(cmd);
33721 direction = 0x00000000;
33722diff -urNp linux-3.1.1/drivers/scsi/eata.c linux-3.1.1/drivers/scsi/eata.c
33723--- linux-3.1.1/drivers/scsi/eata.c 2011-11-11 15:19:27.000000000 -0500
33724+++ linux-3.1.1/drivers/scsi/eata.c 2011-11-16 18:40:22.000000000 -0500
33725@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
33726 struct hostdata *ha;
33727 char name[16];
33728
33729+ pax_track_stack();
33730+
33731 sprintf(name, "%s%d", driver_name, j);
33732
33733 if (!request_region(port_base, REGION_SIZE, driver_name)) {
33734diff -urNp linux-3.1.1/drivers/scsi/fcoe/fcoe_ctlr.c linux-3.1.1/drivers/scsi/fcoe/fcoe_ctlr.c
33735--- linux-3.1.1/drivers/scsi/fcoe/fcoe_ctlr.c 2011-11-11 15:19:27.000000000 -0500
33736+++ linux-3.1.1/drivers/scsi/fcoe/fcoe_ctlr.c 2011-11-16 18:40:22.000000000 -0500
33737@@ -2503,6 +2503,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe
33738 } buf;
33739 int rc;
33740
33741+ pax_track_stack();
33742+
33743 fiph = (struct fip_header *)skb->data;
33744 sub = fiph->fip_subcode;
33745
33746diff -urNp linux-3.1.1/drivers/scsi/gdth.c linux-3.1.1/drivers/scsi/gdth.c
33747--- linux-3.1.1/drivers/scsi/gdth.c 2011-11-11 15:19:27.000000000 -0500
33748+++ linux-3.1.1/drivers/scsi/gdth.c 2011-11-16 18:40:22.000000000 -0500
33749@@ -4107,6 +4107,8 @@ static int ioc_lockdrv(void __user *arg)
33750 unsigned long flags;
33751 gdth_ha_str *ha;
33752
33753+ pax_track_stack();
33754+
33755 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
33756 return -EFAULT;
33757 ha = gdth_find_ha(ldrv.ionode);
33758@@ -4139,6 +4141,8 @@ static int ioc_resetdrv(void __user *arg
33759 gdth_ha_str *ha;
33760 int rval;
33761
33762+ pax_track_stack();
33763+
33764 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
33765 res.number >= MAX_HDRIVES)
33766 return -EFAULT;
33767@@ -4174,6 +4178,8 @@ static int ioc_general(void __user *arg,
33768 gdth_ha_str *ha;
33769 int rval;
33770
33771+ pax_track_stack();
33772+
33773 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
33774 return -EFAULT;
33775 ha = gdth_find_ha(gen.ionode);
33776@@ -4642,6 +4648,9 @@ static void gdth_flush(gdth_ha_str *ha)
33777 int i;
33778 gdth_cmd_str gdtcmd;
33779 char cmnd[MAX_COMMAND_SIZE];
33780+
33781+ pax_track_stack();
33782+
33783 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
33784
33785 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
33786diff -urNp linux-3.1.1/drivers/scsi/gdth_proc.c linux-3.1.1/drivers/scsi/gdth_proc.c
33787--- linux-3.1.1/drivers/scsi/gdth_proc.c 2011-11-11 15:19:27.000000000 -0500
33788+++ linux-3.1.1/drivers/scsi/gdth_proc.c 2011-11-16 18:40:22.000000000 -0500
33789@@ -47,6 +47,9 @@ static int gdth_set_asc_info(struct Scsi
33790 u64 paddr;
33791
33792 char cmnd[MAX_COMMAND_SIZE];
33793+
33794+ pax_track_stack();
33795+
33796 memset(cmnd, 0xff, 12);
33797 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
33798
33799@@ -175,6 +178,8 @@ static int gdth_get_info(char *buffer,ch
33800 gdth_hget_str *phg;
33801 char cmnd[MAX_COMMAND_SIZE];
33802
33803+ pax_track_stack();
33804+
33805 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
33806 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
33807 if (!gdtcmd || !estr)
33808diff -urNp linux-3.1.1/drivers/scsi/hosts.c linux-3.1.1/drivers/scsi/hosts.c
33809--- linux-3.1.1/drivers/scsi/hosts.c 2011-11-11 15:19:27.000000000 -0500
33810+++ linux-3.1.1/drivers/scsi/hosts.c 2011-11-16 18:39:07.000000000 -0500
33811@@ -42,7 +42,7 @@
33812 #include "scsi_logging.h"
33813
33814
33815-static atomic_t scsi_host_next_hn; /* host_no for next new host */
33816+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
33817
33818
33819 static void scsi_host_cls_release(struct device *dev)
33820@@ -357,7 +357,7 @@ struct Scsi_Host *scsi_host_alloc(struct
33821 * subtract one because we increment first then return, but we need to
33822 * know what the next host number was before increment
33823 */
33824- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
33825+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
33826 shost->dma_channel = 0xff;
33827
33828 /* These three are default values which can be overridden */
33829diff -urNp linux-3.1.1/drivers/scsi/hpsa.c linux-3.1.1/drivers/scsi/hpsa.c
33830--- linux-3.1.1/drivers/scsi/hpsa.c 2011-11-11 15:19:27.000000000 -0500
33831+++ linux-3.1.1/drivers/scsi/hpsa.c 2011-11-16 18:39:07.000000000 -0500
33832@@ -498,7 +498,7 @@ static inline u32 next_command(struct ct
33833 u32 a;
33834
33835 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
33836- return h->access.command_completed(h);
33837+ return h->access->command_completed(h);
33838
33839 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
33840 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
33841@@ -2955,7 +2955,7 @@ static void start_io(struct ctlr_info *h
33842 while (!list_empty(&h->reqQ)) {
33843 c = list_entry(h->reqQ.next, struct CommandList, list);
33844 /* can't do anything if fifo is full */
33845- if ((h->access.fifo_full(h))) {
33846+ if ((h->access->fifo_full(h))) {
33847 dev_warn(&h->pdev->dev, "fifo full\n");
33848 break;
33849 }
33850@@ -2965,7 +2965,7 @@ static void start_io(struct ctlr_info *h
33851 h->Qdepth--;
33852
33853 /* Tell the controller execute command */
33854- h->access.submit_command(h, c);
33855+ h->access->submit_command(h, c);
33856
33857 /* Put job onto the completed Q */
33858 addQ(&h->cmpQ, c);
33859@@ -2974,17 +2974,17 @@ static void start_io(struct ctlr_info *h
33860
33861 static inline unsigned long get_next_completion(struct ctlr_info *h)
33862 {
33863- return h->access.command_completed(h);
33864+ return h->access->command_completed(h);
33865 }
33866
33867 static inline bool interrupt_pending(struct ctlr_info *h)
33868 {
33869- return h->access.intr_pending(h);
33870+ return h->access->intr_pending(h);
33871 }
33872
33873 static inline long interrupt_not_for_us(struct ctlr_info *h)
33874 {
33875- return (h->access.intr_pending(h) == 0) ||
33876+ return (h->access->intr_pending(h) == 0) ||
33877 (h->interrupts_enabled == 0);
33878 }
33879
33880@@ -3881,7 +3881,7 @@ static int __devinit hpsa_pci_init(struc
33881 if (prod_index < 0)
33882 return -ENODEV;
33883 h->product_name = products[prod_index].product_name;
33884- h->access = *(products[prod_index].access);
33885+ h->access = products[prod_index].access;
33886
33887 if (hpsa_board_disabled(h->pdev)) {
33888 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
33889@@ -4158,7 +4158,7 @@ reinit_after_soft_reset:
33890 }
33891
33892 /* make sure the board interrupts are off */
33893- h->access.set_intr_mask(h, HPSA_INTR_OFF);
33894+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
33895
33896 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
33897 goto clean2;
33898@@ -4192,7 +4192,7 @@ reinit_after_soft_reset:
33899 * fake ones to scoop up any residual completions.
33900 */
33901 spin_lock_irqsave(&h->lock, flags);
33902- h->access.set_intr_mask(h, HPSA_INTR_OFF);
33903+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
33904 spin_unlock_irqrestore(&h->lock, flags);
33905 free_irq(h->intr[h->intr_mode], h);
33906 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
33907@@ -4211,9 +4211,9 @@ reinit_after_soft_reset:
33908 dev_info(&h->pdev->dev, "Board READY.\n");
33909 dev_info(&h->pdev->dev,
33910 "Waiting for stale completions to drain.\n");
33911- h->access.set_intr_mask(h, HPSA_INTR_ON);
33912+ h->access->set_intr_mask(h, HPSA_INTR_ON);
33913 msleep(10000);
33914- h->access.set_intr_mask(h, HPSA_INTR_OFF);
33915+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
33916
33917 rc = controller_reset_failed(h->cfgtable);
33918 if (rc)
33919@@ -4234,7 +4234,7 @@ reinit_after_soft_reset:
33920 }
33921
33922 /* Turn the interrupts on so we can service requests */
33923- h->access.set_intr_mask(h, HPSA_INTR_ON);
33924+ h->access->set_intr_mask(h, HPSA_INTR_ON);
33925
33926 hpsa_hba_inquiry(h);
33927 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
33928@@ -4287,7 +4287,7 @@ static void hpsa_shutdown(struct pci_dev
33929 * To write all data in the battery backed cache to disks
33930 */
33931 hpsa_flush_cache(h);
33932- h->access.set_intr_mask(h, HPSA_INTR_OFF);
33933+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
33934 free_irq(h->intr[h->intr_mode], h);
33935 #ifdef CONFIG_PCI_MSI
33936 if (h->msix_vector)
33937@@ -4450,7 +4450,7 @@ static __devinit void hpsa_enter_perform
33938 return;
33939 }
33940 /* Change the access methods to the performant access methods */
33941- h->access = SA5_performant_access;
33942+ h->access = &SA5_performant_access;
33943 h->transMethod = CFGTBL_Trans_Performant;
33944 }
33945
33946diff -urNp linux-3.1.1/drivers/scsi/hpsa.h linux-3.1.1/drivers/scsi/hpsa.h
33947--- linux-3.1.1/drivers/scsi/hpsa.h 2011-11-11 15:19:27.000000000 -0500
33948+++ linux-3.1.1/drivers/scsi/hpsa.h 2011-11-16 18:39:07.000000000 -0500
33949@@ -73,7 +73,7 @@ struct ctlr_info {
33950 unsigned int msix_vector;
33951 unsigned int msi_vector;
33952 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
33953- struct access_method access;
33954+ struct access_method *access;
33955
33956 /* queue and queue Info */
33957 struct list_head reqQ;
33958diff -urNp linux-3.1.1/drivers/scsi/ips.h linux-3.1.1/drivers/scsi/ips.h
33959--- linux-3.1.1/drivers/scsi/ips.h 2011-11-11 15:19:27.000000000 -0500
33960+++ linux-3.1.1/drivers/scsi/ips.h 2011-11-16 18:39:07.000000000 -0500
33961@@ -1027,7 +1027,7 @@ typedef struct {
33962 int (*intr)(struct ips_ha *);
33963 void (*enableint)(struct ips_ha *);
33964 uint32_t (*statupd)(struct ips_ha *);
33965-} ips_hw_func_t;
33966+} __no_const ips_hw_func_t;
33967
33968 typedef struct ips_ha {
33969 uint8_t ha_id[IPS_MAX_CHANNELS+1];
33970diff -urNp linux-3.1.1/drivers/scsi/libfc/fc_exch.c linux-3.1.1/drivers/scsi/libfc/fc_exch.c
33971--- linux-3.1.1/drivers/scsi/libfc/fc_exch.c 2011-11-11 15:19:27.000000000 -0500
33972+++ linux-3.1.1/drivers/scsi/libfc/fc_exch.c 2011-11-16 18:39:07.000000000 -0500
33973@@ -105,12 +105,12 @@ struct fc_exch_mgr {
33974 * all together if not used XXX
33975 */
33976 struct {
33977- atomic_t no_free_exch;
33978- atomic_t no_free_exch_xid;
33979- atomic_t xid_not_found;
33980- atomic_t xid_busy;
33981- atomic_t seq_not_found;
33982- atomic_t non_bls_resp;
33983+ atomic_unchecked_t no_free_exch;
33984+ atomic_unchecked_t no_free_exch_xid;
33985+ atomic_unchecked_t xid_not_found;
33986+ atomic_unchecked_t xid_busy;
33987+ atomic_unchecked_t seq_not_found;
33988+ atomic_unchecked_t non_bls_resp;
33989 } stats;
33990 };
33991
33992@@ -718,7 +718,7 @@ static struct fc_exch *fc_exch_em_alloc(
33993 /* allocate memory for exchange */
33994 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
33995 if (!ep) {
33996- atomic_inc(&mp->stats.no_free_exch);
33997+ atomic_inc_unchecked(&mp->stats.no_free_exch);
33998 goto out;
33999 }
34000 memset(ep, 0, sizeof(*ep));
34001@@ -779,7 +779,7 @@ out:
34002 return ep;
34003 err:
34004 spin_unlock_bh(&pool->lock);
34005- atomic_inc(&mp->stats.no_free_exch_xid);
34006+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
34007 mempool_free(ep, mp->ep_pool);
34008 return NULL;
34009 }
34010@@ -922,7 +922,7 @@ static enum fc_pf_rjt_reason fc_seq_look
34011 xid = ntohs(fh->fh_ox_id); /* we originated exch */
34012 ep = fc_exch_find(mp, xid);
34013 if (!ep) {
34014- atomic_inc(&mp->stats.xid_not_found);
34015+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34016 reject = FC_RJT_OX_ID;
34017 goto out;
34018 }
34019@@ -952,7 +952,7 @@ static enum fc_pf_rjt_reason fc_seq_look
34020 ep = fc_exch_find(mp, xid);
34021 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
34022 if (ep) {
34023- atomic_inc(&mp->stats.xid_busy);
34024+ atomic_inc_unchecked(&mp->stats.xid_busy);
34025 reject = FC_RJT_RX_ID;
34026 goto rel;
34027 }
34028@@ -963,7 +963,7 @@ static enum fc_pf_rjt_reason fc_seq_look
34029 }
34030 xid = ep->xid; /* get our XID */
34031 } else if (!ep) {
34032- atomic_inc(&mp->stats.xid_not_found);
34033+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34034 reject = FC_RJT_RX_ID; /* XID not found */
34035 goto out;
34036 }
34037@@ -980,7 +980,7 @@ static enum fc_pf_rjt_reason fc_seq_look
34038 } else {
34039 sp = &ep->seq;
34040 if (sp->id != fh->fh_seq_id) {
34041- atomic_inc(&mp->stats.seq_not_found);
34042+ atomic_inc_unchecked(&mp->stats.seq_not_found);
34043 if (f_ctl & FC_FC_END_SEQ) {
34044 /*
34045 * Update sequence_id based on incoming last
34046@@ -1430,22 +1430,22 @@ static void fc_exch_recv_seq_resp(struct
34047
34048 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
34049 if (!ep) {
34050- atomic_inc(&mp->stats.xid_not_found);
34051+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34052 goto out;
34053 }
34054 if (ep->esb_stat & ESB_ST_COMPLETE) {
34055- atomic_inc(&mp->stats.xid_not_found);
34056+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34057 goto rel;
34058 }
34059 if (ep->rxid == FC_XID_UNKNOWN)
34060 ep->rxid = ntohs(fh->fh_rx_id);
34061 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
34062- atomic_inc(&mp->stats.xid_not_found);
34063+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34064 goto rel;
34065 }
34066 if (ep->did != ntoh24(fh->fh_s_id) &&
34067 ep->did != FC_FID_FLOGI) {
34068- atomic_inc(&mp->stats.xid_not_found);
34069+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34070 goto rel;
34071 }
34072 sof = fr_sof(fp);
34073@@ -1454,7 +1454,7 @@ static void fc_exch_recv_seq_resp(struct
34074 sp->ssb_stat |= SSB_ST_RESP;
34075 sp->id = fh->fh_seq_id;
34076 } else if (sp->id != fh->fh_seq_id) {
34077- atomic_inc(&mp->stats.seq_not_found);
34078+ atomic_inc_unchecked(&mp->stats.seq_not_found);
34079 goto rel;
34080 }
34081
34082@@ -1518,9 +1518,9 @@ static void fc_exch_recv_resp(struct fc_
34083 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
34084
34085 if (!sp)
34086- atomic_inc(&mp->stats.xid_not_found);
34087+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34088 else
34089- atomic_inc(&mp->stats.non_bls_resp);
34090+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
34091
34092 fc_frame_free(fp);
34093 }
34094diff -urNp linux-3.1.1/drivers/scsi/libsas/sas_ata.c linux-3.1.1/drivers/scsi/libsas/sas_ata.c
34095--- linux-3.1.1/drivers/scsi/libsas/sas_ata.c 2011-11-11 15:19:27.000000000 -0500
34096+++ linux-3.1.1/drivers/scsi/libsas/sas_ata.c 2011-11-16 18:39:07.000000000 -0500
34097@@ -368,7 +368,7 @@ static struct ata_port_operations sas_sa
34098 .postreset = ata_std_postreset,
34099 .error_handler = ata_std_error_handler,
34100 .post_internal_cmd = sas_ata_post_internal,
34101- .qc_defer = ata_std_qc_defer,
34102+ .qc_defer = ata_std_qc_defer,
34103 .qc_prep = ata_noop_qc_prep,
34104 .qc_issue = sas_ata_qc_issue,
34105 .qc_fill_rtf = sas_ata_qc_fill_rtf,
34106diff -urNp linux-3.1.1/drivers/scsi/lpfc/lpfc_debugfs.c linux-3.1.1/drivers/scsi/lpfc/lpfc_debugfs.c
34107--- linux-3.1.1/drivers/scsi/lpfc/lpfc_debugfs.c 2011-11-11 15:19:27.000000000 -0500
34108+++ linux-3.1.1/drivers/scsi/lpfc/lpfc_debugfs.c 2011-11-16 18:40:22.000000000 -0500
34109@@ -105,7 +105,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_
34110
34111 #include <linux/debugfs.h>
34112
34113-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34114+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34115 static unsigned long lpfc_debugfs_start_time = 0L;
34116
34117 /* iDiag */
34118@@ -146,7 +146,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
34119 lpfc_debugfs_enable = 0;
34120
34121 len = 0;
34122- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
34123+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
34124 (lpfc_debugfs_max_disc_trc - 1);
34125 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
34126 dtp = vport->disc_trc + i;
34127@@ -212,7 +212,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
34128 lpfc_debugfs_enable = 0;
34129
34130 len = 0;
34131- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
34132+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
34133 (lpfc_debugfs_max_slow_ring_trc - 1);
34134 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
34135 dtp = phba->slow_ring_trc + i;
34136@@ -635,14 +635,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
34137 !vport || !vport->disc_trc)
34138 return;
34139
34140- index = atomic_inc_return(&vport->disc_trc_cnt) &
34141+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
34142 (lpfc_debugfs_max_disc_trc - 1);
34143 dtp = vport->disc_trc + index;
34144 dtp->fmt = fmt;
34145 dtp->data1 = data1;
34146 dtp->data2 = data2;
34147 dtp->data3 = data3;
34148- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34149+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34150 dtp->jif = jiffies;
34151 #endif
34152 return;
34153@@ -673,14 +673,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
34154 !phba || !phba->slow_ring_trc)
34155 return;
34156
34157- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
34158+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
34159 (lpfc_debugfs_max_slow_ring_trc - 1);
34160 dtp = phba->slow_ring_trc + index;
34161 dtp->fmt = fmt;
34162 dtp->data1 = data1;
34163 dtp->data2 = data2;
34164 dtp->data3 = data3;
34165- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34166+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34167 dtp->jif = jiffies;
34168 #endif
34169 return;
34170@@ -3828,7 +3828,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
34171 "slow_ring buffer\n");
34172 goto debug_failed;
34173 }
34174- atomic_set(&phba->slow_ring_trc_cnt, 0);
34175+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
34176 memset(phba->slow_ring_trc, 0,
34177 (sizeof(struct lpfc_debugfs_trc) *
34178 lpfc_debugfs_max_slow_ring_trc));
34179@@ -3874,7 +3874,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
34180 "buffer\n");
34181 goto debug_failed;
34182 }
34183- atomic_set(&vport->disc_trc_cnt, 0);
34184+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
34185
34186 snprintf(name, sizeof(name), "discovery_trace");
34187 vport->debug_disc_trc =
34188diff -urNp linux-3.1.1/drivers/scsi/lpfc/lpfc.h linux-3.1.1/drivers/scsi/lpfc/lpfc.h
34189--- linux-3.1.1/drivers/scsi/lpfc/lpfc.h 2011-11-11 15:19:27.000000000 -0500
34190+++ linux-3.1.1/drivers/scsi/lpfc/lpfc.h 2011-11-16 18:39:07.000000000 -0500
34191@@ -425,7 +425,7 @@ struct lpfc_vport {
34192 struct dentry *debug_nodelist;
34193 struct dentry *vport_debugfs_root;
34194 struct lpfc_debugfs_trc *disc_trc;
34195- atomic_t disc_trc_cnt;
34196+ atomic_unchecked_t disc_trc_cnt;
34197 #endif
34198 uint8_t stat_data_enabled;
34199 uint8_t stat_data_blocked;
34200@@ -835,8 +835,8 @@ struct lpfc_hba {
34201 struct timer_list fabric_block_timer;
34202 unsigned long bit_flags;
34203 #define FABRIC_COMANDS_BLOCKED 0
34204- atomic_t num_rsrc_err;
34205- atomic_t num_cmd_success;
34206+ atomic_unchecked_t num_rsrc_err;
34207+ atomic_unchecked_t num_cmd_success;
34208 unsigned long last_rsrc_error_time;
34209 unsigned long last_ramp_down_time;
34210 unsigned long last_ramp_up_time;
34211@@ -850,7 +850,7 @@ struct lpfc_hba {
34212 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
34213 struct dentry *debug_slow_ring_trc;
34214 struct lpfc_debugfs_trc *slow_ring_trc;
34215- atomic_t slow_ring_trc_cnt;
34216+ atomic_unchecked_t slow_ring_trc_cnt;
34217 /* iDiag debugfs sub-directory */
34218 struct dentry *idiag_root;
34219 struct dentry *idiag_pci_cfg;
34220diff -urNp linux-3.1.1/drivers/scsi/lpfc/lpfc_init.c linux-3.1.1/drivers/scsi/lpfc/lpfc_init.c
34221--- linux-3.1.1/drivers/scsi/lpfc/lpfc_init.c 2011-11-11 15:19:27.000000000 -0500
34222+++ linux-3.1.1/drivers/scsi/lpfc/lpfc_init.c 2011-11-16 18:39:07.000000000 -0500
34223@@ -9969,8 +9969,10 @@ lpfc_init(void)
34224 printk(LPFC_COPYRIGHT "\n");
34225
34226 if (lpfc_enable_npiv) {
34227- lpfc_transport_functions.vport_create = lpfc_vport_create;
34228- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
34229+ pax_open_kernel();
34230+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
34231+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
34232+ pax_close_kernel();
34233 }
34234 lpfc_transport_template =
34235 fc_attach_transport(&lpfc_transport_functions);
34236diff -urNp linux-3.1.1/drivers/scsi/lpfc/lpfc_scsi.c linux-3.1.1/drivers/scsi/lpfc/lpfc_scsi.c
34237--- linux-3.1.1/drivers/scsi/lpfc/lpfc_scsi.c 2011-11-11 15:19:27.000000000 -0500
34238+++ linux-3.1.1/drivers/scsi/lpfc/lpfc_scsi.c 2011-11-16 18:39:07.000000000 -0500
34239@@ -297,7 +297,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
34240 uint32_t evt_posted;
34241
34242 spin_lock_irqsave(&phba->hbalock, flags);
34243- atomic_inc(&phba->num_rsrc_err);
34244+ atomic_inc_unchecked(&phba->num_rsrc_err);
34245 phba->last_rsrc_error_time = jiffies;
34246
34247 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
34248@@ -338,7 +338,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
34249 unsigned long flags;
34250 struct lpfc_hba *phba = vport->phba;
34251 uint32_t evt_posted;
34252- atomic_inc(&phba->num_cmd_success);
34253+ atomic_inc_unchecked(&phba->num_cmd_success);
34254
34255 if (vport->cfg_lun_queue_depth <= queue_depth)
34256 return;
34257@@ -382,8 +382,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
34258 unsigned long num_rsrc_err, num_cmd_success;
34259 int i;
34260
34261- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
34262- num_cmd_success = atomic_read(&phba->num_cmd_success);
34263+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
34264+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
34265
34266 vports = lpfc_create_vport_work_array(phba);
34267 if (vports != NULL)
34268@@ -403,8 +403,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
34269 }
34270 }
34271 lpfc_destroy_vport_work_array(phba, vports);
34272- atomic_set(&phba->num_rsrc_err, 0);
34273- atomic_set(&phba->num_cmd_success, 0);
34274+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
34275+ atomic_set_unchecked(&phba->num_cmd_success, 0);
34276 }
34277
34278 /**
34279@@ -438,8 +438,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
34280 }
34281 }
34282 lpfc_destroy_vport_work_array(phba, vports);
34283- atomic_set(&phba->num_rsrc_err, 0);
34284- atomic_set(&phba->num_cmd_success, 0);
34285+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
34286+ atomic_set_unchecked(&phba->num_cmd_success, 0);
34287 }
34288
34289 /**
34290diff -urNp linux-3.1.1/drivers/scsi/megaraid/megaraid_mbox.c linux-3.1.1/drivers/scsi/megaraid/megaraid_mbox.c
34291--- linux-3.1.1/drivers/scsi/megaraid/megaraid_mbox.c 2011-11-11 15:19:27.000000000 -0500
34292+++ linux-3.1.1/drivers/scsi/megaraid/megaraid_mbox.c 2011-11-16 18:40:22.000000000 -0500
34293@@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
34294 int rval;
34295 int i;
34296
34297+ pax_track_stack();
34298+
34299 // Allocate memory for the base list of scb for management module.
34300 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
34301
34302diff -urNp linux-3.1.1/drivers/scsi/osd/osd_initiator.c linux-3.1.1/drivers/scsi/osd/osd_initiator.c
34303--- linux-3.1.1/drivers/scsi/osd/osd_initiator.c 2011-11-11 15:19:27.000000000 -0500
34304+++ linux-3.1.1/drivers/scsi/osd/osd_initiator.c 2011-11-16 18:40:22.000000000 -0500
34305@@ -97,6 +97,8 @@ static int _osd_get_print_system_info(st
34306 int nelem = ARRAY_SIZE(get_attrs), a = 0;
34307 int ret;
34308
34309+ pax_track_stack();
34310+
34311 or = osd_start_request(od, GFP_KERNEL);
34312 if (!or)
34313 return -ENOMEM;
34314diff -urNp linux-3.1.1/drivers/scsi/pmcraid.c linux-3.1.1/drivers/scsi/pmcraid.c
34315--- linux-3.1.1/drivers/scsi/pmcraid.c 2011-11-11 15:19:27.000000000 -0500
34316+++ linux-3.1.1/drivers/scsi/pmcraid.c 2011-11-16 18:39:07.000000000 -0500
34317@@ -201,8 +201,8 @@ static int pmcraid_slave_alloc(struct sc
34318 res->scsi_dev = scsi_dev;
34319 scsi_dev->hostdata = res;
34320 res->change_detected = 0;
34321- atomic_set(&res->read_failures, 0);
34322- atomic_set(&res->write_failures, 0);
34323+ atomic_set_unchecked(&res->read_failures, 0);
34324+ atomic_set_unchecked(&res->write_failures, 0);
34325 rc = 0;
34326 }
34327 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
34328@@ -2677,9 +2677,9 @@ static int pmcraid_error_handler(struct
34329
34330 /* If this was a SCSI read/write command keep count of errors */
34331 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
34332- atomic_inc(&res->read_failures);
34333+ atomic_inc_unchecked(&res->read_failures);
34334 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
34335- atomic_inc(&res->write_failures);
34336+ atomic_inc_unchecked(&res->write_failures);
34337
34338 if (!RES_IS_GSCSI(res->cfg_entry) &&
34339 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
34340@@ -3535,7 +3535,7 @@ static int pmcraid_queuecommand_lck(
34341 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
34342 * hrrq_id assigned here in queuecommand
34343 */
34344- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
34345+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
34346 pinstance->num_hrrq;
34347 cmd->cmd_done = pmcraid_io_done;
34348
34349@@ -3860,7 +3860,7 @@ static long pmcraid_ioctl_passthrough(
34350 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
34351 * hrrq_id assigned here in queuecommand
34352 */
34353- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
34354+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
34355 pinstance->num_hrrq;
34356
34357 if (request_size) {
34358@@ -4498,7 +4498,7 @@ static void pmcraid_worker_function(stru
34359
34360 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
34361 /* add resources only after host is added into system */
34362- if (!atomic_read(&pinstance->expose_resources))
34363+ if (!atomic_read_unchecked(&pinstance->expose_resources))
34364 return;
34365
34366 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
34367@@ -5332,8 +5332,8 @@ static int __devinit pmcraid_init_instan
34368 init_waitqueue_head(&pinstance->reset_wait_q);
34369
34370 atomic_set(&pinstance->outstanding_cmds, 0);
34371- atomic_set(&pinstance->last_message_id, 0);
34372- atomic_set(&pinstance->expose_resources, 0);
34373+ atomic_set_unchecked(&pinstance->last_message_id, 0);
34374+ atomic_set_unchecked(&pinstance->expose_resources, 0);
34375
34376 INIT_LIST_HEAD(&pinstance->free_res_q);
34377 INIT_LIST_HEAD(&pinstance->used_res_q);
34378@@ -6048,7 +6048,7 @@ static int __devinit pmcraid_probe(
34379 /* Schedule worker thread to handle CCN and take care of adding and
34380 * removing devices to OS
34381 */
34382- atomic_set(&pinstance->expose_resources, 1);
34383+ atomic_set_unchecked(&pinstance->expose_resources, 1);
34384 schedule_work(&pinstance->worker_q);
34385 return rc;
34386
34387diff -urNp linux-3.1.1/drivers/scsi/pmcraid.h linux-3.1.1/drivers/scsi/pmcraid.h
34388--- linux-3.1.1/drivers/scsi/pmcraid.h 2011-11-11 15:19:27.000000000 -0500
34389+++ linux-3.1.1/drivers/scsi/pmcraid.h 2011-11-16 18:39:07.000000000 -0500
34390@@ -749,7 +749,7 @@ struct pmcraid_instance {
34391 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
34392
34393 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
34394- atomic_t last_message_id;
34395+ atomic_unchecked_t last_message_id;
34396
34397 /* configuration table */
34398 struct pmcraid_config_table *cfg_table;
34399@@ -778,7 +778,7 @@ struct pmcraid_instance {
34400 atomic_t outstanding_cmds;
34401
34402 /* should add/delete resources to mid-layer now ?*/
34403- atomic_t expose_resources;
34404+ atomic_unchecked_t expose_resources;
34405
34406
34407
34408@@ -814,8 +814,8 @@ struct pmcraid_resource_entry {
34409 struct pmcraid_config_table_entry_ext cfg_entry_ext;
34410 };
34411 struct scsi_device *scsi_dev; /* Link scsi_device structure */
34412- atomic_t read_failures; /* count of failed READ commands */
34413- atomic_t write_failures; /* count of failed WRITE commands */
34414+ atomic_unchecked_t read_failures; /* count of failed READ commands */
34415+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
34416
34417 /* To indicate add/delete/modify during CCN */
34418 u8 change_detected;
34419diff -urNp linux-3.1.1/drivers/scsi/qla2xxx/qla_def.h linux-3.1.1/drivers/scsi/qla2xxx/qla_def.h
34420--- linux-3.1.1/drivers/scsi/qla2xxx/qla_def.h 2011-11-11 15:19:27.000000000 -0500
34421+++ linux-3.1.1/drivers/scsi/qla2xxx/qla_def.h 2011-11-16 18:39:07.000000000 -0500
34422@@ -2244,7 +2244,7 @@ struct isp_operations {
34423 int (*get_flash_version) (struct scsi_qla_host *, void *);
34424 int (*start_scsi) (srb_t *);
34425 int (*abort_isp) (struct scsi_qla_host *);
34426-};
34427+} __no_const;
34428
34429 /* MSI-X Support *************************************************************/
34430
34431diff -urNp linux-3.1.1/drivers/scsi/qla4xxx/ql4_def.h linux-3.1.1/drivers/scsi/qla4xxx/ql4_def.h
34432--- linux-3.1.1/drivers/scsi/qla4xxx/ql4_def.h 2011-11-11 15:19:27.000000000 -0500
34433+++ linux-3.1.1/drivers/scsi/qla4xxx/ql4_def.h 2011-11-16 18:39:07.000000000 -0500
34434@@ -256,7 +256,7 @@ struct ddb_entry {
34435 atomic_t retry_relogin_timer; /* Min Time between relogins
34436 * (4000 only) */
34437 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
34438- atomic_t relogin_retry_count; /* Num of times relogin has been
34439+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
34440 * retried */
34441
34442 uint16_t port;
34443diff -urNp linux-3.1.1/drivers/scsi/qla4xxx/ql4_init.c linux-3.1.1/drivers/scsi/qla4xxx/ql4_init.c
34444--- linux-3.1.1/drivers/scsi/qla4xxx/ql4_init.c 2011-11-11 15:19:27.000000000 -0500
34445+++ linux-3.1.1/drivers/scsi/qla4xxx/ql4_init.c 2011-11-16 18:39:07.000000000 -0500
34446@@ -680,7 +680,7 @@ static struct ddb_entry * qla4xxx_alloc_
34447 ddb_entry->fw_ddb_index = fw_ddb_index;
34448 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
34449 atomic_set(&ddb_entry->relogin_timer, 0);
34450- atomic_set(&ddb_entry->relogin_retry_count, 0);
34451+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
34452 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
34453 list_add_tail(&ddb_entry->list, &ha->ddb_list);
34454 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
34455@@ -1433,7 +1433,7 @@ int qla4xxx_process_ddb_changed(struct s
34456 if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) &&
34457 (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) {
34458 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
34459- atomic_set(&ddb_entry->relogin_retry_count, 0);
34460+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
34461 atomic_set(&ddb_entry->relogin_timer, 0);
34462 clear_bit(DF_RELOGIN, &ddb_entry->flags);
34463 iscsi_unblock_session(ddb_entry->sess);
34464diff -urNp linux-3.1.1/drivers/scsi/qla4xxx/ql4_os.c linux-3.1.1/drivers/scsi/qla4xxx/ql4_os.c
34465--- linux-3.1.1/drivers/scsi/qla4xxx/ql4_os.c 2011-11-11 15:19:27.000000000 -0500
34466+++ linux-3.1.1/drivers/scsi/qla4xxx/ql4_os.c 2011-11-16 18:39:07.000000000 -0500
34467@@ -811,13 +811,13 @@ static void qla4xxx_timer(struct scsi_ql
34468 ddb_entry->fw_ddb_device_state ==
34469 DDB_DS_SESSION_FAILED) {
34470 /* Reset retry relogin timer */
34471- atomic_inc(&ddb_entry->relogin_retry_count);
34472+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
34473 DEBUG2(printk("scsi%ld: ddb [%d] relogin"
34474 " timed out-retrying"
34475 " relogin (%d)\n",
34476 ha->host_no,
34477 ddb_entry->fw_ddb_index,
34478- atomic_read(&ddb_entry->
34479+ atomic_read_unchecked(&ddb_entry->
34480 relogin_retry_count))
34481 );
34482 start_dpc++;
34483diff -urNp linux-3.1.1/drivers/scsi/scsi.c linux-3.1.1/drivers/scsi/scsi.c
34484--- linux-3.1.1/drivers/scsi/scsi.c 2011-11-11 15:19:27.000000000 -0500
34485+++ linux-3.1.1/drivers/scsi/scsi.c 2011-11-16 18:39:07.000000000 -0500
34486@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
34487 unsigned long timeout;
34488 int rtn = 0;
34489
34490- atomic_inc(&cmd->device->iorequest_cnt);
34491+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34492
34493 /* check if the device is still usable */
34494 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
34495diff -urNp linux-3.1.1/drivers/scsi/scsi_debug.c linux-3.1.1/drivers/scsi/scsi_debug.c
34496--- linux-3.1.1/drivers/scsi/scsi_debug.c 2011-11-11 15:19:27.000000000 -0500
34497+++ linux-3.1.1/drivers/scsi/scsi_debug.c 2011-11-16 18:40:22.000000000 -0500
34498@@ -1493,6 +1493,8 @@ static int resp_mode_select(struct scsi_
34499 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
34500 unsigned char *cmd = (unsigned char *)scp->cmnd;
34501
34502+ pax_track_stack();
34503+
34504 if ((errsts = check_readiness(scp, 1, devip)))
34505 return errsts;
34506 memset(arr, 0, sizeof(arr));
34507@@ -1590,6 +1592,8 @@ static int resp_log_sense(struct scsi_cm
34508 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
34509 unsigned char *cmd = (unsigned char *)scp->cmnd;
34510
34511+ pax_track_stack();
34512+
34513 if ((errsts = check_readiness(scp, 1, devip)))
34514 return errsts;
34515 memset(arr, 0, sizeof(arr));
34516diff -urNp linux-3.1.1/drivers/scsi/scsi_lib.c linux-3.1.1/drivers/scsi/scsi_lib.c
34517--- linux-3.1.1/drivers/scsi/scsi_lib.c 2011-11-11 15:19:27.000000000 -0500
34518+++ linux-3.1.1/drivers/scsi/scsi_lib.c 2011-11-16 18:39:07.000000000 -0500
34519@@ -1413,7 +1413,7 @@ static void scsi_kill_request(struct req
34520 shost = sdev->host;
34521 scsi_init_cmd_errh(cmd);
34522 cmd->result = DID_NO_CONNECT << 16;
34523- atomic_inc(&cmd->device->iorequest_cnt);
34524+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34525
34526 /*
34527 * SCSI request completion path will do scsi_device_unbusy(),
34528@@ -1439,9 +1439,9 @@ static void scsi_softirq_done(struct req
34529
34530 INIT_LIST_HEAD(&cmd->eh_entry);
34531
34532- atomic_inc(&cmd->device->iodone_cnt);
34533+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
34534 if (cmd->result)
34535- atomic_inc(&cmd->device->ioerr_cnt);
34536+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
34537
34538 disposition = scsi_decide_disposition(cmd);
34539 if (disposition != SUCCESS &&
34540diff -urNp linux-3.1.1/drivers/scsi/scsi_sysfs.c linux-3.1.1/drivers/scsi/scsi_sysfs.c
34541--- linux-3.1.1/drivers/scsi/scsi_sysfs.c 2011-11-11 15:19:27.000000000 -0500
34542+++ linux-3.1.1/drivers/scsi/scsi_sysfs.c 2011-11-16 18:39:07.000000000 -0500
34543@@ -622,7 +622,7 @@ show_iostat_##field(struct device *dev,
34544 char *buf) \
34545 { \
34546 struct scsi_device *sdev = to_scsi_device(dev); \
34547- unsigned long long count = atomic_read(&sdev->field); \
34548+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
34549 return snprintf(buf, 20, "0x%llx\n", count); \
34550 } \
34551 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
34552diff -urNp linux-3.1.1/drivers/scsi/scsi_tgt_lib.c linux-3.1.1/drivers/scsi/scsi_tgt_lib.c
34553--- linux-3.1.1/drivers/scsi/scsi_tgt_lib.c 2011-11-11 15:19:27.000000000 -0500
34554+++ linux-3.1.1/drivers/scsi/scsi_tgt_lib.c 2011-11-16 18:39:07.000000000 -0500
34555@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct sc
34556 int err;
34557
34558 dprintk("%lx %u\n", uaddr, len);
34559- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
34560+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
34561 if (err) {
34562 /*
34563 * TODO: need to fixup sg_tablesize, max_segment_size,
34564diff -urNp linux-3.1.1/drivers/scsi/scsi_transport_fc.c linux-3.1.1/drivers/scsi/scsi_transport_fc.c
34565--- linux-3.1.1/drivers/scsi/scsi_transport_fc.c 2011-11-11 15:19:27.000000000 -0500
34566+++ linux-3.1.1/drivers/scsi/scsi_transport_fc.c 2011-11-16 18:39:07.000000000 -0500
34567@@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_
34568 * Netlink Infrastructure
34569 */
34570
34571-static atomic_t fc_event_seq;
34572+static atomic_unchecked_t fc_event_seq;
34573
34574 /**
34575 * fc_get_event_number - Obtain the next sequential FC event number
34576@@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
34577 u32
34578 fc_get_event_number(void)
34579 {
34580- return atomic_add_return(1, &fc_event_seq);
34581+ return atomic_add_return_unchecked(1, &fc_event_seq);
34582 }
34583 EXPORT_SYMBOL(fc_get_event_number);
34584
34585@@ -645,7 +645,7 @@ static __init int fc_transport_init(void
34586 {
34587 int error;
34588
34589- atomic_set(&fc_event_seq, 0);
34590+ atomic_set_unchecked(&fc_event_seq, 0);
34591
34592 error = transport_class_register(&fc_host_class);
34593 if (error)
34594@@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char
34595 char *cp;
34596
34597 *val = simple_strtoul(buf, &cp, 0);
34598- if ((*cp && (*cp != '\n')) || (*val < 0))
34599+ if (*cp && (*cp != '\n'))
34600 return -EINVAL;
34601 /*
34602 * Check for overflow; dev_loss_tmo is u32
34603diff -urNp linux-3.1.1/drivers/scsi/scsi_transport_iscsi.c linux-3.1.1/drivers/scsi/scsi_transport_iscsi.c
34604--- linux-3.1.1/drivers/scsi/scsi_transport_iscsi.c 2011-11-11 15:19:27.000000000 -0500
34605+++ linux-3.1.1/drivers/scsi/scsi_transport_iscsi.c 2011-11-16 18:39:07.000000000 -0500
34606@@ -83,7 +83,7 @@ struct iscsi_internal {
34607 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
34608 };
34609
34610-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
34611+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
34612 static struct workqueue_struct *iscsi_eh_timer_workq;
34613
34614 /*
34615@@ -761,7 +761,7 @@ int iscsi_add_session(struct iscsi_cls_s
34616 int err;
34617
34618 ihost = shost->shost_data;
34619- session->sid = atomic_add_return(1, &iscsi_session_nr);
34620+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
34621
34622 if (id == ISCSI_MAX_TARGET) {
34623 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
34624@@ -2200,7 +2200,7 @@ static __init int iscsi_transport_init(v
34625 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
34626 ISCSI_TRANSPORT_VERSION);
34627
34628- atomic_set(&iscsi_session_nr, 0);
34629+ atomic_set_unchecked(&iscsi_session_nr, 0);
34630
34631 err = class_register(&iscsi_transport_class);
34632 if (err)
34633diff -urNp linux-3.1.1/drivers/scsi/scsi_transport_srp.c linux-3.1.1/drivers/scsi/scsi_transport_srp.c
34634--- linux-3.1.1/drivers/scsi/scsi_transport_srp.c 2011-11-11 15:19:27.000000000 -0500
34635+++ linux-3.1.1/drivers/scsi/scsi_transport_srp.c 2011-11-16 18:39:07.000000000 -0500
34636@@ -33,7 +33,7 @@
34637 #include "scsi_transport_srp_internal.h"
34638
34639 struct srp_host_attrs {
34640- atomic_t next_port_id;
34641+ atomic_unchecked_t next_port_id;
34642 };
34643 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
34644
34645@@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
34646 struct Scsi_Host *shost = dev_to_shost(dev);
34647 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
34648
34649- atomic_set(&srp_host->next_port_id, 0);
34650+ atomic_set_unchecked(&srp_host->next_port_id, 0);
34651 return 0;
34652 }
34653
34654@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
34655 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
34656 rport->roles = ids->roles;
34657
34658- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
34659+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
34660 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
34661
34662 transport_setup_device(&rport->dev);
34663diff -urNp linux-3.1.1/drivers/scsi/sg.c linux-3.1.1/drivers/scsi/sg.c
34664--- linux-3.1.1/drivers/scsi/sg.c 2011-11-11 15:19:27.000000000 -0500
34665+++ linux-3.1.1/drivers/scsi/sg.c 2011-11-16 18:39:07.000000000 -0500
34666@@ -1075,7 +1075,7 @@ sg_ioctl(struct file *filp, unsigned int
34667 sdp->disk->disk_name,
34668 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
34669 NULL,
34670- (char *)arg);
34671+ (char __user *)arg);
34672 case BLKTRACESTART:
34673 return blk_trace_startstop(sdp->device->request_queue, 1);
34674 case BLKTRACESTOP:
34675@@ -2310,7 +2310,7 @@ struct sg_proc_leaf {
34676 const struct file_operations * fops;
34677 };
34678
34679-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
34680+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
34681 {"allow_dio", &adio_fops},
34682 {"debug", &debug_fops},
34683 {"def_reserved_size", &dressz_fops},
34684@@ -2325,7 +2325,7 @@ sg_proc_init(void)
34685 {
34686 int k, mask;
34687 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
34688- struct sg_proc_leaf * leaf;
34689+ const struct sg_proc_leaf * leaf;
34690
34691 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
34692 if (!sg_proc_sgp)
34693diff -urNp linux-3.1.1/drivers/scsi/sym53c8xx_2/sym_glue.c linux-3.1.1/drivers/scsi/sym53c8xx_2/sym_glue.c
34694--- linux-3.1.1/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-11-11 15:19:27.000000000 -0500
34695+++ linux-3.1.1/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-11-16 18:40:22.000000000 -0500
34696@@ -1756,6 +1756,8 @@ static int __devinit sym2_probe(struct p
34697 int do_iounmap = 0;
34698 int do_disable_device = 1;
34699
34700+ pax_track_stack();
34701+
34702 memset(&sym_dev, 0, sizeof(sym_dev));
34703 memset(&nvram, 0, sizeof(nvram));
34704 sym_dev.pdev = pdev;
34705diff -urNp linux-3.1.1/drivers/scsi/vmw_pvscsi.c linux-3.1.1/drivers/scsi/vmw_pvscsi.c
34706--- linux-3.1.1/drivers/scsi/vmw_pvscsi.c 2011-11-11 15:19:27.000000000 -0500
34707+++ linux-3.1.1/drivers/scsi/vmw_pvscsi.c 2011-11-16 18:40:22.000000000 -0500
34708@@ -447,6 +447,8 @@ static void pvscsi_setup_all_rings(const
34709 dma_addr_t base;
34710 unsigned i;
34711
34712+ pax_track_stack();
34713+
34714 cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
34715 cmd.reqRingNumPages = adapter->req_pages;
34716 cmd.cmpRingNumPages = adapter->cmp_pages;
34717diff -urNp linux-3.1.1/drivers/spi/spi.c linux-3.1.1/drivers/spi/spi.c
34718--- linux-3.1.1/drivers/spi/spi.c 2011-11-11 15:19:27.000000000 -0500
34719+++ linux-3.1.1/drivers/spi/spi.c 2011-11-16 18:39:07.000000000 -0500
34720@@ -1023,7 +1023,7 @@ int spi_bus_unlock(struct spi_master *ma
34721 EXPORT_SYMBOL_GPL(spi_bus_unlock);
34722
34723 /* portable code must never pass more than 32 bytes */
34724-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
34725+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
34726
34727 static u8 *buf;
34728
34729diff -urNp linux-3.1.1/drivers/spi/spi-dw-pci.c linux-3.1.1/drivers/spi/spi-dw-pci.c
34730--- linux-3.1.1/drivers/spi/spi-dw-pci.c 2011-11-11 15:19:27.000000000 -0500
34731+++ linux-3.1.1/drivers/spi/spi-dw-pci.c 2011-11-16 18:39:07.000000000 -0500
34732@@ -148,7 +148,7 @@ static int spi_resume(struct pci_dev *pd
34733 #define spi_resume NULL
34734 #endif
34735
34736-static const struct pci_device_id pci_ids[] __devinitdata = {
34737+static const struct pci_device_id pci_ids[] __devinitconst = {
34738 /* Intel MID platform SPI controller 0 */
34739 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
34740 {},
34741diff -urNp linux-3.1.1/drivers/staging/ath6kl/os/linux/ar6000_drv.c linux-3.1.1/drivers/staging/ath6kl/os/linux/ar6000_drv.c
34742--- linux-3.1.1/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-11-11 15:19:27.000000000 -0500
34743+++ linux-3.1.1/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-11-16 18:39:07.000000000 -0500
34744@@ -362,7 +362,7 @@ static struct ar_cookie s_ar_cookie_mem[
34745 (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0))
34746
34747
34748-static struct net_device_ops ar6000_netdev_ops = {
34749+static net_device_ops_no_const ar6000_netdev_ops = {
34750 .ndo_init = NULL,
34751 .ndo_open = ar6000_open,
34752 .ndo_stop = ar6000_close,
34753diff -urNp linux-3.1.1/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h linux-3.1.1/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
34754--- linux-3.1.1/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-11-11 15:19:27.000000000 -0500
34755+++ linux-3.1.1/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-11-16 18:39:07.000000000 -0500
34756@@ -30,7 +30,7 @@ typedef bool (*ar6k_pal_recv_pkt_t)(void
34757 typedef struct ar6k_pal_config_s
34758 {
34759 ar6k_pal_recv_pkt_t fpar6k_pal_recv_pkt;
34760-}ar6k_pal_config_t;
34761+} __no_const ar6k_pal_config_t;
34762
34763 void register_pal_cb(ar6k_pal_config_t *palConfig_p);
34764 #endif /* _AR6K_PAL_H_ */
34765diff -urNp linux-3.1.1/drivers/staging/brcm80211/brcmfmac/dhd_linux.c linux-3.1.1/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
34766--- linux-3.1.1/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-11-11 15:19:27.000000000 -0500
34767+++ linux-3.1.1/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-11-16 18:39:07.000000000 -0500
34768@@ -451,14 +451,14 @@ static void brcmf_op_if(struct brcmf_if
34769 free_netdev(ifp->net);
34770 }
34771 /* Allocate etherdev, including space for private structure */
34772- ifp->net = alloc_etherdev(sizeof(drvr_priv));
34773+ ifp->net = alloc_etherdev(sizeof(*drvr_priv));
34774 if (!ifp->net) {
34775 BRCMF_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
34776 ret = -ENOMEM;
34777 }
34778 if (ret == 0) {
34779 strcpy(ifp->net->name, ifp->name);
34780- memcpy(netdev_priv(ifp->net), &drvr_priv, sizeof(drvr_priv));
34781+ memcpy(netdev_priv(ifp->net), &drvr_priv, sizeof(*drvr_priv));
34782 err = brcmf_net_attach(&drvr_priv->pub, ifp->idx);
34783 if (err != 0) {
34784 BRCMF_ERROR(("%s: brcmf_net_attach failed, "
34785@@ -1279,7 +1279,7 @@ struct brcmf_pub *brcmf_attach(struct br
34786 BRCMF_TRACE(("%s: Enter\n", __func__));
34787
34788 /* Allocate etherdev, including space for private structure */
34789- net = alloc_etherdev(sizeof(drvr_priv));
34790+ net = alloc_etherdev(sizeof(*drvr_priv));
34791 if (!net) {
34792 BRCMF_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
34793 goto fail;
34794@@ -1295,7 +1295,7 @@ struct brcmf_pub *brcmf_attach(struct br
34795 /*
34796 * Save the brcmf_info into the priv
34797 */
34798- memcpy(netdev_priv(net), &drvr_priv, sizeof(drvr_priv));
34799+ memcpy(netdev_priv(net), &drvr_priv, sizeof(*drvr_priv));
34800
34801 /* Set network interface name if it was provided as module parameter */
34802 if (iface_name[0]) {
34803@@ -1352,7 +1352,7 @@ struct brcmf_pub *brcmf_attach(struct br
34804 /*
34805 * Save the brcmf_info into the priv
34806 */
34807- memcpy(netdev_priv(net), &drvr_priv, sizeof(drvr_priv));
34808+ memcpy(netdev_priv(net), &drvr_priv, sizeof(*drvr_priv));
34809
34810 #if defined(CONFIG_PM_SLEEP)
34811 atomic_set(&brcmf_mmc_suspend, false);
34812diff -urNp linux-3.1.1/drivers/staging/brcm80211/brcmfmac/sdio_host.h linux-3.1.1/drivers/staging/brcm80211/brcmfmac/sdio_host.h
34813--- linux-3.1.1/drivers/staging/brcm80211/brcmfmac/sdio_host.h 2011-11-11 15:19:27.000000000 -0500
34814+++ linux-3.1.1/drivers/staging/brcm80211/brcmfmac/sdio_host.h 2011-11-16 18:39:07.000000000 -0500
34815@@ -263,7 +263,7 @@ struct brcmf_sdioh_driver {
34816 u16 func, uint bustype, u32 regsva, void *param);
34817 /* detach from device */
34818 void (*detach) (void *ch);
34819-};
34820+} __no_const;
34821
34822 struct sdioh_info;
34823
34824diff -urNp linux-3.1.1/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h linux-3.1.1/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h
34825--- linux-3.1.1/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h 2011-11-11 15:19:27.000000000 -0500
34826+++ linux-3.1.1/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h 2011-11-16 18:39:07.000000000 -0500
34827@@ -591,7 +591,7 @@ struct phy_func_ptr {
34828 initfn_t carrsuppr;
34829 rxsigpwrfn_t rxsigpwr;
34830 detachfn_t detach;
34831-};
34832+} __no_const;
34833
34834 struct brcms_phy {
34835 struct brcms_phy_pub pubpi_ro;
34836diff -urNp linux-3.1.1/drivers/staging/et131x/et1310_tx.c linux-3.1.1/drivers/staging/et131x/et1310_tx.c
34837--- linux-3.1.1/drivers/staging/et131x/et1310_tx.c 2011-11-11 15:19:27.000000000 -0500
34838+++ linux-3.1.1/drivers/staging/et131x/et1310_tx.c 2011-11-16 18:39:07.000000000 -0500
34839@@ -635,11 +635,11 @@ inline void et131x_free_send_packet(stru
34840 struct net_device_stats *stats = &etdev->net_stats;
34841
34842 if (tcb->flags & fMP_DEST_BROAD)
34843- atomic_inc(&etdev->stats.brdcstxmt);
34844+ atomic_inc_unchecked(&etdev->stats.brdcstxmt);
34845 else if (tcb->flags & fMP_DEST_MULTI)
34846- atomic_inc(&etdev->stats.multixmt);
34847+ atomic_inc_unchecked(&etdev->stats.multixmt);
34848 else
34849- atomic_inc(&etdev->stats.unixmt);
34850+ atomic_inc_unchecked(&etdev->stats.unixmt);
34851
34852 if (tcb->skb) {
34853 stats->tx_bytes += tcb->skb->len;
34854diff -urNp linux-3.1.1/drivers/staging/et131x/et131x_adapter.h linux-3.1.1/drivers/staging/et131x/et131x_adapter.h
34855--- linux-3.1.1/drivers/staging/et131x/et131x_adapter.h 2011-11-11 15:19:27.000000000 -0500
34856+++ linux-3.1.1/drivers/staging/et131x/et131x_adapter.h 2011-11-16 18:39:07.000000000 -0500
34857@@ -106,11 +106,11 @@ struct ce_stats {
34858 * operations
34859 */
34860 u32 unircv; /* # multicast packets received */
34861- atomic_t unixmt; /* # multicast packets for Tx */
34862+ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
34863 u32 multircv; /* # multicast packets received */
34864- atomic_t multixmt; /* # multicast packets for Tx */
34865+ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
34866 u32 brdcstrcv; /* # broadcast packets received */
34867- atomic_t brdcstxmt; /* # broadcast packets for Tx */
34868+ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
34869 u32 norcvbuf; /* # Rx packets discarded */
34870 u32 noxmtbuf; /* # Tx packets discarded */
34871
34872diff -urNp linux-3.1.1/drivers/staging/hv/channel.c linux-3.1.1/drivers/staging/hv/channel.c
34873--- linux-3.1.1/drivers/staging/hv/channel.c 2011-11-11 15:19:27.000000000 -0500
34874+++ linux-3.1.1/drivers/staging/hv/channel.c 2011-11-16 18:39:07.000000000 -0500
34875@@ -447,8 +447,8 @@ int vmbus_establish_gpadl(struct vmbus_c
34876 int ret = 0;
34877 int t;
34878
34879- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
34880- atomic_inc(&vmbus_connection.next_gpadl_handle);
34881+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
34882+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
34883
34884 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
34885 if (ret)
34886diff -urNp linux-3.1.1/drivers/staging/hv/hv.c linux-3.1.1/drivers/staging/hv/hv.c
34887--- linux-3.1.1/drivers/staging/hv/hv.c 2011-11-11 15:19:27.000000000 -0500
34888+++ linux-3.1.1/drivers/staging/hv/hv.c 2011-11-16 18:39:07.000000000 -0500
34889@@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, voi
34890 u64 output_address = (output) ? virt_to_phys(output) : 0;
34891 u32 output_address_hi = output_address >> 32;
34892 u32 output_address_lo = output_address & 0xFFFFFFFF;
34893- volatile void *hypercall_page = hv_context.hypercall_page;
34894+ volatile void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
34895
34896 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
34897 "=a"(hv_status_lo) : "d" (control_hi),
34898diff -urNp linux-3.1.1/drivers/staging/hv/hv_mouse.c linux-3.1.1/drivers/staging/hv/hv_mouse.c
34899--- linux-3.1.1/drivers/staging/hv/hv_mouse.c 2011-11-11 15:19:27.000000000 -0500
34900+++ linux-3.1.1/drivers/staging/hv/hv_mouse.c 2011-11-16 18:39:07.000000000 -0500
34901@@ -878,8 +878,10 @@ static void reportdesc_callback(struct h
34902 if (hid_dev) {
34903 DPRINT_INFO(INPUTVSC_DRV, "hid_device created");
34904
34905- hid_dev->ll_driver->open = mousevsc_hid_open;
34906- hid_dev->ll_driver->close = mousevsc_hid_close;
34907+ pax_open_kernel();
34908+ *(void **)&hid_dev->ll_driver->open = mousevsc_hid_open;
34909+ *(void **)&hid_dev->ll_driver->close = mousevsc_hid_close;
34910+ pax_close_kernel();
34911
34912 hid_dev->bus = BUS_VIRTUAL;
34913 hid_dev->vendor = input_device_ctx->device_info.vendor;
34914diff -urNp linux-3.1.1/drivers/staging/hv/hyperv_vmbus.h linux-3.1.1/drivers/staging/hv/hyperv_vmbus.h
34915--- linux-3.1.1/drivers/staging/hv/hyperv_vmbus.h 2011-11-11 15:19:27.000000000 -0500
34916+++ linux-3.1.1/drivers/staging/hv/hyperv_vmbus.h 2011-11-16 18:39:07.000000000 -0500
34917@@ -559,7 +559,7 @@ enum vmbus_connect_state {
34918 struct vmbus_connection {
34919 enum vmbus_connect_state conn_state;
34920
34921- atomic_t next_gpadl_handle;
34922+ atomic_unchecked_t next_gpadl_handle;
34923
34924 /*
34925 * Represents channel interrupts. Each bit position represents a
34926diff -urNp linux-3.1.1/drivers/staging/hv/rndis_filter.c linux-3.1.1/drivers/staging/hv/rndis_filter.c
34927--- linux-3.1.1/drivers/staging/hv/rndis_filter.c 2011-11-11 15:19:27.000000000 -0500
34928+++ linux-3.1.1/drivers/staging/hv/rndis_filter.c 2011-11-16 18:39:07.000000000 -0500
34929@@ -43,7 +43,7 @@ struct rndis_device {
34930
34931 enum rndis_device_state state;
34932 u32 link_stat;
34933- atomic_t new_req_id;
34934+ atomic_unchecked_t new_req_id;
34935
34936 spinlock_t request_lock;
34937 struct list_head req_list;
34938@@ -117,7 +117,7 @@ static struct rndis_request *get_rndis_r
34939 * template
34940 */
34941 set = &rndis_msg->msg.set_req;
34942- set->req_id = atomic_inc_return(&dev->new_req_id);
34943+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34944
34945 /* Add to the request list */
34946 spin_lock_irqsave(&dev->request_lock, flags);
34947@@ -622,7 +622,7 @@ static void rndis_filter_halt_device(str
34948
34949 /* Setup the rndis set */
34950 halt = &request->request_msg.msg.halt_req;
34951- halt->req_id = atomic_inc_return(&dev->new_req_id);
34952+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34953
34954 /* Ignore return since this msg is optional. */
34955 rndis_filter_send_request(dev, request);
34956diff -urNp linux-3.1.1/drivers/staging/hv/vmbus_drv.c linux-3.1.1/drivers/staging/hv/vmbus_drv.c
34957--- linux-3.1.1/drivers/staging/hv/vmbus_drv.c 2011-11-11 15:19:27.000000000 -0500
34958+++ linux-3.1.1/drivers/staging/hv/vmbus_drv.c 2011-11-16 18:39:07.000000000 -0500
34959@@ -660,11 +660,11 @@ int vmbus_child_device_register(struct h
34960 {
34961 int ret = 0;
34962
34963- static atomic_t device_num = ATOMIC_INIT(0);
34964+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
34965
34966 /* Set the device name. Otherwise, device_register() will fail. */
34967 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
34968- atomic_inc_return(&device_num));
34969+ atomic_inc_return_unchecked(&device_num));
34970
34971 /* The new device belongs to this bus */
34972 child_device_obj->device.bus = &hv_bus; /* device->dev.bus; */
34973diff -urNp linux-3.1.1/drivers/staging/iio/ring_generic.h linux-3.1.1/drivers/staging/iio/ring_generic.h
34974--- linux-3.1.1/drivers/staging/iio/ring_generic.h 2011-11-11 15:19:27.000000000 -0500
34975+++ linux-3.1.1/drivers/staging/iio/ring_generic.h 2011-11-16 18:39:07.000000000 -0500
34976@@ -62,7 +62,7 @@ struct iio_ring_access_funcs {
34977
34978 int (*is_enabled)(struct iio_ring_buffer *ring);
34979 int (*enable)(struct iio_ring_buffer *ring);
34980-};
34981+} __no_const;
34982
34983 struct iio_ring_setup_ops {
34984 int (*preenable)(struct iio_dev *);
34985diff -urNp linux-3.1.1/drivers/staging/mei/interface.c linux-3.1.1/drivers/staging/mei/interface.c
34986--- linux-3.1.1/drivers/staging/mei/interface.c 2011-11-11 15:19:27.000000000 -0500
34987+++ linux-3.1.1/drivers/staging/mei/interface.c 2011-11-17 18:39:18.000000000 -0500
34988@@ -332,7 +332,7 @@ int mei_send_flow_control(struct mei_dev
34989 mei_hdr->reserved = 0;
34990
34991 mei_flow_control = (struct hbm_flow_control *) &dev->wr_msg_buf[1];
34992- memset(mei_flow_control, 0, sizeof(mei_flow_control));
34993+ memset(mei_flow_control, 0, sizeof(*mei_flow_control));
34994 mei_flow_control->host_addr = cl->host_client_id;
34995 mei_flow_control->me_addr = cl->me_client_id;
34996 mei_flow_control->cmd.cmd = MEI_FLOW_CONTROL_CMD;
34997@@ -396,7 +396,7 @@ int mei_disconnect(struct mei_device *de
34998
34999 mei_cli_disconnect =
35000 (struct hbm_client_disconnect_request *) &dev->wr_msg_buf[1];
35001- memset(mei_cli_disconnect, 0, sizeof(mei_cli_disconnect));
35002+ memset(mei_cli_disconnect, 0, sizeof(*mei_cli_disconnect));
35003 mei_cli_disconnect->host_addr = cl->host_client_id;
35004 mei_cli_disconnect->me_addr = cl->me_client_id;
35005 mei_cli_disconnect->cmd.cmd = CLIENT_DISCONNECT_REQ_CMD;
35006diff -urNp linux-3.1.1/drivers/staging/octeon/ethernet.c linux-3.1.1/drivers/staging/octeon/ethernet.c
35007--- linux-3.1.1/drivers/staging/octeon/ethernet.c 2011-11-11 15:19:27.000000000 -0500
35008+++ linux-3.1.1/drivers/staging/octeon/ethernet.c 2011-11-16 18:39:07.000000000 -0500
35009@@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_
35010 * since the RX tasklet also increments it.
35011 */
35012 #ifdef CONFIG_64BIT
35013- atomic64_add(rx_status.dropped_packets,
35014- (atomic64_t *)&priv->stats.rx_dropped);
35015+ atomic64_add_unchecked(rx_status.dropped_packets,
35016+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
35017 #else
35018- atomic_add(rx_status.dropped_packets,
35019- (atomic_t *)&priv->stats.rx_dropped);
35020+ atomic_add_unchecked(rx_status.dropped_packets,
35021+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
35022 #endif
35023 }
35024
35025diff -urNp linux-3.1.1/drivers/staging/octeon/ethernet-rx.c linux-3.1.1/drivers/staging/octeon/ethernet-rx.c
35026--- linux-3.1.1/drivers/staging/octeon/ethernet-rx.c 2011-11-11 15:19:27.000000000 -0500
35027+++ linux-3.1.1/drivers/staging/octeon/ethernet-rx.c 2011-11-16 18:39:07.000000000 -0500
35028@@ -420,11 +420,11 @@ static int cvm_oct_napi_poll(struct napi
35029 /* Increment RX stats for virtual ports */
35030 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
35031 #ifdef CONFIG_64BIT
35032- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
35033- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
35034+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
35035+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
35036 #else
35037- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
35038- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
35039+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
35040+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
35041 #endif
35042 }
35043 netif_receive_skb(skb);
35044@@ -436,9 +436,9 @@ static int cvm_oct_napi_poll(struct napi
35045 dev->name);
35046 */
35047 #ifdef CONFIG_64BIT
35048- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
35049+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
35050 #else
35051- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
35052+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
35053 #endif
35054 dev_kfree_skb_irq(skb);
35055 }
35056diff -urNp linux-3.1.1/drivers/staging/pohmelfs/inode.c linux-3.1.1/drivers/staging/pohmelfs/inode.c
35057--- linux-3.1.1/drivers/staging/pohmelfs/inode.c 2011-11-11 15:19:27.000000000 -0500
35058+++ linux-3.1.1/drivers/staging/pohmelfs/inode.c 2011-11-16 18:39:07.000000000 -0500
35059@@ -1861,7 +1861,7 @@ static int pohmelfs_fill_super(struct su
35060 mutex_init(&psb->mcache_lock);
35061 psb->mcache_root = RB_ROOT;
35062 psb->mcache_timeout = msecs_to_jiffies(5000);
35063- atomic_long_set(&psb->mcache_gen, 0);
35064+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
35065
35066 psb->trans_max_pages = 100;
35067
35068@@ -1876,7 +1876,7 @@ static int pohmelfs_fill_super(struct su
35069 INIT_LIST_HEAD(&psb->crypto_ready_list);
35070 INIT_LIST_HEAD(&psb->crypto_active_list);
35071
35072- atomic_set(&psb->trans_gen, 1);
35073+ atomic_set_unchecked(&psb->trans_gen, 1);
35074 atomic_long_set(&psb->total_inodes, 0);
35075
35076 mutex_init(&psb->state_lock);
35077diff -urNp linux-3.1.1/drivers/staging/pohmelfs/mcache.c linux-3.1.1/drivers/staging/pohmelfs/mcache.c
35078--- linux-3.1.1/drivers/staging/pohmelfs/mcache.c 2011-11-11 15:19:27.000000000 -0500
35079+++ linux-3.1.1/drivers/staging/pohmelfs/mcache.c 2011-11-16 18:39:07.000000000 -0500
35080@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
35081 m->data = data;
35082 m->start = start;
35083 m->size = size;
35084- m->gen = atomic_long_inc_return(&psb->mcache_gen);
35085+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
35086
35087 mutex_lock(&psb->mcache_lock);
35088 err = pohmelfs_mcache_insert(psb, m);
35089diff -urNp linux-3.1.1/drivers/staging/pohmelfs/netfs.h linux-3.1.1/drivers/staging/pohmelfs/netfs.h
35090--- linux-3.1.1/drivers/staging/pohmelfs/netfs.h 2011-11-11 15:19:27.000000000 -0500
35091+++ linux-3.1.1/drivers/staging/pohmelfs/netfs.h 2011-11-16 18:39:07.000000000 -0500
35092@@ -571,14 +571,14 @@ struct pohmelfs_config;
35093 struct pohmelfs_sb {
35094 struct rb_root mcache_root;
35095 struct mutex mcache_lock;
35096- atomic_long_t mcache_gen;
35097+ atomic_long_unchecked_t mcache_gen;
35098 unsigned long mcache_timeout;
35099
35100 unsigned int idx;
35101
35102 unsigned int trans_retries;
35103
35104- atomic_t trans_gen;
35105+ atomic_unchecked_t trans_gen;
35106
35107 unsigned int crypto_attached_size;
35108 unsigned int crypto_align_size;
35109diff -urNp linux-3.1.1/drivers/staging/pohmelfs/trans.c linux-3.1.1/drivers/staging/pohmelfs/trans.c
35110--- linux-3.1.1/drivers/staging/pohmelfs/trans.c 2011-11-11 15:19:27.000000000 -0500
35111+++ linux-3.1.1/drivers/staging/pohmelfs/trans.c 2011-11-16 18:39:07.000000000 -0500
35112@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
35113 int err;
35114 struct netfs_cmd *cmd = t->iovec.iov_base;
35115
35116- t->gen = atomic_inc_return(&psb->trans_gen);
35117+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
35118
35119 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
35120 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
35121diff -urNp linux-3.1.1/drivers/staging/rtl8712/rtl871x_io.h linux-3.1.1/drivers/staging/rtl8712/rtl871x_io.h
35122--- linux-3.1.1/drivers/staging/rtl8712/rtl871x_io.h 2011-11-11 15:19:27.000000000 -0500
35123+++ linux-3.1.1/drivers/staging/rtl8712/rtl871x_io.h 2011-11-16 18:39:07.000000000 -0500
35124@@ -83,7 +83,7 @@ struct _io_ops {
35125 u8 *pmem);
35126 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
35127 u8 *pmem);
35128-};
35129+} __no_const;
35130
35131 struct io_req {
35132 struct list_head list;
35133diff -urNp linux-3.1.1/drivers/staging/sbe-2t3e3/netdev.c linux-3.1.1/drivers/staging/sbe-2t3e3/netdev.c
35134--- linux-3.1.1/drivers/staging/sbe-2t3e3/netdev.c 2011-11-11 15:19:27.000000000 -0500
35135+++ linux-3.1.1/drivers/staging/sbe-2t3e3/netdev.c 2011-11-16 18:39:08.000000000 -0500
35136@@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, s
35137 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
35138
35139 if (rlen)
35140- if (copy_to_user(data, &resp, rlen))
35141+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
35142 return -EFAULT;
35143
35144 return 0;
35145diff -urNp linux-3.1.1/drivers/staging/usbip/usbip_common.h linux-3.1.1/drivers/staging/usbip/usbip_common.h
35146--- linux-3.1.1/drivers/staging/usbip/usbip_common.h 2011-11-11 15:19:27.000000000 -0500
35147+++ linux-3.1.1/drivers/staging/usbip/usbip_common.h 2011-11-16 18:39:08.000000000 -0500
35148@@ -289,7 +289,7 @@ struct usbip_device {
35149 void (*shutdown)(struct usbip_device *);
35150 void (*reset)(struct usbip_device *);
35151 void (*unusable)(struct usbip_device *);
35152- } eh_ops;
35153+ } __no_const eh_ops;
35154 };
35155
35156 #if 0
35157diff -urNp linux-3.1.1/drivers/staging/usbip/vhci.h linux-3.1.1/drivers/staging/usbip/vhci.h
35158--- linux-3.1.1/drivers/staging/usbip/vhci.h 2011-11-11 15:19:27.000000000 -0500
35159+++ linux-3.1.1/drivers/staging/usbip/vhci.h 2011-11-16 18:39:08.000000000 -0500
35160@@ -85,7 +85,7 @@ struct vhci_hcd {
35161 unsigned resuming:1;
35162 unsigned long re_timeout;
35163
35164- atomic_t seqnum;
35165+ atomic_unchecked_t seqnum;
35166
35167 /*
35168 * NOTE:
35169diff -urNp linux-3.1.1/drivers/staging/usbip/vhci_hcd.c linux-3.1.1/drivers/staging/usbip/vhci_hcd.c
35170--- linux-3.1.1/drivers/staging/usbip/vhci_hcd.c 2011-11-11 15:19:27.000000000 -0500
35171+++ linux-3.1.1/drivers/staging/usbip/vhci_hcd.c 2011-11-16 18:39:08.000000000 -0500
35172@@ -527,7 +527,7 @@ static void vhci_tx_urb(struct urb *urb)
35173 return;
35174 }
35175
35176- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
35177+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35178 if (priv->seqnum == 0xffff)
35179 dev_info(&urb->dev->dev, "seqnum max\n");
35180
35181@@ -779,7 +779,7 @@ static int vhci_urb_dequeue(struct usb_h
35182 return -ENOMEM;
35183 }
35184
35185- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
35186+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35187 if (unlink->seqnum == 0xffff)
35188 pr_info("seqnum max\n");
35189
35190@@ -969,7 +969,7 @@ static int vhci_start(struct usb_hcd *hc
35191 vdev->rhport = rhport;
35192 }
35193
35194- atomic_set(&vhci->seqnum, 0);
35195+ atomic_set_unchecked(&vhci->seqnum, 0);
35196 spin_lock_init(&vhci->lock);
35197
35198 hcd->power_budget = 0; /* no limit */
35199diff -urNp linux-3.1.1/drivers/staging/usbip/vhci_rx.c linux-3.1.1/drivers/staging/usbip/vhci_rx.c
35200--- linux-3.1.1/drivers/staging/usbip/vhci_rx.c 2011-11-11 15:19:27.000000000 -0500
35201+++ linux-3.1.1/drivers/staging/usbip/vhci_rx.c 2011-11-16 18:39:08.000000000 -0500
35202@@ -76,7 +76,7 @@ static void vhci_recv_ret_submit(struct
35203 if (!urb) {
35204 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
35205 pr_info("max seqnum %d\n",
35206- atomic_read(&the_controller->seqnum));
35207+ atomic_read_unchecked(&the_controller->seqnum));
35208 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
35209 return;
35210 }
35211diff -urNp linux-3.1.1/drivers/staging/vt6655/hostap.c linux-3.1.1/drivers/staging/vt6655/hostap.c
35212--- linux-3.1.1/drivers/staging/vt6655/hostap.c 2011-11-11 15:19:27.000000000 -0500
35213+++ linux-3.1.1/drivers/staging/vt6655/hostap.c 2011-11-16 18:39:08.000000000 -0500
35214@@ -79,14 +79,13 @@ static int msglevel
35215 *
35216 */
35217
35218+static net_device_ops_no_const apdev_netdev_ops;
35219+
35220 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35221 {
35222 PSDevice apdev_priv;
35223 struct net_device *dev = pDevice->dev;
35224 int ret;
35225- const struct net_device_ops apdev_netdev_ops = {
35226- .ndo_start_xmit = pDevice->tx_80211,
35227- };
35228
35229 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
35230
35231@@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevic
35232 *apdev_priv = *pDevice;
35233 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
35234
35235+ /* only half broken now */
35236+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
35237 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
35238
35239 pDevice->apdev->type = ARPHRD_IEEE80211;
35240diff -urNp linux-3.1.1/drivers/staging/vt6656/hostap.c linux-3.1.1/drivers/staging/vt6656/hostap.c
35241--- linux-3.1.1/drivers/staging/vt6656/hostap.c 2011-11-11 15:19:27.000000000 -0500
35242+++ linux-3.1.1/drivers/staging/vt6656/hostap.c 2011-11-16 18:39:08.000000000 -0500
35243@@ -80,14 +80,13 @@ static int msglevel
35244 *
35245 */
35246
35247+static net_device_ops_no_const apdev_netdev_ops;
35248+
35249 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35250 {
35251 PSDevice apdev_priv;
35252 struct net_device *dev = pDevice->dev;
35253 int ret;
35254- const struct net_device_ops apdev_netdev_ops = {
35255- .ndo_start_xmit = pDevice->tx_80211,
35256- };
35257
35258 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
35259
35260@@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevic
35261 *apdev_priv = *pDevice;
35262 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
35263
35264+ /* only half broken now */
35265+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
35266 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
35267
35268 pDevice->apdev->type = ARPHRD_IEEE80211;
35269diff -urNp linux-3.1.1/drivers/staging/wlan-ng/hfa384x_usb.c linux-3.1.1/drivers/staging/wlan-ng/hfa384x_usb.c
35270--- linux-3.1.1/drivers/staging/wlan-ng/hfa384x_usb.c 2011-11-11 15:19:27.000000000 -0500
35271+++ linux-3.1.1/drivers/staging/wlan-ng/hfa384x_usb.c 2011-11-16 18:39:08.000000000 -0500
35272@@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hf
35273
35274 struct usbctlx_completor {
35275 int (*complete) (struct usbctlx_completor *);
35276-};
35277+} __no_const;
35278
35279 static int
35280 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
35281diff -urNp linux-3.1.1/drivers/staging/zcache/tmem.c linux-3.1.1/drivers/staging/zcache/tmem.c
35282--- linux-3.1.1/drivers/staging/zcache/tmem.c 2011-11-11 15:19:27.000000000 -0500
35283+++ linux-3.1.1/drivers/staging/zcache/tmem.c 2011-11-16 18:39:08.000000000 -0500
35284@@ -39,7 +39,7 @@
35285 * A tmem host implementation must use this function to register callbacks
35286 * for memory allocation.
35287 */
35288-static struct tmem_hostops tmem_hostops;
35289+static tmem_hostops_no_const tmem_hostops;
35290
35291 static void tmem_objnode_tree_init(void);
35292
35293@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_h
35294 * A tmem host implementation must use this function to register
35295 * callbacks for a page-accessible memory (PAM) implementation
35296 */
35297-static struct tmem_pamops tmem_pamops;
35298+static tmem_pamops_no_const tmem_pamops;
35299
35300 void tmem_register_pamops(struct tmem_pamops *m)
35301 {
35302diff -urNp linux-3.1.1/drivers/staging/zcache/tmem.h linux-3.1.1/drivers/staging/zcache/tmem.h
35303--- linux-3.1.1/drivers/staging/zcache/tmem.h 2011-11-11 15:19:27.000000000 -0500
35304+++ linux-3.1.1/drivers/staging/zcache/tmem.h 2011-11-16 18:39:08.000000000 -0500
35305@@ -180,6 +180,7 @@ struct tmem_pamops {
35306 void (*new_obj)(struct tmem_obj *);
35307 int (*replace_in_obj)(void *, struct tmem_obj *);
35308 };
35309+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
35310 extern void tmem_register_pamops(struct tmem_pamops *m);
35311
35312 /* memory allocation methods provided by the host implementation */
35313@@ -189,6 +190,7 @@ struct tmem_hostops {
35314 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
35315 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
35316 };
35317+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
35318 extern void tmem_register_hostops(struct tmem_hostops *m);
35319
35320 /* core tmem accessor functions */
35321diff -urNp linux-3.1.1/drivers/target/iscsi/iscsi_target.c linux-3.1.1/drivers/target/iscsi/iscsi_target.c
35322--- linux-3.1.1/drivers/target/iscsi/iscsi_target.c 2011-11-11 15:19:27.000000000 -0500
35323+++ linux-3.1.1/drivers/target/iscsi/iscsi_target.c 2011-11-16 18:39:08.000000000 -0500
35324@@ -1368,7 +1368,7 @@ static int iscsit_handle_data_out(struct
35325 * outstanding_r2ts reaches zero, go ahead and send the delayed
35326 * TASK_ABORTED status.
35327 */
35328- if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
35329+ if (atomic_read_unchecked(&se_cmd->t_transport_aborted) != 0) {
35330 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
35331 if (--cmd->outstanding_r2ts < 1) {
35332 iscsit_stop_dataout_timer(cmd);
35333diff -urNp linux-3.1.1/drivers/target/target_core_alua.c linux-3.1.1/drivers/target/target_core_alua.c
35334--- linux-3.1.1/drivers/target/target_core_alua.c 2011-11-11 15:19:27.000000000 -0500
35335+++ linux-3.1.1/drivers/target/target_core_alua.c 2011-11-16 18:40:29.000000000 -0500
35336@@ -723,6 +723,8 @@ static int core_alua_update_tpg_primary_
35337 char path[ALUA_METADATA_PATH_LEN];
35338 int len;
35339
35340+ pax_track_stack();
35341+
35342 memset(path, 0, ALUA_METADATA_PATH_LEN);
35343
35344 len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
35345@@ -986,6 +988,8 @@ static int core_alua_update_tpg_secondar
35346 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
35347 int len;
35348
35349+ pax_track_stack();
35350+
35351 memset(path, 0, ALUA_METADATA_PATH_LEN);
35352 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
35353
35354diff -urNp linux-3.1.1/drivers/target/target_core_cdb.c linux-3.1.1/drivers/target/target_core_cdb.c
35355--- linux-3.1.1/drivers/target/target_core_cdb.c 2011-11-11 15:19:27.000000000 -0500
35356+++ linux-3.1.1/drivers/target/target_core_cdb.c 2011-11-16 18:40:29.000000000 -0500
35357@@ -933,6 +933,8 @@ target_emulate_modesense(struct se_cmd *
35358 int length = 0;
35359 unsigned char buf[SE_MODE_PAGE_BUF];
35360
35361+ pax_track_stack();
35362+
35363 memset(buf, 0, SE_MODE_PAGE_BUF);
35364
35365 switch (cdb[2] & 0x3f) {
35366diff -urNp linux-3.1.1/drivers/target/target_core_configfs.c linux-3.1.1/drivers/target/target_core_configfs.c
35367--- linux-3.1.1/drivers/target/target_core_configfs.c 2011-11-11 15:19:27.000000000 -0500
35368+++ linux-3.1.1/drivers/target/target_core_configfs.c 2011-11-16 19:04:37.000000000 -0500
35369@@ -1267,6 +1267,8 @@ static ssize_t target_core_dev_pr_show_a
35370 ssize_t len = 0;
35371 int reg_count = 0, prf_isid;
35372
35373+ pax_track_stack();
35374+
35375 if (!su_dev->se_dev_ptr)
35376 return -ENODEV;
35377
35378diff -urNp linux-3.1.1/drivers/target/target_core_pr.c linux-3.1.1/drivers/target/target_core_pr.c
35379--- linux-3.1.1/drivers/target/target_core_pr.c 2011-11-11 15:19:27.000000000 -0500
35380+++ linux-3.1.1/drivers/target/target_core_pr.c 2011-11-16 18:40:29.000000000 -0500
35381@@ -918,6 +918,8 @@ static int __core_scsi3_check_aptpl_regi
35382 unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
35383 u16 tpgt;
35384
35385+ pax_track_stack();
35386+
35387 memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
35388 memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
35389 /*
35390@@ -1867,6 +1869,8 @@ static int __core_scsi3_update_aptpl_buf
35391 ssize_t len = 0;
35392 int reg_count = 0;
35393
35394+ pax_track_stack();
35395+
35396 memset(buf, 0, pr_aptpl_buf_len);
35397 /*
35398 * Called to clear metadata once APTPL has been deactivated.
35399@@ -1989,6 +1993,8 @@ static int __core_scsi3_write_aptpl_to_f
35400 char path[512];
35401 int ret;
35402
35403+ pax_track_stack();
35404+
35405 memset(iov, 0, sizeof(struct iovec));
35406 memset(path, 0, 512);
35407
35408diff -urNp linux-3.1.1/drivers/target/target_core_tmr.c linux-3.1.1/drivers/target/target_core_tmr.c
35409--- linux-3.1.1/drivers/target/target_core_tmr.c 2011-11-11 15:19:27.000000000 -0500
35410+++ linux-3.1.1/drivers/target/target_core_tmr.c 2011-11-16 18:39:08.000000000 -0500
35411@@ -255,7 +255,7 @@ static void core_tmr_drain_task_list(
35412 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
35413 cmd->t_task_list_num,
35414 atomic_read(&cmd->t_task_cdbs_left),
35415- atomic_read(&cmd->t_task_cdbs_sent),
35416+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
35417 atomic_read(&cmd->t_transport_active),
35418 atomic_read(&cmd->t_transport_stop),
35419 atomic_read(&cmd->t_transport_sent));
35420@@ -291,7 +291,7 @@ static void core_tmr_drain_task_list(
35421 pr_debug("LUN_RESET: got t_transport_active = 1 for"
35422 " task: %p, t_fe_count: %d dev: %p\n", task,
35423 fe_count, dev);
35424- atomic_set(&cmd->t_transport_aborted, 1);
35425+ atomic_set_unchecked(&cmd->t_transport_aborted, 1);
35426 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35427
35428 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
35429@@ -299,7 +299,7 @@ static void core_tmr_drain_task_list(
35430 }
35431 pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
35432 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
35433- atomic_set(&cmd->t_transport_aborted, 1);
35434+ atomic_set_unchecked(&cmd->t_transport_aborted, 1);
35435 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35436
35437 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
35438diff -urNp linux-3.1.1/drivers/target/target_core_transport.c linux-3.1.1/drivers/target/target_core_transport.c
35439--- linux-3.1.1/drivers/target/target_core_transport.c 2011-11-11 15:19:27.000000000 -0500
35440+++ linux-3.1.1/drivers/target/target_core_transport.c 2011-11-16 18:39:08.000000000 -0500
35441@@ -1445,7 +1445,7 @@ struct se_device *transport_add_device_t
35442
35443 dev->queue_depth = dev_limits->queue_depth;
35444 atomic_set(&dev->depth_left, dev->queue_depth);
35445- atomic_set(&dev->dev_ordered_id, 0);
35446+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
35447
35448 se_dev_set_default_attribs(dev, dev_limits);
35449
35450@@ -1633,7 +1633,7 @@ static int transport_check_alloc_task_at
35451 * Used to determine when ORDERED commands should go from
35452 * Dormant to Active status.
35453 */
35454- cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
35455+ cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
35456 smp_mb__after_atomic_inc();
35457 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
35458 cmd->se_ordered_id, cmd->sam_task_attr,
35459@@ -1960,7 +1960,7 @@ static void transport_generic_request_fa
35460 " t_transport_active: %d t_transport_stop: %d"
35461 " t_transport_sent: %d\n", cmd->t_task_list_num,
35462 atomic_read(&cmd->t_task_cdbs_left),
35463- atomic_read(&cmd->t_task_cdbs_sent),
35464+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
35465 atomic_read(&cmd->t_task_cdbs_ex_left),
35466 atomic_read(&cmd->t_transport_active),
35467 atomic_read(&cmd->t_transport_stop),
35468@@ -2460,9 +2460,9 @@ check_depth:
35469 spin_lock_irqsave(&cmd->t_state_lock, flags);
35470 atomic_set(&task->task_active, 1);
35471 atomic_set(&task->task_sent, 1);
35472- atomic_inc(&cmd->t_task_cdbs_sent);
35473+ atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
35474
35475- if (atomic_read(&cmd->t_task_cdbs_sent) ==
35476+ if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
35477 cmd->t_task_list_num)
35478 atomic_set(&cmd->transport_sent, 1);
35479
35480@@ -4665,7 +4665,7 @@ static void transport_generic_wait_for_t
35481 atomic_set(&cmd->transport_lun_stop, 0);
35482 }
35483 if (!atomic_read(&cmd->t_transport_active) ||
35484- atomic_read(&cmd->t_transport_aborted))
35485+ atomic_read_unchecked(&cmd->t_transport_aborted))
35486 goto remove;
35487
35488 atomic_set(&cmd->t_transport_stop, 1);
35489@@ -4900,7 +4900,7 @@ int transport_check_aborted_status(struc
35490 {
35491 int ret = 0;
35492
35493- if (atomic_read(&cmd->t_transport_aborted) != 0) {
35494+ if (atomic_read_unchecked(&cmd->t_transport_aborted) != 0) {
35495 if (!send_status ||
35496 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
35497 return 1;
35498@@ -4937,7 +4937,7 @@ void transport_send_task_abort(struct se
35499 */
35500 if (cmd->data_direction == DMA_TO_DEVICE) {
35501 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
35502- atomic_inc(&cmd->t_transport_aborted);
35503+ atomic_inc_unchecked(&cmd->t_transport_aborted);
35504 smp_mb__after_atomic_inc();
35505 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
35506 transport_new_cmd_failure(cmd);
35507@@ -5051,7 +5051,7 @@ static void transport_processing_shutdow
35508 cmd->se_tfo->get_task_tag(cmd),
35509 cmd->t_task_list_num,
35510 atomic_read(&cmd->t_task_cdbs_left),
35511- atomic_read(&cmd->t_task_cdbs_sent),
35512+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
35513 atomic_read(&cmd->t_transport_active),
35514 atomic_read(&cmd->t_transport_stop),
35515 atomic_read(&cmd->t_transport_sent));
35516diff -urNp linux-3.1.1/drivers/telephony/ixj.c linux-3.1.1/drivers/telephony/ixj.c
35517--- linux-3.1.1/drivers/telephony/ixj.c 2011-11-11 15:19:27.000000000 -0500
35518+++ linux-3.1.1/drivers/telephony/ixj.c 2011-11-16 18:40:29.000000000 -0500
35519@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
35520 bool mContinue;
35521 char *pIn, *pOut;
35522
35523+ pax_track_stack();
35524+
35525 if (!SCI_Prepare(j))
35526 return 0;
35527
35528diff -urNp linux-3.1.1/drivers/tty/hvc/hvcs.c linux-3.1.1/drivers/tty/hvc/hvcs.c
35529--- linux-3.1.1/drivers/tty/hvc/hvcs.c 2011-11-11 15:19:27.000000000 -0500
35530+++ linux-3.1.1/drivers/tty/hvc/hvcs.c 2011-11-16 18:39:08.000000000 -0500
35531@@ -83,6 +83,7 @@
35532 #include <asm/hvcserver.h>
35533 #include <asm/uaccess.h>
35534 #include <asm/vio.h>
35535+#include <asm/local.h>
35536
35537 /*
35538 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
35539@@ -270,7 +271,7 @@ struct hvcs_struct {
35540 unsigned int index;
35541
35542 struct tty_struct *tty;
35543- int open_count;
35544+ local_t open_count;
35545
35546 /*
35547 * Used to tell the driver kernel_thread what operations need to take
35548@@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(st
35549
35550 spin_lock_irqsave(&hvcsd->lock, flags);
35551
35552- if (hvcsd->open_count > 0) {
35553+ if (local_read(&hvcsd->open_count) > 0) {
35554 spin_unlock_irqrestore(&hvcsd->lock, flags);
35555 printk(KERN_INFO "HVCS: vterm state unchanged. "
35556 "The hvcs device node is still in use.\n");
35557@@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *
35558 if ((retval = hvcs_partner_connect(hvcsd)))
35559 goto error_release;
35560
35561- hvcsd->open_count = 1;
35562+ local_set(&hvcsd->open_count, 1);
35563 hvcsd->tty = tty;
35564 tty->driver_data = hvcsd;
35565
35566@@ -1179,7 +1180,7 @@ fast_open:
35567
35568 spin_lock_irqsave(&hvcsd->lock, flags);
35569 kref_get(&hvcsd->kref);
35570- hvcsd->open_count++;
35571+ local_inc(&hvcsd->open_count);
35572 hvcsd->todo_mask |= HVCS_SCHED_READ;
35573 spin_unlock_irqrestore(&hvcsd->lock, flags);
35574
35575@@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct
35576 hvcsd = tty->driver_data;
35577
35578 spin_lock_irqsave(&hvcsd->lock, flags);
35579- if (--hvcsd->open_count == 0) {
35580+ if (local_dec_and_test(&hvcsd->open_count)) {
35581
35582 vio_disable_interrupts(hvcsd->vdev);
35583
35584@@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct
35585 free_irq(irq, hvcsd);
35586 kref_put(&hvcsd->kref, destroy_hvcs_struct);
35587 return;
35588- } else if (hvcsd->open_count < 0) {
35589+ } else if (local_read(&hvcsd->open_count) < 0) {
35590 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
35591 " is missmanaged.\n",
35592- hvcsd->vdev->unit_address, hvcsd->open_count);
35593+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
35594 }
35595
35596 spin_unlock_irqrestore(&hvcsd->lock, flags);
35597@@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struc
35598
35599 spin_lock_irqsave(&hvcsd->lock, flags);
35600 /* Preserve this so that we know how many kref refs to put */
35601- temp_open_count = hvcsd->open_count;
35602+ temp_open_count = local_read(&hvcsd->open_count);
35603
35604 /*
35605 * Don't kref put inside the spinlock because the destruction
35606@@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struc
35607 hvcsd->tty->driver_data = NULL;
35608 hvcsd->tty = NULL;
35609
35610- hvcsd->open_count = 0;
35611+ local_set(&hvcsd->open_count, 0);
35612
35613 /* This will drop any buffered data on the floor which is OK in a hangup
35614 * scenario. */
35615@@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct
35616 * the middle of a write operation? This is a crummy place to do this
35617 * but we want to keep it all in the spinlock.
35618 */
35619- if (hvcsd->open_count <= 0) {
35620+ if (local_read(&hvcsd->open_count) <= 0) {
35621 spin_unlock_irqrestore(&hvcsd->lock, flags);
35622 return -ENODEV;
35623 }
35624@@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_st
35625 {
35626 struct hvcs_struct *hvcsd = tty->driver_data;
35627
35628- if (!hvcsd || hvcsd->open_count <= 0)
35629+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
35630 return 0;
35631
35632 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
35633diff -urNp linux-3.1.1/drivers/tty/ipwireless/tty.c linux-3.1.1/drivers/tty/ipwireless/tty.c
35634--- linux-3.1.1/drivers/tty/ipwireless/tty.c 2011-11-11 15:19:27.000000000 -0500
35635+++ linux-3.1.1/drivers/tty/ipwireless/tty.c 2011-11-16 18:39:08.000000000 -0500
35636@@ -29,6 +29,7 @@
35637 #include <linux/tty_driver.h>
35638 #include <linux/tty_flip.h>
35639 #include <linux/uaccess.h>
35640+#include <asm/local.h>
35641
35642 #include "tty.h"
35643 #include "network.h"
35644@@ -51,7 +52,7 @@ struct ipw_tty {
35645 int tty_type;
35646 struct ipw_network *network;
35647 struct tty_struct *linux_tty;
35648- int open_count;
35649+ local_t open_count;
35650 unsigned int control_lines;
35651 struct mutex ipw_tty_mutex;
35652 int tx_bytes_queued;
35653@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
35654 mutex_unlock(&tty->ipw_tty_mutex);
35655 return -ENODEV;
35656 }
35657- if (tty->open_count == 0)
35658+ if (local_read(&tty->open_count) == 0)
35659 tty->tx_bytes_queued = 0;
35660
35661- tty->open_count++;
35662+ local_inc(&tty->open_count);
35663
35664 tty->linux_tty = linux_tty;
35665 linux_tty->driver_data = tty;
35666@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
35667
35668 static void do_ipw_close(struct ipw_tty *tty)
35669 {
35670- tty->open_count--;
35671-
35672- if (tty->open_count == 0) {
35673+ if (local_dec_return(&tty->open_count) == 0) {
35674 struct tty_struct *linux_tty = tty->linux_tty;
35675
35676 if (linux_tty != NULL) {
35677@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
35678 return;
35679
35680 mutex_lock(&tty->ipw_tty_mutex);
35681- if (tty->open_count == 0) {
35682+ if (local_read(&tty->open_count) == 0) {
35683 mutex_unlock(&tty->ipw_tty_mutex);
35684 return;
35685 }
35686@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
35687 return;
35688 }
35689
35690- if (!tty->open_count) {
35691+ if (!local_read(&tty->open_count)) {
35692 mutex_unlock(&tty->ipw_tty_mutex);
35693 return;
35694 }
35695@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
35696 return -ENODEV;
35697
35698 mutex_lock(&tty->ipw_tty_mutex);
35699- if (!tty->open_count) {
35700+ if (!local_read(&tty->open_count)) {
35701 mutex_unlock(&tty->ipw_tty_mutex);
35702 return -EINVAL;
35703 }
35704@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
35705 if (!tty)
35706 return -ENODEV;
35707
35708- if (!tty->open_count)
35709+ if (!local_read(&tty->open_count))
35710 return -EINVAL;
35711
35712 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
35713@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
35714 if (!tty)
35715 return 0;
35716
35717- if (!tty->open_count)
35718+ if (!local_read(&tty->open_count))
35719 return 0;
35720
35721 return tty->tx_bytes_queued;
35722@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
35723 if (!tty)
35724 return -ENODEV;
35725
35726- if (!tty->open_count)
35727+ if (!local_read(&tty->open_count))
35728 return -EINVAL;
35729
35730 return get_control_lines(tty);
35731@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
35732 if (!tty)
35733 return -ENODEV;
35734
35735- if (!tty->open_count)
35736+ if (!local_read(&tty->open_count))
35737 return -EINVAL;
35738
35739 return set_control_lines(tty, set, clear);
35740@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
35741 if (!tty)
35742 return -ENODEV;
35743
35744- if (!tty->open_count)
35745+ if (!local_read(&tty->open_count))
35746 return -EINVAL;
35747
35748 /* FIXME: Exactly how is the tty object locked here .. */
35749@@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty
35750 against a parallel ioctl etc */
35751 mutex_lock(&ttyj->ipw_tty_mutex);
35752 }
35753- while (ttyj->open_count)
35754+ while (local_read(&ttyj->open_count))
35755 do_ipw_close(ttyj);
35756 ipwireless_disassociate_network_ttys(network,
35757 ttyj->channel_idx);
35758diff -urNp linux-3.1.1/drivers/tty/n_gsm.c linux-3.1.1/drivers/tty/n_gsm.c
35759--- linux-3.1.1/drivers/tty/n_gsm.c 2011-11-11 15:19:27.000000000 -0500
35760+++ linux-3.1.1/drivers/tty/n_gsm.c 2011-11-16 18:39:08.000000000 -0500
35761@@ -1625,7 +1625,7 @@ static struct gsm_dlci *gsm_dlci_alloc(s
35762 kref_init(&dlci->ref);
35763 mutex_init(&dlci->mutex);
35764 dlci->fifo = &dlci->_fifo;
35765- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
35766+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
35767 kfree(dlci);
35768 return NULL;
35769 }
35770diff -urNp linux-3.1.1/drivers/tty/n_tty.c linux-3.1.1/drivers/tty/n_tty.c
35771--- linux-3.1.1/drivers/tty/n_tty.c 2011-11-11 15:19:27.000000000 -0500
35772+++ linux-3.1.1/drivers/tty/n_tty.c 2011-11-16 18:39:08.000000000 -0500
35773@@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_
35774 {
35775 *ops = tty_ldisc_N_TTY;
35776 ops->owner = NULL;
35777- ops->refcount = ops->flags = 0;
35778+ atomic_set(&ops->refcount, 0);
35779+ ops->flags = 0;
35780 }
35781 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
35782diff -urNp linux-3.1.1/drivers/tty/pty.c linux-3.1.1/drivers/tty/pty.c
35783--- linux-3.1.1/drivers/tty/pty.c 2011-11-11 15:19:27.000000000 -0500
35784+++ linux-3.1.1/drivers/tty/pty.c 2011-11-16 18:39:08.000000000 -0500
35785@@ -773,8 +773,10 @@ static void __init unix98_pty_init(void)
35786 register_sysctl_table(pty_root_table);
35787
35788 /* Now create the /dev/ptmx special device */
35789+ pax_open_kernel();
35790 tty_default_fops(&ptmx_fops);
35791- ptmx_fops.open = ptmx_open;
35792+ *(void **)&ptmx_fops.open = ptmx_open;
35793+ pax_close_kernel();
35794
35795 cdev_init(&ptmx_cdev, &ptmx_fops);
35796 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
35797diff -urNp linux-3.1.1/drivers/tty/rocket.c linux-3.1.1/drivers/tty/rocket.c
35798--- linux-3.1.1/drivers/tty/rocket.c 2011-11-11 15:19:27.000000000 -0500
35799+++ linux-3.1.1/drivers/tty/rocket.c 2011-11-16 18:40:29.000000000 -0500
35800@@ -1277,6 +1277,8 @@ static int get_ports(struct r_port *info
35801 struct rocket_ports tmp;
35802 int board;
35803
35804+ pax_track_stack();
35805+
35806 if (!retports)
35807 return -EFAULT;
35808 memset(&tmp, 0, sizeof (tmp));
35809diff -urNp linux-3.1.1/drivers/tty/serial/kgdboc.c linux-3.1.1/drivers/tty/serial/kgdboc.c
35810--- linux-3.1.1/drivers/tty/serial/kgdboc.c 2011-11-11 15:19:27.000000000 -0500
35811+++ linux-3.1.1/drivers/tty/serial/kgdboc.c 2011-11-16 18:39:08.000000000 -0500
35812@@ -23,8 +23,9 @@
35813 #define MAX_CONFIG_LEN 40
35814
35815 static struct kgdb_io kgdboc_io_ops;
35816+static struct kgdb_io kgdboc_io_ops_console;
35817
35818-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
35819+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
35820 static int configured = -1;
35821
35822 static char config[MAX_CONFIG_LEN];
35823@@ -147,6 +148,8 @@ static void cleanup_kgdboc(void)
35824 kgdboc_unregister_kbd();
35825 if (configured == 1)
35826 kgdb_unregister_io_module(&kgdboc_io_ops);
35827+ else if (configured == 2)
35828+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
35829 }
35830
35831 static int configure_kgdboc(void)
35832@@ -156,13 +159,13 @@ static int configure_kgdboc(void)
35833 int err;
35834 char *cptr = config;
35835 struct console *cons;
35836+ int is_console = 0;
35837
35838 err = kgdboc_option_setup(config);
35839 if (err || !strlen(config) || isspace(config[0]))
35840 goto noconfig;
35841
35842 err = -ENODEV;
35843- kgdboc_io_ops.is_console = 0;
35844 kgdb_tty_driver = NULL;
35845
35846 kgdboc_use_kms = 0;
35847@@ -183,7 +186,7 @@ static int configure_kgdboc(void)
35848 int idx;
35849 if (cons->device && cons->device(cons, &idx) == p &&
35850 idx == tty_line) {
35851- kgdboc_io_ops.is_console = 1;
35852+ is_console = 1;
35853 break;
35854 }
35855 cons = cons->next;
35856@@ -193,12 +196,16 @@ static int configure_kgdboc(void)
35857 kgdb_tty_line = tty_line;
35858
35859 do_register:
35860- err = kgdb_register_io_module(&kgdboc_io_ops);
35861+ if (is_console) {
35862+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
35863+ configured = 2;
35864+ } else {
35865+ err = kgdb_register_io_module(&kgdboc_io_ops);
35866+ configured = 1;
35867+ }
35868 if (err)
35869 goto noconfig;
35870
35871- configured = 1;
35872-
35873 return 0;
35874
35875 noconfig:
35876@@ -212,7 +219,7 @@ noconfig:
35877 static int __init init_kgdboc(void)
35878 {
35879 /* Already configured? */
35880- if (configured == 1)
35881+ if (configured >= 1)
35882 return 0;
35883
35884 return configure_kgdboc();
35885@@ -261,7 +268,7 @@ static int param_set_kgdboc_var(const ch
35886 if (config[len - 1] == '\n')
35887 config[len - 1] = '\0';
35888
35889- if (configured == 1)
35890+ if (configured >= 1)
35891 cleanup_kgdboc();
35892
35893 /* Go and configure with the new params. */
35894@@ -301,6 +308,15 @@ static struct kgdb_io kgdboc_io_ops = {
35895 .post_exception = kgdboc_post_exp_handler,
35896 };
35897
35898+static struct kgdb_io kgdboc_io_ops_console = {
35899+ .name = "kgdboc",
35900+ .read_char = kgdboc_get_char,
35901+ .write_char = kgdboc_put_char,
35902+ .pre_exception = kgdboc_pre_exp_handler,
35903+ .post_exception = kgdboc_post_exp_handler,
35904+ .is_console = 1
35905+};
35906+
35907 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
35908 /* This is only available if kgdboc is a built in for early debugging */
35909 static int __init kgdboc_early_init(char *opt)
35910diff -urNp linux-3.1.1/drivers/tty/serial/mfd.c linux-3.1.1/drivers/tty/serial/mfd.c
35911--- linux-3.1.1/drivers/tty/serial/mfd.c 2011-11-11 15:19:27.000000000 -0500
35912+++ linux-3.1.1/drivers/tty/serial/mfd.c 2011-11-16 18:39:08.000000000 -0500
35913@@ -1423,7 +1423,7 @@ static void serial_hsu_remove(struct pci
35914 }
35915
35916 /* First 3 are UART ports, and the 4th is the DMA */
35917-static const struct pci_device_id pci_ids[] __devinitdata = {
35918+static const struct pci_device_id pci_ids[] __devinitconst = {
35919 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081B) },
35920 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081C) },
35921 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081D) },
35922diff -urNp linux-3.1.1/drivers/tty/serial/mrst_max3110.c linux-3.1.1/drivers/tty/serial/mrst_max3110.c
35923--- linux-3.1.1/drivers/tty/serial/mrst_max3110.c 2011-11-11 15:19:27.000000000 -0500
35924+++ linux-3.1.1/drivers/tty/serial/mrst_max3110.c 2011-11-16 18:40:29.000000000 -0500
35925@@ -393,6 +393,8 @@ static void max3110_con_receive(struct u
35926 int loop = 1, num, total = 0;
35927 u8 recv_buf[512], *pbuf;
35928
35929+ pax_track_stack();
35930+
35931 pbuf = recv_buf;
35932 do {
35933 num = max3110_read_multi(max, pbuf);
35934diff -urNp linux-3.1.1/drivers/tty/tty_io.c linux-3.1.1/drivers/tty/tty_io.c
35935--- linux-3.1.1/drivers/tty/tty_io.c 2011-11-11 15:19:27.000000000 -0500
35936+++ linux-3.1.1/drivers/tty/tty_io.c 2011-11-16 18:39:08.000000000 -0500
35937@@ -3238,7 +3238,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
35938
35939 void tty_default_fops(struct file_operations *fops)
35940 {
35941- *fops = tty_fops;
35942+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
35943 }
35944
35945 /*
35946diff -urNp linux-3.1.1/drivers/tty/tty_ldisc.c linux-3.1.1/drivers/tty/tty_ldisc.c
35947--- linux-3.1.1/drivers/tty/tty_ldisc.c 2011-11-11 15:19:27.000000000 -0500
35948+++ linux-3.1.1/drivers/tty/tty_ldisc.c 2011-11-16 18:39:08.000000000 -0500
35949@@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
35950 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
35951 struct tty_ldisc_ops *ldo = ld->ops;
35952
35953- ldo->refcount--;
35954+ atomic_dec(&ldo->refcount);
35955 module_put(ldo->owner);
35956 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35957
35958@@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
35959 spin_lock_irqsave(&tty_ldisc_lock, flags);
35960 tty_ldiscs[disc] = new_ldisc;
35961 new_ldisc->num = disc;
35962- new_ldisc->refcount = 0;
35963+ atomic_set(&new_ldisc->refcount, 0);
35964 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35965
35966 return ret;
35967@@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
35968 return -EINVAL;
35969
35970 spin_lock_irqsave(&tty_ldisc_lock, flags);
35971- if (tty_ldiscs[disc]->refcount)
35972+ if (atomic_read(&tty_ldiscs[disc]->refcount))
35973 ret = -EBUSY;
35974 else
35975 tty_ldiscs[disc] = NULL;
35976@@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
35977 if (ldops) {
35978 ret = ERR_PTR(-EAGAIN);
35979 if (try_module_get(ldops->owner)) {
35980- ldops->refcount++;
35981+ atomic_inc(&ldops->refcount);
35982 ret = ldops;
35983 }
35984 }
35985@@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
35986 unsigned long flags;
35987
35988 spin_lock_irqsave(&tty_ldisc_lock, flags);
35989- ldops->refcount--;
35990+ atomic_dec(&ldops->refcount);
35991 module_put(ldops->owner);
35992 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35993 }
35994diff -urNp linux-3.1.1/drivers/tty/vt/keyboard.c linux-3.1.1/drivers/tty/vt/keyboard.c
35995--- linux-3.1.1/drivers/tty/vt/keyboard.c 2011-11-11 15:19:27.000000000 -0500
35996+++ linux-3.1.1/drivers/tty/vt/keyboard.c 2011-11-16 18:40:29.000000000 -0500
35997@@ -656,6 +656,16 @@ static void k_spec(struct vc_data *vc, u
35998 kbd->kbdmode == VC_OFF) &&
35999 value != KVAL(K_SAK))
36000 return; /* SAK is allowed even in raw mode */
36001+
36002+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
36003+ {
36004+ void *func = fn_handler[value];
36005+ if (func == fn_show_state || func == fn_show_ptregs ||
36006+ func == fn_show_mem)
36007+ return;
36008+ }
36009+#endif
36010+
36011 fn_handler[value](vc);
36012 }
36013
36014diff -urNp linux-3.1.1/drivers/tty/vt/vt.c linux-3.1.1/drivers/tty/vt/vt.c
36015--- linux-3.1.1/drivers/tty/vt/vt.c 2011-11-11 15:19:27.000000000 -0500
36016+++ linux-3.1.1/drivers/tty/vt/vt.c 2011-11-16 18:39:08.000000000 -0500
36017@@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
36018
36019 static void notify_write(struct vc_data *vc, unsigned int unicode)
36020 {
36021- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
36022+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
36023 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
36024 }
36025
36026diff -urNp linux-3.1.1/drivers/tty/vt/vt_ioctl.c linux-3.1.1/drivers/tty/vt/vt_ioctl.c
36027--- linux-3.1.1/drivers/tty/vt/vt_ioctl.c 2011-11-11 15:19:27.000000000 -0500
36028+++ linux-3.1.1/drivers/tty/vt/vt_ioctl.c 2011-11-16 18:40:29.000000000 -0500
36029@@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
36030 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
36031 return -EFAULT;
36032
36033- if (!capable(CAP_SYS_TTY_CONFIG))
36034- perm = 0;
36035-
36036 switch (cmd) {
36037 case KDGKBENT:
36038 key_map = key_maps[s];
36039@@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __
36040 val = (i ? K_HOLE : K_NOSUCHMAP);
36041 return put_user(val, &user_kbe->kb_value);
36042 case KDSKBENT:
36043+ if (!capable(CAP_SYS_TTY_CONFIG))
36044+ perm = 0;
36045+
36046 if (!perm)
36047 return -EPERM;
36048 if (!i && v == K_NOSUCHMAP) {
36049@@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
36050 int i, j, k;
36051 int ret;
36052
36053- if (!capable(CAP_SYS_TTY_CONFIG))
36054- perm = 0;
36055-
36056 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
36057 if (!kbs) {
36058 ret = -ENOMEM;
36059@@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
36060 kfree(kbs);
36061 return ((p && *p) ? -EOVERFLOW : 0);
36062 case KDSKBSENT:
36063+ if (!capable(CAP_SYS_TTY_CONFIG))
36064+ perm = 0;
36065+
36066 if (!perm) {
36067 ret = -EPERM;
36068 goto reterr;
36069diff -urNp linux-3.1.1/drivers/uio/uio.c linux-3.1.1/drivers/uio/uio.c
36070--- linux-3.1.1/drivers/uio/uio.c 2011-11-11 15:19:27.000000000 -0500
36071+++ linux-3.1.1/drivers/uio/uio.c 2011-11-16 18:39:08.000000000 -0500
36072@@ -25,6 +25,7 @@
36073 #include <linux/kobject.h>
36074 #include <linux/cdev.h>
36075 #include <linux/uio_driver.h>
36076+#include <asm/local.h>
36077
36078 #define UIO_MAX_DEVICES (1U << MINORBITS)
36079
36080@@ -32,10 +33,10 @@ struct uio_device {
36081 struct module *owner;
36082 struct device *dev;
36083 int minor;
36084- atomic_t event;
36085+ atomic_unchecked_t event;
36086 struct fasync_struct *async_queue;
36087 wait_queue_head_t wait;
36088- int vma_count;
36089+ local_t vma_count;
36090 struct uio_info *info;
36091 struct kobject *map_dir;
36092 struct kobject *portio_dir;
36093@@ -242,7 +243,7 @@ static ssize_t show_event(struct device
36094 struct device_attribute *attr, char *buf)
36095 {
36096 struct uio_device *idev = dev_get_drvdata(dev);
36097- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
36098+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
36099 }
36100
36101 static struct device_attribute uio_class_attributes[] = {
36102@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *i
36103 {
36104 struct uio_device *idev = info->uio_dev;
36105
36106- atomic_inc(&idev->event);
36107+ atomic_inc_unchecked(&idev->event);
36108 wake_up_interruptible(&idev->wait);
36109 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
36110 }
36111@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode,
36112 }
36113
36114 listener->dev = idev;
36115- listener->event_count = atomic_read(&idev->event);
36116+ listener->event_count = atomic_read_unchecked(&idev->event);
36117 filep->private_data = listener;
36118
36119 if (idev->info->open) {
36120@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file
36121 return -EIO;
36122
36123 poll_wait(filep, &idev->wait, wait);
36124- if (listener->event_count != atomic_read(&idev->event))
36125+ if (listener->event_count != atomic_read_unchecked(&idev->event))
36126 return POLLIN | POLLRDNORM;
36127 return 0;
36128 }
36129@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *fil
36130 do {
36131 set_current_state(TASK_INTERRUPTIBLE);
36132
36133- event_count = atomic_read(&idev->event);
36134+ event_count = atomic_read_unchecked(&idev->event);
36135 if (event_count != listener->event_count) {
36136 if (copy_to_user(buf, &event_count, count))
36137 retval = -EFAULT;
36138@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_
36139 static void uio_vma_open(struct vm_area_struct *vma)
36140 {
36141 struct uio_device *idev = vma->vm_private_data;
36142- idev->vma_count++;
36143+ local_inc(&idev->vma_count);
36144 }
36145
36146 static void uio_vma_close(struct vm_area_struct *vma)
36147 {
36148 struct uio_device *idev = vma->vm_private_data;
36149- idev->vma_count--;
36150+ local_dec(&idev->vma_count);
36151 }
36152
36153 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
36154@@ -823,7 +824,7 @@ int __uio_register_device(struct module
36155 idev->owner = owner;
36156 idev->info = info;
36157 init_waitqueue_head(&idev->wait);
36158- atomic_set(&idev->event, 0);
36159+ atomic_set_unchecked(&idev->event, 0);
36160
36161 ret = uio_get_minor(idev);
36162 if (ret)
36163diff -urNp linux-3.1.1/drivers/usb/atm/cxacru.c linux-3.1.1/drivers/usb/atm/cxacru.c
36164--- linux-3.1.1/drivers/usb/atm/cxacru.c 2011-11-11 15:19:27.000000000 -0500
36165+++ linux-3.1.1/drivers/usb/atm/cxacru.c 2011-11-16 18:39:08.000000000 -0500
36166@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_c
36167 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
36168 if (ret < 2)
36169 return -EINVAL;
36170- if (index < 0 || index > 0x7f)
36171+ if (index > 0x7f)
36172 return -EINVAL;
36173 pos += tmp;
36174
36175diff -urNp linux-3.1.1/drivers/usb/atm/usbatm.c linux-3.1.1/drivers/usb/atm/usbatm.c
36176--- linux-3.1.1/drivers/usb/atm/usbatm.c 2011-11-11 15:19:27.000000000 -0500
36177+++ linux-3.1.1/drivers/usb/atm/usbatm.c 2011-11-16 18:39:08.000000000 -0500
36178@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(stru
36179 if (printk_ratelimit())
36180 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
36181 __func__, vpi, vci);
36182- atomic_inc(&vcc->stats->rx_err);
36183+ atomic_inc_unchecked(&vcc->stats->rx_err);
36184 return;
36185 }
36186
36187@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(stru
36188 if (length > ATM_MAX_AAL5_PDU) {
36189 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
36190 __func__, length, vcc);
36191- atomic_inc(&vcc->stats->rx_err);
36192+ atomic_inc_unchecked(&vcc->stats->rx_err);
36193 goto out;
36194 }
36195
36196@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(stru
36197 if (sarb->len < pdu_length) {
36198 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
36199 __func__, pdu_length, sarb->len, vcc);
36200- atomic_inc(&vcc->stats->rx_err);
36201+ atomic_inc_unchecked(&vcc->stats->rx_err);
36202 goto out;
36203 }
36204
36205 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
36206 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
36207 __func__, vcc);
36208- atomic_inc(&vcc->stats->rx_err);
36209+ atomic_inc_unchecked(&vcc->stats->rx_err);
36210 goto out;
36211 }
36212
36213@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(stru
36214 if (printk_ratelimit())
36215 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
36216 __func__, length);
36217- atomic_inc(&vcc->stats->rx_drop);
36218+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36219 goto out;
36220 }
36221
36222@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(stru
36223
36224 vcc->push(vcc, skb);
36225
36226- atomic_inc(&vcc->stats->rx);
36227+ atomic_inc_unchecked(&vcc->stats->rx);
36228 out:
36229 skb_trim(sarb, 0);
36230 }
36231@@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned l
36232 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
36233
36234 usbatm_pop(vcc, skb);
36235- atomic_inc(&vcc->stats->tx);
36236+ atomic_inc_unchecked(&vcc->stats->tx);
36237
36238 skb = skb_dequeue(&instance->sndqueue);
36239 }
36240@@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct a
36241 if (!left--)
36242 return sprintf(page,
36243 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
36244- atomic_read(&atm_dev->stats.aal5.tx),
36245- atomic_read(&atm_dev->stats.aal5.tx_err),
36246- atomic_read(&atm_dev->stats.aal5.rx),
36247- atomic_read(&atm_dev->stats.aal5.rx_err),
36248- atomic_read(&atm_dev->stats.aal5.rx_drop));
36249+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
36250+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
36251+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
36252+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
36253+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
36254
36255 if (!left--) {
36256 if (instance->disconnected)
36257diff -urNp linux-3.1.1/drivers/usb/core/devices.c linux-3.1.1/drivers/usb/core/devices.c
36258--- linux-3.1.1/drivers/usb/core/devices.c 2011-11-11 15:19:27.000000000 -0500
36259+++ linux-3.1.1/drivers/usb/core/devices.c 2011-11-16 18:39:08.000000000 -0500
36260@@ -126,7 +126,7 @@ static const char format_endpt[] =
36261 * time it gets called.
36262 */
36263 static struct device_connect_event {
36264- atomic_t count;
36265+ atomic_unchecked_t count;
36266 wait_queue_head_t wait;
36267 } device_event = {
36268 .count = ATOMIC_INIT(1),
36269@@ -164,7 +164,7 @@ static const struct class_info clas_info
36270
36271 void usbfs_conn_disc_event(void)
36272 {
36273- atomic_add(2, &device_event.count);
36274+ atomic_add_unchecked(2, &device_event.count);
36275 wake_up(&device_event.wait);
36276 }
36277
36278@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(stru
36279
36280 poll_wait(file, &device_event.wait, wait);
36281
36282- event_count = atomic_read(&device_event.count);
36283+ event_count = atomic_read_unchecked(&device_event.count);
36284 if (file->f_version != event_count) {
36285 file->f_version = event_count;
36286 return POLLIN | POLLRDNORM;
36287diff -urNp linux-3.1.1/drivers/usb/core/message.c linux-3.1.1/drivers/usb/core/message.c
36288--- linux-3.1.1/drivers/usb/core/message.c 2011-11-11 15:19:27.000000000 -0500
36289+++ linux-3.1.1/drivers/usb/core/message.c 2011-11-16 18:39:08.000000000 -0500
36290@@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device
36291 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
36292 if (buf) {
36293 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
36294- if (len > 0) {
36295- smallbuf = kmalloc(++len, GFP_NOIO);
36296+ if (len++ > 0) {
36297+ smallbuf = kmalloc(len, GFP_NOIO);
36298 if (!smallbuf)
36299 return buf;
36300 memcpy(smallbuf, buf, len);
36301diff -urNp linux-3.1.1/drivers/usb/early/ehci-dbgp.c linux-3.1.1/drivers/usb/early/ehci-dbgp.c
36302--- linux-3.1.1/drivers/usb/early/ehci-dbgp.c 2011-11-11 15:19:27.000000000 -0500
36303+++ linux-3.1.1/drivers/usb/early/ehci-dbgp.c 2011-11-16 18:39:08.000000000 -0500
36304@@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x,
36305
36306 #ifdef CONFIG_KGDB
36307 static struct kgdb_io kgdbdbgp_io_ops;
36308-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
36309+static struct kgdb_io kgdbdbgp_io_ops_console;
36310+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
36311 #else
36312 #define dbgp_kgdb_mode (0)
36313 #endif
36314@@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops =
36315 .write_char = kgdbdbgp_write_char,
36316 };
36317
36318+static struct kgdb_io kgdbdbgp_io_ops_console = {
36319+ .name = "kgdbdbgp",
36320+ .read_char = kgdbdbgp_read_char,
36321+ .write_char = kgdbdbgp_write_char,
36322+ .is_console = 1
36323+};
36324+
36325 static int kgdbdbgp_wait_time;
36326
36327 static int __init kgdbdbgp_parse_config(char *str)
36328@@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(
36329 ptr++;
36330 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
36331 }
36332- kgdb_register_io_module(&kgdbdbgp_io_ops);
36333- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
36334+ if (early_dbgp_console.index != -1)
36335+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
36336+ else
36337+ kgdb_register_io_module(&kgdbdbgp_io_ops);
36338
36339 return 0;
36340 }
36341diff -urNp linux-3.1.1/drivers/usb/host/xhci-mem.c linux-3.1.1/drivers/usb/host/xhci-mem.c
36342--- linux-3.1.1/drivers/usb/host/xhci-mem.c 2011-11-11 15:19:27.000000000 -0500
36343+++ linux-3.1.1/drivers/usb/host/xhci-mem.c 2011-11-16 18:40:29.000000000 -0500
36344@@ -1690,6 +1690,8 @@ static int xhci_check_trb_in_td_math(str
36345 unsigned int num_tests;
36346 int i, ret;
36347
36348+ pax_track_stack();
36349+
36350 num_tests = ARRAY_SIZE(simple_test_vector);
36351 for (i = 0; i < num_tests; i++) {
36352 ret = xhci_test_trb_in_td(xhci,
36353diff -urNp linux-3.1.1/drivers/usb/wusbcore/wa-hc.h linux-3.1.1/drivers/usb/wusbcore/wa-hc.h
36354--- linux-3.1.1/drivers/usb/wusbcore/wa-hc.h 2011-11-11 15:19:27.000000000 -0500
36355+++ linux-3.1.1/drivers/usb/wusbcore/wa-hc.h 2011-11-16 18:39:08.000000000 -0500
36356@@ -192,7 +192,7 @@ struct wahc {
36357 struct list_head xfer_delayed_list;
36358 spinlock_t xfer_list_lock;
36359 struct work_struct xfer_work;
36360- atomic_t xfer_id_count;
36361+ atomic_unchecked_t xfer_id_count;
36362 };
36363
36364
36365@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
36366 INIT_LIST_HEAD(&wa->xfer_delayed_list);
36367 spin_lock_init(&wa->xfer_list_lock);
36368 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
36369- atomic_set(&wa->xfer_id_count, 1);
36370+ atomic_set_unchecked(&wa->xfer_id_count, 1);
36371 }
36372
36373 /**
36374diff -urNp linux-3.1.1/drivers/usb/wusbcore/wa-xfer.c linux-3.1.1/drivers/usb/wusbcore/wa-xfer.c
36375--- linux-3.1.1/drivers/usb/wusbcore/wa-xfer.c 2011-11-11 15:19:27.000000000 -0500
36376+++ linux-3.1.1/drivers/usb/wusbcore/wa-xfer.c 2011-11-16 18:39:08.000000000 -0500
36377@@ -295,7 +295,7 @@ out:
36378 */
36379 static void wa_xfer_id_init(struct wa_xfer *xfer)
36380 {
36381- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
36382+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
36383 }
36384
36385 /*
36386diff -urNp linux-3.1.1/drivers/vhost/vhost.c linux-3.1.1/drivers/vhost/vhost.c
36387--- linux-3.1.1/drivers/vhost/vhost.c 2011-11-11 15:19:27.000000000 -0500
36388+++ linux-3.1.1/drivers/vhost/vhost.c 2011-11-16 18:39:08.000000000 -0500
36389@@ -629,7 +629,7 @@ static long vhost_set_memory(struct vhos
36390 return 0;
36391 }
36392
36393-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
36394+static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
36395 {
36396 struct file *eventfp, *filep = NULL,
36397 *pollstart = NULL, *pollstop = NULL;
36398diff -urNp linux-3.1.1/drivers/video/aty/aty128fb.c linux-3.1.1/drivers/video/aty/aty128fb.c
36399--- linux-3.1.1/drivers/video/aty/aty128fb.c 2011-11-11 15:19:27.000000000 -0500
36400+++ linux-3.1.1/drivers/video/aty/aty128fb.c 2011-11-16 18:39:08.000000000 -0500
36401@@ -148,7 +148,7 @@ enum {
36402 };
36403
36404 /* Must match above enum */
36405-static const char *r128_family[] __devinitdata = {
36406+static const char *r128_family[] __devinitconst = {
36407 "AGP",
36408 "PCI",
36409 "PRO AGP",
36410diff -urNp linux-3.1.1/drivers/video/fbcmap.c linux-3.1.1/drivers/video/fbcmap.c
36411--- linux-3.1.1/drivers/video/fbcmap.c 2011-11-11 15:19:27.000000000 -0500
36412+++ linux-3.1.1/drivers/video/fbcmap.c 2011-11-16 18:39:08.000000000 -0500
36413@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user
36414 rc = -ENODEV;
36415 goto out;
36416 }
36417- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
36418- !info->fbops->fb_setcmap)) {
36419+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
36420 rc = -EINVAL;
36421 goto out1;
36422 }
36423diff -urNp linux-3.1.1/drivers/video/fbmem.c linux-3.1.1/drivers/video/fbmem.c
36424--- linux-3.1.1/drivers/video/fbmem.c 2011-11-11 15:19:27.000000000 -0500
36425+++ linux-3.1.1/drivers/video/fbmem.c 2011-11-16 18:40:29.000000000 -0500
36426@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_in
36427 image->dx += image->width + 8;
36428 }
36429 } else if (rotate == FB_ROTATE_UD) {
36430- for (x = 0; x < num && image->dx >= 0; x++) {
36431+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
36432 info->fbops->fb_imageblit(info, image);
36433 image->dx -= image->width + 8;
36434 }
36435@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_in
36436 image->dy += image->height + 8;
36437 }
36438 } else if (rotate == FB_ROTATE_CCW) {
36439- for (x = 0; x < num && image->dy >= 0; x++) {
36440+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
36441 info->fbops->fb_imageblit(info, image);
36442 image->dy -= image->height + 8;
36443 }
36444@@ -939,6 +939,8 @@ fb_set_var(struct fb_info *info, struct
36445 int flags = info->flags;
36446 int ret = 0;
36447
36448+ pax_track_stack();
36449+
36450 if (var->activate & FB_ACTIVATE_INV_MODE) {
36451 struct fb_videomode mode1, mode2;
36452
36453@@ -1064,6 +1066,8 @@ static long do_fb_ioctl(struct fb_info *
36454 void __user *argp = (void __user *)arg;
36455 long ret = 0;
36456
36457+ pax_track_stack();
36458+
36459 switch (cmd) {
36460 case FBIOGET_VSCREENINFO:
36461 if (!lock_fb_info(info))
36462@@ -1143,7 +1147,7 @@ static long do_fb_ioctl(struct fb_info *
36463 return -EFAULT;
36464 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
36465 return -EINVAL;
36466- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
36467+ if (con2fb.framebuffer >= FB_MAX)
36468 return -EINVAL;
36469 if (!registered_fb[con2fb.framebuffer])
36470 request_module("fb%d", con2fb.framebuffer);
36471diff -urNp linux-3.1.1/drivers/video/geode/gx1fb_core.c linux-3.1.1/drivers/video/geode/gx1fb_core.c
36472--- linux-3.1.1/drivers/video/geode/gx1fb_core.c 2011-11-11 15:19:27.000000000 -0500
36473+++ linux-3.1.1/drivers/video/geode/gx1fb_core.c 2011-11-16 18:39:08.000000000 -0500
36474@@ -29,7 +29,7 @@ static int crt_option = 1;
36475 static char panel_option[32] = "";
36476
36477 /* Modes relevant to the GX1 (taken from modedb.c) */
36478-static const struct fb_videomode __devinitdata gx1_modedb[] = {
36479+static const struct fb_videomode __devinitconst gx1_modedb[] = {
36480 /* 640x480-60 VESA */
36481 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
36482 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
36483diff -urNp linux-3.1.1/drivers/video/gxt4500.c linux-3.1.1/drivers/video/gxt4500.c
36484--- linux-3.1.1/drivers/video/gxt4500.c 2011-11-11 15:19:27.000000000 -0500
36485+++ linux-3.1.1/drivers/video/gxt4500.c 2011-11-16 18:39:08.000000000 -0500
36486@@ -156,7 +156,7 @@ struct gxt4500_par {
36487 static char *mode_option;
36488
36489 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
36490-static const struct fb_videomode defaultmode __devinitdata = {
36491+static const struct fb_videomode defaultmode __devinitconst = {
36492 .refresh = 60,
36493 .xres = 1280,
36494 .yres = 1024,
36495@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, stru
36496 return 0;
36497 }
36498
36499-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
36500+static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
36501 .id = "IBM GXT4500P",
36502 .type = FB_TYPE_PACKED_PIXELS,
36503 .visual = FB_VISUAL_PSEUDOCOLOR,
36504diff -urNp linux-3.1.1/drivers/video/i810/i810_accel.c linux-3.1.1/drivers/video/i810/i810_accel.c
36505--- linux-3.1.1/drivers/video/i810/i810_accel.c 2011-11-11 15:19:27.000000000 -0500
36506+++ linux-3.1.1/drivers/video/i810/i810_accel.c 2011-11-16 18:39:08.000000000 -0500
36507@@ -73,6 +73,7 @@ static inline int wait_for_space(struct
36508 }
36509 }
36510 printk("ringbuffer lockup!!!\n");
36511+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
36512 i810_report_error(mmio);
36513 par->dev_flags |= LOCKUP;
36514 info->pixmap.scan_align = 1;
36515diff -urNp linux-3.1.1/drivers/video/i810/i810_main.c linux-3.1.1/drivers/video/i810/i810_main.c
36516--- linux-3.1.1/drivers/video/i810/i810_main.c 2011-11-11 15:19:27.000000000 -0500
36517+++ linux-3.1.1/drivers/video/i810/i810_main.c 2011-11-16 18:39:08.000000000 -0500
36518@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_
36519 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
36520
36521 /* PCI */
36522-static const char *i810_pci_list[] __devinitdata = {
36523+static const char *i810_pci_list[] __devinitconst = {
36524 "Intel(R) 810 Framebuffer Device" ,
36525 "Intel(R) 810-DC100 Framebuffer Device" ,
36526 "Intel(R) 810E Framebuffer Device" ,
36527diff -urNp linux-3.1.1/drivers/video/jz4740_fb.c linux-3.1.1/drivers/video/jz4740_fb.c
36528--- linux-3.1.1/drivers/video/jz4740_fb.c 2011-11-11 15:19:27.000000000 -0500
36529+++ linux-3.1.1/drivers/video/jz4740_fb.c 2011-11-16 18:39:08.000000000 -0500
36530@@ -136,7 +136,7 @@ struct jzfb {
36531 uint32_t pseudo_palette[16];
36532 };
36533
36534-static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
36535+static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
36536 .id = "JZ4740 FB",
36537 .type = FB_TYPE_PACKED_PIXELS,
36538 .visual = FB_VISUAL_TRUECOLOR,
36539diff -urNp linux-3.1.1/drivers/video/logo/logo_linux_clut224.ppm linux-3.1.1/drivers/video/logo/logo_linux_clut224.ppm
36540--- linux-3.1.1/drivers/video/logo/logo_linux_clut224.ppm 2011-11-11 15:19:27.000000000 -0500
36541+++ linux-3.1.1/drivers/video/logo/logo_linux_clut224.ppm 2011-11-16 18:40:29.000000000 -0500
36542@@ -1,1604 +1,1123 @@
36543 P3
36544-# Standard 224-color Linux logo
36545 80 80
36546 255
36547- 0 0 0 0 0 0 0 0 0 0 0 0
36548- 0 0 0 0 0 0 0 0 0 0 0 0
36549- 0 0 0 0 0 0 0 0 0 0 0 0
36550- 0 0 0 0 0 0 0 0 0 0 0 0
36551- 0 0 0 0 0 0 0 0 0 0 0 0
36552- 0 0 0 0 0 0 0 0 0 0 0 0
36553- 0 0 0 0 0 0 0 0 0 0 0 0
36554- 0 0 0 0 0 0 0 0 0 0 0 0
36555- 0 0 0 0 0 0 0 0 0 0 0 0
36556- 6 6 6 6 6 6 10 10 10 10 10 10
36557- 10 10 10 6 6 6 6 6 6 6 6 6
36558- 0 0 0 0 0 0 0 0 0 0 0 0
36559- 0 0 0 0 0 0 0 0 0 0 0 0
36560- 0 0 0 0 0 0 0 0 0 0 0 0
36561- 0 0 0 0 0 0 0 0 0 0 0 0
36562- 0 0 0 0 0 0 0 0 0 0 0 0
36563- 0 0 0 0 0 0 0 0 0 0 0 0
36564- 0 0 0 0 0 0 0 0 0 0 0 0
36565- 0 0 0 0 0 0 0 0 0 0 0 0
36566- 0 0 0 0 0 0 0 0 0 0 0 0
36567- 0 0 0 0 0 0 0 0 0 0 0 0
36568- 0 0 0 0 0 0 0 0 0 0 0 0
36569- 0 0 0 0 0 0 0 0 0 0 0 0
36570- 0 0 0 0 0 0 0 0 0 0 0 0
36571- 0 0 0 0 0 0 0 0 0 0 0 0
36572- 0 0 0 0 0 0 0 0 0 0 0 0
36573- 0 0 0 0 0 0 0 0 0 0 0 0
36574- 0 0 0 0 0 0 0 0 0 0 0 0
36575- 0 0 0 6 6 6 10 10 10 14 14 14
36576- 22 22 22 26 26 26 30 30 30 34 34 34
36577- 30 30 30 30 30 30 26 26 26 18 18 18
36578- 14 14 14 10 10 10 6 6 6 0 0 0
36579- 0 0 0 0 0 0 0 0 0 0 0 0
36580- 0 0 0 0 0 0 0 0 0 0 0 0
36581- 0 0 0 0 0 0 0 0 0 0 0 0
36582- 0 0 0 0 0 0 0 0 0 0 0 0
36583- 0 0 0 0 0 0 0 0 0 0 0 0
36584- 0 0 0 0 0 0 0 0 0 0 0 0
36585- 0 0 0 0 0 0 0 0 0 0 0 0
36586- 0 0 0 0 0 0 0 0 0 0 0 0
36587- 0 0 0 0 0 0 0 0 0 0 0 0
36588- 0 0 0 0 0 1 0 0 1 0 0 0
36589- 0 0 0 0 0 0 0 0 0 0 0 0
36590- 0 0 0 0 0 0 0 0 0 0 0 0
36591- 0 0 0 0 0 0 0 0 0 0 0 0
36592- 0 0 0 0 0 0 0 0 0 0 0 0
36593- 0 0 0 0 0 0 0 0 0 0 0 0
36594- 0 0 0 0 0 0 0 0 0 0 0 0
36595- 6 6 6 14 14 14 26 26 26 42 42 42
36596- 54 54 54 66 66 66 78 78 78 78 78 78
36597- 78 78 78 74 74 74 66 66 66 54 54 54
36598- 42 42 42 26 26 26 18 18 18 10 10 10
36599- 6 6 6 0 0 0 0 0 0 0 0 0
36600- 0 0 0 0 0 0 0 0 0 0 0 0
36601- 0 0 0 0 0 0 0 0 0 0 0 0
36602- 0 0 0 0 0 0 0 0 0 0 0 0
36603- 0 0 0 0 0 0 0 0 0 0 0 0
36604- 0 0 0 0 0 0 0 0 0 0 0 0
36605- 0 0 0 0 0 0 0 0 0 0 0 0
36606- 0 0 0 0 0 0 0 0 0 0 0 0
36607- 0 0 0 0 0 0 0 0 0 0 0 0
36608- 0 0 1 0 0 0 0 0 0 0 0 0
36609- 0 0 0 0 0 0 0 0 0 0 0 0
36610- 0 0 0 0 0 0 0 0 0 0 0 0
36611- 0 0 0 0 0 0 0 0 0 0 0 0
36612- 0 0 0 0 0 0 0 0 0 0 0 0
36613- 0 0 0 0 0 0 0 0 0 0 0 0
36614- 0 0 0 0 0 0 0 0 0 10 10 10
36615- 22 22 22 42 42 42 66 66 66 86 86 86
36616- 66 66 66 38 38 38 38 38 38 22 22 22
36617- 26 26 26 34 34 34 54 54 54 66 66 66
36618- 86 86 86 70 70 70 46 46 46 26 26 26
36619- 14 14 14 6 6 6 0 0 0 0 0 0
36620- 0 0 0 0 0 0 0 0 0 0 0 0
36621- 0 0 0 0 0 0 0 0 0 0 0 0
36622- 0 0 0 0 0 0 0 0 0 0 0 0
36623- 0 0 0 0 0 0 0 0 0 0 0 0
36624- 0 0 0 0 0 0 0 0 0 0 0 0
36625- 0 0 0 0 0 0 0 0 0 0 0 0
36626- 0 0 0 0 0 0 0 0 0 0 0 0
36627- 0 0 0 0 0 0 0 0 0 0 0 0
36628- 0 0 1 0 0 1 0 0 1 0 0 0
36629- 0 0 0 0 0 0 0 0 0 0 0 0
36630- 0 0 0 0 0 0 0 0 0 0 0 0
36631- 0 0 0 0 0 0 0 0 0 0 0 0
36632- 0 0 0 0 0 0 0 0 0 0 0 0
36633- 0 0 0 0 0 0 0 0 0 0 0 0
36634- 0 0 0 0 0 0 10 10 10 26 26 26
36635- 50 50 50 82 82 82 58 58 58 6 6 6
36636- 2 2 6 2 2 6 2 2 6 2 2 6
36637- 2 2 6 2 2 6 2 2 6 2 2 6
36638- 6 6 6 54 54 54 86 86 86 66 66 66
36639- 38 38 38 18 18 18 6 6 6 0 0 0
36640- 0 0 0 0 0 0 0 0 0 0 0 0
36641- 0 0 0 0 0 0 0 0 0 0 0 0
36642- 0 0 0 0 0 0 0 0 0 0 0 0
36643- 0 0 0 0 0 0 0 0 0 0 0 0
36644- 0 0 0 0 0 0 0 0 0 0 0 0
36645- 0 0 0 0 0 0 0 0 0 0 0 0
36646- 0 0 0 0 0 0 0 0 0 0 0 0
36647- 0 0 0 0 0 0 0 0 0 0 0 0
36648- 0 0 0 0 0 0 0 0 0 0 0 0
36649- 0 0 0 0 0 0 0 0 0 0 0 0
36650- 0 0 0 0 0 0 0 0 0 0 0 0
36651- 0 0 0 0 0 0 0 0 0 0 0 0
36652- 0 0 0 0 0 0 0 0 0 0 0 0
36653- 0 0 0 0 0 0 0 0 0 0 0 0
36654- 0 0 0 6 6 6 22 22 22 50 50 50
36655- 78 78 78 34 34 34 2 2 6 2 2 6
36656- 2 2 6 2 2 6 2 2 6 2 2 6
36657- 2 2 6 2 2 6 2 2 6 2 2 6
36658- 2 2 6 2 2 6 6 6 6 70 70 70
36659- 78 78 78 46 46 46 22 22 22 6 6 6
36660- 0 0 0 0 0 0 0 0 0 0 0 0
36661- 0 0 0 0 0 0 0 0 0 0 0 0
36662- 0 0 0 0 0 0 0 0 0 0 0 0
36663- 0 0 0 0 0 0 0 0 0 0 0 0
36664- 0 0 0 0 0 0 0 0 0 0 0 0
36665- 0 0 0 0 0 0 0 0 0 0 0 0
36666- 0 0 0 0 0 0 0 0 0 0 0 0
36667- 0 0 0 0 0 0 0 0 0 0 0 0
36668- 0 0 1 0 0 1 0 0 1 0 0 0
36669- 0 0 0 0 0 0 0 0 0 0 0 0
36670- 0 0 0 0 0 0 0 0 0 0 0 0
36671- 0 0 0 0 0 0 0 0 0 0 0 0
36672- 0 0 0 0 0 0 0 0 0 0 0 0
36673- 0 0 0 0 0 0 0 0 0 0 0 0
36674- 6 6 6 18 18 18 42 42 42 82 82 82
36675- 26 26 26 2 2 6 2 2 6 2 2 6
36676- 2 2 6 2 2 6 2 2 6 2 2 6
36677- 2 2 6 2 2 6 2 2 6 14 14 14
36678- 46 46 46 34 34 34 6 6 6 2 2 6
36679- 42 42 42 78 78 78 42 42 42 18 18 18
36680- 6 6 6 0 0 0 0 0 0 0 0 0
36681- 0 0 0 0 0 0 0 0 0 0 0 0
36682- 0 0 0 0 0 0 0 0 0 0 0 0
36683- 0 0 0 0 0 0 0 0 0 0 0 0
36684- 0 0 0 0 0 0 0 0 0 0 0 0
36685- 0 0 0 0 0 0 0 0 0 0 0 0
36686- 0 0 0 0 0 0 0 0 0 0 0 0
36687- 0 0 0 0 0 0 0 0 0 0 0 0
36688- 0 0 1 0 0 0 0 0 1 0 0 0
36689- 0 0 0 0 0 0 0 0 0 0 0 0
36690- 0 0 0 0 0 0 0 0 0 0 0 0
36691- 0 0 0 0 0 0 0 0 0 0 0 0
36692- 0 0 0 0 0 0 0 0 0 0 0 0
36693- 0 0 0 0 0 0 0 0 0 0 0 0
36694- 10 10 10 30 30 30 66 66 66 58 58 58
36695- 2 2 6 2 2 6 2 2 6 2 2 6
36696- 2 2 6 2 2 6 2 2 6 2 2 6
36697- 2 2 6 2 2 6 2 2 6 26 26 26
36698- 86 86 86 101 101 101 46 46 46 10 10 10
36699- 2 2 6 58 58 58 70 70 70 34 34 34
36700- 10 10 10 0 0 0 0 0 0 0 0 0
36701- 0 0 0 0 0 0 0 0 0 0 0 0
36702- 0 0 0 0 0 0 0 0 0 0 0 0
36703- 0 0 0 0 0 0 0 0 0 0 0 0
36704- 0 0 0 0 0 0 0 0 0 0 0 0
36705- 0 0 0 0 0 0 0 0 0 0 0 0
36706- 0 0 0 0 0 0 0 0 0 0 0 0
36707- 0 0 0 0 0 0 0 0 0 0 0 0
36708- 0 0 1 0 0 1 0 0 1 0 0 0
36709- 0 0 0 0 0 0 0 0 0 0 0 0
36710- 0 0 0 0 0 0 0 0 0 0 0 0
36711- 0 0 0 0 0 0 0 0 0 0 0 0
36712- 0 0 0 0 0 0 0 0 0 0 0 0
36713- 0 0 0 0 0 0 0 0 0 0 0 0
36714- 14 14 14 42 42 42 86 86 86 10 10 10
36715- 2 2 6 2 2 6 2 2 6 2 2 6
36716- 2 2 6 2 2 6 2 2 6 2 2 6
36717- 2 2 6 2 2 6 2 2 6 30 30 30
36718- 94 94 94 94 94 94 58 58 58 26 26 26
36719- 2 2 6 6 6 6 78 78 78 54 54 54
36720- 22 22 22 6 6 6 0 0 0 0 0 0
36721- 0 0 0 0 0 0 0 0 0 0 0 0
36722- 0 0 0 0 0 0 0 0 0 0 0 0
36723- 0 0 0 0 0 0 0 0 0 0 0 0
36724- 0 0 0 0 0 0 0 0 0 0 0 0
36725- 0 0 0 0 0 0 0 0 0 0 0 0
36726- 0 0 0 0 0 0 0 0 0 0 0 0
36727- 0 0 0 0 0 0 0 0 0 0 0 0
36728- 0 0 0 0 0 0 0 0 0 0 0 0
36729- 0 0 0 0 0 0 0 0 0 0 0 0
36730- 0 0 0 0 0 0 0 0 0 0 0 0
36731- 0 0 0 0 0 0 0 0 0 0 0 0
36732- 0 0 0 0 0 0 0 0 0 0 0 0
36733- 0 0 0 0 0 0 0 0 0 6 6 6
36734- 22 22 22 62 62 62 62 62 62 2 2 6
36735- 2 2 6 2 2 6 2 2 6 2 2 6
36736- 2 2 6 2 2 6 2 2 6 2 2 6
36737- 2 2 6 2 2 6 2 2 6 26 26 26
36738- 54 54 54 38 38 38 18 18 18 10 10 10
36739- 2 2 6 2 2 6 34 34 34 82 82 82
36740- 38 38 38 14 14 14 0 0 0 0 0 0
36741- 0 0 0 0 0 0 0 0 0 0 0 0
36742- 0 0 0 0 0 0 0 0 0 0 0 0
36743- 0 0 0 0 0 0 0 0 0 0 0 0
36744- 0 0 0 0 0 0 0 0 0 0 0 0
36745- 0 0 0 0 0 0 0 0 0 0 0 0
36746- 0 0 0 0 0 0 0 0 0 0 0 0
36747- 0 0 0 0 0 0 0 0 0 0 0 0
36748- 0 0 0 0 0 1 0 0 1 0 0 0
36749- 0 0 0 0 0 0 0 0 0 0 0 0
36750- 0 0 0 0 0 0 0 0 0 0 0 0
36751- 0 0 0 0 0 0 0 0 0 0 0 0
36752- 0 0 0 0 0 0 0 0 0 0 0 0
36753- 0 0 0 0 0 0 0 0 0 6 6 6
36754- 30 30 30 78 78 78 30 30 30 2 2 6
36755- 2 2 6 2 2 6 2 2 6 2 2 6
36756- 2 2 6 2 2 6 2 2 6 2 2 6
36757- 2 2 6 2 2 6 2 2 6 10 10 10
36758- 10 10 10 2 2 6 2 2 6 2 2 6
36759- 2 2 6 2 2 6 2 2 6 78 78 78
36760- 50 50 50 18 18 18 6 6 6 0 0 0
36761- 0 0 0 0 0 0 0 0 0 0 0 0
36762- 0 0 0 0 0 0 0 0 0 0 0 0
36763- 0 0 0 0 0 0 0 0 0 0 0 0
36764- 0 0 0 0 0 0 0 0 0 0 0 0
36765- 0 0 0 0 0 0 0 0 0 0 0 0
36766- 0 0 0 0 0 0 0 0 0 0 0 0
36767- 0 0 0 0 0 0 0 0 0 0 0 0
36768- 0 0 1 0 0 0 0 0 0 0 0 0
36769- 0 0 0 0 0 0 0 0 0 0 0 0
36770- 0 0 0 0 0 0 0 0 0 0 0 0
36771- 0 0 0 0 0 0 0 0 0 0 0 0
36772- 0 0 0 0 0 0 0 0 0 0 0 0
36773- 0 0 0 0 0 0 0 0 0 10 10 10
36774- 38 38 38 86 86 86 14 14 14 2 2 6
36775- 2 2 6 2 2 6 2 2 6 2 2 6
36776- 2 2 6 2 2 6 2 2 6 2 2 6
36777- 2 2 6 2 2 6 2 2 6 2 2 6
36778- 2 2 6 2 2 6 2 2 6 2 2 6
36779- 2 2 6 2 2 6 2 2 6 54 54 54
36780- 66 66 66 26 26 26 6 6 6 0 0 0
36781- 0 0 0 0 0 0 0 0 0 0 0 0
36782- 0 0 0 0 0 0 0 0 0 0 0 0
36783- 0 0 0 0 0 0 0 0 0 0 0 0
36784- 0 0 0 0 0 0 0 0 0 0 0 0
36785- 0 0 0 0 0 0 0 0 0 0 0 0
36786- 0 0 0 0 0 0 0 0 0 0 0 0
36787- 0 0 0 0 0 0 0 0 0 0 0 0
36788- 0 0 0 0 0 1 0 0 1 0 0 0
36789- 0 0 0 0 0 0 0 0 0 0 0 0
36790- 0 0 0 0 0 0 0 0 0 0 0 0
36791- 0 0 0 0 0 0 0 0 0 0 0 0
36792- 0 0 0 0 0 0 0 0 0 0 0 0
36793- 0 0 0 0 0 0 0 0 0 14 14 14
36794- 42 42 42 82 82 82 2 2 6 2 2 6
36795- 2 2 6 6 6 6 10 10 10 2 2 6
36796- 2 2 6 2 2 6 2 2 6 2 2 6
36797- 2 2 6 2 2 6 2 2 6 6 6 6
36798- 14 14 14 10 10 10 2 2 6 2 2 6
36799- 2 2 6 2 2 6 2 2 6 18 18 18
36800- 82 82 82 34 34 34 10 10 10 0 0 0
36801- 0 0 0 0 0 0 0 0 0 0 0 0
36802- 0 0 0 0 0 0 0 0 0 0 0 0
36803- 0 0 0 0 0 0 0 0 0 0 0 0
36804- 0 0 0 0 0 0 0 0 0 0 0 0
36805- 0 0 0 0 0 0 0 0 0 0 0 0
36806- 0 0 0 0 0 0 0 0 0 0 0 0
36807- 0 0 0 0 0 0 0 0 0 0 0 0
36808- 0 0 1 0 0 0 0 0 0 0 0 0
36809- 0 0 0 0 0 0 0 0 0 0 0 0
36810- 0 0 0 0 0 0 0 0 0 0 0 0
36811- 0 0 0 0 0 0 0 0 0 0 0 0
36812- 0 0 0 0 0 0 0 0 0 0 0 0
36813- 0 0 0 0 0 0 0 0 0 14 14 14
36814- 46 46 46 86 86 86 2 2 6 2 2 6
36815- 6 6 6 6 6 6 22 22 22 34 34 34
36816- 6 6 6 2 2 6 2 2 6 2 2 6
36817- 2 2 6 2 2 6 18 18 18 34 34 34
36818- 10 10 10 50 50 50 22 22 22 2 2 6
36819- 2 2 6 2 2 6 2 2 6 10 10 10
36820- 86 86 86 42 42 42 14 14 14 0 0 0
36821- 0 0 0 0 0 0 0 0 0 0 0 0
36822- 0 0 0 0 0 0 0 0 0 0 0 0
36823- 0 0 0 0 0 0 0 0 0 0 0 0
36824- 0 0 0 0 0 0 0 0 0 0 0 0
36825- 0 0 0 0 0 0 0 0 0 0 0 0
36826- 0 0 0 0 0 0 0 0 0 0 0 0
36827- 0 0 0 0 0 0 0 0 0 0 0 0
36828- 0 0 1 0 0 1 0 0 1 0 0 0
36829- 0 0 0 0 0 0 0 0 0 0 0 0
36830- 0 0 0 0 0 0 0 0 0 0 0 0
36831- 0 0 0 0 0 0 0 0 0 0 0 0
36832- 0 0 0 0 0 0 0 0 0 0 0 0
36833- 0 0 0 0 0 0 0 0 0 14 14 14
36834- 46 46 46 86 86 86 2 2 6 2 2 6
36835- 38 38 38 116 116 116 94 94 94 22 22 22
36836- 22 22 22 2 2 6 2 2 6 2 2 6
36837- 14 14 14 86 86 86 138 138 138 162 162 162
36838-154 154 154 38 38 38 26 26 26 6 6 6
36839- 2 2 6 2 2 6 2 2 6 2 2 6
36840- 86 86 86 46 46 46 14 14 14 0 0 0
36841- 0 0 0 0 0 0 0 0 0 0 0 0
36842- 0 0 0 0 0 0 0 0 0 0 0 0
36843- 0 0 0 0 0 0 0 0 0 0 0 0
36844- 0 0 0 0 0 0 0 0 0 0 0 0
36845- 0 0 0 0 0 0 0 0 0 0 0 0
36846- 0 0 0 0 0 0 0 0 0 0 0 0
36847- 0 0 0 0 0 0 0 0 0 0 0 0
36848- 0 0 0 0 0 0 0 0 0 0 0 0
36849- 0 0 0 0 0 0 0 0 0 0 0 0
36850- 0 0 0 0 0 0 0 0 0 0 0 0
36851- 0 0 0 0 0 0 0 0 0 0 0 0
36852- 0 0 0 0 0 0 0 0 0 0 0 0
36853- 0 0 0 0 0 0 0 0 0 14 14 14
36854- 46 46 46 86 86 86 2 2 6 14 14 14
36855-134 134 134 198 198 198 195 195 195 116 116 116
36856- 10 10 10 2 2 6 2 2 6 6 6 6
36857-101 98 89 187 187 187 210 210 210 218 218 218
36858-214 214 214 134 134 134 14 14 14 6 6 6
36859- 2 2 6 2 2 6 2 2 6 2 2 6
36860- 86 86 86 50 50 50 18 18 18 6 6 6
36861- 0 0 0 0 0 0 0 0 0 0 0 0
36862- 0 0 0 0 0 0 0 0 0 0 0 0
36863- 0 0 0 0 0 0 0 0 0 0 0 0
36864- 0 0 0 0 0 0 0 0 0 0 0 0
36865- 0 0 0 0 0 0 0 0 0 0 0 0
36866- 0 0 0 0 0 0 0 0 0 0 0 0
36867- 0 0 0 0 0 0 0 0 1 0 0 0
36868- 0 0 1 0 0 1 0 0 1 0 0 0
36869- 0 0 0 0 0 0 0 0 0 0 0 0
36870- 0 0 0 0 0 0 0 0 0 0 0 0
36871- 0 0 0 0 0 0 0 0 0 0 0 0
36872- 0 0 0 0 0 0 0 0 0 0 0 0
36873- 0 0 0 0 0 0 0 0 0 14 14 14
36874- 46 46 46 86 86 86 2 2 6 54 54 54
36875-218 218 218 195 195 195 226 226 226 246 246 246
36876- 58 58 58 2 2 6 2 2 6 30 30 30
36877-210 210 210 253 253 253 174 174 174 123 123 123
36878-221 221 221 234 234 234 74 74 74 2 2 6
36879- 2 2 6 2 2 6 2 2 6 2 2 6
36880- 70 70 70 58 58 58 22 22 22 6 6 6
36881- 0 0 0 0 0 0 0 0 0 0 0 0
36882- 0 0 0 0 0 0 0 0 0 0 0 0
36883- 0 0 0 0 0 0 0 0 0 0 0 0
36884- 0 0 0 0 0 0 0 0 0 0 0 0
36885- 0 0 0 0 0 0 0 0 0 0 0 0
36886- 0 0 0 0 0 0 0 0 0 0 0 0
36887- 0 0 0 0 0 0 0 0 0 0 0 0
36888- 0 0 0 0 0 0 0 0 0 0 0 0
36889- 0 0 0 0 0 0 0 0 0 0 0 0
36890- 0 0 0 0 0 0 0 0 0 0 0 0
36891- 0 0 0 0 0 0 0 0 0 0 0 0
36892- 0 0 0 0 0 0 0 0 0 0 0 0
36893- 0 0 0 0 0 0 0 0 0 14 14 14
36894- 46 46 46 82 82 82 2 2 6 106 106 106
36895-170 170 170 26 26 26 86 86 86 226 226 226
36896-123 123 123 10 10 10 14 14 14 46 46 46
36897-231 231 231 190 190 190 6 6 6 70 70 70
36898- 90 90 90 238 238 238 158 158 158 2 2 6
36899- 2 2 6 2 2 6 2 2 6 2 2 6
36900- 70 70 70 58 58 58 22 22 22 6 6 6
36901- 0 0 0 0 0 0 0 0 0 0 0 0
36902- 0 0 0 0 0 0 0 0 0 0 0 0
36903- 0 0 0 0 0 0 0 0 0 0 0 0
36904- 0 0 0 0 0 0 0 0 0 0 0 0
36905- 0 0 0 0 0 0 0 0 0 0 0 0
36906- 0 0 0 0 0 0 0 0 0 0 0 0
36907- 0 0 0 0 0 0 0 0 1 0 0 0
36908- 0 0 1 0 0 1 0 0 1 0 0 0
36909- 0 0 0 0 0 0 0 0 0 0 0 0
36910- 0 0 0 0 0 0 0 0 0 0 0 0
36911- 0 0 0 0 0 0 0 0 0 0 0 0
36912- 0 0 0 0 0 0 0 0 0 0 0 0
36913- 0 0 0 0 0 0 0 0 0 14 14 14
36914- 42 42 42 86 86 86 6 6 6 116 116 116
36915-106 106 106 6 6 6 70 70 70 149 149 149
36916-128 128 128 18 18 18 38 38 38 54 54 54
36917-221 221 221 106 106 106 2 2 6 14 14 14
36918- 46 46 46 190 190 190 198 198 198 2 2 6
36919- 2 2 6 2 2 6 2 2 6 2 2 6
36920- 74 74 74 62 62 62 22 22 22 6 6 6
36921- 0 0 0 0 0 0 0 0 0 0 0 0
36922- 0 0 0 0 0 0 0 0 0 0 0 0
36923- 0 0 0 0 0 0 0 0 0 0 0 0
36924- 0 0 0 0 0 0 0 0 0 0 0 0
36925- 0 0 0 0 0 0 0 0 0 0 0 0
36926- 0 0 0 0 0 0 0 0 0 0 0 0
36927- 0 0 0 0 0 0 0 0 1 0 0 0
36928- 0 0 1 0 0 0 0 0 1 0 0 0
36929- 0 0 0 0 0 0 0 0 0 0 0 0
36930- 0 0 0 0 0 0 0 0 0 0 0 0
36931- 0 0 0 0 0 0 0 0 0 0 0 0
36932- 0 0 0 0 0 0 0 0 0 0 0 0
36933- 0 0 0 0 0 0 0 0 0 14 14 14
36934- 42 42 42 94 94 94 14 14 14 101 101 101
36935-128 128 128 2 2 6 18 18 18 116 116 116
36936-118 98 46 121 92 8 121 92 8 98 78 10
36937-162 162 162 106 106 106 2 2 6 2 2 6
36938- 2 2 6 195 195 195 195 195 195 6 6 6
36939- 2 2 6 2 2 6 2 2 6 2 2 6
36940- 74 74 74 62 62 62 22 22 22 6 6 6
36941- 0 0 0 0 0 0 0 0 0 0 0 0
36942- 0 0 0 0 0 0 0 0 0 0 0 0
36943- 0 0 0 0 0 0 0 0 0 0 0 0
36944- 0 0 0 0 0 0 0 0 0 0 0 0
36945- 0 0 0 0 0 0 0 0 0 0 0 0
36946- 0 0 0 0 0 0 0 0 0 0 0 0
36947- 0 0 0 0 0 0 0 0 1 0 0 1
36948- 0 0 1 0 0 0 0 0 1 0 0 0
36949- 0 0 0 0 0 0 0 0 0 0 0 0
36950- 0 0 0 0 0 0 0 0 0 0 0 0
36951- 0 0 0 0 0 0 0 0 0 0 0 0
36952- 0 0 0 0 0 0 0 0 0 0 0 0
36953- 0 0 0 0 0 0 0 0 0 10 10 10
36954- 38 38 38 90 90 90 14 14 14 58 58 58
36955-210 210 210 26 26 26 54 38 6 154 114 10
36956-226 170 11 236 186 11 225 175 15 184 144 12
36957-215 174 15 175 146 61 37 26 9 2 2 6
36958- 70 70 70 246 246 246 138 138 138 2 2 6
36959- 2 2 6 2 2 6 2 2 6 2 2 6
36960- 70 70 70 66 66 66 26 26 26 6 6 6
36961- 0 0 0 0 0 0 0 0 0 0 0 0
36962- 0 0 0 0 0 0 0 0 0 0 0 0
36963- 0 0 0 0 0 0 0 0 0 0 0 0
36964- 0 0 0 0 0 0 0 0 0 0 0 0
36965- 0 0 0 0 0 0 0 0 0 0 0 0
36966- 0 0 0 0 0 0 0 0 0 0 0 0
36967- 0 0 0 0 0 0 0 0 0 0 0 0
36968- 0 0 0 0 0 0 0 0 0 0 0 0
36969- 0 0 0 0 0 0 0 0 0 0 0 0
36970- 0 0 0 0 0 0 0 0 0 0 0 0
36971- 0 0 0 0 0 0 0 0 0 0 0 0
36972- 0 0 0 0 0 0 0 0 0 0 0 0
36973- 0 0 0 0 0 0 0 0 0 10 10 10
36974- 38 38 38 86 86 86 14 14 14 10 10 10
36975-195 195 195 188 164 115 192 133 9 225 175 15
36976-239 182 13 234 190 10 232 195 16 232 200 30
36977-245 207 45 241 208 19 232 195 16 184 144 12
36978-218 194 134 211 206 186 42 42 42 2 2 6
36979- 2 2 6 2 2 6 2 2 6 2 2 6
36980- 50 50 50 74 74 74 30 30 30 6 6 6
36981- 0 0 0 0 0 0 0 0 0 0 0 0
36982- 0 0 0 0 0 0 0 0 0 0 0 0
36983- 0 0 0 0 0 0 0 0 0 0 0 0
36984- 0 0 0 0 0 0 0 0 0 0 0 0
36985- 0 0 0 0 0 0 0 0 0 0 0 0
36986- 0 0 0 0 0 0 0 0 0 0 0 0
36987- 0 0 0 0 0 0 0 0 0 0 0 0
36988- 0 0 0 0 0 0 0 0 0 0 0 0
36989- 0 0 0 0 0 0 0 0 0 0 0 0
36990- 0 0 0 0 0 0 0 0 0 0 0 0
36991- 0 0 0 0 0 0 0 0 0 0 0 0
36992- 0 0 0 0 0 0 0 0 0 0 0 0
36993- 0 0 0 0 0 0 0 0 0 10 10 10
36994- 34 34 34 86 86 86 14 14 14 2 2 6
36995-121 87 25 192 133 9 219 162 10 239 182 13
36996-236 186 11 232 195 16 241 208 19 244 214 54
36997-246 218 60 246 218 38 246 215 20 241 208 19
36998-241 208 19 226 184 13 121 87 25 2 2 6
36999- 2 2 6 2 2 6 2 2 6 2 2 6
37000- 50 50 50 82 82 82 34 34 34 10 10 10
37001- 0 0 0 0 0 0 0 0 0 0 0 0
37002- 0 0 0 0 0 0 0 0 0 0 0 0
37003- 0 0 0 0 0 0 0 0 0 0 0 0
37004- 0 0 0 0 0 0 0 0 0 0 0 0
37005- 0 0 0 0 0 0 0 0 0 0 0 0
37006- 0 0 0 0 0 0 0 0 0 0 0 0
37007- 0 0 0 0 0 0 0 0 0 0 0 0
37008- 0 0 0 0 0 0 0 0 0 0 0 0
37009- 0 0 0 0 0 0 0 0 0 0 0 0
37010- 0 0 0 0 0 0 0 0 0 0 0 0
37011- 0 0 0 0 0 0 0 0 0 0 0 0
37012- 0 0 0 0 0 0 0 0 0 0 0 0
37013- 0 0 0 0 0 0 0 0 0 10 10 10
37014- 34 34 34 82 82 82 30 30 30 61 42 6
37015-180 123 7 206 145 10 230 174 11 239 182 13
37016-234 190 10 238 202 15 241 208 19 246 218 74
37017-246 218 38 246 215 20 246 215 20 246 215 20
37018-226 184 13 215 174 15 184 144 12 6 6 6
37019- 2 2 6 2 2 6 2 2 6 2 2 6
37020- 26 26 26 94 94 94 42 42 42 14 14 14
37021- 0 0 0 0 0 0 0 0 0 0 0 0
37022- 0 0 0 0 0 0 0 0 0 0 0 0
37023- 0 0 0 0 0 0 0 0 0 0 0 0
37024- 0 0 0 0 0 0 0 0 0 0 0 0
37025- 0 0 0 0 0 0 0 0 0 0 0 0
37026- 0 0 0 0 0 0 0 0 0 0 0 0
37027- 0 0 0 0 0 0 0 0 0 0 0 0
37028- 0 0 0 0 0 0 0 0 0 0 0 0
37029- 0 0 0 0 0 0 0 0 0 0 0 0
37030- 0 0 0 0 0 0 0 0 0 0 0 0
37031- 0 0 0 0 0 0 0 0 0 0 0 0
37032- 0 0 0 0 0 0 0 0 0 0 0 0
37033- 0 0 0 0 0 0 0 0 0 10 10 10
37034- 30 30 30 78 78 78 50 50 50 104 69 6
37035-192 133 9 216 158 10 236 178 12 236 186 11
37036-232 195 16 241 208 19 244 214 54 245 215 43
37037-246 215 20 246 215 20 241 208 19 198 155 10
37038-200 144 11 216 158 10 156 118 10 2 2 6
37039- 2 2 6 2 2 6 2 2 6 2 2 6
37040- 6 6 6 90 90 90 54 54 54 18 18 18
37041- 6 6 6 0 0 0 0 0 0 0 0 0
37042- 0 0 0 0 0 0 0 0 0 0 0 0
37043- 0 0 0 0 0 0 0 0 0 0 0 0
37044- 0 0 0 0 0 0 0 0 0 0 0 0
37045- 0 0 0 0 0 0 0 0 0 0 0 0
37046- 0 0 0 0 0 0 0 0 0 0 0 0
37047- 0 0 0 0 0 0 0 0 0 0 0 0
37048- 0 0 0 0 0 0 0 0 0 0 0 0
37049- 0 0 0 0 0 0 0 0 0 0 0 0
37050- 0 0 0 0 0 0 0 0 0 0 0 0
37051- 0 0 0 0 0 0 0 0 0 0 0 0
37052- 0 0 0 0 0 0 0 0 0 0 0 0
37053- 0 0 0 0 0 0 0 0 0 10 10 10
37054- 30 30 30 78 78 78 46 46 46 22 22 22
37055-137 92 6 210 162 10 239 182 13 238 190 10
37056-238 202 15 241 208 19 246 215 20 246 215 20
37057-241 208 19 203 166 17 185 133 11 210 150 10
37058-216 158 10 210 150 10 102 78 10 2 2 6
37059- 6 6 6 54 54 54 14 14 14 2 2 6
37060- 2 2 6 62 62 62 74 74 74 30 30 30
37061- 10 10 10 0 0 0 0 0 0 0 0 0
37062- 0 0 0 0 0 0 0 0 0 0 0 0
37063- 0 0 0 0 0 0 0 0 0 0 0 0
37064- 0 0 0 0 0 0 0 0 0 0 0 0
37065- 0 0 0 0 0 0 0 0 0 0 0 0
37066- 0 0 0 0 0 0 0 0 0 0 0 0
37067- 0 0 0 0 0 0 0 0 0 0 0 0
37068- 0 0 0 0 0 0 0 0 0 0 0 0
37069- 0 0 0 0 0 0 0 0 0 0 0 0
37070- 0 0 0 0 0 0 0 0 0 0 0 0
37071- 0 0 0 0 0 0 0 0 0 0 0 0
37072- 0 0 0 0 0 0 0 0 0 0 0 0
37073- 0 0 0 0 0 0 0 0 0 10 10 10
37074- 34 34 34 78 78 78 50 50 50 6 6 6
37075- 94 70 30 139 102 15 190 146 13 226 184 13
37076-232 200 30 232 195 16 215 174 15 190 146 13
37077-168 122 10 192 133 9 210 150 10 213 154 11
37078-202 150 34 182 157 106 101 98 89 2 2 6
37079- 2 2 6 78 78 78 116 116 116 58 58 58
37080- 2 2 6 22 22 22 90 90 90 46 46 46
37081- 18 18 18 6 6 6 0 0 0 0 0 0
37082- 0 0 0 0 0 0 0 0 0 0 0 0
37083- 0 0 0 0 0 0 0 0 0 0 0 0
37084- 0 0 0 0 0 0 0 0 0 0 0 0
37085- 0 0 0 0 0 0 0 0 0 0 0 0
37086- 0 0 0 0 0 0 0 0 0 0 0 0
37087- 0 0 0 0 0 0 0 0 0 0 0 0
37088- 0 0 0 0 0 0 0 0 0 0 0 0
37089- 0 0 0 0 0 0 0 0 0 0 0 0
37090- 0 0 0 0 0 0 0 0 0 0 0 0
37091- 0 0 0 0 0 0 0 0 0 0 0 0
37092- 0 0 0 0 0 0 0 0 0 0 0 0
37093- 0 0 0 0 0 0 0 0 0 10 10 10
37094- 38 38 38 86 86 86 50 50 50 6 6 6
37095-128 128 128 174 154 114 156 107 11 168 122 10
37096-198 155 10 184 144 12 197 138 11 200 144 11
37097-206 145 10 206 145 10 197 138 11 188 164 115
37098-195 195 195 198 198 198 174 174 174 14 14 14
37099- 2 2 6 22 22 22 116 116 116 116 116 116
37100- 22 22 22 2 2 6 74 74 74 70 70 70
37101- 30 30 30 10 10 10 0 0 0 0 0 0
37102- 0 0 0 0 0 0 0 0 0 0 0 0
37103- 0 0 0 0 0 0 0 0 0 0 0 0
37104- 0 0 0 0 0 0 0 0 0 0 0 0
37105- 0 0 0 0 0 0 0 0 0 0 0 0
37106- 0 0 0 0 0 0 0 0 0 0 0 0
37107- 0 0 0 0 0 0 0 0 0 0 0 0
37108- 0 0 0 0 0 0 0 0 0 0 0 0
37109- 0 0 0 0 0 0 0 0 0 0 0 0
37110- 0 0 0 0 0 0 0 0 0 0 0 0
37111- 0 0 0 0 0 0 0 0 0 0 0 0
37112- 0 0 0 0 0 0 0 0 0 0 0 0
37113- 0 0 0 0 0 0 6 6 6 18 18 18
37114- 50 50 50 101 101 101 26 26 26 10 10 10
37115-138 138 138 190 190 190 174 154 114 156 107 11
37116-197 138 11 200 144 11 197 138 11 192 133 9
37117-180 123 7 190 142 34 190 178 144 187 187 187
37118-202 202 202 221 221 221 214 214 214 66 66 66
37119- 2 2 6 2 2 6 50 50 50 62 62 62
37120- 6 6 6 2 2 6 10 10 10 90 90 90
37121- 50 50 50 18 18 18 6 6 6 0 0 0
37122- 0 0 0 0 0 0 0 0 0 0 0 0
37123- 0 0 0 0 0 0 0 0 0 0 0 0
37124- 0 0 0 0 0 0 0 0 0 0 0 0
37125- 0 0 0 0 0 0 0 0 0 0 0 0
37126- 0 0 0 0 0 0 0 0 0 0 0 0
37127- 0 0 0 0 0 0 0 0 0 0 0 0
37128- 0 0 0 0 0 0 0 0 0 0 0 0
37129- 0 0 0 0 0 0 0 0 0 0 0 0
37130- 0 0 0 0 0 0 0 0 0 0 0 0
37131- 0 0 0 0 0 0 0 0 0 0 0 0
37132- 0 0 0 0 0 0 0 0 0 0 0 0
37133- 0 0 0 0 0 0 10 10 10 34 34 34
37134- 74 74 74 74 74 74 2 2 6 6 6 6
37135-144 144 144 198 198 198 190 190 190 178 166 146
37136-154 121 60 156 107 11 156 107 11 168 124 44
37137-174 154 114 187 187 187 190 190 190 210 210 210
37138-246 246 246 253 253 253 253 253 253 182 182 182
37139- 6 6 6 2 2 6 2 2 6 2 2 6
37140- 2 2 6 2 2 6 2 2 6 62 62 62
37141- 74 74 74 34 34 34 14 14 14 0 0 0
37142- 0 0 0 0 0 0 0 0 0 0 0 0
37143- 0 0 0 0 0 0 0 0 0 0 0 0
37144- 0 0 0 0 0 0 0 0 0 0 0 0
37145- 0 0 0 0 0 0 0 0 0 0 0 0
37146- 0 0 0 0 0 0 0 0 0 0 0 0
37147- 0 0 0 0 0 0 0 0 0 0 0 0
37148- 0 0 0 0 0 0 0 0 0 0 0 0
37149- 0 0 0 0 0 0 0 0 0 0 0 0
37150- 0 0 0 0 0 0 0 0 0 0 0 0
37151- 0 0 0 0 0 0 0 0 0 0 0 0
37152- 0 0 0 0 0 0 0 0 0 0 0 0
37153- 0 0 0 10 10 10 22 22 22 54 54 54
37154- 94 94 94 18 18 18 2 2 6 46 46 46
37155-234 234 234 221 221 221 190 190 190 190 190 190
37156-190 190 190 187 187 187 187 187 187 190 190 190
37157-190 190 190 195 195 195 214 214 214 242 242 242
37158-253 253 253 253 253 253 253 253 253 253 253 253
37159- 82 82 82 2 2 6 2 2 6 2 2 6
37160- 2 2 6 2 2 6 2 2 6 14 14 14
37161- 86 86 86 54 54 54 22 22 22 6 6 6
37162- 0 0 0 0 0 0 0 0 0 0 0 0
37163- 0 0 0 0 0 0 0 0 0 0 0 0
37164- 0 0 0 0 0 0 0 0 0 0 0 0
37165- 0 0 0 0 0 0 0 0 0 0 0 0
37166- 0 0 0 0 0 0 0 0 0 0 0 0
37167- 0 0 0 0 0 0 0 0 0 0 0 0
37168- 0 0 0 0 0 0 0 0 0 0 0 0
37169- 0 0 0 0 0 0 0 0 0 0 0 0
37170- 0 0 0 0 0 0 0 0 0 0 0 0
37171- 0 0 0 0 0 0 0 0 0 0 0 0
37172- 0 0 0 0 0 0 0 0 0 0 0 0
37173- 6 6 6 18 18 18 46 46 46 90 90 90
37174- 46 46 46 18 18 18 6 6 6 182 182 182
37175-253 253 253 246 246 246 206 206 206 190 190 190
37176-190 190 190 190 190 190 190 190 190 190 190 190
37177-206 206 206 231 231 231 250 250 250 253 253 253
37178-253 253 253 253 253 253 253 253 253 253 253 253
37179-202 202 202 14 14 14 2 2 6 2 2 6
37180- 2 2 6 2 2 6 2 2 6 2 2 6
37181- 42 42 42 86 86 86 42 42 42 18 18 18
37182- 6 6 6 0 0 0 0 0 0 0 0 0
37183- 0 0 0 0 0 0 0 0 0 0 0 0
37184- 0 0 0 0 0 0 0 0 0 0 0 0
37185- 0 0 0 0 0 0 0 0 0 0 0 0
37186- 0 0 0 0 0 0 0 0 0 0 0 0
37187- 0 0 0 0 0 0 0 0 0 0 0 0
37188- 0 0 0 0 0 0 0 0 0 0 0 0
37189- 0 0 0 0 0 0 0 0 0 0 0 0
37190- 0 0 0 0 0 0 0 0 0 0 0 0
37191- 0 0 0 0 0 0 0 0 0 0 0 0
37192- 0 0 0 0 0 0 0 0 0 6 6 6
37193- 14 14 14 38 38 38 74 74 74 66 66 66
37194- 2 2 6 6 6 6 90 90 90 250 250 250
37195-253 253 253 253 253 253 238 238 238 198 198 198
37196-190 190 190 190 190 190 195 195 195 221 221 221
37197-246 246 246 253 253 253 253 253 253 253 253 253
37198-253 253 253 253 253 253 253 253 253 253 253 253
37199-253 253 253 82 82 82 2 2 6 2 2 6
37200- 2 2 6 2 2 6 2 2 6 2 2 6
37201- 2 2 6 78 78 78 70 70 70 34 34 34
37202- 14 14 14 6 6 6 0 0 0 0 0 0
37203- 0 0 0 0 0 0 0 0 0 0 0 0
37204- 0 0 0 0 0 0 0 0 0 0 0 0
37205- 0 0 0 0 0 0 0 0 0 0 0 0
37206- 0 0 0 0 0 0 0 0 0 0 0 0
37207- 0 0 0 0 0 0 0 0 0 0 0 0
37208- 0 0 0 0 0 0 0 0 0 0 0 0
37209- 0 0 0 0 0 0 0 0 0 0 0 0
37210- 0 0 0 0 0 0 0 0 0 0 0 0
37211- 0 0 0 0 0 0 0 0 0 0 0 0
37212- 0 0 0 0 0 0 0 0 0 14 14 14
37213- 34 34 34 66 66 66 78 78 78 6 6 6
37214- 2 2 6 18 18 18 218 218 218 253 253 253
37215-253 253 253 253 253 253 253 253 253 246 246 246
37216-226 226 226 231 231 231 246 246 246 253 253 253
37217-253 253 253 253 253 253 253 253 253 253 253 253
37218-253 253 253 253 253 253 253 253 253 253 253 253
37219-253 253 253 178 178 178 2 2 6 2 2 6
37220- 2 2 6 2 2 6 2 2 6 2 2 6
37221- 2 2 6 18 18 18 90 90 90 62 62 62
37222- 30 30 30 10 10 10 0 0 0 0 0 0
37223- 0 0 0 0 0 0 0 0 0 0 0 0
37224- 0 0 0 0 0 0 0 0 0 0 0 0
37225- 0 0 0 0 0 0 0 0 0 0 0 0
37226- 0 0 0 0 0 0 0 0 0 0 0 0
37227- 0 0 0 0 0 0 0 0 0 0 0 0
37228- 0 0 0 0 0 0 0 0 0 0 0 0
37229- 0 0 0 0 0 0 0 0 0 0 0 0
37230- 0 0 0 0 0 0 0 0 0 0 0 0
37231- 0 0 0 0 0 0 0 0 0 0 0 0
37232- 0 0 0 0 0 0 10 10 10 26 26 26
37233- 58 58 58 90 90 90 18 18 18 2 2 6
37234- 2 2 6 110 110 110 253 253 253 253 253 253
37235-253 253 253 253 253 253 253 253 253 253 253 253
37236-250 250 250 253 253 253 253 253 253 253 253 253
37237-253 253 253 253 253 253 253 253 253 253 253 253
37238-253 253 253 253 253 253 253 253 253 253 253 253
37239-253 253 253 231 231 231 18 18 18 2 2 6
37240- 2 2 6 2 2 6 2 2 6 2 2 6
37241- 2 2 6 2 2 6 18 18 18 94 94 94
37242- 54 54 54 26 26 26 10 10 10 0 0 0
37243- 0 0 0 0 0 0 0 0 0 0 0 0
37244- 0 0 0 0 0 0 0 0 0 0 0 0
37245- 0 0 0 0 0 0 0 0 0 0 0 0
37246- 0 0 0 0 0 0 0 0 0 0 0 0
37247- 0 0 0 0 0 0 0 0 0 0 0 0
37248- 0 0 0 0 0 0 0 0 0 0 0 0
37249- 0 0 0 0 0 0 0 0 0 0 0 0
37250- 0 0 0 0 0 0 0 0 0 0 0 0
37251- 0 0 0 0 0 0 0 0 0 0 0 0
37252- 0 0 0 6 6 6 22 22 22 50 50 50
37253- 90 90 90 26 26 26 2 2 6 2 2 6
37254- 14 14 14 195 195 195 250 250 250 253 253 253
37255-253 253 253 253 253 253 253 253 253 253 253 253
37256-253 253 253 253 253 253 253 253 253 253 253 253
37257-253 253 253 253 253 253 253 253 253 253 253 253
37258-253 253 253 253 253 253 253 253 253 253 253 253
37259-250 250 250 242 242 242 54 54 54 2 2 6
37260- 2 2 6 2 2 6 2 2 6 2 2 6
37261- 2 2 6 2 2 6 2 2 6 38 38 38
37262- 86 86 86 50 50 50 22 22 22 6 6 6
37263- 0 0 0 0 0 0 0 0 0 0 0 0
37264- 0 0 0 0 0 0 0 0 0 0 0 0
37265- 0 0 0 0 0 0 0 0 0 0 0 0
37266- 0 0 0 0 0 0 0 0 0 0 0 0
37267- 0 0 0 0 0 0 0 0 0 0 0 0
37268- 0 0 0 0 0 0 0 0 0 0 0 0
37269- 0 0 0 0 0 0 0 0 0 0 0 0
37270- 0 0 0 0 0 0 0 0 0 0 0 0
37271- 0 0 0 0 0 0 0 0 0 0 0 0
37272- 6 6 6 14 14 14 38 38 38 82 82 82
37273- 34 34 34 2 2 6 2 2 6 2 2 6
37274- 42 42 42 195 195 195 246 246 246 253 253 253
37275-253 253 253 253 253 253 253 253 253 250 250 250
37276-242 242 242 242 242 242 250 250 250 253 253 253
37277-253 253 253 253 253 253 253 253 253 253 253 253
37278-253 253 253 250 250 250 246 246 246 238 238 238
37279-226 226 226 231 231 231 101 101 101 6 6 6
37280- 2 2 6 2 2 6 2 2 6 2 2 6
37281- 2 2 6 2 2 6 2 2 6 2 2 6
37282- 38 38 38 82 82 82 42 42 42 14 14 14
37283- 6 6 6 0 0 0 0 0 0 0 0 0
37284- 0 0 0 0 0 0 0 0 0 0 0 0
37285- 0 0 0 0 0 0 0 0 0 0 0 0
37286- 0 0 0 0 0 0 0 0 0 0 0 0
37287- 0 0 0 0 0 0 0 0 0 0 0 0
37288- 0 0 0 0 0 0 0 0 0 0 0 0
37289- 0 0 0 0 0 0 0 0 0 0 0 0
37290- 0 0 0 0 0 0 0 0 0 0 0 0
37291- 0 0 0 0 0 0 0 0 0 0 0 0
37292- 10 10 10 26 26 26 62 62 62 66 66 66
37293- 2 2 6 2 2 6 2 2 6 6 6 6
37294- 70 70 70 170 170 170 206 206 206 234 234 234
37295-246 246 246 250 250 250 250 250 250 238 238 238
37296-226 226 226 231 231 231 238 238 238 250 250 250
37297-250 250 250 250 250 250 246 246 246 231 231 231
37298-214 214 214 206 206 206 202 202 202 202 202 202
37299-198 198 198 202 202 202 182 182 182 18 18 18
37300- 2 2 6 2 2 6 2 2 6 2 2 6
37301- 2 2 6 2 2 6 2 2 6 2 2 6
37302- 2 2 6 62 62 62 66 66 66 30 30 30
37303- 10 10 10 0 0 0 0 0 0 0 0 0
37304- 0 0 0 0 0 0 0 0 0 0 0 0
37305- 0 0 0 0 0 0 0 0 0 0 0 0
37306- 0 0 0 0 0 0 0 0 0 0 0 0
37307- 0 0 0 0 0 0 0 0 0 0 0 0
37308- 0 0 0 0 0 0 0 0 0 0 0 0
37309- 0 0 0 0 0 0 0 0 0 0 0 0
37310- 0 0 0 0 0 0 0 0 0 0 0 0
37311- 0 0 0 0 0 0 0 0 0 0 0 0
37312- 14 14 14 42 42 42 82 82 82 18 18 18
37313- 2 2 6 2 2 6 2 2 6 10 10 10
37314- 94 94 94 182 182 182 218 218 218 242 242 242
37315-250 250 250 253 253 253 253 253 253 250 250 250
37316-234 234 234 253 253 253 253 253 253 253 253 253
37317-253 253 253 253 253 253 253 253 253 246 246 246
37318-238 238 238 226 226 226 210 210 210 202 202 202
37319-195 195 195 195 195 195 210 210 210 158 158 158
37320- 6 6 6 14 14 14 50 50 50 14 14 14
37321- 2 2 6 2 2 6 2 2 6 2 2 6
37322- 2 2 6 6 6 6 86 86 86 46 46 46
37323- 18 18 18 6 6 6 0 0 0 0 0 0
37324- 0 0 0 0 0 0 0 0 0 0 0 0
37325- 0 0 0 0 0 0 0 0 0 0 0 0
37326- 0 0 0 0 0 0 0 0 0 0 0 0
37327- 0 0 0 0 0 0 0 0 0 0 0 0
37328- 0 0 0 0 0 0 0 0 0 0 0 0
37329- 0 0 0 0 0 0 0 0 0 0 0 0
37330- 0 0 0 0 0 0 0 0 0 0 0 0
37331- 0 0 0 0 0 0 0 0 0 6 6 6
37332- 22 22 22 54 54 54 70 70 70 2 2 6
37333- 2 2 6 10 10 10 2 2 6 22 22 22
37334-166 166 166 231 231 231 250 250 250 253 253 253
37335-253 253 253 253 253 253 253 253 253 250 250 250
37336-242 242 242 253 253 253 253 253 253 253 253 253
37337-253 253 253 253 253 253 253 253 253 253 253 253
37338-253 253 253 253 253 253 253 253 253 246 246 246
37339-231 231 231 206 206 206 198 198 198 226 226 226
37340- 94 94 94 2 2 6 6 6 6 38 38 38
37341- 30 30 30 2 2 6 2 2 6 2 2 6
37342- 2 2 6 2 2 6 62 62 62 66 66 66
37343- 26 26 26 10 10 10 0 0 0 0 0 0
37344- 0 0 0 0 0 0 0 0 0 0 0 0
37345- 0 0 0 0 0 0 0 0 0 0 0 0
37346- 0 0 0 0 0 0 0 0 0 0 0 0
37347- 0 0 0 0 0 0 0 0 0 0 0 0
37348- 0 0 0 0 0 0 0 0 0 0 0 0
37349- 0 0 0 0 0 0 0 0 0 0 0 0
37350- 0 0 0 0 0 0 0 0 0 0 0 0
37351- 0 0 0 0 0 0 0 0 0 10 10 10
37352- 30 30 30 74 74 74 50 50 50 2 2 6
37353- 26 26 26 26 26 26 2 2 6 106 106 106
37354-238 238 238 253 253 253 253 253 253 253 253 253
37355-253 253 253 253 253 253 253 253 253 253 253 253
37356-253 253 253 253 253 253 253 253 253 253 253 253
37357-253 253 253 253 253 253 253 253 253 253 253 253
37358-253 253 253 253 253 253 253 253 253 253 253 253
37359-253 253 253 246 246 246 218 218 218 202 202 202
37360-210 210 210 14 14 14 2 2 6 2 2 6
37361- 30 30 30 22 22 22 2 2 6 2 2 6
37362- 2 2 6 2 2 6 18 18 18 86 86 86
37363- 42 42 42 14 14 14 0 0 0 0 0 0
37364- 0 0 0 0 0 0 0 0 0 0 0 0
37365- 0 0 0 0 0 0 0 0 0 0 0 0
37366- 0 0 0 0 0 0 0 0 0 0 0 0
37367- 0 0 0 0 0 0 0 0 0 0 0 0
37368- 0 0 0 0 0 0 0 0 0 0 0 0
37369- 0 0 0 0 0 0 0 0 0 0 0 0
37370- 0 0 0 0 0 0 0 0 0 0 0 0
37371- 0 0 0 0 0 0 0 0 0 14 14 14
37372- 42 42 42 90 90 90 22 22 22 2 2 6
37373- 42 42 42 2 2 6 18 18 18 218 218 218
37374-253 253 253 253 253 253 253 253 253 253 253 253
37375-253 253 253 253 253 253 253 253 253 253 253 253
37376-253 253 253 253 253 253 253 253 253 253 253 253
37377-253 253 253 253 253 253 253 253 253 253 253 253
37378-253 253 253 253 253 253 253 253 253 253 253 253
37379-253 253 253 253 253 253 250 250 250 221 221 221
37380-218 218 218 101 101 101 2 2 6 14 14 14
37381- 18 18 18 38 38 38 10 10 10 2 2 6
37382- 2 2 6 2 2 6 2 2 6 78 78 78
37383- 58 58 58 22 22 22 6 6 6 0 0 0
37384- 0 0 0 0 0 0 0 0 0 0 0 0
37385- 0 0 0 0 0 0 0 0 0 0 0 0
37386- 0 0 0 0 0 0 0 0 0 0 0 0
37387- 0 0 0 0 0 0 0 0 0 0 0 0
37388- 0 0 0 0 0 0 0 0 0 0 0 0
37389- 0 0 0 0 0 0 0 0 0 0 0 0
37390- 0 0 0 0 0 0 0 0 0 0 0 0
37391- 0 0 0 0 0 0 6 6 6 18 18 18
37392- 54 54 54 82 82 82 2 2 6 26 26 26
37393- 22 22 22 2 2 6 123 123 123 253 253 253
37394-253 253 253 253 253 253 253 253 253 253 253 253
37395-253 253 253 253 253 253 253 253 253 253 253 253
37396-253 253 253 253 253 253 253 253 253 253 253 253
37397-253 253 253 253 253 253 253 253 253 253 253 253
37398-253 253 253 253 253 253 253 253 253 253 253 253
37399-253 253 253 253 253 253 253 253 253 250 250 250
37400-238 238 238 198 198 198 6 6 6 38 38 38
37401- 58 58 58 26 26 26 38 38 38 2 2 6
37402- 2 2 6 2 2 6 2 2 6 46 46 46
37403- 78 78 78 30 30 30 10 10 10 0 0 0
37404- 0 0 0 0 0 0 0 0 0 0 0 0
37405- 0 0 0 0 0 0 0 0 0 0 0 0
37406- 0 0 0 0 0 0 0 0 0 0 0 0
37407- 0 0 0 0 0 0 0 0 0 0 0 0
37408- 0 0 0 0 0 0 0 0 0 0 0 0
37409- 0 0 0 0 0 0 0 0 0 0 0 0
37410- 0 0 0 0 0 0 0 0 0 0 0 0
37411- 0 0 0 0 0 0 10 10 10 30 30 30
37412- 74 74 74 58 58 58 2 2 6 42 42 42
37413- 2 2 6 22 22 22 231 231 231 253 253 253
37414-253 253 253 253 253 253 253 253 253 253 253 253
37415-253 253 253 253 253 253 253 253 253 250 250 250
37416-253 253 253 253 253 253 253 253 253 253 253 253
37417-253 253 253 253 253 253 253 253 253 253 253 253
37418-253 253 253 253 253 253 253 253 253 253 253 253
37419-253 253 253 253 253 253 253 253 253 253 253 253
37420-253 253 253 246 246 246 46 46 46 38 38 38
37421- 42 42 42 14 14 14 38 38 38 14 14 14
37422- 2 2 6 2 2 6 2 2 6 6 6 6
37423- 86 86 86 46 46 46 14 14 14 0 0 0
37424- 0 0 0 0 0 0 0 0 0 0 0 0
37425- 0 0 0 0 0 0 0 0 0 0 0 0
37426- 0 0 0 0 0 0 0 0 0 0 0 0
37427- 0 0 0 0 0 0 0 0 0 0 0 0
37428- 0 0 0 0 0 0 0 0 0 0 0 0
37429- 0 0 0 0 0 0 0 0 0 0 0 0
37430- 0 0 0 0 0 0 0 0 0 0 0 0
37431- 0 0 0 6 6 6 14 14 14 42 42 42
37432- 90 90 90 18 18 18 18 18 18 26 26 26
37433- 2 2 6 116 116 116 253 253 253 253 253 253
37434-253 253 253 253 253 253 253 253 253 253 253 253
37435-253 253 253 253 253 253 250 250 250 238 238 238
37436-253 253 253 253 253 253 253 253 253 253 253 253
37437-253 253 253 253 253 253 253 253 253 253 253 253
37438-253 253 253 253 253 253 253 253 253 253 253 253
37439-253 253 253 253 253 253 253 253 253 253 253 253
37440-253 253 253 253 253 253 94 94 94 6 6 6
37441- 2 2 6 2 2 6 10 10 10 34 34 34
37442- 2 2 6 2 2 6 2 2 6 2 2 6
37443- 74 74 74 58 58 58 22 22 22 6 6 6
37444- 0 0 0 0 0 0 0 0 0 0 0 0
37445- 0 0 0 0 0 0 0 0 0 0 0 0
37446- 0 0 0 0 0 0 0 0 0 0 0 0
37447- 0 0 0 0 0 0 0 0 0 0 0 0
37448- 0 0 0 0 0 0 0 0 0 0 0 0
37449- 0 0 0 0 0 0 0 0 0 0 0 0
37450- 0 0 0 0 0 0 0 0 0 0 0 0
37451- 0 0 0 10 10 10 26 26 26 66 66 66
37452- 82 82 82 2 2 6 38 38 38 6 6 6
37453- 14 14 14 210 210 210 253 253 253 253 253 253
37454-253 253 253 253 253 253 253 253 253 253 253 253
37455-253 253 253 253 253 253 246 246 246 242 242 242
37456-253 253 253 253 253 253 253 253 253 253 253 253
37457-253 253 253 253 253 253 253 253 253 253 253 253
37458-253 253 253 253 253 253 253 253 253 253 253 253
37459-253 253 253 253 253 253 253 253 253 253 253 253
37460-253 253 253 253 253 253 144 144 144 2 2 6
37461- 2 2 6 2 2 6 2 2 6 46 46 46
37462- 2 2 6 2 2 6 2 2 6 2 2 6
37463- 42 42 42 74 74 74 30 30 30 10 10 10
37464- 0 0 0 0 0 0 0 0 0 0 0 0
37465- 0 0 0 0 0 0 0 0 0 0 0 0
37466- 0 0 0 0 0 0 0 0 0 0 0 0
37467- 0 0 0 0 0 0 0 0 0 0 0 0
37468- 0 0 0 0 0 0 0 0 0 0 0 0
37469- 0 0 0 0 0 0 0 0 0 0 0 0
37470- 0 0 0 0 0 0 0 0 0 0 0 0
37471- 6 6 6 14 14 14 42 42 42 90 90 90
37472- 26 26 26 6 6 6 42 42 42 2 2 6
37473- 74 74 74 250 250 250 253 253 253 253 253 253
37474-253 253 253 253 253 253 253 253 253 253 253 253
37475-253 253 253 253 253 253 242 242 242 242 242 242
37476-253 253 253 253 253 253 253 253 253 253 253 253
37477-253 253 253 253 253 253 253 253 253 253 253 253
37478-253 253 253 253 253 253 253 253 253 253 253 253
37479-253 253 253 253 253 253 253 253 253 253 253 253
37480-253 253 253 253 253 253 182 182 182 2 2 6
37481- 2 2 6 2 2 6 2 2 6 46 46 46
37482- 2 2 6 2 2 6 2 2 6 2 2 6
37483- 10 10 10 86 86 86 38 38 38 10 10 10
37484- 0 0 0 0 0 0 0 0 0 0 0 0
37485- 0 0 0 0 0 0 0 0 0 0 0 0
37486- 0 0 0 0 0 0 0 0 0 0 0 0
37487- 0 0 0 0 0 0 0 0 0 0 0 0
37488- 0 0 0 0 0 0 0 0 0 0 0 0
37489- 0 0 0 0 0 0 0 0 0 0 0 0
37490- 0 0 0 0 0 0 0 0 0 0 0 0
37491- 10 10 10 26 26 26 66 66 66 82 82 82
37492- 2 2 6 22 22 22 18 18 18 2 2 6
37493-149 149 149 253 253 253 253 253 253 253 253 253
37494-253 253 253 253 253 253 253 253 253 253 253 253
37495-253 253 253 253 253 253 234 234 234 242 242 242
37496-253 253 253 253 253 253 253 253 253 253 253 253
37497-253 253 253 253 253 253 253 253 253 253 253 253
37498-253 253 253 253 253 253 253 253 253 253 253 253
37499-253 253 253 253 253 253 253 253 253 253 253 253
37500-253 253 253 253 253 253 206 206 206 2 2 6
37501- 2 2 6 2 2 6 2 2 6 38 38 38
37502- 2 2 6 2 2 6 2 2 6 2 2 6
37503- 6 6 6 86 86 86 46 46 46 14 14 14
37504- 0 0 0 0 0 0 0 0 0 0 0 0
37505- 0 0 0 0 0 0 0 0 0 0 0 0
37506- 0 0 0 0 0 0 0 0 0 0 0 0
37507- 0 0 0 0 0 0 0 0 0 0 0 0
37508- 0 0 0 0 0 0 0 0 0 0 0 0
37509- 0 0 0 0 0 0 0 0 0 0 0 0
37510- 0 0 0 0 0 0 0 0 0 6 6 6
37511- 18 18 18 46 46 46 86 86 86 18 18 18
37512- 2 2 6 34 34 34 10 10 10 6 6 6
37513-210 210 210 253 253 253 253 253 253 253 253 253
37514-253 253 253 253 253 253 253 253 253 253 253 253
37515-253 253 253 253 253 253 234 234 234 242 242 242
37516-253 253 253 253 253 253 253 253 253 253 253 253
37517-253 253 253 253 253 253 253 253 253 253 253 253
37518-253 253 253 253 253 253 253 253 253 253 253 253
37519-253 253 253 253 253 253 253 253 253 253 253 253
37520-253 253 253 253 253 253 221 221 221 6 6 6
37521- 2 2 6 2 2 6 6 6 6 30 30 30
37522- 2 2 6 2 2 6 2 2 6 2 2 6
37523- 2 2 6 82 82 82 54 54 54 18 18 18
37524- 6 6 6 0 0 0 0 0 0 0 0 0
37525- 0 0 0 0 0 0 0 0 0 0 0 0
37526- 0 0 0 0 0 0 0 0 0 0 0 0
37527- 0 0 0 0 0 0 0 0 0 0 0 0
37528- 0 0 0 0 0 0 0 0 0 0 0 0
37529- 0 0 0 0 0 0 0 0 0 0 0 0
37530- 0 0 0 0 0 0 0 0 0 10 10 10
37531- 26 26 26 66 66 66 62 62 62 2 2 6
37532- 2 2 6 38 38 38 10 10 10 26 26 26
37533-238 238 238 253 253 253 253 253 253 253 253 253
37534-253 253 253 253 253 253 253 253 253 253 253 253
37535-253 253 253 253 253 253 231 231 231 238 238 238
37536-253 253 253 253 253 253 253 253 253 253 253 253
37537-253 253 253 253 253 253 253 253 253 253 253 253
37538-253 253 253 253 253 253 253 253 253 253 253 253
37539-253 253 253 253 253 253 253 253 253 253 253 253
37540-253 253 253 253 253 253 231 231 231 6 6 6
37541- 2 2 6 2 2 6 10 10 10 30 30 30
37542- 2 2 6 2 2 6 2 2 6 2 2 6
37543- 2 2 6 66 66 66 58 58 58 22 22 22
37544- 6 6 6 0 0 0 0 0 0 0 0 0
37545- 0 0 0 0 0 0 0 0 0 0 0 0
37546- 0 0 0 0 0 0 0 0 0 0 0 0
37547- 0 0 0 0 0 0 0 0 0 0 0 0
37548- 0 0 0 0 0 0 0 0 0 0 0 0
37549- 0 0 0 0 0 0 0 0 0 0 0 0
37550- 0 0 0 0 0 0 0 0 0 10 10 10
37551- 38 38 38 78 78 78 6 6 6 2 2 6
37552- 2 2 6 46 46 46 14 14 14 42 42 42
37553-246 246 246 253 253 253 253 253 253 253 253 253
37554-253 253 253 253 253 253 253 253 253 253 253 253
37555-253 253 253 253 253 253 231 231 231 242 242 242
37556-253 253 253 253 253 253 253 253 253 253 253 253
37557-253 253 253 253 253 253 253 253 253 253 253 253
37558-253 253 253 253 253 253 253 253 253 253 253 253
37559-253 253 253 253 253 253 253 253 253 253 253 253
37560-253 253 253 253 253 253 234 234 234 10 10 10
37561- 2 2 6 2 2 6 22 22 22 14 14 14
37562- 2 2 6 2 2 6 2 2 6 2 2 6
37563- 2 2 6 66 66 66 62 62 62 22 22 22
37564- 6 6 6 0 0 0 0 0 0 0 0 0
37565- 0 0 0 0 0 0 0 0 0 0 0 0
37566- 0 0 0 0 0 0 0 0 0 0 0 0
37567- 0 0 0 0 0 0 0 0 0 0 0 0
37568- 0 0 0 0 0 0 0 0 0 0 0 0
37569- 0 0 0 0 0 0 0 0 0 0 0 0
37570- 0 0 0 0 0 0 6 6 6 18 18 18
37571- 50 50 50 74 74 74 2 2 6 2 2 6
37572- 14 14 14 70 70 70 34 34 34 62 62 62
37573-250 250 250 253 253 253 253 253 253 253 253 253
37574-253 253 253 253 253 253 253 253 253 253 253 253
37575-253 253 253 253 253 253 231 231 231 246 246 246
37576-253 253 253 253 253 253 253 253 253 253 253 253
37577-253 253 253 253 253 253 253 253 253 253 253 253
37578-253 253 253 253 253 253 253 253 253 253 253 253
37579-253 253 253 253 253 253 253 253 253 253 253 253
37580-253 253 253 253 253 253 234 234 234 14 14 14
37581- 2 2 6 2 2 6 30 30 30 2 2 6
37582- 2 2 6 2 2 6 2 2 6 2 2 6
37583- 2 2 6 66 66 66 62 62 62 22 22 22
37584- 6 6 6 0 0 0 0 0 0 0 0 0
37585- 0 0 0 0 0 0 0 0 0 0 0 0
37586- 0 0 0 0 0 0 0 0 0 0 0 0
37587- 0 0 0 0 0 0 0 0 0 0 0 0
37588- 0 0 0 0 0 0 0 0 0 0 0 0
37589- 0 0 0 0 0 0 0 0 0 0 0 0
37590- 0 0 0 0 0 0 6 6 6 18 18 18
37591- 54 54 54 62 62 62 2 2 6 2 2 6
37592- 2 2 6 30 30 30 46 46 46 70 70 70
37593-250 250 250 253 253 253 253 253 253 253 253 253
37594-253 253 253 253 253 253 253 253 253 253 253 253
37595-253 253 253 253 253 253 231 231 231 246 246 246
37596-253 253 253 253 253 253 253 253 253 253 253 253
37597-253 253 253 253 253 253 253 253 253 253 253 253
37598-253 253 253 253 253 253 253 253 253 253 253 253
37599-253 253 253 253 253 253 253 253 253 253 253 253
37600-253 253 253 253 253 253 226 226 226 10 10 10
37601- 2 2 6 6 6 6 30 30 30 2 2 6
37602- 2 2 6 2 2 6 2 2 6 2 2 6
37603- 2 2 6 66 66 66 58 58 58 22 22 22
37604- 6 6 6 0 0 0 0 0 0 0 0 0
37605- 0 0 0 0 0 0 0 0 0 0 0 0
37606- 0 0 0 0 0 0 0 0 0 0 0 0
37607- 0 0 0 0 0 0 0 0 0 0 0 0
37608- 0 0 0 0 0 0 0 0 0 0 0 0
37609- 0 0 0 0 0 0 0 0 0 0 0 0
37610- 0 0 0 0 0 0 6 6 6 22 22 22
37611- 58 58 58 62 62 62 2 2 6 2 2 6
37612- 2 2 6 2 2 6 30 30 30 78 78 78
37613-250 250 250 253 253 253 253 253 253 253 253 253
37614-253 253 253 253 253 253 253 253 253 253 253 253
37615-253 253 253 253 253 253 231 231 231 246 246 246
37616-253 253 253 253 253 253 253 253 253 253 253 253
37617-253 253 253 253 253 253 253 253 253 253 253 253
37618-253 253 253 253 253 253 253 253 253 253 253 253
37619-253 253 253 253 253 253 253 253 253 253 253 253
37620-253 253 253 253 253 253 206 206 206 2 2 6
37621- 22 22 22 34 34 34 18 14 6 22 22 22
37622- 26 26 26 18 18 18 6 6 6 2 2 6
37623- 2 2 6 82 82 82 54 54 54 18 18 18
37624- 6 6 6 0 0 0 0 0 0 0 0 0
37625- 0 0 0 0 0 0 0 0 0 0 0 0
37626- 0 0 0 0 0 0 0 0 0 0 0 0
37627- 0 0 0 0 0 0 0 0 0 0 0 0
37628- 0 0 0 0 0 0 0 0 0 0 0 0
37629- 0 0 0 0 0 0 0 0 0 0 0 0
37630- 0 0 0 0 0 0 6 6 6 26 26 26
37631- 62 62 62 106 106 106 74 54 14 185 133 11
37632-210 162 10 121 92 8 6 6 6 62 62 62
37633-238 238 238 253 253 253 253 253 253 253 253 253
37634-253 253 253 253 253 253 253 253 253 253 253 253
37635-253 253 253 253 253 253 231 231 231 246 246 246
37636-253 253 253 253 253 253 253 253 253 253 253 253
37637-253 253 253 253 253 253 253 253 253 253 253 253
37638-253 253 253 253 253 253 253 253 253 253 253 253
37639-253 253 253 253 253 253 253 253 253 253 253 253
37640-253 253 253 253 253 253 158 158 158 18 18 18
37641- 14 14 14 2 2 6 2 2 6 2 2 6
37642- 6 6 6 18 18 18 66 66 66 38 38 38
37643- 6 6 6 94 94 94 50 50 50 18 18 18
37644- 6 6 6 0 0 0 0 0 0 0 0 0
37645- 0 0 0 0 0 0 0 0 0 0 0 0
37646- 0 0 0 0 0 0 0 0 0 0 0 0
37647- 0 0 0 0 0 0 0 0 0 0 0 0
37648- 0 0 0 0 0 0 0 0 0 0 0 0
37649- 0 0 0 0 0 0 0 0 0 6 6 6
37650- 10 10 10 10 10 10 18 18 18 38 38 38
37651- 78 78 78 142 134 106 216 158 10 242 186 14
37652-246 190 14 246 190 14 156 118 10 10 10 10
37653- 90 90 90 238 238 238 253 253 253 253 253 253
37654-253 253 253 253 253 253 253 253 253 253 253 253
37655-253 253 253 253 253 253 231 231 231 250 250 250
37656-253 253 253 253 253 253 253 253 253 253 253 253
37657-253 253 253 253 253 253 253 253 253 253 253 253
37658-253 253 253 253 253 253 253 253 253 253 253 253
37659-253 253 253 253 253 253 253 253 253 246 230 190
37660-238 204 91 238 204 91 181 142 44 37 26 9
37661- 2 2 6 2 2 6 2 2 6 2 2 6
37662- 2 2 6 2 2 6 38 38 38 46 46 46
37663- 26 26 26 106 106 106 54 54 54 18 18 18
37664- 6 6 6 0 0 0 0 0 0 0 0 0
37665- 0 0 0 0 0 0 0 0 0 0 0 0
37666- 0 0 0 0 0 0 0 0 0 0 0 0
37667- 0 0 0 0 0 0 0 0 0 0 0 0
37668- 0 0 0 0 0 0 0 0 0 0 0 0
37669- 0 0 0 6 6 6 14 14 14 22 22 22
37670- 30 30 30 38 38 38 50 50 50 70 70 70
37671-106 106 106 190 142 34 226 170 11 242 186 14
37672-246 190 14 246 190 14 246 190 14 154 114 10
37673- 6 6 6 74 74 74 226 226 226 253 253 253
37674-253 253 253 253 253 253 253 253 253 253 253 253
37675-253 253 253 253 253 253 231 231 231 250 250 250
37676-253 253 253 253 253 253 253 253 253 253 253 253
37677-253 253 253 253 253 253 253 253 253 253 253 253
37678-253 253 253 253 253 253 253 253 253 253 253 253
37679-253 253 253 253 253 253 253 253 253 228 184 62
37680-241 196 14 241 208 19 232 195 16 38 30 10
37681- 2 2 6 2 2 6 2 2 6 2 2 6
37682- 2 2 6 6 6 6 30 30 30 26 26 26
37683-203 166 17 154 142 90 66 66 66 26 26 26
37684- 6 6 6 0 0 0 0 0 0 0 0 0
37685- 0 0 0 0 0 0 0 0 0 0 0 0
37686- 0 0 0 0 0 0 0 0 0 0 0 0
37687- 0 0 0 0 0 0 0 0 0 0 0 0
37688- 0 0 0 0 0 0 0 0 0 0 0 0
37689- 6 6 6 18 18 18 38 38 38 58 58 58
37690- 78 78 78 86 86 86 101 101 101 123 123 123
37691-175 146 61 210 150 10 234 174 13 246 186 14
37692-246 190 14 246 190 14 246 190 14 238 190 10
37693-102 78 10 2 2 6 46 46 46 198 198 198
37694-253 253 253 253 253 253 253 253 253 253 253 253
37695-253 253 253 253 253 253 234 234 234 242 242 242
37696-253 253 253 253 253 253 253 253 253 253 253 253
37697-253 253 253 253 253 253 253 253 253 253 253 253
37698-253 253 253 253 253 253 253 253 253 253 253 253
37699-253 253 253 253 253 253 253 253 253 224 178 62
37700-242 186 14 241 196 14 210 166 10 22 18 6
37701- 2 2 6 2 2 6 2 2 6 2 2 6
37702- 2 2 6 2 2 6 6 6 6 121 92 8
37703-238 202 15 232 195 16 82 82 82 34 34 34
37704- 10 10 10 0 0 0 0 0 0 0 0 0
37705- 0 0 0 0 0 0 0 0 0 0 0 0
37706- 0 0 0 0 0 0 0 0 0 0 0 0
37707- 0 0 0 0 0 0 0 0 0 0 0 0
37708- 0 0 0 0 0 0 0 0 0 0 0 0
37709- 14 14 14 38 38 38 70 70 70 154 122 46
37710-190 142 34 200 144 11 197 138 11 197 138 11
37711-213 154 11 226 170 11 242 186 14 246 190 14
37712-246 190 14 246 190 14 246 190 14 246 190 14
37713-225 175 15 46 32 6 2 2 6 22 22 22
37714-158 158 158 250 250 250 253 253 253 253 253 253
37715-253 253 253 253 253 253 253 253 253 253 253 253
37716-253 253 253 253 253 253 253 253 253 253 253 253
37717-253 253 253 253 253 253 253 253 253 253 253 253
37718-253 253 253 253 253 253 253 253 253 253 253 253
37719-253 253 253 250 250 250 242 242 242 224 178 62
37720-239 182 13 236 186 11 213 154 11 46 32 6
37721- 2 2 6 2 2 6 2 2 6 2 2 6
37722- 2 2 6 2 2 6 61 42 6 225 175 15
37723-238 190 10 236 186 11 112 100 78 42 42 42
37724- 14 14 14 0 0 0 0 0 0 0 0 0
37725- 0 0 0 0 0 0 0 0 0 0 0 0
37726- 0 0 0 0 0 0 0 0 0 0 0 0
37727- 0 0 0 0 0 0 0 0 0 0 0 0
37728- 0 0 0 0 0 0 0 0 0 6 6 6
37729- 22 22 22 54 54 54 154 122 46 213 154 11
37730-226 170 11 230 174 11 226 170 11 226 170 11
37731-236 178 12 242 186 14 246 190 14 246 190 14
37732-246 190 14 246 190 14 246 190 14 246 190 14
37733-241 196 14 184 144 12 10 10 10 2 2 6
37734- 6 6 6 116 116 116 242 242 242 253 253 253
37735-253 253 253 253 253 253 253 253 253 253 253 253
37736-253 253 253 253 253 253 253 253 253 253 253 253
37737-253 253 253 253 253 253 253 253 253 253 253 253
37738-253 253 253 253 253 253 253 253 253 253 253 253
37739-253 253 253 231 231 231 198 198 198 214 170 54
37740-236 178 12 236 178 12 210 150 10 137 92 6
37741- 18 14 6 2 2 6 2 2 6 2 2 6
37742- 6 6 6 70 47 6 200 144 11 236 178 12
37743-239 182 13 239 182 13 124 112 88 58 58 58
37744- 22 22 22 6 6 6 0 0 0 0 0 0
37745- 0 0 0 0 0 0 0 0 0 0 0 0
37746- 0 0 0 0 0 0 0 0 0 0 0 0
37747- 0 0 0 0 0 0 0 0 0 0 0 0
37748- 0 0 0 0 0 0 0 0 0 10 10 10
37749- 30 30 30 70 70 70 180 133 36 226 170 11
37750-239 182 13 242 186 14 242 186 14 246 186 14
37751-246 190 14 246 190 14 246 190 14 246 190 14
37752-246 190 14 246 190 14 246 190 14 246 190 14
37753-246 190 14 232 195 16 98 70 6 2 2 6
37754- 2 2 6 2 2 6 66 66 66 221 221 221
37755-253 253 253 253 253 253 253 253 253 253 253 253
37756-253 253 253 253 253 253 253 253 253 253 253 253
37757-253 253 253 253 253 253 253 253 253 253 253 253
37758-253 253 253 253 253 253 253 253 253 253 253 253
37759-253 253 253 206 206 206 198 198 198 214 166 58
37760-230 174 11 230 174 11 216 158 10 192 133 9
37761-163 110 8 116 81 8 102 78 10 116 81 8
37762-167 114 7 197 138 11 226 170 11 239 182 13
37763-242 186 14 242 186 14 162 146 94 78 78 78
37764- 34 34 34 14 14 14 6 6 6 0 0 0
37765- 0 0 0 0 0 0 0 0 0 0 0 0
37766- 0 0 0 0 0 0 0 0 0 0 0 0
37767- 0 0 0 0 0 0 0 0 0 0 0 0
37768- 0 0 0 0 0 0 0 0 0 6 6 6
37769- 30 30 30 78 78 78 190 142 34 226 170 11
37770-239 182 13 246 190 14 246 190 14 246 190 14
37771-246 190 14 246 190 14 246 190 14 246 190 14
37772-246 190 14 246 190 14 246 190 14 246 190 14
37773-246 190 14 241 196 14 203 166 17 22 18 6
37774- 2 2 6 2 2 6 2 2 6 38 38 38
37775-218 218 218 253 253 253 253 253 253 253 253 253
37776-253 253 253 253 253 253 253 253 253 253 253 253
37777-253 253 253 253 253 253 253 253 253 253 253 253
37778-253 253 253 253 253 253 253 253 253 253 253 253
37779-250 250 250 206 206 206 198 198 198 202 162 69
37780-226 170 11 236 178 12 224 166 10 210 150 10
37781-200 144 11 197 138 11 192 133 9 197 138 11
37782-210 150 10 226 170 11 242 186 14 246 190 14
37783-246 190 14 246 186 14 225 175 15 124 112 88
37784- 62 62 62 30 30 30 14 14 14 6 6 6
37785- 0 0 0 0 0 0 0 0 0 0 0 0
37786- 0 0 0 0 0 0 0 0 0 0 0 0
37787- 0 0 0 0 0 0 0 0 0 0 0 0
37788- 0 0 0 0 0 0 0 0 0 10 10 10
37789- 30 30 30 78 78 78 174 135 50 224 166 10
37790-239 182 13 246 190 14 246 190 14 246 190 14
37791-246 190 14 246 190 14 246 190 14 246 190 14
37792-246 190 14 246 190 14 246 190 14 246 190 14
37793-246 190 14 246 190 14 241 196 14 139 102 15
37794- 2 2 6 2 2 6 2 2 6 2 2 6
37795- 78 78 78 250 250 250 253 253 253 253 253 253
37796-253 253 253 253 253 253 253 253 253 253 253 253
37797-253 253 253 253 253 253 253 253 253 253 253 253
37798-253 253 253 253 253 253 253 253 253 253 253 253
37799-250 250 250 214 214 214 198 198 198 190 150 46
37800-219 162 10 236 178 12 234 174 13 224 166 10
37801-216 158 10 213 154 11 213 154 11 216 158 10
37802-226 170 11 239 182 13 246 190 14 246 190 14
37803-246 190 14 246 190 14 242 186 14 206 162 42
37804-101 101 101 58 58 58 30 30 30 14 14 14
37805- 6 6 6 0 0 0 0 0 0 0 0 0
37806- 0 0 0 0 0 0 0 0 0 0 0 0
37807- 0 0 0 0 0 0 0 0 0 0 0 0
37808- 0 0 0 0 0 0 0 0 0 10 10 10
37809- 30 30 30 74 74 74 174 135 50 216 158 10
37810-236 178 12 246 190 14 246 190 14 246 190 14
37811-246 190 14 246 190 14 246 190 14 246 190 14
37812-246 190 14 246 190 14 246 190 14 246 190 14
37813-246 190 14 246 190 14 241 196 14 226 184 13
37814- 61 42 6 2 2 6 2 2 6 2 2 6
37815- 22 22 22 238 238 238 253 253 253 253 253 253
37816-253 253 253 253 253 253 253 253 253 253 253 253
37817-253 253 253 253 253 253 253 253 253 253 253 253
37818-253 253 253 253 253 253 253 253 253 253 253 253
37819-253 253 253 226 226 226 187 187 187 180 133 36
37820-216 158 10 236 178 12 239 182 13 236 178 12
37821-230 174 11 226 170 11 226 170 11 230 174 11
37822-236 178 12 242 186 14 246 190 14 246 190 14
37823-246 190 14 246 190 14 246 186 14 239 182 13
37824-206 162 42 106 106 106 66 66 66 34 34 34
37825- 14 14 14 6 6 6 0 0 0 0 0 0
37826- 0 0 0 0 0 0 0 0 0 0 0 0
37827- 0 0 0 0 0 0 0 0 0 0 0 0
37828- 0 0 0 0 0 0 0 0 0 6 6 6
37829- 26 26 26 70 70 70 163 133 67 213 154 11
37830-236 178 12 246 190 14 246 190 14 246 190 14
37831-246 190 14 246 190 14 246 190 14 246 190 14
37832-246 190 14 246 190 14 246 190 14 246 190 14
37833-246 190 14 246 190 14 246 190 14 241 196 14
37834-190 146 13 18 14 6 2 2 6 2 2 6
37835- 46 46 46 246 246 246 253 253 253 253 253 253
37836-253 253 253 253 253 253 253 253 253 253 253 253
37837-253 253 253 253 253 253 253 253 253 253 253 253
37838-253 253 253 253 253 253 253 253 253 253 253 253
37839-253 253 253 221 221 221 86 86 86 156 107 11
37840-216 158 10 236 178 12 242 186 14 246 186 14
37841-242 186 14 239 182 13 239 182 13 242 186 14
37842-242 186 14 246 186 14 246 190 14 246 190 14
37843-246 190 14 246 190 14 246 190 14 246 190 14
37844-242 186 14 225 175 15 142 122 72 66 66 66
37845- 30 30 30 10 10 10 0 0 0 0 0 0
37846- 0 0 0 0 0 0 0 0 0 0 0 0
37847- 0 0 0 0 0 0 0 0 0 0 0 0
37848- 0 0 0 0 0 0 0 0 0 6 6 6
37849- 26 26 26 70 70 70 163 133 67 210 150 10
37850-236 178 12 246 190 14 246 190 14 246 190 14
37851-246 190 14 246 190 14 246 190 14 246 190 14
37852-246 190 14 246 190 14 246 190 14 246 190 14
37853-246 190 14 246 190 14 246 190 14 246 190 14
37854-232 195 16 121 92 8 34 34 34 106 106 106
37855-221 221 221 253 253 253 253 253 253 253 253 253
37856-253 253 253 253 253 253 253 253 253 253 253 253
37857-253 253 253 253 253 253 253 253 253 253 253 253
37858-253 253 253 253 253 253 253 253 253 253 253 253
37859-242 242 242 82 82 82 18 14 6 163 110 8
37860-216 158 10 236 178 12 242 186 14 246 190 14
37861-246 190 14 246 190 14 246 190 14 246 190 14
37862-246 190 14 246 190 14 246 190 14 246 190 14
37863-246 190 14 246 190 14 246 190 14 246 190 14
37864-246 190 14 246 190 14 242 186 14 163 133 67
37865- 46 46 46 18 18 18 6 6 6 0 0 0
37866- 0 0 0 0 0 0 0 0 0 0 0 0
37867- 0 0 0 0 0 0 0 0 0 0 0 0
37868- 0 0 0 0 0 0 0 0 0 10 10 10
37869- 30 30 30 78 78 78 163 133 67 210 150 10
37870-236 178 12 246 186 14 246 190 14 246 190 14
37871-246 190 14 246 190 14 246 190 14 246 190 14
37872-246 190 14 246 190 14 246 190 14 246 190 14
37873-246 190 14 246 190 14 246 190 14 246 190 14
37874-241 196 14 215 174 15 190 178 144 253 253 253
37875-253 253 253 253 253 253 253 253 253 253 253 253
37876-253 253 253 253 253 253 253 253 253 253 253 253
37877-253 253 253 253 253 253 253 253 253 253 253 253
37878-253 253 253 253 253 253 253 253 253 218 218 218
37879- 58 58 58 2 2 6 22 18 6 167 114 7
37880-216 158 10 236 178 12 246 186 14 246 190 14
37881-246 190 14 246 190 14 246 190 14 246 190 14
37882-246 190 14 246 190 14 246 190 14 246 190 14
37883-246 190 14 246 190 14 246 190 14 246 190 14
37884-246 190 14 246 186 14 242 186 14 190 150 46
37885- 54 54 54 22 22 22 6 6 6 0 0 0
37886- 0 0 0 0 0 0 0 0 0 0 0 0
37887- 0 0 0 0 0 0 0 0 0 0 0 0
37888- 0 0 0 0 0 0 0 0 0 14 14 14
37889- 38 38 38 86 86 86 180 133 36 213 154 11
37890-236 178 12 246 186 14 246 190 14 246 190 14
37891-246 190 14 246 190 14 246 190 14 246 190 14
37892-246 190 14 246 190 14 246 190 14 246 190 14
37893-246 190 14 246 190 14 246 190 14 246 190 14
37894-246 190 14 232 195 16 190 146 13 214 214 214
37895-253 253 253 253 253 253 253 253 253 253 253 253
37896-253 253 253 253 253 253 253 253 253 253 253 253
37897-253 253 253 253 253 253 253 253 253 253 253 253
37898-253 253 253 250 250 250 170 170 170 26 26 26
37899- 2 2 6 2 2 6 37 26 9 163 110 8
37900-219 162 10 239 182 13 246 186 14 246 190 14
37901-246 190 14 246 190 14 246 190 14 246 190 14
37902-246 190 14 246 190 14 246 190 14 246 190 14
37903-246 190 14 246 190 14 246 190 14 246 190 14
37904-246 186 14 236 178 12 224 166 10 142 122 72
37905- 46 46 46 18 18 18 6 6 6 0 0 0
37906- 0 0 0 0 0 0 0 0 0 0 0 0
37907- 0 0 0 0 0 0 0 0 0 0 0 0
37908- 0 0 0 0 0 0 6 6 6 18 18 18
37909- 50 50 50 109 106 95 192 133 9 224 166 10
37910-242 186 14 246 190 14 246 190 14 246 190 14
37911-246 190 14 246 190 14 246 190 14 246 190 14
37912-246 190 14 246 190 14 246 190 14 246 190 14
37913-246 190 14 246 190 14 246 190 14 246 190 14
37914-242 186 14 226 184 13 210 162 10 142 110 46
37915-226 226 226 253 253 253 253 253 253 253 253 253
37916-253 253 253 253 253 253 253 253 253 253 253 253
37917-253 253 253 253 253 253 253 253 253 253 253 253
37918-198 198 198 66 66 66 2 2 6 2 2 6
37919- 2 2 6 2 2 6 50 34 6 156 107 11
37920-219 162 10 239 182 13 246 186 14 246 190 14
37921-246 190 14 246 190 14 246 190 14 246 190 14
37922-246 190 14 246 190 14 246 190 14 246 190 14
37923-246 190 14 246 190 14 246 190 14 242 186 14
37924-234 174 13 213 154 11 154 122 46 66 66 66
37925- 30 30 30 10 10 10 0 0 0 0 0 0
37926- 0 0 0 0 0 0 0 0 0 0 0 0
37927- 0 0 0 0 0 0 0 0 0 0 0 0
37928- 0 0 0 0 0 0 6 6 6 22 22 22
37929- 58 58 58 154 121 60 206 145 10 234 174 13
37930-242 186 14 246 186 14 246 190 14 246 190 14
37931-246 190 14 246 190 14 246 190 14 246 190 14
37932-246 190 14 246 190 14 246 190 14 246 190 14
37933-246 190 14 246 190 14 246 190 14 246 190 14
37934-246 186 14 236 178 12 210 162 10 163 110 8
37935- 61 42 6 138 138 138 218 218 218 250 250 250
37936-253 253 253 253 253 253 253 253 253 250 250 250
37937-242 242 242 210 210 210 144 144 144 66 66 66
37938- 6 6 6 2 2 6 2 2 6 2 2 6
37939- 2 2 6 2 2 6 61 42 6 163 110 8
37940-216 158 10 236 178 12 246 190 14 246 190 14
37941-246 190 14 246 190 14 246 190 14 246 190 14
37942-246 190 14 246 190 14 246 190 14 246 190 14
37943-246 190 14 239 182 13 230 174 11 216 158 10
37944-190 142 34 124 112 88 70 70 70 38 38 38
37945- 18 18 18 6 6 6 0 0 0 0 0 0
37946- 0 0 0 0 0 0 0 0 0 0 0 0
37947- 0 0 0 0 0 0 0 0 0 0 0 0
37948- 0 0 0 0 0 0 6 6 6 22 22 22
37949- 62 62 62 168 124 44 206 145 10 224 166 10
37950-236 178 12 239 182 13 242 186 14 242 186 14
37951-246 186 14 246 190 14 246 190 14 246 190 14
37952-246 190 14 246 190 14 246 190 14 246 190 14
37953-246 190 14 246 190 14 246 190 14 246 190 14
37954-246 190 14 236 178 12 216 158 10 175 118 6
37955- 80 54 7 2 2 6 6 6 6 30 30 30
37956- 54 54 54 62 62 62 50 50 50 38 38 38
37957- 14 14 14 2 2 6 2 2 6 2 2 6
37958- 2 2 6 2 2 6 2 2 6 2 2 6
37959- 2 2 6 6 6 6 80 54 7 167 114 7
37960-213 154 11 236 178 12 246 190 14 246 190 14
37961-246 190 14 246 190 14 246 190 14 246 190 14
37962-246 190 14 242 186 14 239 182 13 239 182 13
37963-230 174 11 210 150 10 174 135 50 124 112 88
37964- 82 82 82 54 54 54 34 34 34 18 18 18
37965- 6 6 6 0 0 0 0 0 0 0 0 0
37966- 0 0 0 0 0 0 0 0 0 0 0 0
37967- 0 0 0 0 0 0 0 0 0 0 0 0
37968- 0 0 0 0 0 0 6 6 6 18 18 18
37969- 50 50 50 158 118 36 192 133 9 200 144 11
37970-216 158 10 219 162 10 224 166 10 226 170 11
37971-230 174 11 236 178 12 239 182 13 239 182 13
37972-242 186 14 246 186 14 246 190 14 246 190 14
37973-246 190 14 246 190 14 246 190 14 246 190 14
37974-246 186 14 230 174 11 210 150 10 163 110 8
37975-104 69 6 10 10 10 2 2 6 2 2 6
37976- 2 2 6 2 2 6 2 2 6 2 2 6
37977- 2 2 6 2 2 6 2 2 6 2 2 6
37978- 2 2 6 2 2 6 2 2 6 2 2 6
37979- 2 2 6 6 6 6 91 60 6 167 114 7
37980-206 145 10 230 174 11 242 186 14 246 190 14
37981-246 190 14 246 190 14 246 186 14 242 186 14
37982-239 182 13 230 174 11 224 166 10 213 154 11
37983-180 133 36 124 112 88 86 86 86 58 58 58
37984- 38 38 38 22 22 22 10 10 10 6 6 6
37985- 0 0 0 0 0 0 0 0 0 0 0 0
37986- 0 0 0 0 0 0 0 0 0 0 0 0
37987- 0 0 0 0 0 0 0 0 0 0 0 0
37988- 0 0 0 0 0 0 0 0 0 14 14 14
37989- 34 34 34 70 70 70 138 110 50 158 118 36
37990-167 114 7 180 123 7 192 133 9 197 138 11
37991-200 144 11 206 145 10 213 154 11 219 162 10
37992-224 166 10 230 174 11 239 182 13 242 186 14
37993-246 186 14 246 186 14 246 186 14 246 186 14
37994-239 182 13 216 158 10 185 133 11 152 99 6
37995-104 69 6 18 14 6 2 2 6 2 2 6
37996- 2 2 6 2 2 6 2 2 6 2 2 6
37997- 2 2 6 2 2 6 2 2 6 2 2 6
37998- 2 2 6 2 2 6 2 2 6 2 2 6
37999- 2 2 6 6 6 6 80 54 7 152 99 6
38000-192 133 9 219 162 10 236 178 12 239 182 13
38001-246 186 14 242 186 14 239 182 13 236 178 12
38002-224 166 10 206 145 10 192 133 9 154 121 60
38003- 94 94 94 62 62 62 42 42 42 22 22 22
38004- 14 14 14 6 6 6 0 0 0 0 0 0
38005- 0 0 0 0 0 0 0 0 0 0 0 0
38006- 0 0 0 0 0 0 0 0 0 0 0 0
38007- 0 0 0 0 0 0 0 0 0 0 0 0
38008- 0 0 0 0 0 0 0 0 0 6 6 6
38009- 18 18 18 34 34 34 58 58 58 78 78 78
38010-101 98 89 124 112 88 142 110 46 156 107 11
38011-163 110 8 167 114 7 175 118 6 180 123 7
38012-185 133 11 197 138 11 210 150 10 219 162 10
38013-226 170 11 236 178 12 236 178 12 234 174 13
38014-219 162 10 197 138 11 163 110 8 130 83 6
38015- 91 60 6 10 10 10 2 2 6 2 2 6
38016- 18 18 18 38 38 38 38 38 38 38 38 38
38017- 38 38 38 38 38 38 38 38 38 38 38 38
38018- 38 38 38 38 38 38 26 26 26 2 2 6
38019- 2 2 6 6 6 6 70 47 6 137 92 6
38020-175 118 6 200 144 11 219 162 10 230 174 11
38021-234 174 13 230 174 11 219 162 10 210 150 10
38022-192 133 9 163 110 8 124 112 88 82 82 82
38023- 50 50 50 30 30 30 14 14 14 6 6 6
38024- 0 0 0 0 0 0 0 0 0 0 0 0
38025- 0 0 0 0 0 0 0 0 0 0 0 0
38026- 0 0 0 0 0 0 0 0 0 0 0 0
38027- 0 0 0 0 0 0 0 0 0 0 0 0
38028- 0 0 0 0 0 0 0 0 0 0 0 0
38029- 6 6 6 14 14 14 22 22 22 34 34 34
38030- 42 42 42 58 58 58 74 74 74 86 86 86
38031-101 98 89 122 102 70 130 98 46 121 87 25
38032-137 92 6 152 99 6 163 110 8 180 123 7
38033-185 133 11 197 138 11 206 145 10 200 144 11
38034-180 123 7 156 107 11 130 83 6 104 69 6
38035- 50 34 6 54 54 54 110 110 110 101 98 89
38036- 86 86 86 82 82 82 78 78 78 78 78 78
38037- 78 78 78 78 78 78 78 78 78 78 78 78
38038- 78 78 78 82 82 82 86 86 86 94 94 94
38039-106 106 106 101 101 101 86 66 34 124 80 6
38040-156 107 11 180 123 7 192 133 9 200 144 11
38041-206 145 10 200 144 11 192 133 9 175 118 6
38042-139 102 15 109 106 95 70 70 70 42 42 42
38043- 22 22 22 10 10 10 0 0 0 0 0 0
38044- 0 0 0 0 0 0 0 0 0 0 0 0
38045- 0 0 0 0 0 0 0 0 0 0 0 0
38046- 0 0 0 0 0 0 0 0 0 0 0 0
38047- 0 0 0 0 0 0 0 0 0 0 0 0
38048- 0 0 0 0 0 0 0 0 0 0 0 0
38049- 0 0 0 0 0 0 6 6 6 10 10 10
38050- 14 14 14 22 22 22 30 30 30 38 38 38
38051- 50 50 50 62 62 62 74 74 74 90 90 90
38052-101 98 89 112 100 78 121 87 25 124 80 6
38053-137 92 6 152 99 6 152 99 6 152 99 6
38054-138 86 6 124 80 6 98 70 6 86 66 30
38055-101 98 89 82 82 82 58 58 58 46 46 46
38056- 38 38 38 34 34 34 34 34 34 34 34 34
38057- 34 34 34 34 34 34 34 34 34 34 34 34
38058- 34 34 34 34 34 34 38 38 38 42 42 42
38059- 54 54 54 82 82 82 94 86 76 91 60 6
38060-134 86 6 156 107 11 167 114 7 175 118 6
38061-175 118 6 167 114 7 152 99 6 121 87 25
38062-101 98 89 62 62 62 34 34 34 18 18 18
38063- 6 6 6 0 0 0 0 0 0 0 0 0
38064- 0 0 0 0 0 0 0 0 0 0 0 0
38065- 0 0 0 0 0 0 0 0 0 0 0 0
38066- 0 0 0 0 0 0 0 0 0 0 0 0
38067- 0 0 0 0 0 0 0 0 0 0 0 0
38068- 0 0 0 0 0 0 0 0 0 0 0 0
38069- 0 0 0 0 0 0 0 0 0 0 0 0
38070- 0 0 0 6 6 6 6 6 6 10 10 10
38071- 18 18 18 22 22 22 30 30 30 42 42 42
38072- 50 50 50 66 66 66 86 86 86 101 98 89
38073-106 86 58 98 70 6 104 69 6 104 69 6
38074-104 69 6 91 60 6 82 62 34 90 90 90
38075- 62 62 62 38 38 38 22 22 22 14 14 14
38076- 10 10 10 10 10 10 10 10 10 10 10 10
38077- 10 10 10 10 10 10 6 6 6 10 10 10
38078- 10 10 10 10 10 10 10 10 10 14 14 14
38079- 22 22 22 42 42 42 70 70 70 89 81 66
38080- 80 54 7 104 69 6 124 80 6 137 92 6
38081-134 86 6 116 81 8 100 82 52 86 86 86
38082- 58 58 58 30 30 30 14 14 14 6 6 6
38083- 0 0 0 0 0 0 0 0 0 0 0 0
38084- 0 0 0 0 0 0 0 0 0 0 0 0
38085- 0 0 0 0 0 0 0 0 0 0 0 0
38086- 0 0 0 0 0 0 0 0 0 0 0 0
38087- 0 0 0 0 0 0 0 0 0 0 0 0
38088- 0 0 0 0 0 0 0 0 0 0 0 0
38089- 0 0 0 0 0 0 0 0 0 0 0 0
38090- 0 0 0 0 0 0 0 0 0 0 0 0
38091- 0 0 0 6 6 6 10 10 10 14 14 14
38092- 18 18 18 26 26 26 38 38 38 54 54 54
38093- 70 70 70 86 86 86 94 86 76 89 81 66
38094- 89 81 66 86 86 86 74 74 74 50 50 50
38095- 30 30 30 14 14 14 6 6 6 0 0 0
38096- 0 0 0 0 0 0 0 0 0 0 0 0
38097- 0 0 0 0 0 0 0 0 0 0 0 0
38098- 0 0 0 0 0 0 0 0 0 0 0 0
38099- 6 6 6 18 18 18 34 34 34 58 58 58
38100- 82 82 82 89 81 66 89 81 66 89 81 66
38101- 94 86 66 94 86 76 74 74 74 50 50 50
38102- 26 26 26 14 14 14 6 6 6 0 0 0
38103- 0 0 0 0 0 0 0 0 0 0 0 0
38104- 0 0 0 0 0 0 0 0 0 0 0 0
38105- 0 0 0 0 0 0 0 0 0 0 0 0
38106- 0 0 0 0 0 0 0 0 0 0 0 0
38107- 0 0 0 0 0 0 0 0 0 0 0 0
38108- 0 0 0 0 0 0 0 0 0 0 0 0
38109- 0 0 0 0 0 0 0 0 0 0 0 0
38110- 0 0 0 0 0 0 0 0 0 0 0 0
38111- 0 0 0 0 0 0 0 0 0 0 0 0
38112- 6 6 6 6 6 6 14 14 14 18 18 18
38113- 30 30 30 38 38 38 46 46 46 54 54 54
38114- 50 50 50 42 42 42 30 30 30 18 18 18
38115- 10 10 10 0 0 0 0 0 0 0 0 0
38116- 0 0 0 0 0 0 0 0 0 0 0 0
38117- 0 0 0 0 0 0 0 0 0 0 0 0
38118- 0 0 0 0 0 0 0 0 0 0 0 0
38119- 0 0 0 6 6 6 14 14 14 26 26 26
38120- 38 38 38 50 50 50 58 58 58 58 58 58
38121- 54 54 54 42 42 42 30 30 30 18 18 18
38122- 10 10 10 0 0 0 0 0 0 0 0 0
38123- 0 0 0 0 0 0 0 0 0 0 0 0
38124- 0 0 0 0 0 0 0 0 0 0 0 0
38125- 0 0 0 0 0 0 0 0 0 0 0 0
38126- 0 0 0 0 0 0 0 0 0 0 0 0
38127- 0 0 0 0 0 0 0 0 0 0 0 0
38128- 0 0 0 0 0 0 0 0 0 0 0 0
38129- 0 0 0 0 0 0 0 0 0 0 0 0
38130- 0 0 0 0 0 0 0 0 0 0 0 0
38131- 0 0 0 0 0 0 0 0 0 0 0 0
38132- 0 0 0 0 0 0 0 0 0 6 6 6
38133- 6 6 6 10 10 10 14 14 14 18 18 18
38134- 18 18 18 14 14 14 10 10 10 6 6 6
38135- 0 0 0 0 0 0 0 0 0 0 0 0
38136- 0 0 0 0 0 0 0 0 0 0 0 0
38137- 0 0 0 0 0 0 0 0 0 0 0 0
38138- 0 0 0 0 0 0 0 0 0 0 0 0
38139- 0 0 0 0 0 0 0 0 0 6 6 6
38140- 14 14 14 18 18 18 22 22 22 22 22 22
38141- 18 18 18 14 14 14 10 10 10 6 6 6
38142- 0 0 0 0 0 0 0 0 0 0 0 0
38143- 0 0 0 0 0 0 0 0 0 0 0 0
38144- 0 0 0 0 0 0 0 0 0 0 0 0
38145- 0 0 0 0 0 0 0 0 0 0 0 0
38146- 0 0 0 0 0 0 0 0 0 0 0 0
38147+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38148+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38149+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38150+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38151+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38152+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38153+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38154+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38155+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38156+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38157+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38158+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38159+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38160+4 4 4 4 4 4
38161+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38162+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38163+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38164+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38165+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38166+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38167+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38168+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38169+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38170+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38171+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38172+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38173+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38174+4 4 4 4 4 4
38175+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38176+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38177+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38178+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38179+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38180+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38181+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38182+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38183+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38184+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38185+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38186+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38187+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38188+4 4 4 4 4 4
38189+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38190+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38191+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38192+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38193+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38194+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38195+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38196+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38197+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38198+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38199+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38200+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38201+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38202+4 4 4 4 4 4
38203+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38204+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38205+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38206+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38207+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38208+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38209+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38210+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38211+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38212+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38213+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38214+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38215+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38216+4 4 4 4 4 4
38217+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38218+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38219+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38220+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38221+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38222+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38223+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38224+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38225+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38226+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38227+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38228+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38229+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38230+4 4 4 4 4 4
38231+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38232+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38233+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38234+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38235+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
38236+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
38237+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38238+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38239+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38240+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
38241+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38242+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
38243+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38244+4 4 4 4 4 4
38245+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38246+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38247+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38248+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38249+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
38250+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
38251+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38252+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38253+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38254+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
38255+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
38256+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
38257+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38258+4 4 4 4 4 4
38259+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38260+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38261+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38262+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38263+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
38264+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
38265+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38266+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38267+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38268+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
38269+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
38270+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
38271+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
38272+4 4 4 4 4 4
38273+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38274+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38275+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38276+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
38277+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
38278+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
38279+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
38280+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38281+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
38282+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
38283+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
38284+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
38285+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
38286+4 4 4 4 4 4
38287+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38288+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38289+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38290+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
38291+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
38292+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
38293+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
38294+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38295+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
38296+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
38297+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
38298+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
38299+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
38300+4 4 4 4 4 4
38301+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38302+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38303+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
38304+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
38305+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
38306+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
38307+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
38308+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
38309+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
38310+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
38311+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
38312+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
38313+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
38314+4 4 4 4 4 4
38315+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38316+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38317+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
38318+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
38319+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
38320+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
38321+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
38322+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
38323+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
38324+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
38325+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
38326+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
38327+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
38328+4 4 4 4 4 4
38329+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38330+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38331+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
38332+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
38333+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
38334+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
38335+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
38336+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
38337+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
38338+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
38339+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
38340+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
38341+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
38342+4 4 4 4 4 4
38343+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38344+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38345+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
38346+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
38347+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
38348+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
38349+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
38350+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
38351+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
38352+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
38353+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
38354+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
38355+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38356+4 4 4 4 4 4
38357+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38358+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38359+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
38360+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
38361+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
38362+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
38363+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
38364+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
38365+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
38366+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
38367+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
38368+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
38369+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
38370+4 4 4 4 4 4
38371+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38372+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
38373+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
38374+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
38375+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
38376+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
38377+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
38378+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
38379+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
38380+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
38381+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
38382+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
38383+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
38384+4 4 4 4 4 4
38385+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38386+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
38387+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
38388+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
38389+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
38390+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
38391+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
38392+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
38393+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
38394+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
38395+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
38396+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
38397+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
38398+0 0 0 4 4 4
38399+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
38400+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
38401+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
38402+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
38403+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
38404+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
38405+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
38406+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
38407+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
38408+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
38409+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
38410+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
38411+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
38412+2 0 0 0 0 0
38413+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
38414+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
38415+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
38416+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
38417+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
38418+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
38419+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
38420+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
38421+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
38422+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
38423+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
38424+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
38425+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
38426+37 38 37 0 0 0
38427+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
38428+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
38429+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
38430+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
38431+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
38432+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
38433+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
38434+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
38435+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
38436+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
38437+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
38438+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
38439+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
38440+85 115 134 4 0 0
38441+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
38442+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
38443+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
38444+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
38445+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
38446+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
38447+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
38448+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
38449+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
38450+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
38451+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
38452+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
38453+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
38454+60 73 81 4 0 0
38455+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
38456+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
38457+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
38458+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
38459+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
38460+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
38461+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
38462+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
38463+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
38464+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
38465+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
38466+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
38467+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
38468+16 19 21 4 0 0
38469+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
38470+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
38471+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
38472+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
38473+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
38474+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
38475+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
38476+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
38477+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
38478+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
38479+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
38480+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
38481+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
38482+4 0 0 4 3 3
38483+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
38484+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
38485+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
38486+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
38487+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
38488+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
38489+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
38490+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
38491+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
38492+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
38493+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
38494+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
38495+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
38496+3 2 2 4 4 4
38497+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
38498+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
38499+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
38500+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
38501+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
38502+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
38503+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
38504+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
38505+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
38506+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
38507+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
38508+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
38509+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
38510+4 4 4 4 4 4
38511+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
38512+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
38513+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
38514+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
38515+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
38516+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
38517+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
38518+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
38519+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
38520+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
38521+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
38522+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
38523+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
38524+4 4 4 4 4 4
38525+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
38526+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
38527+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
38528+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
38529+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
38530+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
38531+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
38532+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
38533+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
38534+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
38535+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
38536+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
38537+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
38538+5 5 5 5 5 5
38539+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
38540+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
38541+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
38542+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
38543+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
38544+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38545+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
38546+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
38547+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
38548+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
38549+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
38550+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
38551+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
38552+5 5 5 4 4 4
38553+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
38554+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
38555+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
38556+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
38557+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38558+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
38559+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
38560+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
38561+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
38562+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
38563+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
38564+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
38565+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38566+4 4 4 4 4 4
38567+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
38568+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
38569+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
38570+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
38571+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
38572+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38573+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38574+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
38575+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
38576+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
38577+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
38578+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
38579+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38580+4 4 4 4 4 4
38581+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
38582+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
38583+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
38584+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
38585+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38586+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
38587+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
38588+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
38589+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
38590+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
38591+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
38592+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38593+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38594+4 4 4 4 4 4
38595+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
38596+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
38597+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
38598+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
38599+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38600+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38601+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38602+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
38603+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
38604+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
38605+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
38606+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38607+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38608+4 4 4 4 4 4
38609+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
38610+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
38611+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
38612+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
38613+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38614+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
38615+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
38616+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
38617+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
38618+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
38619+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38620+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38621+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38622+4 4 4 4 4 4
38623+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
38624+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
38625+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
38626+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
38627+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38628+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
38629+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
38630+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
38631+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
38632+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
38633+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
38634+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38635+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38636+4 4 4 4 4 4
38637+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
38638+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
38639+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
38640+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
38641+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38642+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
38643+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
38644+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
38645+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
38646+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
38647+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
38648+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38649+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38650+4 4 4 4 4 4
38651+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
38652+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
38653+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
38654+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
38655+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
38656+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
38657+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
38658+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
38659+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
38660+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
38661+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38662+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38663+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38664+4 4 4 4 4 4
38665+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
38666+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
38667+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
38668+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
38669+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38670+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
38671+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
38672+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
38673+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
38674+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
38675+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38676+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38677+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38678+4 4 4 4 4 4
38679+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
38680+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
38681+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
38682+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
38683+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38684+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
38685+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
38686+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
38687+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
38688+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
38689+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38690+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38691+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38692+4 4 4 4 4 4
38693+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
38694+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
38695+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
38696+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
38697+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38698+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
38699+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
38700+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
38701+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
38702+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38703+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38704+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38705+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38706+4 4 4 4 4 4
38707+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
38708+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
38709+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
38710+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
38711+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
38712+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
38713+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
38714+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
38715+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38716+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38717+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38718+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38719+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38720+4 4 4 4 4 4
38721+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
38722+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
38723+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
38724+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
38725+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38726+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
38727+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
38728+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
38729+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38730+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38731+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38732+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38733+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38734+4 4 4 4 4 4
38735+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
38736+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
38737+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
38738+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
38739+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
38740+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
38741+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
38742+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
38743+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38744+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38745+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38746+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38747+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38748+4 4 4 4 4 4
38749+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
38750+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
38751+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38752+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
38753+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
38754+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
38755+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
38756+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
38757+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
38758+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38759+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38760+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38761+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38762+4 4 4 4 4 4
38763+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
38764+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
38765+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
38766+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
38767+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
38768+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
38769+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
38770+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
38771+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38772+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38773+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38774+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38775+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38776+4 4 4 4 4 4
38777+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
38778+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
38779+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38780+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
38781+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
38782+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
38783+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
38784+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
38785+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
38786+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38787+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38788+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38789+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38790+4 4 4 4 4 4
38791+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
38792+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
38793+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
38794+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
38795+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
38796+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
38797+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
38798+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
38799+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38800+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38801+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38802+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38803+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38804+4 4 4 4 4 4
38805+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38806+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
38807+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38808+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
38809+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
38810+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
38811+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
38812+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
38813+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38814+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38815+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38816+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38817+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38818+4 4 4 4 4 4
38819+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
38820+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
38821+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
38822+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
38823+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
38824+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
38825+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38826+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
38827+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38828+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38829+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38830+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38831+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38832+4 4 4 4 4 4
38833+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38834+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
38835+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
38836+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
38837+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
38838+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
38839+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38840+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
38841+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38842+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38843+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38844+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38845+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38846+4 4 4 4 4 4
38847+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
38848+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
38849+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
38850+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
38851+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
38852+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
38853+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
38854+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
38855+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
38856+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38857+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38858+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38859+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38860+4 4 4 4 4 4
38861+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38862+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
38863+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
38864+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
38865+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
38866+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
38867+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
38868+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
38869+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
38870+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38871+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38872+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38873+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38874+4 4 4 4 4 4
38875+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
38876+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
38877+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
38878+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
38879+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
38880+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
38881+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
38882+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
38883+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
38884+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38885+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38886+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38887+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38888+4 4 4 4 4 4
38889+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38890+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
38891+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
38892+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
38893+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
38894+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
38895+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
38896+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
38897+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
38898+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38899+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38900+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38901+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38902+4 4 4 4 4 4
38903+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
38904+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
38905+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
38906+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
38907+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
38908+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
38909+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
38910+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
38911+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
38912+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
38913+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38914+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38915+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38916+4 4 4 4 4 4
38917+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
38918+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
38919+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
38920+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
38921+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
38922+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
38923+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
38924+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
38925+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
38926+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
38927+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38928+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38929+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38930+4 4 4 4 4 4
38931+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
38932+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
38933+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
38934+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
38935+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
38936+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
38937+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38938+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
38939+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
38940+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
38941+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38942+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38943+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38944+4 4 4 4 4 4
38945+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
38946+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
38947+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
38948+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
38949+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
38950+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
38951+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
38952+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
38953+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
38954+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
38955+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38956+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38957+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38958+4 4 4 4 4 4
38959+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
38960+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
38961+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
38962+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
38963+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
38964+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
38965+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
38966+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
38967+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
38968+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
38969+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38970+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38971+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38972+4 4 4 4 4 4
38973+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
38974+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
38975+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
38976+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
38977+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
38978+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
38979+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
38980+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
38981+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
38982+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
38983+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38984+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38985+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38986+4 4 4 4 4 4
38987+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
38988+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
38989+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
38990+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
38991+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
38992+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
38993+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
38994+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
38995+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
38996+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
38997+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38998+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38999+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39000+4 4 4 4 4 4
39001+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
39002+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
39003+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
39004+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
39005+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
39006+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
39007+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
39008+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
39009+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
39010+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39011+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39012+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39013+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39014+4 4 4 4 4 4
39015+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
39016+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
39017+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
39018+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
39019+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
39020+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
39021+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
39022+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
39023+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
39024+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39025+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39026+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39027+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39028+4 4 4 4 4 4
39029+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
39030+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
39031+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
39032+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
39033+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
39034+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
39035+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
39036+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
39037+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
39038+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39039+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39040+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39041+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39042+4 4 4 4 4 4
39043+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
39044+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
39045+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
39046+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
39047+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
39048+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
39049+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
39050+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
39051+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39052+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39053+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39054+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39055+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39056+4 4 4 4 4 4
39057+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
39058+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
39059+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
39060+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
39061+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
39062+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
39063+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
39064+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
39065+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39066+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39067+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39068+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39069+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39070+4 4 4 4 4 4
39071+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
39072+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
39073+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
39074+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
39075+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
39076+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
39077+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
39078+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
39079+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39080+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39081+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39082+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39083+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39084+4 4 4 4 4 4
39085+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39086+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
39087+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
39088+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
39089+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
39090+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
39091+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
39092+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
39093+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39094+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39095+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39096+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39097+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39098+4 4 4 4 4 4
39099+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39100+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
39101+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
39102+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
39103+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
39104+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
39105+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
39106+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
39107+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39108+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39109+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39110+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39111+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39112+4 4 4 4 4 4
39113+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39114+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
39115+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
39116+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
39117+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
39118+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
39119+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
39120+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39121+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39122+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39123+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39124+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39125+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39126+4 4 4 4 4 4
39127+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39128+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39129+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
39130+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
39131+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
39132+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
39133+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
39134+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39135+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39136+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39137+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39138+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39139+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39140+4 4 4 4 4 4
39141+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39142+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39143+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39144+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
39145+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
39146+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
39147+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
39148+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39149+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39150+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39151+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39152+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39153+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39154+4 4 4 4 4 4
39155+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39156+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39157+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39158+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
39159+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
39160+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
39161+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
39162+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39163+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39164+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39165+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39166+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39167+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39168+4 4 4 4 4 4
39169+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39170+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39171+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39172+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
39173+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
39174+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
39175+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
39176+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39177+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39178+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39179+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39180+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39181+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39182+4 4 4 4 4 4
39183+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39184+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39185+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39186+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
39187+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
39188+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
39189+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
39190+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39191+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39192+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39193+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39194+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39195+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39196+4 4 4 4 4 4
39197+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39198+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39199+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39200+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39201+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
39202+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
39203+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39204+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39205+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39206+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39207+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39208+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39209+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39210+4 4 4 4 4 4
39211+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39212+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39213+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39214+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39215+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
39216+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
39217+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
39218+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39219+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39220+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39221+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39222+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39223+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39224+4 4 4 4 4 4
39225+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39226+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39227+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39228+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39229+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
39230+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
39231+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39232+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39233+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39234+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39235+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39236+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39237+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39238+4 4 4 4 4 4
39239+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39240+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39241+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39242+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39243+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
39244+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
39245+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39246+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39247+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39248+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39249+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39250+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39251+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39252+4 4 4 4 4 4
39253+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39254+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39255+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39256+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39257+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
39258+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
39259+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39260+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39261+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39262+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39263+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39264+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39265+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39266+4 4 4 4 4 4
39267diff -urNp linux-3.1.1/drivers/video/udlfb.c linux-3.1.1/drivers/video/udlfb.c
39268--- linux-3.1.1/drivers/video/udlfb.c 2011-11-11 15:19:27.000000000 -0500
39269+++ linux-3.1.1/drivers/video/udlfb.c 2011-11-16 18:39:08.000000000 -0500
39270@@ -585,11 +585,11 @@ int dlfb_handle_damage(struct dlfb_data
39271 dlfb_urb_completion(urb);
39272
39273 error:
39274- atomic_add(bytes_sent, &dev->bytes_sent);
39275- atomic_add(bytes_identical, &dev->bytes_identical);
39276- atomic_add(width*height*2, &dev->bytes_rendered);
39277+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
39278+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
39279+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
39280 end_cycles = get_cycles();
39281- atomic_add(((unsigned int) ((end_cycles - start_cycles)
39282+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
39283 >> 10)), /* Kcycles */
39284 &dev->cpu_kcycles_used);
39285
39286@@ -710,11 +710,11 @@ static void dlfb_dpy_deferred_io(struct
39287 dlfb_urb_completion(urb);
39288
39289 error:
39290- atomic_add(bytes_sent, &dev->bytes_sent);
39291- atomic_add(bytes_identical, &dev->bytes_identical);
39292- atomic_add(bytes_rendered, &dev->bytes_rendered);
39293+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
39294+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
39295+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
39296 end_cycles = get_cycles();
39297- atomic_add(((unsigned int) ((end_cycles - start_cycles)
39298+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
39299 >> 10)), /* Kcycles */
39300 &dev->cpu_kcycles_used);
39301 }
39302@@ -1306,7 +1306,7 @@ static ssize_t metrics_bytes_rendered_sh
39303 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39304 struct dlfb_data *dev = fb_info->par;
39305 return snprintf(buf, PAGE_SIZE, "%u\n",
39306- atomic_read(&dev->bytes_rendered));
39307+ atomic_read_unchecked(&dev->bytes_rendered));
39308 }
39309
39310 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
39311@@ -1314,7 +1314,7 @@ static ssize_t metrics_bytes_identical_s
39312 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39313 struct dlfb_data *dev = fb_info->par;
39314 return snprintf(buf, PAGE_SIZE, "%u\n",
39315- atomic_read(&dev->bytes_identical));
39316+ atomic_read_unchecked(&dev->bytes_identical));
39317 }
39318
39319 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
39320@@ -1322,7 +1322,7 @@ static ssize_t metrics_bytes_sent_show(s
39321 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39322 struct dlfb_data *dev = fb_info->par;
39323 return snprintf(buf, PAGE_SIZE, "%u\n",
39324- atomic_read(&dev->bytes_sent));
39325+ atomic_read_unchecked(&dev->bytes_sent));
39326 }
39327
39328 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
39329@@ -1330,7 +1330,7 @@ static ssize_t metrics_cpu_kcycles_used_
39330 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39331 struct dlfb_data *dev = fb_info->par;
39332 return snprintf(buf, PAGE_SIZE, "%u\n",
39333- atomic_read(&dev->cpu_kcycles_used));
39334+ atomic_read_unchecked(&dev->cpu_kcycles_used));
39335 }
39336
39337 static ssize_t edid_show(
39338@@ -1387,10 +1387,10 @@ static ssize_t metrics_reset_store(struc
39339 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39340 struct dlfb_data *dev = fb_info->par;
39341
39342- atomic_set(&dev->bytes_rendered, 0);
39343- atomic_set(&dev->bytes_identical, 0);
39344- atomic_set(&dev->bytes_sent, 0);
39345- atomic_set(&dev->cpu_kcycles_used, 0);
39346+ atomic_set_unchecked(&dev->bytes_rendered, 0);
39347+ atomic_set_unchecked(&dev->bytes_identical, 0);
39348+ atomic_set_unchecked(&dev->bytes_sent, 0);
39349+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
39350
39351 return count;
39352 }
39353diff -urNp linux-3.1.1/drivers/video/uvesafb.c linux-3.1.1/drivers/video/uvesafb.c
39354--- linux-3.1.1/drivers/video/uvesafb.c 2011-11-11 15:19:27.000000000 -0500
39355+++ linux-3.1.1/drivers/video/uvesafb.c 2011-11-16 18:39:08.000000000 -0500
39356@@ -19,6 +19,7 @@
39357 #include <linux/io.h>
39358 #include <linux/mutex.h>
39359 #include <linux/slab.h>
39360+#include <linux/moduleloader.h>
39361 #include <video/edid.h>
39362 #include <video/uvesafb.h>
39363 #ifdef CONFIG_X86
39364@@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
39365 NULL,
39366 };
39367
39368- return call_usermodehelper(v86d_path, argv, envp, 1);
39369+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
39370 }
39371
39372 /*
39373@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(
39374 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
39375 par->pmi_setpal = par->ypan = 0;
39376 } else {
39377+
39378+#ifdef CONFIG_PAX_KERNEXEC
39379+#ifdef CONFIG_MODULES
39380+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
39381+#endif
39382+ if (!par->pmi_code) {
39383+ par->pmi_setpal = par->ypan = 0;
39384+ return 0;
39385+ }
39386+#endif
39387+
39388 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
39389 + task->t.regs.edi);
39390+
39391+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39392+ pax_open_kernel();
39393+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
39394+ pax_close_kernel();
39395+
39396+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
39397+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
39398+#else
39399 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
39400 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
39401+#endif
39402+
39403 printk(KERN_INFO "uvesafb: protected mode interface info at "
39404 "%04x:%04x\n",
39405 (u16)task->t.regs.es, (u16)task->t.regs.edi);
39406@@ -1821,6 +1844,11 @@ out:
39407 if (par->vbe_modes)
39408 kfree(par->vbe_modes);
39409
39410+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39411+ if (par->pmi_code)
39412+ module_free_exec(NULL, par->pmi_code);
39413+#endif
39414+
39415 framebuffer_release(info);
39416 return err;
39417 }
39418@@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platfor
39419 kfree(par->vbe_state_orig);
39420 if (par->vbe_state_saved)
39421 kfree(par->vbe_state_saved);
39422+
39423+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39424+ if (par->pmi_code)
39425+ module_free_exec(NULL, par->pmi_code);
39426+#endif
39427+
39428 }
39429
39430 framebuffer_release(info);
39431diff -urNp linux-3.1.1/drivers/video/vesafb.c linux-3.1.1/drivers/video/vesafb.c
39432--- linux-3.1.1/drivers/video/vesafb.c 2011-11-11 15:19:27.000000000 -0500
39433+++ linux-3.1.1/drivers/video/vesafb.c 2011-11-16 18:39:08.000000000 -0500
39434@@ -9,6 +9,7 @@
39435 */
39436
39437 #include <linux/module.h>
39438+#include <linux/moduleloader.h>
39439 #include <linux/kernel.h>
39440 #include <linux/errno.h>
39441 #include <linux/string.h>
39442@@ -52,8 +53,8 @@ static int vram_remap __initdata; /*
39443 static int vram_total __initdata; /* Set total amount of memory */
39444 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
39445 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
39446-static void (*pmi_start)(void) __read_mostly;
39447-static void (*pmi_pal) (void) __read_mostly;
39448+static void (*pmi_start)(void) __read_only;
39449+static void (*pmi_pal) (void) __read_only;
39450 static int depth __read_mostly;
39451 static int vga_compat __read_mostly;
39452 /* --------------------------------------------------------------------- */
39453@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
39454 unsigned int size_vmode;
39455 unsigned int size_remap;
39456 unsigned int size_total;
39457+ void *pmi_code = NULL;
39458
39459 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
39460 return -ENODEV;
39461@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
39462 size_remap = size_total;
39463 vesafb_fix.smem_len = size_remap;
39464
39465-#ifndef __i386__
39466- screen_info.vesapm_seg = 0;
39467-#endif
39468-
39469 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
39470 printk(KERN_WARNING
39471 "vesafb: cannot reserve video memory at 0x%lx\n",
39472@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct pl
39473 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
39474 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
39475
39476+#ifdef __i386__
39477+
39478+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39479+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
39480+ if (!pmi_code)
39481+#elif !defined(CONFIG_PAX_KERNEXEC)
39482+ if (0)
39483+#endif
39484+
39485+#endif
39486+ screen_info.vesapm_seg = 0;
39487+
39488 if (screen_info.vesapm_seg) {
39489- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
39490- screen_info.vesapm_seg,screen_info.vesapm_off);
39491+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
39492+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
39493 }
39494
39495 if (screen_info.vesapm_seg < 0xc000)
39496@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct pl
39497
39498 if (ypan || pmi_setpal) {
39499 unsigned short *pmi_base;
39500+
39501 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
39502- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
39503- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
39504+
39505+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39506+ pax_open_kernel();
39507+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
39508+#else
39509+ pmi_code = pmi_base;
39510+#endif
39511+
39512+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
39513+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
39514+
39515+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39516+ pmi_start = ktva_ktla(pmi_start);
39517+ pmi_pal = ktva_ktla(pmi_pal);
39518+ pax_close_kernel();
39519+#endif
39520+
39521 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
39522 if (pmi_base[3]) {
39523 printk(KERN_INFO "vesafb: pmi: ports = ");
39524@@ -488,6 +514,11 @@ static int __init vesafb_probe(struct pl
39525 info->node, info->fix.id);
39526 return 0;
39527 err:
39528+
39529+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39530+ module_free_exec(NULL, pmi_code);
39531+#endif
39532+
39533 if (info->screen_base)
39534 iounmap(info->screen_base);
39535 framebuffer_release(info);
39536diff -urNp linux-3.1.1/drivers/video/via/via_clock.h linux-3.1.1/drivers/video/via/via_clock.h
39537--- linux-3.1.1/drivers/video/via/via_clock.h 2011-11-11 15:19:27.000000000 -0500
39538+++ linux-3.1.1/drivers/video/via/via_clock.h 2011-11-16 18:39:08.000000000 -0500
39539@@ -56,7 +56,7 @@ struct via_clock {
39540
39541 void (*set_engine_pll_state)(u8 state);
39542 void (*set_engine_pll)(struct via_pll_config config);
39543-};
39544+} __no_const;
39545
39546
39547 static inline u32 get_pll_internal_frequency(u32 ref_freq,
39548diff -urNp linux-3.1.1/drivers/virtio/virtio_balloon.c linux-3.1.1/drivers/virtio/virtio_balloon.c
39549--- linux-3.1.1/drivers/virtio/virtio_balloon.c 2011-11-11 15:19:27.000000000 -0500
39550+++ linux-3.1.1/drivers/virtio/virtio_balloon.c 2011-11-16 18:40:29.000000000 -0500
39551@@ -174,6 +174,8 @@ static void update_balloon_stats(struct
39552 struct sysinfo i;
39553 int idx = 0;
39554
39555+ pax_track_stack();
39556+
39557 all_vm_events(events);
39558 si_meminfo(&i);
39559
39560diff -urNp linux-3.1.1/drivers/xen/xen-pciback/conf_space.h linux-3.1.1/drivers/xen/xen-pciback/conf_space.h
39561--- linux-3.1.1/drivers/xen/xen-pciback/conf_space.h 2011-11-11 15:19:27.000000000 -0500
39562+++ linux-3.1.1/drivers/xen/xen-pciback/conf_space.h 2011-11-16 18:39:08.000000000 -0500
39563@@ -44,15 +44,15 @@ struct config_field {
39564 struct {
39565 conf_dword_write write;
39566 conf_dword_read read;
39567- } dw;
39568+ } __no_const dw;
39569 struct {
39570 conf_word_write write;
39571 conf_word_read read;
39572- } w;
39573+ } __no_const w;
39574 struct {
39575 conf_byte_write write;
39576 conf_byte_read read;
39577- } b;
39578+ } __no_const b;
39579 } u;
39580 struct list_head list;
39581 };
39582diff -urNp linux-3.1.1/fs/9p/vfs_inode.c linux-3.1.1/fs/9p/vfs_inode.c
39583--- linux-3.1.1/fs/9p/vfs_inode.c 2011-11-11 15:19:27.000000000 -0500
39584+++ linux-3.1.1/fs/9p/vfs_inode.c 2011-11-16 18:39:08.000000000 -0500
39585@@ -1288,7 +1288,7 @@ static void *v9fs_vfs_follow_link(struct
39586 void
39587 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
39588 {
39589- char *s = nd_get_link(nd);
39590+ const char *s = nd_get_link(nd);
39591
39592 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
39593 IS_ERR(s) ? "<error>" : s);
39594diff -urNp linux-3.1.1/fs/aio.c linux-3.1.1/fs/aio.c
39595--- linux-3.1.1/fs/aio.c 2011-11-11 15:19:27.000000000 -0500
39596+++ linux-3.1.1/fs/aio.c 2011-11-16 18:40:29.000000000 -0500
39597@@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx
39598 size += sizeof(struct io_event) * nr_events;
39599 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
39600
39601- if (nr_pages < 0)
39602+ if (nr_pages <= 0)
39603 return -EINVAL;
39604
39605 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
39606@@ -1088,6 +1088,8 @@ static int read_events(struct kioctx *ct
39607 struct aio_timeout to;
39608 int retry = 0;
39609
39610+ pax_track_stack();
39611+
39612 /* needed to zero any padding within an entry (there shouldn't be
39613 * any, but C is fun!
39614 */
39615@@ -1381,22 +1383,27 @@ static ssize_t aio_fsync(struct kiocb *i
39616 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
39617 {
39618 ssize_t ret;
39619+ struct iovec iovstack;
39620
39621 #ifdef CONFIG_COMPAT
39622 if (compat)
39623 ret = compat_rw_copy_check_uvector(type,
39624 (struct compat_iovec __user *)kiocb->ki_buf,
39625- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39626+ kiocb->ki_nbytes, 1, &iovstack,
39627 &kiocb->ki_iovec);
39628 else
39629 #endif
39630 ret = rw_copy_check_uvector(type,
39631 (struct iovec __user *)kiocb->ki_buf,
39632- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39633+ kiocb->ki_nbytes, 1, &iovstack,
39634 &kiocb->ki_iovec);
39635 if (ret < 0)
39636 goto out;
39637
39638+ if (kiocb->ki_iovec == &iovstack) {
39639+ kiocb->ki_inline_vec = iovstack;
39640+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
39641+ }
39642 kiocb->ki_nr_segs = kiocb->ki_nbytes;
39643 kiocb->ki_cur_seg = 0;
39644 /* ki_nbytes/left now reflect bytes instead of segs */
39645diff -urNp linux-3.1.1/fs/attr.c linux-3.1.1/fs/attr.c
39646--- linux-3.1.1/fs/attr.c 2011-11-11 15:19:27.000000000 -0500
39647+++ linux-3.1.1/fs/attr.c 2011-11-16 18:40:29.000000000 -0500
39648@@ -98,6 +98,7 @@ int inode_newsize_ok(const struct inode
39649 unsigned long limit;
39650
39651 limit = rlimit(RLIMIT_FSIZE);
39652+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
39653 if (limit != RLIM_INFINITY && offset > limit)
39654 goto out_sig;
39655 if (offset > inode->i_sb->s_maxbytes)
39656diff -urNp linux-3.1.1/fs/autofs4/waitq.c linux-3.1.1/fs/autofs4/waitq.c
39657--- linux-3.1.1/fs/autofs4/waitq.c 2011-11-11 15:19:27.000000000 -0500
39658+++ linux-3.1.1/fs/autofs4/waitq.c 2011-11-16 18:39:08.000000000 -0500
39659@@ -60,7 +60,7 @@ static int autofs4_write(struct file *fi
39660 {
39661 unsigned long sigpipe, flags;
39662 mm_segment_t fs;
39663- const char *data = (const char *)addr;
39664+ const char __user *data = (const char __force_user *)addr;
39665 ssize_t wr = 0;
39666
39667 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
39668diff -urNp linux-3.1.1/fs/befs/linuxvfs.c linux-3.1.1/fs/befs/linuxvfs.c
39669--- linux-3.1.1/fs/befs/linuxvfs.c 2011-11-11 15:19:27.000000000 -0500
39670+++ linux-3.1.1/fs/befs/linuxvfs.c 2011-11-16 18:39:08.000000000 -0500
39671@@ -503,7 +503,7 @@ static void befs_put_link(struct dentry
39672 {
39673 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
39674 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
39675- char *link = nd_get_link(nd);
39676+ const char *link = nd_get_link(nd);
39677 if (!IS_ERR(link))
39678 kfree(link);
39679 }
39680diff -urNp linux-3.1.1/fs/binfmt_aout.c linux-3.1.1/fs/binfmt_aout.c
39681--- linux-3.1.1/fs/binfmt_aout.c 2011-11-11 15:19:27.000000000 -0500
39682+++ linux-3.1.1/fs/binfmt_aout.c 2011-11-16 18:40:29.000000000 -0500
39683@@ -16,6 +16,7 @@
39684 #include <linux/string.h>
39685 #include <linux/fs.h>
39686 #include <linux/file.h>
39687+#include <linux/security.h>
39688 #include <linux/stat.h>
39689 #include <linux/fcntl.h>
39690 #include <linux/ptrace.h>
39691@@ -86,6 +87,8 @@ static int aout_core_dump(struct coredum
39692 #endif
39693 # define START_STACK(u) ((void __user *)u.start_stack)
39694
39695+ memset(&dump, 0, sizeof(dump));
39696+
39697 fs = get_fs();
39698 set_fs(KERNEL_DS);
39699 has_dumped = 1;
39700@@ -97,10 +100,12 @@ static int aout_core_dump(struct coredum
39701
39702 /* If the size of the dump file exceeds the rlimit, then see what would happen
39703 if we wrote the stack, but not the data area. */
39704+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
39705 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
39706 dump.u_dsize = 0;
39707
39708 /* Make sure we have enough room to write the stack and data areas. */
39709+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
39710 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
39711 dump.u_ssize = 0;
39712
39713@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux
39714 rlim = rlimit(RLIMIT_DATA);
39715 if (rlim >= RLIM_INFINITY)
39716 rlim = ~0;
39717+
39718+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
39719 if (ex.a_data + ex.a_bss > rlim)
39720 return -ENOMEM;
39721
39722@@ -262,6 +269,27 @@ static int load_aout_binary(struct linux
39723 install_exec_creds(bprm);
39724 current->flags &= ~PF_FORKNOEXEC;
39725
39726+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39727+ current->mm->pax_flags = 0UL;
39728+#endif
39729+
39730+#ifdef CONFIG_PAX_PAGEEXEC
39731+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
39732+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
39733+
39734+#ifdef CONFIG_PAX_EMUTRAMP
39735+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
39736+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
39737+#endif
39738+
39739+#ifdef CONFIG_PAX_MPROTECT
39740+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
39741+ current->mm->pax_flags |= MF_PAX_MPROTECT;
39742+#endif
39743+
39744+ }
39745+#endif
39746+
39747 if (N_MAGIC(ex) == OMAGIC) {
39748 unsigned long text_addr, map_size;
39749 loff_t pos;
39750@@ -334,7 +362,7 @@ static int load_aout_binary(struct linux
39751
39752 down_write(&current->mm->mmap_sem);
39753 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
39754- PROT_READ | PROT_WRITE | PROT_EXEC,
39755+ PROT_READ | PROT_WRITE,
39756 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
39757 fd_offset + ex.a_text);
39758 up_write(&current->mm->mmap_sem);
39759diff -urNp linux-3.1.1/fs/binfmt_elf.c linux-3.1.1/fs/binfmt_elf.c
39760--- linux-3.1.1/fs/binfmt_elf.c 2011-11-11 15:19:27.000000000 -0500
39761+++ linux-3.1.1/fs/binfmt_elf.c 2011-11-16 18:40:29.000000000 -0500
39762@@ -51,6 +51,10 @@ static int elf_core_dump(struct coredump
39763 #define elf_core_dump NULL
39764 #endif
39765
39766+#ifdef CONFIG_PAX_MPROTECT
39767+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
39768+#endif
39769+
39770 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
39771 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
39772 #else
39773@@ -70,6 +74,11 @@ static struct linux_binfmt elf_format =
39774 .load_binary = load_elf_binary,
39775 .load_shlib = load_elf_library,
39776 .core_dump = elf_core_dump,
39777+
39778+#ifdef CONFIG_PAX_MPROTECT
39779+ .handle_mprotect= elf_handle_mprotect,
39780+#endif
39781+
39782 .min_coredump = ELF_EXEC_PAGESIZE,
39783 };
39784
39785@@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
39786
39787 static int set_brk(unsigned long start, unsigned long end)
39788 {
39789+ unsigned long e = end;
39790+
39791 start = ELF_PAGEALIGN(start);
39792 end = ELF_PAGEALIGN(end);
39793 if (end > start) {
39794@@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
39795 if (BAD_ADDR(addr))
39796 return addr;
39797 }
39798- current->mm->start_brk = current->mm->brk = end;
39799+ current->mm->start_brk = current->mm->brk = e;
39800 return 0;
39801 }
39802
39803@@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
39804 elf_addr_t __user *u_rand_bytes;
39805 const char *k_platform = ELF_PLATFORM;
39806 const char *k_base_platform = ELF_BASE_PLATFORM;
39807- unsigned char k_rand_bytes[16];
39808+ u32 k_rand_bytes[4];
39809 int items;
39810 elf_addr_t *elf_info;
39811 int ei_index = 0;
39812 const struct cred *cred = current_cred();
39813 struct vm_area_struct *vma;
39814+ unsigned long saved_auxv[AT_VECTOR_SIZE];
39815+
39816+ pax_track_stack();
39817
39818 /*
39819 * In some cases (e.g. Hyper-Threading), we want to avoid L1
39820@@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
39821 * Generate 16 random bytes for userspace PRNG seeding.
39822 */
39823 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
39824- u_rand_bytes = (elf_addr_t __user *)
39825- STACK_ALLOC(p, sizeof(k_rand_bytes));
39826+ srandom32(k_rand_bytes[0] ^ random32());
39827+ srandom32(k_rand_bytes[1] ^ random32());
39828+ srandom32(k_rand_bytes[2] ^ random32());
39829+ srandom32(k_rand_bytes[3] ^ random32());
39830+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
39831+ u_rand_bytes = (elf_addr_t __user *) p;
39832 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
39833 return -EFAULT;
39834
39835@@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
39836 return -EFAULT;
39837 current->mm->env_end = p;
39838
39839+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
39840+
39841 /* Put the elf_info on the stack in the right place. */
39842 sp = (elf_addr_t __user *)envp + 1;
39843- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
39844+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
39845 return -EFAULT;
39846 return 0;
39847 }
39848@@ -381,10 +401,10 @@ static unsigned long load_elf_interp(str
39849 {
39850 struct elf_phdr *elf_phdata;
39851 struct elf_phdr *eppnt;
39852- unsigned long load_addr = 0;
39853+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
39854 int load_addr_set = 0;
39855 unsigned long last_bss = 0, elf_bss = 0;
39856- unsigned long error = ~0UL;
39857+ unsigned long error = -EINVAL;
39858 unsigned long total_size;
39859 int retval, i, size;
39860
39861@@ -430,6 +450,11 @@ static unsigned long load_elf_interp(str
39862 goto out_close;
39863 }
39864
39865+#ifdef CONFIG_PAX_SEGMEXEC
39866+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
39867+ pax_task_size = SEGMEXEC_TASK_SIZE;
39868+#endif
39869+
39870 eppnt = elf_phdata;
39871 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
39872 if (eppnt->p_type == PT_LOAD) {
39873@@ -473,8 +498,8 @@ static unsigned long load_elf_interp(str
39874 k = load_addr + eppnt->p_vaddr;
39875 if (BAD_ADDR(k) ||
39876 eppnt->p_filesz > eppnt->p_memsz ||
39877- eppnt->p_memsz > TASK_SIZE ||
39878- TASK_SIZE - eppnt->p_memsz < k) {
39879+ eppnt->p_memsz > pax_task_size ||
39880+ pax_task_size - eppnt->p_memsz < k) {
39881 error = -ENOMEM;
39882 goto out_close;
39883 }
39884@@ -528,6 +553,193 @@ out:
39885 return error;
39886 }
39887
39888+#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
39889+static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
39890+{
39891+ unsigned long pax_flags = 0UL;
39892+
39893+#ifdef CONFIG_PAX_PAGEEXEC
39894+ if (elf_phdata->p_flags & PF_PAGEEXEC)
39895+ pax_flags |= MF_PAX_PAGEEXEC;
39896+#endif
39897+
39898+#ifdef CONFIG_PAX_SEGMEXEC
39899+ if (elf_phdata->p_flags & PF_SEGMEXEC)
39900+ pax_flags |= MF_PAX_SEGMEXEC;
39901+#endif
39902+
39903+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39904+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39905+ if ((__supported_pte_mask & _PAGE_NX))
39906+ pax_flags &= ~MF_PAX_SEGMEXEC;
39907+ else
39908+ pax_flags &= ~MF_PAX_PAGEEXEC;
39909+ }
39910+#endif
39911+
39912+#ifdef CONFIG_PAX_EMUTRAMP
39913+ if (elf_phdata->p_flags & PF_EMUTRAMP)
39914+ pax_flags |= MF_PAX_EMUTRAMP;
39915+#endif
39916+
39917+#ifdef CONFIG_PAX_MPROTECT
39918+ if (elf_phdata->p_flags & PF_MPROTECT)
39919+ pax_flags |= MF_PAX_MPROTECT;
39920+#endif
39921+
39922+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39923+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
39924+ pax_flags |= MF_PAX_RANDMMAP;
39925+#endif
39926+
39927+ return pax_flags;
39928+}
39929+#endif
39930+
39931+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39932+static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
39933+{
39934+ unsigned long pax_flags = 0UL;
39935+
39936+#ifdef CONFIG_PAX_PAGEEXEC
39937+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
39938+ pax_flags |= MF_PAX_PAGEEXEC;
39939+#endif
39940+
39941+#ifdef CONFIG_PAX_SEGMEXEC
39942+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
39943+ pax_flags |= MF_PAX_SEGMEXEC;
39944+#endif
39945+
39946+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39947+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39948+ if ((__supported_pte_mask & _PAGE_NX))
39949+ pax_flags &= ~MF_PAX_SEGMEXEC;
39950+ else
39951+ pax_flags &= ~MF_PAX_PAGEEXEC;
39952+ }
39953+#endif
39954+
39955+#ifdef CONFIG_PAX_EMUTRAMP
39956+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
39957+ pax_flags |= MF_PAX_EMUTRAMP;
39958+#endif
39959+
39960+#ifdef CONFIG_PAX_MPROTECT
39961+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
39962+ pax_flags |= MF_PAX_MPROTECT;
39963+#endif
39964+
39965+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39966+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
39967+ pax_flags |= MF_PAX_RANDMMAP;
39968+#endif
39969+
39970+ return pax_flags;
39971+}
39972+#endif
39973+
39974+#ifdef CONFIG_PAX_EI_PAX
39975+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
39976+{
39977+ unsigned long pax_flags = 0UL;
39978+
39979+#ifdef CONFIG_PAX_PAGEEXEC
39980+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
39981+ pax_flags |= MF_PAX_PAGEEXEC;
39982+#endif
39983+
39984+#ifdef CONFIG_PAX_SEGMEXEC
39985+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
39986+ pax_flags |= MF_PAX_SEGMEXEC;
39987+#endif
39988+
39989+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39990+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39991+ if ((__supported_pte_mask & _PAGE_NX))
39992+ pax_flags &= ~MF_PAX_SEGMEXEC;
39993+ else
39994+ pax_flags &= ~MF_PAX_PAGEEXEC;
39995+ }
39996+#endif
39997+
39998+#ifdef CONFIG_PAX_EMUTRAMP
39999+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
40000+ pax_flags |= MF_PAX_EMUTRAMP;
40001+#endif
40002+
40003+#ifdef CONFIG_PAX_MPROTECT
40004+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
40005+ pax_flags |= MF_PAX_MPROTECT;
40006+#endif
40007+
40008+#ifdef CONFIG_PAX_ASLR
40009+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
40010+ pax_flags |= MF_PAX_RANDMMAP;
40011+#endif
40012+
40013+ return pax_flags;
40014+}
40015+#endif
40016+
40017+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
40018+static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
40019+{
40020+ unsigned long pax_flags = 0UL;
40021+
40022+#ifdef CONFIG_PAX_PT_PAX_FLAGS
40023+ unsigned long i;
40024+ int found_flags = 0;
40025+#endif
40026+
40027+#ifdef CONFIG_PAX_EI_PAX
40028+ pax_flags = pax_parse_ei_pax(elf_ex);
40029+#endif
40030+
40031+#ifdef CONFIG_PAX_PT_PAX_FLAGS
40032+ for (i = 0UL; i < elf_ex->e_phnum; i++)
40033+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
40034+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
40035+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
40036+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
40037+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
40038+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
40039+ return -EINVAL;
40040+
40041+#ifdef CONFIG_PAX_SOFTMODE
40042+ if (pax_softmode)
40043+ pax_flags = pax_parse_softmode(&elf_phdata[i]);
40044+ else
40045+#endif
40046+
40047+ pax_flags = pax_parse_hardmode(&elf_phdata[i]);
40048+ found_flags = 1;
40049+ break;
40050+ }
40051+#endif
40052+
40053+#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
40054+ if (found_flags == 0) {
40055+ struct elf_phdr phdr;
40056+ memset(&phdr, 0, sizeof(phdr));
40057+ phdr.p_flags = PF_NOEMUTRAMP;
40058+#ifdef CONFIG_PAX_SOFTMODE
40059+ if (pax_softmode)
40060+ pax_flags = pax_parse_softmode(&phdr);
40061+ else
40062+#endif
40063+ pax_flags = pax_parse_hardmode(&phdr);
40064+ }
40065+#endif
40066+
40067+ if (0 > pax_check_flags(&pax_flags))
40068+ return -EINVAL;
40069+
40070+ current->mm->pax_flags = pax_flags;
40071+ return 0;
40072+}
40073+#endif
40074+
40075 /*
40076 * These are the functions used to load ELF style executables and shared
40077 * libraries. There is no binary dependent code anywhere else.
40078@@ -544,6 +756,11 @@ static unsigned long randomize_stack_top
40079 {
40080 unsigned int random_variable = 0;
40081
40082+#ifdef CONFIG_PAX_RANDUSTACK
40083+ if (randomize_va_space)
40084+ return stack_top - current->mm->delta_stack;
40085+#endif
40086+
40087 if ((current->flags & PF_RANDOMIZE) &&
40088 !(current->personality & ADDR_NO_RANDOMIZE)) {
40089 random_variable = get_random_int() & STACK_RND_MASK;
40090@@ -562,7 +779,7 @@ static int load_elf_binary(struct linux_
40091 unsigned long load_addr = 0, load_bias = 0;
40092 int load_addr_set = 0;
40093 char * elf_interpreter = NULL;
40094- unsigned long error;
40095+ unsigned long error = 0;
40096 struct elf_phdr *elf_ppnt, *elf_phdata;
40097 unsigned long elf_bss, elf_brk;
40098 int retval, i;
40099@@ -572,11 +789,11 @@ static int load_elf_binary(struct linux_
40100 unsigned long start_code, end_code, start_data, end_data;
40101 unsigned long reloc_func_desc __maybe_unused = 0;
40102 int executable_stack = EXSTACK_DEFAULT;
40103- unsigned long def_flags = 0;
40104 struct {
40105 struct elfhdr elf_ex;
40106 struct elfhdr interp_elf_ex;
40107 } *loc;
40108+ unsigned long pax_task_size = TASK_SIZE;
40109
40110 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
40111 if (!loc) {
40112@@ -713,11 +930,81 @@ static int load_elf_binary(struct linux_
40113
40114 /* OK, This is the point of no return */
40115 current->flags &= ~PF_FORKNOEXEC;
40116- current->mm->def_flags = def_flags;
40117+
40118+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
40119+ current->mm->pax_flags = 0UL;
40120+#endif
40121+
40122+#ifdef CONFIG_PAX_DLRESOLVE
40123+ current->mm->call_dl_resolve = 0UL;
40124+#endif
40125+
40126+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
40127+ current->mm->call_syscall = 0UL;
40128+#endif
40129+
40130+#ifdef CONFIG_PAX_ASLR
40131+ current->mm->delta_mmap = 0UL;
40132+ current->mm->delta_stack = 0UL;
40133+#endif
40134+
40135+ current->mm->def_flags = 0;
40136+
40137+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
40138+ if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
40139+ send_sig(SIGKILL, current, 0);
40140+ goto out_free_dentry;
40141+ }
40142+#endif
40143+
40144+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
40145+ pax_set_initial_flags(bprm);
40146+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
40147+ if (pax_set_initial_flags_func)
40148+ (pax_set_initial_flags_func)(bprm);
40149+#endif
40150+
40151+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40152+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
40153+ current->mm->context.user_cs_limit = PAGE_SIZE;
40154+ current->mm->def_flags |= VM_PAGEEXEC;
40155+ }
40156+#endif
40157+
40158+#ifdef CONFIG_PAX_SEGMEXEC
40159+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
40160+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
40161+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
40162+ pax_task_size = SEGMEXEC_TASK_SIZE;
40163+ current->mm->def_flags |= VM_NOHUGEPAGE;
40164+ }
40165+#endif
40166+
40167+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
40168+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40169+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
40170+ put_cpu();
40171+ }
40172+#endif
40173
40174 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
40175 may depend on the personality. */
40176 SET_PERSONALITY(loc->elf_ex);
40177+
40178+#ifdef CONFIG_PAX_ASLR
40179+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
40180+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
40181+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
40182+ }
40183+#endif
40184+
40185+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
40186+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40187+ executable_stack = EXSTACK_DISABLE_X;
40188+ current->personality &= ~READ_IMPLIES_EXEC;
40189+ } else
40190+#endif
40191+
40192 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
40193 current->personality |= READ_IMPLIES_EXEC;
40194
40195@@ -808,6 +1095,20 @@ static int load_elf_binary(struct linux_
40196 #else
40197 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
40198 #endif
40199+
40200+#ifdef CONFIG_PAX_RANDMMAP
40201+ /* PaX: randomize base address at the default exe base if requested */
40202+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
40203+#ifdef CONFIG_SPARC64
40204+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
40205+#else
40206+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
40207+#endif
40208+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
40209+ elf_flags |= MAP_FIXED;
40210+ }
40211+#endif
40212+
40213 }
40214
40215 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
40216@@ -840,9 +1141,9 @@ static int load_elf_binary(struct linux_
40217 * allowed task size. Note that p_filesz must always be
40218 * <= p_memsz so it is only necessary to check p_memsz.
40219 */
40220- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
40221- elf_ppnt->p_memsz > TASK_SIZE ||
40222- TASK_SIZE - elf_ppnt->p_memsz < k) {
40223+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
40224+ elf_ppnt->p_memsz > pax_task_size ||
40225+ pax_task_size - elf_ppnt->p_memsz < k) {
40226 /* set_brk can never work. Avoid overflows. */
40227 send_sig(SIGKILL, current, 0);
40228 retval = -EINVAL;
40229@@ -870,6 +1171,11 @@ static int load_elf_binary(struct linux_
40230 start_data += load_bias;
40231 end_data += load_bias;
40232
40233+#ifdef CONFIG_PAX_RANDMMAP
40234+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
40235+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
40236+#endif
40237+
40238 /* Calling set_brk effectively mmaps the pages that we need
40239 * for the bss and break sections. We must do this before
40240 * mapping in the interpreter, to make sure it doesn't wind
40241@@ -881,9 +1187,11 @@ static int load_elf_binary(struct linux_
40242 goto out_free_dentry;
40243 }
40244 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
40245- send_sig(SIGSEGV, current, 0);
40246- retval = -EFAULT; /* Nobody gets to see this, but.. */
40247- goto out_free_dentry;
40248+ /*
40249+ * This bss-zeroing can fail if the ELF
40250+ * file specifies odd protections. So
40251+ * we don't check the return value
40252+ */
40253 }
40254
40255 if (elf_interpreter) {
40256@@ -1098,7 +1406,7 @@ out:
40257 * Decide what to dump of a segment, part, all or none.
40258 */
40259 static unsigned long vma_dump_size(struct vm_area_struct *vma,
40260- unsigned long mm_flags)
40261+ unsigned long mm_flags, long signr)
40262 {
40263 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
40264
40265@@ -1132,7 +1440,7 @@ static unsigned long vma_dump_size(struc
40266 if (vma->vm_file == NULL)
40267 return 0;
40268
40269- if (FILTER(MAPPED_PRIVATE))
40270+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
40271 goto whole;
40272
40273 /*
40274@@ -1354,9 +1662,9 @@ static void fill_auxv_note(struct memelf
40275 {
40276 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
40277 int i = 0;
40278- do
40279+ do {
40280 i += 2;
40281- while (auxv[i - 2] != AT_NULL);
40282+ } while (auxv[i - 2] != AT_NULL);
40283 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
40284 }
40285
40286@@ -1862,14 +2170,14 @@ static void fill_extnum_info(struct elfh
40287 }
40288
40289 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
40290- unsigned long mm_flags)
40291+ struct coredump_params *cprm)
40292 {
40293 struct vm_area_struct *vma;
40294 size_t size = 0;
40295
40296 for (vma = first_vma(current, gate_vma); vma != NULL;
40297 vma = next_vma(vma, gate_vma))
40298- size += vma_dump_size(vma, mm_flags);
40299+ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40300 return size;
40301 }
40302
40303@@ -1963,7 +2271,7 @@ static int elf_core_dump(struct coredump
40304
40305 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
40306
40307- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
40308+ offset += elf_core_vma_data_size(gate_vma, cprm);
40309 offset += elf_core_extra_data_size();
40310 e_shoff = offset;
40311
40312@@ -1977,10 +2285,12 @@ static int elf_core_dump(struct coredump
40313 offset = dataoff;
40314
40315 size += sizeof(*elf);
40316+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
40317 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
40318 goto end_coredump;
40319
40320 size += sizeof(*phdr4note);
40321+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
40322 if (size > cprm->limit
40323 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
40324 goto end_coredump;
40325@@ -1994,7 +2304,7 @@ static int elf_core_dump(struct coredump
40326 phdr.p_offset = offset;
40327 phdr.p_vaddr = vma->vm_start;
40328 phdr.p_paddr = 0;
40329- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
40330+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40331 phdr.p_memsz = vma->vm_end - vma->vm_start;
40332 offset += phdr.p_filesz;
40333 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
40334@@ -2005,6 +2315,7 @@ static int elf_core_dump(struct coredump
40335 phdr.p_align = ELF_EXEC_PAGESIZE;
40336
40337 size += sizeof(phdr);
40338+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
40339 if (size > cprm->limit
40340 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
40341 goto end_coredump;
40342@@ -2029,7 +2340,7 @@ static int elf_core_dump(struct coredump
40343 unsigned long addr;
40344 unsigned long end;
40345
40346- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
40347+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40348
40349 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
40350 struct page *page;
40351@@ -2038,6 +2349,7 @@ static int elf_core_dump(struct coredump
40352 page = get_dump_page(addr);
40353 if (page) {
40354 void *kaddr = kmap(page);
40355+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
40356 stop = ((size += PAGE_SIZE) > cprm->limit) ||
40357 !dump_write(cprm->file, kaddr,
40358 PAGE_SIZE);
40359@@ -2055,6 +2367,7 @@ static int elf_core_dump(struct coredump
40360
40361 if (e_phnum == PN_XNUM) {
40362 size += sizeof(*shdr4extnum);
40363+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
40364 if (size > cprm->limit
40365 || !dump_write(cprm->file, shdr4extnum,
40366 sizeof(*shdr4extnum)))
40367@@ -2075,6 +2388,97 @@ out:
40368
40369 #endif /* CONFIG_ELF_CORE */
40370
40371+#ifdef CONFIG_PAX_MPROTECT
40372+/* PaX: non-PIC ELF libraries need relocations on their executable segments
40373+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
40374+ * we'll remove VM_MAYWRITE for good on RELRO segments.
40375+ *
40376+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
40377+ * basis because we want to allow the common case and not the special ones.
40378+ */
40379+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
40380+{
40381+ struct elfhdr elf_h;
40382+ struct elf_phdr elf_p;
40383+ unsigned long i;
40384+ unsigned long oldflags;
40385+ bool is_textrel_rw, is_textrel_rx, is_relro;
40386+
40387+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
40388+ return;
40389+
40390+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
40391+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
40392+
40393+#ifdef CONFIG_PAX_ELFRELOCS
40394+ /* possible TEXTREL */
40395+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
40396+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
40397+#else
40398+ is_textrel_rw = false;
40399+ is_textrel_rx = false;
40400+#endif
40401+
40402+ /* possible RELRO */
40403+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
40404+
40405+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
40406+ return;
40407+
40408+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
40409+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
40410+
40411+#ifdef CONFIG_PAX_ETEXECRELOCS
40412+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40413+#else
40414+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
40415+#endif
40416+
40417+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40418+ !elf_check_arch(&elf_h) ||
40419+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
40420+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
40421+ return;
40422+
40423+ for (i = 0UL; i < elf_h.e_phnum; i++) {
40424+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
40425+ return;
40426+ switch (elf_p.p_type) {
40427+ case PT_DYNAMIC:
40428+ if (!is_textrel_rw && !is_textrel_rx)
40429+ continue;
40430+ i = 0UL;
40431+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
40432+ elf_dyn dyn;
40433+
40434+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
40435+ return;
40436+ if (dyn.d_tag == DT_NULL)
40437+ return;
40438+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
40439+ gr_log_textrel(vma);
40440+ if (is_textrel_rw)
40441+ vma->vm_flags |= VM_MAYWRITE;
40442+ else
40443+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
40444+ vma->vm_flags &= ~VM_MAYWRITE;
40445+ return;
40446+ }
40447+ i++;
40448+ }
40449+ return;
40450+
40451+ case PT_GNU_RELRO:
40452+ if (!is_relro)
40453+ continue;
40454+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
40455+ vma->vm_flags &= ~VM_MAYWRITE;
40456+ return;
40457+ }
40458+ }
40459+}
40460+#endif
40461+
40462 static int __init init_elf_binfmt(void)
40463 {
40464 return register_binfmt(&elf_format);
40465diff -urNp linux-3.1.1/fs/binfmt_flat.c linux-3.1.1/fs/binfmt_flat.c
40466--- linux-3.1.1/fs/binfmt_flat.c 2011-11-11 15:19:27.000000000 -0500
40467+++ linux-3.1.1/fs/binfmt_flat.c 2011-11-16 18:39:08.000000000 -0500
40468@@ -567,7 +567,9 @@ static int load_flat_file(struct linux_b
40469 realdatastart = (unsigned long) -ENOMEM;
40470 printk("Unable to allocate RAM for process data, errno %d\n",
40471 (int)-realdatastart);
40472+ down_write(&current->mm->mmap_sem);
40473 do_munmap(current->mm, textpos, text_len);
40474+ up_write(&current->mm->mmap_sem);
40475 ret = realdatastart;
40476 goto err;
40477 }
40478@@ -591,8 +593,10 @@ static int load_flat_file(struct linux_b
40479 }
40480 if (IS_ERR_VALUE(result)) {
40481 printk("Unable to read data+bss, errno %d\n", (int)-result);
40482+ down_write(&current->mm->mmap_sem);
40483 do_munmap(current->mm, textpos, text_len);
40484 do_munmap(current->mm, realdatastart, len);
40485+ up_write(&current->mm->mmap_sem);
40486 ret = result;
40487 goto err;
40488 }
40489@@ -661,8 +665,10 @@ static int load_flat_file(struct linux_b
40490 }
40491 if (IS_ERR_VALUE(result)) {
40492 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
40493+ down_write(&current->mm->mmap_sem);
40494 do_munmap(current->mm, textpos, text_len + data_len + extra +
40495 MAX_SHARED_LIBS * sizeof(unsigned long));
40496+ up_write(&current->mm->mmap_sem);
40497 ret = result;
40498 goto err;
40499 }
40500diff -urNp linux-3.1.1/fs/bio.c linux-3.1.1/fs/bio.c
40501--- linux-3.1.1/fs/bio.c 2011-11-11 15:19:27.000000000 -0500
40502+++ linux-3.1.1/fs/bio.c 2011-11-16 18:39:08.000000000 -0500
40503@@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct b
40504 const int read = bio_data_dir(bio) == READ;
40505 struct bio_map_data *bmd = bio->bi_private;
40506 int i;
40507- char *p = bmd->sgvecs[0].iov_base;
40508+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
40509
40510 __bio_for_each_segment(bvec, bio, i, 0) {
40511 char *addr = page_address(bvec->bv_page);
40512diff -urNp linux-3.1.1/fs/block_dev.c linux-3.1.1/fs/block_dev.c
40513--- linux-3.1.1/fs/block_dev.c 2011-11-11 15:19:27.000000000 -0500
40514+++ linux-3.1.1/fs/block_dev.c 2011-11-16 18:39:08.000000000 -0500
40515@@ -681,7 +681,7 @@ static bool bd_may_claim(struct block_de
40516 else if (bdev->bd_contains == bdev)
40517 return true; /* is a whole device which isn't held */
40518
40519- else if (whole->bd_holder == bd_may_claim)
40520+ else if (whole->bd_holder == (void *)bd_may_claim)
40521 return true; /* is a partition of a device that is being partitioned */
40522 else if (whole->bd_holder != NULL)
40523 return false; /* is a partition of a held device */
40524diff -urNp linux-3.1.1/fs/btrfs/ctree.c linux-3.1.1/fs/btrfs/ctree.c
40525--- linux-3.1.1/fs/btrfs/ctree.c 2011-11-11 15:19:27.000000000 -0500
40526+++ linux-3.1.1/fs/btrfs/ctree.c 2011-11-16 18:39:08.000000000 -0500
40527@@ -488,9 +488,12 @@ static noinline int __btrfs_cow_block(st
40528 free_extent_buffer(buf);
40529 add_root_to_dirty_list(root);
40530 } else {
40531- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
40532- parent_start = parent->start;
40533- else
40534+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
40535+ if (parent)
40536+ parent_start = parent->start;
40537+ else
40538+ parent_start = 0;
40539+ } else
40540 parent_start = 0;
40541
40542 WARN_ON(trans->transid != btrfs_header_generation(parent));
40543diff -urNp linux-3.1.1/fs/btrfs/inode.c linux-3.1.1/fs/btrfs/inode.c
40544--- linux-3.1.1/fs/btrfs/inode.c 2011-11-11 15:19:27.000000000 -0500
40545+++ linux-3.1.1/fs/btrfs/inode.c 2011-11-17 18:12:11.000000000 -0500
40546@@ -6922,7 +6922,7 @@ fail:
40547 return -ENOMEM;
40548 }
40549
40550-static int btrfs_getattr(struct vfsmount *mnt,
40551+int btrfs_getattr(struct vfsmount *mnt,
40552 struct dentry *dentry, struct kstat *stat)
40553 {
40554 struct inode *inode = dentry->d_inode;
40555@@ -6934,6 +6934,14 @@ static int btrfs_getattr(struct vfsmount
40556 return 0;
40557 }
40558
40559+EXPORT_SYMBOL(btrfs_getattr);
40560+
40561+dev_t get_btrfs_dev_from_inode(struct inode *inode)
40562+{
40563+ return BTRFS_I(inode)->root->anon_dev;
40564+}
40565+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
40566+
40567 /*
40568 * If a file is moved, it will inherit the cow and compression flags of the new
40569 * directory.
40570diff -urNp linux-3.1.1/fs/btrfs/ioctl.c linux-3.1.1/fs/btrfs/ioctl.c
40571--- linux-3.1.1/fs/btrfs/ioctl.c 2011-11-11 15:19:27.000000000 -0500
40572+++ linux-3.1.1/fs/btrfs/ioctl.c 2011-11-16 18:40:29.000000000 -0500
40573@@ -2704,9 +2704,12 @@ long btrfs_ioctl_space_info(struct btrfs
40574 for (i = 0; i < num_types; i++) {
40575 struct btrfs_space_info *tmp;
40576
40577+ /* Don't copy in more than we allocated */
40578 if (!slot_count)
40579 break;
40580
40581+ slot_count--;
40582+
40583 info = NULL;
40584 rcu_read_lock();
40585 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
40586@@ -2728,15 +2731,12 @@ long btrfs_ioctl_space_info(struct btrfs
40587 memcpy(dest, &space, sizeof(space));
40588 dest++;
40589 space_args.total_spaces++;
40590- slot_count--;
40591 }
40592- if (!slot_count)
40593- break;
40594 }
40595 up_read(&info->groups_sem);
40596 }
40597
40598- user_dest = (struct btrfs_ioctl_space_info *)
40599+ user_dest = (struct btrfs_ioctl_space_info __user *)
40600 (arg + sizeof(struct btrfs_ioctl_space_args));
40601
40602 if (copy_to_user(user_dest, dest_orig, alloc_size))
40603diff -urNp linux-3.1.1/fs/btrfs/relocation.c linux-3.1.1/fs/btrfs/relocation.c
40604--- linux-3.1.1/fs/btrfs/relocation.c 2011-11-11 15:19:27.000000000 -0500
40605+++ linux-3.1.1/fs/btrfs/relocation.c 2011-11-16 18:39:08.000000000 -0500
40606@@ -1242,7 +1242,7 @@ static int __update_reloc_root(struct bt
40607 }
40608 spin_unlock(&rc->reloc_root_tree.lock);
40609
40610- BUG_ON((struct btrfs_root *)node->data != root);
40611+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
40612
40613 if (!del) {
40614 spin_lock(&rc->reloc_root_tree.lock);
40615diff -urNp linux-3.1.1/fs/cachefiles/bind.c linux-3.1.1/fs/cachefiles/bind.c
40616--- linux-3.1.1/fs/cachefiles/bind.c 2011-11-11 15:19:27.000000000 -0500
40617+++ linux-3.1.1/fs/cachefiles/bind.c 2011-11-16 18:39:08.000000000 -0500
40618@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
40619 args);
40620
40621 /* start by checking things over */
40622- ASSERT(cache->fstop_percent >= 0 &&
40623- cache->fstop_percent < cache->fcull_percent &&
40624+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
40625 cache->fcull_percent < cache->frun_percent &&
40626 cache->frun_percent < 100);
40627
40628- ASSERT(cache->bstop_percent >= 0 &&
40629- cache->bstop_percent < cache->bcull_percent &&
40630+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
40631 cache->bcull_percent < cache->brun_percent &&
40632 cache->brun_percent < 100);
40633
40634diff -urNp linux-3.1.1/fs/cachefiles/daemon.c linux-3.1.1/fs/cachefiles/daemon.c
40635--- linux-3.1.1/fs/cachefiles/daemon.c 2011-11-11 15:19:27.000000000 -0500
40636+++ linux-3.1.1/fs/cachefiles/daemon.c 2011-11-16 18:39:08.000000000 -0500
40637@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(st
40638 if (n > buflen)
40639 return -EMSGSIZE;
40640
40641- if (copy_to_user(_buffer, buffer, n) != 0)
40642+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
40643 return -EFAULT;
40644
40645 return n;
40646@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(s
40647 if (test_bit(CACHEFILES_DEAD, &cache->flags))
40648 return -EIO;
40649
40650- if (datalen < 0 || datalen > PAGE_SIZE - 1)
40651+ if (datalen > PAGE_SIZE - 1)
40652 return -EOPNOTSUPP;
40653
40654 /* drag the command string into the kernel so we can parse it */
40655@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struc
40656 if (args[0] != '%' || args[1] != '\0')
40657 return -EINVAL;
40658
40659- if (fstop < 0 || fstop >= cache->fcull_percent)
40660+ if (fstop >= cache->fcull_percent)
40661 return cachefiles_daemon_range_error(cache, args);
40662
40663 cache->fstop_percent = fstop;
40664@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struc
40665 if (args[0] != '%' || args[1] != '\0')
40666 return -EINVAL;
40667
40668- if (bstop < 0 || bstop >= cache->bcull_percent)
40669+ if (bstop >= cache->bcull_percent)
40670 return cachefiles_daemon_range_error(cache, args);
40671
40672 cache->bstop_percent = bstop;
40673diff -urNp linux-3.1.1/fs/cachefiles/internal.h linux-3.1.1/fs/cachefiles/internal.h
40674--- linux-3.1.1/fs/cachefiles/internal.h 2011-11-11 15:19:27.000000000 -0500
40675+++ linux-3.1.1/fs/cachefiles/internal.h 2011-11-16 18:39:08.000000000 -0500
40676@@ -57,7 +57,7 @@ struct cachefiles_cache {
40677 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
40678 struct rb_root active_nodes; /* active nodes (can't be culled) */
40679 rwlock_t active_lock; /* lock for active_nodes */
40680- atomic_t gravecounter; /* graveyard uniquifier */
40681+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
40682 unsigned frun_percent; /* when to stop culling (% files) */
40683 unsigned fcull_percent; /* when to start culling (% files) */
40684 unsigned fstop_percent; /* when to stop allocating (% files) */
40685@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struc
40686 * proc.c
40687 */
40688 #ifdef CONFIG_CACHEFILES_HISTOGRAM
40689-extern atomic_t cachefiles_lookup_histogram[HZ];
40690-extern atomic_t cachefiles_mkdir_histogram[HZ];
40691-extern atomic_t cachefiles_create_histogram[HZ];
40692+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40693+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40694+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
40695
40696 extern int __init cachefiles_proc_init(void);
40697 extern void cachefiles_proc_cleanup(void);
40698 static inline
40699-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
40700+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
40701 {
40702 unsigned long jif = jiffies - start_jif;
40703 if (jif >= HZ)
40704 jif = HZ - 1;
40705- atomic_inc(&histogram[jif]);
40706+ atomic_inc_unchecked(&histogram[jif]);
40707 }
40708
40709 #else
40710diff -urNp linux-3.1.1/fs/cachefiles/namei.c linux-3.1.1/fs/cachefiles/namei.c
40711--- linux-3.1.1/fs/cachefiles/namei.c 2011-11-11 15:19:27.000000000 -0500
40712+++ linux-3.1.1/fs/cachefiles/namei.c 2011-11-16 18:39:08.000000000 -0500
40713@@ -318,7 +318,7 @@ try_again:
40714 /* first step is to make up a grave dentry in the graveyard */
40715 sprintf(nbuffer, "%08x%08x",
40716 (uint32_t) get_seconds(),
40717- (uint32_t) atomic_inc_return(&cache->gravecounter));
40718+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
40719
40720 /* do the multiway lock magic */
40721 trap = lock_rename(cache->graveyard, dir);
40722diff -urNp linux-3.1.1/fs/cachefiles/proc.c linux-3.1.1/fs/cachefiles/proc.c
40723--- linux-3.1.1/fs/cachefiles/proc.c 2011-11-11 15:19:27.000000000 -0500
40724+++ linux-3.1.1/fs/cachefiles/proc.c 2011-11-16 18:39:08.000000000 -0500
40725@@ -14,9 +14,9 @@
40726 #include <linux/seq_file.h>
40727 #include "internal.h"
40728
40729-atomic_t cachefiles_lookup_histogram[HZ];
40730-atomic_t cachefiles_mkdir_histogram[HZ];
40731-atomic_t cachefiles_create_histogram[HZ];
40732+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40733+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40734+atomic_unchecked_t cachefiles_create_histogram[HZ];
40735
40736 /*
40737 * display the latency histogram
40738@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
40739 return 0;
40740 default:
40741 index = (unsigned long) v - 3;
40742- x = atomic_read(&cachefiles_lookup_histogram[index]);
40743- y = atomic_read(&cachefiles_mkdir_histogram[index]);
40744- z = atomic_read(&cachefiles_create_histogram[index]);
40745+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
40746+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
40747+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
40748 if (x == 0 && y == 0 && z == 0)
40749 return 0;
40750
40751diff -urNp linux-3.1.1/fs/cachefiles/rdwr.c linux-3.1.1/fs/cachefiles/rdwr.c
40752--- linux-3.1.1/fs/cachefiles/rdwr.c 2011-11-11 15:19:27.000000000 -0500
40753+++ linux-3.1.1/fs/cachefiles/rdwr.c 2011-11-16 18:39:08.000000000 -0500
40754@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache
40755 old_fs = get_fs();
40756 set_fs(KERNEL_DS);
40757 ret = file->f_op->write(
40758- file, (const void __user *) data, len, &pos);
40759+ file, (const void __force_user *) data, len, &pos);
40760 set_fs(old_fs);
40761 kunmap(page);
40762 if (ret != len)
40763diff -urNp linux-3.1.1/fs/ceph/dir.c linux-3.1.1/fs/ceph/dir.c
40764--- linux-3.1.1/fs/ceph/dir.c 2011-11-11 15:19:27.000000000 -0500
40765+++ linux-3.1.1/fs/ceph/dir.c 2011-11-16 18:39:08.000000000 -0500
40766@@ -244,7 +244,7 @@ static int ceph_readdir(struct file *fil
40767 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
40768 struct ceph_mds_client *mdsc = fsc->mdsc;
40769 unsigned frag = fpos_frag(filp->f_pos);
40770- int off = fpos_off(filp->f_pos);
40771+ unsigned int off = fpos_off(filp->f_pos);
40772 int err;
40773 u32 ftype;
40774 struct ceph_mds_reply_info_parsed *rinfo;
40775diff -urNp linux-3.1.1/fs/cifs/cifs_debug.c linux-3.1.1/fs/cifs/cifs_debug.c
40776--- linux-3.1.1/fs/cifs/cifs_debug.c 2011-11-11 15:19:27.000000000 -0500
40777+++ linux-3.1.1/fs/cifs/cifs_debug.c 2011-11-16 18:39:08.000000000 -0500
40778@@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(str
40779
40780 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
40781 #ifdef CONFIG_CIFS_STATS2
40782- atomic_set(&totBufAllocCount, 0);
40783- atomic_set(&totSmBufAllocCount, 0);
40784+ atomic_set_unchecked(&totBufAllocCount, 0);
40785+ atomic_set_unchecked(&totSmBufAllocCount, 0);
40786 #endif /* CONFIG_CIFS_STATS2 */
40787 spin_lock(&cifs_tcp_ses_lock);
40788 list_for_each(tmp1, &cifs_tcp_ses_list) {
40789@@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(str
40790 tcon = list_entry(tmp3,
40791 struct cifs_tcon,
40792 tcon_list);
40793- atomic_set(&tcon->num_smbs_sent, 0);
40794- atomic_set(&tcon->num_writes, 0);
40795- atomic_set(&tcon->num_reads, 0);
40796- atomic_set(&tcon->num_oplock_brks, 0);
40797- atomic_set(&tcon->num_opens, 0);
40798- atomic_set(&tcon->num_posixopens, 0);
40799- atomic_set(&tcon->num_posixmkdirs, 0);
40800- atomic_set(&tcon->num_closes, 0);
40801- atomic_set(&tcon->num_deletes, 0);
40802- atomic_set(&tcon->num_mkdirs, 0);
40803- atomic_set(&tcon->num_rmdirs, 0);
40804- atomic_set(&tcon->num_renames, 0);
40805- atomic_set(&tcon->num_t2renames, 0);
40806- atomic_set(&tcon->num_ffirst, 0);
40807- atomic_set(&tcon->num_fnext, 0);
40808- atomic_set(&tcon->num_fclose, 0);
40809- atomic_set(&tcon->num_hardlinks, 0);
40810- atomic_set(&tcon->num_symlinks, 0);
40811- atomic_set(&tcon->num_locks, 0);
40812+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
40813+ atomic_set_unchecked(&tcon->num_writes, 0);
40814+ atomic_set_unchecked(&tcon->num_reads, 0);
40815+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
40816+ atomic_set_unchecked(&tcon->num_opens, 0);
40817+ atomic_set_unchecked(&tcon->num_posixopens, 0);
40818+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
40819+ atomic_set_unchecked(&tcon->num_closes, 0);
40820+ atomic_set_unchecked(&tcon->num_deletes, 0);
40821+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
40822+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
40823+ atomic_set_unchecked(&tcon->num_renames, 0);
40824+ atomic_set_unchecked(&tcon->num_t2renames, 0);
40825+ atomic_set_unchecked(&tcon->num_ffirst, 0);
40826+ atomic_set_unchecked(&tcon->num_fnext, 0);
40827+ atomic_set_unchecked(&tcon->num_fclose, 0);
40828+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
40829+ atomic_set_unchecked(&tcon->num_symlinks, 0);
40830+ atomic_set_unchecked(&tcon->num_locks, 0);
40831 }
40832 }
40833 }
40834@@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct s
40835 smBufAllocCount.counter, cifs_min_small);
40836 #ifdef CONFIG_CIFS_STATS2
40837 seq_printf(m, "Total Large %d Small %d Allocations\n",
40838- atomic_read(&totBufAllocCount),
40839- atomic_read(&totSmBufAllocCount));
40840+ atomic_read_unchecked(&totBufAllocCount),
40841+ atomic_read_unchecked(&totSmBufAllocCount));
40842 #endif /* CONFIG_CIFS_STATS2 */
40843
40844 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
40845@@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct s
40846 if (tcon->need_reconnect)
40847 seq_puts(m, "\tDISCONNECTED ");
40848 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
40849- atomic_read(&tcon->num_smbs_sent),
40850- atomic_read(&tcon->num_oplock_brks));
40851+ atomic_read_unchecked(&tcon->num_smbs_sent),
40852+ atomic_read_unchecked(&tcon->num_oplock_brks));
40853 seq_printf(m, "\nReads: %d Bytes: %lld",
40854- atomic_read(&tcon->num_reads),
40855+ atomic_read_unchecked(&tcon->num_reads),
40856 (long long)(tcon->bytes_read));
40857 seq_printf(m, "\nWrites: %d Bytes: %lld",
40858- atomic_read(&tcon->num_writes),
40859+ atomic_read_unchecked(&tcon->num_writes),
40860 (long long)(tcon->bytes_written));
40861 seq_printf(m, "\nFlushes: %d",
40862- atomic_read(&tcon->num_flushes));
40863+ atomic_read_unchecked(&tcon->num_flushes));
40864 seq_printf(m, "\nLocks: %d HardLinks: %d "
40865 "Symlinks: %d",
40866- atomic_read(&tcon->num_locks),
40867- atomic_read(&tcon->num_hardlinks),
40868- atomic_read(&tcon->num_symlinks));
40869+ atomic_read_unchecked(&tcon->num_locks),
40870+ atomic_read_unchecked(&tcon->num_hardlinks),
40871+ atomic_read_unchecked(&tcon->num_symlinks));
40872 seq_printf(m, "\nOpens: %d Closes: %d "
40873 "Deletes: %d",
40874- atomic_read(&tcon->num_opens),
40875- atomic_read(&tcon->num_closes),
40876- atomic_read(&tcon->num_deletes));
40877+ atomic_read_unchecked(&tcon->num_opens),
40878+ atomic_read_unchecked(&tcon->num_closes),
40879+ atomic_read_unchecked(&tcon->num_deletes));
40880 seq_printf(m, "\nPosix Opens: %d "
40881 "Posix Mkdirs: %d",
40882- atomic_read(&tcon->num_posixopens),
40883- atomic_read(&tcon->num_posixmkdirs));
40884+ atomic_read_unchecked(&tcon->num_posixopens),
40885+ atomic_read_unchecked(&tcon->num_posixmkdirs));
40886 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
40887- atomic_read(&tcon->num_mkdirs),
40888- atomic_read(&tcon->num_rmdirs));
40889+ atomic_read_unchecked(&tcon->num_mkdirs),
40890+ atomic_read_unchecked(&tcon->num_rmdirs));
40891 seq_printf(m, "\nRenames: %d T2 Renames %d",
40892- atomic_read(&tcon->num_renames),
40893- atomic_read(&tcon->num_t2renames));
40894+ atomic_read_unchecked(&tcon->num_renames),
40895+ atomic_read_unchecked(&tcon->num_t2renames));
40896 seq_printf(m, "\nFindFirst: %d FNext %d "
40897 "FClose %d",
40898- atomic_read(&tcon->num_ffirst),
40899- atomic_read(&tcon->num_fnext),
40900- atomic_read(&tcon->num_fclose));
40901+ atomic_read_unchecked(&tcon->num_ffirst),
40902+ atomic_read_unchecked(&tcon->num_fnext),
40903+ atomic_read_unchecked(&tcon->num_fclose));
40904 }
40905 }
40906 }
40907diff -urNp linux-3.1.1/fs/cifs/cifsfs.c linux-3.1.1/fs/cifs/cifsfs.c
40908--- linux-3.1.1/fs/cifs/cifsfs.c 2011-11-11 15:19:27.000000000 -0500
40909+++ linux-3.1.1/fs/cifs/cifsfs.c 2011-11-16 18:39:08.000000000 -0500
40910@@ -981,7 +981,7 @@ cifs_init_request_bufs(void)
40911 cifs_req_cachep = kmem_cache_create("cifs_request",
40912 CIFSMaxBufSize +
40913 MAX_CIFS_HDR_SIZE, 0,
40914- SLAB_HWCACHE_ALIGN, NULL);
40915+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
40916 if (cifs_req_cachep == NULL)
40917 return -ENOMEM;
40918
40919@@ -1008,7 +1008,7 @@ cifs_init_request_bufs(void)
40920 efficient to alloc 1 per page off the slab compared to 17K (5page)
40921 alloc of large cifs buffers even when page debugging is on */
40922 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
40923- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
40924+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
40925 NULL);
40926 if (cifs_sm_req_cachep == NULL) {
40927 mempool_destroy(cifs_req_poolp);
40928@@ -1093,8 +1093,8 @@ init_cifs(void)
40929 atomic_set(&bufAllocCount, 0);
40930 atomic_set(&smBufAllocCount, 0);
40931 #ifdef CONFIG_CIFS_STATS2
40932- atomic_set(&totBufAllocCount, 0);
40933- atomic_set(&totSmBufAllocCount, 0);
40934+ atomic_set_unchecked(&totBufAllocCount, 0);
40935+ atomic_set_unchecked(&totSmBufAllocCount, 0);
40936 #endif /* CONFIG_CIFS_STATS2 */
40937
40938 atomic_set(&midCount, 0);
40939diff -urNp linux-3.1.1/fs/cifs/cifsglob.h linux-3.1.1/fs/cifs/cifsglob.h
40940--- linux-3.1.1/fs/cifs/cifsglob.h 2011-11-11 15:19:27.000000000 -0500
40941+++ linux-3.1.1/fs/cifs/cifsglob.h 2011-11-16 18:39:08.000000000 -0500
40942@@ -381,28 +381,28 @@ struct cifs_tcon {
40943 __u16 Flags; /* optional support bits */
40944 enum statusEnum tidStatus;
40945 #ifdef CONFIG_CIFS_STATS
40946- atomic_t num_smbs_sent;
40947- atomic_t num_writes;
40948- atomic_t num_reads;
40949- atomic_t num_flushes;
40950- atomic_t num_oplock_brks;
40951- atomic_t num_opens;
40952- atomic_t num_closes;
40953- atomic_t num_deletes;
40954- atomic_t num_mkdirs;
40955- atomic_t num_posixopens;
40956- atomic_t num_posixmkdirs;
40957- atomic_t num_rmdirs;
40958- atomic_t num_renames;
40959- atomic_t num_t2renames;
40960- atomic_t num_ffirst;
40961- atomic_t num_fnext;
40962- atomic_t num_fclose;
40963- atomic_t num_hardlinks;
40964- atomic_t num_symlinks;
40965- atomic_t num_locks;
40966- atomic_t num_acl_get;
40967- atomic_t num_acl_set;
40968+ atomic_unchecked_t num_smbs_sent;
40969+ atomic_unchecked_t num_writes;
40970+ atomic_unchecked_t num_reads;
40971+ atomic_unchecked_t num_flushes;
40972+ atomic_unchecked_t num_oplock_brks;
40973+ atomic_unchecked_t num_opens;
40974+ atomic_unchecked_t num_closes;
40975+ atomic_unchecked_t num_deletes;
40976+ atomic_unchecked_t num_mkdirs;
40977+ atomic_unchecked_t num_posixopens;
40978+ atomic_unchecked_t num_posixmkdirs;
40979+ atomic_unchecked_t num_rmdirs;
40980+ atomic_unchecked_t num_renames;
40981+ atomic_unchecked_t num_t2renames;
40982+ atomic_unchecked_t num_ffirst;
40983+ atomic_unchecked_t num_fnext;
40984+ atomic_unchecked_t num_fclose;
40985+ atomic_unchecked_t num_hardlinks;
40986+ atomic_unchecked_t num_symlinks;
40987+ atomic_unchecked_t num_locks;
40988+ atomic_unchecked_t num_acl_get;
40989+ atomic_unchecked_t num_acl_set;
40990 #ifdef CONFIG_CIFS_STATS2
40991 unsigned long long time_writes;
40992 unsigned long long time_reads;
40993@@ -613,7 +613,7 @@ convert_delimiter(char *path, char delim
40994 }
40995
40996 #ifdef CONFIG_CIFS_STATS
40997-#define cifs_stats_inc atomic_inc
40998+#define cifs_stats_inc atomic_inc_unchecked
40999
41000 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
41001 unsigned int bytes)
41002@@ -953,8 +953,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnect
41003 /* Various Debug counters */
41004 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
41005 #ifdef CONFIG_CIFS_STATS2
41006-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
41007-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
41008+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
41009+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
41010 #endif
41011 GLOBAL_EXTERN atomic_t smBufAllocCount;
41012 GLOBAL_EXTERN atomic_t midCount;
41013diff -urNp linux-3.1.1/fs/cifs/link.c linux-3.1.1/fs/cifs/link.c
41014--- linux-3.1.1/fs/cifs/link.c 2011-11-11 15:19:27.000000000 -0500
41015+++ linux-3.1.1/fs/cifs/link.c 2011-11-16 18:39:08.000000000 -0500
41016@@ -593,7 +593,7 @@ symlink_exit:
41017
41018 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
41019 {
41020- char *p = nd_get_link(nd);
41021+ const char *p = nd_get_link(nd);
41022 if (!IS_ERR(p))
41023 kfree(p);
41024 }
41025diff -urNp linux-3.1.1/fs/cifs/misc.c linux-3.1.1/fs/cifs/misc.c
41026--- linux-3.1.1/fs/cifs/misc.c 2011-11-11 15:19:27.000000000 -0500
41027+++ linux-3.1.1/fs/cifs/misc.c 2011-11-16 18:39:08.000000000 -0500
41028@@ -156,7 +156,7 @@ cifs_buf_get(void)
41029 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
41030 atomic_inc(&bufAllocCount);
41031 #ifdef CONFIG_CIFS_STATS2
41032- atomic_inc(&totBufAllocCount);
41033+ atomic_inc_unchecked(&totBufAllocCount);
41034 #endif /* CONFIG_CIFS_STATS2 */
41035 }
41036
41037@@ -191,7 +191,7 @@ cifs_small_buf_get(void)
41038 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
41039 atomic_inc(&smBufAllocCount);
41040 #ifdef CONFIG_CIFS_STATS2
41041- atomic_inc(&totSmBufAllocCount);
41042+ atomic_inc_unchecked(&totSmBufAllocCount);
41043 #endif /* CONFIG_CIFS_STATS2 */
41044
41045 }
41046diff -urNp linux-3.1.1/fs/coda/cache.c linux-3.1.1/fs/coda/cache.c
41047--- linux-3.1.1/fs/coda/cache.c 2011-11-11 15:19:27.000000000 -0500
41048+++ linux-3.1.1/fs/coda/cache.c 2011-11-16 18:39:08.000000000 -0500
41049@@ -24,7 +24,7 @@
41050 #include "coda_linux.h"
41051 #include "coda_cache.h"
41052
41053-static atomic_t permission_epoch = ATOMIC_INIT(0);
41054+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
41055
41056 /* replace or extend an acl cache hit */
41057 void coda_cache_enter(struct inode *inode, int mask)
41058@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inod
41059 struct coda_inode_info *cii = ITOC(inode);
41060
41061 spin_lock(&cii->c_lock);
41062- cii->c_cached_epoch = atomic_read(&permission_epoch);
41063+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
41064 if (cii->c_uid != current_fsuid()) {
41065 cii->c_uid = current_fsuid();
41066 cii->c_cached_perm = mask;
41067@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode
41068 {
41069 struct coda_inode_info *cii = ITOC(inode);
41070 spin_lock(&cii->c_lock);
41071- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
41072+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
41073 spin_unlock(&cii->c_lock);
41074 }
41075
41076 /* remove all acl caches */
41077 void coda_cache_clear_all(struct super_block *sb)
41078 {
41079- atomic_inc(&permission_epoch);
41080+ atomic_inc_unchecked(&permission_epoch);
41081 }
41082
41083
41084@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode
41085 spin_lock(&cii->c_lock);
41086 hit = (mask & cii->c_cached_perm) == mask &&
41087 cii->c_uid == current_fsuid() &&
41088- cii->c_cached_epoch == atomic_read(&permission_epoch);
41089+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
41090 spin_unlock(&cii->c_lock);
41091
41092 return hit;
41093diff -urNp linux-3.1.1/fs/compat_binfmt_elf.c linux-3.1.1/fs/compat_binfmt_elf.c
41094--- linux-3.1.1/fs/compat_binfmt_elf.c 2011-11-11 15:19:27.000000000 -0500
41095+++ linux-3.1.1/fs/compat_binfmt_elf.c 2011-11-16 18:39:08.000000000 -0500
41096@@ -30,11 +30,13 @@
41097 #undef elf_phdr
41098 #undef elf_shdr
41099 #undef elf_note
41100+#undef elf_dyn
41101 #undef elf_addr_t
41102 #define elfhdr elf32_hdr
41103 #define elf_phdr elf32_phdr
41104 #define elf_shdr elf32_shdr
41105 #define elf_note elf32_note
41106+#define elf_dyn Elf32_Dyn
41107 #define elf_addr_t Elf32_Addr
41108
41109 /*
41110diff -urNp linux-3.1.1/fs/compat.c linux-3.1.1/fs/compat.c
41111--- linux-3.1.1/fs/compat.c 2011-11-11 15:19:27.000000000 -0500
41112+++ linux-3.1.1/fs/compat.c 2011-11-16 18:40:29.000000000 -0500
41113@@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(const
41114 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
41115 {
41116 compat_ino_t ino = stat->ino;
41117- typeof(ubuf->st_uid) uid = 0;
41118- typeof(ubuf->st_gid) gid = 0;
41119+ typeof(((struct compat_stat *)0)->st_uid) uid = 0;
41120+ typeof(((struct compat_stat *)0)->st_gid) gid = 0;
41121 int err;
41122
41123 SET_UID(uid, stat->uid);
41124@@ -508,7 +508,7 @@ compat_sys_io_setup(unsigned nr_reqs, u3
41125
41126 set_fs(KERNEL_DS);
41127 /* The __user pointer cast is valid because of the set_fs() */
41128- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
41129+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
41130 set_fs(oldfs);
41131 /* truncating is ok because it's a user address */
41132 if (!ret)
41133@@ -566,7 +566,7 @@ ssize_t compat_rw_copy_check_uvector(int
41134 goto out;
41135
41136 ret = -EINVAL;
41137- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
41138+ if (nr_segs > UIO_MAXIOV)
41139 goto out;
41140 if (nr_segs > fast_segs) {
41141 ret = -ENOMEM;
41142@@ -848,6 +848,7 @@ struct compat_old_linux_dirent {
41143
41144 struct compat_readdir_callback {
41145 struct compat_old_linux_dirent __user *dirent;
41146+ struct file * file;
41147 int result;
41148 };
41149
41150@@ -865,6 +866,10 @@ static int compat_fillonedir(void *__buf
41151 buf->result = -EOVERFLOW;
41152 return -EOVERFLOW;
41153 }
41154+
41155+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41156+ return 0;
41157+
41158 buf->result++;
41159 dirent = buf->dirent;
41160 if (!access_ok(VERIFY_WRITE, dirent,
41161@@ -897,6 +902,7 @@ asmlinkage long compat_sys_old_readdir(u
41162
41163 buf.result = 0;
41164 buf.dirent = dirent;
41165+ buf.file = file;
41166
41167 error = vfs_readdir(file, compat_fillonedir, &buf);
41168 if (buf.result)
41169@@ -917,6 +923,7 @@ struct compat_linux_dirent {
41170 struct compat_getdents_callback {
41171 struct compat_linux_dirent __user *current_dir;
41172 struct compat_linux_dirent __user *previous;
41173+ struct file * file;
41174 int count;
41175 int error;
41176 };
41177@@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, c
41178 buf->error = -EOVERFLOW;
41179 return -EOVERFLOW;
41180 }
41181+
41182+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41183+ return 0;
41184+
41185 dirent = buf->previous;
41186 if (dirent) {
41187 if (__put_user(offset, &dirent->d_off))
41188@@ -985,6 +996,7 @@ asmlinkage long compat_sys_getdents(unsi
41189 buf.previous = NULL;
41190 buf.count = count;
41191 buf.error = 0;
41192+ buf.file = file;
41193
41194 error = vfs_readdir(file, compat_filldir, &buf);
41195 if (error >= 0)
41196@@ -1006,6 +1018,7 @@ out:
41197 struct compat_getdents_callback64 {
41198 struct linux_dirent64 __user *current_dir;
41199 struct linux_dirent64 __user *previous;
41200+ struct file * file;
41201 int count;
41202 int error;
41203 };
41204@@ -1022,6 +1035,10 @@ static int compat_filldir64(void * __buf
41205 buf->error = -EINVAL; /* only used if we fail.. */
41206 if (reclen > buf->count)
41207 return -EINVAL;
41208+
41209+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41210+ return 0;
41211+
41212 dirent = buf->previous;
41213
41214 if (dirent) {
41215@@ -1073,13 +1090,14 @@ asmlinkage long compat_sys_getdents64(un
41216 buf.previous = NULL;
41217 buf.count = count;
41218 buf.error = 0;
41219+ buf.file = file;
41220
41221 error = vfs_readdir(file, compat_filldir64, &buf);
41222 if (error >= 0)
41223 error = buf.error;
41224 lastdirent = buf.previous;
41225 if (lastdirent) {
41226- typeof(lastdirent->d_off) d_off = file->f_pos;
41227+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
41228 if (__put_user_unaligned(d_off, &lastdirent->d_off))
41229 error = -EFAULT;
41230 else
41231@@ -1446,6 +1464,8 @@ int compat_core_sys_select(int n, compat
41232 struct fdtable *fdt;
41233 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
41234
41235+ pax_track_stack();
41236+
41237 if (n < 0)
41238 goto out_nofds;
41239
41240diff -urNp linux-3.1.1/fs/compat_ioctl.c linux-3.1.1/fs/compat_ioctl.c
41241--- linux-3.1.1/fs/compat_ioctl.c 2011-11-11 15:19:27.000000000 -0500
41242+++ linux-3.1.1/fs/compat_ioctl.c 2011-11-16 18:39:08.000000000 -0500
41243@@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsi
41244
41245 err = get_user(palp, &up->palette);
41246 err |= get_user(length, &up->length);
41247+ if (err)
41248+ return -EFAULT;
41249
41250 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
41251 err = put_user(compat_ptr(palp), &up_native->palette);
41252@@ -621,7 +623,7 @@ static int serial_struct_ioctl(unsigned
41253 return -EFAULT;
41254 if (__get_user(udata, &ss32->iomem_base))
41255 return -EFAULT;
41256- ss.iomem_base = compat_ptr(udata);
41257+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
41258 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
41259 __get_user(ss.port_high, &ss32->port_high))
41260 return -EFAULT;
41261@@ -796,7 +798,7 @@ static int compat_ioctl_preallocate(stru
41262 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
41263 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
41264 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
41265- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
41266+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
41267 return -EFAULT;
41268
41269 return ioctl_preallocate(file, p);
41270@@ -1644,8 +1646,8 @@ asmlinkage long compat_sys_ioctl(unsigne
41271 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
41272 {
41273 unsigned int a, b;
41274- a = *(unsigned int *)p;
41275- b = *(unsigned int *)q;
41276+ a = *(const unsigned int *)p;
41277+ b = *(const unsigned int *)q;
41278 if (a > b)
41279 return 1;
41280 if (a < b)
41281diff -urNp linux-3.1.1/fs/configfs/dir.c linux-3.1.1/fs/configfs/dir.c
41282--- linux-3.1.1/fs/configfs/dir.c 2011-11-11 15:19:27.000000000 -0500
41283+++ linux-3.1.1/fs/configfs/dir.c 2011-11-16 18:39:08.000000000 -0500
41284@@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file
41285 }
41286 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
41287 struct configfs_dirent *next;
41288- const char * name;
41289+ const unsigned char * name;
41290+ char d_name[sizeof(next->s_dentry->d_iname)];
41291 int len;
41292 struct inode *inode = NULL;
41293
41294@@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file
41295 continue;
41296
41297 name = configfs_get_name(next);
41298- len = strlen(name);
41299+ if (next->s_dentry && name == next->s_dentry->d_iname) {
41300+ len = next->s_dentry->d_name.len;
41301+ memcpy(d_name, name, len);
41302+ name = d_name;
41303+ } else
41304+ len = strlen(name);
41305
41306 /*
41307 * We'll have a dentry and an inode for
41308diff -urNp linux-3.1.1/fs/dcache.c linux-3.1.1/fs/dcache.c
41309--- linux-3.1.1/fs/dcache.c 2011-11-11 15:19:27.000000000 -0500
41310+++ linux-3.1.1/fs/dcache.c 2011-11-16 18:39:08.000000000 -0500
41311@@ -2998,7 +2998,7 @@ void __init vfs_caches_init(unsigned lon
41312 mempages -= reserve;
41313
41314 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
41315- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
41316+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
41317
41318 dcache_init();
41319 inode_init();
41320diff -urNp linux-3.1.1/fs/ecryptfs/inode.c linux-3.1.1/fs/ecryptfs/inode.c
41321--- linux-3.1.1/fs/ecryptfs/inode.c 2011-11-11 15:19:27.000000000 -0500
41322+++ linux-3.1.1/fs/ecryptfs/inode.c 2011-11-16 18:39:08.000000000 -0500
41323@@ -681,7 +681,7 @@ static int ecryptfs_readlink_lower(struc
41324 old_fs = get_fs();
41325 set_fs(get_ds());
41326 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
41327- (char __user *)lower_buf,
41328+ (char __force_user *)lower_buf,
41329 lower_bufsiz);
41330 set_fs(old_fs);
41331 if (rc < 0)
41332@@ -727,7 +727,7 @@ static void *ecryptfs_follow_link(struct
41333 }
41334 old_fs = get_fs();
41335 set_fs(get_ds());
41336- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
41337+ rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
41338 set_fs(old_fs);
41339 if (rc < 0) {
41340 kfree(buf);
41341@@ -742,7 +742,7 @@ out:
41342 static void
41343 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
41344 {
41345- char *buf = nd_get_link(nd);
41346+ const char *buf = nd_get_link(nd);
41347 if (!IS_ERR(buf)) {
41348 /* Free the char* */
41349 kfree(buf);
41350diff -urNp linux-3.1.1/fs/ecryptfs/miscdev.c linux-3.1.1/fs/ecryptfs/miscdev.c
41351--- linux-3.1.1/fs/ecryptfs/miscdev.c 2011-11-11 15:19:27.000000000 -0500
41352+++ linux-3.1.1/fs/ecryptfs/miscdev.c 2011-11-16 18:39:08.000000000 -0500
41353@@ -328,7 +328,7 @@ check_list:
41354 goto out_unlock_msg_ctx;
41355 i = 5;
41356 if (msg_ctx->msg) {
41357- if (copy_to_user(&buf[i], packet_length, packet_length_size))
41358+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
41359 goto out_unlock_msg_ctx;
41360 i += packet_length_size;
41361 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
41362diff -urNp linux-3.1.1/fs/ecryptfs/read_write.c linux-3.1.1/fs/ecryptfs/read_write.c
41363--- linux-3.1.1/fs/ecryptfs/read_write.c 2011-11-11 15:19:27.000000000 -0500
41364+++ linux-3.1.1/fs/ecryptfs/read_write.c 2011-11-16 18:39:08.000000000 -0500
41365@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *e
41366 return -EIO;
41367 fs_save = get_fs();
41368 set_fs(get_ds());
41369- rc = vfs_write(lower_file, data, size, &offset);
41370+ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
41371 set_fs(fs_save);
41372 mark_inode_dirty_sync(ecryptfs_inode);
41373 return rc;
41374@@ -235,7 +235,7 @@ int ecryptfs_read_lower(char *data, loff
41375 return -EIO;
41376 fs_save = get_fs();
41377 set_fs(get_ds());
41378- rc = vfs_read(lower_file, data, size, &offset);
41379+ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
41380 set_fs(fs_save);
41381 return rc;
41382 }
41383diff -urNp linux-3.1.1/fs/exec.c linux-3.1.1/fs/exec.c
41384--- linux-3.1.1/fs/exec.c 2011-11-11 15:19:27.000000000 -0500
41385+++ linux-3.1.1/fs/exec.c 2011-11-17 18:40:47.000000000 -0500
41386@@ -55,12 +55,24 @@
41387 #include <linux/pipe_fs_i.h>
41388 #include <linux/oom.h>
41389 #include <linux/compat.h>
41390+#include <linux/random.h>
41391+#include <linux/seq_file.h>
41392+
41393+#ifdef CONFIG_PAX_REFCOUNT
41394+#include <linux/kallsyms.h>
41395+#include <linux/kdebug.h>
41396+#endif
41397
41398 #include <asm/uaccess.h>
41399 #include <asm/mmu_context.h>
41400 #include <asm/tlb.h>
41401 #include "internal.h"
41402
41403+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
41404+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
41405+EXPORT_SYMBOL(pax_set_initial_flags_func);
41406+#endif
41407+
41408 int core_uses_pid;
41409 char core_pattern[CORENAME_MAX_SIZE] = "core";
41410 unsigned int core_pipe_limit;
41411@@ -70,7 +82,7 @@ struct core_name {
41412 char *corename;
41413 int used, size;
41414 };
41415-static atomic_t call_count = ATOMIC_INIT(1);
41416+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
41417
41418 /* The maximal length of core_pattern is also specified in sysctl.c */
41419
41420@@ -188,18 +200,10 @@ static struct page *get_arg_page(struct
41421 int write)
41422 {
41423 struct page *page;
41424- int ret;
41425
41426-#ifdef CONFIG_STACK_GROWSUP
41427- if (write) {
41428- ret = expand_downwards(bprm->vma, pos);
41429- if (ret < 0)
41430- return NULL;
41431- }
41432-#endif
41433- ret = get_user_pages(current, bprm->mm, pos,
41434- 1, write, 1, &page, NULL);
41435- if (ret <= 0)
41436+ if (0 > expand_downwards(bprm->vma, pos))
41437+ return NULL;
41438+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
41439 return NULL;
41440
41441 if (write) {
41442@@ -274,6 +278,11 @@ static int __bprm_mm_init(struct linux_b
41443 vma->vm_end = STACK_TOP_MAX;
41444 vma->vm_start = vma->vm_end - PAGE_SIZE;
41445 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
41446+
41447+#ifdef CONFIG_PAX_SEGMEXEC
41448+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
41449+#endif
41450+
41451 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
41452 INIT_LIST_HEAD(&vma->anon_vma_chain);
41453
41454@@ -288,6 +297,12 @@ static int __bprm_mm_init(struct linux_b
41455 mm->stack_vm = mm->total_vm = 1;
41456 up_write(&mm->mmap_sem);
41457 bprm->p = vma->vm_end - sizeof(void *);
41458+
41459+#ifdef CONFIG_PAX_RANDUSTACK
41460+ if (randomize_va_space)
41461+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
41462+#endif
41463+
41464 return 0;
41465 err:
41466 up_write(&mm->mmap_sem);
41467@@ -396,19 +411,7 @@ err:
41468 return err;
41469 }
41470
41471-struct user_arg_ptr {
41472-#ifdef CONFIG_COMPAT
41473- bool is_compat;
41474-#endif
41475- union {
41476- const char __user *const __user *native;
41477-#ifdef CONFIG_COMPAT
41478- compat_uptr_t __user *compat;
41479-#endif
41480- } ptr;
41481-};
41482-
41483-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41484+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41485 {
41486 const char __user *native;
41487
41488@@ -417,14 +420,14 @@ static const char __user *get_user_arg_p
41489 compat_uptr_t compat;
41490
41491 if (get_user(compat, argv.ptr.compat + nr))
41492- return ERR_PTR(-EFAULT);
41493+ return (const char __force_user *)ERR_PTR(-EFAULT);
41494
41495 return compat_ptr(compat);
41496 }
41497 #endif
41498
41499 if (get_user(native, argv.ptr.native + nr))
41500- return ERR_PTR(-EFAULT);
41501+ return (const char __force_user *)ERR_PTR(-EFAULT);
41502
41503 return native;
41504 }
41505@@ -443,7 +446,7 @@ static int count(struct user_arg_ptr arg
41506 if (!p)
41507 break;
41508
41509- if (IS_ERR(p))
41510+ if (IS_ERR((const char __force_kernel *)p))
41511 return -EFAULT;
41512
41513 if (i++ >= max)
41514@@ -477,7 +480,7 @@ static int copy_strings(int argc, struct
41515
41516 ret = -EFAULT;
41517 str = get_user_arg_ptr(argv, argc);
41518- if (IS_ERR(str))
41519+ if (IS_ERR((const char __force_kernel *)str))
41520 goto out;
41521
41522 len = strnlen_user(str, MAX_ARG_STRLEN);
41523@@ -559,7 +562,7 @@ int copy_strings_kernel(int argc, const
41524 int r;
41525 mm_segment_t oldfs = get_fs();
41526 struct user_arg_ptr argv = {
41527- .ptr.native = (const char __user *const __user *)__argv,
41528+ .ptr.native = (const char __force_user *const __force_user *)__argv,
41529 };
41530
41531 set_fs(KERNEL_DS);
41532@@ -594,7 +597,8 @@ static int shift_arg_pages(struct vm_are
41533 unsigned long new_end = old_end - shift;
41534 struct mmu_gather tlb;
41535
41536- BUG_ON(new_start > new_end);
41537+ if (new_start >= new_end || new_start < mmap_min_addr)
41538+ return -ENOMEM;
41539
41540 /*
41541 * ensure there are no vmas between where we want to go
41542@@ -603,6 +607,10 @@ static int shift_arg_pages(struct vm_are
41543 if (vma != find_vma(mm, new_start))
41544 return -EFAULT;
41545
41546+#ifdef CONFIG_PAX_SEGMEXEC
41547+ BUG_ON(pax_find_mirror_vma(vma));
41548+#endif
41549+
41550 /*
41551 * cover the whole range: [new_start, old_end)
41552 */
41553@@ -683,10 +691,6 @@ int setup_arg_pages(struct linux_binprm
41554 stack_top = arch_align_stack(stack_top);
41555 stack_top = PAGE_ALIGN(stack_top);
41556
41557- if (unlikely(stack_top < mmap_min_addr) ||
41558- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
41559- return -ENOMEM;
41560-
41561 stack_shift = vma->vm_end - stack_top;
41562
41563 bprm->p -= stack_shift;
41564@@ -698,8 +702,28 @@ int setup_arg_pages(struct linux_binprm
41565 bprm->exec -= stack_shift;
41566
41567 down_write(&mm->mmap_sem);
41568+
41569+ /* Move stack pages down in memory. */
41570+ if (stack_shift) {
41571+ ret = shift_arg_pages(vma, stack_shift);
41572+ if (ret)
41573+ goto out_unlock;
41574+ }
41575+
41576 vm_flags = VM_STACK_FLAGS;
41577
41578+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41579+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41580+ vm_flags &= ~VM_EXEC;
41581+
41582+#ifdef CONFIG_PAX_MPROTECT
41583+ if (mm->pax_flags & MF_PAX_MPROTECT)
41584+ vm_flags &= ~VM_MAYEXEC;
41585+#endif
41586+
41587+ }
41588+#endif
41589+
41590 /*
41591 * Adjust stack execute permissions; explicitly enable for
41592 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
41593@@ -718,13 +742,6 @@ int setup_arg_pages(struct linux_binprm
41594 goto out_unlock;
41595 BUG_ON(prev != vma);
41596
41597- /* Move stack pages down in memory. */
41598- if (stack_shift) {
41599- ret = shift_arg_pages(vma, stack_shift);
41600- if (ret)
41601- goto out_unlock;
41602- }
41603-
41604 /* mprotect_fixup is overkill to remove the temporary stack flags */
41605 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
41606
41607@@ -805,7 +822,7 @@ int kernel_read(struct file *file, loff_
41608 old_fs = get_fs();
41609 set_fs(get_ds());
41610 /* The cast to a user pointer is valid due to the set_fs() */
41611- result = vfs_read(file, (void __user *)addr, count, &pos);
41612+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
41613 set_fs(old_fs);
41614 return result;
41615 }
41616@@ -1251,7 +1268,7 @@ int check_unsafe_exec(struct linux_binpr
41617 }
41618 rcu_read_unlock();
41619
41620- if (p->fs->users > n_fs) {
41621+ if (atomic_read(&p->fs->users) > n_fs) {
41622 bprm->unsafe |= LSM_UNSAFE_SHARE;
41623 } else {
41624 res = -EAGAIN;
41625@@ -1454,6 +1471,11 @@ static int do_execve_common(const char *
41626 struct user_arg_ptr envp,
41627 struct pt_regs *regs)
41628 {
41629+#ifdef CONFIG_GRKERNSEC
41630+ struct file *old_exec_file;
41631+ struct acl_subject_label *old_acl;
41632+ struct rlimit old_rlim[RLIM_NLIMITS];
41633+#endif
41634 struct linux_binprm *bprm;
41635 struct file *file;
41636 struct files_struct *displaced;
41637@@ -1461,6 +1483,8 @@ static int do_execve_common(const char *
41638 int retval;
41639 const struct cred *cred = current_cred();
41640
41641+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
41642+
41643 /*
41644 * We move the actual failure in case of RLIMIT_NPROC excess from
41645 * set*uid() to execve() because too many poorly written programs
41646@@ -1507,6 +1531,16 @@ static int do_execve_common(const char *
41647 bprm->filename = filename;
41648 bprm->interp = filename;
41649
41650+ if (gr_process_user_ban()) {
41651+ retval = -EPERM;
41652+ goto out_file;
41653+ }
41654+
41655+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
41656+ retval = -EACCES;
41657+ goto out_file;
41658+ }
41659+
41660 retval = bprm_mm_init(bprm);
41661 if (retval)
41662 goto out_file;
41663@@ -1536,9 +1570,40 @@ static int do_execve_common(const char *
41664 if (retval < 0)
41665 goto out;
41666
41667+ if (!gr_tpe_allow(file)) {
41668+ retval = -EACCES;
41669+ goto out;
41670+ }
41671+
41672+ if (gr_check_crash_exec(file)) {
41673+ retval = -EACCES;
41674+ goto out;
41675+ }
41676+
41677+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
41678+
41679+ gr_handle_exec_args(bprm, argv);
41680+
41681+#ifdef CONFIG_GRKERNSEC
41682+ old_acl = current->acl;
41683+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
41684+ old_exec_file = current->exec_file;
41685+ get_file(file);
41686+ current->exec_file = file;
41687+#endif
41688+
41689+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
41690+ bprm->unsafe & LSM_UNSAFE_SHARE);
41691+ if (retval < 0)
41692+ goto out_fail;
41693+
41694 retval = search_binary_handler(bprm,regs);
41695 if (retval < 0)
41696- goto out;
41697+ goto out_fail;
41698+#ifdef CONFIG_GRKERNSEC
41699+ if (old_exec_file)
41700+ fput(old_exec_file);
41701+#endif
41702
41703 /* execve succeeded */
41704 current->fs->in_exec = 0;
41705@@ -1549,6 +1614,14 @@ static int do_execve_common(const char *
41706 put_files_struct(displaced);
41707 return retval;
41708
41709+out_fail:
41710+#ifdef CONFIG_GRKERNSEC
41711+ current->acl = old_acl;
41712+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
41713+ fput(current->exec_file);
41714+ current->exec_file = old_exec_file;
41715+#endif
41716+
41717 out:
41718 if (bprm->mm) {
41719 acct_arg_size(bprm, 0);
41720@@ -1622,7 +1695,7 @@ static int expand_corename(struct core_n
41721 {
41722 char *old_corename = cn->corename;
41723
41724- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
41725+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
41726 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
41727
41728 if (!cn->corename) {
41729@@ -1719,7 +1792,7 @@ static int format_corename(struct core_n
41730 int pid_in_pattern = 0;
41731 int err = 0;
41732
41733- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
41734+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
41735 cn->corename = kmalloc(cn->size, GFP_KERNEL);
41736 cn->used = 0;
41737
41738@@ -1816,6 +1889,218 @@ out:
41739 return ispipe;
41740 }
41741
41742+int pax_check_flags(unsigned long *flags)
41743+{
41744+ int retval = 0;
41745+
41746+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
41747+ if (*flags & MF_PAX_SEGMEXEC)
41748+ {
41749+ *flags &= ~MF_PAX_SEGMEXEC;
41750+ retval = -EINVAL;
41751+ }
41752+#endif
41753+
41754+ if ((*flags & MF_PAX_PAGEEXEC)
41755+
41756+#ifdef CONFIG_PAX_PAGEEXEC
41757+ && (*flags & MF_PAX_SEGMEXEC)
41758+#endif
41759+
41760+ )
41761+ {
41762+ *flags &= ~MF_PAX_PAGEEXEC;
41763+ retval = -EINVAL;
41764+ }
41765+
41766+ if ((*flags & MF_PAX_MPROTECT)
41767+
41768+#ifdef CONFIG_PAX_MPROTECT
41769+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41770+#endif
41771+
41772+ )
41773+ {
41774+ *flags &= ~MF_PAX_MPROTECT;
41775+ retval = -EINVAL;
41776+ }
41777+
41778+ if ((*flags & MF_PAX_EMUTRAMP)
41779+
41780+#ifdef CONFIG_PAX_EMUTRAMP
41781+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41782+#endif
41783+
41784+ )
41785+ {
41786+ *flags &= ~MF_PAX_EMUTRAMP;
41787+ retval = -EINVAL;
41788+ }
41789+
41790+ return retval;
41791+}
41792+
41793+EXPORT_SYMBOL(pax_check_flags);
41794+
41795+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41796+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
41797+{
41798+ struct task_struct *tsk = current;
41799+ struct mm_struct *mm = current->mm;
41800+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
41801+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
41802+ char *path_exec = NULL;
41803+ char *path_fault = NULL;
41804+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
41805+
41806+ if (buffer_exec && buffer_fault) {
41807+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
41808+
41809+ down_read(&mm->mmap_sem);
41810+ vma = mm->mmap;
41811+ while (vma && (!vma_exec || !vma_fault)) {
41812+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
41813+ vma_exec = vma;
41814+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
41815+ vma_fault = vma;
41816+ vma = vma->vm_next;
41817+ }
41818+ if (vma_exec) {
41819+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
41820+ if (IS_ERR(path_exec))
41821+ path_exec = "<path too long>";
41822+ else {
41823+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
41824+ if (path_exec) {
41825+ *path_exec = 0;
41826+ path_exec = buffer_exec;
41827+ } else
41828+ path_exec = "<path too long>";
41829+ }
41830+ }
41831+ if (vma_fault) {
41832+ start = vma_fault->vm_start;
41833+ end = vma_fault->vm_end;
41834+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
41835+ if (vma_fault->vm_file) {
41836+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
41837+ if (IS_ERR(path_fault))
41838+ path_fault = "<path too long>";
41839+ else {
41840+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
41841+ if (path_fault) {
41842+ *path_fault = 0;
41843+ path_fault = buffer_fault;
41844+ } else
41845+ path_fault = "<path too long>";
41846+ }
41847+ } else
41848+ path_fault = "<anonymous mapping>";
41849+ }
41850+ up_read(&mm->mmap_sem);
41851+ }
41852+ if (tsk->signal->curr_ip)
41853+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
41854+ else
41855+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
41856+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
41857+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
41858+ task_uid(tsk), task_euid(tsk), pc, sp);
41859+ free_page((unsigned long)buffer_exec);
41860+ free_page((unsigned long)buffer_fault);
41861+ pax_report_insns(regs, pc, sp);
41862+ do_coredump(SIGKILL, SIGKILL, regs);
41863+}
41864+#endif
41865+
41866+#ifdef CONFIG_PAX_REFCOUNT
41867+void pax_report_refcount_overflow(struct pt_regs *regs)
41868+{
41869+ if (current->signal->curr_ip)
41870+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
41871+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
41872+ else
41873+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
41874+ current->comm, task_pid_nr(current), current_uid(), current_euid());
41875+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
41876+ show_regs(regs);
41877+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
41878+}
41879+#endif
41880+
41881+#ifdef CONFIG_PAX_USERCOPY
41882+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
41883+int object_is_on_stack(const void *obj, unsigned long len)
41884+{
41885+ const void * const stack = task_stack_page(current);
41886+ const void * const stackend = stack + THREAD_SIZE;
41887+
41888+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41889+ const void *frame = NULL;
41890+ const void *oldframe;
41891+#endif
41892+
41893+ if (obj + len < obj)
41894+ return -1;
41895+
41896+ if (obj + len <= stack || stackend <= obj)
41897+ return 0;
41898+
41899+ if (obj < stack || stackend < obj + len)
41900+ return -1;
41901+
41902+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41903+ oldframe = __builtin_frame_address(1);
41904+ if (oldframe)
41905+ frame = __builtin_frame_address(2);
41906+ /*
41907+ low ----------------------------------------------> high
41908+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
41909+ ^----------------^
41910+ allow copies only within here
41911+ */
41912+ while (stack <= frame && frame < stackend) {
41913+ /* if obj + len extends past the last frame, this
41914+ check won't pass and the next frame will be 0,
41915+ causing us to bail out and correctly report
41916+ the copy as invalid
41917+ */
41918+ if (obj + len <= frame)
41919+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
41920+ oldframe = frame;
41921+ frame = *(const void * const *)frame;
41922+ }
41923+ return -1;
41924+#else
41925+ return 1;
41926+#endif
41927+}
41928+
41929+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
41930+{
41931+ if (current->signal->curr_ip)
41932+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41933+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41934+ else
41935+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41936+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41937+ dump_stack();
41938+ gr_handle_kernel_exploit();
41939+ do_group_exit(SIGKILL);
41940+}
41941+#endif
41942+
41943+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
41944+void pax_track_stack(void)
41945+{
41946+ unsigned long sp = (unsigned long)&sp;
41947+ if (sp < current_thread_info()->lowest_stack &&
41948+ sp > (unsigned long)task_stack_page(current))
41949+ current_thread_info()->lowest_stack = sp;
41950+}
41951+EXPORT_SYMBOL(pax_track_stack);
41952+#endif
41953+
41954 static int zap_process(struct task_struct *start, int exit_code)
41955 {
41956 struct task_struct *t;
41957@@ -2027,17 +2312,17 @@ static void wait_for_dump_helpers(struct
41958 pipe = file->f_path.dentry->d_inode->i_pipe;
41959
41960 pipe_lock(pipe);
41961- pipe->readers++;
41962- pipe->writers--;
41963+ atomic_inc(&pipe->readers);
41964+ atomic_dec(&pipe->writers);
41965
41966- while ((pipe->readers > 1) && (!signal_pending(current))) {
41967+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
41968 wake_up_interruptible_sync(&pipe->wait);
41969 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
41970 pipe_wait(pipe);
41971 }
41972
41973- pipe->readers--;
41974- pipe->writers++;
41975+ atomic_dec(&pipe->readers);
41976+ atomic_inc(&pipe->writers);
41977 pipe_unlock(pipe);
41978
41979 }
41980@@ -2098,7 +2383,7 @@ void do_coredump(long signr, int exit_co
41981 int retval = 0;
41982 int flag = 0;
41983 int ispipe;
41984- static atomic_t core_dump_count = ATOMIC_INIT(0);
41985+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
41986 struct coredump_params cprm = {
41987 .signr = signr,
41988 .regs = regs,
41989@@ -2113,6 +2398,9 @@ void do_coredump(long signr, int exit_co
41990
41991 audit_core_dumps(signr);
41992
41993+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
41994+ gr_handle_brute_attach(current, cprm.mm_flags);
41995+
41996 binfmt = mm->binfmt;
41997 if (!binfmt || !binfmt->core_dump)
41998 goto fail;
41999@@ -2180,7 +2468,7 @@ void do_coredump(long signr, int exit_co
42000 }
42001 cprm.limit = RLIM_INFINITY;
42002
42003- dump_count = atomic_inc_return(&core_dump_count);
42004+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
42005 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
42006 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
42007 task_tgid_vnr(current), current->comm);
42008@@ -2207,6 +2495,8 @@ void do_coredump(long signr, int exit_co
42009 } else {
42010 struct inode *inode;
42011
42012+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
42013+
42014 if (cprm.limit < binfmt->min_coredump)
42015 goto fail_unlock;
42016
42017@@ -2250,7 +2540,7 @@ close_fail:
42018 filp_close(cprm.file, NULL);
42019 fail_dropcount:
42020 if (ispipe)
42021- atomic_dec(&core_dump_count);
42022+ atomic_dec_unchecked(&core_dump_count);
42023 fail_unlock:
42024 kfree(cn.corename);
42025 fail_corename:
42026@@ -2269,7 +2559,7 @@ fail:
42027 */
42028 int dump_write(struct file *file, const void *addr, int nr)
42029 {
42030- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
42031+ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
42032 }
42033 EXPORT_SYMBOL(dump_write);
42034
42035diff -urNp linux-3.1.1/fs/ext2/balloc.c linux-3.1.1/fs/ext2/balloc.c
42036--- linux-3.1.1/fs/ext2/balloc.c 2011-11-11 15:19:27.000000000 -0500
42037+++ linux-3.1.1/fs/ext2/balloc.c 2011-11-16 18:40:29.000000000 -0500
42038@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
42039
42040 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
42041 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
42042- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
42043+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
42044 sbi->s_resuid != current_fsuid() &&
42045 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
42046 return 0;
42047diff -urNp linux-3.1.1/fs/ext3/balloc.c linux-3.1.1/fs/ext3/balloc.c
42048--- linux-3.1.1/fs/ext3/balloc.c 2011-11-11 15:19:27.000000000 -0500
42049+++ linux-3.1.1/fs/ext3/balloc.c 2011-11-16 18:40:29.000000000 -0500
42050@@ -1446,7 +1446,7 @@ static int ext3_has_free_blocks(struct e
42051
42052 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
42053 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
42054- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
42055+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
42056 sbi->s_resuid != current_fsuid() &&
42057 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
42058 return 0;
42059diff -urNp linux-3.1.1/fs/ext4/balloc.c linux-3.1.1/fs/ext4/balloc.c
42060--- linux-3.1.1/fs/ext4/balloc.c 2011-11-11 15:19:27.000000000 -0500
42061+++ linux-3.1.1/fs/ext4/balloc.c 2011-11-16 18:40:29.000000000 -0500
42062@@ -394,8 +394,8 @@ static int ext4_has_free_blocks(struct e
42063 /* Hm, nope. Are (enough) root reserved blocks available? */
42064 if (sbi->s_resuid == current_fsuid() ||
42065 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
42066- capable(CAP_SYS_RESOURCE) ||
42067- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
42068+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
42069+ capable_nolog(CAP_SYS_RESOURCE)) {
42070
42071 if (free_blocks >= (nblocks + dirty_blocks))
42072 return 1;
42073diff -urNp linux-3.1.1/fs/ext4/ext4.h linux-3.1.1/fs/ext4/ext4.h
42074--- linux-3.1.1/fs/ext4/ext4.h 2011-11-11 15:19:27.000000000 -0500
42075+++ linux-3.1.1/fs/ext4/ext4.h 2011-11-16 18:39:08.000000000 -0500
42076@@ -1180,19 +1180,19 @@ struct ext4_sb_info {
42077 unsigned long s_mb_last_start;
42078
42079 /* stats for buddy allocator */
42080- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
42081- atomic_t s_bal_success; /* we found long enough chunks */
42082- atomic_t s_bal_allocated; /* in blocks */
42083- atomic_t s_bal_ex_scanned; /* total extents scanned */
42084- atomic_t s_bal_goals; /* goal hits */
42085- atomic_t s_bal_breaks; /* too long searches */
42086- atomic_t s_bal_2orders; /* 2^order hits */
42087+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
42088+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
42089+ atomic_unchecked_t s_bal_allocated; /* in blocks */
42090+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
42091+ atomic_unchecked_t s_bal_goals; /* goal hits */
42092+ atomic_unchecked_t s_bal_breaks; /* too long searches */
42093+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
42094 spinlock_t s_bal_lock;
42095 unsigned long s_mb_buddies_generated;
42096 unsigned long long s_mb_generation_time;
42097- atomic_t s_mb_lost_chunks;
42098- atomic_t s_mb_preallocated;
42099- atomic_t s_mb_discarded;
42100+ atomic_unchecked_t s_mb_lost_chunks;
42101+ atomic_unchecked_t s_mb_preallocated;
42102+ atomic_unchecked_t s_mb_discarded;
42103 atomic_t s_lock_busy;
42104
42105 /* locality groups */
42106diff -urNp linux-3.1.1/fs/ext4/file.c linux-3.1.1/fs/ext4/file.c
42107--- linux-3.1.1/fs/ext4/file.c 2011-11-11 15:19:27.000000000 -0500
42108+++ linux-3.1.1/fs/ext4/file.c 2011-11-16 18:40:29.000000000 -0500
42109@@ -181,8 +181,8 @@ static int ext4_file_open(struct inode *
42110 path.dentry = mnt->mnt_root;
42111 cp = d_path(&path, buf, sizeof(buf));
42112 if (!IS_ERR(cp)) {
42113- memcpy(sbi->s_es->s_last_mounted, cp,
42114- sizeof(sbi->s_es->s_last_mounted));
42115+ strlcpy(sbi->s_es->s_last_mounted, cp,
42116+ sizeof(sbi->s_es->s_last_mounted));
42117 ext4_mark_super_dirty(sb);
42118 }
42119 }
42120diff -urNp linux-3.1.1/fs/ext4/ioctl.c linux-3.1.1/fs/ext4/ioctl.c
42121--- linux-3.1.1/fs/ext4/ioctl.c 2011-11-11 15:19:27.000000000 -0500
42122+++ linux-3.1.1/fs/ext4/ioctl.c 2011-11-16 18:39:08.000000000 -0500
42123@@ -348,7 +348,7 @@ mext_out:
42124 if (!blk_queue_discard(q))
42125 return -EOPNOTSUPP;
42126
42127- if (copy_from_user(&range, (struct fstrim_range *)arg,
42128+ if (copy_from_user(&range, (struct fstrim_range __user *)arg,
42129 sizeof(range)))
42130 return -EFAULT;
42131
42132@@ -358,7 +358,7 @@ mext_out:
42133 if (ret < 0)
42134 return ret;
42135
42136- if (copy_to_user((struct fstrim_range *)arg, &range,
42137+ if (copy_to_user((struct fstrim_range __user *)arg, &range,
42138 sizeof(range)))
42139 return -EFAULT;
42140
42141diff -urNp linux-3.1.1/fs/ext4/mballoc.c linux-3.1.1/fs/ext4/mballoc.c
42142--- linux-3.1.1/fs/ext4/mballoc.c 2011-11-11 15:19:27.000000000 -0500
42143+++ linux-3.1.1/fs/ext4/mballoc.c 2011-11-16 18:40:29.000000000 -0500
42144@@ -1795,7 +1795,7 @@ void ext4_mb_simple_scan_group(struct ex
42145 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
42146
42147 if (EXT4_SB(sb)->s_mb_stats)
42148- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
42149+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
42150
42151 break;
42152 }
42153@@ -2089,7 +2089,7 @@ repeat:
42154 ac->ac_status = AC_STATUS_CONTINUE;
42155 ac->ac_flags |= EXT4_MB_HINT_FIRST;
42156 cr = 3;
42157- atomic_inc(&sbi->s_mb_lost_chunks);
42158+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
42159 goto repeat;
42160 }
42161 }
42162@@ -2132,6 +2132,8 @@ static int ext4_mb_seq_groups_show(struc
42163 ext4_grpblk_t counters[16];
42164 } sg;
42165
42166+ pax_track_stack();
42167+
42168 group--;
42169 if (group == 0)
42170 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
42171@@ -2573,25 +2575,25 @@ int ext4_mb_release(struct super_block *
42172 if (sbi->s_mb_stats) {
42173 ext4_msg(sb, KERN_INFO,
42174 "mballoc: %u blocks %u reqs (%u success)",
42175- atomic_read(&sbi->s_bal_allocated),
42176- atomic_read(&sbi->s_bal_reqs),
42177- atomic_read(&sbi->s_bal_success));
42178+ atomic_read_unchecked(&sbi->s_bal_allocated),
42179+ atomic_read_unchecked(&sbi->s_bal_reqs),
42180+ atomic_read_unchecked(&sbi->s_bal_success));
42181 ext4_msg(sb, KERN_INFO,
42182 "mballoc: %u extents scanned, %u goal hits, "
42183 "%u 2^N hits, %u breaks, %u lost",
42184- atomic_read(&sbi->s_bal_ex_scanned),
42185- atomic_read(&sbi->s_bal_goals),
42186- atomic_read(&sbi->s_bal_2orders),
42187- atomic_read(&sbi->s_bal_breaks),
42188- atomic_read(&sbi->s_mb_lost_chunks));
42189+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
42190+ atomic_read_unchecked(&sbi->s_bal_goals),
42191+ atomic_read_unchecked(&sbi->s_bal_2orders),
42192+ atomic_read_unchecked(&sbi->s_bal_breaks),
42193+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
42194 ext4_msg(sb, KERN_INFO,
42195 "mballoc: %lu generated and it took %Lu",
42196 sbi->s_mb_buddies_generated,
42197 sbi->s_mb_generation_time);
42198 ext4_msg(sb, KERN_INFO,
42199 "mballoc: %u preallocated, %u discarded",
42200- atomic_read(&sbi->s_mb_preallocated),
42201- atomic_read(&sbi->s_mb_discarded));
42202+ atomic_read_unchecked(&sbi->s_mb_preallocated),
42203+ atomic_read_unchecked(&sbi->s_mb_discarded));
42204 }
42205
42206 free_percpu(sbi->s_locality_groups);
42207@@ -3070,16 +3072,16 @@ static void ext4_mb_collect_stats(struct
42208 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
42209
42210 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
42211- atomic_inc(&sbi->s_bal_reqs);
42212- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
42213+ atomic_inc_unchecked(&sbi->s_bal_reqs);
42214+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
42215 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
42216- atomic_inc(&sbi->s_bal_success);
42217- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
42218+ atomic_inc_unchecked(&sbi->s_bal_success);
42219+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
42220 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
42221 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
42222- atomic_inc(&sbi->s_bal_goals);
42223+ atomic_inc_unchecked(&sbi->s_bal_goals);
42224 if (ac->ac_found > sbi->s_mb_max_to_scan)
42225- atomic_inc(&sbi->s_bal_breaks);
42226+ atomic_inc_unchecked(&sbi->s_bal_breaks);
42227 }
42228
42229 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
42230@@ -3477,7 +3479,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
42231 trace_ext4_mb_new_inode_pa(ac, pa);
42232
42233 ext4_mb_use_inode_pa(ac, pa);
42234- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42235+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42236
42237 ei = EXT4_I(ac->ac_inode);
42238 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
42239@@ -3537,7 +3539,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
42240 trace_ext4_mb_new_group_pa(ac, pa);
42241
42242 ext4_mb_use_group_pa(ac, pa);
42243- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42244+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42245
42246 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
42247 lg = ac->ac_lg;
42248@@ -3625,7 +3627,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
42249 * from the bitmap and continue.
42250 */
42251 }
42252- atomic_add(free, &sbi->s_mb_discarded);
42253+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
42254
42255 return err;
42256 }
42257@@ -3643,7 +3645,7 @@ ext4_mb_release_group_pa(struct ext4_bud
42258 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
42259 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
42260 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
42261- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
42262+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
42263 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
42264
42265 return 0;
42266diff -urNp linux-3.1.1/fs/fcntl.c linux-3.1.1/fs/fcntl.c
42267--- linux-3.1.1/fs/fcntl.c 2011-11-11 15:19:27.000000000 -0500
42268+++ linux-3.1.1/fs/fcntl.c 2011-11-16 23:40:25.000000000 -0500
42269@@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct
42270 if (err)
42271 return err;
42272
42273+ if (gr_handle_chroot_fowner(pid, type))
42274+ return -ENOENT;
42275+ if (gr_check_protected_task_fowner(pid, type))
42276+ return -EACCES;
42277+
42278 f_modown(filp, pid, type, force);
42279 return 0;
42280 }
42281@@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
42282
42283 static int f_setown_ex(struct file *filp, unsigned long arg)
42284 {
42285- struct f_owner_ex * __user owner_p = (void * __user)arg;
42286+ struct f_owner_ex __user *owner_p = (void __user *)arg;
42287 struct f_owner_ex owner;
42288 struct pid *pid;
42289 int type;
42290@@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp
42291
42292 static int f_getown_ex(struct file *filp, unsigned long arg)
42293 {
42294- struct f_owner_ex * __user owner_p = (void * __user)arg;
42295+ struct f_owner_ex __user *owner_p = (void __user *)arg;
42296 struct f_owner_ex owner;
42297 int ret = 0;
42298
42299@@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned in
42300 switch (cmd) {
42301 case F_DUPFD:
42302 case F_DUPFD_CLOEXEC:
42303+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
42304 if (arg >= rlimit(RLIMIT_NOFILE))
42305 break;
42306 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
42307diff -urNp linux-3.1.1/fs/fifo.c linux-3.1.1/fs/fifo.c
42308--- linux-3.1.1/fs/fifo.c 2011-11-11 15:19:27.000000000 -0500
42309+++ linux-3.1.1/fs/fifo.c 2011-11-16 18:39:08.000000000 -0500
42310@@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode
42311 */
42312 filp->f_op = &read_pipefifo_fops;
42313 pipe->r_counter++;
42314- if (pipe->readers++ == 0)
42315+ if (atomic_inc_return(&pipe->readers) == 1)
42316 wake_up_partner(inode);
42317
42318- if (!pipe->writers) {
42319+ if (!atomic_read(&pipe->writers)) {
42320 if ((filp->f_flags & O_NONBLOCK)) {
42321 /* suppress POLLHUP until we have
42322 * seen a writer */
42323@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode
42324 * errno=ENXIO when there is no process reading the FIFO.
42325 */
42326 ret = -ENXIO;
42327- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
42328+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
42329 goto err;
42330
42331 filp->f_op = &write_pipefifo_fops;
42332 pipe->w_counter++;
42333- if (!pipe->writers++)
42334+ if (atomic_inc_return(&pipe->writers) == 1)
42335 wake_up_partner(inode);
42336
42337- if (!pipe->readers) {
42338+ if (!atomic_read(&pipe->readers)) {
42339 wait_for_partner(inode, &pipe->r_counter);
42340 if (signal_pending(current))
42341 goto err_wr;
42342@@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode
42343 */
42344 filp->f_op = &rdwr_pipefifo_fops;
42345
42346- pipe->readers++;
42347- pipe->writers++;
42348+ atomic_inc(&pipe->readers);
42349+ atomic_inc(&pipe->writers);
42350 pipe->r_counter++;
42351 pipe->w_counter++;
42352- if (pipe->readers == 1 || pipe->writers == 1)
42353+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
42354 wake_up_partner(inode);
42355 break;
42356
42357@@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode
42358 return 0;
42359
42360 err_rd:
42361- if (!--pipe->readers)
42362+ if (atomic_dec_and_test(&pipe->readers))
42363 wake_up_interruptible(&pipe->wait);
42364 ret = -ERESTARTSYS;
42365 goto err;
42366
42367 err_wr:
42368- if (!--pipe->writers)
42369+ if (atomic_dec_and_test(&pipe->writers))
42370 wake_up_interruptible(&pipe->wait);
42371 ret = -ERESTARTSYS;
42372 goto err;
42373
42374 err:
42375- if (!pipe->readers && !pipe->writers)
42376+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
42377 free_pipe_info(inode);
42378
42379 err_nocleanup:
42380diff -urNp linux-3.1.1/fs/file.c linux-3.1.1/fs/file.c
42381--- linux-3.1.1/fs/file.c 2011-11-11 15:19:27.000000000 -0500
42382+++ linux-3.1.1/fs/file.c 2011-11-16 18:40:29.000000000 -0500
42383@@ -15,6 +15,7 @@
42384 #include <linux/slab.h>
42385 #include <linux/vmalloc.h>
42386 #include <linux/file.h>
42387+#include <linux/security.h>
42388 #include <linux/fdtable.h>
42389 #include <linux/bitops.h>
42390 #include <linux/interrupt.h>
42391@@ -254,6 +255,7 @@ int expand_files(struct files_struct *fi
42392 * N.B. For clone tasks sharing a files structure, this test
42393 * will limit the total number of files that can be opened.
42394 */
42395+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
42396 if (nr >= rlimit(RLIMIT_NOFILE))
42397 return -EMFILE;
42398
42399diff -urNp linux-3.1.1/fs/filesystems.c linux-3.1.1/fs/filesystems.c
42400--- linux-3.1.1/fs/filesystems.c 2011-11-11 15:19:27.000000000 -0500
42401+++ linux-3.1.1/fs/filesystems.c 2011-11-16 18:40:29.000000000 -0500
42402@@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(con
42403 int len = dot ? dot - name : strlen(name);
42404
42405 fs = __get_fs_type(name, len);
42406+
42407+#ifdef CONFIG_GRKERNSEC_MODHARDEN
42408+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
42409+#else
42410 if (!fs && (request_module("%.*s", len, name) == 0))
42411+#endif
42412 fs = __get_fs_type(name, len);
42413
42414 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
42415diff -urNp linux-3.1.1/fs/fscache/cookie.c linux-3.1.1/fs/fscache/cookie.c
42416--- linux-3.1.1/fs/fscache/cookie.c 2011-11-11 15:19:27.000000000 -0500
42417+++ linux-3.1.1/fs/fscache/cookie.c 2011-11-16 18:39:08.000000000 -0500
42418@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
42419 parent ? (char *) parent->def->name : "<no-parent>",
42420 def->name, netfs_data);
42421
42422- fscache_stat(&fscache_n_acquires);
42423+ fscache_stat_unchecked(&fscache_n_acquires);
42424
42425 /* if there's no parent cookie, then we don't create one here either */
42426 if (!parent) {
42427- fscache_stat(&fscache_n_acquires_null);
42428+ fscache_stat_unchecked(&fscache_n_acquires_null);
42429 _leave(" [no parent]");
42430 return NULL;
42431 }
42432@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
42433 /* allocate and initialise a cookie */
42434 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
42435 if (!cookie) {
42436- fscache_stat(&fscache_n_acquires_oom);
42437+ fscache_stat_unchecked(&fscache_n_acquires_oom);
42438 _leave(" [ENOMEM]");
42439 return NULL;
42440 }
42441@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
42442
42443 switch (cookie->def->type) {
42444 case FSCACHE_COOKIE_TYPE_INDEX:
42445- fscache_stat(&fscache_n_cookie_index);
42446+ fscache_stat_unchecked(&fscache_n_cookie_index);
42447 break;
42448 case FSCACHE_COOKIE_TYPE_DATAFILE:
42449- fscache_stat(&fscache_n_cookie_data);
42450+ fscache_stat_unchecked(&fscache_n_cookie_data);
42451 break;
42452 default:
42453- fscache_stat(&fscache_n_cookie_special);
42454+ fscache_stat_unchecked(&fscache_n_cookie_special);
42455 break;
42456 }
42457
42458@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
42459 if (fscache_acquire_non_index_cookie(cookie) < 0) {
42460 atomic_dec(&parent->n_children);
42461 __fscache_cookie_put(cookie);
42462- fscache_stat(&fscache_n_acquires_nobufs);
42463+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
42464 _leave(" = NULL");
42465 return NULL;
42466 }
42467 }
42468
42469- fscache_stat(&fscache_n_acquires_ok);
42470+ fscache_stat_unchecked(&fscache_n_acquires_ok);
42471 _leave(" = %p", cookie);
42472 return cookie;
42473 }
42474@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
42475 cache = fscache_select_cache_for_object(cookie->parent);
42476 if (!cache) {
42477 up_read(&fscache_addremove_sem);
42478- fscache_stat(&fscache_n_acquires_no_cache);
42479+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
42480 _leave(" = -ENOMEDIUM [no cache]");
42481 return -ENOMEDIUM;
42482 }
42483@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
42484 object = cache->ops->alloc_object(cache, cookie);
42485 fscache_stat_d(&fscache_n_cop_alloc_object);
42486 if (IS_ERR(object)) {
42487- fscache_stat(&fscache_n_object_no_alloc);
42488+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
42489 ret = PTR_ERR(object);
42490 goto error;
42491 }
42492
42493- fscache_stat(&fscache_n_object_alloc);
42494+ fscache_stat_unchecked(&fscache_n_object_alloc);
42495
42496 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
42497
42498@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
42499 struct fscache_object *object;
42500 struct hlist_node *_p;
42501
42502- fscache_stat(&fscache_n_updates);
42503+ fscache_stat_unchecked(&fscache_n_updates);
42504
42505 if (!cookie) {
42506- fscache_stat(&fscache_n_updates_null);
42507+ fscache_stat_unchecked(&fscache_n_updates_null);
42508 _leave(" [no cookie]");
42509 return;
42510 }
42511@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
42512 struct fscache_object *object;
42513 unsigned long event;
42514
42515- fscache_stat(&fscache_n_relinquishes);
42516+ fscache_stat_unchecked(&fscache_n_relinquishes);
42517 if (retire)
42518- fscache_stat(&fscache_n_relinquishes_retire);
42519+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
42520
42521 if (!cookie) {
42522- fscache_stat(&fscache_n_relinquishes_null);
42523+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
42524 _leave(" [no cookie]");
42525 return;
42526 }
42527@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
42528
42529 /* wait for the cookie to finish being instantiated (or to fail) */
42530 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
42531- fscache_stat(&fscache_n_relinquishes_waitcrt);
42532+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
42533 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
42534 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
42535 }
42536diff -urNp linux-3.1.1/fs/fscache/internal.h linux-3.1.1/fs/fscache/internal.h
42537--- linux-3.1.1/fs/fscache/internal.h 2011-11-11 15:19:27.000000000 -0500
42538+++ linux-3.1.1/fs/fscache/internal.h 2011-11-16 18:39:08.000000000 -0500
42539@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
42540 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
42541 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
42542
42543-extern atomic_t fscache_n_op_pend;
42544-extern atomic_t fscache_n_op_run;
42545-extern atomic_t fscache_n_op_enqueue;
42546-extern atomic_t fscache_n_op_deferred_release;
42547-extern atomic_t fscache_n_op_release;
42548-extern atomic_t fscache_n_op_gc;
42549-extern atomic_t fscache_n_op_cancelled;
42550-extern atomic_t fscache_n_op_rejected;
42551-
42552-extern atomic_t fscache_n_attr_changed;
42553-extern atomic_t fscache_n_attr_changed_ok;
42554-extern atomic_t fscache_n_attr_changed_nobufs;
42555-extern atomic_t fscache_n_attr_changed_nomem;
42556-extern atomic_t fscache_n_attr_changed_calls;
42557-
42558-extern atomic_t fscache_n_allocs;
42559-extern atomic_t fscache_n_allocs_ok;
42560-extern atomic_t fscache_n_allocs_wait;
42561-extern atomic_t fscache_n_allocs_nobufs;
42562-extern atomic_t fscache_n_allocs_intr;
42563-extern atomic_t fscache_n_allocs_object_dead;
42564-extern atomic_t fscache_n_alloc_ops;
42565-extern atomic_t fscache_n_alloc_op_waits;
42566-
42567-extern atomic_t fscache_n_retrievals;
42568-extern atomic_t fscache_n_retrievals_ok;
42569-extern atomic_t fscache_n_retrievals_wait;
42570-extern atomic_t fscache_n_retrievals_nodata;
42571-extern atomic_t fscache_n_retrievals_nobufs;
42572-extern atomic_t fscache_n_retrievals_intr;
42573-extern atomic_t fscache_n_retrievals_nomem;
42574-extern atomic_t fscache_n_retrievals_object_dead;
42575-extern atomic_t fscache_n_retrieval_ops;
42576-extern atomic_t fscache_n_retrieval_op_waits;
42577-
42578-extern atomic_t fscache_n_stores;
42579-extern atomic_t fscache_n_stores_ok;
42580-extern atomic_t fscache_n_stores_again;
42581-extern atomic_t fscache_n_stores_nobufs;
42582-extern atomic_t fscache_n_stores_oom;
42583-extern atomic_t fscache_n_store_ops;
42584-extern atomic_t fscache_n_store_calls;
42585-extern atomic_t fscache_n_store_pages;
42586-extern atomic_t fscache_n_store_radix_deletes;
42587-extern atomic_t fscache_n_store_pages_over_limit;
42588-
42589-extern atomic_t fscache_n_store_vmscan_not_storing;
42590-extern atomic_t fscache_n_store_vmscan_gone;
42591-extern atomic_t fscache_n_store_vmscan_busy;
42592-extern atomic_t fscache_n_store_vmscan_cancelled;
42593-
42594-extern atomic_t fscache_n_marks;
42595-extern atomic_t fscache_n_uncaches;
42596-
42597-extern atomic_t fscache_n_acquires;
42598-extern atomic_t fscache_n_acquires_null;
42599-extern atomic_t fscache_n_acquires_no_cache;
42600-extern atomic_t fscache_n_acquires_ok;
42601-extern atomic_t fscache_n_acquires_nobufs;
42602-extern atomic_t fscache_n_acquires_oom;
42603-
42604-extern atomic_t fscache_n_updates;
42605-extern atomic_t fscache_n_updates_null;
42606-extern atomic_t fscache_n_updates_run;
42607-
42608-extern atomic_t fscache_n_relinquishes;
42609-extern atomic_t fscache_n_relinquishes_null;
42610-extern atomic_t fscache_n_relinquishes_waitcrt;
42611-extern atomic_t fscache_n_relinquishes_retire;
42612-
42613-extern atomic_t fscache_n_cookie_index;
42614-extern atomic_t fscache_n_cookie_data;
42615-extern atomic_t fscache_n_cookie_special;
42616-
42617-extern atomic_t fscache_n_object_alloc;
42618-extern atomic_t fscache_n_object_no_alloc;
42619-extern atomic_t fscache_n_object_lookups;
42620-extern atomic_t fscache_n_object_lookups_negative;
42621-extern atomic_t fscache_n_object_lookups_positive;
42622-extern atomic_t fscache_n_object_lookups_timed_out;
42623-extern atomic_t fscache_n_object_created;
42624-extern atomic_t fscache_n_object_avail;
42625-extern atomic_t fscache_n_object_dead;
42626-
42627-extern atomic_t fscache_n_checkaux_none;
42628-extern atomic_t fscache_n_checkaux_okay;
42629-extern atomic_t fscache_n_checkaux_update;
42630-extern atomic_t fscache_n_checkaux_obsolete;
42631+extern atomic_unchecked_t fscache_n_op_pend;
42632+extern atomic_unchecked_t fscache_n_op_run;
42633+extern atomic_unchecked_t fscache_n_op_enqueue;
42634+extern atomic_unchecked_t fscache_n_op_deferred_release;
42635+extern atomic_unchecked_t fscache_n_op_release;
42636+extern atomic_unchecked_t fscache_n_op_gc;
42637+extern atomic_unchecked_t fscache_n_op_cancelled;
42638+extern atomic_unchecked_t fscache_n_op_rejected;
42639+
42640+extern atomic_unchecked_t fscache_n_attr_changed;
42641+extern atomic_unchecked_t fscache_n_attr_changed_ok;
42642+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
42643+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
42644+extern atomic_unchecked_t fscache_n_attr_changed_calls;
42645+
42646+extern atomic_unchecked_t fscache_n_allocs;
42647+extern atomic_unchecked_t fscache_n_allocs_ok;
42648+extern atomic_unchecked_t fscache_n_allocs_wait;
42649+extern atomic_unchecked_t fscache_n_allocs_nobufs;
42650+extern atomic_unchecked_t fscache_n_allocs_intr;
42651+extern atomic_unchecked_t fscache_n_allocs_object_dead;
42652+extern atomic_unchecked_t fscache_n_alloc_ops;
42653+extern atomic_unchecked_t fscache_n_alloc_op_waits;
42654+
42655+extern atomic_unchecked_t fscache_n_retrievals;
42656+extern atomic_unchecked_t fscache_n_retrievals_ok;
42657+extern atomic_unchecked_t fscache_n_retrievals_wait;
42658+extern atomic_unchecked_t fscache_n_retrievals_nodata;
42659+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
42660+extern atomic_unchecked_t fscache_n_retrievals_intr;
42661+extern atomic_unchecked_t fscache_n_retrievals_nomem;
42662+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
42663+extern atomic_unchecked_t fscache_n_retrieval_ops;
42664+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
42665+
42666+extern atomic_unchecked_t fscache_n_stores;
42667+extern atomic_unchecked_t fscache_n_stores_ok;
42668+extern atomic_unchecked_t fscache_n_stores_again;
42669+extern atomic_unchecked_t fscache_n_stores_nobufs;
42670+extern atomic_unchecked_t fscache_n_stores_oom;
42671+extern atomic_unchecked_t fscache_n_store_ops;
42672+extern atomic_unchecked_t fscache_n_store_calls;
42673+extern atomic_unchecked_t fscache_n_store_pages;
42674+extern atomic_unchecked_t fscache_n_store_radix_deletes;
42675+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
42676+
42677+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
42678+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
42679+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
42680+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
42681+
42682+extern atomic_unchecked_t fscache_n_marks;
42683+extern atomic_unchecked_t fscache_n_uncaches;
42684+
42685+extern atomic_unchecked_t fscache_n_acquires;
42686+extern atomic_unchecked_t fscache_n_acquires_null;
42687+extern atomic_unchecked_t fscache_n_acquires_no_cache;
42688+extern atomic_unchecked_t fscache_n_acquires_ok;
42689+extern atomic_unchecked_t fscache_n_acquires_nobufs;
42690+extern atomic_unchecked_t fscache_n_acquires_oom;
42691+
42692+extern atomic_unchecked_t fscache_n_updates;
42693+extern atomic_unchecked_t fscache_n_updates_null;
42694+extern atomic_unchecked_t fscache_n_updates_run;
42695+
42696+extern atomic_unchecked_t fscache_n_relinquishes;
42697+extern atomic_unchecked_t fscache_n_relinquishes_null;
42698+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
42699+extern atomic_unchecked_t fscache_n_relinquishes_retire;
42700+
42701+extern atomic_unchecked_t fscache_n_cookie_index;
42702+extern atomic_unchecked_t fscache_n_cookie_data;
42703+extern atomic_unchecked_t fscache_n_cookie_special;
42704+
42705+extern atomic_unchecked_t fscache_n_object_alloc;
42706+extern atomic_unchecked_t fscache_n_object_no_alloc;
42707+extern atomic_unchecked_t fscache_n_object_lookups;
42708+extern atomic_unchecked_t fscache_n_object_lookups_negative;
42709+extern atomic_unchecked_t fscache_n_object_lookups_positive;
42710+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
42711+extern atomic_unchecked_t fscache_n_object_created;
42712+extern atomic_unchecked_t fscache_n_object_avail;
42713+extern atomic_unchecked_t fscache_n_object_dead;
42714+
42715+extern atomic_unchecked_t fscache_n_checkaux_none;
42716+extern atomic_unchecked_t fscache_n_checkaux_okay;
42717+extern atomic_unchecked_t fscache_n_checkaux_update;
42718+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
42719
42720 extern atomic_t fscache_n_cop_alloc_object;
42721 extern atomic_t fscache_n_cop_lookup_object;
42722@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t
42723 atomic_inc(stat);
42724 }
42725
42726+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
42727+{
42728+ atomic_inc_unchecked(stat);
42729+}
42730+
42731 static inline void fscache_stat_d(atomic_t *stat)
42732 {
42733 atomic_dec(stat);
42734@@ -267,6 +272,7 @@ extern const struct file_operations fsca
42735
42736 #define __fscache_stat(stat) (NULL)
42737 #define fscache_stat(stat) do {} while (0)
42738+#define fscache_stat_unchecked(stat) do {} while (0)
42739 #define fscache_stat_d(stat) do {} while (0)
42740 #endif
42741
42742diff -urNp linux-3.1.1/fs/fscache/object.c linux-3.1.1/fs/fscache/object.c
42743--- linux-3.1.1/fs/fscache/object.c 2011-11-11 15:19:27.000000000 -0500
42744+++ linux-3.1.1/fs/fscache/object.c 2011-11-16 18:39:08.000000000 -0500
42745@@ -128,7 +128,7 @@ static void fscache_object_state_machine
42746 /* update the object metadata on disk */
42747 case FSCACHE_OBJECT_UPDATING:
42748 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
42749- fscache_stat(&fscache_n_updates_run);
42750+ fscache_stat_unchecked(&fscache_n_updates_run);
42751 fscache_stat(&fscache_n_cop_update_object);
42752 object->cache->ops->update_object(object);
42753 fscache_stat_d(&fscache_n_cop_update_object);
42754@@ -217,7 +217,7 @@ static void fscache_object_state_machine
42755 spin_lock(&object->lock);
42756 object->state = FSCACHE_OBJECT_DEAD;
42757 spin_unlock(&object->lock);
42758- fscache_stat(&fscache_n_object_dead);
42759+ fscache_stat_unchecked(&fscache_n_object_dead);
42760 goto terminal_transit;
42761
42762 /* handle the parent cache of this object being withdrawn from
42763@@ -232,7 +232,7 @@ static void fscache_object_state_machine
42764 spin_lock(&object->lock);
42765 object->state = FSCACHE_OBJECT_DEAD;
42766 spin_unlock(&object->lock);
42767- fscache_stat(&fscache_n_object_dead);
42768+ fscache_stat_unchecked(&fscache_n_object_dead);
42769 goto terminal_transit;
42770
42771 /* complain about the object being woken up once it is
42772@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct
42773 parent->cookie->def->name, cookie->def->name,
42774 object->cache->tag->name);
42775
42776- fscache_stat(&fscache_n_object_lookups);
42777+ fscache_stat_unchecked(&fscache_n_object_lookups);
42778 fscache_stat(&fscache_n_cop_lookup_object);
42779 ret = object->cache->ops->lookup_object(object);
42780 fscache_stat_d(&fscache_n_cop_lookup_object);
42781@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct
42782 if (ret == -ETIMEDOUT) {
42783 /* probably stuck behind another object, so move this one to
42784 * the back of the queue */
42785- fscache_stat(&fscache_n_object_lookups_timed_out);
42786+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
42787 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
42788 }
42789
42790@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(stru
42791
42792 spin_lock(&object->lock);
42793 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
42794- fscache_stat(&fscache_n_object_lookups_negative);
42795+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
42796
42797 /* transit here to allow write requests to begin stacking up
42798 * and read requests to begin returning ENODATA */
42799@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fsca
42800 * result, in which case there may be data available */
42801 spin_lock(&object->lock);
42802 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
42803- fscache_stat(&fscache_n_object_lookups_positive);
42804+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
42805
42806 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
42807
42808@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fsca
42809 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
42810 } else {
42811 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
42812- fscache_stat(&fscache_n_object_created);
42813+ fscache_stat_unchecked(&fscache_n_object_created);
42814
42815 object->state = FSCACHE_OBJECT_AVAILABLE;
42816 spin_unlock(&object->lock);
42817@@ -602,7 +602,7 @@ static void fscache_object_available(str
42818 fscache_enqueue_dependents(object);
42819
42820 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
42821- fscache_stat(&fscache_n_object_avail);
42822+ fscache_stat_unchecked(&fscache_n_object_avail);
42823
42824 _leave("");
42825 }
42826@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
42827 enum fscache_checkaux result;
42828
42829 if (!object->cookie->def->check_aux) {
42830- fscache_stat(&fscache_n_checkaux_none);
42831+ fscache_stat_unchecked(&fscache_n_checkaux_none);
42832 return FSCACHE_CHECKAUX_OKAY;
42833 }
42834
42835@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
42836 switch (result) {
42837 /* entry okay as is */
42838 case FSCACHE_CHECKAUX_OKAY:
42839- fscache_stat(&fscache_n_checkaux_okay);
42840+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
42841 break;
42842
42843 /* entry requires update */
42844 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
42845- fscache_stat(&fscache_n_checkaux_update);
42846+ fscache_stat_unchecked(&fscache_n_checkaux_update);
42847 break;
42848
42849 /* entry requires deletion */
42850 case FSCACHE_CHECKAUX_OBSOLETE:
42851- fscache_stat(&fscache_n_checkaux_obsolete);
42852+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
42853 break;
42854
42855 default:
42856diff -urNp linux-3.1.1/fs/fscache/operation.c linux-3.1.1/fs/fscache/operation.c
42857--- linux-3.1.1/fs/fscache/operation.c 2011-11-11 15:19:27.000000000 -0500
42858+++ linux-3.1.1/fs/fscache/operation.c 2011-11-16 18:39:08.000000000 -0500
42859@@ -17,7 +17,7 @@
42860 #include <linux/slab.h>
42861 #include "internal.h"
42862
42863-atomic_t fscache_op_debug_id;
42864+atomic_unchecked_t fscache_op_debug_id;
42865 EXPORT_SYMBOL(fscache_op_debug_id);
42866
42867 /**
42868@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fs
42869 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
42870 ASSERTCMP(atomic_read(&op->usage), >, 0);
42871
42872- fscache_stat(&fscache_n_op_enqueue);
42873+ fscache_stat_unchecked(&fscache_n_op_enqueue);
42874 switch (op->flags & FSCACHE_OP_TYPE) {
42875 case FSCACHE_OP_ASYNC:
42876 _debug("queue async");
42877@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscach
42878 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
42879 if (op->processor)
42880 fscache_enqueue_operation(op);
42881- fscache_stat(&fscache_n_op_run);
42882+ fscache_stat_unchecked(&fscache_n_op_run);
42883 }
42884
42885 /*
42886@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct f
42887 if (object->n_ops > 1) {
42888 atomic_inc(&op->usage);
42889 list_add_tail(&op->pend_link, &object->pending_ops);
42890- fscache_stat(&fscache_n_op_pend);
42891+ fscache_stat_unchecked(&fscache_n_op_pend);
42892 } else if (!list_empty(&object->pending_ops)) {
42893 atomic_inc(&op->usage);
42894 list_add_tail(&op->pend_link, &object->pending_ops);
42895- fscache_stat(&fscache_n_op_pend);
42896+ fscache_stat_unchecked(&fscache_n_op_pend);
42897 fscache_start_operations(object);
42898 } else {
42899 ASSERTCMP(object->n_in_progress, ==, 0);
42900@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct f
42901 object->n_exclusive++; /* reads and writes must wait */
42902 atomic_inc(&op->usage);
42903 list_add_tail(&op->pend_link, &object->pending_ops);
42904- fscache_stat(&fscache_n_op_pend);
42905+ fscache_stat_unchecked(&fscache_n_op_pend);
42906 ret = 0;
42907 } else {
42908 /* not allowed to submit ops in any other state */
42909@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_obj
42910 if (object->n_exclusive > 0) {
42911 atomic_inc(&op->usage);
42912 list_add_tail(&op->pend_link, &object->pending_ops);
42913- fscache_stat(&fscache_n_op_pend);
42914+ fscache_stat_unchecked(&fscache_n_op_pend);
42915 } else if (!list_empty(&object->pending_ops)) {
42916 atomic_inc(&op->usage);
42917 list_add_tail(&op->pend_link, &object->pending_ops);
42918- fscache_stat(&fscache_n_op_pend);
42919+ fscache_stat_unchecked(&fscache_n_op_pend);
42920 fscache_start_operations(object);
42921 } else {
42922 ASSERTCMP(object->n_exclusive, ==, 0);
42923@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_obj
42924 object->n_ops++;
42925 atomic_inc(&op->usage);
42926 list_add_tail(&op->pend_link, &object->pending_ops);
42927- fscache_stat(&fscache_n_op_pend);
42928+ fscache_stat_unchecked(&fscache_n_op_pend);
42929 ret = 0;
42930 } else if (object->state == FSCACHE_OBJECT_DYING ||
42931 object->state == FSCACHE_OBJECT_LC_DYING ||
42932 object->state == FSCACHE_OBJECT_WITHDRAWING) {
42933- fscache_stat(&fscache_n_op_rejected);
42934+ fscache_stat_unchecked(&fscache_n_op_rejected);
42935 ret = -ENOBUFS;
42936 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
42937 fscache_report_unexpected_submission(object, op, ostate);
42938@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_ope
42939
42940 ret = -EBUSY;
42941 if (!list_empty(&op->pend_link)) {
42942- fscache_stat(&fscache_n_op_cancelled);
42943+ fscache_stat_unchecked(&fscache_n_op_cancelled);
42944 list_del_init(&op->pend_link);
42945 object->n_ops--;
42946 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
42947@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscach
42948 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
42949 BUG();
42950
42951- fscache_stat(&fscache_n_op_release);
42952+ fscache_stat_unchecked(&fscache_n_op_release);
42953
42954 if (op->release) {
42955 op->release(op);
42956@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscach
42957 * lock, and defer it otherwise */
42958 if (!spin_trylock(&object->lock)) {
42959 _debug("defer put");
42960- fscache_stat(&fscache_n_op_deferred_release);
42961+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
42962
42963 cache = object->cache;
42964 spin_lock(&cache->op_gc_list_lock);
42965@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_st
42966
42967 _debug("GC DEFERRED REL OBJ%x OP%x",
42968 object->debug_id, op->debug_id);
42969- fscache_stat(&fscache_n_op_gc);
42970+ fscache_stat_unchecked(&fscache_n_op_gc);
42971
42972 ASSERTCMP(atomic_read(&op->usage), ==, 0);
42973
42974diff -urNp linux-3.1.1/fs/fscache/page.c linux-3.1.1/fs/fscache/page.c
42975--- linux-3.1.1/fs/fscache/page.c 2011-11-11 15:19:27.000000000 -0500
42976+++ linux-3.1.1/fs/fscache/page.c 2011-11-16 18:39:08.000000000 -0500
42977@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct
42978 val = radix_tree_lookup(&cookie->stores, page->index);
42979 if (!val) {
42980 rcu_read_unlock();
42981- fscache_stat(&fscache_n_store_vmscan_not_storing);
42982+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
42983 __fscache_uncache_page(cookie, page);
42984 return true;
42985 }
42986@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct
42987 spin_unlock(&cookie->stores_lock);
42988
42989 if (xpage) {
42990- fscache_stat(&fscache_n_store_vmscan_cancelled);
42991- fscache_stat(&fscache_n_store_radix_deletes);
42992+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
42993+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
42994 ASSERTCMP(xpage, ==, page);
42995 } else {
42996- fscache_stat(&fscache_n_store_vmscan_gone);
42997+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
42998 }
42999
43000 wake_up_bit(&cookie->flags, 0);
43001@@ -107,7 +107,7 @@ page_busy:
43002 /* we might want to wait here, but that could deadlock the allocator as
43003 * the work threads writing to the cache may all end up sleeping
43004 * on memory allocation */
43005- fscache_stat(&fscache_n_store_vmscan_busy);
43006+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
43007 return false;
43008 }
43009 EXPORT_SYMBOL(__fscache_maybe_release_page);
43010@@ -131,7 +131,7 @@ static void fscache_end_page_write(struc
43011 FSCACHE_COOKIE_STORING_TAG);
43012 if (!radix_tree_tag_get(&cookie->stores, page->index,
43013 FSCACHE_COOKIE_PENDING_TAG)) {
43014- fscache_stat(&fscache_n_store_radix_deletes);
43015+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
43016 xpage = radix_tree_delete(&cookie->stores, page->index);
43017 }
43018 spin_unlock(&cookie->stores_lock);
43019@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(stru
43020
43021 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
43022
43023- fscache_stat(&fscache_n_attr_changed_calls);
43024+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
43025
43026 if (fscache_object_is_active(object)) {
43027 fscache_stat(&fscache_n_cop_attr_changed);
43028@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscach
43029
43030 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43031
43032- fscache_stat(&fscache_n_attr_changed);
43033+ fscache_stat_unchecked(&fscache_n_attr_changed);
43034
43035 op = kzalloc(sizeof(*op), GFP_KERNEL);
43036 if (!op) {
43037- fscache_stat(&fscache_n_attr_changed_nomem);
43038+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
43039 _leave(" = -ENOMEM");
43040 return -ENOMEM;
43041 }
43042@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscach
43043 if (fscache_submit_exclusive_op(object, op) < 0)
43044 goto nobufs;
43045 spin_unlock(&cookie->lock);
43046- fscache_stat(&fscache_n_attr_changed_ok);
43047+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
43048 fscache_put_operation(op);
43049 _leave(" = 0");
43050 return 0;
43051@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscach
43052 nobufs:
43053 spin_unlock(&cookie->lock);
43054 kfree(op);
43055- fscache_stat(&fscache_n_attr_changed_nobufs);
43056+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
43057 _leave(" = %d", -ENOBUFS);
43058 return -ENOBUFS;
43059 }
43060@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache
43061 /* allocate a retrieval operation and attempt to submit it */
43062 op = kzalloc(sizeof(*op), GFP_NOIO);
43063 if (!op) {
43064- fscache_stat(&fscache_n_retrievals_nomem);
43065+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43066 return NULL;
43067 }
43068
43069@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_loo
43070 return 0;
43071 }
43072
43073- fscache_stat(&fscache_n_retrievals_wait);
43074+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
43075
43076 jif = jiffies;
43077 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
43078 fscache_wait_bit_interruptible,
43079 TASK_INTERRUPTIBLE) != 0) {
43080- fscache_stat(&fscache_n_retrievals_intr);
43081+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
43082 _leave(" = -ERESTARTSYS");
43083 return -ERESTARTSYS;
43084 }
43085@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_loo
43086 */
43087 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43088 struct fscache_retrieval *op,
43089- atomic_t *stat_op_waits,
43090- atomic_t *stat_object_dead)
43091+ atomic_unchecked_t *stat_op_waits,
43092+ atomic_unchecked_t *stat_object_dead)
43093 {
43094 int ret;
43095
43096@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_ac
43097 goto check_if_dead;
43098
43099 _debug(">>> WT");
43100- fscache_stat(stat_op_waits);
43101+ fscache_stat_unchecked(stat_op_waits);
43102 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
43103 fscache_wait_bit_interruptible,
43104 TASK_INTERRUPTIBLE) < 0) {
43105@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_ac
43106
43107 check_if_dead:
43108 if (unlikely(fscache_object_is_dead(object))) {
43109- fscache_stat(stat_object_dead);
43110+ fscache_stat_unchecked(stat_object_dead);
43111 return -ENOBUFS;
43112 }
43113 return 0;
43114@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct
43115
43116 _enter("%p,%p,,,", cookie, page);
43117
43118- fscache_stat(&fscache_n_retrievals);
43119+ fscache_stat_unchecked(&fscache_n_retrievals);
43120
43121 if (hlist_empty(&cookie->backing_objects))
43122 goto nobufs;
43123@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct
43124 goto nobufs_unlock;
43125 spin_unlock(&cookie->lock);
43126
43127- fscache_stat(&fscache_n_retrieval_ops);
43128+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
43129
43130 /* pin the netfs read context in case we need to do the actual netfs
43131 * read because we've encountered a cache read failure */
43132@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct
43133
43134 error:
43135 if (ret == -ENOMEM)
43136- fscache_stat(&fscache_n_retrievals_nomem);
43137+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43138 else if (ret == -ERESTARTSYS)
43139- fscache_stat(&fscache_n_retrievals_intr);
43140+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
43141 else if (ret == -ENODATA)
43142- fscache_stat(&fscache_n_retrievals_nodata);
43143+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
43144 else if (ret < 0)
43145- fscache_stat(&fscache_n_retrievals_nobufs);
43146+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43147 else
43148- fscache_stat(&fscache_n_retrievals_ok);
43149+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
43150
43151 fscache_put_retrieval(op);
43152 _leave(" = %d", ret);
43153@@ -429,7 +429,7 @@ nobufs_unlock:
43154 spin_unlock(&cookie->lock);
43155 kfree(op);
43156 nobufs:
43157- fscache_stat(&fscache_n_retrievals_nobufs);
43158+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43159 _leave(" = -ENOBUFS");
43160 return -ENOBUFS;
43161 }
43162@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct
43163
43164 _enter("%p,,%d,,,", cookie, *nr_pages);
43165
43166- fscache_stat(&fscache_n_retrievals);
43167+ fscache_stat_unchecked(&fscache_n_retrievals);
43168
43169 if (hlist_empty(&cookie->backing_objects))
43170 goto nobufs;
43171@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct
43172 goto nobufs_unlock;
43173 spin_unlock(&cookie->lock);
43174
43175- fscache_stat(&fscache_n_retrieval_ops);
43176+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
43177
43178 /* pin the netfs read context in case we need to do the actual netfs
43179 * read because we've encountered a cache read failure */
43180@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct
43181
43182 error:
43183 if (ret == -ENOMEM)
43184- fscache_stat(&fscache_n_retrievals_nomem);
43185+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43186 else if (ret == -ERESTARTSYS)
43187- fscache_stat(&fscache_n_retrievals_intr);
43188+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
43189 else if (ret == -ENODATA)
43190- fscache_stat(&fscache_n_retrievals_nodata);
43191+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
43192 else if (ret < 0)
43193- fscache_stat(&fscache_n_retrievals_nobufs);
43194+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43195 else
43196- fscache_stat(&fscache_n_retrievals_ok);
43197+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
43198
43199 fscache_put_retrieval(op);
43200 _leave(" = %d", ret);
43201@@ -545,7 +545,7 @@ nobufs_unlock:
43202 spin_unlock(&cookie->lock);
43203 kfree(op);
43204 nobufs:
43205- fscache_stat(&fscache_n_retrievals_nobufs);
43206+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43207 _leave(" = -ENOBUFS");
43208 return -ENOBUFS;
43209 }
43210@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_
43211
43212 _enter("%p,%p,,,", cookie, page);
43213
43214- fscache_stat(&fscache_n_allocs);
43215+ fscache_stat_unchecked(&fscache_n_allocs);
43216
43217 if (hlist_empty(&cookie->backing_objects))
43218 goto nobufs;
43219@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_
43220 goto nobufs_unlock;
43221 spin_unlock(&cookie->lock);
43222
43223- fscache_stat(&fscache_n_alloc_ops);
43224+ fscache_stat_unchecked(&fscache_n_alloc_ops);
43225
43226 ret = fscache_wait_for_retrieval_activation(
43227 object, op,
43228@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_
43229
43230 error:
43231 if (ret == -ERESTARTSYS)
43232- fscache_stat(&fscache_n_allocs_intr);
43233+ fscache_stat_unchecked(&fscache_n_allocs_intr);
43234 else if (ret < 0)
43235- fscache_stat(&fscache_n_allocs_nobufs);
43236+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
43237 else
43238- fscache_stat(&fscache_n_allocs_ok);
43239+ fscache_stat_unchecked(&fscache_n_allocs_ok);
43240
43241 fscache_put_retrieval(op);
43242 _leave(" = %d", ret);
43243@@ -625,7 +625,7 @@ nobufs_unlock:
43244 spin_unlock(&cookie->lock);
43245 kfree(op);
43246 nobufs:
43247- fscache_stat(&fscache_n_allocs_nobufs);
43248+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
43249 _leave(" = -ENOBUFS");
43250 return -ENOBUFS;
43251 }
43252@@ -666,7 +666,7 @@ static void fscache_write_op(struct fsca
43253
43254 spin_lock(&cookie->stores_lock);
43255
43256- fscache_stat(&fscache_n_store_calls);
43257+ fscache_stat_unchecked(&fscache_n_store_calls);
43258
43259 /* find a page to store */
43260 page = NULL;
43261@@ -677,7 +677,7 @@ static void fscache_write_op(struct fsca
43262 page = results[0];
43263 _debug("gang %d [%lx]", n, page->index);
43264 if (page->index > op->store_limit) {
43265- fscache_stat(&fscache_n_store_pages_over_limit);
43266+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
43267 goto superseded;
43268 }
43269
43270@@ -689,7 +689,7 @@ static void fscache_write_op(struct fsca
43271 spin_unlock(&cookie->stores_lock);
43272 spin_unlock(&object->lock);
43273
43274- fscache_stat(&fscache_n_store_pages);
43275+ fscache_stat_unchecked(&fscache_n_store_pages);
43276 fscache_stat(&fscache_n_cop_write_page);
43277 ret = object->cache->ops->write_page(op, page);
43278 fscache_stat_d(&fscache_n_cop_write_page);
43279@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_
43280 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43281 ASSERT(PageFsCache(page));
43282
43283- fscache_stat(&fscache_n_stores);
43284+ fscache_stat_unchecked(&fscache_n_stores);
43285
43286 op = kzalloc(sizeof(*op), GFP_NOIO);
43287 if (!op)
43288@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_
43289 spin_unlock(&cookie->stores_lock);
43290 spin_unlock(&object->lock);
43291
43292- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
43293+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
43294 op->store_limit = object->store_limit;
43295
43296 if (fscache_submit_op(object, &op->op) < 0)
43297@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_
43298
43299 spin_unlock(&cookie->lock);
43300 radix_tree_preload_end();
43301- fscache_stat(&fscache_n_store_ops);
43302- fscache_stat(&fscache_n_stores_ok);
43303+ fscache_stat_unchecked(&fscache_n_store_ops);
43304+ fscache_stat_unchecked(&fscache_n_stores_ok);
43305
43306 /* the work queue now carries its own ref on the object */
43307 fscache_put_operation(&op->op);
43308@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_
43309 return 0;
43310
43311 already_queued:
43312- fscache_stat(&fscache_n_stores_again);
43313+ fscache_stat_unchecked(&fscache_n_stores_again);
43314 already_pending:
43315 spin_unlock(&cookie->stores_lock);
43316 spin_unlock(&object->lock);
43317 spin_unlock(&cookie->lock);
43318 radix_tree_preload_end();
43319 kfree(op);
43320- fscache_stat(&fscache_n_stores_ok);
43321+ fscache_stat_unchecked(&fscache_n_stores_ok);
43322 _leave(" = 0");
43323 return 0;
43324
43325@@ -851,14 +851,14 @@ nobufs:
43326 spin_unlock(&cookie->lock);
43327 radix_tree_preload_end();
43328 kfree(op);
43329- fscache_stat(&fscache_n_stores_nobufs);
43330+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
43331 _leave(" = -ENOBUFS");
43332 return -ENOBUFS;
43333
43334 nomem_free:
43335 kfree(op);
43336 nomem:
43337- fscache_stat(&fscache_n_stores_oom);
43338+ fscache_stat_unchecked(&fscache_n_stores_oom);
43339 _leave(" = -ENOMEM");
43340 return -ENOMEM;
43341 }
43342@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscac
43343 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43344 ASSERTCMP(page, !=, NULL);
43345
43346- fscache_stat(&fscache_n_uncaches);
43347+ fscache_stat_unchecked(&fscache_n_uncaches);
43348
43349 /* cache withdrawal may beat us to it */
43350 if (!PageFsCache(page))
43351@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fs
43352 unsigned long loop;
43353
43354 #ifdef CONFIG_FSCACHE_STATS
43355- atomic_add(pagevec->nr, &fscache_n_marks);
43356+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
43357 #endif
43358
43359 for (loop = 0; loop < pagevec->nr; loop++) {
43360diff -urNp linux-3.1.1/fs/fscache/stats.c linux-3.1.1/fs/fscache/stats.c
43361--- linux-3.1.1/fs/fscache/stats.c 2011-11-11 15:19:27.000000000 -0500
43362+++ linux-3.1.1/fs/fscache/stats.c 2011-11-16 18:39:08.000000000 -0500
43363@@ -18,95 +18,95 @@
43364 /*
43365 * operation counters
43366 */
43367-atomic_t fscache_n_op_pend;
43368-atomic_t fscache_n_op_run;
43369-atomic_t fscache_n_op_enqueue;
43370-atomic_t fscache_n_op_requeue;
43371-atomic_t fscache_n_op_deferred_release;
43372-atomic_t fscache_n_op_release;
43373-atomic_t fscache_n_op_gc;
43374-atomic_t fscache_n_op_cancelled;
43375-atomic_t fscache_n_op_rejected;
43376-
43377-atomic_t fscache_n_attr_changed;
43378-atomic_t fscache_n_attr_changed_ok;
43379-atomic_t fscache_n_attr_changed_nobufs;
43380-atomic_t fscache_n_attr_changed_nomem;
43381-atomic_t fscache_n_attr_changed_calls;
43382-
43383-atomic_t fscache_n_allocs;
43384-atomic_t fscache_n_allocs_ok;
43385-atomic_t fscache_n_allocs_wait;
43386-atomic_t fscache_n_allocs_nobufs;
43387-atomic_t fscache_n_allocs_intr;
43388-atomic_t fscache_n_allocs_object_dead;
43389-atomic_t fscache_n_alloc_ops;
43390-atomic_t fscache_n_alloc_op_waits;
43391-
43392-atomic_t fscache_n_retrievals;
43393-atomic_t fscache_n_retrievals_ok;
43394-atomic_t fscache_n_retrievals_wait;
43395-atomic_t fscache_n_retrievals_nodata;
43396-atomic_t fscache_n_retrievals_nobufs;
43397-atomic_t fscache_n_retrievals_intr;
43398-atomic_t fscache_n_retrievals_nomem;
43399-atomic_t fscache_n_retrievals_object_dead;
43400-atomic_t fscache_n_retrieval_ops;
43401-atomic_t fscache_n_retrieval_op_waits;
43402-
43403-atomic_t fscache_n_stores;
43404-atomic_t fscache_n_stores_ok;
43405-atomic_t fscache_n_stores_again;
43406-atomic_t fscache_n_stores_nobufs;
43407-atomic_t fscache_n_stores_oom;
43408-atomic_t fscache_n_store_ops;
43409-atomic_t fscache_n_store_calls;
43410-atomic_t fscache_n_store_pages;
43411-atomic_t fscache_n_store_radix_deletes;
43412-atomic_t fscache_n_store_pages_over_limit;
43413-
43414-atomic_t fscache_n_store_vmscan_not_storing;
43415-atomic_t fscache_n_store_vmscan_gone;
43416-atomic_t fscache_n_store_vmscan_busy;
43417-atomic_t fscache_n_store_vmscan_cancelled;
43418-
43419-atomic_t fscache_n_marks;
43420-atomic_t fscache_n_uncaches;
43421-
43422-atomic_t fscache_n_acquires;
43423-atomic_t fscache_n_acquires_null;
43424-atomic_t fscache_n_acquires_no_cache;
43425-atomic_t fscache_n_acquires_ok;
43426-atomic_t fscache_n_acquires_nobufs;
43427-atomic_t fscache_n_acquires_oom;
43428-
43429-atomic_t fscache_n_updates;
43430-atomic_t fscache_n_updates_null;
43431-atomic_t fscache_n_updates_run;
43432-
43433-atomic_t fscache_n_relinquishes;
43434-atomic_t fscache_n_relinquishes_null;
43435-atomic_t fscache_n_relinquishes_waitcrt;
43436-atomic_t fscache_n_relinquishes_retire;
43437-
43438-atomic_t fscache_n_cookie_index;
43439-atomic_t fscache_n_cookie_data;
43440-atomic_t fscache_n_cookie_special;
43441-
43442-atomic_t fscache_n_object_alloc;
43443-atomic_t fscache_n_object_no_alloc;
43444-atomic_t fscache_n_object_lookups;
43445-atomic_t fscache_n_object_lookups_negative;
43446-atomic_t fscache_n_object_lookups_positive;
43447-atomic_t fscache_n_object_lookups_timed_out;
43448-atomic_t fscache_n_object_created;
43449-atomic_t fscache_n_object_avail;
43450-atomic_t fscache_n_object_dead;
43451-
43452-atomic_t fscache_n_checkaux_none;
43453-atomic_t fscache_n_checkaux_okay;
43454-atomic_t fscache_n_checkaux_update;
43455-atomic_t fscache_n_checkaux_obsolete;
43456+atomic_unchecked_t fscache_n_op_pend;
43457+atomic_unchecked_t fscache_n_op_run;
43458+atomic_unchecked_t fscache_n_op_enqueue;
43459+atomic_unchecked_t fscache_n_op_requeue;
43460+atomic_unchecked_t fscache_n_op_deferred_release;
43461+atomic_unchecked_t fscache_n_op_release;
43462+atomic_unchecked_t fscache_n_op_gc;
43463+atomic_unchecked_t fscache_n_op_cancelled;
43464+atomic_unchecked_t fscache_n_op_rejected;
43465+
43466+atomic_unchecked_t fscache_n_attr_changed;
43467+atomic_unchecked_t fscache_n_attr_changed_ok;
43468+atomic_unchecked_t fscache_n_attr_changed_nobufs;
43469+atomic_unchecked_t fscache_n_attr_changed_nomem;
43470+atomic_unchecked_t fscache_n_attr_changed_calls;
43471+
43472+atomic_unchecked_t fscache_n_allocs;
43473+atomic_unchecked_t fscache_n_allocs_ok;
43474+atomic_unchecked_t fscache_n_allocs_wait;
43475+atomic_unchecked_t fscache_n_allocs_nobufs;
43476+atomic_unchecked_t fscache_n_allocs_intr;
43477+atomic_unchecked_t fscache_n_allocs_object_dead;
43478+atomic_unchecked_t fscache_n_alloc_ops;
43479+atomic_unchecked_t fscache_n_alloc_op_waits;
43480+
43481+atomic_unchecked_t fscache_n_retrievals;
43482+atomic_unchecked_t fscache_n_retrievals_ok;
43483+atomic_unchecked_t fscache_n_retrievals_wait;
43484+atomic_unchecked_t fscache_n_retrievals_nodata;
43485+atomic_unchecked_t fscache_n_retrievals_nobufs;
43486+atomic_unchecked_t fscache_n_retrievals_intr;
43487+atomic_unchecked_t fscache_n_retrievals_nomem;
43488+atomic_unchecked_t fscache_n_retrievals_object_dead;
43489+atomic_unchecked_t fscache_n_retrieval_ops;
43490+atomic_unchecked_t fscache_n_retrieval_op_waits;
43491+
43492+atomic_unchecked_t fscache_n_stores;
43493+atomic_unchecked_t fscache_n_stores_ok;
43494+atomic_unchecked_t fscache_n_stores_again;
43495+atomic_unchecked_t fscache_n_stores_nobufs;
43496+atomic_unchecked_t fscache_n_stores_oom;
43497+atomic_unchecked_t fscache_n_store_ops;
43498+atomic_unchecked_t fscache_n_store_calls;
43499+atomic_unchecked_t fscache_n_store_pages;
43500+atomic_unchecked_t fscache_n_store_radix_deletes;
43501+atomic_unchecked_t fscache_n_store_pages_over_limit;
43502+
43503+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
43504+atomic_unchecked_t fscache_n_store_vmscan_gone;
43505+atomic_unchecked_t fscache_n_store_vmscan_busy;
43506+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
43507+
43508+atomic_unchecked_t fscache_n_marks;
43509+atomic_unchecked_t fscache_n_uncaches;
43510+
43511+atomic_unchecked_t fscache_n_acquires;
43512+atomic_unchecked_t fscache_n_acquires_null;
43513+atomic_unchecked_t fscache_n_acquires_no_cache;
43514+atomic_unchecked_t fscache_n_acquires_ok;
43515+atomic_unchecked_t fscache_n_acquires_nobufs;
43516+atomic_unchecked_t fscache_n_acquires_oom;
43517+
43518+atomic_unchecked_t fscache_n_updates;
43519+atomic_unchecked_t fscache_n_updates_null;
43520+atomic_unchecked_t fscache_n_updates_run;
43521+
43522+atomic_unchecked_t fscache_n_relinquishes;
43523+atomic_unchecked_t fscache_n_relinquishes_null;
43524+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
43525+atomic_unchecked_t fscache_n_relinquishes_retire;
43526+
43527+atomic_unchecked_t fscache_n_cookie_index;
43528+atomic_unchecked_t fscache_n_cookie_data;
43529+atomic_unchecked_t fscache_n_cookie_special;
43530+
43531+atomic_unchecked_t fscache_n_object_alloc;
43532+atomic_unchecked_t fscache_n_object_no_alloc;
43533+atomic_unchecked_t fscache_n_object_lookups;
43534+atomic_unchecked_t fscache_n_object_lookups_negative;
43535+atomic_unchecked_t fscache_n_object_lookups_positive;
43536+atomic_unchecked_t fscache_n_object_lookups_timed_out;
43537+atomic_unchecked_t fscache_n_object_created;
43538+atomic_unchecked_t fscache_n_object_avail;
43539+atomic_unchecked_t fscache_n_object_dead;
43540+
43541+atomic_unchecked_t fscache_n_checkaux_none;
43542+atomic_unchecked_t fscache_n_checkaux_okay;
43543+atomic_unchecked_t fscache_n_checkaux_update;
43544+atomic_unchecked_t fscache_n_checkaux_obsolete;
43545
43546 atomic_t fscache_n_cop_alloc_object;
43547 atomic_t fscache_n_cop_lookup_object;
43548@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
43549 seq_puts(m, "FS-Cache statistics\n");
43550
43551 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
43552- atomic_read(&fscache_n_cookie_index),
43553- atomic_read(&fscache_n_cookie_data),
43554- atomic_read(&fscache_n_cookie_special));
43555+ atomic_read_unchecked(&fscache_n_cookie_index),
43556+ atomic_read_unchecked(&fscache_n_cookie_data),
43557+ atomic_read_unchecked(&fscache_n_cookie_special));
43558
43559 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
43560- atomic_read(&fscache_n_object_alloc),
43561- atomic_read(&fscache_n_object_no_alloc),
43562- atomic_read(&fscache_n_object_avail),
43563- atomic_read(&fscache_n_object_dead));
43564+ atomic_read_unchecked(&fscache_n_object_alloc),
43565+ atomic_read_unchecked(&fscache_n_object_no_alloc),
43566+ atomic_read_unchecked(&fscache_n_object_avail),
43567+ atomic_read_unchecked(&fscache_n_object_dead));
43568 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
43569- atomic_read(&fscache_n_checkaux_none),
43570- atomic_read(&fscache_n_checkaux_okay),
43571- atomic_read(&fscache_n_checkaux_update),
43572- atomic_read(&fscache_n_checkaux_obsolete));
43573+ atomic_read_unchecked(&fscache_n_checkaux_none),
43574+ atomic_read_unchecked(&fscache_n_checkaux_okay),
43575+ atomic_read_unchecked(&fscache_n_checkaux_update),
43576+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
43577
43578 seq_printf(m, "Pages : mrk=%u unc=%u\n",
43579- atomic_read(&fscache_n_marks),
43580- atomic_read(&fscache_n_uncaches));
43581+ atomic_read_unchecked(&fscache_n_marks),
43582+ atomic_read_unchecked(&fscache_n_uncaches));
43583
43584 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
43585 " oom=%u\n",
43586- atomic_read(&fscache_n_acquires),
43587- atomic_read(&fscache_n_acquires_null),
43588- atomic_read(&fscache_n_acquires_no_cache),
43589- atomic_read(&fscache_n_acquires_ok),
43590- atomic_read(&fscache_n_acquires_nobufs),
43591- atomic_read(&fscache_n_acquires_oom));
43592+ atomic_read_unchecked(&fscache_n_acquires),
43593+ atomic_read_unchecked(&fscache_n_acquires_null),
43594+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
43595+ atomic_read_unchecked(&fscache_n_acquires_ok),
43596+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
43597+ atomic_read_unchecked(&fscache_n_acquires_oom));
43598
43599 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
43600- atomic_read(&fscache_n_object_lookups),
43601- atomic_read(&fscache_n_object_lookups_negative),
43602- atomic_read(&fscache_n_object_lookups_positive),
43603- atomic_read(&fscache_n_object_created),
43604- atomic_read(&fscache_n_object_lookups_timed_out));
43605+ atomic_read_unchecked(&fscache_n_object_lookups),
43606+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
43607+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
43608+ atomic_read_unchecked(&fscache_n_object_created),
43609+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
43610
43611 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
43612- atomic_read(&fscache_n_updates),
43613- atomic_read(&fscache_n_updates_null),
43614- atomic_read(&fscache_n_updates_run));
43615+ atomic_read_unchecked(&fscache_n_updates),
43616+ atomic_read_unchecked(&fscache_n_updates_null),
43617+ atomic_read_unchecked(&fscache_n_updates_run));
43618
43619 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
43620- atomic_read(&fscache_n_relinquishes),
43621- atomic_read(&fscache_n_relinquishes_null),
43622- atomic_read(&fscache_n_relinquishes_waitcrt),
43623- atomic_read(&fscache_n_relinquishes_retire));
43624+ atomic_read_unchecked(&fscache_n_relinquishes),
43625+ atomic_read_unchecked(&fscache_n_relinquishes_null),
43626+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
43627+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
43628
43629 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
43630- atomic_read(&fscache_n_attr_changed),
43631- atomic_read(&fscache_n_attr_changed_ok),
43632- atomic_read(&fscache_n_attr_changed_nobufs),
43633- atomic_read(&fscache_n_attr_changed_nomem),
43634- atomic_read(&fscache_n_attr_changed_calls));
43635+ atomic_read_unchecked(&fscache_n_attr_changed),
43636+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
43637+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
43638+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
43639+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
43640
43641 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
43642- atomic_read(&fscache_n_allocs),
43643- atomic_read(&fscache_n_allocs_ok),
43644- atomic_read(&fscache_n_allocs_wait),
43645- atomic_read(&fscache_n_allocs_nobufs),
43646- atomic_read(&fscache_n_allocs_intr));
43647+ atomic_read_unchecked(&fscache_n_allocs),
43648+ atomic_read_unchecked(&fscache_n_allocs_ok),
43649+ atomic_read_unchecked(&fscache_n_allocs_wait),
43650+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
43651+ atomic_read_unchecked(&fscache_n_allocs_intr));
43652 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
43653- atomic_read(&fscache_n_alloc_ops),
43654- atomic_read(&fscache_n_alloc_op_waits),
43655- atomic_read(&fscache_n_allocs_object_dead));
43656+ atomic_read_unchecked(&fscache_n_alloc_ops),
43657+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
43658+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
43659
43660 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
43661 " int=%u oom=%u\n",
43662- atomic_read(&fscache_n_retrievals),
43663- atomic_read(&fscache_n_retrievals_ok),
43664- atomic_read(&fscache_n_retrievals_wait),
43665- atomic_read(&fscache_n_retrievals_nodata),
43666- atomic_read(&fscache_n_retrievals_nobufs),
43667- atomic_read(&fscache_n_retrievals_intr),
43668- atomic_read(&fscache_n_retrievals_nomem));
43669+ atomic_read_unchecked(&fscache_n_retrievals),
43670+ atomic_read_unchecked(&fscache_n_retrievals_ok),
43671+ atomic_read_unchecked(&fscache_n_retrievals_wait),
43672+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
43673+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
43674+ atomic_read_unchecked(&fscache_n_retrievals_intr),
43675+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
43676 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
43677- atomic_read(&fscache_n_retrieval_ops),
43678- atomic_read(&fscache_n_retrieval_op_waits),
43679- atomic_read(&fscache_n_retrievals_object_dead));
43680+ atomic_read_unchecked(&fscache_n_retrieval_ops),
43681+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
43682+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
43683
43684 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
43685- atomic_read(&fscache_n_stores),
43686- atomic_read(&fscache_n_stores_ok),
43687- atomic_read(&fscache_n_stores_again),
43688- atomic_read(&fscache_n_stores_nobufs),
43689- atomic_read(&fscache_n_stores_oom));
43690+ atomic_read_unchecked(&fscache_n_stores),
43691+ atomic_read_unchecked(&fscache_n_stores_ok),
43692+ atomic_read_unchecked(&fscache_n_stores_again),
43693+ atomic_read_unchecked(&fscache_n_stores_nobufs),
43694+ atomic_read_unchecked(&fscache_n_stores_oom));
43695 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
43696- atomic_read(&fscache_n_store_ops),
43697- atomic_read(&fscache_n_store_calls),
43698- atomic_read(&fscache_n_store_pages),
43699- atomic_read(&fscache_n_store_radix_deletes),
43700- atomic_read(&fscache_n_store_pages_over_limit));
43701+ atomic_read_unchecked(&fscache_n_store_ops),
43702+ atomic_read_unchecked(&fscache_n_store_calls),
43703+ atomic_read_unchecked(&fscache_n_store_pages),
43704+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
43705+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
43706
43707 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
43708- atomic_read(&fscache_n_store_vmscan_not_storing),
43709- atomic_read(&fscache_n_store_vmscan_gone),
43710- atomic_read(&fscache_n_store_vmscan_busy),
43711- atomic_read(&fscache_n_store_vmscan_cancelled));
43712+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
43713+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
43714+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
43715+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
43716
43717 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
43718- atomic_read(&fscache_n_op_pend),
43719- atomic_read(&fscache_n_op_run),
43720- atomic_read(&fscache_n_op_enqueue),
43721- atomic_read(&fscache_n_op_cancelled),
43722- atomic_read(&fscache_n_op_rejected));
43723+ atomic_read_unchecked(&fscache_n_op_pend),
43724+ atomic_read_unchecked(&fscache_n_op_run),
43725+ atomic_read_unchecked(&fscache_n_op_enqueue),
43726+ atomic_read_unchecked(&fscache_n_op_cancelled),
43727+ atomic_read_unchecked(&fscache_n_op_rejected));
43728 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
43729- atomic_read(&fscache_n_op_deferred_release),
43730- atomic_read(&fscache_n_op_release),
43731- atomic_read(&fscache_n_op_gc));
43732+ atomic_read_unchecked(&fscache_n_op_deferred_release),
43733+ atomic_read_unchecked(&fscache_n_op_release),
43734+ atomic_read_unchecked(&fscache_n_op_gc));
43735
43736 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
43737 atomic_read(&fscache_n_cop_alloc_object),
43738diff -urNp linux-3.1.1/fs/fs_struct.c linux-3.1.1/fs/fs_struct.c
43739--- linux-3.1.1/fs/fs_struct.c 2011-11-11 15:19:27.000000000 -0500
43740+++ linux-3.1.1/fs/fs_struct.c 2011-11-16 18:40:29.000000000 -0500
43741@@ -4,6 +4,7 @@
43742 #include <linux/path.h>
43743 #include <linux/slab.h>
43744 #include <linux/fs_struct.h>
43745+#include <linux/grsecurity.h>
43746 #include "internal.h"
43747
43748 static inline void path_get_longterm(struct path *path)
43749@@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, s
43750 old_root = fs->root;
43751 fs->root = *path;
43752 path_get_longterm(path);
43753+ gr_set_chroot_entries(current, path);
43754 write_seqcount_end(&fs->seq);
43755 spin_unlock(&fs->lock);
43756 if (old_root.dentry)
43757@@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_roo
43758 && fs->root.mnt == old_root->mnt) {
43759 path_get_longterm(new_root);
43760 fs->root = *new_root;
43761+ gr_set_chroot_entries(p, new_root);
43762 count++;
43763 }
43764 if (fs->pwd.dentry == old_root->dentry
43765@@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
43766 spin_lock(&fs->lock);
43767 write_seqcount_begin(&fs->seq);
43768 tsk->fs = NULL;
43769- kill = !--fs->users;
43770+ gr_clear_chroot_entries(tsk);
43771+ kill = !atomic_dec_return(&fs->users);
43772 write_seqcount_end(&fs->seq);
43773 spin_unlock(&fs->lock);
43774 task_unlock(tsk);
43775@@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct
43776 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
43777 /* We don't need to lock fs - think why ;-) */
43778 if (fs) {
43779- fs->users = 1;
43780+ atomic_set(&fs->users, 1);
43781 fs->in_exec = 0;
43782 spin_lock_init(&fs->lock);
43783 seqcount_init(&fs->seq);
43784@@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct
43785 spin_lock(&old->lock);
43786 fs->root = old->root;
43787 path_get_longterm(&fs->root);
43788+ /* instead of calling gr_set_chroot_entries here,
43789+ we call it from every caller of this function
43790+ */
43791 fs->pwd = old->pwd;
43792 path_get_longterm(&fs->pwd);
43793 spin_unlock(&old->lock);
43794@@ -150,8 +157,9 @@ int unshare_fs_struct(void)
43795
43796 task_lock(current);
43797 spin_lock(&fs->lock);
43798- kill = !--fs->users;
43799+ kill = !atomic_dec_return(&fs->users);
43800 current->fs = new_fs;
43801+ gr_set_chroot_entries(current, &new_fs->root);
43802 spin_unlock(&fs->lock);
43803 task_unlock(current);
43804
43805@@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
43806
43807 /* to be mentioned only in INIT_TASK */
43808 struct fs_struct init_fs = {
43809- .users = 1,
43810+ .users = ATOMIC_INIT(1),
43811 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
43812 .seq = SEQCNT_ZERO,
43813 .umask = 0022,
43814@@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
43815 task_lock(current);
43816
43817 spin_lock(&init_fs.lock);
43818- init_fs.users++;
43819+ atomic_inc(&init_fs.users);
43820 spin_unlock(&init_fs.lock);
43821
43822 spin_lock(&fs->lock);
43823 current->fs = &init_fs;
43824- kill = !--fs->users;
43825+ gr_set_chroot_entries(current, &current->fs->root);
43826+ kill = !atomic_dec_return(&fs->users);
43827 spin_unlock(&fs->lock);
43828
43829 task_unlock(current);
43830diff -urNp linux-3.1.1/fs/fuse/cuse.c linux-3.1.1/fs/fuse/cuse.c
43831--- linux-3.1.1/fs/fuse/cuse.c 2011-11-11 15:19:27.000000000 -0500
43832+++ linux-3.1.1/fs/fuse/cuse.c 2011-11-16 18:39:08.000000000 -0500
43833@@ -586,10 +586,12 @@ static int __init cuse_init(void)
43834 INIT_LIST_HEAD(&cuse_conntbl[i]);
43835
43836 /* inherit and extend fuse_dev_operations */
43837- cuse_channel_fops = fuse_dev_operations;
43838- cuse_channel_fops.owner = THIS_MODULE;
43839- cuse_channel_fops.open = cuse_channel_open;
43840- cuse_channel_fops.release = cuse_channel_release;
43841+ pax_open_kernel();
43842+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
43843+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
43844+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
43845+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
43846+ pax_close_kernel();
43847
43848 cuse_class = class_create(THIS_MODULE, "cuse");
43849 if (IS_ERR(cuse_class))
43850diff -urNp linux-3.1.1/fs/fuse/dev.c linux-3.1.1/fs/fuse/dev.c
43851--- linux-3.1.1/fs/fuse/dev.c 2011-11-11 15:19:27.000000000 -0500
43852+++ linux-3.1.1/fs/fuse/dev.c 2011-11-16 18:39:08.000000000 -0500
43853@@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(stru
43854 ret = 0;
43855 pipe_lock(pipe);
43856
43857- if (!pipe->readers) {
43858+ if (!atomic_read(&pipe->readers)) {
43859 send_sig(SIGPIPE, current, 0);
43860 if (!ret)
43861 ret = -EPIPE;
43862diff -urNp linux-3.1.1/fs/fuse/dir.c linux-3.1.1/fs/fuse/dir.c
43863--- linux-3.1.1/fs/fuse/dir.c 2011-11-11 15:19:27.000000000 -0500
43864+++ linux-3.1.1/fs/fuse/dir.c 2011-11-16 18:39:08.000000000 -0500
43865@@ -1147,7 +1147,7 @@ static char *read_link(struct dentry *de
43866 return link;
43867 }
43868
43869-static void free_link(char *link)
43870+static void free_link(const char *link)
43871 {
43872 if (!IS_ERR(link))
43873 free_page((unsigned long) link);
43874diff -urNp linux-3.1.1/fs/gfs2/inode.c linux-3.1.1/fs/gfs2/inode.c
43875--- linux-3.1.1/fs/gfs2/inode.c 2011-11-11 15:19:27.000000000 -0500
43876+++ linux-3.1.1/fs/gfs2/inode.c 2011-11-16 18:39:08.000000000 -0500
43877@@ -1517,7 +1517,7 @@ out:
43878
43879 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
43880 {
43881- char *s = nd_get_link(nd);
43882+ const char *s = nd_get_link(nd);
43883 if (!IS_ERR(s))
43884 kfree(s);
43885 }
43886diff -urNp linux-3.1.1/fs/hfs/btree.c linux-3.1.1/fs/hfs/btree.c
43887--- linux-3.1.1/fs/hfs/btree.c 2011-11-11 15:19:27.000000000 -0500
43888+++ linux-3.1.1/fs/hfs/btree.c 2011-11-18 18:48:11.000000000 -0500
43889@@ -46,11 +46,27 @@ struct hfs_btree *hfs_btree_open(struct
43890 case HFS_EXT_CNID:
43891 hfs_inode_read_fork(tree->inode, mdb->drXTExtRec, mdb->drXTFlSize,
43892 mdb->drXTFlSize, be32_to_cpu(mdb->drXTClpSiz));
43893+
43894+ if (HFS_I(tree->inode)->alloc_blocks >
43895+ HFS_I(tree->inode)->first_blocks) {
43896+ printk(KERN_ERR "hfs: invalid btree extent records\n");
43897+ unlock_new_inode(tree->inode);
43898+ goto free_inode;
43899+ }
43900+
43901 tree->inode->i_mapping->a_ops = &hfs_btree_aops;
43902 break;
43903 case HFS_CAT_CNID:
43904 hfs_inode_read_fork(tree->inode, mdb->drCTExtRec, mdb->drCTFlSize,
43905 mdb->drCTFlSize, be32_to_cpu(mdb->drCTClpSiz));
43906+
43907+ if (!HFS_I(tree->inode)->first_blocks) {
43908+ printk(KERN_ERR "hfs: invalid btree extent records "
43909+ "(0 size).\n");
43910+ unlock_new_inode(tree->inode);
43911+ goto free_inode;
43912+ }
43913+
43914 tree->inode->i_mapping->a_ops = &hfs_btree_aops;
43915 break;
43916 default:
43917@@ -59,11 +75,6 @@ struct hfs_btree *hfs_btree_open(struct
43918 }
43919 unlock_new_inode(tree->inode);
43920
43921- if (!HFS_I(tree->inode)->first_blocks) {
43922- printk(KERN_ERR "hfs: invalid btree extent records (0 size).\n");
43923- goto free_inode;
43924- }
43925-
43926 mapping = tree->inode->i_mapping;
43927 page = read_mapping_page(mapping, 0, NULL);
43928 if (IS_ERR(page))
43929diff -urNp linux-3.1.1/fs/hfs/trans.c linux-3.1.1/fs/hfs/trans.c
43930--- linux-3.1.1/fs/hfs/trans.c 2011-11-11 15:19:27.000000000 -0500
43931+++ linux-3.1.1/fs/hfs/trans.c 2011-11-18 18:37:38.000000000 -0500
43932@@ -40,6 +40,8 @@ int hfs_mac2asc(struct super_block *sb,
43933
43934 src = in->name;
43935 srclen = in->len;
43936+ if (srclen > HFS_NAMELEN)
43937+ srclen = HFS_NAMELEN;
43938 dst = out;
43939 dstlen = HFS_MAX_NAMELEN;
43940 if (nls_io) {
43941diff -urNp linux-3.1.1/fs/hfsplus/catalog.c linux-3.1.1/fs/hfsplus/catalog.c
43942--- linux-3.1.1/fs/hfsplus/catalog.c 2011-11-11 15:19:27.000000000 -0500
43943+++ linux-3.1.1/fs/hfsplus/catalog.c 2011-11-16 19:23:09.000000000 -0500
43944@@ -179,6 +179,8 @@ int hfsplus_find_cat(struct super_block
43945 int err;
43946 u16 type;
43947
43948+ pax_track_stack();
43949+
43950 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
43951 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
43952 if (err)
43953@@ -210,6 +212,8 @@ int hfsplus_create_cat(u32 cnid, struct
43954 int entry_size;
43955 int err;
43956
43957+ pax_track_stack();
43958+
43959 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n",
43960 str->name, cnid, inode->i_nlink);
43961 err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
43962@@ -353,6 +357,8 @@ int hfsplus_rename_cat(u32 cnid,
43963 int entry_size, type;
43964 int err;
43965
43966+ pax_track_stack();
43967+
43968 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
43969 cnid, src_dir->i_ino, src_name->name,
43970 dst_dir->i_ino, dst_name->name);
43971diff -urNp linux-3.1.1/fs/hfsplus/dir.c linux-3.1.1/fs/hfsplus/dir.c
43972--- linux-3.1.1/fs/hfsplus/dir.c 2011-11-11 15:19:27.000000000 -0500
43973+++ linux-3.1.1/fs/hfsplus/dir.c 2011-11-16 18:40:29.000000000 -0500
43974@@ -131,6 +131,8 @@ static int hfsplus_readdir(struct file *
43975 struct hfsplus_readdir_data *rd;
43976 u16 type;
43977
43978+ pax_track_stack();
43979+
43980 if (filp->f_pos >= inode->i_size)
43981 return 0;
43982
43983diff -urNp linux-3.1.1/fs/hfsplus/inode.c linux-3.1.1/fs/hfsplus/inode.c
43984--- linux-3.1.1/fs/hfsplus/inode.c 2011-11-11 15:19:27.000000000 -0500
43985+++ linux-3.1.1/fs/hfsplus/inode.c 2011-11-16 18:40:29.000000000 -0500
43986@@ -501,6 +501,8 @@ int hfsplus_cat_read_inode(struct inode
43987 int res = 0;
43988 u16 type;
43989
43990+ pax_track_stack();
43991+
43992 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
43993
43994 HFSPLUS_I(inode)->linkid = 0;
43995@@ -564,6 +566,8 @@ int hfsplus_cat_write_inode(struct inode
43996 struct hfs_find_data fd;
43997 hfsplus_cat_entry entry;
43998
43999+ pax_track_stack();
44000+
44001 if (HFSPLUS_IS_RSRC(inode))
44002 main_inode = HFSPLUS_I(inode)->rsrc_inode;
44003
44004diff -urNp linux-3.1.1/fs/hfsplus/ioctl.c linux-3.1.1/fs/hfsplus/ioctl.c
44005--- linux-3.1.1/fs/hfsplus/ioctl.c 2011-11-11 15:19:27.000000000 -0500
44006+++ linux-3.1.1/fs/hfsplus/ioctl.c 2011-11-16 18:40:29.000000000 -0500
44007@@ -122,6 +122,8 @@ int hfsplus_setxattr(struct dentry *dent
44008 struct hfsplus_cat_file *file;
44009 int res;
44010
44011+ pax_track_stack();
44012+
44013 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
44014 return -EOPNOTSUPP;
44015
44016@@ -166,6 +168,8 @@ ssize_t hfsplus_getxattr(struct dentry *
44017 struct hfsplus_cat_file *file;
44018 ssize_t res = 0;
44019
44020+ pax_track_stack();
44021+
44022 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
44023 return -EOPNOTSUPP;
44024
44025diff -urNp linux-3.1.1/fs/hfsplus/super.c linux-3.1.1/fs/hfsplus/super.c
44026--- linux-3.1.1/fs/hfsplus/super.c 2011-11-11 15:19:27.000000000 -0500
44027+++ linux-3.1.1/fs/hfsplus/super.c 2011-11-16 19:23:30.000000000 -0500
44028@@ -347,6 +347,8 @@ static int hfsplus_fill_super(struct sup
44029 u64 last_fs_block, last_fs_page;
44030 int err;
44031
44032+ pax_track_stack();
44033+
44034 err = -EINVAL;
44035 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
44036 if (!sbi)
44037diff -urNp linux-3.1.1/fs/hugetlbfs/inode.c linux-3.1.1/fs/hugetlbfs/inode.c
44038--- linux-3.1.1/fs/hugetlbfs/inode.c 2011-11-11 15:19:27.000000000 -0500
44039+++ linux-3.1.1/fs/hugetlbfs/inode.c 2011-11-16 18:40:29.000000000 -0500
44040@@ -915,7 +915,7 @@ static struct file_system_type hugetlbfs
44041 .kill_sb = kill_litter_super,
44042 };
44043
44044-static struct vfsmount *hugetlbfs_vfsmount;
44045+struct vfsmount *hugetlbfs_vfsmount;
44046
44047 static int can_do_hugetlb_shm(void)
44048 {
44049diff -urNp linux-3.1.1/fs/inode.c linux-3.1.1/fs/inode.c
44050--- linux-3.1.1/fs/inode.c 2011-11-11 15:19:27.000000000 -0500
44051+++ linux-3.1.1/fs/inode.c 2011-11-16 18:39:08.000000000 -0500
44052@@ -787,8 +787,8 @@ unsigned int get_next_ino(void)
44053
44054 #ifdef CONFIG_SMP
44055 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
44056- static atomic_t shared_last_ino;
44057- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
44058+ static atomic_unchecked_t shared_last_ino;
44059+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
44060
44061 res = next - LAST_INO_BATCH;
44062 }
44063diff -urNp linux-3.1.1/fs/jbd/checkpoint.c linux-3.1.1/fs/jbd/checkpoint.c
44064--- linux-3.1.1/fs/jbd/checkpoint.c 2011-11-11 15:19:27.000000000 -0500
44065+++ linux-3.1.1/fs/jbd/checkpoint.c 2011-11-16 18:40:29.000000000 -0500
44066@@ -358,6 +358,8 @@ int log_do_checkpoint(journal_t *journal
44067 tid_t this_tid;
44068 int result;
44069
44070+ pax_track_stack();
44071+
44072 jbd_debug(1, "Start checkpoint\n");
44073
44074 /*
44075diff -urNp linux-3.1.1/fs/jffs2/compr_rtime.c linux-3.1.1/fs/jffs2/compr_rtime.c
44076--- linux-3.1.1/fs/jffs2/compr_rtime.c 2011-11-11 15:19:27.000000000 -0500
44077+++ linux-3.1.1/fs/jffs2/compr_rtime.c 2011-11-16 18:40:29.000000000 -0500
44078@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
44079 int outpos = 0;
44080 int pos=0;
44081
44082+ pax_track_stack();
44083+
44084 memset(positions,0,sizeof(positions));
44085
44086 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
44087@@ -78,6 +80,8 @@ static int jffs2_rtime_decompress(unsign
44088 int outpos = 0;
44089 int pos=0;
44090
44091+ pax_track_stack();
44092+
44093 memset(positions,0,sizeof(positions));
44094
44095 while (outpos<destlen) {
44096diff -urNp linux-3.1.1/fs/jffs2/compr_rubin.c linux-3.1.1/fs/jffs2/compr_rubin.c
44097--- linux-3.1.1/fs/jffs2/compr_rubin.c 2011-11-11 15:19:27.000000000 -0500
44098+++ linux-3.1.1/fs/jffs2/compr_rubin.c 2011-11-16 18:40:29.000000000 -0500
44099@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
44100 int ret;
44101 uint32_t mysrclen, mydstlen;
44102
44103+ pax_track_stack();
44104+
44105 mysrclen = *sourcelen;
44106 mydstlen = *dstlen - 8;
44107
44108diff -urNp linux-3.1.1/fs/jffs2/erase.c linux-3.1.1/fs/jffs2/erase.c
44109--- linux-3.1.1/fs/jffs2/erase.c 2011-11-11 15:19:27.000000000 -0500
44110+++ linux-3.1.1/fs/jffs2/erase.c 2011-11-16 18:39:08.000000000 -0500
44111@@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(stru
44112 struct jffs2_unknown_node marker = {
44113 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
44114 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
44115- .totlen = cpu_to_je32(c->cleanmarker_size)
44116+ .totlen = cpu_to_je32(c->cleanmarker_size),
44117+ .hdr_crc = cpu_to_je32(0)
44118 };
44119
44120 jffs2_prealloc_raw_node_refs(c, jeb, 1);
44121diff -urNp linux-3.1.1/fs/jffs2/wbuf.c linux-3.1.1/fs/jffs2/wbuf.c
44122--- linux-3.1.1/fs/jffs2/wbuf.c 2011-11-11 15:19:27.000000000 -0500
44123+++ linux-3.1.1/fs/jffs2/wbuf.c 2011-11-16 18:39:08.000000000 -0500
44124@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
44125 {
44126 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
44127 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
44128- .totlen = constant_cpu_to_je32(8)
44129+ .totlen = constant_cpu_to_je32(8),
44130+ .hdr_crc = constant_cpu_to_je32(0)
44131 };
44132
44133 /*
44134diff -urNp linux-3.1.1/fs/jffs2/xattr.c linux-3.1.1/fs/jffs2/xattr.c
44135--- linux-3.1.1/fs/jffs2/xattr.c 2011-11-11 15:19:27.000000000 -0500
44136+++ linux-3.1.1/fs/jffs2/xattr.c 2011-11-16 18:40:29.000000000 -0500
44137@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
44138
44139 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
44140
44141+ pax_track_stack();
44142+
44143 /* Phase.1 : Merge same xref */
44144 for (i=0; i < XREF_TMPHASH_SIZE; i++)
44145 xref_tmphash[i] = NULL;
44146diff -urNp linux-3.1.1/fs/jfs/super.c linux-3.1.1/fs/jfs/super.c
44147--- linux-3.1.1/fs/jfs/super.c 2011-11-11 15:19:27.000000000 -0500
44148+++ linux-3.1.1/fs/jfs/super.c 2011-11-16 18:39:08.000000000 -0500
44149@@ -803,7 +803,7 @@ static int __init init_jfs_fs(void)
44150
44151 jfs_inode_cachep =
44152 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
44153- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
44154+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
44155 init_once);
44156 if (jfs_inode_cachep == NULL)
44157 return -ENOMEM;
44158diff -urNp linux-3.1.1/fs/Kconfig.binfmt linux-3.1.1/fs/Kconfig.binfmt
44159--- linux-3.1.1/fs/Kconfig.binfmt 2011-11-11 15:19:27.000000000 -0500
44160+++ linux-3.1.1/fs/Kconfig.binfmt 2011-11-16 18:39:08.000000000 -0500
44161@@ -86,7 +86,7 @@ config HAVE_AOUT
44162
44163 config BINFMT_AOUT
44164 tristate "Kernel support for a.out and ECOFF binaries"
44165- depends on HAVE_AOUT
44166+ depends on HAVE_AOUT && BROKEN
44167 ---help---
44168 A.out (Assembler.OUTput) is a set of formats for libraries and
44169 executables used in the earliest versions of UNIX. Linux used
44170diff -urNp linux-3.1.1/fs/libfs.c linux-3.1.1/fs/libfs.c
44171--- linux-3.1.1/fs/libfs.c 2011-11-11 15:19:27.000000000 -0500
44172+++ linux-3.1.1/fs/libfs.c 2011-11-16 18:39:08.000000000 -0500
44173@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, v
44174
44175 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
44176 struct dentry *next;
44177+ char d_name[sizeof(next->d_iname)];
44178+ const unsigned char *name;
44179+
44180 next = list_entry(p, struct dentry, d_u.d_child);
44181 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
44182 if (!simple_positive(next)) {
44183@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, v
44184
44185 spin_unlock(&next->d_lock);
44186 spin_unlock(&dentry->d_lock);
44187- if (filldir(dirent, next->d_name.name,
44188+ name = next->d_name.name;
44189+ if (name == next->d_iname) {
44190+ memcpy(d_name, name, next->d_name.len);
44191+ name = d_name;
44192+ }
44193+ if (filldir(dirent, name,
44194 next->d_name.len, filp->f_pos,
44195 next->d_inode->i_ino,
44196 dt_type(next->d_inode)) < 0)
44197diff -urNp linux-3.1.1/fs/lockd/clntproc.c linux-3.1.1/fs/lockd/clntproc.c
44198--- linux-3.1.1/fs/lockd/clntproc.c 2011-11-11 15:19:27.000000000 -0500
44199+++ linux-3.1.1/fs/lockd/clntproc.c 2011-11-16 18:40:29.000000000 -0500
44200@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
44201 /*
44202 * Cookie counter for NLM requests
44203 */
44204-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
44205+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
44206
44207 void nlmclnt_next_cookie(struct nlm_cookie *c)
44208 {
44209- u32 cookie = atomic_inc_return(&nlm_cookie);
44210+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
44211
44212 memcpy(c->data, &cookie, 4);
44213 c->len=4;
44214@@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
44215 struct nlm_rqst reqst, *req;
44216 int status;
44217
44218+ pax_track_stack();
44219+
44220 req = &reqst;
44221 memset(req, 0, sizeof(*req));
44222 locks_init_lock(&req->a_args.lock.fl);
44223diff -urNp linux-3.1.1/fs/locks.c linux-3.1.1/fs/locks.c
44224--- linux-3.1.1/fs/locks.c 2011-11-11 15:19:27.000000000 -0500
44225+++ linux-3.1.1/fs/locks.c 2011-11-16 18:39:08.000000000 -0500
44226@@ -2022,16 +2022,16 @@ void locks_remove_flock(struct file *fil
44227 return;
44228
44229 if (filp->f_op && filp->f_op->flock) {
44230- struct file_lock fl = {
44231+ struct file_lock flock = {
44232 .fl_pid = current->tgid,
44233 .fl_file = filp,
44234 .fl_flags = FL_FLOCK,
44235 .fl_type = F_UNLCK,
44236 .fl_end = OFFSET_MAX,
44237 };
44238- filp->f_op->flock(filp, F_SETLKW, &fl);
44239- if (fl.fl_ops && fl.fl_ops->fl_release_private)
44240- fl.fl_ops->fl_release_private(&fl);
44241+ filp->f_op->flock(filp, F_SETLKW, &flock);
44242+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
44243+ flock.fl_ops->fl_release_private(&flock);
44244 }
44245
44246 lock_flocks();
44247diff -urNp linux-3.1.1/fs/logfs/super.c linux-3.1.1/fs/logfs/super.c
44248--- linux-3.1.1/fs/logfs/super.c 2011-11-11 15:19:27.000000000 -0500
44249+++ linux-3.1.1/fs/logfs/super.c 2011-11-16 18:40:29.000000000 -0500
44250@@ -266,6 +266,8 @@ static int logfs_recover_sb(struct super
44251 struct logfs_disk_super _ds1, *ds1 = &_ds1;
44252 int err, valid0, valid1;
44253
44254+ pax_track_stack();
44255+
44256 /* read first superblock */
44257 err = wbuf_read(sb, super->s_sb_ofs[0], sizeof(*ds0), ds0);
44258 if (err)
44259diff -urNp linux-3.1.1/fs/namei.c linux-3.1.1/fs/namei.c
44260--- linux-3.1.1/fs/namei.c 2011-11-11 15:19:27.000000000 -0500
44261+++ linux-3.1.1/fs/namei.c 2011-11-17 00:36:54.000000000 -0500
44262@@ -283,14 +283,22 @@ int generic_permission(struct inode *ino
44263
44264 if (S_ISDIR(inode->i_mode)) {
44265 /* DACs are overridable for directories */
44266- if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44267- return 0;
44268 if (!(mask & MAY_WRITE))
44269 if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44270 return 0;
44271+ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44272+ return 0;
44273 return -EACCES;
44274 }
44275 /*
44276+ * Searching includes executable on directories, else just read.
44277+ */
44278+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
44279+ if (mask == MAY_READ)
44280+ if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44281+ return 0;
44282+
44283+ /*
44284 * Read/write DACs are always overridable.
44285 * Executable DACs are overridable when there is
44286 * at least one exec bit set.
44287@@ -299,14 +307,6 @@ int generic_permission(struct inode *ino
44288 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44289 return 0;
44290
44291- /*
44292- * Searching includes executable on directories, else just read.
44293- */
44294- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
44295- if (mask == MAY_READ)
44296- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44297- return 0;
44298-
44299 return -EACCES;
44300 }
44301
44302@@ -653,11 +653,19 @@ follow_link(struct path *link, struct na
44303 return error;
44304 }
44305
44306+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
44307+ dentry->d_inode, dentry, nd->path.mnt)) {
44308+ error = -EACCES;
44309+ *p = ERR_PTR(error); /* no ->put_link(), please */
44310+ path_put(&nd->path);
44311+ return error;
44312+ }
44313+
44314 nd->last_type = LAST_BIND;
44315 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
44316 error = PTR_ERR(*p);
44317 if (!IS_ERR(*p)) {
44318- char *s = nd_get_link(nd);
44319+ const char *s = nd_get_link(nd);
44320 error = 0;
44321 if (s)
44322 error = __vfs_follow_link(nd, s);
44323@@ -1622,6 +1630,12 @@ static int path_lookupat(int dfd, const
44324 if (!err)
44325 err = complete_walk(nd);
44326
44327+ if (!(nd->flags & LOOKUP_PARENT) && !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44328+ if (!err)
44329+ path_put(&nd->path);
44330+ err = -ENOENT;
44331+ }
44332+
44333 if (!err && nd->flags & LOOKUP_DIRECTORY) {
44334 if (!nd->inode->i_op->lookup) {
44335 path_put(&nd->path);
44336@@ -1649,6 +1663,9 @@ static int do_path_lookup(int dfd, const
44337 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
44338
44339 if (likely(!retval)) {
44340+ if (*name != '/' && nd->path.dentry && nd->inode && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
44341+ return -ENOENT;
44342+
44343 if (unlikely(!audit_dummy_context())) {
44344 if (nd->path.dentry && nd->inode)
44345 audit_inode(name, nd->path.dentry);
44346@@ -2049,7 +2066,27 @@ static int may_open(struct path *path, i
44347 /*
44348 * Ensure there are no outstanding leases on the file.
44349 */
44350- return break_lease(inode, flag);
44351+ error = break_lease(inode, flag);
44352+
44353+ if (error)
44354+ return error;
44355+
44356+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
44357+ error = -EPERM;
44358+ goto exit;
44359+ }
44360+
44361+ if (gr_handle_rawio(inode)) {
44362+ error = -EPERM;
44363+ goto exit;
44364+ }
44365+
44366+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode)) {
44367+ error = -EACCES;
44368+ goto exit;
44369+ }
44370+exit:
44371+ return error;
44372 }
44373
44374 static int handle_truncate(struct file *filp)
44375@@ -2110,6 +2147,10 @@ static struct file *do_last(struct namei
44376 error = complete_walk(nd);
44377 if (error)
44378 return ERR_PTR(error);
44379+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44380+ error = -ENOENT;
44381+ goto exit;
44382+ }
44383 audit_inode(pathname, nd->path.dentry);
44384 if (open_flag & O_CREAT) {
44385 error = -EISDIR;
44386@@ -2120,6 +2161,10 @@ static struct file *do_last(struct namei
44387 error = complete_walk(nd);
44388 if (error)
44389 return ERR_PTR(error);
44390+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
44391+ error = -ENOENT;
44392+ goto exit;
44393+ }
44394 audit_inode(pathname, dir);
44395 goto ok;
44396 }
44397@@ -2142,6 +2187,11 @@ static struct file *do_last(struct namei
44398 if (error)
44399 return ERR_PTR(-ECHILD);
44400
44401+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44402+ error = -ENOENT;
44403+ goto exit;
44404+ }
44405+
44406 error = -ENOTDIR;
44407 if (nd->flags & LOOKUP_DIRECTORY) {
44408 if (!nd->inode->i_op->lookup)
44409@@ -2181,6 +2231,12 @@ static struct file *do_last(struct namei
44410 /* Negative dentry, just create the file */
44411 if (!dentry->d_inode) {
44412 int mode = op->mode;
44413+
44414+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
44415+ error = -EACCES;
44416+ goto exit_mutex_unlock;
44417+ }
44418+
44419 if (!IS_POSIXACL(dir->d_inode))
44420 mode &= ~current_umask();
44421 /*
44422@@ -2204,6 +2260,8 @@ static struct file *do_last(struct namei
44423 error = vfs_create(dir->d_inode, dentry, mode, nd);
44424 if (error)
44425 goto exit_mutex_unlock;
44426+ else
44427+ gr_handle_create(path->dentry, path->mnt);
44428 mutex_unlock(&dir->d_inode->i_mutex);
44429 dput(nd->path.dentry);
44430 nd->path.dentry = dentry;
44431@@ -2213,6 +2271,19 @@ static struct file *do_last(struct namei
44432 /*
44433 * It already exists.
44434 */
44435+
44436+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
44437+ error = -ENOENT;
44438+ goto exit_mutex_unlock;
44439+ }
44440+
44441+ /* only check if O_CREAT is specified, all other checks need to go
44442+ into may_open */
44443+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
44444+ error = -EACCES;
44445+ goto exit_mutex_unlock;
44446+ }
44447+
44448 mutex_unlock(&dir->d_inode->i_mutex);
44449 audit_inode(pathname, path->dentry);
44450
44451@@ -2425,6 +2496,11 @@ struct dentry *kern_path_create(int dfd,
44452 *path = nd.path;
44453 return dentry;
44454 eexist:
44455+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
44456+ dput(dentry);
44457+ dentry = ERR_PTR(-ENOENT);
44458+ goto fail;
44459+ }
44460 dput(dentry);
44461 dentry = ERR_PTR(-EEXIST);
44462 fail:
44463@@ -2447,6 +2523,20 @@ struct dentry *user_path_create(int dfd,
44464 }
44465 EXPORT_SYMBOL(user_path_create);
44466
44467+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
44468+{
44469+ char *tmp = getname(pathname);
44470+ struct dentry *res;
44471+ if (IS_ERR(tmp))
44472+ return ERR_CAST(tmp);
44473+ res = kern_path_create(dfd, tmp, path, is_dir);
44474+ if (IS_ERR(res))
44475+ putname(tmp);
44476+ else
44477+ *to = tmp;
44478+ return res;
44479+}
44480+
44481 int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
44482 {
44483 int error = may_create(dir, dentry);
44484@@ -2514,6 +2604,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
44485 error = mnt_want_write(path.mnt);
44486 if (error)
44487 goto out_dput;
44488+
44489+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
44490+ error = -EPERM;
44491+ goto out_drop_write;
44492+ }
44493+
44494+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
44495+ error = -EACCES;
44496+ goto out_drop_write;
44497+ }
44498+
44499 error = security_path_mknod(&path, dentry, mode, dev);
44500 if (error)
44501 goto out_drop_write;
44502@@ -2531,6 +2632,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
44503 }
44504 out_drop_write:
44505 mnt_drop_write(path.mnt);
44506+
44507+ if (!error)
44508+ gr_handle_create(dentry, path.mnt);
44509 out_dput:
44510 dput(dentry);
44511 mutex_unlock(&path.dentry->d_inode->i_mutex);
44512@@ -2580,12 +2684,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
44513 error = mnt_want_write(path.mnt);
44514 if (error)
44515 goto out_dput;
44516+
44517+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
44518+ error = -EACCES;
44519+ goto out_drop_write;
44520+ }
44521+
44522 error = security_path_mkdir(&path, dentry, mode);
44523 if (error)
44524 goto out_drop_write;
44525 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
44526 out_drop_write:
44527 mnt_drop_write(path.mnt);
44528+
44529+ if (!error)
44530+ gr_handle_create(dentry, path.mnt);
44531 out_dput:
44532 dput(dentry);
44533 mutex_unlock(&path.dentry->d_inode->i_mutex);
44534@@ -2665,6 +2778,8 @@ static long do_rmdir(int dfd, const char
44535 char * name;
44536 struct dentry *dentry;
44537 struct nameidata nd;
44538+ ino_t saved_ino = 0;
44539+ dev_t saved_dev = 0;
44540
44541 error = user_path_parent(dfd, pathname, &nd, &name);
44542 if (error)
44543@@ -2693,6 +2808,15 @@ static long do_rmdir(int dfd, const char
44544 error = -ENOENT;
44545 goto exit3;
44546 }
44547+
44548+ saved_ino = dentry->d_inode->i_ino;
44549+ saved_dev = gr_get_dev_from_dentry(dentry);
44550+
44551+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
44552+ error = -EACCES;
44553+ goto exit3;
44554+ }
44555+
44556 error = mnt_want_write(nd.path.mnt);
44557 if (error)
44558 goto exit3;
44559@@ -2700,6 +2824,8 @@ static long do_rmdir(int dfd, const char
44560 if (error)
44561 goto exit4;
44562 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
44563+ if (!error && (saved_dev || saved_ino))
44564+ gr_handle_delete(saved_ino, saved_dev);
44565 exit4:
44566 mnt_drop_write(nd.path.mnt);
44567 exit3:
44568@@ -2762,6 +2888,8 @@ static long do_unlinkat(int dfd, const c
44569 struct dentry *dentry;
44570 struct nameidata nd;
44571 struct inode *inode = NULL;
44572+ ino_t saved_ino = 0;
44573+ dev_t saved_dev = 0;
44574
44575 error = user_path_parent(dfd, pathname, &nd, &name);
44576 if (error)
44577@@ -2784,6 +2912,16 @@ static long do_unlinkat(int dfd, const c
44578 if (!inode)
44579 goto slashes;
44580 ihold(inode);
44581+
44582+ if (inode->i_nlink <= 1) {
44583+ saved_ino = inode->i_ino;
44584+ saved_dev = gr_get_dev_from_dentry(dentry);
44585+ }
44586+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
44587+ error = -EACCES;
44588+ goto exit2;
44589+ }
44590+
44591 error = mnt_want_write(nd.path.mnt);
44592 if (error)
44593 goto exit2;
44594@@ -2791,6 +2929,8 @@ static long do_unlinkat(int dfd, const c
44595 if (error)
44596 goto exit3;
44597 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
44598+ if (!error && (saved_ino || saved_dev))
44599+ gr_handle_delete(saved_ino, saved_dev);
44600 exit3:
44601 mnt_drop_write(nd.path.mnt);
44602 exit2:
44603@@ -2866,10 +3006,18 @@ SYSCALL_DEFINE3(symlinkat, const char __
44604 error = mnt_want_write(path.mnt);
44605 if (error)
44606 goto out_dput;
44607+
44608+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
44609+ error = -EACCES;
44610+ goto out_drop_write;
44611+ }
44612+
44613 error = security_path_symlink(&path, dentry, from);
44614 if (error)
44615 goto out_drop_write;
44616 error = vfs_symlink(path.dentry->d_inode, dentry, from);
44617+ if (!error)
44618+ gr_handle_create(dentry, path.mnt);
44619 out_drop_write:
44620 mnt_drop_write(path.mnt);
44621 out_dput:
44622@@ -2941,6 +3089,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
44623 {
44624 struct dentry *new_dentry;
44625 struct path old_path, new_path;
44626+ char *to;
44627 int how = 0;
44628 int error;
44629
44630@@ -2964,7 +3113,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
44631 if (error)
44632 return error;
44633
44634- new_dentry = user_path_create(newdfd, newname, &new_path, 0);
44635+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
44636 error = PTR_ERR(new_dentry);
44637 if (IS_ERR(new_dentry))
44638 goto out;
44639@@ -2975,13 +3124,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
44640 error = mnt_want_write(new_path.mnt);
44641 if (error)
44642 goto out_dput;
44643+
44644+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
44645+ old_path.dentry->d_inode,
44646+ old_path.dentry->d_inode->i_mode, to)) {
44647+ error = -EACCES;
44648+ goto out_drop_write;
44649+ }
44650+
44651+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
44652+ old_path.dentry, old_path.mnt, to)) {
44653+ error = -EACCES;
44654+ goto out_drop_write;
44655+ }
44656+
44657 error = security_path_link(old_path.dentry, &new_path, new_dentry);
44658 if (error)
44659 goto out_drop_write;
44660 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
44661+ if (!error)
44662+ gr_handle_create(new_dentry, new_path.mnt);
44663 out_drop_write:
44664 mnt_drop_write(new_path.mnt);
44665 out_dput:
44666+ putname(to);
44667 dput(new_dentry);
44668 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
44669 path_put(&new_path);
44670@@ -3153,6 +3319,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44671 char *to;
44672 int error;
44673
44674+ pax_track_stack();
44675+
44676 error = user_path_parent(olddfd, oldname, &oldnd, &from);
44677 if (error)
44678 goto exit;
44679@@ -3209,6 +3377,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44680 if (new_dentry == trap)
44681 goto exit5;
44682
44683+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
44684+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
44685+ to);
44686+ if (error)
44687+ goto exit5;
44688+
44689 error = mnt_want_write(oldnd.path.mnt);
44690 if (error)
44691 goto exit5;
44692@@ -3218,6 +3392,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44693 goto exit6;
44694 error = vfs_rename(old_dir->d_inode, old_dentry,
44695 new_dir->d_inode, new_dentry);
44696+ if (!error)
44697+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
44698+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
44699 exit6:
44700 mnt_drop_write(oldnd.path.mnt);
44701 exit5:
44702@@ -3243,6 +3420,8 @@ SYSCALL_DEFINE2(rename, const char __use
44703
44704 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
44705 {
44706+ char tmpbuf[64];
44707+ const char *newlink;
44708 int len;
44709
44710 len = PTR_ERR(link);
44711@@ -3252,7 +3431,14 @@ int vfs_readlink(struct dentry *dentry,
44712 len = strlen(link);
44713 if (len > (unsigned) buflen)
44714 len = buflen;
44715- if (copy_to_user(buffer, link, len))
44716+
44717+ if (len < sizeof(tmpbuf)) {
44718+ memcpy(tmpbuf, link, len);
44719+ newlink = tmpbuf;
44720+ } else
44721+ newlink = link;
44722+
44723+ if (copy_to_user(buffer, newlink, len))
44724 len = -EFAULT;
44725 out:
44726 return len;
44727diff -urNp linux-3.1.1/fs/namespace.c linux-3.1.1/fs/namespace.c
44728--- linux-3.1.1/fs/namespace.c 2011-11-11 15:19:27.000000000 -0500
44729+++ linux-3.1.1/fs/namespace.c 2011-11-16 18:40:29.000000000 -0500
44730@@ -1329,6 +1329,9 @@ static int do_umount(struct vfsmount *mn
44731 if (!(sb->s_flags & MS_RDONLY))
44732 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
44733 up_write(&sb->s_umount);
44734+
44735+ gr_log_remount(mnt->mnt_devname, retval);
44736+
44737 return retval;
44738 }
44739
44740@@ -1348,6 +1351,9 @@ static int do_umount(struct vfsmount *mn
44741 br_write_unlock(vfsmount_lock);
44742 up_write(&namespace_sem);
44743 release_mounts(&umount_list);
44744+
44745+ gr_log_unmount(mnt->mnt_devname, retval);
44746+
44747 return retval;
44748 }
44749
44750@@ -2339,6 +2345,16 @@ long do_mount(char *dev_name, char *dir_
44751 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
44752 MS_STRICTATIME);
44753
44754+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
44755+ retval = -EPERM;
44756+ goto dput_out;
44757+ }
44758+
44759+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
44760+ retval = -EPERM;
44761+ goto dput_out;
44762+ }
44763+
44764 if (flags & MS_REMOUNT)
44765 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
44766 data_page);
44767@@ -2353,6 +2369,9 @@ long do_mount(char *dev_name, char *dir_
44768 dev_name, data_page);
44769 dput_out:
44770 path_put(&path);
44771+
44772+ gr_log_mount(dev_name, dir_name, retval);
44773+
44774 return retval;
44775 }
44776
44777@@ -2576,6 +2595,11 @@ SYSCALL_DEFINE2(pivot_root, const char _
44778 if (error)
44779 goto out2;
44780
44781+ if (gr_handle_chroot_pivot()) {
44782+ error = -EPERM;
44783+ goto out2;
44784+ }
44785+
44786 get_fs_root(current->fs, &root);
44787 error = lock_mount(&old);
44788 if (error)
44789diff -urNp linux-3.1.1/fs/ncpfs/dir.c linux-3.1.1/fs/ncpfs/dir.c
44790--- linux-3.1.1/fs/ncpfs/dir.c 2011-11-11 15:19:27.000000000 -0500
44791+++ linux-3.1.1/fs/ncpfs/dir.c 2011-11-16 18:40:29.000000000 -0500
44792@@ -299,6 +299,8 @@ ncp_lookup_validate(struct dentry *dentr
44793 int res, val = 0, len;
44794 __u8 __name[NCP_MAXPATHLEN + 1];
44795
44796+ pax_track_stack();
44797+
44798 if (dentry == dentry->d_sb->s_root)
44799 return 1;
44800
44801@@ -844,6 +846,8 @@ static struct dentry *ncp_lookup(struct
44802 int error, res, len;
44803 __u8 __name[NCP_MAXPATHLEN + 1];
44804
44805+ pax_track_stack();
44806+
44807 error = -EIO;
44808 if (!ncp_conn_valid(server))
44809 goto finished;
44810@@ -931,6 +935,8 @@ int ncp_create_new(struct inode *dir, st
44811 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
44812 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
44813
44814+ pax_track_stack();
44815+
44816 ncp_age_dentry(server, dentry);
44817 len = sizeof(__name);
44818 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
44819@@ -992,6 +998,8 @@ static int ncp_mkdir(struct inode *dir,
44820 int error, len;
44821 __u8 __name[NCP_MAXPATHLEN + 1];
44822
44823+ pax_track_stack();
44824+
44825 DPRINTK("ncp_mkdir: making %s/%s\n",
44826 dentry->d_parent->d_name.name, dentry->d_name.name);
44827
44828@@ -1140,6 +1148,8 @@ static int ncp_rename(struct inode *old_
44829 int old_len, new_len;
44830 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
44831
44832+ pax_track_stack();
44833+
44834 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
44835 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
44836 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
44837diff -urNp linux-3.1.1/fs/ncpfs/inode.c linux-3.1.1/fs/ncpfs/inode.c
44838--- linux-3.1.1/fs/ncpfs/inode.c 2011-11-11 15:19:27.000000000 -0500
44839+++ linux-3.1.1/fs/ncpfs/inode.c 2011-11-16 18:40:29.000000000 -0500
44840@@ -461,6 +461,8 @@ static int ncp_fill_super(struct super_b
44841 #endif
44842 struct ncp_entry_info finfo;
44843
44844+ pax_track_stack();
44845+
44846 memset(&data, 0, sizeof(data));
44847 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
44848 if (!server)
44849diff -urNp linux-3.1.1/fs/nfs/blocklayout/blocklayout.c linux-3.1.1/fs/nfs/blocklayout/blocklayout.c
44850--- linux-3.1.1/fs/nfs/blocklayout/blocklayout.c 2011-11-11 15:19:27.000000000 -0500
44851+++ linux-3.1.1/fs/nfs/blocklayout/blocklayout.c 2011-11-16 18:39:08.000000000 -0500
44852@@ -90,7 +90,7 @@ static int is_writable(struct pnfs_block
44853 */
44854 struct parallel_io {
44855 struct kref refcnt;
44856- struct rpc_call_ops call_ops;
44857+ rpc_call_ops_no_const call_ops;
44858 void (*pnfs_callback) (void *data);
44859 void *data;
44860 };
44861diff -urNp linux-3.1.1/fs/nfs/inode.c linux-3.1.1/fs/nfs/inode.c
44862--- linux-3.1.1/fs/nfs/inode.c 2011-11-11 15:19:27.000000000 -0500
44863+++ linux-3.1.1/fs/nfs/inode.c 2011-11-16 18:39:08.000000000 -0500
44864@@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct
44865 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
44866 nfsi->attrtimeo_timestamp = jiffies;
44867
44868- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
44869+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
44870 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
44871 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
44872 else
44873@@ -1002,16 +1002,16 @@ static int nfs_size_need_update(const st
44874 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
44875 }
44876
44877-static atomic_long_t nfs_attr_generation_counter;
44878+static atomic_long_unchecked_t nfs_attr_generation_counter;
44879
44880 static unsigned long nfs_read_attr_generation_counter(void)
44881 {
44882- return atomic_long_read(&nfs_attr_generation_counter);
44883+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
44884 }
44885
44886 unsigned long nfs_inc_attr_generation_counter(void)
44887 {
44888- return atomic_long_inc_return(&nfs_attr_generation_counter);
44889+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
44890 }
44891
44892 void nfs_fattr_init(struct nfs_fattr *fattr)
44893diff -urNp linux-3.1.1/fs/nfsd/nfs4state.c linux-3.1.1/fs/nfsd/nfs4state.c
44894--- linux-3.1.1/fs/nfsd/nfs4state.c 2011-11-11 15:19:27.000000000 -0500
44895+++ linux-3.1.1/fs/nfsd/nfs4state.c 2011-11-16 18:40:29.000000000 -0500
44896@@ -3999,6 +3999,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
44897 unsigned int strhashval;
44898 int err;
44899
44900+ pax_track_stack();
44901+
44902 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
44903 (long long) lock->lk_offset,
44904 (long long) lock->lk_length);
44905diff -urNp linux-3.1.1/fs/nfsd/nfs4xdr.c linux-3.1.1/fs/nfsd/nfs4xdr.c
44906--- linux-3.1.1/fs/nfsd/nfs4xdr.c 2011-11-11 15:19:27.000000000 -0500
44907+++ linux-3.1.1/fs/nfsd/nfs4xdr.c 2011-11-16 18:40:29.000000000 -0500
44908@@ -1875,6 +1875,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
44909 .dentry = dentry,
44910 };
44911
44912+ pax_track_stack();
44913+
44914 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
44915 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
44916 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
44917diff -urNp linux-3.1.1/fs/nfsd/vfs.c linux-3.1.1/fs/nfsd/vfs.c
44918--- linux-3.1.1/fs/nfsd/vfs.c 2011-11-11 15:19:27.000000000 -0500
44919+++ linux-3.1.1/fs/nfsd/vfs.c 2011-11-16 18:39:08.000000000 -0500
44920@@ -896,7 +896,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
44921 } else {
44922 oldfs = get_fs();
44923 set_fs(KERNEL_DS);
44924- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
44925+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
44926 set_fs(oldfs);
44927 }
44928
44929@@ -1000,7 +1000,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
44930
44931 /* Write the data. */
44932 oldfs = get_fs(); set_fs(KERNEL_DS);
44933- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
44934+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
44935 set_fs(oldfs);
44936 if (host_err < 0)
44937 goto out_nfserr;
44938@@ -1535,7 +1535,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
44939 */
44940
44941 oldfs = get_fs(); set_fs(KERNEL_DS);
44942- host_err = inode->i_op->readlink(dentry, buf, *lenp);
44943+ host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
44944 set_fs(oldfs);
44945
44946 if (host_err < 0)
44947diff -urNp linux-3.1.1/fs/notify/fanotify/fanotify_user.c linux-3.1.1/fs/notify/fanotify/fanotify_user.c
44948--- linux-3.1.1/fs/notify/fanotify/fanotify_user.c 2011-11-11 15:19:27.000000000 -0500
44949+++ linux-3.1.1/fs/notify/fanotify/fanotify_user.c 2011-11-16 18:39:08.000000000 -0500
44950@@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct
44951 goto out_close_fd;
44952
44953 ret = -EFAULT;
44954- if (copy_to_user(buf, &fanotify_event_metadata,
44955+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
44956+ copy_to_user(buf, &fanotify_event_metadata,
44957 fanotify_event_metadata.event_len))
44958 goto out_kill_access_response;
44959
44960diff -urNp linux-3.1.1/fs/notify/notification.c linux-3.1.1/fs/notify/notification.c
44961--- linux-3.1.1/fs/notify/notification.c 2011-11-11 15:19:27.000000000 -0500
44962+++ linux-3.1.1/fs/notify/notification.c 2011-11-16 18:39:08.000000000 -0500
44963@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
44964 * get set to 0 so it will never get 'freed'
44965 */
44966 static struct fsnotify_event *q_overflow_event;
44967-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44968+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44969
44970 /**
44971 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
44972@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
44973 */
44974 u32 fsnotify_get_cookie(void)
44975 {
44976- return atomic_inc_return(&fsnotify_sync_cookie);
44977+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
44978 }
44979 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
44980
44981diff -urNp linux-3.1.1/fs/ntfs/dir.c linux-3.1.1/fs/ntfs/dir.c
44982--- linux-3.1.1/fs/ntfs/dir.c 2011-11-11 15:19:27.000000000 -0500
44983+++ linux-3.1.1/fs/ntfs/dir.c 2011-11-16 18:39:08.000000000 -0500
44984@@ -1329,7 +1329,7 @@ find_next_index_buffer:
44985 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
44986 ~(s64)(ndir->itype.index.block_size - 1)));
44987 /* Bounds checks. */
44988- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44989+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44990 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
44991 "inode 0x%lx or driver bug.", vdir->i_ino);
44992 goto err_out;
44993diff -urNp linux-3.1.1/fs/ntfs/file.c linux-3.1.1/fs/ntfs/file.c
44994--- linux-3.1.1/fs/ntfs/file.c 2011-11-11 15:19:27.000000000 -0500
44995+++ linux-3.1.1/fs/ntfs/file.c 2011-11-16 18:39:08.000000000 -0500
44996@@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_
44997 #endif /* NTFS_RW */
44998 };
44999
45000-const struct file_operations ntfs_empty_file_ops = {};
45001+const struct file_operations ntfs_empty_file_ops __read_only;
45002
45003-const struct inode_operations ntfs_empty_inode_ops = {};
45004+const struct inode_operations ntfs_empty_inode_ops __read_only;
45005diff -urNp linux-3.1.1/fs/ocfs2/localalloc.c linux-3.1.1/fs/ocfs2/localalloc.c
45006--- linux-3.1.1/fs/ocfs2/localalloc.c 2011-11-11 15:19:27.000000000 -0500
45007+++ linux-3.1.1/fs/ocfs2/localalloc.c 2011-11-16 18:39:08.000000000 -0500
45008@@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_windo
45009 goto bail;
45010 }
45011
45012- atomic_inc(&osb->alloc_stats.moves);
45013+ atomic_inc_unchecked(&osb->alloc_stats.moves);
45014
45015 bail:
45016 if (handle)
45017diff -urNp linux-3.1.1/fs/ocfs2/namei.c linux-3.1.1/fs/ocfs2/namei.c
45018--- linux-3.1.1/fs/ocfs2/namei.c 2011-11-11 15:19:27.000000000 -0500
45019+++ linux-3.1.1/fs/ocfs2/namei.c 2011-11-16 18:40:29.000000000 -0500
45020@@ -1063,6 +1063,8 @@ static int ocfs2_rename(struct inode *ol
45021 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
45022 struct ocfs2_dir_lookup_result target_insert = { NULL, };
45023
45024+ pax_track_stack();
45025+
45026 /* At some point it might be nice to break this function up a
45027 * bit. */
45028
45029diff -urNp linux-3.1.1/fs/ocfs2/ocfs2.h linux-3.1.1/fs/ocfs2/ocfs2.h
45030--- linux-3.1.1/fs/ocfs2/ocfs2.h 2011-11-11 15:19:27.000000000 -0500
45031+++ linux-3.1.1/fs/ocfs2/ocfs2.h 2011-11-16 18:39:08.000000000 -0500
45032@@ -235,11 +235,11 @@ enum ocfs2_vol_state
45033
45034 struct ocfs2_alloc_stats
45035 {
45036- atomic_t moves;
45037- atomic_t local_data;
45038- atomic_t bitmap_data;
45039- atomic_t bg_allocs;
45040- atomic_t bg_extends;
45041+ atomic_unchecked_t moves;
45042+ atomic_unchecked_t local_data;
45043+ atomic_unchecked_t bitmap_data;
45044+ atomic_unchecked_t bg_allocs;
45045+ atomic_unchecked_t bg_extends;
45046 };
45047
45048 enum ocfs2_local_alloc_state
45049diff -urNp linux-3.1.1/fs/ocfs2/suballoc.c linux-3.1.1/fs/ocfs2/suballoc.c
45050--- linux-3.1.1/fs/ocfs2/suballoc.c 2011-11-11 15:19:27.000000000 -0500
45051+++ linux-3.1.1/fs/ocfs2/suballoc.c 2011-11-16 18:39:08.000000000 -0500
45052@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(s
45053 mlog_errno(status);
45054 goto bail;
45055 }
45056- atomic_inc(&osb->alloc_stats.bg_extends);
45057+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
45058
45059 /* You should never ask for this much metadata */
45060 BUG_ON(bits_wanted >
45061@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handl
45062 mlog_errno(status);
45063 goto bail;
45064 }
45065- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45066+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45067
45068 *suballoc_loc = res.sr_bg_blkno;
45069 *suballoc_bit_start = res.sr_bit_offset;
45070@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_
45071 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
45072 res->sr_bits);
45073
45074- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45075+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45076
45077 BUG_ON(res->sr_bits != 1);
45078
45079@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *hand
45080 mlog_errno(status);
45081 goto bail;
45082 }
45083- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45084+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45085
45086 BUG_ON(res.sr_bits != 1);
45087
45088@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *han
45089 cluster_start,
45090 num_clusters);
45091 if (!status)
45092- atomic_inc(&osb->alloc_stats.local_data);
45093+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
45094 } else {
45095 if (min_clusters > (osb->bitmap_cpg - 1)) {
45096 /* The only paths asking for contiguousness
45097@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *han
45098 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
45099 res.sr_bg_blkno,
45100 res.sr_bit_offset);
45101- atomic_inc(&osb->alloc_stats.bitmap_data);
45102+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
45103 *num_clusters = res.sr_bits;
45104 }
45105 }
45106diff -urNp linux-3.1.1/fs/ocfs2/super.c linux-3.1.1/fs/ocfs2/super.c
45107--- linux-3.1.1/fs/ocfs2/super.c 2011-11-11 15:19:27.000000000 -0500
45108+++ linux-3.1.1/fs/ocfs2/super.c 2011-11-16 18:39:08.000000000 -0500
45109@@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
45110 "%10s => GlobalAllocs: %d LocalAllocs: %d "
45111 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
45112 "Stats",
45113- atomic_read(&osb->alloc_stats.bitmap_data),
45114- atomic_read(&osb->alloc_stats.local_data),
45115- atomic_read(&osb->alloc_stats.bg_allocs),
45116- atomic_read(&osb->alloc_stats.moves),
45117- atomic_read(&osb->alloc_stats.bg_extends));
45118+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
45119+ atomic_read_unchecked(&osb->alloc_stats.local_data),
45120+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
45121+ atomic_read_unchecked(&osb->alloc_stats.moves),
45122+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
45123
45124 out += snprintf(buf + out, len - out,
45125 "%10s => State: %u Descriptor: %llu Size: %u bits "
45126@@ -2112,11 +2112,11 @@ static int ocfs2_initialize_super(struct
45127 spin_lock_init(&osb->osb_xattr_lock);
45128 ocfs2_init_steal_slots(osb);
45129
45130- atomic_set(&osb->alloc_stats.moves, 0);
45131- atomic_set(&osb->alloc_stats.local_data, 0);
45132- atomic_set(&osb->alloc_stats.bitmap_data, 0);
45133- atomic_set(&osb->alloc_stats.bg_allocs, 0);
45134- atomic_set(&osb->alloc_stats.bg_extends, 0);
45135+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
45136+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
45137+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
45138+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
45139+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
45140
45141 /* Copy the blockcheck stats from the superblock probe */
45142 osb->osb_ecc_stats = *stats;
45143diff -urNp linux-3.1.1/fs/ocfs2/symlink.c linux-3.1.1/fs/ocfs2/symlink.c
45144--- linux-3.1.1/fs/ocfs2/symlink.c 2011-11-11 15:19:27.000000000 -0500
45145+++ linux-3.1.1/fs/ocfs2/symlink.c 2011-11-16 18:39:08.000000000 -0500
45146@@ -142,7 +142,7 @@ bail:
45147
45148 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
45149 {
45150- char *link = nd_get_link(nd);
45151+ const char *link = nd_get_link(nd);
45152 if (!IS_ERR(link))
45153 kfree(link);
45154 }
45155diff -urNp linux-3.1.1/fs/open.c linux-3.1.1/fs/open.c
45156--- linux-3.1.1/fs/open.c 2011-11-11 15:19:27.000000000 -0500
45157+++ linux-3.1.1/fs/open.c 2011-11-17 19:07:55.000000000 -0500
45158@@ -112,6 +112,10 @@ static long do_sys_truncate(const char _
45159 error = locks_verify_truncate(inode, NULL, length);
45160 if (!error)
45161 error = security_path_truncate(&path);
45162+
45163+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
45164+ error = -EACCES;
45165+
45166 if (!error)
45167 error = do_truncate(path.dentry, length, 0, NULL);
45168
45169@@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
45170 if (__mnt_is_readonly(path.mnt))
45171 res = -EROFS;
45172
45173+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
45174+ res = -EACCES;
45175+
45176 out_path_release:
45177 path_put(&path);
45178 out:
45179@@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user
45180 if (error)
45181 goto dput_and_out;
45182
45183+ gr_log_chdir(path.dentry, path.mnt);
45184+
45185 set_fs_pwd(current->fs, &path);
45186
45187 dput_and_out:
45188@@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
45189 goto out_putf;
45190
45191 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
45192+
45193+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
45194+ error = -EPERM;
45195+
45196+ if (!error)
45197+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
45198+
45199 if (!error)
45200 set_fs_pwd(current->fs, &file->f_path);
45201 out_putf:
45202@@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __use
45203 if (error)
45204 goto dput_and_out;
45205
45206+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
45207+ goto dput_and_out;
45208+
45209 set_fs_root(current->fs, &path);
45210+
45211+ gr_handle_chroot_chdir(&path);
45212+
45213 error = 0;
45214 dput_and_out:
45215 path_put(&path);
45216@@ -456,6 +478,16 @@ static int chmod_common(struct path *pat
45217 if (error)
45218 return error;
45219 mutex_lock(&inode->i_mutex);
45220+
45221+ if (!gr_acl_handle_fchmod(path->dentry, path->mnt, mode)) {
45222+ error = -EACCES;
45223+ goto out_unlock;
45224+ }
45225+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
45226+ error = -EACCES;
45227+ goto out_unlock;
45228+ }
45229+
45230 error = security_path_chmod(path->dentry, path->mnt, mode);
45231 if (error)
45232 goto out_unlock;
45233@@ -506,6 +538,9 @@ static int chown_common(struct path *pat
45234 int error;
45235 struct iattr newattrs;
45236
45237+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
45238+ return -EACCES;
45239+
45240 newattrs.ia_valid = ATTR_CTIME;
45241 if (user != (uid_t) -1) {
45242 newattrs.ia_valid |= ATTR_UID;
45243diff -urNp linux-3.1.1/fs/partitions/ldm.c linux-3.1.1/fs/partitions/ldm.c
45244--- linux-3.1.1/fs/partitions/ldm.c 2011-11-11 15:19:27.000000000 -0500
45245+++ linux-3.1.1/fs/partitions/ldm.c 2011-11-17 19:08:15.000000000 -0500
45246@@ -1322,7 +1322,7 @@ static bool ldm_frag_add (const u8 *data
45247 goto found;
45248 }
45249
45250- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
45251+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
45252 if (!f) {
45253 ldm_crit ("Out of memory.");
45254 return false;
45255diff -urNp linux-3.1.1/fs/pipe.c linux-3.1.1/fs/pipe.c
45256--- linux-3.1.1/fs/pipe.c 2011-11-11 15:19:27.000000000 -0500
45257+++ linux-3.1.1/fs/pipe.c 2011-11-16 18:40:29.000000000 -0500
45258@@ -420,9 +420,9 @@ redo:
45259 }
45260 if (bufs) /* More to do? */
45261 continue;
45262- if (!pipe->writers)
45263+ if (!atomic_read(&pipe->writers))
45264 break;
45265- if (!pipe->waiting_writers) {
45266+ if (!atomic_read(&pipe->waiting_writers)) {
45267 /* syscall merging: Usually we must not sleep
45268 * if O_NONBLOCK is set, or if we got some data.
45269 * But if a writer sleeps in kernel space, then
45270@@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const str
45271 mutex_lock(&inode->i_mutex);
45272 pipe = inode->i_pipe;
45273
45274- if (!pipe->readers) {
45275+ if (!atomic_read(&pipe->readers)) {
45276 send_sig(SIGPIPE, current, 0);
45277 ret = -EPIPE;
45278 goto out;
45279@@ -530,7 +530,7 @@ redo1:
45280 for (;;) {
45281 int bufs;
45282
45283- if (!pipe->readers) {
45284+ if (!atomic_read(&pipe->readers)) {
45285 send_sig(SIGPIPE, current, 0);
45286 if (!ret)
45287 ret = -EPIPE;
45288@@ -616,9 +616,9 @@ redo2:
45289 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
45290 do_wakeup = 0;
45291 }
45292- pipe->waiting_writers++;
45293+ atomic_inc(&pipe->waiting_writers);
45294 pipe_wait(pipe);
45295- pipe->waiting_writers--;
45296+ atomic_dec(&pipe->waiting_writers);
45297 }
45298 out:
45299 mutex_unlock(&inode->i_mutex);
45300@@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table
45301 mask = 0;
45302 if (filp->f_mode & FMODE_READ) {
45303 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
45304- if (!pipe->writers && filp->f_version != pipe->w_counter)
45305+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
45306 mask |= POLLHUP;
45307 }
45308
45309@@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table
45310 * Most Unices do not set POLLERR for FIFOs but on Linux they
45311 * behave exactly like pipes for poll().
45312 */
45313- if (!pipe->readers)
45314+ if (!atomic_read(&pipe->readers))
45315 mask |= POLLERR;
45316 }
45317
45318@@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int de
45319
45320 mutex_lock(&inode->i_mutex);
45321 pipe = inode->i_pipe;
45322- pipe->readers -= decr;
45323- pipe->writers -= decw;
45324+ atomic_sub(decr, &pipe->readers);
45325+ atomic_sub(decw, &pipe->writers);
45326
45327- if (!pipe->readers && !pipe->writers) {
45328+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
45329 free_pipe_info(inode);
45330 } else {
45331 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
45332@@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, stru
45333
45334 if (inode->i_pipe) {
45335 ret = 0;
45336- inode->i_pipe->readers++;
45337+ atomic_inc(&inode->i_pipe->readers);
45338 }
45339
45340 mutex_unlock(&inode->i_mutex);
45341@@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, str
45342
45343 if (inode->i_pipe) {
45344 ret = 0;
45345- inode->i_pipe->writers++;
45346+ atomic_inc(&inode->i_pipe->writers);
45347 }
45348
45349 mutex_unlock(&inode->i_mutex);
45350@@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, stru
45351 if (inode->i_pipe) {
45352 ret = 0;
45353 if (filp->f_mode & FMODE_READ)
45354- inode->i_pipe->readers++;
45355+ atomic_inc(&inode->i_pipe->readers);
45356 if (filp->f_mode & FMODE_WRITE)
45357- inode->i_pipe->writers++;
45358+ atomic_inc(&inode->i_pipe->writers);
45359 }
45360
45361 mutex_unlock(&inode->i_mutex);
45362@@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
45363 inode->i_pipe = NULL;
45364 }
45365
45366-static struct vfsmount *pipe_mnt __read_mostly;
45367+struct vfsmount *pipe_mnt __read_mostly;
45368
45369 /*
45370 * pipefs_dname() is called from d_path().
45371@@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(voi
45372 goto fail_iput;
45373 inode->i_pipe = pipe;
45374
45375- pipe->readers = pipe->writers = 1;
45376+ atomic_set(&pipe->readers, 1);
45377+ atomic_set(&pipe->writers, 1);
45378 inode->i_fop = &rdwr_pipefifo_fops;
45379
45380 /*
45381diff -urNp linux-3.1.1/fs/proc/array.c linux-3.1.1/fs/proc/array.c
45382--- linux-3.1.1/fs/proc/array.c 2011-11-11 15:19:27.000000000 -0500
45383+++ linux-3.1.1/fs/proc/array.c 2011-11-17 18:42:02.000000000 -0500
45384@@ -60,6 +60,7 @@
45385 #include <linux/tty.h>
45386 #include <linux/string.h>
45387 #include <linux/mman.h>
45388+#include <linux/grsecurity.h>
45389 #include <linux/proc_fs.h>
45390 #include <linux/ioport.h>
45391 #include <linux/uaccess.h>
45392@@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq
45393 seq_putc(m, '\n');
45394 }
45395
45396+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45397+static inline void task_pax(struct seq_file *m, struct task_struct *p)
45398+{
45399+ if (p->mm)
45400+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
45401+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
45402+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
45403+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
45404+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
45405+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
45406+ else
45407+ seq_printf(m, "PaX:\t-----\n");
45408+}
45409+#endif
45410+
45411 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
45412 struct pid *pid, struct task_struct *task)
45413 {
45414@@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m,
45415 task_cpus_allowed(m, task);
45416 cpuset_task_status_allowed(m, task);
45417 task_context_switch_counts(m, task);
45418+
45419+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45420+ task_pax(m, task);
45421+#endif
45422+
45423+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
45424+ task_grsec_rbac(m, task);
45425+#endif
45426+
45427 return 0;
45428 }
45429
45430+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45431+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45432+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
45433+ _mm->pax_flags & MF_PAX_SEGMEXEC))
45434+#endif
45435+
45436 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45437 struct pid *pid, struct task_struct *task, int whole)
45438 {
45439@@ -378,6 +409,8 @@ static int do_task_stat(struct seq_file
45440 char tcomm[sizeof(task->comm)];
45441 unsigned long flags;
45442
45443+ pax_track_stack();
45444+
45445 state = *get_task_state(task);
45446 vsize = eip = esp = 0;
45447 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
45448@@ -449,6 +482,19 @@ static int do_task_stat(struct seq_file
45449 gtime = task->gtime;
45450 }
45451
45452+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45453+ if (PAX_RAND_FLAGS(mm)) {
45454+ eip = 0;
45455+ esp = 0;
45456+ wchan = 0;
45457+ }
45458+#endif
45459+#ifdef CONFIG_GRKERNSEC_HIDESYM
45460+ wchan = 0;
45461+ eip =0;
45462+ esp =0;
45463+#endif
45464+
45465 /* scale priority and nice values from timeslices to -20..20 */
45466 /* to make it look like a "normal" Unix priority/nice value */
45467 priority = task_prio(task);
45468@@ -489,9 +535,15 @@ static int do_task_stat(struct seq_file
45469 vsize,
45470 mm ? get_mm_rss(mm) : 0,
45471 rsslim,
45472+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45473+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
45474+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
45475+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
45476+#else
45477 mm ? (permitted ? mm->start_code : 1) : 0,
45478 mm ? (permitted ? mm->end_code : 1) : 0,
45479 (permitted && mm) ? mm->start_stack : 0,
45480+#endif
45481 esp,
45482 eip,
45483 /* The signal information here is obsolete.
45484@@ -544,3 +596,18 @@ int proc_pid_statm(struct seq_file *m, s
45485
45486 return 0;
45487 }
45488+
45489+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45490+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
45491+{
45492+ u32 curr_ip = 0;
45493+ unsigned long flags;
45494+
45495+ if (lock_task_sighand(task, &flags)) {
45496+ curr_ip = task->signal->curr_ip;
45497+ unlock_task_sighand(task, &flags);
45498+ }
45499+
45500+ return sprintf(buffer, "%pI4\n", &curr_ip);
45501+}
45502+#endif
45503diff -urNp linux-3.1.1/fs/proc/base.c linux-3.1.1/fs/proc/base.c
45504--- linux-3.1.1/fs/proc/base.c 2011-11-11 15:19:27.000000000 -0500
45505+++ linux-3.1.1/fs/proc/base.c 2011-11-17 18:43:19.000000000 -0500
45506@@ -107,6 +107,22 @@ struct pid_entry {
45507 union proc_op op;
45508 };
45509
45510+struct getdents_callback {
45511+ struct linux_dirent __user * current_dir;
45512+ struct linux_dirent __user * previous;
45513+ struct file * file;
45514+ int count;
45515+ int error;
45516+};
45517+
45518+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
45519+ loff_t offset, u64 ino, unsigned int d_type)
45520+{
45521+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
45522+ buf->error = -EINVAL;
45523+ return 0;
45524+}
45525+
45526 #define NOD(NAME, MODE, IOP, FOP, OP) { \
45527 .name = (NAME), \
45528 .len = sizeof(NAME) - 1, \
45529@@ -209,6 +225,9 @@ static struct mm_struct *__check_mem_per
45530 if (task == current)
45531 return mm;
45532
45533+ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
45534+ return ERR_PTR(-EPERM);
45535+
45536 /*
45537 * If current is actively ptrace'ing, and would also be
45538 * permitted to freshly attach with ptrace now, permit it.
45539@@ -282,6 +301,9 @@ static int proc_pid_cmdline(struct task_
45540 if (!mm->arg_end)
45541 goto out_mm; /* Shh! No looking before we're done */
45542
45543+ if (gr_acl_handle_procpidmem(task))
45544+ goto out_mm;
45545+
45546 len = mm->arg_end - mm->arg_start;
45547
45548 if (len > PAGE_SIZE)
45549@@ -309,12 +331,28 @@ out:
45550 return res;
45551 }
45552
45553+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45554+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45555+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
45556+ _mm->pax_flags & MF_PAX_SEGMEXEC))
45557+#endif
45558+
45559 static int proc_pid_auxv(struct task_struct *task, char *buffer)
45560 {
45561 struct mm_struct *mm = mm_for_maps(task);
45562 int res = PTR_ERR(mm);
45563 if (mm && !IS_ERR(mm)) {
45564 unsigned int nwords = 0;
45565+
45566+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45567+ /* allow if we're currently ptracing this task */
45568+ if (PAX_RAND_FLAGS(mm) &&
45569+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
45570+ mmput(mm);
45571+ return 0;
45572+ }
45573+#endif
45574+
45575 do {
45576 nwords += 2;
45577 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
45578@@ -328,7 +366,7 @@ static int proc_pid_auxv(struct task_str
45579 }
45580
45581
45582-#ifdef CONFIG_KALLSYMS
45583+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45584 /*
45585 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
45586 * Returns the resolved symbol. If that fails, simply return the address.
45587@@ -367,7 +405,7 @@ static void unlock_trace(struct task_str
45588 mutex_unlock(&task->signal->cred_guard_mutex);
45589 }
45590
45591-#ifdef CONFIG_STACKTRACE
45592+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45593
45594 #define MAX_STACK_TRACE_DEPTH 64
45595
45596@@ -558,7 +596,7 @@ static int proc_pid_limits(struct task_s
45597 return count;
45598 }
45599
45600-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45601+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45602 static int proc_pid_syscall(struct task_struct *task, char *buffer)
45603 {
45604 long nr;
45605@@ -587,7 +625,7 @@ static int proc_pid_syscall(struct task_
45606 /************************************************************************/
45607
45608 /* permission checks */
45609-static int proc_fd_access_allowed(struct inode *inode)
45610+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
45611 {
45612 struct task_struct *task;
45613 int allowed = 0;
45614@@ -597,7 +635,10 @@ static int proc_fd_access_allowed(struct
45615 */
45616 task = get_proc_task(inode);
45617 if (task) {
45618- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45619+ if (log)
45620+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
45621+ else
45622+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45623 put_task_struct(task);
45624 }
45625 return allowed;
45626@@ -978,6 +1019,9 @@ static ssize_t environ_read(struct file
45627 if (!task)
45628 goto out_no_task;
45629
45630+ if (gr_acl_handle_procpidmem(task))
45631+ goto out;
45632+
45633 ret = -ENOMEM;
45634 page = (char *)__get_free_page(GFP_TEMPORARY);
45635 if (!page)
45636@@ -1613,7 +1657,7 @@ static void *proc_pid_follow_link(struct
45637 path_put(&nd->path);
45638
45639 /* Are we allowed to snoop on the tasks file descriptors? */
45640- if (!proc_fd_access_allowed(inode))
45641+ if (!proc_fd_access_allowed(inode,0))
45642 goto out;
45643
45644 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
45645@@ -1652,8 +1696,18 @@ static int proc_pid_readlink(struct dent
45646 struct path path;
45647
45648 /* Are we allowed to snoop on the tasks file descriptors? */
45649- if (!proc_fd_access_allowed(inode))
45650- goto out;
45651+ /* logging this is needed for learning on chromium to work properly,
45652+ but we don't want to flood the logs from 'ps' which does a readlink
45653+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
45654+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
45655+ */
45656+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
45657+ if (!proc_fd_access_allowed(inode,0))
45658+ goto out;
45659+ } else {
45660+ if (!proc_fd_access_allowed(inode,1))
45661+ goto out;
45662+ }
45663
45664 error = PROC_I(inode)->op.proc_get_link(inode, &path);
45665 if (error)
45666@@ -1718,7 +1772,11 @@ struct inode *proc_pid_make_inode(struct
45667 rcu_read_lock();
45668 cred = __task_cred(task);
45669 inode->i_uid = cred->euid;
45670+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45671+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45672+#else
45673 inode->i_gid = cred->egid;
45674+#endif
45675 rcu_read_unlock();
45676 }
45677 security_task_to_inode(task, inode);
45678@@ -1736,6 +1794,9 @@ int pid_getattr(struct vfsmount *mnt, st
45679 struct inode *inode = dentry->d_inode;
45680 struct task_struct *task;
45681 const struct cred *cred;
45682+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45683+ const struct cred *tmpcred = current_cred();
45684+#endif
45685
45686 generic_fillattr(inode, stat);
45687
45688@@ -1743,13 +1804,41 @@ int pid_getattr(struct vfsmount *mnt, st
45689 stat->uid = 0;
45690 stat->gid = 0;
45691 task = pid_task(proc_pid(inode), PIDTYPE_PID);
45692+
45693+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
45694+ rcu_read_unlock();
45695+ return -ENOENT;
45696+ }
45697+
45698 if (task) {
45699+ cred = __task_cred(task);
45700+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45701+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
45702+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45703+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45704+#endif
45705+ ) {
45706+#endif
45707 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45708+#ifdef CONFIG_GRKERNSEC_PROC_USER
45709+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45710+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45711+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45712+#endif
45713 task_dumpable(task)) {
45714- cred = __task_cred(task);
45715 stat->uid = cred->euid;
45716+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45717+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
45718+#else
45719 stat->gid = cred->egid;
45720+#endif
45721 }
45722+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45723+ } else {
45724+ rcu_read_unlock();
45725+ return -ENOENT;
45726+ }
45727+#endif
45728 }
45729 rcu_read_unlock();
45730 return 0;
45731@@ -1786,11 +1875,20 @@ int pid_revalidate(struct dentry *dentry
45732
45733 if (task) {
45734 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45735+#ifdef CONFIG_GRKERNSEC_PROC_USER
45736+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45737+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45738+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45739+#endif
45740 task_dumpable(task)) {
45741 rcu_read_lock();
45742 cred = __task_cred(task);
45743 inode->i_uid = cred->euid;
45744+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45745+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45746+#else
45747 inode->i_gid = cred->egid;
45748+#endif
45749 rcu_read_unlock();
45750 } else {
45751 inode->i_uid = 0;
45752@@ -1908,7 +2006,8 @@ static int proc_fd_info(struct inode *in
45753 int fd = proc_fd(inode);
45754
45755 if (task) {
45756- files = get_files_struct(task);
45757+ if (!gr_acl_handle_procpidmem(task))
45758+ files = get_files_struct(task);
45759 put_task_struct(task);
45760 }
45761 if (files) {
45762@@ -2176,11 +2275,21 @@ static const struct file_operations proc
45763 */
45764 static int proc_fd_permission(struct inode *inode, int mask)
45765 {
45766+ struct task_struct *task;
45767 int rv = generic_permission(inode, mask);
45768- if (rv == 0)
45769- return 0;
45770+
45771 if (task_pid(current) == proc_pid(inode))
45772 rv = 0;
45773+
45774+ task = get_proc_task(inode);
45775+ if (task == NULL)
45776+ return rv;
45777+
45778+ if (gr_acl_handle_procpidmem(task))
45779+ rv = -EACCES;
45780+
45781+ put_task_struct(task);
45782+
45783 return rv;
45784 }
45785
45786@@ -2290,6 +2399,9 @@ static struct dentry *proc_pident_lookup
45787 if (!task)
45788 goto out_no_task;
45789
45790+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45791+ goto out;
45792+
45793 /*
45794 * Yes, it does not scale. And it should not. Don't add
45795 * new entries into /proc/<tgid>/ without very good reasons.
45796@@ -2334,6 +2446,9 @@ static int proc_pident_readdir(struct fi
45797 if (!task)
45798 goto out_no_task;
45799
45800+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45801+ goto out;
45802+
45803 ret = 0;
45804 i = filp->f_pos;
45805 switch (i) {
45806@@ -2604,7 +2719,7 @@ static void *proc_self_follow_link(struc
45807 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
45808 void *cookie)
45809 {
45810- char *s = nd_get_link(nd);
45811+ const char *s = nd_get_link(nd);
45812 if (!IS_ERR(s))
45813 __putname(s);
45814 }
45815@@ -2802,7 +2917,7 @@ static const struct pid_entry tgid_base_
45816 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
45817 #endif
45818 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
45819-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45820+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45821 INF("syscall", S_IRUGO, proc_pid_syscall),
45822 #endif
45823 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45824@@ -2827,10 +2942,10 @@ static const struct pid_entry tgid_base_
45825 #ifdef CONFIG_SECURITY
45826 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45827 #endif
45828-#ifdef CONFIG_KALLSYMS
45829+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45830 INF("wchan", S_IRUGO, proc_pid_wchan),
45831 #endif
45832-#ifdef CONFIG_STACKTRACE
45833+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45834 ONE("stack", S_IRUGO, proc_pid_stack),
45835 #endif
45836 #ifdef CONFIG_SCHEDSTATS
45837@@ -2864,6 +2979,9 @@ static const struct pid_entry tgid_base_
45838 #ifdef CONFIG_HARDWALL
45839 INF("hardwall", S_IRUGO, proc_pid_hardwall),
45840 #endif
45841+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45842+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
45843+#endif
45844 };
45845
45846 static int proc_tgid_base_readdir(struct file * filp,
45847@@ -2989,7 +3107,14 @@ static struct dentry *proc_pid_instantia
45848 if (!inode)
45849 goto out;
45850
45851+#ifdef CONFIG_GRKERNSEC_PROC_USER
45852+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
45853+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45854+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45855+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
45856+#else
45857 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
45858+#endif
45859 inode->i_op = &proc_tgid_base_inode_operations;
45860 inode->i_fop = &proc_tgid_base_operations;
45861 inode->i_flags|=S_IMMUTABLE;
45862@@ -3031,7 +3156,14 @@ struct dentry *proc_pid_lookup(struct in
45863 if (!task)
45864 goto out;
45865
45866+ if (!has_group_leader_pid(task))
45867+ goto out_put_task;
45868+
45869+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45870+ goto out_put_task;
45871+
45872 result = proc_pid_instantiate(dir, dentry, task, NULL);
45873+out_put_task:
45874 put_task_struct(task);
45875 out:
45876 return result;
45877@@ -3096,6 +3228,11 @@ int proc_pid_readdir(struct file * filp,
45878 {
45879 unsigned int nr;
45880 struct task_struct *reaper;
45881+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45882+ const struct cred *tmpcred = current_cred();
45883+ const struct cred *itercred;
45884+#endif
45885+ filldir_t __filldir = filldir;
45886 struct tgid_iter iter;
45887 struct pid_namespace *ns;
45888
45889@@ -3119,8 +3256,27 @@ int proc_pid_readdir(struct file * filp,
45890 for (iter = next_tgid(ns, iter);
45891 iter.task;
45892 iter.tgid += 1, iter = next_tgid(ns, iter)) {
45893+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45894+ rcu_read_lock();
45895+ itercred = __task_cred(iter.task);
45896+#endif
45897+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
45898+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45899+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
45900+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45901+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45902+#endif
45903+ )
45904+#endif
45905+ )
45906+ __filldir = &gr_fake_filldir;
45907+ else
45908+ __filldir = filldir;
45909+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45910+ rcu_read_unlock();
45911+#endif
45912 filp->f_pos = iter.tgid + TGID_OFFSET;
45913- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
45914+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
45915 put_task_struct(iter.task);
45916 goto out;
45917 }
45918@@ -3148,7 +3304,7 @@ static const struct pid_entry tid_base_s
45919 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
45920 #endif
45921 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
45922-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45923+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45924 INF("syscall", S_IRUGO, proc_pid_syscall),
45925 #endif
45926 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45927@@ -3172,10 +3328,10 @@ static const struct pid_entry tid_base_s
45928 #ifdef CONFIG_SECURITY
45929 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45930 #endif
45931-#ifdef CONFIG_KALLSYMS
45932+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45933 INF("wchan", S_IRUGO, proc_pid_wchan),
45934 #endif
45935-#ifdef CONFIG_STACKTRACE
45936+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45937 ONE("stack", S_IRUGO, proc_pid_stack),
45938 #endif
45939 #ifdef CONFIG_SCHEDSTATS
45940diff -urNp linux-3.1.1/fs/proc/cmdline.c linux-3.1.1/fs/proc/cmdline.c
45941--- linux-3.1.1/fs/proc/cmdline.c 2011-11-11 15:19:27.000000000 -0500
45942+++ linux-3.1.1/fs/proc/cmdline.c 2011-11-16 18:40:29.000000000 -0500
45943@@ -23,7 +23,11 @@ static const struct file_operations cmdl
45944
45945 static int __init proc_cmdline_init(void)
45946 {
45947+#ifdef CONFIG_GRKERNSEC_PROC_ADD
45948+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
45949+#else
45950 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
45951+#endif
45952 return 0;
45953 }
45954 module_init(proc_cmdline_init);
45955diff -urNp linux-3.1.1/fs/proc/devices.c linux-3.1.1/fs/proc/devices.c
45956--- linux-3.1.1/fs/proc/devices.c 2011-11-11 15:19:27.000000000 -0500
45957+++ linux-3.1.1/fs/proc/devices.c 2011-11-16 18:40:29.000000000 -0500
45958@@ -64,7 +64,11 @@ static const struct file_operations proc
45959
45960 static int __init proc_devices_init(void)
45961 {
45962+#ifdef CONFIG_GRKERNSEC_PROC_ADD
45963+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
45964+#else
45965 proc_create("devices", 0, NULL, &proc_devinfo_operations);
45966+#endif
45967 return 0;
45968 }
45969 module_init(proc_devices_init);
45970diff -urNp linux-3.1.1/fs/proc/inode.c linux-3.1.1/fs/proc/inode.c
45971--- linux-3.1.1/fs/proc/inode.c 2011-11-11 15:19:27.000000000 -0500
45972+++ linux-3.1.1/fs/proc/inode.c 2011-11-16 18:40:29.000000000 -0500
45973@@ -18,12 +18,18 @@
45974 #include <linux/module.h>
45975 #include <linux/sysctl.h>
45976 #include <linux/slab.h>
45977+#include <linux/grsecurity.h>
45978
45979 #include <asm/system.h>
45980 #include <asm/uaccess.h>
45981
45982 #include "internal.h"
45983
45984+#ifdef CONFIG_PROC_SYSCTL
45985+extern const struct inode_operations proc_sys_inode_operations;
45986+extern const struct inode_operations proc_sys_dir_operations;
45987+#endif
45988+
45989 static void proc_evict_inode(struct inode *inode)
45990 {
45991 struct proc_dir_entry *de;
45992@@ -49,6 +55,13 @@ static void proc_evict_inode(struct inod
45993 ns_ops = PROC_I(inode)->ns_ops;
45994 if (ns_ops && ns_ops->put)
45995 ns_ops->put(PROC_I(inode)->ns);
45996+
45997+#ifdef CONFIG_PROC_SYSCTL
45998+ if (inode->i_op == &proc_sys_inode_operations ||
45999+ inode->i_op == &proc_sys_dir_operations)
46000+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
46001+#endif
46002+
46003 }
46004
46005 static struct kmem_cache * proc_inode_cachep;
46006@@ -440,7 +453,11 @@ struct inode *proc_get_inode(struct supe
46007 if (de->mode) {
46008 inode->i_mode = de->mode;
46009 inode->i_uid = de->uid;
46010+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
46011+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
46012+#else
46013 inode->i_gid = de->gid;
46014+#endif
46015 }
46016 if (de->size)
46017 inode->i_size = de->size;
46018diff -urNp linux-3.1.1/fs/proc/internal.h linux-3.1.1/fs/proc/internal.h
46019--- linux-3.1.1/fs/proc/internal.h 2011-11-11 15:19:27.000000000 -0500
46020+++ linux-3.1.1/fs/proc/internal.h 2011-11-16 18:40:29.000000000 -0500
46021@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
46022 struct pid *pid, struct task_struct *task);
46023 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
46024 struct pid *pid, struct task_struct *task);
46025+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
46026+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
46027+#endif
46028 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
46029
46030 extern const struct file_operations proc_maps_operations;
46031diff -urNp linux-3.1.1/fs/proc/Kconfig linux-3.1.1/fs/proc/Kconfig
46032--- linux-3.1.1/fs/proc/Kconfig 2011-11-11 15:19:27.000000000 -0500
46033+++ linux-3.1.1/fs/proc/Kconfig 2011-11-16 18:40:29.000000000 -0500
46034@@ -30,12 +30,12 @@ config PROC_FS
46035
46036 config PROC_KCORE
46037 bool "/proc/kcore support" if !ARM
46038- depends on PROC_FS && MMU
46039+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
46040
46041 config PROC_VMCORE
46042 bool "/proc/vmcore support"
46043- depends on PROC_FS && CRASH_DUMP
46044- default y
46045+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
46046+ default n
46047 help
46048 Exports the dump image of crashed kernel in ELF format.
46049
46050@@ -59,8 +59,8 @@ config PROC_SYSCTL
46051 limited in memory.
46052
46053 config PROC_PAGE_MONITOR
46054- default y
46055- depends on PROC_FS && MMU
46056+ default n
46057+ depends on PROC_FS && MMU && !GRKERNSEC
46058 bool "Enable /proc page monitoring" if EXPERT
46059 help
46060 Various /proc files exist to monitor process memory utilization:
46061diff -urNp linux-3.1.1/fs/proc/kcore.c linux-3.1.1/fs/proc/kcore.c
46062--- linux-3.1.1/fs/proc/kcore.c 2011-11-11 15:19:27.000000000 -0500
46063+++ linux-3.1.1/fs/proc/kcore.c 2011-11-16 18:40:29.000000000 -0500
46064@@ -321,6 +321,8 @@ static void elf_kcore_store_hdr(char *bu
46065 off_t offset = 0;
46066 struct kcore_list *m;
46067
46068+ pax_track_stack();
46069+
46070 /* setup ELF header */
46071 elf = (struct elfhdr *) bufp;
46072 bufp += sizeof(struct elfhdr);
46073@@ -478,9 +480,10 @@ read_kcore(struct file *file, char __use
46074 * the addresses in the elf_phdr on our list.
46075 */
46076 start = kc_offset_to_vaddr(*fpos - elf_buflen);
46077- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
46078+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
46079+ if (tsz > buflen)
46080 tsz = buflen;
46081-
46082+
46083 while (buflen) {
46084 struct kcore_list *m;
46085
46086@@ -509,20 +512,23 @@ read_kcore(struct file *file, char __use
46087 kfree(elf_buf);
46088 } else {
46089 if (kern_addr_valid(start)) {
46090- unsigned long n;
46091+ char *elf_buf;
46092+ mm_segment_t oldfs;
46093
46094- n = copy_to_user(buffer, (char *)start, tsz);
46095- /*
46096- * We cannot distingush between fault on source
46097- * and fault on destination. When this happens
46098- * we clear too and hope it will trigger the
46099- * EFAULT again.
46100- */
46101- if (n) {
46102- if (clear_user(buffer + tsz - n,
46103- n))
46104+ elf_buf = kmalloc(tsz, GFP_KERNEL);
46105+ if (!elf_buf)
46106+ return -ENOMEM;
46107+ oldfs = get_fs();
46108+ set_fs(KERNEL_DS);
46109+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
46110+ set_fs(oldfs);
46111+ if (copy_to_user(buffer, elf_buf, tsz)) {
46112+ kfree(elf_buf);
46113 return -EFAULT;
46114+ }
46115 }
46116+ set_fs(oldfs);
46117+ kfree(elf_buf);
46118 } else {
46119 if (clear_user(buffer, tsz))
46120 return -EFAULT;
46121@@ -542,6 +548,9 @@ read_kcore(struct file *file, char __use
46122
46123 static int open_kcore(struct inode *inode, struct file *filp)
46124 {
46125+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
46126+ return -EPERM;
46127+#endif
46128 if (!capable(CAP_SYS_RAWIO))
46129 return -EPERM;
46130 if (kcore_need_update)
46131diff -urNp linux-3.1.1/fs/proc/meminfo.c linux-3.1.1/fs/proc/meminfo.c
46132--- linux-3.1.1/fs/proc/meminfo.c 2011-11-11 15:19:27.000000000 -0500
46133+++ linux-3.1.1/fs/proc/meminfo.c 2011-11-16 18:40:29.000000000 -0500
46134@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
46135 unsigned long pages[NR_LRU_LISTS];
46136 int lru;
46137
46138+ pax_track_stack();
46139+
46140 /*
46141 * display in kilobytes.
46142 */
46143@@ -157,7 +159,7 @@ static int meminfo_proc_show(struct seq_
46144 vmi.used >> 10,
46145 vmi.largest_chunk >> 10
46146 #ifdef CONFIG_MEMORY_FAILURE
46147- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
46148+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
46149 #endif
46150 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
46151 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
46152diff -urNp linux-3.1.1/fs/proc/nommu.c linux-3.1.1/fs/proc/nommu.c
46153--- linux-3.1.1/fs/proc/nommu.c 2011-11-11 15:19:27.000000000 -0500
46154+++ linux-3.1.1/fs/proc/nommu.c 2011-11-16 18:39:08.000000000 -0500
46155@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_
46156 if (len < 1)
46157 len = 1;
46158 seq_printf(m, "%*c", len, ' ');
46159- seq_path(m, &file->f_path, "");
46160+ seq_path(m, &file->f_path, "\n\\");
46161 }
46162
46163 seq_putc(m, '\n');
46164diff -urNp linux-3.1.1/fs/proc/proc_net.c linux-3.1.1/fs/proc/proc_net.c
46165--- linux-3.1.1/fs/proc/proc_net.c 2011-11-11 15:19:27.000000000 -0500
46166+++ linux-3.1.1/fs/proc/proc_net.c 2011-11-16 18:40:29.000000000 -0500
46167@@ -105,6 +105,17 @@ static struct net *get_proc_task_net(str
46168 struct task_struct *task;
46169 struct nsproxy *ns;
46170 struct net *net = NULL;
46171+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46172+ const struct cred *cred = current_cred();
46173+#endif
46174+
46175+#ifdef CONFIG_GRKERNSEC_PROC_USER
46176+ if (cred->fsuid)
46177+ return net;
46178+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46179+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
46180+ return net;
46181+#endif
46182
46183 rcu_read_lock();
46184 task = pid_task(proc_pid(dir), PIDTYPE_PID);
46185diff -urNp linux-3.1.1/fs/proc/proc_sysctl.c linux-3.1.1/fs/proc/proc_sysctl.c
46186--- linux-3.1.1/fs/proc/proc_sysctl.c 2011-11-11 15:19:27.000000000 -0500
46187+++ linux-3.1.1/fs/proc/proc_sysctl.c 2011-11-18 18:45:33.000000000 -0500
46188@@ -8,11 +8,13 @@
46189 #include <linux/namei.h>
46190 #include "internal.h"
46191
46192+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
46193+
46194 static const struct dentry_operations proc_sys_dentry_operations;
46195 static const struct file_operations proc_sys_file_operations;
46196-static const struct inode_operations proc_sys_inode_operations;
46197+const struct inode_operations proc_sys_inode_operations;
46198 static const struct file_operations proc_sys_dir_file_operations;
46199-static const struct inode_operations proc_sys_dir_operations;
46200+const struct inode_operations proc_sys_dir_operations;
46201
46202 static struct inode *proc_sys_make_inode(struct super_block *sb,
46203 struct ctl_table_header *head, struct ctl_table *table)
46204@@ -121,8 +123,14 @@ static struct dentry *proc_sys_lookup(st
46205
46206 err = NULL;
46207 d_set_d_op(dentry, &proc_sys_dentry_operations);
46208+
46209+ gr_handle_proc_create(dentry, inode);
46210+
46211 d_add(dentry, inode);
46212
46213+ if (gr_handle_sysctl(p, MAY_EXEC))
46214+ err = ERR_PTR(-ENOENT);
46215+
46216 out:
46217 sysctl_head_finish(head);
46218 return err;
46219@@ -202,6 +210,9 @@ static int proc_sys_fill_cache(struct fi
46220 return -ENOMEM;
46221 } else {
46222 d_set_d_op(child, &proc_sys_dentry_operations);
46223+
46224+ gr_handle_proc_create(child, inode);
46225+
46226 d_add(child, inode);
46227 }
46228 } else {
46229@@ -230,6 +241,9 @@ static int scan(struct ctl_table_header
46230 if (*pos < file->f_pos)
46231 continue;
46232
46233+ if (gr_handle_sysctl(table, 0))
46234+ continue;
46235+
46236 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
46237 if (res)
46238 return res;
46239@@ -355,6 +369,9 @@ static int proc_sys_getattr(struct vfsmo
46240 if (IS_ERR(head))
46241 return PTR_ERR(head);
46242
46243+ if (table && gr_handle_sysctl(table, MAY_EXEC))
46244+ return -ENOENT;
46245+
46246 generic_fillattr(inode, stat);
46247 if (table)
46248 stat->mode = (stat->mode & S_IFMT) | table->mode;
46249@@ -370,17 +387,18 @@ static const struct file_operations proc
46250 };
46251
46252 static const struct file_operations proc_sys_dir_file_operations = {
46253+ .read = generic_read_dir,
46254 .readdir = proc_sys_readdir,
46255 .llseek = generic_file_llseek,
46256 };
46257
46258-static const struct inode_operations proc_sys_inode_operations = {
46259+const struct inode_operations proc_sys_inode_operations = {
46260 .permission = proc_sys_permission,
46261 .setattr = proc_sys_setattr,
46262 .getattr = proc_sys_getattr,
46263 };
46264
46265-static const struct inode_operations proc_sys_dir_operations = {
46266+const struct inode_operations proc_sys_dir_operations = {
46267 .lookup = proc_sys_lookup,
46268 .permission = proc_sys_permission,
46269 .setattr = proc_sys_setattr,
46270diff -urNp linux-3.1.1/fs/proc/root.c linux-3.1.1/fs/proc/root.c
46271--- linux-3.1.1/fs/proc/root.c 2011-11-11 15:19:27.000000000 -0500
46272+++ linux-3.1.1/fs/proc/root.c 2011-11-16 18:40:29.000000000 -0500
46273@@ -123,7 +123,15 @@ void __init proc_root_init(void)
46274 #ifdef CONFIG_PROC_DEVICETREE
46275 proc_device_tree_init();
46276 #endif
46277+#ifdef CONFIG_GRKERNSEC_PROC_ADD
46278+#ifdef CONFIG_GRKERNSEC_PROC_USER
46279+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
46280+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46281+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
46282+#endif
46283+#else
46284 proc_mkdir("bus", NULL);
46285+#endif
46286 proc_sys_init();
46287 }
46288
46289diff -urNp linux-3.1.1/fs/proc/task_mmu.c linux-3.1.1/fs/proc/task_mmu.c
46290--- linux-3.1.1/fs/proc/task_mmu.c 2011-11-11 15:19:27.000000000 -0500
46291+++ linux-3.1.1/fs/proc/task_mmu.c 2011-11-16 18:40:29.000000000 -0500
46292@@ -51,8 +51,13 @@ void task_mem(struct seq_file *m, struct
46293 "VmExe:\t%8lu kB\n"
46294 "VmLib:\t%8lu kB\n"
46295 "VmPTE:\t%8lu kB\n"
46296- "VmSwap:\t%8lu kB\n",
46297- hiwater_vm << (PAGE_SHIFT-10),
46298+ "VmSwap:\t%8lu kB\n"
46299+
46300+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46301+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
46302+#endif
46303+
46304+ ,hiwater_vm << (PAGE_SHIFT-10),
46305 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
46306 mm->locked_vm << (PAGE_SHIFT-10),
46307 hiwater_rss << (PAGE_SHIFT-10),
46308@@ -60,7 +65,13 @@ void task_mem(struct seq_file *m, struct
46309 data << (PAGE_SHIFT-10),
46310 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
46311 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
46312- swap << (PAGE_SHIFT-10));
46313+ swap << (PAGE_SHIFT-10)
46314+
46315+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46316+ , mm->context.user_cs_base, mm->context.user_cs_limit
46317+#endif
46318+
46319+ );
46320 }
46321
46322 unsigned long task_vsize(struct mm_struct *mm)
46323@@ -207,6 +218,12 @@ static int do_maps_open(struct inode *in
46324 return ret;
46325 }
46326
46327+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46328+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
46329+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
46330+ _mm->pax_flags & MF_PAX_SEGMEXEC))
46331+#endif
46332+
46333 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46334 {
46335 struct mm_struct *mm = vma->vm_mm;
46336@@ -225,13 +242,13 @@ static void show_map_vma(struct seq_file
46337 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
46338 }
46339
46340- /* We don't show the stack guard page in /proc/maps */
46341+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46342+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
46343+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
46344+#else
46345 start = vma->vm_start;
46346- if (stack_guard_page_start(vma, start))
46347- start += PAGE_SIZE;
46348 end = vma->vm_end;
46349- if (stack_guard_page_end(vma, end))
46350- end -= PAGE_SIZE;
46351+#endif
46352
46353 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
46354 start,
46355@@ -240,7 +257,11 @@ static void show_map_vma(struct seq_file
46356 flags & VM_WRITE ? 'w' : '-',
46357 flags & VM_EXEC ? 'x' : '-',
46358 flags & VM_MAYSHARE ? 's' : 'p',
46359+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46360+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
46361+#else
46362 pgoff,
46363+#endif
46364 MAJOR(dev), MINOR(dev), ino, &len);
46365
46366 /*
46367@@ -249,7 +270,7 @@ static void show_map_vma(struct seq_file
46368 */
46369 if (file) {
46370 pad_len_spaces(m, len);
46371- seq_path(m, &file->f_path, "\n");
46372+ seq_path(m, &file->f_path, "\n\\");
46373 } else {
46374 const char *name = arch_vma_name(vma);
46375 if (!name) {
46376@@ -257,8 +278,9 @@ static void show_map_vma(struct seq_file
46377 if (vma->vm_start <= mm->brk &&
46378 vma->vm_end >= mm->start_brk) {
46379 name = "[heap]";
46380- } else if (vma->vm_start <= mm->start_stack &&
46381- vma->vm_end >= mm->start_stack) {
46382+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
46383+ (vma->vm_start <= mm->start_stack &&
46384+ vma->vm_end >= mm->start_stack)) {
46385 name = "[stack]";
46386 }
46387 } else {
46388@@ -433,11 +455,16 @@ static int show_smap(struct seq_file *m,
46389 };
46390
46391 memset(&mss, 0, sizeof mss);
46392- mss.vma = vma;
46393- /* mmap_sem is held in m_start */
46394- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46395- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46396-
46397+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46398+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
46399+#endif
46400+ mss.vma = vma;
46401+ /* mmap_sem is held in m_start */
46402+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46403+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46404+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46405+ }
46406+#endif
46407 show_map_vma(m, vma);
46408
46409 seq_printf(m,
46410@@ -455,7 +482,11 @@ static int show_smap(struct seq_file *m,
46411 "KernelPageSize: %8lu kB\n"
46412 "MMUPageSize: %8lu kB\n"
46413 "Locked: %8lu kB\n",
46414+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46415+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
46416+#else
46417 (vma->vm_end - vma->vm_start) >> 10,
46418+#endif
46419 mss.resident >> 10,
46420 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
46421 mss.shared_clean >> 10,
46422@@ -1031,7 +1062,7 @@ static int show_numa_map(struct seq_file
46423
46424 if (file) {
46425 seq_printf(m, " file=");
46426- seq_path(m, &file->f_path, "\n\t= ");
46427+ seq_path(m, &file->f_path, "\n\t\\= ");
46428 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
46429 seq_printf(m, " heap");
46430 } else if (vma->vm_start <= mm->start_stack &&
46431diff -urNp linux-3.1.1/fs/proc/task_nommu.c linux-3.1.1/fs/proc/task_nommu.c
46432--- linux-3.1.1/fs/proc/task_nommu.c 2011-11-11 15:19:27.000000000 -0500
46433+++ linux-3.1.1/fs/proc/task_nommu.c 2011-11-16 18:39:08.000000000 -0500
46434@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct
46435 else
46436 bytes += kobjsize(mm);
46437
46438- if (current->fs && current->fs->users > 1)
46439+ if (current->fs && atomic_read(&current->fs->users) > 1)
46440 sbytes += kobjsize(current->fs);
46441 else
46442 bytes += kobjsize(current->fs);
46443@@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_fil
46444
46445 if (file) {
46446 pad_len_spaces(m, len);
46447- seq_path(m, &file->f_path, "");
46448+ seq_path(m, &file->f_path, "\n\\");
46449 } else if (mm) {
46450 if (vma->vm_start <= mm->start_stack &&
46451 vma->vm_end >= mm->start_stack) {
46452diff -urNp linux-3.1.1/fs/quota/netlink.c linux-3.1.1/fs/quota/netlink.c
46453--- linux-3.1.1/fs/quota/netlink.c 2011-11-11 15:19:27.000000000 -0500
46454+++ linux-3.1.1/fs/quota/netlink.c 2011-11-16 18:39:08.000000000 -0500
46455@@ -33,7 +33,7 @@ static struct genl_family quota_genl_fam
46456 void quota_send_warning(short type, unsigned int id, dev_t dev,
46457 const char warntype)
46458 {
46459- static atomic_t seq;
46460+ static atomic_unchecked_t seq;
46461 struct sk_buff *skb;
46462 void *msg_head;
46463 int ret;
46464@@ -49,7 +49,7 @@ void quota_send_warning(short type, unsi
46465 "VFS: Not enough memory to send quota warning.\n");
46466 return;
46467 }
46468- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
46469+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
46470 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
46471 if (!msg_head) {
46472 printk(KERN_ERR
46473diff -urNp linux-3.1.1/fs/readdir.c linux-3.1.1/fs/readdir.c
46474--- linux-3.1.1/fs/readdir.c 2011-11-11 15:19:27.000000000 -0500
46475+++ linux-3.1.1/fs/readdir.c 2011-11-16 18:40:29.000000000 -0500
46476@@ -17,6 +17,7 @@
46477 #include <linux/security.h>
46478 #include <linux/syscalls.h>
46479 #include <linux/unistd.h>
46480+#include <linux/namei.h>
46481
46482 #include <asm/uaccess.h>
46483
46484@@ -67,6 +68,7 @@ struct old_linux_dirent {
46485
46486 struct readdir_callback {
46487 struct old_linux_dirent __user * dirent;
46488+ struct file * file;
46489 int result;
46490 };
46491
46492@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
46493 buf->result = -EOVERFLOW;
46494 return -EOVERFLOW;
46495 }
46496+
46497+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46498+ return 0;
46499+
46500 buf->result++;
46501 dirent = buf->dirent;
46502 if (!access_ok(VERIFY_WRITE, dirent,
46503@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
46504
46505 buf.result = 0;
46506 buf.dirent = dirent;
46507+ buf.file = file;
46508
46509 error = vfs_readdir(file, fillonedir, &buf);
46510 if (buf.result)
46511@@ -142,6 +149,7 @@ struct linux_dirent {
46512 struct getdents_callback {
46513 struct linux_dirent __user * current_dir;
46514 struct linux_dirent __user * previous;
46515+ struct file * file;
46516 int count;
46517 int error;
46518 };
46519@@ -163,6 +171,10 @@ static int filldir(void * __buf, const c
46520 buf->error = -EOVERFLOW;
46521 return -EOVERFLOW;
46522 }
46523+
46524+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46525+ return 0;
46526+
46527 dirent = buf->previous;
46528 if (dirent) {
46529 if (__put_user(offset, &dirent->d_off))
46530@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
46531 buf.previous = NULL;
46532 buf.count = count;
46533 buf.error = 0;
46534+ buf.file = file;
46535
46536 error = vfs_readdir(file, filldir, &buf);
46537 if (error >= 0)
46538@@ -229,6 +242,7 @@ out:
46539 struct getdents_callback64 {
46540 struct linux_dirent64 __user * current_dir;
46541 struct linux_dirent64 __user * previous;
46542+ struct file *file;
46543 int count;
46544 int error;
46545 };
46546@@ -244,6 +258,10 @@ static int filldir64(void * __buf, const
46547 buf->error = -EINVAL; /* only used if we fail.. */
46548 if (reclen > buf->count)
46549 return -EINVAL;
46550+
46551+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46552+ return 0;
46553+
46554 dirent = buf->previous;
46555 if (dirent) {
46556 if (__put_user(offset, &dirent->d_off))
46557@@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
46558
46559 buf.current_dir = dirent;
46560 buf.previous = NULL;
46561+ buf.file = file;
46562 buf.count = count;
46563 buf.error = 0;
46564
46565@@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
46566 error = buf.error;
46567 lastdirent = buf.previous;
46568 if (lastdirent) {
46569- typeof(lastdirent->d_off) d_off = file->f_pos;
46570+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
46571 if (__put_user(d_off, &lastdirent->d_off))
46572 error = -EFAULT;
46573 else
46574diff -urNp linux-3.1.1/fs/reiserfs/dir.c linux-3.1.1/fs/reiserfs/dir.c
46575--- linux-3.1.1/fs/reiserfs/dir.c 2011-11-11 15:19:27.000000000 -0500
46576+++ linux-3.1.1/fs/reiserfs/dir.c 2011-11-16 18:40:29.000000000 -0500
46577@@ -75,6 +75,8 @@ int reiserfs_readdir_dentry(struct dentr
46578 struct reiserfs_dir_entry de;
46579 int ret = 0;
46580
46581+ pax_track_stack();
46582+
46583 reiserfs_write_lock(inode->i_sb);
46584
46585 reiserfs_check_lock_depth(inode->i_sb, "readdir");
46586diff -urNp linux-3.1.1/fs/reiserfs/do_balan.c linux-3.1.1/fs/reiserfs/do_balan.c
46587--- linux-3.1.1/fs/reiserfs/do_balan.c 2011-11-11 15:19:27.000000000 -0500
46588+++ linux-3.1.1/fs/reiserfs/do_balan.c 2011-11-16 18:39:08.000000000 -0500
46589@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb,
46590 return;
46591 }
46592
46593- atomic_inc(&(fs_generation(tb->tb_sb)));
46594+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
46595 do_balance_starts(tb);
46596
46597 /* balance leaf returns 0 except if combining L R and S into
46598diff -urNp linux-3.1.1/fs/reiserfs/journal.c linux-3.1.1/fs/reiserfs/journal.c
46599--- linux-3.1.1/fs/reiserfs/journal.c 2011-11-11 15:19:27.000000000 -0500
46600+++ linux-3.1.1/fs/reiserfs/journal.c 2011-11-16 18:40:29.000000000 -0500
46601@@ -2289,6 +2289,8 @@ static struct buffer_head *reiserfs_brea
46602 struct buffer_head *bh;
46603 int i, j;
46604
46605+ pax_track_stack();
46606+
46607 bh = __getblk(dev, block, bufsize);
46608 if (buffer_uptodate(bh))
46609 return (bh);
46610diff -urNp linux-3.1.1/fs/reiserfs/namei.c linux-3.1.1/fs/reiserfs/namei.c
46611--- linux-3.1.1/fs/reiserfs/namei.c 2011-11-11 15:19:27.000000000 -0500
46612+++ linux-3.1.1/fs/reiserfs/namei.c 2011-11-16 18:40:29.000000000 -0500
46613@@ -1225,6 +1225,8 @@ static int reiserfs_rename(struct inode
46614 unsigned long savelink = 1;
46615 struct timespec ctime;
46616
46617+ pax_track_stack();
46618+
46619 /* three balancings: (1) old name removal, (2) new name insertion
46620 and (3) maybe "save" link insertion
46621 stat data updates: (1) old directory,
46622diff -urNp linux-3.1.1/fs/reiserfs/procfs.c linux-3.1.1/fs/reiserfs/procfs.c
46623--- linux-3.1.1/fs/reiserfs/procfs.c 2011-11-11 15:19:27.000000000 -0500
46624+++ linux-3.1.1/fs/reiserfs/procfs.c 2011-11-16 18:40:29.000000000 -0500
46625@@ -113,7 +113,7 @@ static int show_super(struct seq_file *m
46626 "SMALL_TAILS " : "NO_TAILS ",
46627 replay_only(sb) ? "REPLAY_ONLY " : "",
46628 convert_reiserfs(sb) ? "CONV " : "",
46629- atomic_read(&r->s_generation_counter),
46630+ atomic_read_unchecked(&r->s_generation_counter),
46631 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
46632 SF(s_do_balance), SF(s_unneeded_left_neighbor),
46633 SF(s_good_search_by_key_reada), SF(s_bmaps),
46634@@ -299,6 +299,8 @@ static int show_journal(struct seq_file
46635 struct journal_params *jp = &rs->s_v1.s_journal;
46636 char b[BDEVNAME_SIZE];
46637
46638+ pax_track_stack();
46639+
46640 seq_printf(m, /* on-disk fields */
46641 "jp_journal_1st_block: \t%i\n"
46642 "jp_journal_dev: \t%s[%x]\n"
46643diff -urNp linux-3.1.1/fs/reiserfs/stree.c linux-3.1.1/fs/reiserfs/stree.c
46644--- linux-3.1.1/fs/reiserfs/stree.c 2011-11-11 15:19:27.000000000 -0500
46645+++ linux-3.1.1/fs/reiserfs/stree.c 2011-11-16 18:40:29.000000000 -0500
46646@@ -1196,6 +1196,8 @@ int reiserfs_delete_item(struct reiserfs
46647 int iter = 0;
46648 #endif
46649
46650+ pax_track_stack();
46651+
46652 BUG_ON(!th->t_trans_id);
46653
46654 init_tb_struct(th, &s_del_balance, sb, path,
46655@@ -1333,6 +1335,8 @@ void reiserfs_delete_solid_item(struct r
46656 int retval;
46657 int quota_cut_bytes = 0;
46658
46659+ pax_track_stack();
46660+
46661 BUG_ON(!th->t_trans_id);
46662
46663 le_key2cpu_key(&cpu_key, key);
46664@@ -1562,6 +1566,8 @@ int reiserfs_cut_from_item(struct reiser
46665 int quota_cut_bytes;
46666 loff_t tail_pos = 0;
46667
46668+ pax_track_stack();
46669+
46670 BUG_ON(!th->t_trans_id);
46671
46672 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
46673@@ -1957,6 +1963,8 @@ int reiserfs_paste_into_item(struct reis
46674 int retval;
46675 int fs_gen;
46676
46677+ pax_track_stack();
46678+
46679 BUG_ON(!th->t_trans_id);
46680
46681 fs_gen = get_generation(inode->i_sb);
46682@@ -2045,6 +2053,8 @@ int reiserfs_insert_item(struct reiserfs
46683 int fs_gen = 0;
46684 int quota_bytes = 0;
46685
46686+ pax_track_stack();
46687+
46688 BUG_ON(!th->t_trans_id);
46689
46690 if (inode) { /* Do we count quotas for item? */
46691diff -urNp linux-3.1.1/fs/reiserfs/super.c linux-3.1.1/fs/reiserfs/super.c
46692--- linux-3.1.1/fs/reiserfs/super.c 2011-11-11 15:19:27.000000000 -0500
46693+++ linux-3.1.1/fs/reiserfs/super.c 2011-11-16 18:40:29.000000000 -0500
46694@@ -927,6 +927,8 @@ static int reiserfs_parse_options(struct
46695 {.option_name = NULL}
46696 };
46697
46698+ pax_track_stack();
46699+
46700 *blocks = 0;
46701 if (!options || !*options)
46702 /* use default configuration: create tails, journaling on, no
46703diff -urNp linux-3.1.1/fs/select.c linux-3.1.1/fs/select.c
46704--- linux-3.1.1/fs/select.c 2011-11-11 15:19:27.000000000 -0500
46705+++ linux-3.1.1/fs/select.c 2011-11-16 18:40:29.000000000 -0500
46706@@ -20,6 +20,7 @@
46707 #include <linux/module.h>
46708 #include <linux/slab.h>
46709 #include <linux/poll.h>
46710+#include <linux/security.h>
46711 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
46712 #include <linux/file.h>
46713 #include <linux/fdtable.h>
46714@@ -403,6 +404,8 @@ int do_select(int n, fd_set_bits *fds, s
46715 int retval, i, timed_out = 0;
46716 unsigned long slack = 0;
46717
46718+ pax_track_stack();
46719+
46720 rcu_read_lock();
46721 retval = max_select_fd(n, fds);
46722 rcu_read_unlock();
46723@@ -528,6 +531,8 @@ int core_sys_select(int n, fd_set __user
46724 /* Allocate small arguments on the stack to save memory and be faster */
46725 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
46726
46727+ pax_track_stack();
46728+
46729 ret = -EINVAL;
46730 if (n < 0)
46731 goto out_nofds;
46732@@ -837,6 +842,9 @@ int do_sys_poll(struct pollfd __user *uf
46733 struct poll_list *walk = head;
46734 unsigned long todo = nfds;
46735
46736+ pax_track_stack();
46737+
46738+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
46739 if (nfds > rlimit(RLIMIT_NOFILE))
46740 return -EINVAL;
46741
46742diff -urNp linux-3.1.1/fs/seq_file.c linux-3.1.1/fs/seq_file.c
46743--- linux-3.1.1/fs/seq_file.c 2011-11-11 15:19:27.000000000 -0500
46744+++ linux-3.1.1/fs/seq_file.c 2011-11-16 18:39:08.000000000 -0500
46745@@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
46746 return 0;
46747 }
46748 if (!m->buf) {
46749- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46750+ m->size = PAGE_SIZE;
46751+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46752 if (!m->buf)
46753 return -ENOMEM;
46754 }
46755@@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
46756 Eoverflow:
46757 m->op->stop(m, p);
46758 kfree(m->buf);
46759- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46760+ m->size <<= 1;
46761+ m->buf = kmalloc(m->size, GFP_KERNEL);
46762 return !m->buf ? -ENOMEM : -EAGAIN;
46763 }
46764
46765@@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
46766 m->version = file->f_version;
46767 /* grab buffer if we didn't have one */
46768 if (!m->buf) {
46769- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46770+ m->size = PAGE_SIZE;
46771+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46772 if (!m->buf)
46773 goto Enomem;
46774 }
46775@@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
46776 goto Fill;
46777 m->op->stop(m, p);
46778 kfree(m->buf);
46779- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46780+ m->size <<= 1;
46781+ m->buf = kmalloc(m->size, GFP_KERNEL);
46782 if (!m->buf)
46783 goto Enomem;
46784 m->count = 0;
46785@@ -549,7 +553,7 @@ static void single_stop(struct seq_file
46786 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
46787 void *data)
46788 {
46789- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
46790+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
46791 int res = -ENOMEM;
46792
46793 if (op) {
46794diff -urNp linux-3.1.1/fs/splice.c linux-3.1.1/fs/splice.c
46795--- linux-3.1.1/fs/splice.c 2011-11-11 15:19:27.000000000 -0500
46796+++ linux-3.1.1/fs/splice.c 2011-11-16 18:40:29.000000000 -0500
46797@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode
46798 pipe_lock(pipe);
46799
46800 for (;;) {
46801- if (!pipe->readers) {
46802+ if (!atomic_read(&pipe->readers)) {
46803 send_sig(SIGPIPE, current, 0);
46804 if (!ret)
46805 ret = -EPIPE;
46806@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode
46807 do_wakeup = 0;
46808 }
46809
46810- pipe->waiting_writers++;
46811+ atomic_inc(&pipe->waiting_writers);
46812 pipe_wait(pipe);
46813- pipe->waiting_writers--;
46814+ atomic_dec(&pipe->waiting_writers);
46815 }
46816
46817 pipe_unlock(pipe);
46818@@ -320,6 +320,8 @@ __generic_file_splice_read(struct file *
46819 .spd_release = spd_release_page,
46820 };
46821
46822+ pax_track_stack();
46823+
46824 if (splice_grow_spd(pipe, &spd))
46825 return -ENOMEM;
46826
46827@@ -560,7 +562,7 @@ static ssize_t kernel_readv(struct file
46828 old_fs = get_fs();
46829 set_fs(get_ds());
46830 /* The cast to a user pointer is valid due to the set_fs() */
46831- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
46832+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
46833 set_fs(old_fs);
46834
46835 return res;
46836@@ -575,7 +577,7 @@ static ssize_t kernel_write(struct file
46837 old_fs = get_fs();
46838 set_fs(get_ds());
46839 /* The cast to a user pointer is valid due to the set_fs() */
46840- res = vfs_write(file, (const char __user *)buf, count, &pos);
46841+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
46842 set_fs(old_fs);
46843
46844 return res;
46845@@ -603,6 +605,8 @@ ssize_t default_file_splice_read(struct
46846 .spd_release = spd_release_page,
46847 };
46848
46849+ pax_track_stack();
46850+
46851 if (splice_grow_spd(pipe, &spd))
46852 return -ENOMEM;
46853
46854@@ -626,7 +630,7 @@ ssize_t default_file_splice_read(struct
46855 goto err;
46856
46857 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
46858- vec[i].iov_base = (void __user *) page_address(page);
46859+ vec[i].iov_base = (void __force_user *) page_address(page);
46860 vec[i].iov_len = this_len;
46861 spd.pages[i] = page;
46862 spd.nr_pages++;
46863@@ -846,10 +850,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
46864 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
46865 {
46866 while (!pipe->nrbufs) {
46867- if (!pipe->writers)
46868+ if (!atomic_read(&pipe->writers))
46869 return 0;
46870
46871- if (!pipe->waiting_writers && sd->num_spliced)
46872+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
46873 return 0;
46874
46875 if (sd->flags & SPLICE_F_NONBLOCK)
46876@@ -1182,7 +1186,7 @@ ssize_t splice_direct_to_actor(struct fi
46877 * out of the pipe right after the splice_to_pipe(). So set
46878 * PIPE_READERS appropriately.
46879 */
46880- pipe->readers = 1;
46881+ atomic_set(&pipe->readers, 1);
46882
46883 current->splice_pipe = pipe;
46884 }
46885@@ -1619,6 +1623,8 @@ static long vmsplice_to_pipe(struct file
46886 };
46887 long ret;
46888
46889+ pax_track_stack();
46890+
46891 pipe = get_pipe_info(file);
46892 if (!pipe)
46893 return -EBADF;
46894@@ -1734,9 +1740,9 @@ static int ipipe_prep(struct pipe_inode_
46895 ret = -ERESTARTSYS;
46896 break;
46897 }
46898- if (!pipe->writers)
46899+ if (!atomic_read(&pipe->writers))
46900 break;
46901- if (!pipe->waiting_writers) {
46902+ if (!atomic_read(&pipe->waiting_writers)) {
46903 if (flags & SPLICE_F_NONBLOCK) {
46904 ret = -EAGAIN;
46905 break;
46906@@ -1768,7 +1774,7 @@ static int opipe_prep(struct pipe_inode_
46907 pipe_lock(pipe);
46908
46909 while (pipe->nrbufs >= pipe->buffers) {
46910- if (!pipe->readers) {
46911+ if (!atomic_read(&pipe->readers)) {
46912 send_sig(SIGPIPE, current, 0);
46913 ret = -EPIPE;
46914 break;
46915@@ -1781,9 +1787,9 @@ static int opipe_prep(struct pipe_inode_
46916 ret = -ERESTARTSYS;
46917 break;
46918 }
46919- pipe->waiting_writers++;
46920+ atomic_inc(&pipe->waiting_writers);
46921 pipe_wait(pipe);
46922- pipe->waiting_writers--;
46923+ atomic_dec(&pipe->waiting_writers);
46924 }
46925
46926 pipe_unlock(pipe);
46927@@ -1819,14 +1825,14 @@ retry:
46928 pipe_double_lock(ipipe, opipe);
46929
46930 do {
46931- if (!opipe->readers) {
46932+ if (!atomic_read(&opipe->readers)) {
46933 send_sig(SIGPIPE, current, 0);
46934 if (!ret)
46935 ret = -EPIPE;
46936 break;
46937 }
46938
46939- if (!ipipe->nrbufs && !ipipe->writers)
46940+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
46941 break;
46942
46943 /*
46944@@ -1923,7 +1929,7 @@ static int link_pipe(struct pipe_inode_i
46945 pipe_double_lock(ipipe, opipe);
46946
46947 do {
46948- if (!opipe->readers) {
46949+ if (!atomic_read(&opipe->readers)) {
46950 send_sig(SIGPIPE, current, 0);
46951 if (!ret)
46952 ret = -EPIPE;
46953@@ -1968,7 +1974,7 @@ static int link_pipe(struct pipe_inode_i
46954 * return EAGAIN if we have the potential of some data in the
46955 * future, otherwise just return 0
46956 */
46957- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
46958+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
46959 ret = -EAGAIN;
46960
46961 pipe_unlock(ipipe);
46962diff -urNp linux-3.1.1/fs/sysfs/file.c linux-3.1.1/fs/sysfs/file.c
46963--- linux-3.1.1/fs/sysfs/file.c 2011-11-11 15:19:27.000000000 -0500
46964+++ linux-3.1.1/fs/sysfs/file.c 2011-11-16 18:39:08.000000000 -0500
46965@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
46966
46967 struct sysfs_open_dirent {
46968 atomic_t refcnt;
46969- atomic_t event;
46970+ atomic_unchecked_t event;
46971 wait_queue_head_t poll;
46972 struct list_head buffers; /* goes through sysfs_buffer.list */
46973 };
46974@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentr
46975 if (!sysfs_get_active(attr_sd))
46976 return -ENODEV;
46977
46978- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
46979+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
46980 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
46981
46982 sysfs_put_active(attr_sd);
46983@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct
46984 return -ENOMEM;
46985
46986 atomic_set(&new_od->refcnt, 0);
46987- atomic_set(&new_od->event, 1);
46988+ atomic_set_unchecked(&new_od->event, 1);
46989 init_waitqueue_head(&new_od->poll);
46990 INIT_LIST_HEAD(&new_od->buffers);
46991 goto retry;
46992@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct fi
46993
46994 sysfs_put_active(attr_sd);
46995
46996- if (buffer->event != atomic_read(&od->event))
46997+ if (buffer->event != atomic_read_unchecked(&od->event))
46998 goto trigger;
46999
47000 return DEFAULT_POLLMASK;
47001@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_di
47002
47003 od = sd->s_attr.open;
47004 if (od) {
47005- atomic_inc(&od->event);
47006+ atomic_inc_unchecked(&od->event);
47007 wake_up_interruptible(&od->poll);
47008 }
47009
47010diff -urNp linux-3.1.1/fs/sysfs/mount.c linux-3.1.1/fs/sysfs/mount.c
47011--- linux-3.1.1/fs/sysfs/mount.c 2011-11-11 15:19:27.000000000 -0500
47012+++ linux-3.1.1/fs/sysfs/mount.c 2011-11-16 18:40:29.000000000 -0500
47013@@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
47014 .s_name = "",
47015 .s_count = ATOMIC_INIT(1),
47016 .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
47017+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
47018+ .s_mode = S_IFDIR | S_IRWXU,
47019+#else
47020 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
47021+#endif
47022 .s_ino = 1,
47023 };
47024
47025diff -urNp linux-3.1.1/fs/sysfs/symlink.c linux-3.1.1/fs/sysfs/symlink.c
47026--- linux-3.1.1/fs/sysfs/symlink.c 2011-11-11 15:19:27.000000000 -0500
47027+++ linux-3.1.1/fs/sysfs/symlink.c 2011-11-16 18:39:08.000000000 -0500
47028@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct de
47029
47030 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
47031 {
47032- char *page = nd_get_link(nd);
47033+ const char *page = nd_get_link(nd);
47034 if (!IS_ERR(page))
47035 free_page((unsigned long)page);
47036 }
47037diff -urNp linux-3.1.1/fs/udf/inode.c linux-3.1.1/fs/udf/inode.c
47038--- linux-3.1.1/fs/udf/inode.c 2011-11-11 15:19:27.000000000 -0500
47039+++ linux-3.1.1/fs/udf/inode.c 2011-11-16 18:40:29.000000000 -0500
47040@@ -560,6 +560,8 @@ static struct buffer_head *inode_getblk(
47041 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
47042 int lastblock = 0;
47043
47044+ pax_track_stack();
47045+
47046 prev_epos.offset = udf_file_entry_alloc_offset(inode);
47047 prev_epos.block = iinfo->i_location;
47048 prev_epos.bh = NULL;
47049diff -urNp linux-3.1.1/fs/udf/misc.c linux-3.1.1/fs/udf/misc.c
47050--- linux-3.1.1/fs/udf/misc.c 2011-11-11 15:19:27.000000000 -0500
47051+++ linux-3.1.1/fs/udf/misc.c 2011-11-16 18:39:08.000000000 -0500
47052@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
47053
47054 u8 udf_tag_checksum(const struct tag *t)
47055 {
47056- u8 *data = (u8 *)t;
47057+ const u8 *data = (const u8 *)t;
47058 u8 checksum = 0;
47059 int i;
47060 for (i = 0; i < sizeof(struct tag); ++i)
47061diff -urNp linux-3.1.1/fs/utimes.c linux-3.1.1/fs/utimes.c
47062--- linux-3.1.1/fs/utimes.c 2011-11-11 15:19:27.000000000 -0500
47063+++ linux-3.1.1/fs/utimes.c 2011-11-16 18:40:29.000000000 -0500
47064@@ -1,6 +1,7 @@
47065 #include <linux/compiler.h>
47066 #include <linux/file.h>
47067 #include <linux/fs.h>
47068+#include <linux/security.h>
47069 #include <linux/linkage.h>
47070 #include <linux/mount.h>
47071 #include <linux/namei.h>
47072@@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
47073 goto mnt_drop_write_and_out;
47074 }
47075 }
47076+
47077+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
47078+ error = -EACCES;
47079+ goto mnt_drop_write_and_out;
47080+ }
47081+
47082 mutex_lock(&inode->i_mutex);
47083 error = notify_change(path->dentry, &newattrs);
47084 mutex_unlock(&inode->i_mutex);
47085diff -urNp linux-3.1.1/fs/xattr_acl.c linux-3.1.1/fs/xattr_acl.c
47086--- linux-3.1.1/fs/xattr_acl.c 2011-11-11 15:19:27.000000000 -0500
47087+++ linux-3.1.1/fs/xattr_acl.c 2011-11-16 18:39:08.000000000 -0500
47088@@ -17,8 +17,8 @@
47089 struct posix_acl *
47090 posix_acl_from_xattr(const void *value, size_t size)
47091 {
47092- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
47093- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
47094+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
47095+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
47096 int count;
47097 struct posix_acl *acl;
47098 struct posix_acl_entry *acl_e;
47099diff -urNp linux-3.1.1/fs/xattr.c linux-3.1.1/fs/xattr.c
47100--- linux-3.1.1/fs/xattr.c 2011-11-11 15:19:27.000000000 -0500
47101+++ linux-3.1.1/fs/xattr.c 2011-11-16 18:40:29.000000000 -0500
47102@@ -254,7 +254,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
47103 * Extended attribute SET operations
47104 */
47105 static long
47106-setxattr(struct dentry *d, const char __user *name, const void __user *value,
47107+setxattr(struct path *path, const char __user *name, const void __user *value,
47108 size_t size, int flags)
47109 {
47110 int error;
47111@@ -278,7 +278,13 @@ setxattr(struct dentry *d, const char __
47112 return PTR_ERR(kvalue);
47113 }
47114
47115- error = vfs_setxattr(d, kname, kvalue, size, flags);
47116+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
47117+ error = -EACCES;
47118+ goto out;
47119+ }
47120+
47121+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
47122+out:
47123 kfree(kvalue);
47124 return error;
47125 }
47126@@ -295,7 +301,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
47127 return error;
47128 error = mnt_want_write(path.mnt);
47129 if (!error) {
47130- error = setxattr(path.dentry, name, value, size, flags);
47131+ error = setxattr(&path, name, value, size, flags);
47132 mnt_drop_write(path.mnt);
47133 }
47134 path_put(&path);
47135@@ -314,7 +320,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
47136 return error;
47137 error = mnt_want_write(path.mnt);
47138 if (!error) {
47139- error = setxattr(path.dentry, name, value, size, flags);
47140+ error = setxattr(&path, name, value, size, flags);
47141 mnt_drop_write(path.mnt);
47142 }
47143 path_put(&path);
47144@@ -325,17 +331,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
47145 const void __user *,value, size_t, size, int, flags)
47146 {
47147 struct file *f;
47148- struct dentry *dentry;
47149 int error = -EBADF;
47150
47151 f = fget(fd);
47152 if (!f)
47153 return error;
47154- dentry = f->f_path.dentry;
47155- audit_inode(NULL, dentry);
47156+ audit_inode(NULL, f->f_path.dentry);
47157 error = mnt_want_write_file(f);
47158 if (!error) {
47159- error = setxattr(dentry, name, value, size, flags);
47160+ error = setxattr(&f->f_path, name, value, size, flags);
47161 mnt_drop_write(f->f_path.mnt);
47162 }
47163 fput(f);
47164diff -urNp linux-3.1.1/fs/xfs/xfs_bmap.c linux-3.1.1/fs/xfs/xfs_bmap.c
47165--- linux-3.1.1/fs/xfs/xfs_bmap.c 2011-11-11 15:19:27.000000000 -0500
47166+++ linux-3.1.1/fs/xfs/xfs_bmap.c 2011-11-16 18:39:08.000000000 -0500
47167@@ -250,7 +250,7 @@ xfs_bmap_validate_ret(
47168 int nmap,
47169 int ret_nmap);
47170 #else
47171-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
47172+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
47173 #endif /* DEBUG */
47174
47175 STATIC int
47176diff -urNp linux-3.1.1/fs/xfs/xfs_dir2_sf.c linux-3.1.1/fs/xfs/xfs_dir2_sf.c
47177--- linux-3.1.1/fs/xfs/xfs_dir2_sf.c 2011-11-11 15:19:27.000000000 -0500
47178+++ linux-3.1.1/fs/xfs/xfs_dir2_sf.c 2011-11-16 18:39:08.000000000 -0500
47179@@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
47180 }
47181
47182 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
47183- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
47184+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
47185+ char name[sfep->namelen];
47186+ memcpy(name, sfep->name, sfep->namelen);
47187+ if (filldir(dirent, name, sfep->namelen,
47188+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
47189+ *offset = off & 0x7fffffff;
47190+ return 0;
47191+ }
47192+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
47193 off & 0x7fffffff, ino, DT_UNKNOWN)) {
47194 *offset = off & 0x7fffffff;
47195 return 0;
47196diff -urNp linux-3.1.1/fs/xfs/xfs_ioctl.c linux-3.1.1/fs/xfs/xfs_ioctl.c
47197--- linux-3.1.1/fs/xfs/xfs_ioctl.c 2011-11-11 15:19:27.000000000 -0500
47198+++ linux-3.1.1/fs/xfs/xfs_ioctl.c 2011-11-16 18:39:08.000000000 -0500
47199@@ -128,7 +128,7 @@ xfs_find_handle(
47200 }
47201
47202 error = -EFAULT;
47203- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
47204+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
47205 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
47206 goto out_put;
47207
47208diff -urNp linux-3.1.1/fs/xfs/xfs_iops.c linux-3.1.1/fs/xfs/xfs_iops.c
47209--- linux-3.1.1/fs/xfs/xfs_iops.c 2011-11-11 15:19:27.000000000 -0500
47210+++ linux-3.1.1/fs/xfs/xfs_iops.c 2011-11-16 18:39:08.000000000 -0500
47211@@ -446,7 +446,7 @@ xfs_vn_put_link(
47212 struct nameidata *nd,
47213 void *p)
47214 {
47215- char *s = nd_get_link(nd);
47216+ const char *s = nd_get_link(nd);
47217
47218 if (!IS_ERR(s))
47219 kfree(s);
47220diff -urNp linux-3.1.1/fs/xfs/xfs_vnodeops.c linux-3.1.1/fs/xfs/xfs_vnodeops.c
47221--- linux-3.1.1/fs/xfs/xfs_vnodeops.c 2011-11-11 15:19:27.000000000 -0500
47222+++ linux-3.1.1/fs/xfs/xfs_vnodeops.c 2011-11-18 18:54:56.000000000 -0500
47223@@ -123,13 +123,17 @@ xfs_readlink(
47224
47225 xfs_ilock(ip, XFS_ILOCK_SHARED);
47226
47227- ASSERT(S_ISLNK(ip->i_d.di_mode));
47228- ASSERT(ip->i_d.di_size <= MAXPATHLEN);
47229-
47230 pathlen = ip->i_d.di_size;
47231 if (!pathlen)
47232 goto out;
47233
47234+ if (pathlen > MAXPATHLEN) {
47235+ xfs_alert(mp, "%s: inode (%llu) symlink length (%d) too long",
47236+ __func__, (unsigned long long)ip->i_ino, pathlen);
47237+ ASSERT(0);
47238+ return XFS_ERROR(EFSCORRUPTED);
47239+ }
47240+
47241 if (ip->i_df.if_flags & XFS_IFINLINE) {
47242 memcpy(link, ip->i_df.if_u1.if_data, pathlen);
47243 link[pathlen] = '\0';
47244diff -urNp linux-3.1.1/grsecurity/gracl_alloc.c linux-3.1.1/grsecurity/gracl_alloc.c
47245--- linux-3.1.1/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
47246+++ linux-3.1.1/grsecurity/gracl_alloc.c 2011-11-16 18:40:31.000000000 -0500
47247@@ -0,0 +1,105 @@
47248+#include <linux/kernel.h>
47249+#include <linux/mm.h>
47250+#include <linux/slab.h>
47251+#include <linux/vmalloc.h>
47252+#include <linux/gracl.h>
47253+#include <linux/grsecurity.h>
47254+
47255+static unsigned long alloc_stack_next = 1;
47256+static unsigned long alloc_stack_size = 1;
47257+static void **alloc_stack;
47258+
47259+static __inline__ int
47260+alloc_pop(void)
47261+{
47262+ if (alloc_stack_next == 1)
47263+ return 0;
47264+
47265+ kfree(alloc_stack[alloc_stack_next - 2]);
47266+
47267+ alloc_stack_next--;
47268+
47269+ return 1;
47270+}
47271+
47272+static __inline__ int
47273+alloc_push(void *buf)
47274+{
47275+ if (alloc_stack_next >= alloc_stack_size)
47276+ return 1;
47277+
47278+ alloc_stack[alloc_stack_next - 1] = buf;
47279+
47280+ alloc_stack_next++;
47281+
47282+ return 0;
47283+}
47284+
47285+void *
47286+acl_alloc(unsigned long len)
47287+{
47288+ void *ret = NULL;
47289+
47290+ if (!len || len > PAGE_SIZE)
47291+ goto out;
47292+
47293+ ret = kmalloc(len, GFP_KERNEL);
47294+
47295+ if (ret) {
47296+ if (alloc_push(ret)) {
47297+ kfree(ret);
47298+ ret = NULL;
47299+ }
47300+ }
47301+
47302+out:
47303+ return ret;
47304+}
47305+
47306+void *
47307+acl_alloc_num(unsigned long num, unsigned long len)
47308+{
47309+ if (!len || (num > (PAGE_SIZE / len)))
47310+ return NULL;
47311+
47312+ return acl_alloc(num * len);
47313+}
47314+
47315+void
47316+acl_free_all(void)
47317+{
47318+ if (gr_acl_is_enabled() || !alloc_stack)
47319+ return;
47320+
47321+ while (alloc_pop()) ;
47322+
47323+ if (alloc_stack) {
47324+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
47325+ kfree(alloc_stack);
47326+ else
47327+ vfree(alloc_stack);
47328+ }
47329+
47330+ alloc_stack = NULL;
47331+ alloc_stack_size = 1;
47332+ alloc_stack_next = 1;
47333+
47334+ return;
47335+}
47336+
47337+int
47338+acl_alloc_stack_init(unsigned long size)
47339+{
47340+ if ((size * sizeof (void *)) <= PAGE_SIZE)
47341+ alloc_stack =
47342+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
47343+ else
47344+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
47345+
47346+ alloc_stack_size = size;
47347+
47348+ if (!alloc_stack)
47349+ return 0;
47350+ else
47351+ return 1;
47352+}
47353diff -urNp linux-3.1.1/grsecurity/gracl.c linux-3.1.1/grsecurity/gracl.c
47354--- linux-3.1.1/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
47355+++ linux-3.1.1/grsecurity/gracl.c 2011-11-16 19:31:00.000000000 -0500
47356@@ -0,0 +1,4156 @@
47357+#include <linux/kernel.h>
47358+#include <linux/module.h>
47359+#include <linux/sched.h>
47360+#include <linux/mm.h>
47361+#include <linux/file.h>
47362+#include <linux/fs.h>
47363+#include <linux/namei.h>
47364+#include <linux/mount.h>
47365+#include <linux/tty.h>
47366+#include <linux/proc_fs.h>
47367+#include <linux/lglock.h>
47368+#include <linux/slab.h>
47369+#include <linux/vmalloc.h>
47370+#include <linux/types.h>
47371+#include <linux/sysctl.h>
47372+#include <linux/netdevice.h>
47373+#include <linux/ptrace.h>
47374+#include <linux/gracl.h>
47375+#include <linux/gralloc.h>
47376+#include <linux/grsecurity.h>
47377+#include <linux/grinternal.h>
47378+#include <linux/pid_namespace.h>
47379+#include <linux/fdtable.h>
47380+#include <linux/percpu.h>
47381+
47382+#include <asm/uaccess.h>
47383+#include <asm/errno.h>
47384+#include <asm/mman.h>
47385+
47386+static struct acl_role_db acl_role_set;
47387+static struct name_db name_set;
47388+static struct inodev_db inodev_set;
47389+
47390+/* for keeping track of userspace pointers used for subjects, so we
47391+ can share references in the kernel as well
47392+*/
47393+
47394+static struct path real_root;
47395+
47396+static struct acl_subj_map_db subj_map_set;
47397+
47398+static struct acl_role_label *default_role;
47399+
47400+static struct acl_role_label *role_list;
47401+
47402+static u16 acl_sp_role_value;
47403+
47404+extern char *gr_shared_page[4];
47405+static DEFINE_MUTEX(gr_dev_mutex);
47406+DEFINE_RWLOCK(gr_inode_lock);
47407+
47408+struct gr_arg *gr_usermode;
47409+
47410+static unsigned int gr_status __read_only = GR_STATUS_INIT;
47411+
47412+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
47413+extern void gr_clear_learn_entries(void);
47414+
47415+#ifdef CONFIG_GRKERNSEC_RESLOG
47416+extern void gr_log_resource(const struct task_struct *task,
47417+ const int res, const unsigned long wanted, const int gt);
47418+#endif
47419+
47420+unsigned char *gr_system_salt;
47421+unsigned char *gr_system_sum;
47422+
47423+static struct sprole_pw **acl_special_roles = NULL;
47424+static __u16 num_sprole_pws = 0;
47425+
47426+static struct acl_role_label *kernel_role = NULL;
47427+
47428+static unsigned int gr_auth_attempts = 0;
47429+static unsigned long gr_auth_expires = 0UL;
47430+
47431+#ifdef CONFIG_NET
47432+extern struct vfsmount *sock_mnt;
47433+#endif
47434+
47435+extern struct vfsmount *pipe_mnt;
47436+extern struct vfsmount *shm_mnt;
47437+#ifdef CONFIG_HUGETLBFS
47438+extern struct vfsmount *hugetlbfs_vfsmount;
47439+#endif
47440+
47441+static struct acl_object_label *fakefs_obj_rw;
47442+static struct acl_object_label *fakefs_obj_rwx;
47443+
47444+extern int gr_init_uidset(void);
47445+extern void gr_free_uidset(void);
47446+extern void gr_remove_uid(uid_t uid);
47447+extern int gr_find_uid(uid_t uid);
47448+
47449+DECLARE_BRLOCK(vfsmount_lock);
47450+
47451+__inline__ int
47452+gr_acl_is_enabled(void)
47453+{
47454+ return (gr_status & GR_READY);
47455+}
47456+
47457+#ifdef CONFIG_BTRFS_FS
47458+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
47459+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
47460+#endif
47461+
47462+static inline dev_t __get_dev(const struct dentry *dentry)
47463+{
47464+#ifdef CONFIG_BTRFS_FS
47465+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
47466+ return get_btrfs_dev_from_inode(dentry->d_inode);
47467+ else
47468+#endif
47469+ return dentry->d_inode->i_sb->s_dev;
47470+}
47471+
47472+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
47473+{
47474+ return __get_dev(dentry);
47475+}
47476+
47477+static char gr_task_roletype_to_char(struct task_struct *task)
47478+{
47479+ switch (task->role->roletype &
47480+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
47481+ GR_ROLE_SPECIAL)) {
47482+ case GR_ROLE_DEFAULT:
47483+ return 'D';
47484+ case GR_ROLE_USER:
47485+ return 'U';
47486+ case GR_ROLE_GROUP:
47487+ return 'G';
47488+ case GR_ROLE_SPECIAL:
47489+ return 'S';
47490+ }
47491+
47492+ return 'X';
47493+}
47494+
47495+char gr_roletype_to_char(void)
47496+{
47497+ return gr_task_roletype_to_char(current);
47498+}
47499+
47500+__inline__ int
47501+gr_acl_tpe_check(void)
47502+{
47503+ if (unlikely(!(gr_status & GR_READY)))
47504+ return 0;
47505+ if (current->role->roletype & GR_ROLE_TPE)
47506+ return 1;
47507+ else
47508+ return 0;
47509+}
47510+
47511+int
47512+gr_handle_rawio(const struct inode *inode)
47513+{
47514+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
47515+ if (inode && S_ISBLK(inode->i_mode) &&
47516+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
47517+ !capable(CAP_SYS_RAWIO))
47518+ return 1;
47519+#endif
47520+ return 0;
47521+}
47522+
47523+static int
47524+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
47525+{
47526+ if (likely(lena != lenb))
47527+ return 0;
47528+
47529+ return !memcmp(a, b, lena);
47530+}
47531+
47532+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
47533+{
47534+ *buflen -= namelen;
47535+ if (*buflen < 0)
47536+ return -ENAMETOOLONG;
47537+ *buffer -= namelen;
47538+ memcpy(*buffer, str, namelen);
47539+ return 0;
47540+}
47541+
47542+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
47543+{
47544+ return prepend(buffer, buflen, name->name, name->len);
47545+}
47546+
47547+static int prepend_path(const struct path *path, struct path *root,
47548+ char **buffer, int *buflen)
47549+{
47550+ struct dentry *dentry = path->dentry;
47551+ struct vfsmount *vfsmnt = path->mnt;
47552+ bool slash = false;
47553+ int error = 0;
47554+
47555+ while (dentry != root->dentry || vfsmnt != root->mnt) {
47556+ struct dentry * parent;
47557+
47558+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
47559+ /* Global root? */
47560+ if (vfsmnt->mnt_parent == vfsmnt) {
47561+ goto out;
47562+ }
47563+ dentry = vfsmnt->mnt_mountpoint;
47564+ vfsmnt = vfsmnt->mnt_parent;
47565+ continue;
47566+ }
47567+ parent = dentry->d_parent;
47568+ prefetch(parent);
47569+ spin_lock(&dentry->d_lock);
47570+ error = prepend_name(buffer, buflen, &dentry->d_name);
47571+ spin_unlock(&dentry->d_lock);
47572+ if (!error)
47573+ error = prepend(buffer, buflen, "/", 1);
47574+ if (error)
47575+ break;
47576+
47577+ slash = true;
47578+ dentry = parent;
47579+ }
47580+
47581+out:
47582+ if (!error && !slash)
47583+ error = prepend(buffer, buflen, "/", 1);
47584+
47585+ return error;
47586+}
47587+
47588+/* this must be called with vfsmount_lock and rename_lock held */
47589+
47590+static char *__our_d_path(const struct path *path, struct path *root,
47591+ char *buf, int buflen)
47592+{
47593+ char *res = buf + buflen;
47594+ int error;
47595+
47596+ prepend(&res, &buflen, "\0", 1);
47597+ error = prepend_path(path, root, &res, &buflen);
47598+ if (error)
47599+ return ERR_PTR(error);
47600+
47601+ return res;
47602+}
47603+
47604+static char *
47605+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
47606+{
47607+ char *retval;
47608+
47609+ retval = __our_d_path(path, root, buf, buflen);
47610+ if (unlikely(IS_ERR(retval)))
47611+ retval = strcpy(buf, "<path too long>");
47612+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
47613+ retval[1] = '\0';
47614+
47615+ return retval;
47616+}
47617+
47618+static char *
47619+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
47620+ char *buf, int buflen)
47621+{
47622+ struct path path;
47623+ char *res;
47624+
47625+ path.dentry = (struct dentry *)dentry;
47626+ path.mnt = (struct vfsmount *)vfsmnt;
47627+
47628+ /* we can use real_root.dentry, real_root.mnt, because this is only called
47629+ by the RBAC system */
47630+ res = gen_full_path(&path, &real_root, buf, buflen);
47631+
47632+ return res;
47633+}
47634+
47635+static char *
47636+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
47637+ char *buf, int buflen)
47638+{
47639+ char *res;
47640+ struct path path;
47641+ struct path root;
47642+ struct task_struct *reaper = &init_task;
47643+
47644+ path.dentry = (struct dentry *)dentry;
47645+ path.mnt = (struct vfsmount *)vfsmnt;
47646+
47647+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
47648+ get_fs_root(reaper->fs, &root);
47649+
47650+ write_seqlock(&rename_lock);
47651+ br_read_lock(vfsmount_lock);
47652+ res = gen_full_path(&path, &root, buf, buflen);
47653+ br_read_unlock(vfsmount_lock);
47654+ write_sequnlock(&rename_lock);
47655+
47656+ path_put(&root);
47657+ return res;
47658+}
47659+
47660+static char *
47661+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
47662+{
47663+ char *ret;
47664+ write_seqlock(&rename_lock);
47665+ br_read_lock(vfsmount_lock);
47666+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
47667+ PAGE_SIZE);
47668+ br_read_unlock(vfsmount_lock);
47669+ write_sequnlock(&rename_lock);
47670+ return ret;
47671+}
47672+
47673+static char *
47674+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
47675+{
47676+ char *ret;
47677+ char *buf;
47678+ int buflen;
47679+
47680+ write_seqlock(&rename_lock);
47681+ br_read_lock(vfsmount_lock);
47682+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
47683+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
47684+ buflen = (int)(ret - buf);
47685+ if (buflen >= 5)
47686+ prepend(&ret, &buflen, "/proc", 5);
47687+ else
47688+ ret = strcpy(buf, "<path too long>");
47689+ br_read_unlock(vfsmount_lock);
47690+ write_sequnlock(&rename_lock);
47691+ return ret;
47692+}
47693+
47694+char *
47695+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
47696+{
47697+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
47698+ PAGE_SIZE);
47699+}
47700+
47701+char *
47702+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
47703+{
47704+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
47705+ PAGE_SIZE);
47706+}
47707+
47708+char *
47709+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
47710+{
47711+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
47712+ PAGE_SIZE);
47713+}
47714+
47715+char *
47716+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
47717+{
47718+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
47719+ PAGE_SIZE);
47720+}
47721+
47722+char *
47723+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
47724+{
47725+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
47726+ PAGE_SIZE);
47727+}
47728+
47729+__inline__ __u32
47730+to_gr_audit(const __u32 reqmode)
47731+{
47732+ /* masks off auditable permission flags, then shifts them to create
47733+ auditing flags, and adds the special case of append auditing if
47734+ we're requesting write */
47735+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
47736+}
47737+
47738+struct acl_subject_label *
47739+lookup_subject_map(const struct acl_subject_label *userp)
47740+{
47741+ unsigned int index = shash(userp, subj_map_set.s_size);
47742+ struct subject_map *match;
47743+
47744+ match = subj_map_set.s_hash[index];
47745+
47746+ while (match && match->user != userp)
47747+ match = match->next;
47748+
47749+ if (match != NULL)
47750+ return match->kernel;
47751+ else
47752+ return NULL;
47753+}
47754+
47755+static void
47756+insert_subj_map_entry(struct subject_map *subjmap)
47757+{
47758+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
47759+ struct subject_map **curr;
47760+
47761+ subjmap->prev = NULL;
47762+
47763+ curr = &subj_map_set.s_hash[index];
47764+ if (*curr != NULL)
47765+ (*curr)->prev = subjmap;
47766+
47767+ subjmap->next = *curr;
47768+ *curr = subjmap;
47769+
47770+ return;
47771+}
47772+
47773+static struct acl_role_label *
47774+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
47775+ const gid_t gid)
47776+{
47777+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
47778+ struct acl_role_label *match;
47779+ struct role_allowed_ip *ipp;
47780+ unsigned int x;
47781+ u32 curr_ip = task->signal->curr_ip;
47782+
47783+ task->signal->saved_ip = curr_ip;
47784+
47785+ match = acl_role_set.r_hash[index];
47786+
47787+ while (match) {
47788+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
47789+ for (x = 0; x < match->domain_child_num; x++) {
47790+ if (match->domain_children[x] == uid)
47791+ goto found;
47792+ }
47793+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
47794+ break;
47795+ match = match->next;
47796+ }
47797+found:
47798+ if (match == NULL) {
47799+ try_group:
47800+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
47801+ match = acl_role_set.r_hash[index];
47802+
47803+ while (match) {
47804+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
47805+ for (x = 0; x < match->domain_child_num; x++) {
47806+ if (match->domain_children[x] == gid)
47807+ goto found2;
47808+ }
47809+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
47810+ break;
47811+ match = match->next;
47812+ }
47813+found2:
47814+ if (match == NULL)
47815+ match = default_role;
47816+ if (match->allowed_ips == NULL)
47817+ return match;
47818+ else {
47819+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
47820+ if (likely
47821+ ((ntohl(curr_ip) & ipp->netmask) ==
47822+ (ntohl(ipp->addr) & ipp->netmask)))
47823+ return match;
47824+ }
47825+ match = default_role;
47826+ }
47827+ } else if (match->allowed_ips == NULL) {
47828+ return match;
47829+ } else {
47830+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
47831+ if (likely
47832+ ((ntohl(curr_ip) & ipp->netmask) ==
47833+ (ntohl(ipp->addr) & ipp->netmask)))
47834+ return match;
47835+ }
47836+ goto try_group;
47837+ }
47838+
47839+ return match;
47840+}
47841+
47842+struct acl_subject_label *
47843+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
47844+ const struct acl_role_label *role)
47845+{
47846+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
47847+ struct acl_subject_label *match;
47848+
47849+ match = role->subj_hash[index];
47850+
47851+ while (match && (match->inode != ino || match->device != dev ||
47852+ (match->mode & GR_DELETED))) {
47853+ match = match->next;
47854+ }
47855+
47856+ if (match && !(match->mode & GR_DELETED))
47857+ return match;
47858+ else
47859+ return NULL;
47860+}
47861+
47862+struct acl_subject_label *
47863+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
47864+ const struct acl_role_label *role)
47865+{
47866+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
47867+ struct acl_subject_label *match;
47868+
47869+ match = role->subj_hash[index];
47870+
47871+ while (match && (match->inode != ino || match->device != dev ||
47872+ !(match->mode & GR_DELETED))) {
47873+ match = match->next;
47874+ }
47875+
47876+ if (match && (match->mode & GR_DELETED))
47877+ return match;
47878+ else
47879+ return NULL;
47880+}
47881+
47882+static struct acl_object_label *
47883+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
47884+ const struct acl_subject_label *subj)
47885+{
47886+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
47887+ struct acl_object_label *match;
47888+
47889+ match = subj->obj_hash[index];
47890+
47891+ while (match && (match->inode != ino || match->device != dev ||
47892+ (match->mode & GR_DELETED))) {
47893+ match = match->next;
47894+ }
47895+
47896+ if (match && !(match->mode & GR_DELETED))
47897+ return match;
47898+ else
47899+ return NULL;
47900+}
47901+
47902+static struct acl_object_label *
47903+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
47904+ const struct acl_subject_label *subj)
47905+{
47906+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
47907+ struct acl_object_label *match;
47908+
47909+ match = subj->obj_hash[index];
47910+
47911+ while (match && (match->inode != ino || match->device != dev ||
47912+ !(match->mode & GR_DELETED))) {
47913+ match = match->next;
47914+ }
47915+
47916+ if (match && (match->mode & GR_DELETED))
47917+ return match;
47918+
47919+ match = subj->obj_hash[index];
47920+
47921+ while (match && (match->inode != ino || match->device != dev ||
47922+ (match->mode & GR_DELETED))) {
47923+ match = match->next;
47924+ }
47925+
47926+ if (match && !(match->mode & GR_DELETED))
47927+ return match;
47928+ else
47929+ return NULL;
47930+}
47931+
47932+static struct name_entry *
47933+lookup_name_entry(const char *name)
47934+{
47935+ unsigned int len = strlen(name);
47936+ unsigned int key = full_name_hash(name, len);
47937+ unsigned int index = key % name_set.n_size;
47938+ struct name_entry *match;
47939+
47940+ match = name_set.n_hash[index];
47941+
47942+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
47943+ match = match->next;
47944+
47945+ return match;
47946+}
47947+
47948+static struct name_entry *
47949+lookup_name_entry_create(const char *name)
47950+{
47951+ unsigned int len = strlen(name);
47952+ unsigned int key = full_name_hash(name, len);
47953+ unsigned int index = key % name_set.n_size;
47954+ struct name_entry *match;
47955+
47956+ match = name_set.n_hash[index];
47957+
47958+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
47959+ !match->deleted))
47960+ match = match->next;
47961+
47962+ if (match && match->deleted)
47963+ return match;
47964+
47965+ match = name_set.n_hash[index];
47966+
47967+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
47968+ match->deleted))
47969+ match = match->next;
47970+
47971+ if (match && !match->deleted)
47972+ return match;
47973+ else
47974+ return NULL;
47975+}
47976+
47977+static struct inodev_entry *
47978+lookup_inodev_entry(const ino_t ino, const dev_t dev)
47979+{
47980+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
47981+ struct inodev_entry *match;
47982+
47983+ match = inodev_set.i_hash[index];
47984+
47985+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
47986+ match = match->next;
47987+
47988+ return match;
47989+}
47990+
47991+static void
47992+insert_inodev_entry(struct inodev_entry *entry)
47993+{
47994+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
47995+ inodev_set.i_size);
47996+ struct inodev_entry **curr;
47997+
47998+ entry->prev = NULL;
47999+
48000+ curr = &inodev_set.i_hash[index];
48001+ if (*curr != NULL)
48002+ (*curr)->prev = entry;
48003+
48004+ entry->next = *curr;
48005+ *curr = entry;
48006+
48007+ return;
48008+}
48009+
48010+static void
48011+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
48012+{
48013+ unsigned int index =
48014+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
48015+ struct acl_role_label **curr;
48016+ struct acl_role_label *tmp;
48017+
48018+ curr = &acl_role_set.r_hash[index];
48019+
48020+ /* if role was already inserted due to domains and already has
48021+ a role in the same bucket as it attached, then we need to
48022+ combine these two buckets
48023+ */
48024+ if (role->next) {
48025+ tmp = role->next;
48026+ while (tmp->next)
48027+ tmp = tmp->next;
48028+ tmp->next = *curr;
48029+ } else
48030+ role->next = *curr;
48031+ *curr = role;
48032+
48033+ return;
48034+}
48035+
48036+static void
48037+insert_acl_role_label(struct acl_role_label *role)
48038+{
48039+ int i;
48040+
48041+ if (role_list == NULL) {
48042+ role_list = role;
48043+ role->prev = NULL;
48044+ } else {
48045+ role->prev = role_list;
48046+ role_list = role;
48047+ }
48048+
48049+ /* used for hash chains */
48050+ role->next = NULL;
48051+
48052+ if (role->roletype & GR_ROLE_DOMAIN) {
48053+ for (i = 0; i < role->domain_child_num; i++)
48054+ __insert_acl_role_label(role, role->domain_children[i]);
48055+ } else
48056+ __insert_acl_role_label(role, role->uidgid);
48057+}
48058+
48059+static int
48060+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
48061+{
48062+ struct name_entry **curr, *nentry;
48063+ struct inodev_entry *ientry;
48064+ unsigned int len = strlen(name);
48065+ unsigned int key = full_name_hash(name, len);
48066+ unsigned int index = key % name_set.n_size;
48067+
48068+ curr = &name_set.n_hash[index];
48069+
48070+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
48071+ curr = &((*curr)->next);
48072+
48073+ if (*curr != NULL)
48074+ return 1;
48075+
48076+ nentry = acl_alloc(sizeof (struct name_entry));
48077+ if (nentry == NULL)
48078+ return 0;
48079+ ientry = acl_alloc(sizeof (struct inodev_entry));
48080+ if (ientry == NULL)
48081+ return 0;
48082+ ientry->nentry = nentry;
48083+
48084+ nentry->key = key;
48085+ nentry->name = name;
48086+ nentry->inode = inode;
48087+ nentry->device = device;
48088+ nentry->len = len;
48089+ nentry->deleted = deleted;
48090+
48091+ nentry->prev = NULL;
48092+ curr = &name_set.n_hash[index];
48093+ if (*curr != NULL)
48094+ (*curr)->prev = nentry;
48095+ nentry->next = *curr;
48096+ *curr = nentry;
48097+
48098+ /* insert us into the table searchable by inode/dev */
48099+ insert_inodev_entry(ientry);
48100+
48101+ return 1;
48102+}
48103+
48104+static void
48105+insert_acl_obj_label(struct acl_object_label *obj,
48106+ struct acl_subject_label *subj)
48107+{
48108+ unsigned int index =
48109+ fhash(obj->inode, obj->device, subj->obj_hash_size);
48110+ struct acl_object_label **curr;
48111+
48112+
48113+ obj->prev = NULL;
48114+
48115+ curr = &subj->obj_hash[index];
48116+ if (*curr != NULL)
48117+ (*curr)->prev = obj;
48118+
48119+ obj->next = *curr;
48120+ *curr = obj;
48121+
48122+ return;
48123+}
48124+
48125+static void
48126+insert_acl_subj_label(struct acl_subject_label *obj,
48127+ struct acl_role_label *role)
48128+{
48129+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
48130+ struct acl_subject_label **curr;
48131+
48132+ obj->prev = NULL;
48133+
48134+ curr = &role->subj_hash[index];
48135+ if (*curr != NULL)
48136+ (*curr)->prev = obj;
48137+
48138+ obj->next = *curr;
48139+ *curr = obj;
48140+
48141+ return;
48142+}
48143+
48144+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
48145+
48146+static void *
48147+create_table(__u32 * len, int elementsize)
48148+{
48149+ unsigned int table_sizes[] = {
48150+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
48151+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
48152+ 4194301, 8388593, 16777213, 33554393, 67108859
48153+ };
48154+ void *newtable = NULL;
48155+ unsigned int pwr = 0;
48156+
48157+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
48158+ table_sizes[pwr] <= *len)
48159+ pwr++;
48160+
48161+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
48162+ return newtable;
48163+
48164+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
48165+ newtable =
48166+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
48167+ else
48168+ newtable = vmalloc(table_sizes[pwr] * elementsize);
48169+
48170+ *len = table_sizes[pwr];
48171+
48172+ return newtable;
48173+}
48174+
48175+static int
48176+init_variables(const struct gr_arg *arg)
48177+{
48178+ struct task_struct *reaper = &init_task;
48179+ unsigned int stacksize;
48180+
48181+ subj_map_set.s_size = arg->role_db.num_subjects;
48182+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
48183+ name_set.n_size = arg->role_db.num_objects;
48184+ inodev_set.i_size = arg->role_db.num_objects;
48185+
48186+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
48187+ !name_set.n_size || !inodev_set.i_size)
48188+ return 1;
48189+
48190+ if (!gr_init_uidset())
48191+ return 1;
48192+
48193+ /* set up the stack that holds allocation info */
48194+
48195+ stacksize = arg->role_db.num_pointers + 5;
48196+
48197+ if (!acl_alloc_stack_init(stacksize))
48198+ return 1;
48199+
48200+ /* grab reference for the real root dentry and vfsmount */
48201+ get_fs_root(reaper->fs, &real_root);
48202+
48203+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
48204+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
48205+#endif
48206+
48207+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
48208+ if (fakefs_obj_rw == NULL)
48209+ return 1;
48210+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
48211+
48212+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
48213+ if (fakefs_obj_rwx == NULL)
48214+ return 1;
48215+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
48216+
48217+ subj_map_set.s_hash =
48218+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
48219+ acl_role_set.r_hash =
48220+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
48221+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
48222+ inodev_set.i_hash =
48223+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
48224+
48225+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
48226+ !name_set.n_hash || !inodev_set.i_hash)
48227+ return 1;
48228+
48229+ memset(subj_map_set.s_hash, 0,
48230+ sizeof(struct subject_map *) * subj_map_set.s_size);
48231+ memset(acl_role_set.r_hash, 0,
48232+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
48233+ memset(name_set.n_hash, 0,
48234+ sizeof (struct name_entry *) * name_set.n_size);
48235+ memset(inodev_set.i_hash, 0,
48236+ sizeof (struct inodev_entry *) * inodev_set.i_size);
48237+
48238+ return 0;
48239+}
48240+
48241+/* free information not needed after startup
48242+ currently contains user->kernel pointer mappings for subjects
48243+*/
48244+
48245+static void
48246+free_init_variables(void)
48247+{
48248+ __u32 i;
48249+
48250+ if (subj_map_set.s_hash) {
48251+ for (i = 0; i < subj_map_set.s_size; i++) {
48252+ if (subj_map_set.s_hash[i]) {
48253+ kfree(subj_map_set.s_hash[i]);
48254+ subj_map_set.s_hash[i] = NULL;
48255+ }
48256+ }
48257+
48258+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
48259+ PAGE_SIZE)
48260+ kfree(subj_map_set.s_hash);
48261+ else
48262+ vfree(subj_map_set.s_hash);
48263+ }
48264+
48265+ return;
48266+}
48267+
48268+static void
48269+free_variables(void)
48270+{
48271+ struct acl_subject_label *s;
48272+ struct acl_role_label *r;
48273+ struct task_struct *task, *task2;
48274+ unsigned int x;
48275+
48276+ gr_clear_learn_entries();
48277+
48278+ read_lock(&tasklist_lock);
48279+ do_each_thread(task2, task) {
48280+ task->acl_sp_role = 0;
48281+ task->acl_role_id = 0;
48282+ task->acl = NULL;
48283+ task->role = NULL;
48284+ } while_each_thread(task2, task);
48285+ read_unlock(&tasklist_lock);
48286+
48287+ /* release the reference to the real root dentry and vfsmount */
48288+ path_put(&real_root);
48289+
48290+ /* free all object hash tables */
48291+
48292+ FOR_EACH_ROLE_START(r)
48293+ if (r->subj_hash == NULL)
48294+ goto next_role;
48295+ FOR_EACH_SUBJECT_START(r, s, x)
48296+ if (s->obj_hash == NULL)
48297+ break;
48298+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
48299+ kfree(s->obj_hash);
48300+ else
48301+ vfree(s->obj_hash);
48302+ FOR_EACH_SUBJECT_END(s, x)
48303+ FOR_EACH_NESTED_SUBJECT_START(r, s)
48304+ if (s->obj_hash == NULL)
48305+ break;
48306+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
48307+ kfree(s->obj_hash);
48308+ else
48309+ vfree(s->obj_hash);
48310+ FOR_EACH_NESTED_SUBJECT_END(s)
48311+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
48312+ kfree(r->subj_hash);
48313+ else
48314+ vfree(r->subj_hash);
48315+ r->subj_hash = NULL;
48316+next_role:
48317+ FOR_EACH_ROLE_END(r)
48318+
48319+ acl_free_all();
48320+
48321+ if (acl_role_set.r_hash) {
48322+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
48323+ PAGE_SIZE)
48324+ kfree(acl_role_set.r_hash);
48325+ else
48326+ vfree(acl_role_set.r_hash);
48327+ }
48328+ if (name_set.n_hash) {
48329+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
48330+ PAGE_SIZE)
48331+ kfree(name_set.n_hash);
48332+ else
48333+ vfree(name_set.n_hash);
48334+ }
48335+
48336+ if (inodev_set.i_hash) {
48337+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
48338+ PAGE_SIZE)
48339+ kfree(inodev_set.i_hash);
48340+ else
48341+ vfree(inodev_set.i_hash);
48342+ }
48343+
48344+ gr_free_uidset();
48345+
48346+ memset(&name_set, 0, sizeof (struct name_db));
48347+ memset(&inodev_set, 0, sizeof (struct inodev_db));
48348+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
48349+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
48350+
48351+ default_role = NULL;
48352+ role_list = NULL;
48353+
48354+ return;
48355+}
48356+
48357+static __u32
48358+count_user_objs(struct acl_object_label *userp)
48359+{
48360+ struct acl_object_label o_tmp;
48361+ __u32 num = 0;
48362+
48363+ while (userp) {
48364+ if (copy_from_user(&o_tmp, userp,
48365+ sizeof (struct acl_object_label)))
48366+ break;
48367+
48368+ userp = o_tmp.prev;
48369+ num++;
48370+ }
48371+
48372+ return num;
48373+}
48374+
48375+static struct acl_subject_label *
48376+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
48377+
48378+static int
48379+copy_user_glob(struct acl_object_label *obj)
48380+{
48381+ struct acl_object_label *g_tmp, **guser;
48382+ unsigned int len;
48383+ char *tmp;
48384+
48385+ if (obj->globbed == NULL)
48386+ return 0;
48387+
48388+ guser = &obj->globbed;
48389+ while (*guser) {
48390+ g_tmp = (struct acl_object_label *)
48391+ acl_alloc(sizeof (struct acl_object_label));
48392+ if (g_tmp == NULL)
48393+ return -ENOMEM;
48394+
48395+ if (copy_from_user(g_tmp, *guser,
48396+ sizeof (struct acl_object_label)))
48397+ return -EFAULT;
48398+
48399+ len = strnlen_user(g_tmp->filename, PATH_MAX);
48400+
48401+ if (!len || len >= PATH_MAX)
48402+ return -EINVAL;
48403+
48404+ if ((tmp = (char *) acl_alloc(len)) == NULL)
48405+ return -ENOMEM;
48406+
48407+ if (copy_from_user(tmp, g_tmp->filename, len))
48408+ return -EFAULT;
48409+ tmp[len-1] = '\0';
48410+ g_tmp->filename = tmp;
48411+
48412+ *guser = g_tmp;
48413+ guser = &(g_tmp->next);
48414+ }
48415+
48416+ return 0;
48417+}
48418+
48419+static int
48420+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
48421+ struct acl_role_label *role)
48422+{
48423+ struct acl_object_label *o_tmp;
48424+ unsigned int len;
48425+ int ret;
48426+ char *tmp;
48427+
48428+ while (userp) {
48429+ if ((o_tmp = (struct acl_object_label *)
48430+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
48431+ return -ENOMEM;
48432+
48433+ if (copy_from_user(o_tmp, userp,
48434+ sizeof (struct acl_object_label)))
48435+ return -EFAULT;
48436+
48437+ userp = o_tmp->prev;
48438+
48439+ len = strnlen_user(o_tmp->filename, PATH_MAX);
48440+
48441+ if (!len || len >= PATH_MAX)
48442+ return -EINVAL;
48443+
48444+ if ((tmp = (char *) acl_alloc(len)) == NULL)
48445+ return -ENOMEM;
48446+
48447+ if (copy_from_user(tmp, o_tmp->filename, len))
48448+ return -EFAULT;
48449+ tmp[len-1] = '\0';
48450+ o_tmp->filename = tmp;
48451+
48452+ insert_acl_obj_label(o_tmp, subj);
48453+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
48454+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
48455+ return -ENOMEM;
48456+
48457+ ret = copy_user_glob(o_tmp);
48458+ if (ret)
48459+ return ret;
48460+
48461+ if (o_tmp->nested) {
48462+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
48463+ if (IS_ERR(o_tmp->nested))
48464+ return PTR_ERR(o_tmp->nested);
48465+
48466+ /* insert into nested subject list */
48467+ o_tmp->nested->next = role->hash->first;
48468+ role->hash->first = o_tmp->nested;
48469+ }
48470+ }
48471+
48472+ return 0;
48473+}
48474+
48475+static __u32
48476+count_user_subjs(struct acl_subject_label *userp)
48477+{
48478+ struct acl_subject_label s_tmp;
48479+ __u32 num = 0;
48480+
48481+ while (userp) {
48482+ if (copy_from_user(&s_tmp, userp,
48483+ sizeof (struct acl_subject_label)))
48484+ break;
48485+
48486+ userp = s_tmp.prev;
48487+ /* do not count nested subjects against this count, since
48488+ they are not included in the hash table, but are
48489+ attached to objects. We have already counted
48490+ the subjects in userspace for the allocation
48491+ stack
48492+ */
48493+ if (!(s_tmp.mode & GR_NESTED))
48494+ num++;
48495+ }
48496+
48497+ return num;
48498+}
48499+
48500+static int
48501+copy_user_allowedips(struct acl_role_label *rolep)
48502+{
48503+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
48504+
48505+ ruserip = rolep->allowed_ips;
48506+
48507+ while (ruserip) {
48508+ rlast = rtmp;
48509+
48510+ if ((rtmp = (struct role_allowed_ip *)
48511+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
48512+ return -ENOMEM;
48513+
48514+ if (copy_from_user(rtmp, ruserip,
48515+ sizeof (struct role_allowed_ip)))
48516+ return -EFAULT;
48517+
48518+ ruserip = rtmp->prev;
48519+
48520+ if (!rlast) {
48521+ rtmp->prev = NULL;
48522+ rolep->allowed_ips = rtmp;
48523+ } else {
48524+ rlast->next = rtmp;
48525+ rtmp->prev = rlast;
48526+ }
48527+
48528+ if (!ruserip)
48529+ rtmp->next = NULL;
48530+ }
48531+
48532+ return 0;
48533+}
48534+
48535+static int
48536+copy_user_transitions(struct acl_role_label *rolep)
48537+{
48538+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
48539+
48540+ unsigned int len;
48541+ char *tmp;
48542+
48543+ rusertp = rolep->transitions;
48544+
48545+ while (rusertp) {
48546+ rlast = rtmp;
48547+
48548+ if ((rtmp = (struct role_transition *)
48549+ acl_alloc(sizeof (struct role_transition))) == NULL)
48550+ return -ENOMEM;
48551+
48552+ if (copy_from_user(rtmp, rusertp,
48553+ sizeof (struct role_transition)))
48554+ return -EFAULT;
48555+
48556+ rusertp = rtmp->prev;
48557+
48558+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
48559+
48560+ if (!len || len >= GR_SPROLE_LEN)
48561+ return -EINVAL;
48562+
48563+ if ((tmp = (char *) acl_alloc(len)) == NULL)
48564+ return -ENOMEM;
48565+
48566+ if (copy_from_user(tmp, rtmp->rolename, len))
48567+ return -EFAULT;
48568+ tmp[len-1] = '\0';
48569+ rtmp->rolename = tmp;
48570+
48571+ if (!rlast) {
48572+ rtmp->prev = NULL;
48573+ rolep->transitions = rtmp;
48574+ } else {
48575+ rlast->next = rtmp;
48576+ rtmp->prev = rlast;
48577+ }
48578+
48579+ if (!rusertp)
48580+ rtmp->next = NULL;
48581+ }
48582+
48583+ return 0;
48584+}
48585+
48586+static struct acl_subject_label *
48587+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
48588+{
48589+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
48590+ unsigned int len;
48591+ char *tmp;
48592+ __u32 num_objs;
48593+ struct acl_ip_label **i_tmp, *i_utmp2;
48594+ struct gr_hash_struct ghash;
48595+ struct subject_map *subjmap;
48596+ unsigned int i_num;
48597+ int err;
48598+
48599+ s_tmp = lookup_subject_map(userp);
48600+
48601+ /* we've already copied this subject into the kernel, just return
48602+ the reference to it, and don't copy it over again
48603+ */
48604+ if (s_tmp)
48605+ return(s_tmp);
48606+
48607+ if ((s_tmp = (struct acl_subject_label *)
48608+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
48609+ return ERR_PTR(-ENOMEM);
48610+
48611+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
48612+ if (subjmap == NULL)
48613+ return ERR_PTR(-ENOMEM);
48614+
48615+ subjmap->user = userp;
48616+ subjmap->kernel = s_tmp;
48617+ insert_subj_map_entry(subjmap);
48618+
48619+ if (copy_from_user(s_tmp, userp,
48620+ sizeof (struct acl_subject_label)))
48621+ return ERR_PTR(-EFAULT);
48622+
48623+ len = strnlen_user(s_tmp->filename, PATH_MAX);
48624+
48625+ if (!len || len >= PATH_MAX)
48626+ return ERR_PTR(-EINVAL);
48627+
48628+ if ((tmp = (char *) acl_alloc(len)) == NULL)
48629+ return ERR_PTR(-ENOMEM);
48630+
48631+ if (copy_from_user(tmp, s_tmp->filename, len))
48632+ return ERR_PTR(-EFAULT);
48633+ tmp[len-1] = '\0';
48634+ s_tmp->filename = tmp;
48635+
48636+ if (!strcmp(s_tmp->filename, "/"))
48637+ role->root_label = s_tmp;
48638+
48639+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
48640+ return ERR_PTR(-EFAULT);
48641+
48642+ /* copy user and group transition tables */
48643+
48644+ if (s_tmp->user_trans_num) {
48645+ uid_t *uidlist;
48646+
48647+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
48648+ if (uidlist == NULL)
48649+ return ERR_PTR(-ENOMEM);
48650+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
48651+ return ERR_PTR(-EFAULT);
48652+
48653+ s_tmp->user_transitions = uidlist;
48654+ }
48655+
48656+ if (s_tmp->group_trans_num) {
48657+ gid_t *gidlist;
48658+
48659+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
48660+ if (gidlist == NULL)
48661+ return ERR_PTR(-ENOMEM);
48662+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
48663+ return ERR_PTR(-EFAULT);
48664+
48665+ s_tmp->group_transitions = gidlist;
48666+ }
48667+
48668+ /* set up object hash table */
48669+ num_objs = count_user_objs(ghash.first);
48670+
48671+ s_tmp->obj_hash_size = num_objs;
48672+ s_tmp->obj_hash =
48673+ (struct acl_object_label **)
48674+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
48675+
48676+ if (!s_tmp->obj_hash)
48677+ return ERR_PTR(-ENOMEM);
48678+
48679+ memset(s_tmp->obj_hash, 0,
48680+ s_tmp->obj_hash_size *
48681+ sizeof (struct acl_object_label *));
48682+
48683+ /* add in objects */
48684+ err = copy_user_objs(ghash.first, s_tmp, role);
48685+
48686+ if (err)
48687+ return ERR_PTR(err);
48688+
48689+ /* set pointer for parent subject */
48690+ if (s_tmp->parent_subject) {
48691+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
48692+
48693+ if (IS_ERR(s_tmp2))
48694+ return s_tmp2;
48695+
48696+ s_tmp->parent_subject = s_tmp2;
48697+ }
48698+
48699+ /* add in ip acls */
48700+
48701+ if (!s_tmp->ip_num) {
48702+ s_tmp->ips = NULL;
48703+ goto insert;
48704+ }
48705+
48706+ i_tmp =
48707+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
48708+ sizeof (struct acl_ip_label *));
48709+
48710+ if (!i_tmp)
48711+ return ERR_PTR(-ENOMEM);
48712+
48713+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
48714+ *(i_tmp + i_num) =
48715+ (struct acl_ip_label *)
48716+ acl_alloc(sizeof (struct acl_ip_label));
48717+ if (!*(i_tmp + i_num))
48718+ return ERR_PTR(-ENOMEM);
48719+
48720+ if (copy_from_user
48721+ (&i_utmp2, s_tmp->ips + i_num,
48722+ sizeof (struct acl_ip_label *)))
48723+ return ERR_PTR(-EFAULT);
48724+
48725+ if (copy_from_user
48726+ (*(i_tmp + i_num), i_utmp2,
48727+ sizeof (struct acl_ip_label)))
48728+ return ERR_PTR(-EFAULT);
48729+
48730+ if ((*(i_tmp + i_num))->iface == NULL)
48731+ continue;
48732+
48733+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
48734+ if (!len || len >= IFNAMSIZ)
48735+ return ERR_PTR(-EINVAL);
48736+ tmp = acl_alloc(len);
48737+ if (tmp == NULL)
48738+ return ERR_PTR(-ENOMEM);
48739+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
48740+ return ERR_PTR(-EFAULT);
48741+ (*(i_tmp + i_num))->iface = tmp;
48742+ }
48743+
48744+ s_tmp->ips = i_tmp;
48745+
48746+insert:
48747+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
48748+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
48749+ return ERR_PTR(-ENOMEM);
48750+
48751+ return s_tmp;
48752+}
48753+
48754+static int
48755+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
48756+{
48757+ struct acl_subject_label s_pre;
48758+ struct acl_subject_label * ret;
48759+ int err;
48760+
48761+ while (userp) {
48762+ if (copy_from_user(&s_pre, userp,
48763+ sizeof (struct acl_subject_label)))
48764+ return -EFAULT;
48765+
48766+ /* do not add nested subjects here, add
48767+ while parsing objects
48768+ */
48769+
48770+ if (s_pre.mode & GR_NESTED) {
48771+ userp = s_pre.prev;
48772+ continue;
48773+ }
48774+
48775+ ret = do_copy_user_subj(userp, role);
48776+
48777+ err = PTR_ERR(ret);
48778+ if (IS_ERR(ret))
48779+ return err;
48780+
48781+ insert_acl_subj_label(ret, role);
48782+
48783+ userp = s_pre.prev;
48784+ }
48785+
48786+ return 0;
48787+}
48788+
48789+static int
48790+copy_user_acl(struct gr_arg *arg)
48791+{
48792+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
48793+ struct sprole_pw *sptmp;
48794+ struct gr_hash_struct *ghash;
48795+ uid_t *domainlist;
48796+ unsigned int r_num;
48797+ unsigned int len;
48798+ char *tmp;
48799+ int err = 0;
48800+ __u16 i;
48801+ __u32 num_subjs;
48802+
48803+ /* we need a default and kernel role */
48804+ if (arg->role_db.num_roles < 2)
48805+ return -EINVAL;
48806+
48807+ /* copy special role authentication info from userspace */
48808+
48809+ num_sprole_pws = arg->num_sprole_pws;
48810+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
48811+
48812+ if (!acl_special_roles) {
48813+ err = -ENOMEM;
48814+ goto cleanup;
48815+ }
48816+
48817+ for (i = 0; i < num_sprole_pws; i++) {
48818+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
48819+ if (!sptmp) {
48820+ err = -ENOMEM;
48821+ goto cleanup;
48822+ }
48823+ if (copy_from_user(sptmp, arg->sprole_pws + i,
48824+ sizeof (struct sprole_pw))) {
48825+ err = -EFAULT;
48826+ goto cleanup;
48827+ }
48828+
48829+ len =
48830+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
48831+
48832+ if (!len || len >= GR_SPROLE_LEN) {
48833+ err = -EINVAL;
48834+ goto cleanup;
48835+ }
48836+
48837+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
48838+ err = -ENOMEM;
48839+ goto cleanup;
48840+ }
48841+
48842+ if (copy_from_user(tmp, sptmp->rolename, len)) {
48843+ err = -EFAULT;
48844+ goto cleanup;
48845+ }
48846+ tmp[len-1] = '\0';
48847+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
48848+ printk(KERN_ALERT "Copying special role %s\n", tmp);
48849+#endif
48850+ sptmp->rolename = tmp;
48851+ acl_special_roles[i] = sptmp;
48852+ }
48853+
48854+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
48855+
48856+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
48857+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
48858+
48859+ if (!r_tmp) {
48860+ err = -ENOMEM;
48861+ goto cleanup;
48862+ }
48863+
48864+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
48865+ sizeof (struct acl_role_label *))) {
48866+ err = -EFAULT;
48867+ goto cleanup;
48868+ }
48869+
48870+ if (copy_from_user(r_tmp, r_utmp2,
48871+ sizeof (struct acl_role_label))) {
48872+ err = -EFAULT;
48873+ goto cleanup;
48874+ }
48875+
48876+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
48877+
48878+ if (!len || len >= PATH_MAX) {
48879+ err = -EINVAL;
48880+ goto cleanup;
48881+ }
48882+
48883+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
48884+ err = -ENOMEM;
48885+ goto cleanup;
48886+ }
48887+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
48888+ err = -EFAULT;
48889+ goto cleanup;
48890+ }
48891+ tmp[len-1] = '\0';
48892+ r_tmp->rolename = tmp;
48893+
48894+ if (!strcmp(r_tmp->rolename, "default")
48895+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
48896+ default_role = r_tmp;
48897+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
48898+ kernel_role = r_tmp;
48899+ }
48900+
48901+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
48902+ err = -ENOMEM;
48903+ goto cleanup;
48904+ }
48905+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
48906+ err = -EFAULT;
48907+ goto cleanup;
48908+ }
48909+
48910+ r_tmp->hash = ghash;
48911+
48912+ num_subjs = count_user_subjs(r_tmp->hash->first);
48913+
48914+ r_tmp->subj_hash_size = num_subjs;
48915+ r_tmp->subj_hash =
48916+ (struct acl_subject_label **)
48917+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
48918+
48919+ if (!r_tmp->subj_hash) {
48920+ err = -ENOMEM;
48921+ goto cleanup;
48922+ }
48923+
48924+ err = copy_user_allowedips(r_tmp);
48925+ if (err)
48926+ goto cleanup;
48927+
48928+ /* copy domain info */
48929+ if (r_tmp->domain_children != NULL) {
48930+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
48931+ if (domainlist == NULL) {
48932+ err = -ENOMEM;
48933+ goto cleanup;
48934+ }
48935+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
48936+ err = -EFAULT;
48937+ goto cleanup;
48938+ }
48939+ r_tmp->domain_children = domainlist;
48940+ }
48941+
48942+ err = copy_user_transitions(r_tmp);
48943+ if (err)
48944+ goto cleanup;
48945+
48946+ memset(r_tmp->subj_hash, 0,
48947+ r_tmp->subj_hash_size *
48948+ sizeof (struct acl_subject_label *));
48949+
48950+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
48951+
48952+ if (err)
48953+ goto cleanup;
48954+
48955+ /* set nested subject list to null */
48956+ r_tmp->hash->first = NULL;
48957+
48958+ insert_acl_role_label(r_tmp);
48959+ }
48960+
48961+ goto return_err;
48962+ cleanup:
48963+ free_variables();
48964+ return_err:
48965+ return err;
48966+
48967+}
48968+
48969+static int
48970+gracl_init(struct gr_arg *args)
48971+{
48972+ int error = 0;
48973+
48974+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
48975+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
48976+
48977+ if (init_variables(args)) {
48978+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
48979+ error = -ENOMEM;
48980+ free_variables();
48981+ goto out;
48982+ }
48983+
48984+ error = copy_user_acl(args);
48985+ free_init_variables();
48986+ if (error) {
48987+ free_variables();
48988+ goto out;
48989+ }
48990+
48991+ if ((error = gr_set_acls(0))) {
48992+ free_variables();
48993+ goto out;
48994+ }
48995+
48996+ pax_open_kernel();
48997+ gr_status |= GR_READY;
48998+ pax_close_kernel();
48999+
49000+ out:
49001+ return error;
49002+}
49003+
49004+/* derived from glibc fnmatch() 0: match, 1: no match*/
49005+
49006+static int
49007+glob_match(const char *p, const char *n)
49008+{
49009+ char c;
49010+
49011+ while ((c = *p++) != '\0') {
49012+ switch (c) {
49013+ case '?':
49014+ if (*n == '\0')
49015+ return 1;
49016+ else if (*n == '/')
49017+ return 1;
49018+ break;
49019+ case '\\':
49020+ if (*n != c)
49021+ return 1;
49022+ break;
49023+ case '*':
49024+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
49025+ if (*n == '/')
49026+ return 1;
49027+ else if (c == '?') {
49028+ if (*n == '\0')
49029+ return 1;
49030+ else
49031+ ++n;
49032+ }
49033+ }
49034+ if (c == '\0') {
49035+ return 0;
49036+ } else {
49037+ const char *endp;
49038+
49039+ if ((endp = strchr(n, '/')) == NULL)
49040+ endp = n + strlen(n);
49041+
49042+ if (c == '[') {
49043+ for (--p; n < endp; ++n)
49044+ if (!glob_match(p, n))
49045+ return 0;
49046+ } else if (c == '/') {
49047+ while (*n != '\0' && *n != '/')
49048+ ++n;
49049+ if (*n == '/' && !glob_match(p, n + 1))
49050+ return 0;
49051+ } else {
49052+ for (--p; n < endp; ++n)
49053+ if (*n == c && !glob_match(p, n))
49054+ return 0;
49055+ }
49056+
49057+ return 1;
49058+ }
49059+ case '[':
49060+ {
49061+ int not;
49062+ char cold;
49063+
49064+ if (*n == '\0' || *n == '/')
49065+ return 1;
49066+
49067+ not = (*p == '!' || *p == '^');
49068+ if (not)
49069+ ++p;
49070+
49071+ c = *p++;
49072+ for (;;) {
49073+ unsigned char fn = (unsigned char)*n;
49074+
49075+ if (c == '\0')
49076+ return 1;
49077+ else {
49078+ if (c == fn)
49079+ goto matched;
49080+ cold = c;
49081+ c = *p++;
49082+
49083+ if (c == '-' && *p != ']') {
49084+ unsigned char cend = *p++;
49085+
49086+ if (cend == '\0')
49087+ return 1;
49088+
49089+ if (cold <= fn && fn <= cend)
49090+ goto matched;
49091+
49092+ c = *p++;
49093+ }
49094+ }
49095+
49096+ if (c == ']')
49097+ break;
49098+ }
49099+ if (!not)
49100+ return 1;
49101+ break;
49102+ matched:
49103+ while (c != ']') {
49104+ if (c == '\0')
49105+ return 1;
49106+
49107+ c = *p++;
49108+ }
49109+ if (not)
49110+ return 1;
49111+ }
49112+ break;
49113+ default:
49114+ if (c != *n)
49115+ return 1;
49116+ }
49117+
49118+ ++n;
49119+ }
49120+
49121+ if (*n == '\0')
49122+ return 0;
49123+
49124+ if (*n == '/')
49125+ return 0;
49126+
49127+ return 1;
49128+}
49129+
49130+static struct acl_object_label *
49131+chk_glob_label(struct acl_object_label *globbed,
49132+ struct dentry *dentry, struct vfsmount *mnt, char **path)
49133+{
49134+ struct acl_object_label *tmp;
49135+
49136+ if (*path == NULL)
49137+ *path = gr_to_filename_nolock(dentry, mnt);
49138+
49139+ tmp = globbed;
49140+
49141+ while (tmp) {
49142+ if (!glob_match(tmp->filename, *path))
49143+ return tmp;
49144+ tmp = tmp->next;
49145+ }
49146+
49147+ return NULL;
49148+}
49149+
49150+static struct acl_object_label *
49151+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
49152+ const ino_t curr_ino, const dev_t curr_dev,
49153+ const struct acl_subject_label *subj, char **path, const int checkglob)
49154+{
49155+ struct acl_subject_label *tmpsubj;
49156+ struct acl_object_label *retval;
49157+ struct acl_object_label *retval2;
49158+
49159+ tmpsubj = (struct acl_subject_label *) subj;
49160+ read_lock(&gr_inode_lock);
49161+ do {
49162+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
49163+ if (retval) {
49164+ if (checkglob && retval->globbed) {
49165+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
49166+ (struct vfsmount *)orig_mnt, path);
49167+ if (retval2)
49168+ retval = retval2;
49169+ }
49170+ break;
49171+ }
49172+ } while ((tmpsubj = tmpsubj->parent_subject));
49173+ read_unlock(&gr_inode_lock);
49174+
49175+ return retval;
49176+}
49177+
49178+static __inline__ struct acl_object_label *
49179+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
49180+ struct dentry *curr_dentry,
49181+ const struct acl_subject_label *subj, char **path, const int checkglob)
49182+{
49183+ int newglob = checkglob;
49184+ ino_t inode;
49185+ dev_t device;
49186+
49187+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
49188+ as we don't want a / * rule to match instead of the / object
49189+ don't do this for create lookups that call this function though, since they're looking up
49190+ on the parent and thus need globbing checks on all paths
49191+ */
49192+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
49193+ newglob = GR_NO_GLOB;
49194+
49195+ spin_lock(&curr_dentry->d_lock);
49196+ inode = curr_dentry->d_inode->i_ino;
49197+ device = __get_dev(curr_dentry);
49198+ spin_unlock(&curr_dentry->d_lock);
49199+
49200+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
49201+}
49202+
49203+static struct acl_object_label *
49204+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49205+ const struct acl_subject_label *subj, char *path, const int checkglob)
49206+{
49207+ struct dentry *dentry = (struct dentry *) l_dentry;
49208+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
49209+ struct acl_object_label *retval;
49210+ struct dentry *parent;
49211+
49212+ write_seqlock(&rename_lock);
49213+ br_read_lock(vfsmount_lock);
49214+
49215+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
49216+#ifdef CONFIG_NET
49217+ mnt == sock_mnt ||
49218+#endif
49219+#ifdef CONFIG_HUGETLBFS
49220+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
49221+#endif
49222+ /* ignore Eric Biederman */
49223+ IS_PRIVATE(l_dentry->d_inode))) {
49224+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
49225+ goto out;
49226+ }
49227+
49228+ for (;;) {
49229+ if (dentry == real_root.dentry && mnt == real_root.mnt)
49230+ break;
49231+
49232+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
49233+ if (mnt->mnt_parent == mnt)
49234+ break;
49235+
49236+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
49237+ if (retval != NULL)
49238+ goto out;
49239+
49240+ dentry = mnt->mnt_mountpoint;
49241+ mnt = mnt->mnt_parent;
49242+ continue;
49243+ }
49244+
49245+ parent = dentry->d_parent;
49246+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
49247+ if (retval != NULL)
49248+ goto out;
49249+
49250+ dentry = parent;
49251+ }
49252+
49253+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
49254+
49255+ /* real_root is pinned so we don't have to hold a reference */
49256+ if (retval == NULL)
49257+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
49258+out:
49259+ br_read_unlock(vfsmount_lock);
49260+ write_sequnlock(&rename_lock);
49261+
49262+ BUG_ON(retval == NULL);
49263+
49264+ return retval;
49265+}
49266+
49267+static __inline__ struct acl_object_label *
49268+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49269+ const struct acl_subject_label *subj)
49270+{
49271+ char *path = NULL;
49272+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
49273+}
49274+
49275+static __inline__ struct acl_object_label *
49276+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49277+ const struct acl_subject_label *subj)
49278+{
49279+ char *path = NULL;
49280+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
49281+}
49282+
49283+static __inline__ struct acl_object_label *
49284+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49285+ const struct acl_subject_label *subj, char *path)
49286+{
49287+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
49288+}
49289+
49290+static struct acl_subject_label *
49291+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49292+ const struct acl_role_label *role)
49293+{
49294+ struct dentry *dentry = (struct dentry *) l_dentry;
49295+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
49296+ struct acl_subject_label *retval;
49297+ struct dentry *parent;
49298+
49299+ write_seqlock(&rename_lock);
49300+ br_read_lock(vfsmount_lock);
49301+
49302+ for (;;) {
49303+ if (dentry == real_root.dentry && mnt == real_root.mnt)
49304+ break;
49305+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
49306+ if (mnt->mnt_parent == mnt)
49307+ break;
49308+
49309+ spin_lock(&dentry->d_lock);
49310+ read_lock(&gr_inode_lock);
49311+ retval =
49312+ lookup_acl_subj_label(dentry->d_inode->i_ino,
49313+ __get_dev(dentry), role);
49314+ read_unlock(&gr_inode_lock);
49315+ spin_unlock(&dentry->d_lock);
49316+ if (retval != NULL)
49317+ goto out;
49318+
49319+ dentry = mnt->mnt_mountpoint;
49320+ mnt = mnt->mnt_parent;
49321+ continue;
49322+ }
49323+
49324+ spin_lock(&dentry->d_lock);
49325+ read_lock(&gr_inode_lock);
49326+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
49327+ __get_dev(dentry), role);
49328+ read_unlock(&gr_inode_lock);
49329+ parent = dentry->d_parent;
49330+ spin_unlock(&dentry->d_lock);
49331+
49332+ if (retval != NULL)
49333+ goto out;
49334+
49335+ dentry = parent;
49336+ }
49337+
49338+ spin_lock(&dentry->d_lock);
49339+ read_lock(&gr_inode_lock);
49340+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
49341+ __get_dev(dentry), role);
49342+ read_unlock(&gr_inode_lock);
49343+ spin_unlock(&dentry->d_lock);
49344+
49345+ if (unlikely(retval == NULL)) {
49346+ /* real_root is pinned, we don't need to hold a reference */
49347+ read_lock(&gr_inode_lock);
49348+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
49349+ __get_dev(real_root.dentry), role);
49350+ read_unlock(&gr_inode_lock);
49351+ }
49352+out:
49353+ br_read_unlock(vfsmount_lock);
49354+ write_sequnlock(&rename_lock);
49355+
49356+ BUG_ON(retval == NULL);
49357+
49358+ return retval;
49359+}
49360+
49361+static void
49362+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
49363+{
49364+ struct task_struct *task = current;
49365+ const struct cred *cred = current_cred();
49366+
49367+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
49368+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
49369+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
49370+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
49371+
49372+ return;
49373+}
49374+
49375+static void
49376+gr_log_learn_sysctl(const char *path, const __u32 mode)
49377+{
49378+ struct task_struct *task = current;
49379+ const struct cred *cred = current_cred();
49380+
49381+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
49382+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
49383+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
49384+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
49385+
49386+ return;
49387+}
49388+
49389+static void
49390+gr_log_learn_id_change(const char type, const unsigned int real,
49391+ const unsigned int effective, const unsigned int fs)
49392+{
49393+ struct task_struct *task = current;
49394+ const struct cred *cred = current_cred();
49395+
49396+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
49397+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
49398+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
49399+ type, real, effective, fs, &task->signal->saved_ip);
49400+
49401+ return;
49402+}
49403+
49404+__u32
49405+gr_search_file(const struct dentry * dentry, const __u32 mode,
49406+ const struct vfsmount * mnt)
49407+{
49408+ __u32 retval = mode;
49409+ struct acl_subject_label *curracl;
49410+ struct acl_object_label *currobj;
49411+
49412+ if (unlikely(!(gr_status & GR_READY)))
49413+ return (mode & ~GR_AUDITS);
49414+
49415+ curracl = current->acl;
49416+
49417+ currobj = chk_obj_label(dentry, mnt, curracl);
49418+ retval = currobj->mode & mode;
49419+
49420+ /* if we're opening a specified transfer file for writing
49421+ (e.g. /dev/initctl), then transfer our role to init
49422+ */
49423+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
49424+ current->role->roletype & GR_ROLE_PERSIST)) {
49425+ struct task_struct *task = init_pid_ns.child_reaper;
49426+
49427+ if (task->role != current->role) {
49428+ task->acl_sp_role = 0;
49429+ task->acl_role_id = current->acl_role_id;
49430+ task->role = current->role;
49431+ rcu_read_lock();
49432+ read_lock(&grsec_exec_file_lock);
49433+ gr_apply_subject_to_task(task);
49434+ read_unlock(&grsec_exec_file_lock);
49435+ rcu_read_unlock();
49436+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
49437+ }
49438+ }
49439+
49440+ if (unlikely
49441+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
49442+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
49443+ __u32 new_mode = mode;
49444+
49445+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
49446+
49447+ retval = new_mode;
49448+
49449+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
49450+ new_mode |= GR_INHERIT;
49451+
49452+ if (!(mode & GR_NOLEARN))
49453+ gr_log_learn(dentry, mnt, new_mode);
49454+ }
49455+
49456+ return retval;
49457+}
49458+
49459+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
49460+ const struct dentry *parent,
49461+ const struct vfsmount *mnt)
49462+{
49463+ struct name_entry *match;
49464+ struct acl_object_label *matchpo;
49465+ struct acl_subject_label *curracl;
49466+ char *path;
49467+
49468+ if (unlikely(!(gr_status & GR_READY)))
49469+ return NULL;
49470+
49471+ preempt_disable();
49472+ path = gr_to_filename_rbac(new_dentry, mnt);
49473+ match = lookup_name_entry_create(path);
49474+
49475+ curracl = current->acl;
49476+
49477+ if (match) {
49478+ read_lock(&gr_inode_lock);
49479+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
49480+ read_unlock(&gr_inode_lock);
49481+
49482+ if (matchpo) {
49483+ preempt_enable();
49484+ return matchpo;
49485+ }
49486+ }
49487+
49488+ // lookup parent
49489+
49490+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
49491+
49492+ preempt_enable();
49493+ return matchpo;
49494+}
49495+
49496+__u32
49497+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
49498+ const struct vfsmount * mnt, const __u32 mode)
49499+{
49500+ struct acl_object_label *matchpo;
49501+ __u32 retval;
49502+
49503+ if (unlikely(!(gr_status & GR_READY)))
49504+ return (mode & ~GR_AUDITS);
49505+
49506+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
49507+
49508+ retval = matchpo->mode & mode;
49509+
49510+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
49511+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
49512+ __u32 new_mode = mode;
49513+
49514+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
49515+
49516+ gr_log_learn(new_dentry, mnt, new_mode);
49517+ return new_mode;
49518+ }
49519+
49520+ return retval;
49521+}
49522+
49523+__u32
49524+gr_check_link(const struct dentry * new_dentry,
49525+ const struct dentry * parent_dentry,
49526+ const struct vfsmount * parent_mnt,
49527+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
49528+{
49529+ struct acl_object_label *obj;
49530+ __u32 oldmode, newmode;
49531+ __u32 needmode;
49532+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
49533+ GR_DELETE | GR_INHERIT;
49534+
49535+ if (unlikely(!(gr_status & GR_READY)))
49536+ return (GR_CREATE | GR_LINK);
49537+
49538+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
49539+ oldmode = obj->mode;
49540+
49541+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
49542+ newmode = obj->mode;
49543+
49544+ needmode = newmode & checkmodes;
49545+
49546+ // old name for hardlink must have at least the permissions of the new name
49547+ if ((oldmode & needmode) != needmode)
49548+ goto bad;
49549+
49550+ // if old name had restrictions/auditing, make sure the new name does as well
49551+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
49552+
49553+ // don't allow hardlinking of suid/sgid files without permission
49554+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
49555+ needmode |= GR_SETID;
49556+
49557+ if ((newmode & needmode) != needmode)
49558+ goto bad;
49559+
49560+ // enforce minimum permissions
49561+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
49562+ return newmode;
49563+bad:
49564+ needmode = oldmode;
49565+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
49566+ needmode |= GR_SETID;
49567+
49568+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
49569+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
49570+ return (GR_CREATE | GR_LINK);
49571+ } else if (newmode & GR_SUPPRESS)
49572+ return GR_SUPPRESS;
49573+ else
49574+ return 0;
49575+}
49576+
49577+int
49578+gr_check_hidden_task(const struct task_struct *task)
49579+{
49580+ if (unlikely(!(gr_status & GR_READY)))
49581+ return 0;
49582+
49583+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
49584+ return 1;
49585+
49586+ return 0;
49587+}
49588+
49589+int
49590+gr_check_protected_task(const struct task_struct *task)
49591+{
49592+ if (unlikely(!(gr_status & GR_READY) || !task))
49593+ return 0;
49594+
49595+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
49596+ task->acl != current->acl)
49597+ return 1;
49598+
49599+ return 0;
49600+}
49601+
49602+int
49603+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
49604+{
49605+ struct task_struct *p;
49606+ int ret = 0;
49607+
49608+ if (unlikely(!(gr_status & GR_READY) || !pid))
49609+ return ret;
49610+
49611+ read_lock(&tasklist_lock);
49612+ do_each_pid_task(pid, type, p) {
49613+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
49614+ p->acl != current->acl) {
49615+ ret = 1;
49616+ goto out;
49617+ }
49618+ } while_each_pid_task(pid, type, p);
49619+out:
49620+ read_unlock(&tasklist_lock);
49621+
49622+ return ret;
49623+}
49624+
49625+void
49626+gr_copy_label(struct task_struct *tsk)
49627+{
49628+ tsk->signal->used_accept = 0;
49629+ tsk->acl_sp_role = 0;
49630+ tsk->acl_role_id = current->acl_role_id;
49631+ tsk->acl = current->acl;
49632+ tsk->role = current->role;
49633+ tsk->signal->curr_ip = current->signal->curr_ip;
49634+ tsk->signal->saved_ip = current->signal->saved_ip;
49635+ if (current->exec_file)
49636+ get_file(current->exec_file);
49637+ tsk->exec_file = current->exec_file;
49638+ tsk->is_writable = current->is_writable;
49639+ if (unlikely(current->signal->used_accept)) {
49640+ current->signal->curr_ip = 0;
49641+ current->signal->saved_ip = 0;
49642+ }
49643+
49644+ return;
49645+}
49646+
49647+static void
49648+gr_set_proc_res(struct task_struct *task)
49649+{
49650+ struct acl_subject_label *proc;
49651+ unsigned short i;
49652+
49653+ proc = task->acl;
49654+
49655+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
49656+ return;
49657+
49658+ for (i = 0; i < RLIM_NLIMITS; i++) {
49659+ if (!(proc->resmask & (1 << i)))
49660+ continue;
49661+
49662+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
49663+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
49664+ }
49665+
49666+ return;
49667+}
49668+
49669+extern int __gr_process_user_ban(struct user_struct *user);
49670+
49671+int
49672+gr_check_user_change(int real, int effective, int fs)
49673+{
49674+ unsigned int i;
49675+ __u16 num;
49676+ uid_t *uidlist;
49677+ int curuid;
49678+ int realok = 0;
49679+ int effectiveok = 0;
49680+ int fsok = 0;
49681+
49682+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
49683+ struct user_struct *user;
49684+
49685+ if (real == -1)
49686+ goto skipit;
49687+
49688+ user = find_user(real);
49689+ if (user == NULL)
49690+ goto skipit;
49691+
49692+ if (__gr_process_user_ban(user)) {
49693+ /* for find_user */
49694+ free_uid(user);
49695+ return 1;
49696+ }
49697+
49698+ /* for find_user */
49699+ free_uid(user);
49700+
49701+skipit:
49702+#endif
49703+
49704+ if (unlikely(!(gr_status & GR_READY)))
49705+ return 0;
49706+
49707+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
49708+ gr_log_learn_id_change('u', real, effective, fs);
49709+
49710+ num = current->acl->user_trans_num;
49711+ uidlist = current->acl->user_transitions;
49712+
49713+ if (uidlist == NULL)
49714+ return 0;
49715+
49716+ if (real == -1)
49717+ realok = 1;
49718+ if (effective == -1)
49719+ effectiveok = 1;
49720+ if (fs == -1)
49721+ fsok = 1;
49722+
49723+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
49724+ for (i = 0; i < num; i++) {
49725+ curuid = (int)uidlist[i];
49726+ if (real == curuid)
49727+ realok = 1;
49728+ if (effective == curuid)
49729+ effectiveok = 1;
49730+ if (fs == curuid)
49731+ fsok = 1;
49732+ }
49733+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
49734+ for (i = 0; i < num; i++) {
49735+ curuid = (int)uidlist[i];
49736+ if (real == curuid)
49737+ break;
49738+ if (effective == curuid)
49739+ break;
49740+ if (fs == curuid)
49741+ break;
49742+ }
49743+ /* not in deny list */
49744+ if (i == num) {
49745+ realok = 1;
49746+ effectiveok = 1;
49747+ fsok = 1;
49748+ }
49749+ }
49750+
49751+ if (realok && effectiveok && fsok)
49752+ return 0;
49753+ else {
49754+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
49755+ return 1;
49756+ }
49757+}
49758+
49759+int
49760+gr_check_group_change(int real, int effective, int fs)
49761+{
49762+ unsigned int i;
49763+ __u16 num;
49764+ gid_t *gidlist;
49765+ int curgid;
49766+ int realok = 0;
49767+ int effectiveok = 0;
49768+ int fsok = 0;
49769+
49770+ if (unlikely(!(gr_status & GR_READY)))
49771+ return 0;
49772+
49773+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
49774+ gr_log_learn_id_change('g', real, effective, fs);
49775+
49776+ num = current->acl->group_trans_num;
49777+ gidlist = current->acl->group_transitions;
49778+
49779+ if (gidlist == NULL)
49780+ return 0;
49781+
49782+ if (real == -1)
49783+ realok = 1;
49784+ if (effective == -1)
49785+ effectiveok = 1;
49786+ if (fs == -1)
49787+ fsok = 1;
49788+
49789+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
49790+ for (i = 0; i < num; i++) {
49791+ curgid = (int)gidlist[i];
49792+ if (real == curgid)
49793+ realok = 1;
49794+ if (effective == curgid)
49795+ effectiveok = 1;
49796+ if (fs == curgid)
49797+ fsok = 1;
49798+ }
49799+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
49800+ for (i = 0; i < num; i++) {
49801+ curgid = (int)gidlist[i];
49802+ if (real == curgid)
49803+ break;
49804+ if (effective == curgid)
49805+ break;
49806+ if (fs == curgid)
49807+ break;
49808+ }
49809+ /* not in deny list */
49810+ if (i == num) {
49811+ realok = 1;
49812+ effectiveok = 1;
49813+ fsok = 1;
49814+ }
49815+ }
49816+
49817+ if (realok && effectiveok && fsok)
49818+ return 0;
49819+ else {
49820+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
49821+ return 1;
49822+ }
49823+}
49824+
49825+void
49826+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
49827+{
49828+ struct acl_role_label *role = task->role;
49829+ struct acl_subject_label *subj = NULL;
49830+ struct acl_object_label *obj;
49831+ struct file *filp;
49832+
49833+ if (unlikely(!(gr_status & GR_READY)))
49834+ return;
49835+
49836+ filp = task->exec_file;
49837+
49838+ /* kernel process, we'll give them the kernel role */
49839+ if (unlikely(!filp)) {
49840+ task->role = kernel_role;
49841+ task->acl = kernel_role->root_label;
49842+ return;
49843+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
49844+ role = lookup_acl_role_label(task, uid, gid);
49845+
49846+ /* perform subject lookup in possibly new role
49847+ we can use this result below in the case where role == task->role
49848+ */
49849+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
49850+
49851+ /* if we changed uid/gid, but result in the same role
49852+ and are using inheritance, don't lose the inherited subject
49853+ if current subject is other than what normal lookup
49854+ would result in, we arrived via inheritance, don't
49855+ lose subject
49856+ */
49857+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
49858+ (subj == task->acl)))
49859+ task->acl = subj;
49860+
49861+ task->role = role;
49862+
49863+ task->is_writable = 0;
49864+
49865+ /* ignore additional mmap checks for processes that are writable
49866+ by the default ACL */
49867+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
49868+ if (unlikely(obj->mode & GR_WRITE))
49869+ task->is_writable = 1;
49870+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
49871+ if (unlikely(obj->mode & GR_WRITE))
49872+ task->is_writable = 1;
49873+
49874+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49875+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49876+#endif
49877+
49878+ gr_set_proc_res(task);
49879+
49880+ return;
49881+}
49882+
49883+int
49884+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
49885+ const int unsafe_share)
49886+{
49887+ struct task_struct *task = current;
49888+ struct acl_subject_label *newacl;
49889+ struct acl_object_label *obj;
49890+ __u32 retmode;
49891+
49892+ if (unlikely(!(gr_status & GR_READY)))
49893+ return 0;
49894+
49895+ newacl = chk_subj_label(dentry, mnt, task->role);
49896+
49897+ task_lock(task);
49898+ if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
49899+ !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
49900+ !(task->role->roletype & GR_ROLE_GOD) &&
49901+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
49902+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
49903+ task_unlock(task);
49904+ if (unsafe_share)
49905+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
49906+ else
49907+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
49908+ return -EACCES;
49909+ }
49910+ task_unlock(task);
49911+
49912+ obj = chk_obj_label(dentry, mnt, task->acl);
49913+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
49914+
49915+ if (!(task->acl->mode & GR_INHERITLEARN) &&
49916+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
49917+ if (obj->nested)
49918+ task->acl = obj->nested;
49919+ else
49920+ task->acl = newacl;
49921+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
49922+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
49923+
49924+ task->is_writable = 0;
49925+
49926+ /* ignore additional mmap checks for processes that are writable
49927+ by the default ACL */
49928+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
49929+ if (unlikely(obj->mode & GR_WRITE))
49930+ task->is_writable = 1;
49931+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
49932+ if (unlikely(obj->mode & GR_WRITE))
49933+ task->is_writable = 1;
49934+
49935+ gr_set_proc_res(task);
49936+
49937+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49938+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49939+#endif
49940+ return 0;
49941+}
49942+
49943+/* always called with valid inodev ptr */
49944+static void
49945+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
49946+{
49947+ struct acl_object_label *matchpo;
49948+ struct acl_subject_label *matchps;
49949+ struct acl_subject_label *subj;
49950+ struct acl_role_label *role;
49951+ unsigned int x;
49952+
49953+ FOR_EACH_ROLE_START(role)
49954+ FOR_EACH_SUBJECT_START(role, subj, x)
49955+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
49956+ matchpo->mode |= GR_DELETED;
49957+ FOR_EACH_SUBJECT_END(subj,x)
49958+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
49959+ if (subj->inode == ino && subj->device == dev)
49960+ subj->mode |= GR_DELETED;
49961+ FOR_EACH_NESTED_SUBJECT_END(subj)
49962+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
49963+ matchps->mode |= GR_DELETED;
49964+ FOR_EACH_ROLE_END(role)
49965+
49966+ inodev->nentry->deleted = 1;
49967+
49968+ return;
49969+}
49970+
49971+void
49972+gr_handle_delete(const ino_t ino, const dev_t dev)
49973+{
49974+ struct inodev_entry *inodev;
49975+
49976+ if (unlikely(!(gr_status & GR_READY)))
49977+ return;
49978+
49979+ write_lock(&gr_inode_lock);
49980+ inodev = lookup_inodev_entry(ino, dev);
49981+ if (inodev != NULL)
49982+ do_handle_delete(inodev, ino, dev);
49983+ write_unlock(&gr_inode_lock);
49984+
49985+ return;
49986+}
49987+
49988+static void
49989+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
49990+ const ino_t newinode, const dev_t newdevice,
49991+ struct acl_subject_label *subj)
49992+{
49993+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
49994+ struct acl_object_label *match;
49995+
49996+ match = subj->obj_hash[index];
49997+
49998+ while (match && (match->inode != oldinode ||
49999+ match->device != olddevice ||
50000+ !(match->mode & GR_DELETED)))
50001+ match = match->next;
50002+
50003+ if (match && (match->inode == oldinode)
50004+ && (match->device == olddevice)
50005+ && (match->mode & GR_DELETED)) {
50006+ if (match->prev == NULL) {
50007+ subj->obj_hash[index] = match->next;
50008+ if (match->next != NULL)
50009+ match->next->prev = NULL;
50010+ } else {
50011+ match->prev->next = match->next;
50012+ if (match->next != NULL)
50013+ match->next->prev = match->prev;
50014+ }
50015+ match->prev = NULL;
50016+ match->next = NULL;
50017+ match->inode = newinode;
50018+ match->device = newdevice;
50019+ match->mode &= ~GR_DELETED;
50020+
50021+ insert_acl_obj_label(match, subj);
50022+ }
50023+
50024+ return;
50025+}
50026+
50027+static void
50028+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
50029+ const ino_t newinode, const dev_t newdevice,
50030+ struct acl_role_label *role)
50031+{
50032+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
50033+ struct acl_subject_label *match;
50034+
50035+ match = role->subj_hash[index];
50036+
50037+ while (match && (match->inode != oldinode ||
50038+ match->device != olddevice ||
50039+ !(match->mode & GR_DELETED)))
50040+ match = match->next;
50041+
50042+ if (match && (match->inode == oldinode)
50043+ && (match->device == olddevice)
50044+ && (match->mode & GR_DELETED)) {
50045+ if (match->prev == NULL) {
50046+ role->subj_hash[index] = match->next;
50047+ if (match->next != NULL)
50048+ match->next->prev = NULL;
50049+ } else {
50050+ match->prev->next = match->next;
50051+ if (match->next != NULL)
50052+ match->next->prev = match->prev;
50053+ }
50054+ match->prev = NULL;
50055+ match->next = NULL;
50056+ match->inode = newinode;
50057+ match->device = newdevice;
50058+ match->mode &= ~GR_DELETED;
50059+
50060+ insert_acl_subj_label(match, role);
50061+ }
50062+
50063+ return;
50064+}
50065+
50066+static void
50067+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
50068+ const ino_t newinode, const dev_t newdevice)
50069+{
50070+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
50071+ struct inodev_entry *match;
50072+
50073+ match = inodev_set.i_hash[index];
50074+
50075+ while (match && (match->nentry->inode != oldinode ||
50076+ match->nentry->device != olddevice || !match->nentry->deleted))
50077+ match = match->next;
50078+
50079+ if (match && (match->nentry->inode == oldinode)
50080+ && (match->nentry->device == olddevice) &&
50081+ match->nentry->deleted) {
50082+ if (match->prev == NULL) {
50083+ inodev_set.i_hash[index] = match->next;
50084+ if (match->next != NULL)
50085+ match->next->prev = NULL;
50086+ } else {
50087+ match->prev->next = match->next;
50088+ if (match->next != NULL)
50089+ match->next->prev = match->prev;
50090+ }
50091+ match->prev = NULL;
50092+ match->next = NULL;
50093+ match->nentry->inode = newinode;
50094+ match->nentry->device = newdevice;
50095+ match->nentry->deleted = 0;
50096+
50097+ insert_inodev_entry(match);
50098+ }
50099+
50100+ return;
50101+}
50102+
50103+static void
50104+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
50105+{
50106+ struct acl_subject_label *subj;
50107+ struct acl_role_label *role;
50108+ unsigned int x;
50109+
50110+ FOR_EACH_ROLE_START(role)
50111+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
50112+
50113+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
50114+ if ((subj->inode == ino) && (subj->device == dev)) {
50115+ subj->inode = ino;
50116+ subj->device = dev;
50117+ }
50118+ FOR_EACH_NESTED_SUBJECT_END(subj)
50119+ FOR_EACH_SUBJECT_START(role, subj, x)
50120+ update_acl_obj_label(matchn->inode, matchn->device,
50121+ ino, dev, subj);
50122+ FOR_EACH_SUBJECT_END(subj,x)
50123+ FOR_EACH_ROLE_END(role)
50124+
50125+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
50126+
50127+ return;
50128+}
50129+
50130+static void
50131+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
50132+ const struct vfsmount *mnt)
50133+{
50134+ ino_t ino = dentry->d_inode->i_ino;
50135+ dev_t dev = __get_dev(dentry);
50136+
50137+ __do_handle_create(matchn, ino, dev);
50138+
50139+ return;
50140+}
50141+
50142+void
50143+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
50144+{
50145+ struct name_entry *matchn;
50146+
50147+ if (unlikely(!(gr_status & GR_READY)))
50148+ return;
50149+
50150+ preempt_disable();
50151+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
50152+
50153+ if (unlikely((unsigned long)matchn)) {
50154+ write_lock(&gr_inode_lock);
50155+ do_handle_create(matchn, dentry, mnt);
50156+ write_unlock(&gr_inode_lock);
50157+ }
50158+ preempt_enable();
50159+
50160+ return;
50161+}
50162+
50163+void
50164+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
50165+{
50166+ struct name_entry *matchn;
50167+
50168+ if (unlikely(!(gr_status & GR_READY)))
50169+ return;
50170+
50171+ preempt_disable();
50172+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
50173+
50174+ if (unlikely((unsigned long)matchn)) {
50175+ write_lock(&gr_inode_lock);
50176+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
50177+ write_unlock(&gr_inode_lock);
50178+ }
50179+ preempt_enable();
50180+
50181+ return;
50182+}
50183+
50184+void
50185+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
50186+ struct dentry *old_dentry,
50187+ struct dentry *new_dentry,
50188+ struct vfsmount *mnt, const __u8 replace)
50189+{
50190+ struct name_entry *matchn;
50191+ struct inodev_entry *inodev;
50192+ struct inode *inode = new_dentry->d_inode;
50193+ ino_t old_ino = old_dentry->d_inode->i_ino;
50194+ dev_t old_dev = __get_dev(old_dentry);
50195+
50196+ /* vfs_rename swaps the name and parent link for old_dentry and
50197+ new_dentry
50198+ at this point, old_dentry has the new name, parent link, and inode
50199+ for the renamed file
50200+ if a file is being replaced by a rename, new_dentry has the inode
50201+ and name for the replaced file
50202+ */
50203+
50204+ if (unlikely(!(gr_status & GR_READY)))
50205+ return;
50206+
50207+ preempt_disable();
50208+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
50209+
50210+ /* we wouldn't have to check d_inode if it weren't for
50211+ NFS silly-renaming
50212+ */
50213+
50214+ write_lock(&gr_inode_lock);
50215+ if (unlikely(replace && inode)) {
50216+ ino_t new_ino = inode->i_ino;
50217+ dev_t new_dev = __get_dev(new_dentry);
50218+
50219+ inodev = lookup_inodev_entry(new_ino, new_dev);
50220+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
50221+ do_handle_delete(inodev, new_ino, new_dev);
50222+ }
50223+
50224+ inodev = lookup_inodev_entry(old_ino, old_dev);
50225+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
50226+ do_handle_delete(inodev, old_ino, old_dev);
50227+
50228+ if (unlikely((unsigned long)matchn))
50229+ do_handle_create(matchn, old_dentry, mnt);
50230+
50231+ write_unlock(&gr_inode_lock);
50232+ preempt_enable();
50233+
50234+ return;
50235+}
50236+
50237+static int
50238+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
50239+ unsigned char **sum)
50240+{
50241+ struct acl_role_label *r;
50242+ struct role_allowed_ip *ipp;
50243+ struct role_transition *trans;
50244+ unsigned int i;
50245+ int found = 0;
50246+ u32 curr_ip = current->signal->curr_ip;
50247+
50248+ current->signal->saved_ip = curr_ip;
50249+
50250+ /* check transition table */
50251+
50252+ for (trans = current->role->transitions; trans; trans = trans->next) {
50253+ if (!strcmp(rolename, trans->rolename)) {
50254+ found = 1;
50255+ break;
50256+ }
50257+ }
50258+
50259+ if (!found)
50260+ return 0;
50261+
50262+ /* handle special roles that do not require authentication
50263+ and check ip */
50264+
50265+ FOR_EACH_ROLE_START(r)
50266+ if (!strcmp(rolename, r->rolename) &&
50267+ (r->roletype & GR_ROLE_SPECIAL)) {
50268+ found = 0;
50269+ if (r->allowed_ips != NULL) {
50270+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
50271+ if ((ntohl(curr_ip) & ipp->netmask) ==
50272+ (ntohl(ipp->addr) & ipp->netmask))
50273+ found = 1;
50274+ }
50275+ } else
50276+ found = 2;
50277+ if (!found)
50278+ return 0;
50279+
50280+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
50281+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
50282+ *salt = NULL;
50283+ *sum = NULL;
50284+ return 1;
50285+ }
50286+ }
50287+ FOR_EACH_ROLE_END(r)
50288+
50289+ for (i = 0; i < num_sprole_pws; i++) {
50290+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
50291+ *salt = acl_special_roles[i]->salt;
50292+ *sum = acl_special_roles[i]->sum;
50293+ return 1;
50294+ }
50295+ }
50296+
50297+ return 0;
50298+}
50299+
50300+static void
50301+assign_special_role(char *rolename)
50302+{
50303+ struct acl_object_label *obj;
50304+ struct acl_role_label *r;
50305+ struct acl_role_label *assigned = NULL;
50306+ struct task_struct *tsk;
50307+ struct file *filp;
50308+
50309+ FOR_EACH_ROLE_START(r)
50310+ if (!strcmp(rolename, r->rolename) &&
50311+ (r->roletype & GR_ROLE_SPECIAL)) {
50312+ assigned = r;
50313+ break;
50314+ }
50315+ FOR_EACH_ROLE_END(r)
50316+
50317+ if (!assigned)
50318+ return;
50319+
50320+ read_lock(&tasklist_lock);
50321+ read_lock(&grsec_exec_file_lock);
50322+
50323+ tsk = current->real_parent;
50324+ if (tsk == NULL)
50325+ goto out_unlock;
50326+
50327+ filp = tsk->exec_file;
50328+ if (filp == NULL)
50329+ goto out_unlock;
50330+
50331+ tsk->is_writable = 0;
50332+
50333+ tsk->acl_sp_role = 1;
50334+ tsk->acl_role_id = ++acl_sp_role_value;
50335+ tsk->role = assigned;
50336+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
50337+
50338+ /* ignore additional mmap checks for processes that are writable
50339+ by the default ACL */
50340+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50341+ if (unlikely(obj->mode & GR_WRITE))
50342+ tsk->is_writable = 1;
50343+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
50344+ if (unlikely(obj->mode & GR_WRITE))
50345+ tsk->is_writable = 1;
50346+
50347+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50348+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
50349+#endif
50350+
50351+out_unlock:
50352+ read_unlock(&grsec_exec_file_lock);
50353+ read_unlock(&tasklist_lock);
50354+ return;
50355+}
50356+
50357+int gr_check_secure_terminal(struct task_struct *task)
50358+{
50359+ struct task_struct *p, *p2, *p3;
50360+ struct files_struct *files;
50361+ struct fdtable *fdt;
50362+ struct file *our_file = NULL, *file;
50363+ int i;
50364+
50365+ if (task->signal->tty == NULL)
50366+ return 1;
50367+
50368+ files = get_files_struct(task);
50369+ if (files != NULL) {
50370+ rcu_read_lock();
50371+ fdt = files_fdtable(files);
50372+ for (i=0; i < fdt->max_fds; i++) {
50373+ file = fcheck_files(files, i);
50374+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
50375+ get_file(file);
50376+ our_file = file;
50377+ }
50378+ }
50379+ rcu_read_unlock();
50380+ put_files_struct(files);
50381+ }
50382+
50383+ if (our_file == NULL)
50384+ return 1;
50385+
50386+ read_lock(&tasklist_lock);
50387+ do_each_thread(p2, p) {
50388+ files = get_files_struct(p);
50389+ if (files == NULL ||
50390+ (p->signal && p->signal->tty == task->signal->tty)) {
50391+ if (files != NULL)
50392+ put_files_struct(files);
50393+ continue;
50394+ }
50395+ rcu_read_lock();
50396+ fdt = files_fdtable(files);
50397+ for (i=0; i < fdt->max_fds; i++) {
50398+ file = fcheck_files(files, i);
50399+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
50400+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
50401+ p3 = task;
50402+ while (p3->pid > 0) {
50403+ if (p3 == p)
50404+ break;
50405+ p3 = p3->real_parent;
50406+ }
50407+ if (p3 == p)
50408+ break;
50409+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
50410+ gr_handle_alertkill(p);
50411+ rcu_read_unlock();
50412+ put_files_struct(files);
50413+ read_unlock(&tasklist_lock);
50414+ fput(our_file);
50415+ return 0;
50416+ }
50417+ }
50418+ rcu_read_unlock();
50419+ put_files_struct(files);
50420+ } while_each_thread(p2, p);
50421+ read_unlock(&tasklist_lock);
50422+
50423+ fput(our_file);
50424+ return 1;
50425+}
50426+
50427+ssize_t
50428+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
50429+{
50430+ struct gr_arg_wrapper uwrap;
50431+ unsigned char *sprole_salt = NULL;
50432+ unsigned char *sprole_sum = NULL;
50433+ int error = sizeof (struct gr_arg_wrapper);
50434+ int error2 = 0;
50435+
50436+ mutex_lock(&gr_dev_mutex);
50437+
50438+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
50439+ error = -EPERM;
50440+ goto out;
50441+ }
50442+
50443+ if (count != sizeof (struct gr_arg_wrapper)) {
50444+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
50445+ error = -EINVAL;
50446+ goto out;
50447+ }
50448+
50449+
50450+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
50451+ gr_auth_expires = 0;
50452+ gr_auth_attempts = 0;
50453+ }
50454+
50455+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
50456+ error = -EFAULT;
50457+ goto out;
50458+ }
50459+
50460+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
50461+ error = -EINVAL;
50462+ goto out;
50463+ }
50464+
50465+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
50466+ error = -EFAULT;
50467+ goto out;
50468+ }
50469+
50470+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
50471+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
50472+ time_after(gr_auth_expires, get_seconds())) {
50473+ error = -EBUSY;
50474+ goto out;
50475+ }
50476+
50477+ /* if non-root trying to do anything other than use a special role,
50478+ do not attempt authentication, do not count towards authentication
50479+ locking
50480+ */
50481+
50482+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
50483+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
50484+ current_uid()) {
50485+ error = -EPERM;
50486+ goto out;
50487+ }
50488+
50489+ /* ensure pw and special role name are null terminated */
50490+
50491+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
50492+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
50493+
50494+ /* Okay.
50495+ * We have our enough of the argument structure..(we have yet
50496+ * to copy_from_user the tables themselves) . Copy the tables
50497+ * only if we need them, i.e. for loading operations. */
50498+
50499+ switch (gr_usermode->mode) {
50500+ case GR_STATUS:
50501+ if (gr_status & GR_READY) {
50502+ error = 1;
50503+ if (!gr_check_secure_terminal(current))
50504+ error = 3;
50505+ } else
50506+ error = 2;
50507+ goto out;
50508+ case GR_SHUTDOWN:
50509+ if ((gr_status & GR_READY)
50510+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
50511+ pax_open_kernel();
50512+ gr_status &= ~GR_READY;
50513+ pax_close_kernel();
50514+
50515+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
50516+ free_variables();
50517+ memset(gr_usermode, 0, sizeof (struct gr_arg));
50518+ memset(gr_system_salt, 0, GR_SALT_LEN);
50519+ memset(gr_system_sum, 0, GR_SHA_LEN);
50520+ } else if (gr_status & GR_READY) {
50521+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
50522+ error = -EPERM;
50523+ } else {
50524+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
50525+ error = -EAGAIN;
50526+ }
50527+ break;
50528+ case GR_ENABLE:
50529+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
50530+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
50531+ else {
50532+ if (gr_status & GR_READY)
50533+ error = -EAGAIN;
50534+ else
50535+ error = error2;
50536+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
50537+ }
50538+ break;
50539+ case GR_RELOAD:
50540+ if (!(gr_status & GR_READY)) {
50541+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
50542+ error = -EAGAIN;
50543+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
50544+ preempt_disable();
50545+
50546+ pax_open_kernel();
50547+ gr_status &= ~GR_READY;
50548+ pax_close_kernel();
50549+
50550+ free_variables();
50551+ if (!(error2 = gracl_init(gr_usermode))) {
50552+ preempt_enable();
50553+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
50554+ } else {
50555+ preempt_enable();
50556+ error = error2;
50557+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
50558+ }
50559+ } else {
50560+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
50561+ error = -EPERM;
50562+ }
50563+ break;
50564+ case GR_SEGVMOD:
50565+ if (unlikely(!(gr_status & GR_READY))) {
50566+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
50567+ error = -EAGAIN;
50568+ break;
50569+ }
50570+
50571+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
50572+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
50573+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
50574+ struct acl_subject_label *segvacl;
50575+ segvacl =
50576+ lookup_acl_subj_label(gr_usermode->segv_inode,
50577+ gr_usermode->segv_device,
50578+ current->role);
50579+ if (segvacl) {
50580+ segvacl->crashes = 0;
50581+ segvacl->expires = 0;
50582+ }
50583+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
50584+ gr_remove_uid(gr_usermode->segv_uid);
50585+ }
50586+ } else {
50587+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
50588+ error = -EPERM;
50589+ }
50590+ break;
50591+ case GR_SPROLE:
50592+ case GR_SPROLEPAM:
50593+ if (unlikely(!(gr_status & GR_READY))) {
50594+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
50595+ error = -EAGAIN;
50596+ break;
50597+ }
50598+
50599+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
50600+ current->role->expires = 0;
50601+ current->role->auth_attempts = 0;
50602+ }
50603+
50604+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
50605+ time_after(current->role->expires, get_seconds())) {
50606+ error = -EBUSY;
50607+ goto out;
50608+ }
50609+
50610+ if (lookup_special_role_auth
50611+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
50612+ && ((!sprole_salt && !sprole_sum)
50613+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
50614+ char *p = "";
50615+ assign_special_role(gr_usermode->sp_role);
50616+ read_lock(&tasklist_lock);
50617+ if (current->real_parent)
50618+ p = current->real_parent->role->rolename;
50619+ read_unlock(&tasklist_lock);
50620+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
50621+ p, acl_sp_role_value);
50622+ } else {
50623+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
50624+ error = -EPERM;
50625+ if(!(current->role->auth_attempts++))
50626+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
50627+
50628+ goto out;
50629+ }
50630+ break;
50631+ case GR_UNSPROLE:
50632+ if (unlikely(!(gr_status & GR_READY))) {
50633+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
50634+ error = -EAGAIN;
50635+ break;
50636+ }
50637+
50638+ if (current->role->roletype & GR_ROLE_SPECIAL) {
50639+ char *p = "";
50640+ int i = 0;
50641+
50642+ read_lock(&tasklist_lock);
50643+ if (current->real_parent) {
50644+ p = current->real_parent->role->rolename;
50645+ i = current->real_parent->acl_role_id;
50646+ }
50647+ read_unlock(&tasklist_lock);
50648+
50649+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
50650+ gr_set_acls(1);
50651+ } else {
50652+ error = -EPERM;
50653+ goto out;
50654+ }
50655+ break;
50656+ default:
50657+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
50658+ error = -EINVAL;
50659+ break;
50660+ }
50661+
50662+ if (error != -EPERM)
50663+ goto out;
50664+
50665+ if(!(gr_auth_attempts++))
50666+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
50667+
50668+ out:
50669+ mutex_unlock(&gr_dev_mutex);
50670+ return error;
50671+}
50672+
50673+/* must be called with
50674+ rcu_read_lock();
50675+ read_lock(&tasklist_lock);
50676+ read_lock(&grsec_exec_file_lock);
50677+*/
50678+int gr_apply_subject_to_task(struct task_struct *task)
50679+{
50680+ struct acl_object_label *obj;
50681+ char *tmpname;
50682+ struct acl_subject_label *tmpsubj;
50683+ struct file *filp;
50684+ struct name_entry *nmatch;
50685+
50686+ filp = task->exec_file;
50687+ if (filp == NULL)
50688+ return 0;
50689+
50690+ /* the following is to apply the correct subject
50691+ on binaries running when the RBAC system
50692+ is enabled, when the binaries have been
50693+ replaced or deleted since their execution
50694+ -----
50695+ when the RBAC system starts, the inode/dev
50696+ from exec_file will be one the RBAC system
50697+ is unaware of. It only knows the inode/dev
50698+ of the present file on disk, or the absence
50699+ of it.
50700+ */
50701+ preempt_disable();
50702+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
50703+
50704+ nmatch = lookup_name_entry(tmpname);
50705+ preempt_enable();
50706+ tmpsubj = NULL;
50707+ if (nmatch) {
50708+ if (nmatch->deleted)
50709+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
50710+ else
50711+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
50712+ if (tmpsubj != NULL)
50713+ task->acl = tmpsubj;
50714+ }
50715+ if (tmpsubj == NULL)
50716+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
50717+ task->role);
50718+ if (task->acl) {
50719+ task->is_writable = 0;
50720+ /* ignore additional mmap checks for processes that are writable
50721+ by the default ACL */
50722+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50723+ if (unlikely(obj->mode & GR_WRITE))
50724+ task->is_writable = 1;
50725+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
50726+ if (unlikely(obj->mode & GR_WRITE))
50727+ task->is_writable = 1;
50728+
50729+ gr_set_proc_res(task);
50730+
50731+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50732+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
50733+#endif
50734+ } else {
50735+ return 1;
50736+ }
50737+
50738+ return 0;
50739+}
50740+
50741+int
50742+gr_set_acls(const int type)
50743+{
50744+ struct task_struct *task, *task2;
50745+ struct acl_role_label *role = current->role;
50746+ __u16 acl_role_id = current->acl_role_id;
50747+ const struct cred *cred;
50748+ int ret;
50749+
50750+ rcu_read_lock();
50751+ read_lock(&tasklist_lock);
50752+ read_lock(&grsec_exec_file_lock);
50753+ do_each_thread(task2, task) {
50754+ /* check to see if we're called from the exit handler,
50755+ if so, only replace ACLs that have inherited the admin
50756+ ACL */
50757+
50758+ if (type && (task->role != role ||
50759+ task->acl_role_id != acl_role_id))
50760+ continue;
50761+
50762+ task->acl_role_id = 0;
50763+ task->acl_sp_role = 0;
50764+
50765+ if (task->exec_file) {
50766+ cred = __task_cred(task);
50767+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
50768+ ret = gr_apply_subject_to_task(task);
50769+ if (ret) {
50770+ read_unlock(&grsec_exec_file_lock);
50771+ read_unlock(&tasklist_lock);
50772+ rcu_read_unlock();
50773+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
50774+ return ret;
50775+ }
50776+ } else {
50777+ // it's a kernel process
50778+ task->role = kernel_role;
50779+ task->acl = kernel_role->root_label;
50780+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
50781+ task->acl->mode &= ~GR_PROCFIND;
50782+#endif
50783+ }
50784+ } while_each_thread(task2, task);
50785+ read_unlock(&grsec_exec_file_lock);
50786+ read_unlock(&tasklist_lock);
50787+ rcu_read_unlock();
50788+
50789+ return 0;
50790+}
50791+
50792+void
50793+gr_learn_resource(const struct task_struct *task,
50794+ const int res, const unsigned long wanted, const int gt)
50795+{
50796+ struct acl_subject_label *acl;
50797+ const struct cred *cred;
50798+
50799+ if (unlikely((gr_status & GR_READY) &&
50800+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
50801+ goto skip_reslog;
50802+
50803+#ifdef CONFIG_GRKERNSEC_RESLOG
50804+ gr_log_resource(task, res, wanted, gt);
50805+#endif
50806+ skip_reslog:
50807+
50808+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
50809+ return;
50810+
50811+ acl = task->acl;
50812+
50813+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
50814+ !(acl->resmask & (1 << (unsigned short) res))))
50815+ return;
50816+
50817+ if (wanted >= acl->res[res].rlim_cur) {
50818+ unsigned long res_add;
50819+
50820+ res_add = wanted;
50821+ switch (res) {
50822+ case RLIMIT_CPU:
50823+ res_add += GR_RLIM_CPU_BUMP;
50824+ break;
50825+ case RLIMIT_FSIZE:
50826+ res_add += GR_RLIM_FSIZE_BUMP;
50827+ break;
50828+ case RLIMIT_DATA:
50829+ res_add += GR_RLIM_DATA_BUMP;
50830+ break;
50831+ case RLIMIT_STACK:
50832+ res_add += GR_RLIM_STACK_BUMP;
50833+ break;
50834+ case RLIMIT_CORE:
50835+ res_add += GR_RLIM_CORE_BUMP;
50836+ break;
50837+ case RLIMIT_RSS:
50838+ res_add += GR_RLIM_RSS_BUMP;
50839+ break;
50840+ case RLIMIT_NPROC:
50841+ res_add += GR_RLIM_NPROC_BUMP;
50842+ break;
50843+ case RLIMIT_NOFILE:
50844+ res_add += GR_RLIM_NOFILE_BUMP;
50845+ break;
50846+ case RLIMIT_MEMLOCK:
50847+ res_add += GR_RLIM_MEMLOCK_BUMP;
50848+ break;
50849+ case RLIMIT_AS:
50850+ res_add += GR_RLIM_AS_BUMP;
50851+ break;
50852+ case RLIMIT_LOCKS:
50853+ res_add += GR_RLIM_LOCKS_BUMP;
50854+ break;
50855+ case RLIMIT_SIGPENDING:
50856+ res_add += GR_RLIM_SIGPENDING_BUMP;
50857+ break;
50858+ case RLIMIT_MSGQUEUE:
50859+ res_add += GR_RLIM_MSGQUEUE_BUMP;
50860+ break;
50861+ case RLIMIT_NICE:
50862+ res_add += GR_RLIM_NICE_BUMP;
50863+ break;
50864+ case RLIMIT_RTPRIO:
50865+ res_add += GR_RLIM_RTPRIO_BUMP;
50866+ break;
50867+ case RLIMIT_RTTIME:
50868+ res_add += GR_RLIM_RTTIME_BUMP;
50869+ break;
50870+ }
50871+
50872+ acl->res[res].rlim_cur = res_add;
50873+
50874+ if (wanted > acl->res[res].rlim_max)
50875+ acl->res[res].rlim_max = res_add;
50876+
50877+ /* only log the subject filename, since resource logging is supported for
50878+ single-subject learning only */
50879+ rcu_read_lock();
50880+ cred = __task_cred(task);
50881+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
50882+ task->role->roletype, cred->uid, cred->gid, acl->filename,
50883+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
50884+ "", (unsigned long) res, &task->signal->saved_ip);
50885+ rcu_read_unlock();
50886+ }
50887+
50888+ return;
50889+}
50890+
50891+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
50892+void
50893+pax_set_initial_flags(struct linux_binprm *bprm)
50894+{
50895+ struct task_struct *task = current;
50896+ struct acl_subject_label *proc;
50897+ unsigned long flags;
50898+
50899+ if (unlikely(!(gr_status & GR_READY)))
50900+ return;
50901+
50902+ flags = pax_get_flags(task);
50903+
50904+ proc = task->acl;
50905+
50906+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
50907+ flags &= ~MF_PAX_PAGEEXEC;
50908+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
50909+ flags &= ~MF_PAX_SEGMEXEC;
50910+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
50911+ flags &= ~MF_PAX_RANDMMAP;
50912+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
50913+ flags &= ~MF_PAX_EMUTRAMP;
50914+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
50915+ flags &= ~MF_PAX_MPROTECT;
50916+
50917+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
50918+ flags |= MF_PAX_PAGEEXEC;
50919+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
50920+ flags |= MF_PAX_SEGMEXEC;
50921+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
50922+ flags |= MF_PAX_RANDMMAP;
50923+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
50924+ flags |= MF_PAX_EMUTRAMP;
50925+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
50926+ flags |= MF_PAX_MPROTECT;
50927+
50928+ pax_set_flags(task, flags);
50929+
50930+ return;
50931+}
50932+#endif
50933+
50934+#ifdef CONFIG_SYSCTL
50935+/* Eric Biederman likes breaking userland ABI and every inode-based security
50936+ system to save 35kb of memory */
50937+
50938+/* we modify the passed in filename, but adjust it back before returning */
50939+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
50940+{
50941+ struct name_entry *nmatch;
50942+ char *p, *lastp = NULL;
50943+ struct acl_object_label *obj = NULL, *tmp;
50944+ struct acl_subject_label *tmpsubj;
50945+ char c = '\0';
50946+
50947+ read_lock(&gr_inode_lock);
50948+
50949+ p = name + len - 1;
50950+ do {
50951+ nmatch = lookup_name_entry(name);
50952+ if (lastp != NULL)
50953+ *lastp = c;
50954+
50955+ if (nmatch == NULL)
50956+ goto next_component;
50957+ tmpsubj = current->acl;
50958+ do {
50959+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
50960+ if (obj != NULL) {
50961+ tmp = obj->globbed;
50962+ while (tmp) {
50963+ if (!glob_match(tmp->filename, name)) {
50964+ obj = tmp;
50965+ goto found_obj;
50966+ }
50967+ tmp = tmp->next;
50968+ }
50969+ goto found_obj;
50970+ }
50971+ } while ((tmpsubj = tmpsubj->parent_subject));
50972+next_component:
50973+ /* end case */
50974+ if (p == name)
50975+ break;
50976+
50977+ while (*p != '/')
50978+ p--;
50979+ if (p == name)
50980+ lastp = p + 1;
50981+ else {
50982+ lastp = p;
50983+ p--;
50984+ }
50985+ c = *lastp;
50986+ *lastp = '\0';
50987+ } while (1);
50988+found_obj:
50989+ read_unlock(&gr_inode_lock);
50990+ /* obj returned will always be non-null */
50991+ return obj;
50992+}
50993+
50994+/* returns 0 when allowing, non-zero on error
50995+ op of 0 is used for readdir, so we don't log the names of hidden files
50996+*/
50997+__u32
50998+gr_handle_sysctl(const struct ctl_table *table, const int op)
50999+{
51000+ struct ctl_table *tmp;
51001+ const char *proc_sys = "/proc/sys";
51002+ char *path;
51003+ struct acl_object_label *obj;
51004+ unsigned short len = 0, pos = 0, depth = 0, i;
51005+ __u32 err = 0;
51006+ __u32 mode = 0;
51007+
51008+ if (unlikely(!(gr_status & GR_READY)))
51009+ return 0;
51010+
51011+ /* for now, ignore operations on non-sysctl entries if it's not a
51012+ readdir*/
51013+ if (table->child != NULL && op != 0)
51014+ return 0;
51015+
51016+ mode |= GR_FIND;
51017+ /* it's only a read if it's an entry, read on dirs is for readdir */
51018+ if (op & MAY_READ)
51019+ mode |= GR_READ;
51020+ if (op & MAY_WRITE)
51021+ mode |= GR_WRITE;
51022+
51023+ preempt_disable();
51024+
51025+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
51026+
51027+ /* it's only a read/write if it's an actual entry, not a dir
51028+ (which are opened for readdir)
51029+ */
51030+
51031+ /* convert the requested sysctl entry into a pathname */
51032+
51033+ for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
51034+ len += strlen(tmp->procname);
51035+ len++;
51036+ depth++;
51037+ }
51038+
51039+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
51040+ /* deny */
51041+ goto out;
51042+ }
51043+
51044+ memset(path, 0, PAGE_SIZE);
51045+
51046+ memcpy(path, proc_sys, strlen(proc_sys));
51047+
51048+ pos += strlen(proc_sys);
51049+
51050+ for (; depth > 0; depth--) {
51051+ path[pos] = '/';
51052+ pos++;
51053+ for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
51054+ if (depth == i) {
51055+ memcpy(path + pos, tmp->procname,
51056+ strlen(tmp->procname));
51057+ pos += strlen(tmp->procname);
51058+ }
51059+ i++;
51060+ }
51061+ }
51062+
51063+ obj = gr_lookup_by_name(path, pos);
51064+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
51065+
51066+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
51067+ ((err & mode) != mode))) {
51068+ __u32 new_mode = mode;
51069+
51070+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
51071+
51072+ err = 0;
51073+ gr_log_learn_sysctl(path, new_mode);
51074+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
51075+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
51076+ err = -ENOENT;
51077+ } else if (!(err & GR_FIND)) {
51078+ err = -ENOENT;
51079+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
51080+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
51081+ path, (mode & GR_READ) ? " reading" : "",
51082+ (mode & GR_WRITE) ? " writing" : "");
51083+ err = -EACCES;
51084+ } else if ((err & mode) != mode) {
51085+ err = -EACCES;
51086+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
51087+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
51088+ path, (mode & GR_READ) ? " reading" : "",
51089+ (mode & GR_WRITE) ? " writing" : "");
51090+ err = 0;
51091+ } else
51092+ err = 0;
51093+
51094+ out:
51095+ preempt_enable();
51096+
51097+ return err;
51098+}
51099+#endif
51100+
51101+int
51102+gr_handle_proc_ptrace(struct task_struct *task)
51103+{
51104+ struct file *filp;
51105+ struct task_struct *tmp = task;
51106+ struct task_struct *curtemp = current;
51107+ __u32 retmode;
51108+
51109+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
51110+ if (unlikely(!(gr_status & GR_READY)))
51111+ return 0;
51112+#endif
51113+
51114+ read_lock(&tasklist_lock);
51115+ read_lock(&grsec_exec_file_lock);
51116+ filp = task->exec_file;
51117+
51118+ while (tmp->pid > 0) {
51119+ if (tmp == curtemp)
51120+ break;
51121+ tmp = tmp->real_parent;
51122+ }
51123+
51124+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
51125+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
51126+ read_unlock(&grsec_exec_file_lock);
51127+ read_unlock(&tasklist_lock);
51128+ return 1;
51129+ }
51130+
51131+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
51132+ if (!(gr_status & GR_READY)) {
51133+ read_unlock(&grsec_exec_file_lock);
51134+ read_unlock(&tasklist_lock);
51135+ return 0;
51136+ }
51137+#endif
51138+
51139+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
51140+ read_unlock(&grsec_exec_file_lock);
51141+ read_unlock(&tasklist_lock);
51142+
51143+ if (retmode & GR_NOPTRACE)
51144+ return 1;
51145+
51146+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
51147+ && (current->acl != task->acl || (current->acl != current->role->root_label
51148+ && current->pid != task->pid)))
51149+ return 1;
51150+
51151+ return 0;
51152+}
51153+
51154+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
51155+{
51156+ if (unlikely(!(gr_status & GR_READY)))
51157+ return;
51158+
51159+ if (!(current->role->roletype & GR_ROLE_GOD))
51160+ return;
51161+
51162+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
51163+ p->role->rolename, gr_task_roletype_to_char(p),
51164+ p->acl->filename);
51165+}
51166+
51167+int
51168+gr_handle_ptrace(struct task_struct *task, const long request)
51169+{
51170+ struct task_struct *tmp = task;
51171+ struct task_struct *curtemp = current;
51172+ __u32 retmode;
51173+
51174+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
51175+ if (unlikely(!(gr_status & GR_READY)))
51176+ return 0;
51177+#endif
51178+
51179+ read_lock(&tasklist_lock);
51180+ while (tmp->pid > 0) {
51181+ if (tmp == curtemp)
51182+ break;
51183+ tmp = tmp->real_parent;
51184+ }
51185+
51186+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
51187+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
51188+ read_unlock(&tasklist_lock);
51189+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
51190+ return 1;
51191+ }
51192+ read_unlock(&tasklist_lock);
51193+
51194+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
51195+ if (!(gr_status & GR_READY))
51196+ return 0;
51197+#endif
51198+
51199+ read_lock(&grsec_exec_file_lock);
51200+ if (unlikely(!task->exec_file)) {
51201+ read_unlock(&grsec_exec_file_lock);
51202+ return 0;
51203+ }
51204+
51205+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
51206+ read_unlock(&grsec_exec_file_lock);
51207+
51208+ if (retmode & GR_NOPTRACE) {
51209+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
51210+ return 1;
51211+ }
51212+
51213+ if (retmode & GR_PTRACERD) {
51214+ switch (request) {
51215+ case PTRACE_SEIZE:
51216+ case PTRACE_POKETEXT:
51217+ case PTRACE_POKEDATA:
51218+ case PTRACE_POKEUSR:
51219+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
51220+ case PTRACE_SETREGS:
51221+ case PTRACE_SETFPREGS:
51222+#endif
51223+#ifdef CONFIG_X86
51224+ case PTRACE_SETFPXREGS:
51225+#endif
51226+#ifdef CONFIG_ALTIVEC
51227+ case PTRACE_SETVRREGS:
51228+#endif
51229+ return 1;
51230+ default:
51231+ return 0;
51232+ }
51233+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
51234+ !(current->role->roletype & GR_ROLE_GOD) &&
51235+ (current->acl != task->acl)) {
51236+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
51237+ return 1;
51238+ }
51239+
51240+ return 0;
51241+}
51242+
51243+static int is_writable_mmap(const struct file *filp)
51244+{
51245+ struct task_struct *task = current;
51246+ struct acl_object_label *obj, *obj2;
51247+
51248+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
51249+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
51250+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
51251+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
51252+ task->role->root_label);
51253+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
51254+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
51255+ return 1;
51256+ }
51257+ }
51258+ return 0;
51259+}
51260+
51261+int
51262+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
51263+{
51264+ __u32 mode;
51265+
51266+ if (unlikely(!file || !(prot & PROT_EXEC)))
51267+ return 1;
51268+
51269+ if (is_writable_mmap(file))
51270+ return 0;
51271+
51272+ mode =
51273+ gr_search_file(file->f_path.dentry,
51274+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
51275+ file->f_path.mnt);
51276+
51277+ if (!gr_tpe_allow(file))
51278+ return 0;
51279+
51280+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
51281+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
51282+ return 0;
51283+ } else if (unlikely(!(mode & GR_EXEC))) {
51284+ return 0;
51285+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
51286+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
51287+ return 1;
51288+ }
51289+
51290+ return 1;
51291+}
51292+
51293+int
51294+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
51295+{
51296+ __u32 mode;
51297+
51298+ if (unlikely(!file || !(prot & PROT_EXEC)))
51299+ return 1;
51300+
51301+ if (is_writable_mmap(file))
51302+ return 0;
51303+
51304+ mode =
51305+ gr_search_file(file->f_path.dentry,
51306+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
51307+ file->f_path.mnt);
51308+
51309+ if (!gr_tpe_allow(file))
51310+ return 0;
51311+
51312+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
51313+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
51314+ return 0;
51315+ } else if (unlikely(!(mode & GR_EXEC))) {
51316+ return 0;
51317+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
51318+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
51319+ return 1;
51320+ }
51321+
51322+ return 1;
51323+}
51324+
51325+void
51326+gr_acl_handle_psacct(struct task_struct *task, const long code)
51327+{
51328+ unsigned long runtime;
51329+ unsigned long cputime;
51330+ unsigned int wday, cday;
51331+ __u8 whr, chr;
51332+ __u8 wmin, cmin;
51333+ __u8 wsec, csec;
51334+ struct timespec timeval;
51335+
51336+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
51337+ !(task->acl->mode & GR_PROCACCT)))
51338+ return;
51339+
51340+ do_posix_clock_monotonic_gettime(&timeval);
51341+ runtime = timeval.tv_sec - task->start_time.tv_sec;
51342+ wday = runtime / (3600 * 24);
51343+ runtime -= wday * (3600 * 24);
51344+ whr = runtime / 3600;
51345+ runtime -= whr * 3600;
51346+ wmin = runtime / 60;
51347+ runtime -= wmin * 60;
51348+ wsec = runtime;
51349+
51350+ cputime = (task->utime + task->stime) / HZ;
51351+ cday = cputime / (3600 * 24);
51352+ cputime -= cday * (3600 * 24);
51353+ chr = cputime / 3600;
51354+ cputime -= chr * 3600;
51355+ cmin = cputime / 60;
51356+ cputime -= cmin * 60;
51357+ csec = cputime;
51358+
51359+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
51360+
51361+ return;
51362+}
51363+
51364+void gr_set_kernel_label(struct task_struct *task)
51365+{
51366+ if (gr_status & GR_READY) {
51367+ task->role = kernel_role;
51368+ task->acl = kernel_role->root_label;
51369+ }
51370+ return;
51371+}
51372+
51373+#ifdef CONFIG_TASKSTATS
51374+int gr_is_taskstats_denied(int pid)
51375+{
51376+ struct task_struct *task;
51377+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51378+ const struct cred *cred;
51379+#endif
51380+ int ret = 0;
51381+
51382+ /* restrict taskstats viewing to un-chrooted root users
51383+ who have the 'view' subject flag if the RBAC system is enabled
51384+ */
51385+
51386+ rcu_read_lock();
51387+ read_lock(&tasklist_lock);
51388+ task = find_task_by_vpid(pid);
51389+ if (task) {
51390+#ifdef CONFIG_GRKERNSEC_CHROOT
51391+ if (proc_is_chrooted(task))
51392+ ret = -EACCES;
51393+#endif
51394+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51395+ cred = __task_cred(task);
51396+#ifdef CONFIG_GRKERNSEC_PROC_USER
51397+ if (cred->uid != 0)
51398+ ret = -EACCES;
51399+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51400+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
51401+ ret = -EACCES;
51402+#endif
51403+#endif
51404+ if (gr_status & GR_READY) {
51405+ if (!(task->acl->mode & GR_VIEW))
51406+ ret = -EACCES;
51407+ }
51408+ } else
51409+ ret = -ENOENT;
51410+
51411+ read_unlock(&tasklist_lock);
51412+ rcu_read_unlock();
51413+
51414+ return ret;
51415+}
51416+#endif
51417+
51418+/* AUXV entries are filled via a descendant of search_binary_handler
51419+ after we've already applied the subject for the target
51420+*/
51421+int gr_acl_enable_at_secure(void)
51422+{
51423+ if (unlikely(!(gr_status & GR_READY)))
51424+ return 0;
51425+
51426+ if (current->acl->mode & GR_ATSECURE)
51427+ return 1;
51428+
51429+ return 0;
51430+}
51431+
51432+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
51433+{
51434+ struct task_struct *task = current;
51435+ struct dentry *dentry = file->f_path.dentry;
51436+ struct vfsmount *mnt = file->f_path.mnt;
51437+ struct acl_object_label *obj, *tmp;
51438+ struct acl_subject_label *subj;
51439+ unsigned int bufsize;
51440+ int is_not_root;
51441+ char *path;
51442+ dev_t dev = __get_dev(dentry);
51443+
51444+ if (unlikely(!(gr_status & GR_READY)))
51445+ return 1;
51446+
51447+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
51448+ return 1;
51449+
51450+ /* ignore Eric Biederman */
51451+ if (IS_PRIVATE(dentry->d_inode))
51452+ return 1;
51453+
51454+ subj = task->acl;
51455+ do {
51456+ obj = lookup_acl_obj_label(ino, dev, subj);
51457+ if (obj != NULL)
51458+ return (obj->mode & GR_FIND) ? 1 : 0;
51459+ } while ((subj = subj->parent_subject));
51460+
51461+ /* this is purely an optimization since we're looking for an object
51462+ for the directory we're doing a readdir on
51463+ if it's possible for any globbed object to match the entry we're
51464+ filling into the directory, then the object we find here will be
51465+ an anchor point with attached globbed objects
51466+ */
51467+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
51468+ if (obj->globbed == NULL)
51469+ return (obj->mode & GR_FIND) ? 1 : 0;
51470+
51471+ is_not_root = ((obj->filename[0] == '/') &&
51472+ (obj->filename[1] == '\0')) ? 0 : 1;
51473+ bufsize = PAGE_SIZE - namelen - is_not_root;
51474+
51475+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
51476+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
51477+ return 1;
51478+
51479+ preempt_disable();
51480+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
51481+ bufsize);
51482+
51483+ bufsize = strlen(path);
51484+
51485+ /* if base is "/", don't append an additional slash */
51486+ if (is_not_root)
51487+ *(path + bufsize) = '/';
51488+ memcpy(path + bufsize + is_not_root, name, namelen);
51489+ *(path + bufsize + namelen + is_not_root) = '\0';
51490+
51491+ tmp = obj->globbed;
51492+ while (tmp) {
51493+ if (!glob_match(tmp->filename, path)) {
51494+ preempt_enable();
51495+ return (tmp->mode & GR_FIND) ? 1 : 0;
51496+ }
51497+ tmp = tmp->next;
51498+ }
51499+ preempt_enable();
51500+ return (obj->mode & GR_FIND) ? 1 : 0;
51501+}
51502+
51503+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
51504+EXPORT_SYMBOL(gr_acl_is_enabled);
51505+#endif
51506+EXPORT_SYMBOL(gr_learn_resource);
51507+EXPORT_SYMBOL(gr_set_kernel_label);
51508+#ifdef CONFIG_SECURITY
51509+EXPORT_SYMBOL(gr_check_user_change);
51510+EXPORT_SYMBOL(gr_check_group_change);
51511+#endif
51512+
51513diff -urNp linux-3.1.1/grsecurity/gracl_cap.c linux-3.1.1/grsecurity/gracl_cap.c
51514--- linux-3.1.1/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
51515+++ linux-3.1.1/grsecurity/gracl_cap.c 2011-11-16 18:40:31.000000000 -0500
51516@@ -0,0 +1,101 @@
51517+#include <linux/kernel.h>
51518+#include <linux/module.h>
51519+#include <linux/sched.h>
51520+#include <linux/gracl.h>
51521+#include <linux/grsecurity.h>
51522+#include <linux/grinternal.h>
51523+
51524+extern const char *captab_log[];
51525+extern int captab_log_entries;
51526+
51527+int
51528+gr_acl_is_capable(const int cap)
51529+{
51530+ struct task_struct *task = current;
51531+ const struct cred *cred = current_cred();
51532+ struct acl_subject_label *curracl;
51533+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
51534+ kernel_cap_t cap_audit = __cap_empty_set;
51535+
51536+ if (!gr_acl_is_enabled())
51537+ return 1;
51538+
51539+ curracl = task->acl;
51540+
51541+ cap_drop = curracl->cap_lower;
51542+ cap_mask = curracl->cap_mask;
51543+ cap_audit = curracl->cap_invert_audit;
51544+
51545+ while ((curracl = curracl->parent_subject)) {
51546+ /* if the cap isn't specified in the current computed mask but is specified in the
51547+ current level subject, and is lowered in the current level subject, then add
51548+ it to the set of dropped capabilities
51549+ otherwise, add the current level subject's mask to the current computed mask
51550+ */
51551+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
51552+ cap_raise(cap_mask, cap);
51553+ if (cap_raised(curracl->cap_lower, cap))
51554+ cap_raise(cap_drop, cap);
51555+ if (cap_raised(curracl->cap_invert_audit, cap))
51556+ cap_raise(cap_audit, cap);
51557+ }
51558+ }
51559+
51560+ if (!cap_raised(cap_drop, cap)) {
51561+ if (cap_raised(cap_audit, cap))
51562+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
51563+ return 1;
51564+ }
51565+
51566+ curracl = task->acl;
51567+
51568+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
51569+ && cap_raised(cred->cap_effective, cap)) {
51570+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
51571+ task->role->roletype, cred->uid,
51572+ cred->gid, task->exec_file ?
51573+ gr_to_filename(task->exec_file->f_path.dentry,
51574+ task->exec_file->f_path.mnt) : curracl->filename,
51575+ curracl->filename, 0UL,
51576+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
51577+ return 1;
51578+ }
51579+
51580+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
51581+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
51582+ return 0;
51583+}
51584+
51585+int
51586+gr_acl_is_capable_nolog(const int cap)
51587+{
51588+ struct acl_subject_label *curracl;
51589+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
51590+
51591+ if (!gr_acl_is_enabled())
51592+ return 1;
51593+
51594+ curracl = current->acl;
51595+
51596+ cap_drop = curracl->cap_lower;
51597+ cap_mask = curracl->cap_mask;
51598+
51599+ while ((curracl = curracl->parent_subject)) {
51600+ /* if the cap isn't specified in the current computed mask but is specified in the
51601+ current level subject, and is lowered in the current level subject, then add
51602+ it to the set of dropped capabilities
51603+ otherwise, add the current level subject's mask to the current computed mask
51604+ */
51605+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
51606+ cap_raise(cap_mask, cap);
51607+ if (cap_raised(curracl->cap_lower, cap))
51608+ cap_raise(cap_drop, cap);
51609+ }
51610+ }
51611+
51612+ if (!cap_raised(cap_drop, cap))
51613+ return 1;
51614+
51615+ return 0;
51616+}
51617+
51618diff -urNp linux-3.1.1/grsecurity/gracl_fs.c linux-3.1.1/grsecurity/gracl_fs.c
51619--- linux-3.1.1/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
51620+++ linux-3.1.1/grsecurity/gracl_fs.c 2011-11-17 00:25:32.000000000 -0500
51621@@ -0,0 +1,433 @@
51622+#include <linux/kernel.h>
51623+#include <linux/sched.h>
51624+#include <linux/types.h>
51625+#include <linux/fs.h>
51626+#include <linux/file.h>
51627+#include <linux/stat.h>
51628+#include <linux/grsecurity.h>
51629+#include <linux/grinternal.h>
51630+#include <linux/gracl.h>
51631+
51632+__u32
51633+gr_acl_handle_hidden_file(const struct dentry * dentry,
51634+ const struct vfsmount * mnt)
51635+{
51636+ __u32 mode;
51637+
51638+ if (unlikely(!dentry->d_inode))
51639+ return GR_FIND;
51640+
51641+ mode =
51642+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
51643+
51644+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
51645+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
51646+ return mode;
51647+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
51648+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
51649+ return 0;
51650+ } else if (unlikely(!(mode & GR_FIND)))
51651+ return 0;
51652+
51653+ return GR_FIND;
51654+}
51655+
51656+__u32
51657+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
51658+ int acc_mode)
51659+{
51660+ __u32 reqmode = GR_FIND;
51661+ __u32 mode;
51662+
51663+ if (unlikely(!dentry->d_inode))
51664+ return reqmode;
51665+
51666+ if (acc_mode & MAY_APPEND)
51667+ reqmode |= GR_APPEND;
51668+ else if (acc_mode & MAY_WRITE)
51669+ reqmode |= GR_WRITE;
51670+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
51671+ reqmode |= GR_READ;
51672+
51673+ mode =
51674+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
51675+ mnt);
51676+
51677+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51678+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
51679+ reqmode & GR_READ ? " reading" : "",
51680+ reqmode & GR_WRITE ? " writing" : reqmode &
51681+ GR_APPEND ? " appending" : "");
51682+ return reqmode;
51683+ } else
51684+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51685+ {
51686+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
51687+ reqmode & GR_READ ? " reading" : "",
51688+ reqmode & GR_WRITE ? " writing" : reqmode &
51689+ GR_APPEND ? " appending" : "");
51690+ return 0;
51691+ } else if (unlikely((mode & reqmode) != reqmode))
51692+ return 0;
51693+
51694+ return reqmode;
51695+}
51696+
51697+__u32
51698+gr_acl_handle_creat(const struct dentry * dentry,
51699+ const struct dentry * p_dentry,
51700+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
51701+ const int imode)
51702+{
51703+ __u32 reqmode = GR_WRITE | GR_CREATE;
51704+ __u32 mode;
51705+
51706+ if (acc_mode & MAY_APPEND)
51707+ reqmode |= GR_APPEND;
51708+ // if a directory was required or the directory already exists, then
51709+ // don't count this open as a read
51710+ if ((acc_mode & MAY_READ) &&
51711+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
51712+ reqmode |= GR_READ;
51713+ if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
51714+ reqmode |= GR_SETID;
51715+
51716+ mode =
51717+ gr_check_create(dentry, p_dentry, p_mnt,
51718+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
51719+
51720+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51721+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
51722+ reqmode & GR_READ ? " reading" : "",
51723+ reqmode & GR_WRITE ? " writing" : reqmode &
51724+ GR_APPEND ? " appending" : "");
51725+ return reqmode;
51726+ } else
51727+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51728+ {
51729+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
51730+ reqmode & GR_READ ? " reading" : "",
51731+ reqmode & GR_WRITE ? " writing" : reqmode &
51732+ GR_APPEND ? " appending" : "");
51733+ return 0;
51734+ } else if (unlikely((mode & reqmode) != reqmode))
51735+ return 0;
51736+
51737+ return reqmode;
51738+}
51739+
51740+__u32
51741+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
51742+ const int fmode)
51743+{
51744+ __u32 mode, reqmode = GR_FIND;
51745+
51746+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
51747+ reqmode |= GR_EXEC;
51748+ if (fmode & S_IWOTH)
51749+ reqmode |= GR_WRITE;
51750+ if (fmode & S_IROTH)
51751+ reqmode |= GR_READ;
51752+
51753+ mode =
51754+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
51755+ mnt);
51756+
51757+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51758+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
51759+ reqmode & GR_READ ? " reading" : "",
51760+ reqmode & GR_WRITE ? " writing" : "",
51761+ reqmode & GR_EXEC ? " executing" : "");
51762+ return reqmode;
51763+ } else
51764+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51765+ {
51766+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
51767+ reqmode & GR_READ ? " reading" : "",
51768+ reqmode & GR_WRITE ? " writing" : "",
51769+ reqmode & GR_EXEC ? " executing" : "");
51770+ return 0;
51771+ } else if (unlikely((mode & reqmode) != reqmode))
51772+ return 0;
51773+
51774+ return reqmode;
51775+}
51776+
51777+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
51778+{
51779+ __u32 mode;
51780+
51781+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
51782+
51783+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
51784+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
51785+ return mode;
51786+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
51787+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
51788+ return 0;
51789+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
51790+ return 0;
51791+
51792+ return (reqmode);
51793+}
51794+
51795+__u32
51796+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
51797+{
51798+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
51799+}
51800+
51801+__u32
51802+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
51803+{
51804+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
51805+}
51806+
51807+__u32
51808+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
51809+{
51810+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
51811+}
51812+
51813+__u32
51814+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
51815+{
51816+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
51817+}
51818+
51819+__u32
51820+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
51821+ mode_t mode)
51822+{
51823+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
51824+ return 1;
51825+
51826+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
51827+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
51828+ GR_FCHMOD_ACL_MSG);
51829+ } else {
51830+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
51831+ }
51832+}
51833+
51834+__u32
51835+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
51836+ mode_t mode)
51837+{
51838+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
51839+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
51840+ GR_CHMOD_ACL_MSG);
51841+ } else {
51842+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
51843+ }
51844+}
51845+
51846+__u32
51847+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
51848+{
51849+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
51850+}
51851+
51852+__u32
51853+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
51854+{
51855+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
51856+}
51857+
51858+__u32
51859+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
51860+{
51861+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
51862+}
51863+
51864+__u32
51865+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
51866+{
51867+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
51868+ GR_UNIXCONNECT_ACL_MSG);
51869+}
51870+
51871+/* hardlinks require at minimum create and link permission,
51872+ any additional privilege required is based on the
51873+ privilege of the file being linked to
51874+*/
51875+__u32
51876+gr_acl_handle_link(const struct dentry * new_dentry,
51877+ const struct dentry * parent_dentry,
51878+ const struct vfsmount * parent_mnt,
51879+ const struct dentry * old_dentry,
51880+ const struct vfsmount * old_mnt, const char *to)
51881+{
51882+ __u32 mode;
51883+ __u32 needmode = GR_CREATE | GR_LINK;
51884+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
51885+
51886+ mode =
51887+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
51888+ old_mnt);
51889+
51890+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
51891+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
51892+ return mode;
51893+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
51894+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
51895+ return 0;
51896+ } else if (unlikely((mode & needmode) != needmode))
51897+ return 0;
51898+
51899+ return 1;
51900+}
51901+
51902+__u32
51903+gr_acl_handle_symlink(const struct dentry * new_dentry,
51904+ const struct dentry * parent_dentry,
51905+ const struct vfsmount * parent_mnt, const char *from)
51906+{
51907+ __u32 needmode = GR_WRITE | GR_CREATE;
51908+ __u32 mode;
51909+
51910+ mode =
51911+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
51912+ GR_CREATE | GR_AUDIT_CREATE |
51913+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
51914+
51915+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
51916+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
51917+ return mode;
51918+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
51919+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
51920+ return 0;
51921+ } else if (unlikely((mode & needmode) != needmode))
51922+ return 0;
51923+
51924+ return (GR_WRITE | GR_CREATE);
51925+}
51926+
51927+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
51928+{
51929+ __u32 mode;
51930+
51931+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
51932+
51933+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
51934+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
51935+ return mode;
51936+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
51937+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
51938+ return 0;
51939+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
51940+ return 0;
51941+
51942+ return (reqmode);
51943+}
51944+
51945+__u32
51946+gr_acl_handle_mknod(const struct dentry * new_dentry,
51947+ const struct dentry * parent_dentry,
51948+ const struct vfsmount * parent_mnt,
51949+ const int mode)
51950+{
51951+ __u32 reqmode = GR_WRITE | GR_CREATE;
51952+ if (unlikely(mode & (S_ISUID | S_ISGID)))
51953+ reqmode |= GR_SETID;
51954+
51955+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
51956+ reqmode, GR_MKNOD_ACL_MSG);
51957+}
51958+
51959+__u32
51960+gr_acl_handle_mkdir(const struct dentry *new_dentry,
51961+ const struct dentry *parent_dentry,
51962+ const struct vfsmount *parent_mnt)
51963+{
51964+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
51965+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
51966+}
51967+
51968+#define RENAME_CHECK_SUCCESS(old, new) \
51969+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
51970+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
51971+
51972+int
51973+gr_acl_handle_rename(struct dentry *new_dentry,
51974+ struct dentry *parent_dentry,
51975+ const struct vfsmount *parent_mnt,
51976+ struct dentry *old_dentry,
51977+ struct inode *old_parent_inode,
51978+ struct vfsmount *old_mnt, const char *newname)
51979+{
51980+ __u32 comp1, comp2;
51981+ int error = 0;
51982+
51983+ if (unlikely(!gr_acl_is_enabled()))
51984+ return 0;
51985+
51986+ if (!new_dentry->d_inode) {
51987+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
51988+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
51989+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
51990+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
51991+ GR_DELETE | GR_AUDIT_DELETE |
51992+ GR_AUDIT_READ | GR_AUDIT_WRITE |
51993+ GR_SUPPRESS, old_mnt);
51994+ } else {
51995+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
51996+ GR_CREATE | GR_DELETE |
51997+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
51998+ GR_AUDIT_READ | GR_AUDIT_WRITE |
51999+ GR_SUPPRESS, parent_mnt);
52000+ comp2 =
52001+ gr_search_file(old_dentry,
52002+ GR_READ | GR_WRITE | GR_AUDIT_READ |
52003+ GR_DELETE | GR_AUDIT_DELETE |
52004+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
52005+ }
52006+
52007+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
52008+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
52009+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
52010+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
52011+ && !(comp2 & GR_SUPPRESS)) {
52012+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
52013+ error = -EACCES;
52014+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
52015+ error = -EACCES;
52016+
52017+ return error;
52018+}
52019+
52020+void
52021+gr_acl_handle_exit(void)
52022+{
52023+ u16 id;
52024+ char *rolename;
52025+ struct file *exec_file;
52026+
52027+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
52028+ !(current->role->roletype & GR_ROLE_PERSIST))) {
52029+ id = current->acl_role_id;
52030+ rolename = current->role->rolename;
52031+ gr_set_acls(1);
52032+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
52033+ }
52034+
52035+ write_lock(&grsec_exec_file_lock);
52036+ exec_file = current->exec_file;
52037+ current->exec_file = NULL;
52038+ write_unlock(&grsec_exec_file_lock);
52039+
52040+ if (exec_file)
52041+ fput(exec_file);
52042+}
52043+
52044+int
52045+gr_acl_handle_procpidmem(const struct task_struct *task)
52046+{
52047+ if (unlikely(!gr_acl_is_enabled()))
52048+ return 0;
52049+
52050+ if (task != current && task->acl->mode & GR_PROTPROCFD)
52051+ return -EACCES;
52052+
52053+ return 0;
52054+}
52055diff -urNp linux-3.1.1/grsecurity/gracl_ip.c linux-3.1.1/grsecurity/gracl_ip.c
52056--- linux-3.1.1/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
52057+++ linux-3.1.1/grsecurity/gracl_ip.c 2011-11-16 18:40:31.000000000 -0500
52058@@ -0,0 +1,381 @@
52059+#include <linux/kernel.h>
52060+#include <asm/uaccess.h>
52061+#include <asm/errno.h>
52062+#include <net/sock.h>
52063+#include <linux/file.h>
52064+#include <linux/fs.h>
52065+#include <linux/net.h>
52066+#include <linux/in.h>
52067+#include <linux/skbuff.h>
52068+#include <linux/ip.h>
52069+#include <linux/udp.h>
52070+#include <linux/types.h>
52071+#include <linux/sched.h>
52072+#include <linux/netdevice.h>
52073+#include <linux/inetdevice.h>
52074+#include <linux/gracl.h>
52075+#include <linux/grsecurity.h>
52076+#include <linux/grinternal.h>
52077+
52078+#define GR_BIND 0x01
52079+#define GR_CONNECT 0x02
52080+#define GR_INVERT 0x04
52081+#define GR_BINDOVERRIDE 0x08
52082+#define GR_CONNECTOVERRIDE 0x10
52083+#define GR_SOCK_FAMILY 0x20
52084+
52085+static const char * gr_protocols[IPPROTO_MAX] = {
52086+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
52087+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
52088+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
52089+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
52090+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
52091+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
52092+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
52093+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
52094+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
52095+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
52096+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
52097+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
52098+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
52099+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
52100+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
52101+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
52102+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
52103+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
52104+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
52105+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
52106+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
52107+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
52108+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
52109+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
52110+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
52111+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
52112+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
52113+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
52114+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
52115+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
52116+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
52117+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
52118+ };
52119+
52120+static const char * gr_socktypes[SOCK_MAX] = {
52121+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
52122+ "unknown:7", "unknown:8", "unknown:9", "packet"
52123+ };
52124+
52125+static const char * gr_sockfamilies[AF_MAX+1] = {
52126+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
52127+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
52128+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
52129+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
52130+ };
52131+
52132+const char *
52133+gr_proto_to_name(unsigned char proto)
52134+{
52135+ return gr_protocols[proto];
52136+}
52137+
52138+const char *
52139+gr_socktype_to_name(unsigned char type)
52140+{
52141+ return gr_socktypes[type];
52142+}
52143+
52144+const char *
52145+gr_sockfamily_to_name(unsigned char family)
52146+{
52147+ return gr_sockfamilies[family];
52148+}
52149+
52150+int
52151+gr_search_socket(const int domain, const int type, const int protocol)
52152+{
52153+ struct acl_subject_label *curr;
52154+ const struct cred *cred = current_cred();
52155+
52156+ if (unlikely(!gr_acl_is_enabled()))
52157+ goto exit;
52158+
52159+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
52160+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
52161+ goto exit; // let the kernel handle it
52162+
52163+ curr = current->acl;
52164+
52165+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
52166+ /* the family is allowed, if this is PF_INET allow it only if
52167+ the extra sock type/protocol checks pass */
52168+ if (domain == PF_INET)
52169+ goto inet_check;
52170+ goto exit;
52171+ } else {
52172+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
52173+ __u32 fakeip = 0;
52174+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
52175+ current->role->roletype, cred->uid,
52176+ cred->gid, current->exec_file ?
52177+ gr_to_filename(current->exec_file->f_path.dentry,
52178+ current->exec_file->f_path.mnt) :
52179+ curr->filename, curr->filename,
52180+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
52181+ &current->signal->saved_ip);
52182+ goto exit;
52183+ }
52184+ goto exit_fail;
52185+ }
52186+
52187+inet_check:
52188+ /* the rest of this checking is for IPv4 only */
52189+ if (!curr->ips)
52190+ goto exit;
52191+
52192+ if ((curr->ip_type & (1 << type)) &&
52193+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
52194+ goto exit;
52195+
52196+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
52197+ /* we don't place acls on raw sockets , and sometimes
52198+ dgram/ip sockets are opened for ioctl and not
52199+ bind/connect, so we'll fake a bind learn log */
52200+ if (type == SOCK_RAW || type == SOCK_PACKET) {
52201+ __u32 fakeip = 0;
52202+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
52203+ current->role->roletype, cred->uid,
52204+ cred->gid, current->exec_file ?
52205+ gr_to_filename(current->exec_file->f_path.dentry,
52206+ current->exec_file->f_path.mnt) :
52207+ curr->filename, curr->filename,
52208+ &fakeip, 0, type,
52209+ protocol, GR_CONNECT, &current->signal->saved_ip);
52210+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
52211+ __u32 fakeip = 0;
52212+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
52213+ current->role->roletype, cred->uid,
52214+ cred->gid, current->exec_file ?
52215+ gr_to_filename(current->exec_file->f_path.dentry,
52216+ current->exec_file->f_path.mnt) :
52217+ curr->filename, curr->filename,
52218+ &fakeip, 0, type,
52219+ protocol, GR_BIND, &current->signal->saved_ip);
52220+ }
52221+ /* we'll log when they use connect or bind */
52222+ goto exit;
52223+ }
52224+
52225+exit_fail:
52226+ if (domain == PF_INET)
52227+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
52228+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
52229+ else
52230+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
52231+ gr_socktype_to_name(type), protocol);
52232+
52233+ return 0;
52234+exit:
52235+ return 1;
52236+}
52237+
52238+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
52239+{
52240+ if ((ip->mode & mode) &&
52241+ (ip_port >= ip->low) &&
52242+ (ip_port <= ip->high) &&
52243+ ((ntohl(ip_addr) & our_netmask) ==
52244+ (ntohl(our_addr) & our_netmask))
52245+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
52246+ && (ip->type & (1 << type))) {
52247+ if (ip->mode & GR_INVERT)
52248+ return 2; // specifically denied
52249+ else
52250+ return 1; // allowed
52251+ }
52252+
52253+ return 0; // not specifically allowed, may continue parsing
52254+}
52255+
52256+static int
52257+gr_search_connectbind(const int full_mode, struct sock *sk,
52258+ struct sockaddr_in *addr, const int type)
52259+{
52260+ char iface[IFNAMSIZ] = {0};
52261+ struct acl_subject_label *curr;
52262+ struct acl_ip_label *ip;
52263+ struct inet_sock *isk;
52264+ struct net_device *dev;
52265+ struct in_device *idev;
52266+ unsigned long i;
52267+ int ret;
52268+ int mode = full_mode & (GR_BIND | GR_CONNECT);
52269+ __u32 ip_addr = 0;
52270+ __u32 our_addr;
52271+ __u32 our_netmask;
52272+ char *p;
52273+ __u16 ip_port = 0;
52274+ const struct cred *cred = current_cred();
52275+
52276+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
52277+ return 0;
52278+
52279+ curr = current->acl;
52280+ isk = inet_sk(sk);
52281+
52282+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
52283+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
52284+ addr->sin_addr.s_addr = curr->inaddr_any_override;
52285+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
52286+ struct sockaddr_in saddr;
52287+ int err;
52288+
52289+ saddr.sin_family = AF_INET;
52290+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
52291+ saddr.sin_port = isk->inet_sport;
52292+
52293+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
52294+ if (err)
52295+ return err;
52296+
52297+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
52298+ if (err)
52299+ return err;
52300+ }
52301+
52302+ if (!curr->ips)
52303+ return 0;
52304+
52305+ ip_addr = addr->sin_addr.s_addr;
52306+ ip_port = ntohs(addr->sin_port);
52307+
52308+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
52309+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
52310+ current->role->roletype, cred->uid,
52311+ cred->gid, current->exec_file ?
52312+ gr_to_filename(current->exec_file->f_path.dentry,
52313+ current->exec_file->f_path.mnt) :
52314+ curr->filename, curr->filename,
52315+ &ip_addr, ip_port, type,
52316+ sk->sk_protocol, mode, &current->signal->saved_ip);
52317+ return 0;
52318+ }
52319+
52320+ for (i = 0; i < curr->ip_num; i++) {
52321+ ip = *(curr->ips + i);
52322+ if (ip->iface != NULL) {
52323+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
52324+ p = strchr(iface, ':');
52325+ if (p != NULL)
52326+ *p = '\0';
52327+ dev = dev_get_by_name(sock_net(sk), iface);
52328+ if (dev == NULL)
52329+ continue;
52330+ idev = in_dev_get(dev);
52331+ if (idev == NULL) {
52332+ dev_put(dev);
52333+ continue;
52334+ }
52335+ rcu_read_lock();
52336+ for_ifa(idev) {
52337+ if (!strcmp(ip->iface, ifa->ifa_label)) {
52338+ our_addr = ifa->ifa_address;
52339+ our_netmask = 0xffffffff;
52340+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
52341+ if (ret == 1) {
52342+ rcu_read_unlock();
52343+ in_dev_put(idev);
52344+ dev_put(dev);
52345+ return 0;
52346+ } else if (ret == 2) {
52347+ rcu_read_unlock();
52348+ in_dev_put(idev);
52349+ dev_put(dev);
52350+ goto denied;
52351+ }
52352+ }
52353+ } endfor_ifa(idev);
52354+ rcu_read_unlock();
52355+ in_dev_put(idev);
52356+ dev_put(dev);
52357+ } else {
52358+ our_addr = ip->addr;
52359+ our_netmask = ip->netmask;
52360+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
52361+ if (ret == 1)
52362+ return 0;
52363+ else if (ret == 2)
52364+ goto denied;
52365+ }
52366+ }
52367+
52368+denied:
52369+ if (mode == GR_BIND)
52370+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
52371+ else if (mode == GR_CONNECT)
52372+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
52373+
52374+ return -EACCES;
52375+}
52376+
52377+int
52378+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
52379+{
52380+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
52381+}
52382+
52383+int
52384+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
52385+{
52386+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
52387+}
52388+
52389+int gr_search_listen(struct socket *sock)
52390+{
52391+ struct sock *sk = sock->sk;
52392+ struct sockaddr_in addr;
52393+
52394+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
52395+ addr.sin_port = inet_sk(sk)->inet_sport;
52396+
52397+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
52398+}
52399+
52400+int gr_search_accept(struct socket *sock)
52401+{
52402+ struct sock *sk = sock->sk;
52403+ struct sockaddr_in addr;
52404+
52405+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
52406+ addr.sin_port = inet_sk(sk)->inet_sport;
52407+
52408+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
52409+}
52410+
52411+int
52412+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
52413+{
52414+ if (addr)
52415+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
52416+ else {
52417+ struct sockaddr_in sin;
52418+ const struct inet_sock *inet = inet_sk(sk);
52419+
52420+ sin.sin_addr.s_addr = inet->inet_daddr;
52421+ sin.sin_port = inet->inet_dport;
52422+
52423+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
52424+ }
52425+}
52426+
52427+int
52428+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
52429+{
52430+ struct sockaddr_in sin;
52431+
52432+ if (unlikely(skb->len < sizeof (struct udphdr)))
52433+ return 0; // skip this packet
52434+
52435+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
52436+ sin.sin_port = udp_hdr(skb)->source;
52437+
52438+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
52439+}
52440diff -urNp linux-3.1.1/grsecurity/gracl_learn.c linux-3.1.1/grsecurity/gracl_learn.c
52441--- linux-3.1.1/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
52442+++ linux-3.1.1/grsecurity/gracl_learn.c 2011-11-16 18:40:31.000000000 -0500
52443@@ -0,0 +1,207 @@
52444+#include <linux/kernel.h>
52445+#include <linux/mm.h>
52446+#include <linux/sched.h>
52447+#include <linux/poll.h>
52448+#include <linux/string.h>
52449+#include <linux/file.h>
52450+#include <linux/types.h>
52451+#include <linux/vmalloc.h>
52452+#include <linux/grinternal.h>
52453+
52454+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
52455+ size_t count, loff_t *ppos);
52456+extern int gr_acl_is_enabled(void);
52457+
52458+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
52459+static int gr_learn_attached;
52460+
52461+/* use a 512k buffer */
52462+#define LEARN_BUFFER_SIZE (512 * 1024)
52463+
52464+static DEFINE_SPINLOCK(gr_learn_lock);
52465+static DEFINE_MUTEX(gr_learn_user_mutex);
52466+
52467+/* we need to maintain two buffers, so that the kernel context of grlearn
52468+ uses a semaphore around the userspace copying, and the other kernel contexts
52469+ use a spinlock when copying into the buffer, since they cannot sleep
52470+*/
52471+static char *learn_buffer;
52472+static char *learn_buffer_user;
52473+static int learn_buffer_len;
52474+static int learn_buffer_user_len;
52475+
52476+static ssize_t
52477+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
52478+{
52479+ DECLARE_WAITQUEUE(wait, current);
52480+ ssize_t retval = 0;
52481+
52482+ add_wait_queue(&learn_wait, &wait);
52483+ set_current_state(TASK_INTERRUPTIBLE);
52484+ do {
52485+ mutex_lock(&gr_learn_user_mutex);
52486+ spin_lock(&gr_learn_lock);
52487+ if (learn_buffer_len)
52488+ break;
52489+ spin_unlock(&gr_learn_lock);
52490+ mutex_unlock(&gr_learn_user_mutex);
52491+ if (file->f_flags & O_NONBLOCK) {
52492+ retval = -EAGAIN;
52493+ goto out;
52494+ }
52495+ if (signal_pending(current)) {
52496+ retval = -ERESTARTSYS;
52497+ goto out;
52498+ }
52499+
52500+ schedule();
52501+ } while (1);
52502+
52503+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
52504+ learn_buffer_user_len = learn_buffer_len;
52505+ retval = learn_buffer_len;
52506+ learn_buffer_len = 0;
52507+
52508+ spin_unlock(&gr_learn_lock);
52509+
52510+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
52511+ retval = -EFAULT;
52512+
52513+ mutex_unlock(&gr_learn_user_mutex);
52514+out:
52515+ set_current_state(TASK_RUNNING);
52516+ remove_wait_queue(&learn_wait, &wait);
52517+ return retval;
52518+}
52519+
52520+static unsigned int
52521+poll_learn(struct file * file, poll_table * wait)
52522+{
52523+ poll_wait(file, &learn_wait, wait);
52524+
52525+ if (learn_buffer_len)
52526+ return (POLLIN | POLLRDNORM);
52527+
52528+ return 0;
52529+}
52530+
52531+void
52532+gr_clear_learn_entries(void)
52533+{
52534+ char *tmp;
52535+
52536+ mutex_lock(&gr_learn_user_mutex);
52537+ spin_lock(&gr_learn_lock);
52538+ tmp = learn_buffer;
52539+ learn_buffer = NULL;
52540+ spin_unlock(&gr_learn_lock);
52541+ if (tmp)
52542+ vfree(tmp);
52543+ if (learn_buffer_user != NULL) {
52544+ vfree(learn_buffer_user);
52545+ learn_buffer_user = NULL;
52546+ }
52547+ learn_buffer_len = 0;
52548+ mutex_unlock(&gr_learn_user_mutex);
52549+
52550+ return;
52551+}
52552+
52553+void
52554+gr_add_learn_entry(const char *fmt, ...)
52555+{
52556+ va_list args;
52557+ unsigned int len;
52558+
52559+ if (!gr_learn_attached)
52560+ return;
52561+
52562+ spin_lock(&gr_learn_lock);
52563+
52564+ /* leave a gap at the end so we know when it's "full" but don't have to
52565+ compute the exact length of the string we're trying to append
52566+ */
52567+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
52568+ spin_unlock(&gr_learn_lock);
52569+ wake_up_interruptible(&learn_wait);
52570+ return;
52571+ }
52572+ if (learn_buffer == NULL) {
52573+ spin_unlock(&gr_learn_lock);
52574+ return;
52575+ }
52576+
52577+ va_start(args, fmt);
52578+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
52579+ va_end(args);
52580+
52581+ learn_buffer_len += len + 1;
52582+
52583+ spin_unlock(&gr_learn_lock);
52584+ wake_up_interruptible(&learn_wait);
52585+
52586+ return;
52587+}
52588+
52589+static int
52590+open_learn(struct inode *inode, struct file *file)
52591+{
52592+ if (file->f_mode & FMODE_READ && gr_learn_attached)
52593+ return -EBUSY;
52594+ if (file->f_mode & FMODE_READ) {
52595+ int retval = 0;
52596+ mutex_lock(&gr_learn_user_mutex);
52597+ if (learn_buffer == NULL)
52598+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
52599+ if (learn_buffer_user == NULL)
52600+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
52601+ if (learn_buffer == NULL) {
52602+ retval = -ENOMEM;
52603+ goto out_error;
52604+ }
52605+ if (learn_buffer_user == NULL) {
52606+ retval = -ENOMEM;
52607+ goto out_error;
52608+ }
52609+ learn_buffer_len = 0;
52610+ learn_buffer_user_len = 0;
52611+ gr_learn_attached = 1;
52612+out_error:
52613+ mutex_unlock(&gr_learn_user_mutex);
52614+ return retval;
52615+ }
52616+ return 0;
52617+}
52618+
52619+static int
52620+close_learn(struct inode *inode, struct file *file)
52621+{
52622+ if (file->f_mode & FMODE_READ) {
52623+ char *tmp = NULL;
52624+ mutex_lock(&gr_learn_user_mutex);
52625+ spin_lock(&gr_learn_lock);
52626+ tmp = learn_buffer;
52627+ learn_buffer = NULL;
52628+ spin_unlock(&gr_learn_lock);
52629+ if (tmp)
52630+ vfree(tmp);
52631+ if (learn_buffer_user != NULL) {
52632+ vfree(learn_buffer_user);
52633+ learn_buffer_user = NULL;
52634+ }
52635+ learn_buffer_len = 0;
52636+ learn_buffer_user_len = 0;
52637+ gr_learn_attached = 0;
52638+ mutex_unlock(&gr_learn_user_mutex);
52639+ }
52640+
52641+ return 0;
52642+}
52643+
52644+const struct file_operations grsec_fops = {
52645+ .read = read_learn,
52646+ .write = write_grsec_handler,
52647+ .open = open_learn,
52648+ .release = close_learn,
52649+ .poll = poll_learn,
52650+};
52651diff -urNp linux-3.1.1/grsecurity/gracl_res.c linux-3.1.1/grsecurity/gracl_res.c
52652--- linux-3.1.1/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
52653+++ linux-3.1.1/grsecurity/gracl_res.c 2011-11-16 18:40:31.000000000 -0500
52654@@ -0,0 +1,68 @@
52655+#include <linux/kernel.h>
52656+#include <linux/sched.h>
52657+#include <linux/gracl.h>
52658+#include <linux/grinternal.h>
52659+
52660+static const char *restab_log[] = {
52661+ [RLIMIT_CPU] = "RLIMIT_CPU",
52662+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
52663+ [RLIMIT_DATA] = "RLIMIT_DATA",
52664+ [RLIMIT_STACK] = "RLIMIT_STACK",
52665+ [RLIMIT_CORE] = "RLIMIT_CORE",
52666+ [RLIMIT_RSS] = "RLIMIT_RSS",
52667+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
52668+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
52669+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
52670+ [RLIMIT_AS] = "RLIMIT_AS",
52671+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
52672+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
52673+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
52674+ [RLIMIT_NICE] = "RLIMIT_NICE",
52675+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
52676+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
52677+ [GR_CRASH_RES] = "RLIMIT_CRASH"
52678+};
52679+
52680+void
52681+gr_log_resource(const struct task_struct *task,
52682+ const int res, const unsigned long wanted, const int gt)
52683+{
52684+ const struct cred *cred;
52685+ unsigned long rlim;
52686+
52687+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
52688+ return;
52689+
52690+ // not yet supported resource
52691+ if (unlikely(!restab_log[res]))
52692+ return;
52693+
52694+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
52695+ rlim = task_rlimit_max(task, res);
52696+ else
52697+ rlim = task_rlimit(task, res);
52698+
52699+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
52700+ return;
52701+
52702+ rcu_read_lock();
52703+ cred = __task_cred(task);
52704+
52705+ if (res == RLIMIT_NPROC &&
52706+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
52707+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
52708+ goto out_rcu_unlock;
52709+ else if (res == RLIMIT_MEMLOCK &&
52710+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
52711+ goto out_rcu_unlock;
52712+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
52713+ goto out_rcu_unlock;
52714+ rcu_read_unlock();
52715+
52716+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
52717+
52718+ return;
52719+out_rcu_unlock:
52720+ rcu_read_unlock();
52721+ return;
52722+}
52723diff -urNp linux-3.1.1/grsecurity/gracl_segv.c linux-3.1.1/grsecurity/gracl_segv.c
52724--- linux-3.1.1/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
52725+++ linux-3.1.1/grsecurity/gracl_segv.c 2011-11-16 18:40:31.000000000 -0500
52726@@ -0,0 +1,299 @@
52727+#include <linux/kernel.h>
52728+#include <linux/mm.h>
52729+#include <asm/uaccess.h>
52730+#include <asm/errno.h>
52731+#include <asm/mman.h>
52732+#include <net/sock.h>
52733+#include <linux/file.h>
52734+#include <linux/fs.h>
52735+#include <linux/net.h>
52736+#include <linux/in.h>
52737+#include <linux/slab.h>
52738+#include <linux/types.h>
52739+#include <linux/sched.h>
52740+#include <linux/timer.h>
52741+#include <linux/gracl.h>
52742+#include <linux/grsecurity.h>
52743+#include <linux/grinternal.h>
52744+
52745+static struct crash_uid *uid_set;
52746+static unsigned short uid_used;
52747+static DEFINE_SPINLOCK(gr_uid_lock);
52748+extern rwlock_t gr_inode_lock;
52749+extern struct acl_subject_label *
52750+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
52751+ struct acl_role_label *role);
52752+
52753+#ifdef CONFIG_BTRFS_FS
52754+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
52755+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
52756+#endif
52757+
52758+static inline dev_t __get_dev(const struct dentry *dentry)
52759+{
52760+#ifdef CONFIG_BTRFS_FS
52761+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
52762+ return get_btrfs_dev_from_inode(dentry->d_inode);
52763+ else
52764+#endif
52765+ return dentry->d_inode->i_sb->s_dev;
52766+}
52767+
52768+int
52769+gr_init_uidset(void)
52770+{
52771+ uid_set =
52772+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
52773+ uid_used = 0;
52774+
52775+ return uid_set ? 1 : 0;
52776+}
52777+
52778+void
52779+gr_free_uidset(void)
52780+{
52781+ if (uid_set)
52782+ kfree(uid_set);
52783+
52784+ return;
52785+}
52786+
52787+int
52788+gr_find_uid(const uid_t uid)
52789+{
52790+ struct crash_uid *tmp = uid_set;
52791+ uid_t buid;
52792+ int low = 0, high = uid_used - 1, mid;
52793+
52794+ while (high >= low) {
52795+ mid = (low + high) >> 1;
52796+ buid = tmp[mid].uid;
52797+ if (buid == uid)
52798+ return mid;
52799+ if (buid > uid)
52800+ high = mid - 1;
52801+ if (buid < uid)
52802+ low = mid + 1;
52803+ }
52804+
52805+ return -1;
52806+}
52807+
52808+static __inline__ void
52809+gr_insertsort(void)
52810+{
52811+ unsigned short i, j;
52812+ struct crash_uid index;
52813+
52814+ for (i = 1; i < uid_used; i++) {
52815+ index = uid_set[i];
52816+ j = i;
52817+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
52818+ uid_set[j] = uid_set[j - 1];
52819+ j--;
52820+ }
52821+ uid_set[j] = index;
52822+ }
52823+
52824+ return;
52825+}
52826+
52827+static __inline__ void
52828+gr_insert_uid(const uid_t uid, const unsigned long expires)
52829+{
52830+ int loc;
52831+
52832+ if (uid_used == GR_UIDTABLE_MAX)
52833+ return;
52834+
52835+ loc = gr_find_uid(uid);
52836+
52837+ if (loc >= 0) {
52838+ uid_set[loc].expires = expires;
52839+ return;
52840+ }
52841+
52842+ uid_set[uid_used].uid = uid;
52843+ uid_set[uid_used].expires = expires;
52844+ uid_used++;
52845+
52846+ gr_insertsort();
52847+
52848+ return;
52849+}
52850+
52851+void
52852+gr_remove_uid(const unsigned short loc)
52853+{
52854+ unsigned short i;
52855+
52856+ for (i = loc + 1; i < uid_used; i++)
52857+ uid_set[i - 1] = uid_set[i];
52858+
52859+ uid_used--;
52860+
52861+ return;
52862+}
52863+
52864+int
52865+gr_check_crash_uid(const uid_t uid)
52866+{
52867+ int loc;
52868+ int ret = 0;
52869+
52870+ if (unlikely(!gr_acl_is_enabled()))
52871+ return 0;
52872+
52873+ spin_lock(&gr_uid_lock);
52874+ loc = gr_find_uid(uid);
52875+
52876+ if (loc < 0)
52877+ goto out_unlock;
52878+
52879+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
52880+ gr_remove_uid(loc);
52881+ else
52882+ ret = 1;
52883+
52884+out_unlock:
52885+ spin_unlock(&gr_uid_lock);
52886+ return ret;
52887+}
52888+
52889+static __inline__ int
52890+proc_is_setxid(const struct cred *cred)
52891+{
52892+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
52893+ cred->uid != cred->fsuid)
52894+ return 1;
52895+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
52896+ cred->gid != cred->fsgid)
52897+ return 1;
52898+
52899+ return 0;
52900+}
52901+
52902+extern int gr_fake_force_sig(int sig, struct task_struct *t);
52903+
52904+void
52905+gr_handle_crash(struct task_struct *task, const int sig)
52906+{
52907+ struct acl_subject_label *curr;
52908+ struct task_struct *tsk, *tsk2;
52909+ const struct cred *cred;
52910+ const struct cred *cred2;
52911+
52912+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
52913+ return;
52914+
52915+ if (unlikely(!gr_acl_is_enabled()))
52916+ return;
52917+
52918+ curr = task->acl;
52919+
52920+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
52921+ return;
52922+
52923+ if (time_before_eq(curr->expires, get_seconds())) {
52924+ curr->expires = 0;
52925+ curr->crashes = 0;
52926+ }
52927+
52928+ curr->crashes++;
52929+
52930+ if (!curr->expires)
52931+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
52932+
52933+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
52934+ time_after(curr->expires, get_seconds())) {
52935+ rcu_read_lock();
52936+ cred = __task_cred(task);
52937+ if (cred->uid && proc_is_setxid(cred)) {
52938+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
52939+ spin_lock(&gr_uid_lock);
52940+ gr_insert_uid(cred->uid, curr->expires);
52941+ spin_unlock(&gr_uid_lock);
52942+ curr->expires = 0;
52943+ curr->crashes = 0;
52944+ read_lock(&tasklist_lock);
52945+ do_each_thread(tsk2, tsk) {
52946+ cred2 = __task_cred(tsk);
52947+ if (tsk != task && cred2->uid == cred->uid)
52948+ gr_fake_force_sig(SIGKILL, tsk);
52949+ } while_each_thread(tsk2, tsk);
52950+ read_unlock(&tasklist_lock);
52951+ } else {
52952+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
52953+ read_lock(&tasklist_lock);
52954+ read_lock(&grsec_exec_file_lock);
52955+ do_each_thread(tsk2, tsk) {
52956+ if (likely(tsk != task)) {
52957+ // if this thread has the same subject as the one that triggered
52958+ // RES_CRASH and it's the same binary, kill it
52959+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
52960+ gr_fake_force_sig(SIGKILL, tsk);
52961+ }
52962+ } while_each_thread(tsk2, tsk);
52963+ read_unlock(&grsec_exec_file_lock);
52964+ read_unlock(&tasklist_lock);
52965+ }
52966+ rcu_read_unlock();
52967+ }
52968+
52969+ return;
52970+}
52971+
52972+int
52973+gr_check_crash_exec(const struct file *filp)
52974+{
52975+ struct acl_subject_label *curr;
52976+
52977+ if (unlikely(!gr_acl_is_enabled()))
52978+ return 0;
52979+
52980+ read_lock(&gr_inode_lock);
52981+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
52982+ __get_dev(filp->f_path.dentry),
52983+ current->role);
52984+ read_unlock(&gr_inode_lock);
52985+
52986+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
52987+ (!curr->crashes && !curr->expires))
52988+ return 0;
52989+
52990+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
52991+ time_after(curr->expires, get_seconds()))
52992+ return 1;
52993+ else if (time_before_eq(curr->expires, get_seconds())) {
52994+ curr->crashes = 0;
52995+ curr->expires = 0;
52996+ }
52997+
52998+ return 0;
52999+}
53000+
53001+void
53002+gr_handle_alertkill(struct task_struct *task)
53003+{
53004+ struct acl_subject_label *curracl;
53005+ __u32 curr_ip;
53006+ struct task_struct *p, *p2;
53007+
53008+ if (unlikely(!gr_acl_is_enabled()))
53009+ return;
53010+
53011+ curracl = task->acl;
53012+ curr_ip = task->signal->curr_ip;
53013+
53014+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
53015+ read_lock(&tasklist_lock);
53016+ do_each_thread(p2, p) {
53017+ if (p->signal->curr_ip == curr_ip)
53018+ gr_fake_force_sig(SIGKILL, p);
53019+ } while_each_thread(p2, p);
53020+ read_unlock(&tasklist_lock);
53021+ } else if (curracl->mode & GR_KILLPROC)
53022+ gr_fake_force_sig(SIGKILL, task);
53023+
53024+ return;
53025+}
53026diff -urNp linux-3.1.1/grsecurity/gracl_shm.c linux-3.1.1/grsecurity/gracl_shm.c
53027--- linux-3.1.1/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
53028+++ linux-3.1.1/grsecurity/gracl_shm.c 2011-11-16 18:40:31.000000000 -0500
53029@@ -0,0 +1,40 @@
53030+#include <linux/kernel.h>
53031+#include <linux/mm.h>
53032+#include <linux/sched.h>
53033+#include <linux/file.h>
53034+#include <linux/ipc.h>
53035+#include <linux/gracl.h>
53036+#include <linux/grsecurity.h>
53037+#include <linux/grinternal.h>
53038+
53039+int
53040+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
53041+ const time_t shm_createtime, const uid_t cuid, const int shmid)
53042+{
53043+ struct task_struct *task;
53044+
53045+ if (!gr_acl_is_enabled())
53046+ return 1;
53047+
53048+ rcu_read_lock();
53049+ read_lock(&tasklist_lock);
53050+
53051+ task = find_task_by_vpid(shm_cprid);
53052+
53053+ if (unlikely(!task))
53054+ task = find_task_by_vpid(shm_lapid);
53055+
53056+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
53057+ (task->pid == shm_lapid)) &&
53058+ (task->acl->mode & GR_PROTSHM) &&
53059+ (task->acl != current->acl))) {
53060+ read_unlock(&tasklist_lock);
53061+ rcu_read_unlock();
53062+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
53063+ return 0;
53064+ }
53065+ read_unlock(&tasklist_lock);
53066+ rcu_read_unlock();
53067+
53068+ return 1;
53069+}
53070diff -urNp linux-3.1.1/grsecurity/grsec_chdir.c linux-3.1.1/grsecurity/grsec_chdir.c
53071--- linux-3.1.1/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
53072+++ linux-3.1.1/grsecurity/grsec_chdir.c 2011-11-16 18:40:31.000000000 -0500
53073@@ -0,0 +1,19 @@
53074+#include <linux/kernel.h>
53075+#include <linux/sched.h>
53076+#include <linux/fs.h>
53077+#include <linux/file.h>
53078+#include <linux/grsecurity.h>
53079+#include <linux/grinternal.h>
53080+
53081+void
53082+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
53083+{
53084+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
53085+ if ((grsec_enable_chdir && grsec_enable_group &&
53086+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
53087+ !grsec_enable_group)) {
53088+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
53089+ }
53090+#endif
53091+ return;
53092+}
53093diff -urNp linux-3.1.1/grsecurity/grsec_chroot.c linux-3.1.1/grsecurity/grsec_chroot.c
53094--- linux-3.1.1/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
53095+++ linux-3.1.1/grsecurity/grsec_chroot.c 2011-11-16 18:40:31.000000000 -0500
53096@@ -0,0 +1,351 @@
53097+#include <linux/kernel.h>
53098+#include <linux/module.h>
53099+#include <linux/sched.h>
53100+#include <linux/file.h>
53101+#include <linux/fs.h>
53102+#include <linux/mount.h>
53103+#include <linux/types.h>
53104+#include <linux/pid_namespace.h>
53105+#include <linux/grsecurity.h>
53106+#include <linux/grinternal.h>
53107+
53108+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
53109+{
53110+#ifdef CONFIG_GRKERNSEC
53111+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
53112+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
53113+ task->gr_is_chrooted = 1;
53114+ else
53115+ task->gr_is_chrooted = 0;
53116+
53117+ task->gr_chroot_dentry = path->dentry;
53118+#endif
53119+ return;
53120+}
53121+
53122+void gr_clear_chroot_entries(struct task_struct *task)
53123+{
53124+#ifdef CONFIG_GRKERNSEC
53125+ task->gr_is_chrooted = 0;
53126+ task->gr_chroot_dentry = NULL;
53127+#endif
53128+ return;
53129+}
53130+
53131+int
53132+gr_handle_chroot_unix(const pid_t pid)
53133+{
53134+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
53135+ struct task_struct *p;
53136+
53137+ if (unlikely(!grsec_enable_chroot_unix))
53138+ return 1;
53139+
53140+ if (likely(!proc_is_chrooted(current)))
53141+ return 1;
53142+
53143+ rcu_read_lock();
53144+ read_lock(&tasklist_lock);
53145+ p = find_task_by_vpid_unrestricted(pid);
53146+ if (unlikely(p && !have_same_root(current, p))) {
53147+ read_unlock(&tasklist_lock);
53148+ rcu_read_unlock();
53149+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
53150+ return 0;
53151+ }
53152+ read_unlock(&tasklist_lock);
53153+ rcu_read_unlock();
53154+#endif
53155+ return 1;
53156+}
53157+
53158+int
53159+gr_handle_chroot_nice(void)
53160+{
53161+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
53162+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
53163+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
53164+ return -EPERM;
53165+ }
53166+#endif
53167+ return 0;
53168+}
53169+
53170+int
53171+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
53172+{
53173+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
53174+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
53175+ && proc_is_chrooted(current)) {
53176+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
53177+ return -EACCES;
53178+ }
53179+#endif
53180+ return 0;
53181+}
53182+
53183+int
53184+gr_handle_chroot_rawio(const struct inode *inode)
53185+{
53186+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
53187+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
53188+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
53189+ return 1;
53190+#endif
53191+ return 0;
53192+}
53193+
53194+int
53195+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
53196+{
53197+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53198+ struct task_struct *p;
53199+ int ret = 0;
53200+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
53201+ return ret;
53202+
53203+ read_lock(&tasklist_lock);
53204+ do_each_pid_task(pid, type, p) {
53205+ if (!have_same_root(current, p)) {
53206+ ret = 1;
53207+ goto out;
53208+ }
53209+ } while_each_pid_task(pid, type, p);
53210+out:
53211+ read_unlock(&tasklist_lock);
53212+ return ret;
53213+#endif
53214+ return 0;
53215+}
53216+
53217+int
53218+gr_pid_is_chrooted(struct task_struct *p)
53219+{
53220+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53221+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
53222+ return 0;
53223+
53224+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
53225+ !have_same_root(current, p)) {
53226+ return 1;
53227+ }
53228+#endif
53229+ return 0;
53230+}
53231+
53232+EXPORT_SYMBOL(gr_pid_is_chrooted);
53233+
53234+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
53235+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
53236+{
53237+ struct path path, currentroot;
53238+ int ret = 0;
53239+
53240+ path.dentry = (struct dentry *)u_dentry;
53241+ path.mnt = (struct vfsmount *)u_mnt;
53242+ get_fs_root(current->fs, &currentroot);
53243+ if (path_is_under(&path, &currentroot))
53244+ ret = 1;
53245+ path_put(&currentroot);
53246+
53247+ return ret;
53248+}
53249+#endif
53250+
53251+int
53252+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
53253+{
53254+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
53255+ if (!grsec_enable_chroot_fchdir)
53256+ return 1;
53257+
53258+ if (!proc_is_chrooted(current))
53259+ return 1;
53260+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
53261+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
53262+ return 0;
53263+ }
53264+#endif
53265+ return 1;
53266+}
53267+
53268+int
53269+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
53270+ const time_t shm_createtime)
53271+{
53272+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
53273+ struct task_struct *p;
53274+ time_t starttime;
53275+
53276+ if (unlikely(!grsec_enable_chroot_shmat))
53277+ return 1;
53278+
53279+ if (likely(!proc_is_chrooted(current)))
53280+ return 1;
53281+
53282+ rcu_read_lock();
53283+ read_lock(&tasklist_lock);
53284+
53285+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
53286+ starttime = p->start_time.tv_sec;
53287+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
53288+ if (have_same_root(current, p)) {
53289+ goto allow;
53290+ } else {
53291+ read_unlock(&tasklist_lock);
53292+ rcu_read_unlock();
53293+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
53294+ return 0;
53295+ }
53296+ }
53297+ /* creator exited, pid reuse, fall through to next check */
53298+ }
53299+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
53300+ if (unlikely(!have_same_root(current, p))) {
53301+ read_unlock(&tasklist_lock);
53302+ rcu_read_unlock();
53303+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
53304+ return 0;
53305+ }
53306+ }
53307+
53308+allow:
53309+ read_unlock(&tasklist_lock);
53310+ rcu_read_unlock();
53311+#endif
53312+ return 1;
53313+}
53314+
53315+void
53316+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
53317+{
53318+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
53319+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
53320+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
53321+#endif
53322+ return;
53323+}
53324+
53325+int
53326+gr_handle_chroot_mknod(const struct dentry *dentry,
53327+ const struct vfsmount *mnt, const int mode)
53328+{
53329+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
53330+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
53331+ proc_is_chrooted(current)) {
53332+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
53333+ return -EPERM;
53334+ }
53335+#endif
53336+ return 0;
53337+}
53338+
53339+int
53340+gr_handle_chroot_mount(const struct dentry *dentry,
53341+ const struct vfsmount *mnt, const char *dev_name)
53342+{
53343+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
53344+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
53345+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
53346+ return -EPERM;
53347+ }
53348+#endif
53349+ return 0;
53350+}
53351+
53352+int
53353+gr_handle_chroot_pivot(void)
53354+{
53355+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
53356+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
53357+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
53358+ return -EPERM;
53359+ }
53360+#endif
53361+ return 0;
53362+}
53363+
53364+int
53365+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
53366+{
53367+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
53368+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
53369+ !gr_is_outside_chroot(dentry, mnt)) {
53370+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
53371+ return -EPERM;
53372+ }
53373+#endif
53374+ return 0;
53375+}
53376+
53377+extern const char *captab_log[];
53378+extern int captab_log_entries;
53379+
53380+int
53381+gr_chroot_is_capable(const int cap)
53382+{
53383+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
53384+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
53385+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
53386+ if (cap_raised(chroot_caps, cap)) {
53387+ const struct cred *creds = current_cred();
53388+ if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
53389+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
53390+ }
53391+ return 0;
53392+ }
53393+ }
53394+#endif
53395+ return 1;
53396+}
53397+
53398+int
53399+gr_chroot_is_capable_nolog(const int cap)
53400+{
53401+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
53402+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
53403+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
53404+ if (cap_raised(chroot_caps, cap)) {
53405+ return 0;
53406+ }
53407+ }
53408+#endif
53409+ return 1;
53410+}
53411+
53412+int
53413+gr_handle_chroot_sysctl(const int op)
53414+{
53415+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
53416+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
53417+ proc_is_chrooted(current))
53418+ return -EACCES;
53419+#endif
53420+ return 0;
53421+}
53422+
53423+void
53424+gr_handle_chroot_chdir(struct path *path)
53425+{
53426+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
53427+ if (grsec_enable_chroot_chdir)
53428+ set_fs_pwd(current->fs, path);
53429+#endif
53430+ return;
53431+}
53432+
53433+int
53434+gr_handle_chroot_chmod(const struct dentry *dentry,
53435+ const struct vfsmount *mnt, const int mode)
53436+{
53437+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
53438+ /* allow chmod +s on directories, but not files */
53439+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
53440+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
53441+ proc_is_chrooted(current)) {
53442+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
53443+ return -EPERM;
53444+ }
53445+#endif
53446+ return 0;
53447+}
53448diff -urNp linux-3.1.1/grsecurity/grsec_disabled.c linux-3.1.1/grsecurity/grsec_disabled.c
53449--- linux-3.1.1/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
53450+++ linux-3.1.1/grsecurity/grsec_disabled.c 2011-11-17 00:16:25.000000000 -0500
53451@@ -0,0 +1,439 @@
53452+#include <linux/kernel.h>
53453+#include <linux/module.h>
53454+#include <linux/sched.h>
53455+#include <linux/file.h>
53456+#include <linux/fs.h>
53457+#include <linux/kdev_t.h>
53458+#include <linux/net.h>
53459+#include <linux/in.h>
53460+#include <linux/ip.h>
53461+#include <linux/skbuff.h>
53462+#include <linux/sysctl.h>
53463+
53464+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
53465+void
53466+pax_set_initial_flags(struct linux_binprm *bprm)
53467+{
53468+ return;
53469+}
53470+#endif
53471+
53472+#ifdef CONFIG_SYSCTL
53473+__u32
53474+gr_handle_sysctl(const struct ctl_table * table, const int op)
53475+{
53476+ return 0;
53477+}
53478+#endif
53479+
53480+#ifdef CONFIG_TASKSTATS
53481+int gr_is_taskstats_denied(int pid)
53482+{
53483+ return 0;
53484+}
53485+#endif
53486+
53487+int
53488+gr_acl_is_enabled(void)
53489+{
53490+ return 0;
53491+}
53492+
53493+void
53494+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
53495+{
53496+ return;
53497+}
53498+
53499+int
53500+gr_handle_rawio(const struct inode *inode)
53501+{
53502+ return 0;
53503+}
53504+
53505+void
53506+gr_acl_handle_psacct(struct task_struct *task, const long code)
53507+{
53508+ return;
53509+}
53510+
53511+int
53512+gr_handle_ptrace(struct task_struct *task, const long request)
53513+{
53514+ return 0;
53515+}
53516+
53517+int
53518+gr_handle_proc_ptrace(struct task_struct *task)
53519+{
53520+ return 0;
53521+}
53522+
53523+void
53524+gr_learn_resource(const struct task_struct *task,
53525+ const int res, const unsigned long wanted, const int gt)
53526+{
53527+ return;
53528+}
53529+
53530+int
53531+gr_set_acls(const int type)
53532+{
53533+ return 0;
53534+}
53535+
53536+int
53537+gr_check_hidden_task(const struct task_struct *tsk)
53538+{
53539+ return 0;
53540+}
53541+
53542+int
53543+gr_check_protected_task(const struct task_struct *task)
53544+{
53545+ return 0;
53546+}
53547+
53548+int
53549+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
53550+{
53551+ return 0;
53552+}
53553+
53554+void
53555+gr_copy_label(struct task_struct *tsk)
53556+{
53557+ return;
53558+}
53559+
53560+void
53561+gr_set_pax_flags(struct task_struct *task)
53562+{
53563+ return;
53564+}
53565+
53566+int
53567+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
53568+ const int unsafe_share)
53569+{
53570+ return 0;
53571+}
53572+
53573+void
53574+gr_handle_delete(const ino_t ino, const dev_t dev)
53575+{
53576+ return;
53577+}
53578+
53579+void
53580+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
53581+{
53582+ return;
53583+}
53584+
53585+void
53586+gr_handle_crash(struct task_struct *task, const int sig)
53587+{
53588+ return;
53589+}
53590+
53591+int
53592+gr_check_crash_exec(const struct file *filp)
53593+{
53594+ return 0;
53595+}
53596+
53597+int
53598+gr_check_crash_uid(const uid_t uid)
53599+{
53600+ return 0;
53601+}
53602+
53603+void
53604+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
53605+ struct dentry *old_dentry,
53606+ struct dentry *new_dentry,
53607+ struct vfsmount *mnt, const __u8 replace)
53608+{
53609+ return;
53610+}
53611+
53612+int
53613+gr_search_socket(const int family, const int type, const int protocol)
53614+{
53615+ return 1;
53616+}
53617+
53618+int
53619+gr_search_connectbind(const int mode, const struct socket *sock,
53620+ const struct sockaddr_in *addr)
53621+{
53622+ return 0;
53623+}
53624+
53625+void
53626+gr_handle_alertkill(struct task_struct *task)
53627+{
53628+ return;
53629+}
53630+
53631+__u32
53632+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
53633+{
53634+ return 1;
53635+}
53636+
53637+__u32
53638+gr_acl_handle_hidden_file(const struct dentry * dentry,
53639+ const struct vfsmount * mnt)
53640+{
53641+ return 1;
53642+}
53643+
53644+__u32
53645+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
53646+ int acc_mode)
53647+{
53648+ return 1;
53649+}
53650+
53651+__u32
53652+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
53653+{
53654+ return 1;
53655+}
53656+
53657+__u32
53658+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
53659+{
53660+ return 1;
53661+}
53662+
53663+int
53664+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
53665+ unsigned int *vm_flags)
53666+{
53667+ return 1;
53668+}
53669+
53670+__u32
53671+gr_acl_handle_truncate(const struct dentry * dentry,
53672+ const struct vfsmount * mnt)
53673+{
53674+ return 1;
53675+}
53676+
53677+__u32
53678+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
53679+{
53680+ return 1;
53681+}
53682+
53683+__u32
53684+gr_acl_handle_access(const struct dentry * dentry,
53685+ const struct vfsmount * mnt, const int fmode)
53686+{
53687+ return 1;
53688+}
53689+
53690+__u32
53691+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
53692+ mode_t mode)
53693+{
53694+ return 1;
53695+}
53696+
53697+__u32
53698+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
53699+ mode_t mode)
53700+{
53701+ return 1;
53702+}
53703+
53704+__u32
53705+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
53706+{
53707+ return 1;
53708+}
53709+
53710+__u32
53711+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
53712+{
53713+ return 1;
53714+}
53715+
53716+void
53717+grsecurity_init(void)
53718+{
53719+ return;
53720+}
53721+
53722+__u32
53723+gr_acl_handle_mknod(const struct dentry * new_dentry,
53724+ const struct dentry * parent_dentry,
53725+ const struct vfsmount * parent_mnt,
53726+ const int mode)
53727+{
53728+ return 1;
53729+}
53730+
53731+__u32
53732+gr_acl_handle_mkdir(const struct dentry * new_dentry,
53733+ const struct dentry * parent_dentry,
53734+ const struct vfsmount * parent_mnt)
53735+{
53736+ return 1;
53737+}
53738+
53739+__u32
53740+gr_acl_handle_symlink(const struct dentry * new_dentry,
53741+ const struct dentry * parent_dentry,
53742+ const struct vfsmount * parent_mnt, const char *from)
53743+{
53744+ return 1;
53745+}
53746+
53747+__u32
53748+gr_acl_handle_link(const struct dentry * new_dentry,
53749+ const struct dentry * parent_dentry,
53750+ const struct vfsmount * parent_mnt,
53751+ const struct dentry * old_dentry,
53752+ const struct vfsmount * old_mnt, const char *to)
53753+{
53754+ return 1;
53755+}
53756+
53757+int
53758+gr_acl_handle_rename(const struct dentry *new_dentry,
53759+ const struct dentry *parent_dentry,
53760+ const struct vfsmount *parent_mnt,
53761+ const struct dentry *old_dentry,
53762+ const struct inode *old_parent_inode,
53763+ const struct vfsmount *old_mnt, const char *newname)
53764+{
53765+ return 0;
53766+}
53767+
53768+int
53769+gr_acl_handle_filldir(const struct file *file, const char *name,
53770+ const int namelen, const ino_t ino)
53771+{
53772+ return 1;
53773+}
53774+
53775+int
53776+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
53777+ const time_t shm_createtime, const uid_t cuid, const int shmid)
53778+{
53779+ return 1;
53780+}
53781+
53782+int
53783+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
53784+{
53785+ return 0;
53786+}
53787+
53788+int
53789+gr_search_accept(const struct socket *sock)
53790+{
53791+ return 0;
53792+}
53793+
53794+int
53795+gr_search_listen(const struct socket *sock)
53796+{
53797+ return 0;
53798+}
53799+
53800+int
53801+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
53802+{
53803+ return 0;
53804+}
53805+
53806+__u32
53807+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
53808+{
53809+ return 1;
53810+}
53811+
53812+__u32
53813+gr_acl_handle_creat(const struct dentry * dentry,
53814+ const struct dentry * p_dentry,
53815+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
53816+ const int imode)
53817+{
53818+ return 1;
53819+}
53820+
53821+void
53822+gr_acl_handle_exit(void)
53823+{
53824+ return;
53825+}
53826+
53827+int
53828+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
53829+{
53830+ return 1;
53831+}
53832+
53833+void
53834+gr_set_role_label(const uid_t uid, const gid_t gid)
53835+{
53836+ return;
53837+}
53838+
53839+int
53840+gr_acl_handle_procpidmem(const struct task_struct *task)
53841+{
53842+ return 0;
53843+}
53844+
53845+int
53846+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
53847+{
53848+ return 0;
53849+}
53850+
53851+int
53852+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
53853+{
53854+ return 0;
53855+}
53856+
53857+void
53858+gr_set_kernel_label(struct task_struct *task)
53859+{
53860+ return;
53861+}
53862+
53863+int
53864+gr_check_user_change(int real, int effective, int fs)
53865+{
53866+ return 0;
53867+}
53868+
53869+int
53870+gr_check_group_change(int real, int effective, int fs)
53871+{
53872+ return 0;
53873+}
53874+
53875+int gr_acl_enable_at_secure(void)
53876+{
53877+ return 0;
53878+}
53879+
53880+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
53881+{
53882+ return dentry->d_inode->i_sb->s_dev;
53883+}
53884+
53885+EXPORT_SYMBOL(gr_learn_resource);
53886+EXPORT_SYMBOL(gr_set_kernel_label);
53887+#ifdef CONFIG_SECURITY
53888+EXPORT_SYMBOL(gr_check_user_change);
53889+EXPORT_SYMBOL(gr_check_group_change);
53890+#endif
53891diff -urNp linux-3.1.1/grsecurity/grsec_exec.c linux-3.1.1/grsecurity/grsec_exec.c
53892--- linux-3.1.1/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
53893+++ linux-3.1.1/grsecurity/grsec_exec.c 2011-11-16 18:40:31.000000000 -0500
53894@@ -0,0 +1,146 @@
53895+#include <linux/kernel.h>
53896+#include <linux/sched.h>
53897+#include <linux/file.h>
53898+#include <linux/binfmts.h>
53899+#include <linux/fs.h>
53900+#include <linux/types.h>
53901+#include <linux/grdefs.h>
53902+#include <linux/grsecurity.h>
53903+#include <linux/grinternal.h>
53904+#include <linux/capability.h>
53905+#include <linux/module.h>
53906+
53907+#include <asm/uaccess.h>
53908+
53909+#ifdef CONFIG_GRKERNSEC_EXECLOG
53910+static char gr_exec_arg_buf[132];
53911+static DEFINE_MUTEX(gr_exec_arg_mutex);
53912+#endif
53913+
53914+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
53915+
53916+void
53917+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
53918+{
53919+#ifdef CONFIG_GRKERNSEC_EXECLOG
53920+ char *grarg = gr_exec_arg_buf;
53921+ unsigned int i, x, execlen = 0;
53922+ char c;
53923+
53924+ if (!((grsec_enable_execlog && grsec_enable_group &&
53925+ in_group_p(grsec_audit_gid))
53926+ || (grsec_enable_execlog && !grsec_enable_group)))
53927+ return;
53928+
53929+ mutex_lock(&gr_exec_arg_mutex);
53930+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
53931+
53932+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
53933+ const char __user *p;
53934+ unsigned int len;
53935+
53936+ p = get_user_arg_ptr(argv, i);
53937+ if (IS_ERR(p))
53938+ goto log;
53939+
53940+ len = strnlen_user(p, 128 - execlen);
53941+ if (len > 128 - execlen)
53942+ len = 128 - execlen;
53943+ else if (len > 0)
53944+ len--;
53945+ if (copy_from_user(grarg + execlen, p, len))
53946+ goto log;
53947+
53948+ /* rewrite unprintable characters */
53949+ for (x = 0; x < len; x++) {
53950+ c = *(grarg + execlen + x);
53951+ if (c < 32 || c > 126)
53952+ *(grarg + execlen + x) = ' ';
53953+ }
53954+
53955+ execlen += len;
53956+ *(grarg + execlen) = ' ';
53957+ *(grarg + execlen + 1) = '\0';
53958+ execlen++;
53959+ }
53960+
53961+ log:
53962+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
53963+ bprm->file->f_path.mnt, grarg);
53964+ mutex_unlock(&gr_exec_arg_mutex);
53965+#endif
53966+ return;
53967+}
53968+
53969+#ifdef CONFIG_GRKERNSEC
53970+extern int gr_acl_is_capable(const int cap);
53971+extern int gr_acl_is_capable_nolog(const int cap);
53972+extern int gr_chroot_is_capable(const int cap);
53973+extern int gr_chroot_is_capable_nolog(const int cap);
53974+#endif
53975+
53976+const char *captab_log[] = {
53977+ "CAP_CHOWN",
53978+ "CAP_DAC_OVERRIDE",
53979+ "CAP_DAC_READ_SEARCH",
53980+ "CAP_FOWNER",
53981+ "CAP_FSETID",
53982+ "CAP_KILL",
53983+ "CAP_SETGID",
53984+ "CAP_SETUID",
53985+ "CAP_SETPCAP",
53986+ "CAP_LINUX_IMMUTABLE",
53987+ "CAP_NET_BIND_SERVICE",
53988+ "CAP_NET_BROADCAST",
53989+ "CAP_NET_ADMIN",
53990+ "CAP_NET_RAW",
53991+ "CAP_IPC_LOCK",
53992+ "CAP_IPC_OWNER",
53993+ "CAP_SYS_MODULE",
53994+ "CAP_SYS_RAWIO",
53995+ "CAP_SYS_CHROOT",
53996+ "CAP_SYS_PTRACE",
53997+ "CAP_SYS_PACCT",
53998+ "CAP_SYS_ADMIN",
53999+ "CAP_SYS_BOOT",
54000+ "CAP_SYS_NICE",
54001+ "CAP_SYS_RESOURCE",
54002+ "CAP_SYS_TIME",
54003+ "CAP_SYS_TTY_CONFIG",
54004+ "CAP_MKNOD",
54005+ "CAP_LEASE",
54006+ "CAP_AUDIT_WRITE",
54007+ "CAP_AUDIT_CONTROL",
54008+ "CAP_SETFCAP",
54009+ "CAP_MAC_OVERRIDE",
54010+ "CAP_MAC_ADMIN",
54011+ "CAP_SYSLOG",
54012+ "CAP_WAKE_ALARM"
54013+};
54014+
54015+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
54016+
54017+int gr_is_capable(const int cap)
54018+{
54019+#ifdef CONFIG_GRKERNSEC
54020+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
54021+ return 1;
54022+ return 0;
54023+#else
54024+ return 1;
54025+#endif
54026+}
54027+
54028+int gr_is_capable_nolog(const int cap)
54029+{
54030+#ifdef CONFIG_GRKERNSEC
54031+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
54032+ return 1;
54033+ return 0;
54034+#else
54035+ return 1;
54036+#endif
54037+}
54038+
54039+EXPORT_SYMBOL(gr_is_capable);
54040+EXPORT_SYMBOL(gr_is_capable_nolog);
54041diff -urNp linux-3.1.1/grsecurity/grsec_fifo.c linux-3.1.1/grsecurity/grsec_fifo.c
54042--- linux-3.1.1/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
54043+++ linux-3.1.1/grsecurity/grsec_fifo.c 2011-11-16 18:40:31.000000000 -0500
54044@@ -0,0 +1,24 @@
54045+#include <linux/kernel.h>
54046+#include <linux/sched.h>
54047+#include <linux/fs.h>
54048+#include <linux/file.h>
54049+#include <linux/grinternal.h>
54050+
54051+int
54052+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
54053+ const struct dentry *dir, const int flag, const int acc_mode)
54054+{
54055+#ifdef CONFIG_GRKERNSEC_FIFO
54056+ const struct cred *cred = current_cred();
54057+
54058+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
54059+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
54060+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
54061+ (cred->fsuid != dentry->d_inode->i_uid)) {
54062+ if (!inode_permission(dentry->d_inode, acc_mode))
54063+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
54064+ return -EACCES;
54065+ }
54066+#endif
54067+ return 0;
54068+}
54069diff -urNp linux-3.1.1/grsecurity/grsec_fork.c linux-3.1.1/grsecurity/grsec_fork.c
54070--- linux-3.1.1/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
54071+++ linux-3.1.1/grsecurity/grsec_fork.c 2011-11-16 18:40:31.000000000 -0500
54072@@ -0,0 +1,23 @@
54073+#include <linux/kernel.h>
54074+#include <linux/sched.h>
54075+#include <linux/grsecurity.h>
54076+#include <linux/grinternal.h>
54077+#include <linux/errno.h>
54078+
54079+void
54080+gr_log_forkfail(const int retval)
54081+{
54082+#ifdef CONFIG_GRKERNSEC_FORKFAIL
54083+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
54084+ switch (retval) {
54085+ case -EAGAIN:
54086+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
54087+ break;
54088+ case -ENOMEM:
54089+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
54090+ break;
54091+ }
54092+ }
54093+#endif
54094+ return;
54095+}
54096diff -urNp linux-3.1.1/grsecurity/grsec_init.c linux-3.1.1/grsecurity/grsec_init.c
54097--- linux-3.1.1/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
54098+++ linux-3.1.1/grsecurity/grsec_init.c 2011-11-16 18:40:31.000000000 -0500
54099@@ -0,0 +1,269 @@
54100+#include <linux/kernel.h>
54101+#include <linux/sched.h>
54102+#include <linux/mm.h>
54103+#include <linux/gracl.h>
54104+#include <linux/slab.h>
54105+#include <linux/vmalloc.h>
54106+#include <linux/percpu.h>
54107+#include <linux/module.h>
54108+
54109+int grsec_enable_brute;
54110+int grsec_enable_link;
54111+int grsec_enable_dmesg;
54112+int grsec_enable_harden_ptrace;
54113+int grsec_enable_fifo;
54114+int grsec_enable_execlog;
54115+int grsec_enable_signal;
54116+int grsec_enable_forkfail;
54117+int grsec_enable_audit_ptrace;
54118+int grsec_enable_time;
54119+int grsec_enable_audit_textrel;
54120+int grsec_enable_group;
54121+int grsec_audit_gid;
54122+int grsec_enable_chdir;
54123+int grsec_enable_mount;
54124+int grsec_enable_rofs;
54125+int grsec_enable_chroot_findtask;
54126+int grsec_enable_chroot_mount;
54127+int grsec_enable_chroot_shmat;
54128+int grsec_enable_chroot_fchdir;
54129+int grsec_enable_chroot_double;
54130+int grsec_enable_chroot_pivot;
54131+int grsec_enable_chroot_chdir;
54132+int grsec_enable_chroot_chmod;
54133+int grsec_enable_chroot_mknod;
54134+int grsec_enable_chroot_nice;
54135+int grsec_enable_chroot_execlog;
54136+int grsec_enable_chroot_caps;
54137+int grsec_enable_chroot_sysctl;
54138+int grsec_enable_chroot_unix;
54139+int grsec_enable_tpe;
54140+int grsec_tpe_gid;
54141+int grsec_enable_blackhole;
54142+#ifdef CONFIG_IPV6_MODULE
54143+EXPORT_SYMBOL(grsec_enable_blackhole);
54144+#endif
54145+int grsec_lastack_retries;
54146+int grsec_enable_tpe_all;
54147+int grsec_enable_tpe_invert;
54148+int grsec_enable_socket_all;
54149+int grsec_socket_all_gid;
54150+int grsec_enable_socket_client;
54151+int grsec_socket_client_gid;
54152+int grsec_enable_socket_server;
54153+int grsec_socket_server_gid;
54154+int grsec_resource_logging;
54155+int grsec_disable_privio;
54156+int grsec_enable_log_rwxmaps;
54157+int grsec_lock;
54158+
54159+DEFINE_SPINLOCK(grsec_alert_lock);
54160+unsigned long grsec_alert_wtime = 0;
54161+unsigned long grsec_alert_fyet = 0;
54162+
54163+DEFINE_SPINLOCK(grsec_audit_lock);
54164+
54165+DEFINE_RWLOCK(grsec_exec_file_lock);
54166+
54167+char *gr_shared_page[4];
54168+
54169+char *gr_alert_log_fmt;
54170+char *gr_audit_log_fmt;
54171+char *gr_alert_log_buf;
54172+char *gr_audit_log_buf;
54173+
54174+extern struct gr_arg *gr_usermode;
54175+extern unsigned char *gr_system_salt;
54176+extern unsigned char *gr_system_sum;
54177+
54178+void __init
54179+grsecurity_init(void)
54180+{
54181+ int j;
54182+ /* create the per-cpu shared pages */
54183+
54184+#ifdef CONFIG_X86
54185+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
54186+#endif
54187+
54188+ for (j = 0; j < 4; j++) {
54189+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
54190+ if (gr_shared_page[j] == NULL) {
54191+ panic("Unable to allocate grsecurity shared page");
54192+ return;
54193+ }
54194+ }
54195+
54196+ /* allocate log buffers */
54197+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
54198+ if (!gr_alert_log_fmt) {
54199+ panic("Unable to allocate grsecurity alert log format buffer");
54200+ return;
54201+ }
54202+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
54203+ if (!gr_audit_log_fmt) {
54204+ panic("Unable to allocate grsecurity audit log format buffer");
54205+ return;
54206+ }
54207+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
54208+ if (!gr_alert_log_buf) {
54209+ panic("Unable to allocate grsecurity alert log buffer");
54210+ return;
54211+ }
54212+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
54213+ if (!gr_audit_log_buf) {
54214+ panic("Unable to allocate grsecurity audit log buffer");
54215+ return;
54216+ }
54217+
54218+ /* allocate memory for authentication structure */
54219+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
54220+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
54221+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
54222+
54223+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
54224+ panic("Unable to allocate grsecurity authentication structure");
54225+ return;
54226+ }
54227+
54228+
54229+#ifdef CONFIG_GRKERNSEC_IO
54230+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
54231+ grsec_disable_privio = 1;
54232+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
54233+ grsec_disable_privio = 1;
54234+#else
54235+ grsec_disable_privio = 0;
54236+#endif
54237+#endif
54238+
54239+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
54240+ /* for backward compatibility, tpe_invert always defaults to on if
54241+ enabled in the kernel
54242+ */
54243+ grsec_enable_tpe_invert = 1;
54244+#endif
54245+
54246+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
54247+#ifndef CONFIG_GRKERNSEC_SYSCTL
54248+ grsec_lock = 1;
54249+#endif
54250+
54251+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
54252+ grsec_enable_audit_textrel = 1;
54253+#endif
54254+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54255+ grsec_enable_log_rwxmaps = 1;
54256+#endif
54257+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
54258+ grsec_enable_group = 1;
54259+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
54260+#endif
54261+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
54262+ grsec_enable_chdir = 1;
54263+#endif
54264+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
54265+ grsec_enable_harden_ptrace = 1;
54266+#endif
54267+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54268+ grsec_enable_mount = 1;
54269+#endif
54270+#ifdef CONFIG_GRKERNSEC_LINK
54271+ grsec_enable_link = 1;
54272+#endif
54273+#ifdef CONFIG_GRKERNSEC_BRUTE
54274+ grsec_enable_brute = 1;
54275+#endif
54276+#ifdef CONFIG_GRKERNSEC_DMESG
54277+ grsec_enable_dmesg = 1;
54278+#endif
54279+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
54280+ grsec_enable_blackhole = 1;
54281+ grsec_lastack_retries = 4;
54282+#endif
54283+#ifdef CONFIG_GRKERNSEC_FIFO
54284+ grsec_enable_fifo = 1;
54285+#endif
54286+#ifdef CONFIG_GRKERNSEC_EXECLOG
54287+ grsec_enable_execlog = 1;
54288+#endif
54289+#ifdef CONFIG_GRKERNSEC_SIGNAL
54290+ grsec_enable_signal = 1;
54291+#endif
54292+#ifdef CONFIG_GRKERNSEC_FORKFAIL
54293+ grsec_enable_forkfail = 1;
54294+#endif
54295+#ifdef CONFIG_GRKERNSEC_TIME
54296+ grsec_enable_time = 1;
54297+#endif
54298+#ifdef CONFIG_GRKERNSEC_RESLOG
54299+ grsec_resource_logging = 1;
54300+#endif
54301+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
54302+ grsec_enable_chroot_findtask = 1;
54303+#endif
54304+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
54305+ grsec_enable_chroot_unix = 1;
54306+#endif
54307+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
54308+ grsec_enable_chroot_mount = 1;
54309+#endif
54310+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
54311+ grsec_enable_chroot_fchdir = 1;
54312+#endif
54313+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
54314+ grsec_enable_chroot_shmat = 1;
54315+#endif
54316+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
54317+ grsec_enable_audit_ptrace = 1;
54318+#endif
54319+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
54320+ grsec_enable_chroot_double = 1;
54321+#endif
54322+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
54323+ grsec_enable_chroot_pivot = 1;
54324+#endif
54325+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
54326+ grsec_enable_chroot_chdir = 1;
54327+#endif
54328+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
54329+ grsec_enable_chroot_chmod = 1;
54330+#endif
54331+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
54332+ grsec_enable_chroot_mknod = 1;
54333+#endif
54334+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
54335+ grsec_enable_chroot_nice = 1;
54336+#endif
54337+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
54338+ grsec_enable_chroot_execlog = 1;
54339+#endif
54340+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54341+ grsec_enable_chroot_caps = 1;
54342+#endif
54343+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
54344+ grsec_enable_chroot_sysctl = 1;
54345+#endif
54346+#ifdef CONFIG_GRKERNSEC_TPE
54347+ grsec_enable_tpe = 1;
54348+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
54349+#ifdef CONFIG_GRKERNSEC_TPE_ALL
54350+ grsec_enable_tpe_all = 1;
54351+#endif
54352+#endif
54353+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
54354+ grsec_enable_socket_all = 1;
54355+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
54356+#endif
54357+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
54358+ grsec_enable_socket_client = 1;
54359+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
54360+#endif
54361+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
54362+ grsec_enable_socket_server = 1;
54363+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
54364+#endif
54365+#endif
54366+
54367+ return;
54368+}
54369diff -urNp linux-3.1.1/grsecurity/grsec_link.c linux-3.1.1/grsecurity/grsec_link.c
54370--- linux-3.1.1/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
54371+++ linux-3.1.1/grsecurity/grsec_link.c 2011-11-16 18:40:31.000000000 -0500
54372@@ -0,0 +1,43 @@
54373+#include <linux/kernel.h>
54374+#include <linux/sched.h>
54375+#include <linux/fs.h>
54376+#include <linux/file.h>
54377+#include <linux/grinternal.h>
54378+
54379+int
54380+gr_handle_follow_link(const struct inode *parent,
54381+ const struct inode *inode,
54382+ const struct dentry *dentry, const struct vfsmount *mnt)
54383+{
54384+#ifdef CONFIG_GRKERNSEC_LINK
54385+ const struct cred *cred = current_cred();
54386+
54387+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
54388+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
54389+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
54390+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
54391+ return -EACCES;
54392+ }
54393+#endif
54394+ return 0;
54395+}
54396+
54397+int
54398+gr_handle_hardlink(const struct dentry *dentry,
54399+ const struct vfsmount *mnt,
54400+ struct inode *inode, const int mode, const char *to)
54401+{
54402+#ifdef CONFIG_GRKERNSEC_LINK
54403+ const struct cred *cred = current_cred();
54404+
54405+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
54406+ (!S_ISREG(mode) || (mode & S_ISUID) ||
54407+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
54408+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
54409+ !capable(CAP_FOWNER) && cred->uid) {
54410+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
54411+ return -EPERM;
54412+ }
54413+#endif
54414+ return 0;
54415+}
54416diff -urNp linux-3.1.1/grsecurity/grsec_log.c linux-3.1.1/grsecurity/grsec_log.c
54417--- linux-3.1.1/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
54418+++ linux-3.1.1/grsecurity/grsec_log.c 2011-11-16 18:40:31.000000000 -0500
54419@@ -0,0 +1,322 @@
54420+#include <linux/kernel.h>
54421+#include <linux/sched.h>
54422+#include <linux/file.h>
54423+#include <linux/tty.h>
54424+#include <linux/fs.h>
54425+#include <linux/grinternal.h>
54426+
54427+#ifdef CONFIG_TREE_PREEMPT_RCU
54428+#define DISABLE_PREEMPT() preempt_disable()
54429+#define ENABLE_PREEMPT() preempt_enable()
54430+#else
54431+#define DISABLE_PREEMPT()
54432+#define ENABLE_PREEMPT()
54433+#endif
54434+
54435+#define BEGIN_LOCKS(x) \
54436+ DISABLE_PREEMPT(); \
54437+ rcu_read_lock(); \
54438+ read_lock(&tasklist_lock); \
54439+ read_lock(&grsec_exec_file_lock); \
54440+ if (x != GR_DO_AUDIT) \
54441+ spin_lock(&grsec_alert_lock); \
54442+ else \
54443+ spin_lock(&grsec_audit_lock)
54444+
54445+#define END_LOCKS(x) \
54446+ if (x != GR_DO_AUDIT) \
54447+ spin_unlock(&grsec_alert_lock); \
54448+ else \
54449+ spin_unlock(&grsec_audit_lock); \
54450+ read_unlock(&grsec_exec_file_lock); \
54451+ read_unlock(&tasklist_lock); \
54452+ rcu_read_unlock(); \
54453+ ENABLE_PREEMPT(); \
54454+ if (x == GR_DONT_AUDIT) \
54455+ gr_handle_alertkill(current)
54456+
54457+enum {
54458+ FLOODING,
54459+ NO_FLOODING
54460+};
54461+
54462+extern char *gr_alert_log_fmt;
54463+extern char *gr_audit_log_fmt;
54464+extern char *gr_alert_log_buf;
54465+extern char *gr_audit_log_buf;
54466+
54467+static int gr_log_start(int audit)
54468+{
54469+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
54470+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
54471+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54472+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
54473+ unsigned long curr_secs = get_seconds();
54474+
54475+ if (audit == GR_DO_AUDIT)
54476+ goto set_fmt;
54477+
54478+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
54479+ grsec_alert_wtime = curr_secs;
54480+ grsec_alert_fyet = 0;
54481+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
54482+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
54483+ grsec_alert_fyet++;
54484+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
54485+ grsec_alert_wtime = curr_secs;
54486+ grsec_alert_fyet++;
54487+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
54488+ return FLOODING;
54489+ }
54490+ else return FLOODING;
54491+
54492+set_fmt:
54493+#endif
54494+ memset(buf, 0, PAGE_SIZE);
54495+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
54496+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
54497+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
54498+ } else if (current->signal->curr_ip) {
54499+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
54500+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
54501+ } else if (gr_acl_is_enabled()) {
54502+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
54503+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
54504+ } else {
54505+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
54506+ strcpy(buf, fmt);
54507+ }
54508+
54509+ return NO_FLOODING;
54510+}
54511+
54512+static void gr_log_middle(int audit, const char *msg, va_list ap)
54513+ __attribute__ ((format (printf, 2, 0)));
54514+
54515+static void gr_log_middle(int audit, const char *msg, va_list ap)
54516+{
54517+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54518+ unsigned int len = strlen(buf);
54519+
54520+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
54521+
54522+ return;
54523+}
54524+
54525+static void gr_log_middle_varargs(int audit, const char *msg, ...)
54526+ __attribute__ ((format (printf, 2, 3)));
54527+
54528+static void gr_log_middle_varargs(int audit, const char *msg, ...)
54529+{
54530+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54531+ unsigned int len = strlen(buf);
54532+ va_list ap;
54533+
54534+ va_start(ap, msg);
54535+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
54536+ va_end(ap);
54537+
54538+ return;
54539+}
54540+
54541+static void gr_log_end(int audit, int append_default)
54542+{
54543+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54544+
54545+ if (append_default) {
54546+ unsigned int len = strlen(buf);
54547+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
54548+ }
54549+
54550+ printk("%s\n", buf);
54551+
54552+ return;
54553+}
54554+
54555+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
54556+{
54557+ int logtype;
54558+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
54559+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
54560+ void *voidptr = NULL;
54561+ int num1 = 0, num2 = 0;
54562+ unsigned long ulong1 = 0, ulong2 = 0;
54563+ struct dentry *dentry = NULL;
54564+ struct vfsmount *mnt = NULL;
54565+ struct file *file = NULL;
54566+ struct task_struct *task = NULL;
54567+ const struct cred *cred, *pcred;
54568+ va_list ap;
54569+
54570+ BEGIN_LOCKS(audit);
54571+ logtype = gr_log_start(audit);
54572+ if (logtype == FLOODING) {
54573+ END_LOCKS(audit);
54574+ return;
54575+ }
54576+ va_start(ap, argtypes);
54577+ switch (argtypes) {
54578+ case GR_TTYSNIFF:
54579+ task = va_arg(ap, struct task_struct *);
54580+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
54581+ break;
54582+ case GR_SYSCTL_HIDDEN:
54583+ str1 = va_arg(ap, char *);
54584+ gr_log_middle_varargs(audit, msg, result, str1);
54585+ break;
54586+ case GR_RBAC:
54587+ dentry = va_arg(ap, struct dentry *);
54588+ mnt = va_arg(ap, struct vfsmount *);
54589+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
54590+ break;
54591+ case GR_RBAC_STR:
54592+ dentry = va_arg(ap, struct dentry *);
54593+ mnt = va_arg(ap, struct vfsmount *);
54594+ str1 = va_arg(ap, char *);
54595+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
54596+ break;
54597+ case GR_STR_RBAC:
54598+ str1 = va_arg(ap, char *);
54599+ dentry = va_arg(ap, struct dentry *);
54600+ mnt = va_arg(ap, struct vfsmount *);
54601+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
54602+ break;
54603+ case GR_RBAC_MODE2:
54604+ dentry = va_arg(ap, struct dentry *);
54605+ mnt = va_arg(ap, struct vfsmount *);
54606+ str1 = va_arg(ap, char *);
54607+ str2 = va_arg(ap, char *);
54608+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
54609+ break;
54610+ case GR_RBAC_MODE3:
54611+ dentry = va_arg(ap, struct dentry *);
54612+ mnt = va_arg(ap, struct vfsmount *);
54613+ str1 = va_arg(ap, char *);
54614+ str2 = va_arg(ap, char *);
54615+ str3 = va_arg(ap, char *);
54616+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
54617+ break;
54618+ case GR_FILENAME:
54619+ dentry = va_arg(ap, struct dentry *);
54620+ mnt = va_arg(ap, struct vfsmount *);
54621+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
54622+ break;
54623+ case GR_STR_FILENAME:
54624+ str1 = va_arg(ap, char *);
54625+ dentry = va_arg(ap, struct dentry *);
54626+ mnt = va_arg(ap, struct vfsmount *);
54627+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
54628+ break;
54629+ case GR_FILENAME_STR:
54630+ dentry = va_arg(ap, struct dentry *);
54631+ mnt = va_arg(ap, struct vfsmount *);
54632+ str1 = va_arg(ap, char *);
54633+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
54634+ break;
54635+ case GR_FILENAME_TWO_INT:
54636+ dentry = va_arg(ap, struct dentry *);
54637+ mnt = va_arg(ap, struct vfsmount *);
54638+ num1 = va_arg(ap, int);
54639+ num2 = va_arg(ap, int);
54640+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
54641+ break;
54642+ case GR_FILENAME_TWO_INT_STR:
54643+ dentry = va_arg(ap, struct dentry *);
54644+ mnt = va_arg(ap, struct vfsmount *);
54645+ num1 = va_arg(ap, int);
54646+ num2 = va_arg(ap, int);
54647+ str1 = va_arg(ap, char *);
54648+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
54649+ break;
54650+ case GR_TEXTREL:
54651+ file = va_arg(ap, struct file *);
54652+ ulong1 = va_arg(ap, unsigned long);
54653+ ulong2 = va_arg(ap, unsigned long);
54654+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
54655+ break;
54656+ case GR_PTRACE:
54657+ task = va_arg(ap, struct task_struct *);
54658+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
54659+ break;
54660+ case GR_RESOURCE:
54661+ task = va_arg(ap, struct task_struct *);
54662+ cred = __task_cred(task);
54663+ pcred = __task_cred(task->real_parent);
54664+ ulong1 = va_arg(ap, unsigned long);
54665+ str1 = va_arg(ap, char *);
54666+ ulong2 = va_arg(ap, unsigned long);
54667+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54668+ break;
54669+ case GR_CAP:
54670+ task = va_arg(ap, struct task_struct *);
54671+ cred = __task_cred(task);
54672+ pcred = __task_cred(task->real_parent);
54673+ str1 = va_arg(ap, char *);
54674+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54675+ break;
54676+ case GR_SIG:
54677+ str1 = va_arg(ap, char *);
54678+ voidptr = va_arg(ap, void *);
54679+ gr_log_middle_varargs(audit, msg, str1, voidptr);
54680+ break;
54681+ case GR_SIG2:
54682+ task = va_arg(ap, struct task_struct *);
54683+ cred = __task_cred(task);
54684+ pcred = __task_cred(task->real_parent);
54685+ num1 = va_arg(ap, int);
54686+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54687+ break;
54688+ case GR_CRASH1:
54689+ task = va_arg(ap, struct task_struct *);
54690+ cred = __task_cred(task);
54691+ pcred = __task_cred(task->real_parent);
54692+ ulong1 = va_arg(ap, unsigned long);
54693+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
54694+ break;
54695+ case GR_CRASH2:
54696+ task = va_arg(ap, struct task_struct *);
54697+ cred = __task_cred(task);
54698+ pcred = __task_cred(task->real_parent);
54699+ ulong1 = va_arg(ap, unsigned long);
54700+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
54701+ break;
54702+ case GR_RWXMAP:
54703+ file = va_arg(ap, struct file *);
54704+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
54705+ break;
54706+ case GR_PSACCT:
54707+ {
54708+ unsigned int wday, cday;
54709+ __u8 whr, chr;
54710+ __u8 wmin, cmin;
54711+ __u8 wsec, csec;
54712+ char cur_tty[64] = { 0 };
54713+ char parent_tty[64] = { 0 };
54714+
54715+ task = va_arg(ap, struct task_struct *);
54716+ wday = va_arg(ap, unsigned int);
54717+ cday = va_arg(ap, unsigned int);
54718+ whr = va_arg(ap, int);
54719+ chr = va_arg(ap, int);
54720+ wmin = va_arg(ap, int);
54721+ cmin = va_arg(ap, int);
54722+ wsec = va_arg(ap, int);
54723+ csec = va_arg(ap, int);
54724+ ulong1 = va_arg(ap, unsigned long);
54725+ cred = __task_cred(task);
54726+ pcred = __task_cred(task->real_parent);
54727+
54728+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54729+ }
54730+ break;
54731+ default:
54732+ gr_log_middle(audit, msg, ap);
54733+ }
54734+ va_end(ap);
54735+ // these don't need DEFAULTSECARGS printed on the end
54736+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
54737+ gr_log_end(audit, 0);
54738+ else
54739+ gr_log_end(audit, 1);
54740+ END_LOCKS(audit);
54741+}
54742diff -urNp linux-3.1.1/grsecurity/grsec_mem.c linux-3.1.1/grsecurity/grsec_mem.c
54743--- linux-3.1.1/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
54744+++ linux-3.1.1/grsecurity/grsec_mem.c 2011-11-16 18:40:31.000000000 -0500
54745@@ -0,0 +1,33 @@
54746+#include <linux/kernel.h>
54747+#include <linux/sched.h>
54748+#include <linux/mm.h>
54749+#include <linux/mman.h>
54750+#include <linux/grinternal.h>
54751+
54752+void
54753+gr_handle_ioperm(void)
54754+{
54755+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
54756+ return;
54757+}
54758+
54759+void
54760+gr_handle_iopl(void)
54761+{
54762+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
54763+ return;
54764+}
54765+
54766+void
54767+gr_handle_mem_readwrite(u64 from, u64 to)
54768+{
54769+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
54770+ return;
54771+}
54772+
54773+void
54774+gr_handle_vm86(void)
54775+{
54776+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
54777+ return;
54778+}
54779diff -urNp linux-3.1.1/grsecurity/grsec_mount.c linux-3.1.1/grsecurity/grsec_mount.c
54780--- linux-3.1.1/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
54781+++ linux-3.1.1/grsecurity/grsec_mount.c 2011-11-16 18:40:31.000000000 -0500
54782@@ -0,0 +1,62 @@
54783+#include <linux/kernel.h>
54784+#include <linux/sched.h>
54785+#include <linux/mount.h>
54786+#include <linux/grsecurity.h>
54787+#include <linux/grinternal.h>
54788+
54789+void
54790+gr_log_remount(const char *devname, const int retval)
54791+{
54792+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54793+ if (grsec_enable_mount && (retval >= 0))
54794+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
54795+#endif
54796+ return;
54797+}
54798+
54799+void
54800+gr_log_unmount(const char *devname, const int retval)
54801+{
54802+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54803+ if (grsec_enable_mount && (retval >= 0))
54804+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
54805+#endif
54806+ return;
54807+}
54808+
54809+void
54810+gr_log_mount(const char *from, const char *to, const int retval)
54811+{
54812+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54813+ if (grsec_enable_mount && (retval >= 0))
54814+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
54815+#endif
54816+ return;
54817+}
54818+
54819+int
54820+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
54821+{
54822+#ifdef CONFIG_GRKERNSEC_ROFS
54823+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
54824+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
54825+ return -EPERM;
54826+ } else
54827+ return 0;
54828+#endif
54829+ return 0;
54830+}
54831+
54832+int
54833+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
54834+{
54835+#ifdef CONFIG_GRKERNSEC_ROFS
54836+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
54837+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
54838+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
54839+ return -EPERM;
54840+ } else
54841+ return 0;
54842+#endif
54843+ return 0;
54844+}
54845diff -urNp linux-3.1.1/grsecurity/grsec_pax.c linux-3.1.1/grsecurity/grsec_pax.c
54846--- linux-3.1.1/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
54847+++ linux-3.1.1/grsecurity/grsec_pax.c 2011-11-16 18:40:31.000000000 -0500
54848@@ -0,0 +1,36 @@
54849+#include <linux/kernel.h>
54850+#include <linux/sched.h>
54851+#include <linux/mm.h>
54852+#include <linux/file.h>
54853+#include <linux/grinternal.h>
54854+#include <linux/grsecurity.h>
54855+
54856+void
54857+gr_log_textrel(struct vm_area_struct * vma)
54858+{
54859+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
54860+ if (grsec_enable_audit_textrel)
54861+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
54862+#endif
54863+ return;
54864+}
54865+
54866+void
54867+gr_log_rwxmmap(struct file *file)
54868+{
54869+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54870+ if (grsec_enable_log_rwxmaps)
54871+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
54872+#endif
54873+ return;
54874+}
54875+
54876+void
54877+gr_log_rwxmprotect(struct file *file)
54878+{
54879+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54880+ if (grsec_enable_log_rwxmaps)
54881+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
54882+#endif
54883+ return;
54884+}
54885diff -urNp linux-3.1.1/grsecurity/grsec_ptrace.c linux-3.1.1/grsecurity/grsec_ptrace.c
54886--- linux-3.1.1/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
54887+++ linux-3.1.1/grsecurity/grsec_ptrace.c 2011-11-16 18:40:31.000000000 -0500
54888@@ -0,0 +1,14 @@
54889+#include <linux/kernel.h>
54890+#include <linux/sched.h>
54891+#include <linux/grinternal.h>
54892+#include <linux/grsecurity.h>
54893+
54894+void
54895+gr_audit_ptrace(struct task_struct *task)
54896+{
54897+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
54898+ if (grsec_enable_audit_ptrace)
54899+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
54900+#endif
54901+ return;
54902+}
54903diff -urNp linux-3.1.1/grsecurity/grsec_sig.c linux-3.1.1/grsecurity/grsec_sig.c
54904--- linux-3.1.1/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
54905+++ linux-3.1.1/grsecurity/grsec_sig.c 2011-11-16 18:40:31.000000000 -0500
54906@@ -0,0 +1,206 @@
54907+#include <linux/kernel.h>
54908+#include <linux/sched.h>
54909+#include <linux/delay.h>
54910+#include <linux/grsecurity.h>
54911+#include <linux/grinternal.h>
54912+#include <linux/hardirq.h>
54913+
54914+char *signames[] = {
54915+ [SIGSEGV] = "Segmentation fault",
54916+ [SIGILL] = "Illegal instruction",
54917+ [SIGABRT] = "Abort",
54918+ [SIGBUS] = "Invalid alignment/Bus error"
54919+};
54920+
54921+void
54922+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
54923+{
54924+#ifdef CONFIG_GRKERNSEC_SIGNAL
54925+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
54926+ (sig == SIGABRT) || (sig == SIGBUS))) {
54927+ if (t->pid == current->pid) {
54928+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
54929+ } else {
54930+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
54931+ }
54932+ }
54933+#endif
54934+ return;
54935+}
54936+
54937+int
54938+gr_handle_signal(const struct task_struct *p, const int sig)
54939+{
54940+#ifdef CONFIG_GRKERNSEC
54941+ if (current->pid > 1 && gr_check_protected_task(p)) {
54942+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
54943+ return -EPERM;
54944+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
54945+ return -EPERM;
54946+ }
54947+#endif
54948+ return 0;
54949+}
54950+
54951+#ifdef CONFIG_GRKERNSEC
54952+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
54953+
54954+int gr_fake_force_sig(int sig, struct task_struct *t)
54955+{
54956+ unsigned long int flags;
54957+ int ret, blocked, ignored;
54958+ struct k_sigaction *action;
54959+
54960+ spin_lock_irqsave(&t->sighand->siglock, flags);
54961+ action = &t->sighand->action[sig-1];
54962+ ignored = action->sa.sa_handler == SIG_IGN;
54963+ blocked = sigismember(&t->blocked, sig);
54964+ if (blocked || ignored) {
54965+ action->sa.sa_handler = SIG_DFL;
54966+ if (blocked) {
54967+ sigdelset(&t->blocked, sig);
54968+ recalc_sigpending_and_wake(t);
54969+ }
54970+ }
54971+ if (action->sa.sa_handler == SIG_DFL)
54972+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
54973+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
54974+
54975+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
54976+
54977+ return ret;
54978+}
54979+#endif
54980+
54981+#ifdef CONFIG_GRKERNSEC_BRUTE
54982+#define GR_USER_BAN_TIME (15 * 60)
54983+
54984+static int __get_dumpable(unsigned long mm_flags)
54985+{
54986+ int ret;
54987+
54988+ ret = mm_flags & MMF_DUMPABLE_MASK;
54989+ return (ret >= 2) ? 2 : ret;
54990+}
54991+#endif
54992+
54993+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
54994+{
54995+#ifdef CONFIG_GRKERNSEC_BRUTE
54996+ uid_t uid = 0;
54997+
54998+ if (!grsec_enable_brute)
54999+ return;
55000+
55001+ rcu_read_lock();
55002+ read_lock(&tasklist_lock);
55003+ read_lock(&grsec_exec_file_lock);
55004+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
55005+ p->real_parent->brute = 1;
55006+ else {
55007+ const struct cred *cred = __task_cred(p), *cred2;
55008+ struct task_struct *tsk, *tsk2;
55009+
55010+ if (!__get_dumpable(mm_flags) && cred->uid) {
55011+ struct user_struct *user;
55012+
55013+ uid = cred->uid;
55014+
55015+ /* this is put upon execution past expiration */
55016+ user = find_user(uid);
55017+ if (user == NULL)
55018+ goto unlock;
55019+ user->banned = 1;
55020+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
55021+ if (user->ban_expires == ~0UL)
55022+ user->ban_expires--;
55023+
55024+ do_each_thread(tsk2, tsk) {
55025+ cred2 = __task_cred(tsk);
55026+ if (tsk != p && cred2->uid == uid)
55027+ gr_fake_force_sig(SIGKILL, tsk);
55028+ } while_each_thread(tsk2, tsk);
55029+ }
55030+ }
55031+unlock:
55032+ read_unlock(&grsec_exec_file_lock);
55033+ read_unlock(&tasklist_lock);
55034+ rcu_read_unlock();
55035+
55036+ if (uid)
55037+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
55038+
55039+#endif
55040+ return;
55041+}
55042+
55043+void gr_handle_brute_check(void)
55044+{
55045+#ifdef CONFIG_GRKERNSEC_BRUTE
55046+ if (current->brute)
55047+ msleep(30 * 1000);
55048+#endif
55049+ return;
55050+}
55051+
55052+void gr_handle_kernel_exploit(void)
55053+{
55054+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
55055+ const struct cred *cred;
55056+ struct task_struct *tsk, *tsk2;
55057+ struct user_struct *user;
55058+ uid_t uid;
55059+
55060+ if (in_irq() || in_serving_softirq() || in_nmi())
55061+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
55062+
55063+ uid = current_uid();
55064+
55065+ if (uid == 0)
55066+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
55067+ else {
55068+ /* kill all the processes of this user, hold a reference
55069+ to their creds struct, and prevent them from creating
55070+ another process until system reset
55071+ */
55072+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
55073+ /* we intentionally leak this ref */
55074+ user = get_uid(current->cred->user);
55075+ if (user) {
55076+ user->banned = 1;
55077+ user->ban_expires = ~0UL;
55078+ }
55079+
55080+ read_lock(&tasklist_lock);
55081+ do_each_thread(tsk2, tsk) {
55082+ cred = __task_cred(tsk);
55083+ if (cred->uid == uid)
55084+ gr_fake_force_sig(SIGKILL, tsk);
55085+ } while_each_thread(tsk2, tsk);
55086+ read_unlock(&tasklist_lock);
55087+ }
55088+#endif
55089+}
55090+
55091+int __gr_process_user_ban(struct user_struct *user)
55092+{
55093+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
55094+ if (unlikely(user->banned)) {
55095+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
55096+ user->banned = 0;
55097+ user->ban_expires = 0;
55098+ free_uid(user);
55099+ } else
55100+ return -EPERM;
55101+ }
55102+#endif
55103+ return 0;
55104+}
55105+
55106+int gr_process_user_ban(void)
55107+{
55108+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
55109+ return __gr_process_user_ban(current->cred->user);
55110+#endif
55111+ return 0;
55112+}
55113diff -urNp linux-3.1.1/grsecurity/grsec_sock.c linux-3.1.1/grsecurity/grsec_sock.c
55114--- linux-3.1.1/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
55115+++ linux-3.1.1/grsecurity/grsec_sock.c 2011-11-16 18:40:31.000000000 -0500
55116@@ -0,0 +1,244 @@
55117+#include <linux/kernel.h>
55118+#include <linux/module.h>
55119+#include <linux/sched.h>
55120+#include <linux/file.h>
55121+#include <linux/net.h>
55122+#include <linux/in.h>
55123+#include <linux/ip.h>
55124+#include <net/sock.h>
55125+#include <net/inet_sock.h>
55126+#include <linux/grsecurity.h>
55127+#include <linux/grinternal.h>
55128+#include <linux/gracl.h>
55129+
55130+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
55131+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
55132+
55133+EXPORT_SYMBOL(gr_search_udp_recvmsg);
55134+EXPORT_SYMBOL(gr_search_udp_sendmsg);
55135+
55136+#ifdef CONFIG_UNIX_MODULE
55137+EXPORT_SYMBOL(gr_acl_handle_unix);
55138+EXPORT_SYMBOL(gr_acl_handle_mknod);
55139+EXPORT_SYMBOL(gr_handle_chroot_unix);
55140+EXPORT_SYMBOL(gr_handle_create);
55141+#endif
55142+
55143+#ifdef CONFIG_GRKERNSEC
55144+#define gr_conn_table_size 32749
55145+struct conn_table_entry {
55146+ struct conn_table_entry *next;
55147+ struct signal_struct *sig;
55148+};
55149+
55150+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
55151+DEFINE_SPINLOCK(gr_conn_table_lock);
55152+
55153+extern const char * gr_socktype_to_name(unsigned char type);
55154+extern const char * gr_proto_to_name(unsigned char proto);
55155+extern const char * gr_sockfamily_to_name(unsigned char family);
55156+
55157+static __inline__ int
55158+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
55159+{
55160+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
55161+}
55162+
55163+static __inline__ int
55164+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
55165+ __u16 sport, __u16 dport)
55166+{
55167+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
55168+ sig->gr_sport == sport && sig->gr_dport == dport))
55169+ return 1;
55170+ else
55171+ return 0;
55172+}
55173+
55174+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
55175+{
55176+ struct conn_table_entry **match;
55177+ unsigned int index;
55178+
55179+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
55180+ sig->gr_sport, sig->gr_dport,
55181+ gr_conn_table_size);
55182+
55183+ newent->sig = sig;
55184+
55185+ match = &gr_conn_table[index];
55186+ newent->next = *match;
55187+ *match = newent;
55188+
55189+ return;
55190+}
55191+
55192+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
55193+{
55194+ struct conn_table_entry *match, *last = NULL;
55195+ unsigned int index;
55196+
55197+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
55198+ sig->gr_sport, sig->gr_dport,
55199+ gr_conn_table_size);
55200+
55201+ match = gr_conn_table[index];
55202+ while (match && !conn_match(match->sig,
55203+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
55204+ sig->gr_dport)) {
55205+ last = match;
55206+ match = match->next;
55207+ }
55208+
55209+ if (match) {
55210+ if (last)
55211+ last->next = match->next;
55212+ else
55213+ gr_conn_table[index] = NULL;
55214+ kfree(match);
55215+ }
55216+
55217+ return;
55218+}
55219+
55220+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
55221+ __u16 sport, __u16 dport)
55222+{
55223+ struct conn_table_entry *match;
55224+ unsigned int index;
55225+
55226+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
55227+
55228+ match = gr_conn_table[index];
55229+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
55230+ match = match->next;
55231+
55232+ if (match)
55233+ return match->sig;
55234+ else
55235+ return NULL;
55236+}
55237+
55238+#endif
55239+
55240+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
55241+{
55242+#ifdef CONFIG_GRKERNSEC
55243+ struct signal_struct *sig = task->signal;
55244+ struct conn_table_entry *newent;
55245+
55246+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
55247+ if (newent == NULL)
55248+ return;
55249+ /* no bh lock needed since we are called with bh disabled */
55250+ spin_lock(&gr_conn_table_lock);
55251+ gr_del_task_from_ip_table_nolock(sig);
55252+ sig->gr_saddr = inet->inet_rcv_saddr;
55253+ sig->gr_daddr = inet->inet_daddr;
55254+ sig->gr_sport = inet->inet_sport;
55255+ sig->gr_dport = inet->inet_dport;
55256+ gr_add_to_task_ip_table_nolock(sig, newent);
55257+ spin_unlock(&gr_conn_table_lock);
55258+#endif
55259+ return;
55260+}
55261+
55262+void gr_del_task_from_ip_table(struct task_struct *task)
55263+{
55264+#ifdef CONFIG_GRKERNSEC
55265+ spin_lock_bh(&gr_conn_table_lock);
55266+ gr_del_task_from_ip_table_nolock(task->signal);
55267+ spin_unlock_bh(&gr_conn_table_lock);
55268+#endif
55269+ return;
55270+}
55271+
55272+void
55273+gr_attach_curr_ip(const struct sock *sk)
55274+{
55275+#ifdef CONFIG_GRKERNSEC
55276+ struct signal_struct *p, *set;
55277+ const struct inet_sock *inet = inet_sk(sk);
55278+
55279+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
55280+ return;
55281+
55282+ set = current->signal;
55283+
55284+ spin_lock_bh(&gr_conn_table_lock);
55285+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
55286+ inet->inet_dport, inet->inet_sport);
55287+ if (unlikely(p != NULL)) {
55288+ set->curr_ip = p->curr_ip;
55289+ set->used_accept = 1;
55290+ gr_del_task_from_ip_table_nolock(p);
55291+ spin_unlock_bh(&gr_conn_table_lock);
55292+ return;
55293+ }
55294+ spin_unlock_bh(&gr_conn_table_lock);
55295+
55296+ set->curr_ip = inet->inet_daddr;
55297+ set->used_accept = 1;
55298+#endif
55299+ return;
55300+}
55301+
55302+int
55303+gr_handle_sock_all(const int family, const int type, const int protocol)
55304+{
55305+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
55306+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
55307+ (family != AF_UNIX)) {
55308+ if (family == AF_INET)
55309+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
55310+ else
55311+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
55312+ return -EACCES;
55313+ }
55314+#endif
55315+ return 0;
55316+}
55317+
55318+int
55319+gr_handle_sock_server(const struct sockaddr *sck)
55320+{
55321+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55322+ if (grsec_enable_socket_server &&
55323+ in_group_p(grsec_socket_server_gid) &&
55324+ sck && (sck->sa_family != AF_UNIX) &&
55325+ (sck->sa_family != AF_LOCAL)) {
55326+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
55327+ return -EACCES;
55328+ }
55329+#endif
55330+ return 0;
55331+}
55332+
55333+int
55334+gr_handle_sock_server_other(const struct sock *sck)
55335+{
55336+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55337+ if (grsec_enable_socket_server &&
55338+ in_group_p(grsec_socket_server_gid) &&
55339+ sck && (sck->sk_family != AF_UNIX) &&
55340+ (sck->sk_family != AF_LOCAL)) {
55341+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
55342+ return -EACCES;
55343+ }
55344+#endif
55345+ return 0;
55346+}
55347+
55348+int
55349+gr_handle_sock_client(const struct sockaddr *sck)
55350+{
55351+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
55352+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
55353+ sck && (sck->sa_family != AF_UNIX) &&
55354+ (sck->sa_family != AF_LOCAL)) {
55355+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
55356+ return -EACCES;
55357+ }
55358+#endif
55359+ return 0;
55360+}
55361diff -urNp linux-3.1.1/grsecurity/grsec_sysctl.c linux-3.1.1/grsecurity/grsec_sysctl.c
55362--- linux-3.1.1/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
55363+++ linux-3.1.1/grsecurity/grsec_sysctl.c 2011-11-16 18:40:31.000000000 -0500
55364@@ -0,0 +1,433 @@
55365+#include <linux/kernel.h>
55366+#include <linux/sched.h>
55367+#include <linux/sysctl.h>
55368+#include <linux/grsecurity.h>
55369+#include <linux/grinternal.h>
55370+
55371+int
55372+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
55373+{
55374+#ifdef CONFIG_GRKERNSEC_SYSCTL
55375+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
55376+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
55377+ return -EACCES;
55378+ }
55379+#endif
55380+ return 0;
55381+}
55382+
55383+#ifdef CONFIG_GRKERNSEC_ROFS
55384+static int __maybe_unused one = 1;
55385+#endif
55386+
55387+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
55388+struct ctl_table grsecurity_table[] = {
55389+#ifdef CONFIG_GRKERNSEC_SYSCTL
55390+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
55391+#ifdef CONFIG_GRKERNSEC_IO
55392+ {
55393+ .procname = "disable_priv_io",
55394+ .data = &grsec_disable_privio,
55395+ .maxlen = sizeof(int),
55396+ .mode = 0600,
55397+ .proc_handler = &proc_dointvec,
55398+ },
55399+#endif
55400+#endif
55401+#ifdef CONFIG_GRKERNSEC_LINK
55402+ {
55403+ .procname = "linking_restrictions",
55404+ .data = &grsec_enable_link,
55405+ .maxlen = sizeof(int),
55406+ .mode = 0600,
55407+ .proc_handler = &proc_dointvec,
55408+ },
55409+#endif
55410+#ifdef CONFIG_GRKERNSEC_BRUTE
55411+ {
55412+ .procname = "deter_bruteforce",
55413+ .data = &grsec_enable_brute,
55414+ .maxlen = sizeof(int),
55415+ .mode = 0600,
55416+ .proc_handler = &proc_dointvec,
55417+ },
55418+#endif
55419+#ifdef CONFIG_GRKERNSEC_FIFO
55420+ {
55421+ .procname = "fifo_restrictions",
55422+ .data = &grsec_enable_fifo,
55423+ .maxlen = sizeof(int),
55424+ .mode = 0600,
55425+ .proc_handler = &proc_dointvec,
55426+ },
55427+#endif
55428+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
55429+ {
55430+ .procname = "ip_blackhole",
55431+ .data = &grsec_enable_blackhole,
55432+ .maxlen = sizeof(int),
55433+ .mode = 0600,
55434+ .proc_handler = &proc_dointvec,
55435+ },
55436+ {
55437+ .procname = "lastack_retries",
55438+ .data = &grsec_lastack_retries,
55439+ .maxlen = sizeof(int),
55440+ .mode = 0600,
55441+ .proc_handler = &proc_dointvec,
55442+ },
55443+#endif
55444+#ifdef CONFIG_GRKERNSEC_EXECLOG
55445+ {
55446+ .procname = "exec_logging",
55447+ .data = &grsec_enable_execlog,
55448+ .maxlen = sizeof(int),
55449+ .mode = 0600,
55450+ .proc_handler = &proc_dointvec,
55451+ },
55452+#endif
55453+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55454+ {
55455+ .procname = "rwxmap_logging",
55456+ .data = &grsec_enable_log_rwxmaps,
55457+ .maxlen = sizeof(int),
55458+ .mode = 0600,
55459+ .proc_handler = &proc_dointvec,
55460+ },
55461+#endif
55462+#ifdef CONFIG_GRKERNSEC_SIGNAL
55463+ {
55464+ .procname = "signal_logging",
55465+ .data = &grsec_enable_signal,
55466+ .maxlen = sizeof(int),
55467+ .mode = 0600,
55468+ .proc_handler = &proc_dointvec,
55469+ },
55470+#endif
55471+#ifdef CONFIG_GRKERNSEC_FORKFAIL
55472+ {
55473+ .procname = "forkfail_logging",
55474+ .data = &grsec_enable_forkfail,
55475+ .maxlen = sizeof(int),
55476+ .mode = 0600,
55477+ .proc_handler = &proc_dointvec,
55478+ },
55479+#endif
55480+#ifdef CONFIG_GRKERNSEC_TIME
55481+ {
55482+ .procname = "timechange_logging",
55483+ .data = &grsec_enable_time,
55484+ .maxlen = sizeof(int),
55485+ .mode = 0600,
55486+ .proc_handler = &proc_dointvec,
55487+ },
55488+#endif
55489+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
55490+ {
55491+ .procname = "chroot_deny_shmat",
55492+ .data = &grsec_enable_chroot_shmat,
55493+ .maxlen = sizeof(int),
55494+ .mode = 0600,
55495+ .proc_handler = &proc_dointvec,
55496+ },
55497+#endif
55498+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
55499+ {
55500+ .procname = "chroot_deny_unix",
55501+ .data = &grsec_enable_chroot_unix,
55502+ .maxlen = sizeof(int),
55503+ .mode = 0600,
55504+ .proc_handler = &proc_dointvec,
55505+ },
55506+#endif
55507+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
55508+ {
55509+ .procname = "chroot_deny_mount",
55510+ .data = &grsec_enable_chroot_mount,
55511+ .maxlen = sizeof(int),
55512+ .mode = 0600,
55513+ .proc_handler = &proc_dointvec,
55514+ },
55515+#endif
55516+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
55517+ {
55518+ .procname = "chroot_deny_fchdir",
55519+ .data = &grsec_enable_chroot_fchdir,
55520+ .maxlen = sizeof(int),
55521+ .mode = 0600,
55522+ .proc_handler = &proc_dointvec,
55523+ },
55524+#endif
55525+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
55526+ {
55527+ .procname = "chroot_deny_chroot",
55528+ .data = &grsec_enable_chroot_double,
55529+ .maxlen = sizeof(int),
55530+ .mode = 0600,
55531+ .proc_handler = &proc_dointvec,
55532+ },
55533+#endif
55534+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
55535+ {
55536+ .procname = "chroot_deny_pivot",
55537+ .data = &grsec_enable_chroot_pivot,
55538+ .maxlen = sizeof(int),
55539+ .mode = 0600,
55540+ .proc_handler = &proc_dointvec,
55541+ },
55542+#endif
55543+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
55544+ {
55545+ .procname = "chroot_enforce_chdir",
55546+ .data = &grsec_enable_chroot_chdir,
55547+ .maxlen = sizeof(int),
55548+ .mode = 0600,
55549+ .proc_handler = &proc_dointvec,
55550+ },
55551+#endif
55552+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
55553+ {
55554+ .procname = "chroot_deny_chmod",
55555+ .data = &grsec_enable_chroot_chmod,
55556+ .maxlen = sizeof(int),
55557+ .mode = 0600,
55558+ .proc_handler = &proc_dointvec,
55559+ },
55560+#endif
55561+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
55562+ {
55563+ .procname = "chroot_deny_mknod",
55564+ .data = &grsec_enable_chroot_mknod,
55565+ .maxlen = sizeof(int),
55566+ .mode = 0600,
55567+ .proc_handler = &proc_dointvec,
55568+ },
55569+#endif
55570+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55571+ {
55572+ .procname = "chroot_restrict_nice",
55573+ .data = &grsec_enable_chroot_nice,
55574+ .maxlen = sizeof(int),
55575+ .mode = 0600,
55576+ .proc_handler = &proc_dointvec,
55577+ },
55578+#endif
55579+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
55580+ {
55581+ .procname = "chroot_execlog",
55582+ .data = &grsec_enable_chroot_execlog,
55583+ .maxlen = sizeof(int),
55584+ .mode = 0600,
55585+ .proc_handler = &proc_dointvec,
55586+ },
55587+#endif
55588+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55589+ {
55590+ .procname = "chroot_caps",
55591+ .data = &grsec_enable_chroot_caps,
55592+ .maxlen = sizeof(int),
55593+ .mode = 0600,
55594+ .proc_handler = &proc_dointvec,
55595+ },
55596+#endif
55597+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
55598+ {
55599+ .procname = "chroot_deny_sysctl",
55600+ .data = &grsec_enable_chroot_sysctl,
55601+ .maxlen = sizeof(int),
55602+ .mode = 0600,
55603+ .proc_handler = &proc_dointvec,
55604+ },
55605+#endif
55606+#ifdef CONFIG_GRKERNSEC_TPE
55607+ {
55608+ .procname = "tpe",
55609+ .data = &grsec_enable_tpe,
55610+ .maxlen = sizeof(int),
55611+ .mode = 0600,
55612+ .proc_handler = &proc_dointvec,
55613+ },
55614+ {
55615+ .procname = "tpe_gid",
55616+ .data = &grsec_tpe_gid,
55617+ .maxlen = sizeof(int),
55618+ .mode = 0600,
55619+ .proc_handler = &proc_dointvec,
55620+ },
55621+#endif
55622+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55623+ {
55624+ .procname = "tpe_invert",
55625+ .data = &grsec_enable_tpe_invert,
55626+ .maxlen = sizeof(int),
55627+ .mode = 0600,
55628+ .proc_handler = &proc_dointvec,
55629+ },
55630+#endif
55631+#ifdef CONFIG_GRKERNSEC_TPE_ALL
55632+ {
55633+ .procname = "tpe_restrict_all",
55634+ .data = &grsec_enable_tpe_all,
55635+ .maxlen = sizeof(int),
55636+ .mode = 0600,
55637+ .proc_handler = &proc_dointvec,
55638+ },
55639+#endif
55640+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
55641+ {
55642+ .procname = "socket_all",
55643+ .data = &grsec_enable_socket_all,
55644+ .maxlen = sizeof(int),
55645+ .mode = 0600,
55646+ .proc_handler = &proc_dointvec,
55647+ },
55648+ {
55649+ .procname = "socket_all_gid",
55650+ .data = &grsec_socket_all_gid,
55651+ .maxlen = sizeof(int),
55652+ .mode = 0600,
55653+ .proc_handler = &proc_dointvec,
55654+ },
55655+#endif
55656+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
55657+ {
55658+ .procname = "socket_client",
55659+ .data = &grsec_enable_socket_client,
55660+ .maxlen = sizeof(int),
55661+ .mode = 0600,
55662+ .proc_handler = &proc_dointvec,
55663+ },
55664+ {
55665+ .procname = "socket_client_gid",
55666+ .data = &grsec_socket_client_gid,
55667+ .maxlen = sizeof(int),
55668+ .mode = 0600,
55669+ .proc_handler = &proc_dointvec,
55670+ },
55671+#endif
55672+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55673+ {
55674+ .procname = "socket_server",
55675+ .data = &grsec_enable_socket_server,
55676+ .maxlen = sizeof(int),
55677+ .mode = 0600,
55678+ .proc_handler = &proc_dointvec,
55679+ },
55680+ {
55681+ .procname = "socket_server_gid",
55682+ .data = &grsec_socket_server_gid,
55683+ .maxlen = sizeof(int),
55684+ .mode = 0600,
55685+ .proc_handler = &proc_dointvec,
55686+ },
55687+#endif
55688+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
55689+ {
55690+ .procname = "audit_group",
55691+ .data = &grsec_enable_group,
55692+ .maxlen = sizeof(int),
55693+ .mode = 0600,
55694+ .proc_handler = &proc_dointvec,
55695+ },
55696+ {
55697+ .procname = "audit_gid",
55698+ .data = &grsec_audit_gid,
55699+ .maxlen = sizeof(int),
55700+ .mode = 0600,
55701+ .proc_handler = &proc_dointvec,
55702+ },
55703+#endif
55704+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55705+ {
55706+ .procname = "audit_chdir",
55707+ .data = &grsec_enable_chdir,
55708+ .maxlen = sizeof(int),
55709+ .mode = 0600,
55710+ .proc_handler = &proc_dointvec,
55711+ },
55712+#endif
55713+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55714+ {
55715+ .procname = "audit_mount",
55716+ .data = &grsec_enable_mount,
55717+ .maxlen = sizeof(int),
55718+ .mode = 0600,
55719+ .proc_handler = &proc_dointvec,
55720+ },
55721+#endif
55722+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55723+ {
55724+ .procname = "audit_textrel",
55725+ .data = &grsec_enable_audit_textrel,
55726+ .maxlen = sizeof(int),
55727+ .mode = 0600,
55728+ .proc_handler = &proc_dointvec,
55729+ },
55730+#endif
55731+#ifdef CONFIG_GRKERNSEC_DMESG
55732+ {
55733+ .procname = "dmesg",
55734+ .data = &grsec_enable_dmesg,
55735+ .maxlen = sizeof(int),
55736+ .mode = 0600,
55737+ .proc_handler = &proc_dointvec,
55738+ },
55739+#endif
55740+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55741+ {
55742+ .procname = "chroot_findtask",
55743+ .data = &grsec_enable_chroot_findtask,
55744+ .maxlen = sizeof(int),
55745+ .mode = 0600,
55746+ .proc_handler = &proc_dointvec,
55747+ },
55748+#endif
55749+#ifdef CONFIG_GRKERNSEC_RESLOG
55750+ {
55751+ .procname = "resource_logging",
55752+ .data = &grsec_resource_logging,
55753+ .maxlen = sizeof(int),
55754+ .mode = 0600,
55755+ .proc_handler = &proc_dointvec,
55756+ },
55757+#endif
55758+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55759+ {
55760+ .procname = "audit_ptrace",
55761+ .data = &grsec_enable_audit_ptrace,
55762+ .maxlen = sizeof(int),
55763+ .mode = 0600,
55764+ .proc_handler = &proc_dointvec,
55765+ },
55766+#endif
55767+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55768+ {
55769+ .procname = "harden_ptrace",
55770+ .data = &grsec_enable_harden_ptrace,
55771+ .maxlen = sizeof(int),
55772+ .mode = 0600,
55773+ .proc_handler = &proc_dointvec,
55774+ },
55775+#endif
55776+ {
55777+ .procname = "grsec_lock",
55778+ .data = &grsec_lock,
55779+ .maxlen = sizeof(int),
55780+ .mode = 0600,
55781+ .proc_handler = &proc_dointvec,
55782+ },
55783+#endif
55784+#ifdef CONFIG_GRKERNSEC_ROFS
55785+ {
55786+ .procname = "romount_protect",
55787+ .data = &grsec_enable_rofs,
55788+ .maxlen = sizeof(int),
55789+ .mode = 0600,
55790+ .proc_handler = &proc_dointvec_minmax,
55791+ .extra1 = &one,
55792+ .extra2 = &one,
55793+ },
55794+#endif
55795+ { }
55796+};
55797+#endif
55798diff -urNp linux-3.1.1/grsecurity/grsec_time.c linux-3.1.1/grsecurity/grsec_time.c
55799--- linux-3.1.1/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
55800+++ linux-3.1.1/grsecurity/grsec_time.c 2011-11-16 18:40:31.000000000 -0500
55801@@ -0,0 +1,16 @@
55802+#include <linux/kernel.h>
55803+#include <linux/sched.h>
55804+#include <linux/grinternal.h>
55805+#include <linux/module.h>
55806+
55807+void
55808+gr_log_timechange(void)
55809+{
55810+#ifdef CONFIG_GRKERNSEC_TIME
55811+ if (grsec_enable_time)
55812+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
55813+#endif
55814+ return;
55815+}
55816+
55817+EXPORT_SYMBOL(gr_log_timechange);
55818diff -urNp linux-3.1.1/grsecurity/grsec_tpe.c linux-3.1.1/grsecurity/grsec_tpe.c
55819--- linux-3.1.1/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
55820+++ linux-3.1.1/grsecurity/grsec_tpe.c 2011-11-16 18:40:31.000000000 -0500
55821@@ -0,0 +1,39 @@
55822+#include <linux/kernel.h>
55823+#include <linux/sched.h>
55824+#include <linux/file.h>
55825+#include <linux/fs.h>
55826+#include <linux/grinternal.h>
55827+
55828+extern int gr_acl_tpe_check(void);
55829+
55830+int
55831+gr_tpe_allow(const struct file *file)
55832+{
55833+#ifdef CONFIG_GRKERNSEC
55834+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
55835+ const struct cred *cred = current_cred();
55836+
55837+ if (cred->uid && ((grsec_enable_tpe &&
55838+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55839+ ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
55840+ (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
55841+#else
55842+ in_group_p(grsec_tpe_gid)
55843+#endif
55844+ ) || gr_acl_tpe_check()) &&
55845+ (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
55846+ (inode->i_mode & S_IWOTH))))) {
55847+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
55848+ return 0;
55849+ }
55850+#ifdef CONFIG_GRKERNSEC_TPE_ALL
55851+ if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
55852+ ((inode->i_uid && (inode->i_uid != cred->uid)) ||
55853+ (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
55854+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
55855+ return 0;
55856+ }
55857+#endif
55858+#endif
55859+ return 1;
55860+}
55861diff -urNp linux-3.1.1/grsecurity/grsum.c linux-3.1.1/grsecurity/grsum.c
55862--- linux-3.1.1/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
55863+++ linux-3.1.1/grsecurity/grsum.c 2011-11-16 18:40:31.000000000 -0500
55864@@ -0,0 +1,61 @@
55865+#include <linux/err.h>
55866+#include <linux/kernel.h>
55867+#include <linux/sched.h>
55868+#include <linux/mm.h>
55869+#include <linux/scatterlist.h>
55870+#include <linux/crypto.h>
55871+#include <linux/gracl.h>
55872+
55873+
55874+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
55875+#error "crypto and sha256 must be built into the kernel"
55876+#endif
55877+
55878+int
55879+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
55880+{
55881+ char *p;
55882+ struct crypto_hash *tfm;
55883+ struct hash_desc desc;
55884+ struct scatterlist sg;
55885+ unsigned char temp_sum[GR_SHA_LEN];
55886+ volatile int retval = 0;
55887+ volatile int dummy = 0;
55888+ unsigned int i;
55889+
55890+ sg_init_table(&sg, 1);
55891+
55892+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
55893+ if (IS_ERR(tfm)) {
55894+ /* should never happen, since sha256 should be built in */
55895+ return 1;
55896+ }
55897+
55898+ desc.tfm = tfm;
55899+ desc.flags = 0;
55900+
55901+ crypto_hash_init(&desc);
55902+
55903+ p = salt;
55904+ sg_set_buf(&sg, p, GR_SALT_LEN);
55905+ crypto_hash_update(&desc, &sg, sg.length);
55906+
55907+ p = entry->pw;
55908+ sg_set_buf(&sg, p, strlen(p));
55909+
55910+ crypto_hash_update(&desc, &sg, sg.length);
55911+
55912+ crypto_hash_final(&desc, temp_sum);
55913+
55914+ memset(entry->pw, 0, GR_PW_LEN);
55915+
55916+ for (i = 0; i < GR_SHA_LEN; i++)
55917+ if (sum[i] != temp_sum[i])
55918+ retval = 1;
55919+ else
55920+ dummy = 1; // waste a cycle
55921+
55922+ crypto_free_hash(tfm);
55923+
55924+ return retval;
55925+}
55926diff -urNp linux-3.1.1/grsecurity/Kconfig linux-3.1.1/grsecurity/Kconfig
55927--- linux-3.1.1/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
55928+++ linux-3.1.1/grsecurity/Kconfig 2011-11-16 18:40:31.000000000 -0500
55929@@ -0,0 +1,1037 @@
55930+#
55931+# grecurity configuration
55932+#
55933+
55934+menu "Grsecurity"
55935+
55936+config GRKERNSEC
55937+ bool "Grsecurity"
55938+ select CRYPTO
55939+ select CRYPTO_SHA256
55940+ help
55941+ If you say Y here, you will be able to configure many features
55942+ that will enhance the security of your system. It is highly
55943+ recommended that you say Y here and read through the help
55944+ for each option so that you fully understand the features and
55945+ can evaluate their usefulness for your machine.
55946+
55947+choice
55948+ prompt "Security Level"
55949+ depends on GRKERNSEC
55950+ default GRKERNSEC_CUSTOM
55951+
55952+config GRKERNSEC_LOW
55953+ bool "Low"
55954+ select GRKERNSEC_LINK
55955+ select GRKERNSEC_FIFO
55956+ select GRKERNSEC_RANDNET
55957+ select GRKERNSEC_DMESG
55958+ select GRKERNSEC_CHROOT
55959+ select GRKERNSEC_CHROOT_CHDIR
55960+
55961+ help
55962+ If you choose this option, several of the grsecurity options will
55963+ be enabled that will give you greater protection against a number
55964+ of attacks, while assuring that none of your software will have any
55965+ conflicts with the additional security measures. If you run a lot
55966+ of unusual software, or you are having problems with the higher
55967+ security levels, you should say Y here. With this option, the
55968+ following features are enabled:
55969+
55970+ - Linking restrictions
55971+ - FIFO restrictions
55972+ - Restricted dmesg
55973+ - Enforced chdir("/") on chroot
55974+ - Runtime module disabling
55975+
55976+config GRKERNSEC_MEDIUM
55977+ bool "Medium"
55978+ select PAX
55979+ select PAX_EI_PAX
55980+ select PAX_PT_PAX_FLAGS
55981+ select PAX_HAVE_ACL_FLAGS
55982+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55983+ select GRKERNSEC_CHROOT
55984+ select GRKERNSEC_CHROOT_SYSCTL
55985+ select GRKERNSEC_LINK
55986+ select GRKERNSEC_FIFO
55987+ select GRKERNSEC_DMESG
55988+ select GRKERNSEC_RANDNET
55989+ select GRKERNSEC_FORKFAIL
55990+ select GRKERNSEC_TIME
55991+ select GRKERNSEC_SIGNAL
55992+ select GRKERNSEC_CHROOT
55993+ select GRKERNSEC_CHROOT_UNIX
55994+ select GRKERNSEC_CHROOT_MOUNT
55995+ select GRKERNSEC_CHROOT_PIVOT
55996+ select GRKERNSEC_CHROOT_DOUBLE
55997+ select GRKERNSEC_CHROOT_CHDIR
55998+ select GRKERNSEC_CHROOT_MKNOD
55999+ select GRKERNSEC_PROC
56000+ select GRKERNSEC_PROC_USERGROUP
56001+ select PAX_RANDUSTACK
56002+ select PAX_ASLR
56003+ select PAX_RANDMMAP
56004+ select PAX_REFCOUNT if (X86 || SPARC64)
56005+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
56006+
56007+ help
56008+ If you say Y here, several features in addition to those included
56009+ in the low additional security level will be enabled. These
56010+ features provide even more security to your system, though in rare
56011+ cases they may be incompatible with very old or poorly written
56012+ software. If you enable this option, make sure that your auth
56013+ service (identd) is running as gid 1001. With this option,
56014+ the following features (in addition to those provided in the
56015+ low additional security level) will be enabled:
56016+
56017+ - Failed fork logging
56018+ - Time change logging
56019+ - Signal logging
56020+ - Deny mounts in chroot
56021+ - Deny double chrooting
56022+ - Deny sysctl writes in chroot
56023+ - Deny mknod in chroot
56024+ - Deny access to abstract AF_UNIX sockets out of chroot
56025+ - Deny pivot_root in chroot
56026+ - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
56027+ - /proc restrictions with special GID set to 10 (usually wheel)
56028+ - Address Space Layout Randomization (ASLR)
56029+ - Prevent exploitation of most refcount overflows
56030+ - Bounds checking of copying between the kernel and userland
56031+
56032+config GRKERNSEC_HIGH
56033+ bool "High"
56034+ select GRKERNSEC_LINK
56035+ select GRKERNSEC_FIFO
56036+ select GRKERNSEC_DMESG
56037+ select GRKERNSEC_FORKFAIL
56038+ select GRKERNSEC_TIME
56039+ select GRKERNSEC_SIGNAL
56040+ select GRKERNSEC_CHROOT
56041+ select GRKERNSEC_CHROOT_SHMAT
56042+ select GRKERNSEC_CHROOT_UNIX
56043+ select GRKERNSEC_CHROOT_MOUNT
56044+ select GRKERNSEC_CHROOT_FCHDIR
56045+ select GRKERNSEC_CHROOT_PIVOT
56046+ select GRKERNSEC_CHROOT_DOUBLE
56047+ select GRKERNSEC_CHROOT_CHDIR
56048+ select GRKERNSEC_CHROOT_MKNOD
56049+ select GRKERNSEC_CHROOT_CAPS
56050+ select GRKERNSEC_CHROOT_SYSCTL
56051+ select GRKERNSEC_CHROOT_FINDTASK
56052+ select GRKERNSEC_SYSFS_RESTRICT
56053+ select GRKERNSEC_PROC
56054+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
56055+ select GRKERNSEC_HIDESYM
56056+ select GRKERNSEC_BRUTE
56057+ select GRKERNSEC_PROC_USERGROUP
56058+ select GRKERNSEC_KMEM
56059+ select GRKERNSEC_RESLOG
56060+ select GRKERNSEC_RANDNET
56061+ select GRKERNSEC_PROC_ADD
56062+ select GRKERNSEC_CHROOT_CHMOD
56063+ select GRKERNSEC_CHROOT_NICE
56064+ select GRKERNSEC_AUDIT_MOUNT
56065+ select GRKERNSEC_MODHARDEN if (MODULES)
56066+ select GRKERNSEC_HARDEN_PTRACE
56067+ select GRKERNSEC_VM86 if (X86_32)
56068+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
56069+ select PAX
56070+ select PAX_RANDUSTACK
56071+ select PAX_ASLR
56072+ select PAX_RANDMMAP
56073+ select PAX_NOEXEC
56074+ select PAX_MPROTECT
56075+ select PAX_EI_PAX
56076+ select PAX_PT_PAX_FLAGS
56077+ select PAX_HAVE_ACL_FLAGS
56078+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
56079+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
56080+ select PAX_RANDKSTACK if (X86_TSC && X86)
56081+ select PAX_SEGMEXEC if (X86_32)
56082+ select PAX_PAGEEXEC
56083+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
56084+ select PAX_EMUTRAMP if (PARISC)
56085+ select PAX_EMUSIGRT if (PARISC)
56086+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
56087+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
56088+ select PAX_REFCOUNT if (X86 || SPARC64)
56089+ select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
56090+ help
56091+ If you say Y here, many of the features of grsecurity will be
56092+ enabled, which will protect you against many kinds of attacks
56093+ against your system. The heightened security comes at a cost
56094+ of an increased chance of incompatibilities with rare software
56095+ on your machine. Since this security level enables PaX, you should
56096+ view <http://pax.grsecurity.net> and read about the PaX
56097+ project. While you are there, download chpax and run it on
56098+ binaries that cause problems with PaX. Also remember that
56099+ since the /proc restrictions are enabled, you must run your
56100+ identd as gid 1001. This security level enables the following
56101+ features in addition to those listed in the low and medium
56102+ security levels:
56103+
56104+ - Additional /proc restrictions
56105+ - Chmod restrictions in chroot
56106+ - No signals, ptrace, or viewing of processes outside of chroot
56107+ - Capability restrictions in chroot
56108+ - Deny fchdir out of chroot
56109+ - Priority restrictions in chroot
56110+ - Segmentation-based implementation of PaX
56111+ - Mprotect restrictions
56112+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
56113+ - Kernel stack randomization
56114+ - Mount/unmount/remount logging
56115+ - Kernel symbol hiding
56116+ - Hardening of module auto-loading
56117+ - Ptrace restrictions
56118+ - Restricted vm86 mode
56119+ - Restricted sysfs/debugfs
56120+ - Active kernel exploit response
56121+
56122+config GRKERNSEC_CUSTOM
56123+ bool "Custom"
56124+ help
56125+ If you say Y here, you will be able to configure every grsecurity
56126+ option, which allows you to enable many more features that aren't
56127+ covered in the basic security levels. These additional features
56128+ include TPE, socket restrictions, and the sysctl system for
56129+ grsecurity. It is advised that you read through the help for
56130+ each option to determine its usefulness in your situation.
56131+
56132+endchoice
56133+
56134+menu "Address Space Protection"
56135+depends on GRKERNSEC
56136+
56137+config GRKERNSEC_KMEM
56138+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
56139+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
56140+ help
56141+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
56142+ be written to or read from to modify or leak the contents of the running
56143+ kernel. /dev/port will also not be allowed to be opened. If you have module
56144+ support disabled, enabling this will close up four ways that are
56145+ currently used to insert malicious code into the running kernel.
56146+ Even with all these features enabled, we still highly recommend that
56147+ you use the RBAC system, as it is still possible for an attacker to
56148+ modify the running kernel through privileged I/O granted by ioperm/iopl.
56149+ If you are not using XFree86, you may be able to stop this additional
56150+ case by enabling the 'Disable privileged I/O' option. Though nothing
56151+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
56152+ but only to video memory, which is the only writing we allow in this
56153+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
56154+ not be allowed to mprotect it with PROT_WRITE later.
56155+ It is highly recommended that you say Y here if you meet all the
56156+ conditions above.
56157+
56158+config GRKERNSEC_VM86
56159+ bool "Restrict VM86 mode"
56160+ depends on X86_32
56161+
56162+ help
56163+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
56164+ make use of a special execution mode on 32bit x86 processors called
56165+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
56166+ video cards and will still work with this option enabled. The purpose
56167+ of the option is to prevent exploitation of emulation errors in
56168+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
56169+ Nearly all users should be able to enable this option.
56170+
56171+config GRKERNSEC_IO
56172+ bool "Disable privileged I/O"
56173+ depends on X86
56174+ select RTC_CLASS
56175+ select RTC_INTF_DEV
56176+ select RTC_DRV_CMOS
56177+
56178+ help
56179+ If you say Y here, all ioperm and iopl calls will return an error.
56180+ Ioperm and iopl can be used to modify the running kernel.
56181+ Unfortunately, some programs need this access to operate properly,
56182+ the most notable of which are XFree86 and hwclock. hwclock can be
56183+ remedied by having RTC support in the kernel, so real-time
56184+ clock support is enabled if this option is enabled, to ensure
56185+ that hwclock operates correctly. XFree86 still will not
56186+ operate correctly with this option enabled, so DO NOT CHOOSE Y
56187+ IF YOU USE XFree86. If you use XFree86 and you still want to
56188+ protect your kernel against modification, use the RBAC system.
56189+
56190+config GRKERNSEC_PROC_MEMMAP
56191+ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
56192+ default y if (PAX_NOEXEC || PAX_ASLR)
56193+ depends on PAX_NOEXEC || PAX_ASLR
56194+ help
56195+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
56196+ give no information about the addresses of its mappings if
56197+ PaX features that rely on random addresses are enabled on the task.
56198+ If you use PaX it is greatly recommended that you say Y here as it
56199+ closes up a hole that makes the full ASLR useless for suid
56200+ binaries.
56201+
56202+config GRKERNSEC_BRUTE
56203+ bool "Deter exploit bruteforcing"
56204+ help
56205+ If you say Y here, attempts to bruteforce exploits against forking
56206+ daemons such as apache or sshd, as well as against suid/sgid binaries
56207+ will be deterred. When a child of a forking daemon is killed by PaX
56208+ or crashes due to an illegal instruction or other suspicious signal,
56209+ the parent process will be delayed 30 seconds upon every subsequent
56210+ fork until the administrator is able to assess the situation and
56211+ restart the daemon.
56212+ In the suid/sgid case, the attempt is logged, the user has all their
56213+ processes terminated, and they are prevented from executing any further
56214+ processes for 15 minutes.
56215+ It is recommended that you also enable signal logging in the auditing
56216+ section so that logs are generated when a process triggers a suspicious
56217+ signal.
56218+ If the sysctl option is enabled, a sysctl option with name
56219+ "deter_bruteforce" is created.
56220+
56221+
56222+config GRKERNSEC_MODHARDEN
56223+ bool "Harden module auto-loading"
56224+ depends on MODULES
56225+ help
56226+ If you say Y here, module auto-loading in response to use of some
56227+ feature implemented by an unloaded module will be restricted to
56228+ root users. Enabling this option helps defend against attacks
56229+ by unprivileged users who abuse the auto-loading behavior to
56230+ cause a vulnerable module to load that is then exploited.
56231+
56232+ If this option prevents a legitimate use of auto-loading for a
56233+ non-root user, the administrator can execute modprobe manually
56234+ with the exact name of the module mentioned in the alert log.
56235+ Alternatively, the administrator can add the module to the list
56236+ of modules loaded at boot by modifying init scripts.
56237+
56238+ Modification of init scripts will most likely be needed on
56239+ Ubuntu servers with encrypted home directory support enabled,
56240+ as the first non-root user logging in will cause the ecb(aes),
56241+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
56242+
56243+config GRKERNSEC_HIDESYM
56244+ bool "Hide kernel symbols"
56245+ help
56246+ If you say Y here, getting information on loaded modules, and
56247+ displaying all kernel symbols through a syscall will be restricted
56248+ to users with CAP_SYS_MODULE. For software compatibility reasons,
56249+ /proc/kallsyms will be restricted to the root user. The RBAC
56250+ system can hide that entry even from root.
56251+
56252+ This option also prevents leaking of kernel addresses through
56253+ several /proc entries.
56254+
56255+ Note that this option is only effective provided the following
56256+ conditions are met:
56257+ 1) The kernel using grsecurity is not precompiled by some distribution
56258+ 2) You have also enabled GRKERNSEC_DMESG
56259+ 3) You are using the RBAC system and hiding other files such as your
56260+ kernel image and System.map. Alternatively, enabling this option
56261+ causes the permissions on /boot, /lib/modules, and the kernel
56262+ source directory to change at compile time to prevent
56263+ reading by non-root users.
56264+ If the above conditions are met, this option will aid in providing a
56265+ useful protection against local kernel exploitation of overflows
56266+ and arbitrary read/write vulnerabilities.
56267+
56268+config GRKERNSEC_KERN_LOCKOUT
56269+ bool "Active kernel exploit response"
56270+ depends on X86 || ARM || PPC || SPARC
56271+ help
56272+ If you say Y here, when a PaX alert is triggered due to suspicious
56273+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
56274+ or an OOPs occurs due to bad memory accesses, instead of just
56275+ terminating the offending process (and potentially allowing
56276+ a subsequent exploit from the same user), we will take one of two
56277+ actions:
56278+ If the user was root, we will panic the system
56279+ If the user was non-root, we will log the attempt, terminate
56280+ all processes owned by the user, then prevent them from creating
56281+ any new processes until the system is restarted
56282+ This deters repeated kernel exploitation/bruteforcing attempts
56283+ and is useful for later forensics.
56284+
56285+endmenu
56286+menu "Role Based Access Control Options"
56287+depends on GRKERNSEC
56288+
56289+config GRKERNSEC_RBAC_DEBUG
56290+ bool
56291+
56292+config GRKERNSEC_NO_RBAC
56293+ bool "Disable RBAC system"
56294+ help
56295+ If you say Y here, the /dev/grsec device will be removed from the kernel,
56296+ preventing the RBAC system from being enabled. You should only say Y
56297+ here if you have no intention of using the RBAC system, so as to prevent
56298+ an attacker with root access from misusing the RBAC system to hide files
56299+ and processes when loadable module support and /dev/[k]mem have been
56300+ locked down.
56301+
56302+config GRKERNSEC_ACL_HIDEKERN
56303+ bool "Hide kernel processes"
56304+ help
56305+ If you say Y here, all kernel threads will be hidden to all
56306+ processes but those whose subject has the "view hidden processes"
56307+ flag.
56308+
56309+config GRKERNSEC_ACL_MAXTRIES
56310+ int "Maximum tries before password lockout"
56311+ default 3
56312+ help
56313+ This option enforces the maximum number of times a user can attempt
56314+ to authorize themselves with the grsecurity RBAC system before being
56315+ denied the ability to attempt authorization again for a specified time.
56316+ The lower the number, the harder it will be to brute-force a password.
56317+
56318+config GRKERNSEC_ACL_TIMEOUT
56319+ int "Time to wait after max password tries, in seconds"
56320+ default 30
56321+ help
56322+ This option specifies the time the user must wait after attempting to
56323+ authorize to the RBAC system with the maximum number of invalid
56324+ passwords. The higher the number, the harder it will be to brute-force
56325+ a password.
56326+
56327+endmenu
56328+menu "Filesystem Protections"
56329+depends on GRKERNSEC
56330+
56331+config GRKERNSEC_PROC
56332+ bool "Proc restrictions"
56333+ help
56334+ If you say Y here, the permissions of the /proc filesystem
56335+ will be altered to enhance system security and privacy. You MUST
56336+ choose either a user only restriction or a user and group restriction.
56337+ Depending upon the option you choose, you can either restrict users to
56338+ see only the processes they themselves run, or choose a group that can
56339+ view all processes and files normally restricted to root if you choose
56340+ the "restrict to user only" option. NOTE: If you're running identd as
56341+ a non-root user, you will have to run it as the group you specify here.
56342+
56343+config GRKERNSEC_PROC_USER
56344+ bool "Restrict /proc to user only"
56345+ depends on GRKERNSEC_PROC
56346+ help
56347+ If you say Y here, non-root users will only be able to view their own
56348+ processes, and restricts them from viewing network-related information,
56349+ and viewing kernel symbol and module information.
56350+
56351+config GRKERNSEC_PROC_USERGROUP
56352+ bool "Allow special group"
56353+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
56354+ help
56355+ If you say Y here, you will be able to select a group that will be
56356+ able to view all processes and network-related information. If you've
56357+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
56358+ remain hidden. This option is useful if you want to run identd as
56359+ a non-root user.
56360+
56361+config GRKERNSEC_PROC_GID
56362+ int "GID for special group"
56363+ depends on GRKERNSEC_PROC_USERGROUP
56364+ default 1001
56365+
56366+config GRKERNSEC_PROC_ADD
56367+ bool "Additional restrictions"
56368+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
56369+ help
56370+ If you say Y here, additional restrictions will be placed on
56371+ /proc that keep normal users from viewing device information and
56372+ slabinfo information that could be useful for exploits.
56373+
56374+config GRKERNSEC_LINK
56375+ bool "Linking restrictions"
56376+ help
56377+ If you say Y here, /tmp race exploits will be prevented, since users
56378+ will no longer be able to follow symlinks owned by other users in
56379+ world-writable +t directories (e.g. /tmp), unless the owner of the
56380+ symlink is the owner of the directory. Users will also not be
56381+ able to hardlink to files they do not own. If the sysctl option is
56382+ enabled, a sysctl option with name "linking_restrictions" is created.
56383+
56384+config GRKERNSEC_FIFO
56385+ bool "FIFO restrictions"
56386+ help
56387+ If you say Y here, users will not be able to write to FIFOs they don't
56388+ own in world-writable +t directories (e.g. /tmp), unless the owner of
56389+ the FIFO is the same owner of the directory it's held in. If the sysctl
56390+ option is enabled, a sysctl option with name "fifo_restrictions" is
56391+ created.
56392+
56393+config GRKERNSEC_SYSFS_RESTRICT
56394+ bool "Sysfs/debugfs restriction"
56395+ depends on SYSFS
56396+ help
56397+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
56398+ any filesystem normally mounted under it (e.g. debugfs) will only
56399+ be accessible by root. These filesystems generally provide access
56400+ to hardware and debug information that isn't appropriate for unprivileged
56401+ users of the system. Sysfs and debugfs have also become a large source
56402+ of new vulnerabilities, ranging from infoleaks to local compromise.
56403+ There has been very little oversight with an eye toward security involved
56404+ in adding new exporters of information to these filesystems, so their
56405+ use is discouraged.
56406+ This option is equivalent to a chmod 0700 of the mount paths.
56407+
56408+config GRKERNSEC_ROFS
56409+ bool "Runtime read-only mount protection"
56410+ help
56411+ If you say Y here, a sysctl option with name "romount_protect" will
56412+ be created. By setting this option to 1 at runtime, filesystems
56413+ will be protected in the following ways:
56414+ * No new writable mounts will be allowed
56415+ * Existing read-only mounts won't be able to be remounted read/write
56416+ * Write operations will be denied on all block devices
56417+ This option acts independently of grsec_lock: once it is set to 1,
56418+ it cannot be turned off. Therefore, please be mindful of the resulting
56419+ behavior if this option is enabled in an init script on a read-only
56420+ filesystem. This feature is mainly intended for secure embedded systems.
56421+
56422+config GRKERNSEC_CHROOT
56423+ bool "Chroot jail restrictions"
56424+ help
56425+ If you say Y here, you will be able to choose several options that will
56426+ make breaking out of a chrooted jail much more difficult. If you
56427+ encounter no software incompatibilities with the following options, it
56428+ is recommended that you enable each one.
56429+
56430+config GRKERNSEC_CHROOT_MOUNT
56431+ bool "Deny mounts"
56432+ depends on GRKERNSEC_CHROOT
56433+ help
56434+ If you say Y here, processes inside a chroot will not be able to
56435+ mount or remount filesystems. If the sysctl option is enabled, a
56436+ sysctl option with name "chroot_deny_mount" is created.
56437+
56438+config GRKERNSEC_CHROOT_DOUBLE
56439+ bool "Deny double-chroots"
56440+ depends on GRKERNSEC_CHROOT
56441+ help
56442+ If you say Y here, processes inside a chroot will not be able to chroot
56443+ again outside the chroot. This is a widely used method of breaking
56444+ out of a chroot jail and should not be allowed. If the sysctl
56445+ option is enabled, a sysctl option with name
56446+ "chroot_deny_chroot" is created.
56447+
56448+config GRKERNSEC_CHROOT_PIVOT
56449+ bool "Deny pivot_root in chroot"
56450+ depends on GRKERNSEC_CHROOT
56451+ help
56452+ If you say Y here, processes inside a chroot will not be able to use
56453+ a function called pivot_root() that was introduced in Linux 2.3.41. It
56454+ works similar to chroot in that it changes the root filesystem. This
56455+ function could be misused in a chrooted process to attempt to break out
56456+ of the chroot, and therefore should not be allowed. If the sysctl
56457+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
56458+ created.
56459+
56460+config GRKERNSEC_CHROOT_CHDIR
56461+ bool "Enforce chdir(\"/\") on all chroots"
56462+ depends on GRKERNSEC_CHROOT
56463+ help
56464+ If you say Y here, the current working directory of all newly-chrooted
56465+ applications will be set to the root directory of the chroot.
56466+ The man page on chroot(2) states:
56467+ Note that this call does not change the current working
56468+ directory, so that `.' can be outside the tree rooted at
56469+ `/'. In particular, the super-user can escape from a
56470+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
56471+
56472+ It is recommended that you say Y here, since it's not known to break
56473+ any software. If the sysctl option is enabled, a sysctl option with
56474+ name "chroot_enforce_chdir" is created.
56475+
56476+config GRKERNSEC_CHROOT_CHMOD
56477+ bool "Deny (f)chmod +s"
56478+ depends on GRKERNSEC_CHROOT
56479+ help
56480+ If you say Y here, processes inside a chroot will not be able to chmod
56481+ or fchmod files to make them have suid or sgid bits. This protects
56482+ against another published method of breaking a chroot. If the sysctl
56483+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
56484+ created.
56485+
56486+config GRKERNSEC_CHROOT_FCHDIR
56487+ bool "Deny fchdir out of chroot"
56488+ depends on GRKERNSEC_CHROOT
56489+ help
56490+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
56491+ to a file descriptor of the chrooting process that points to a directory
56492+ outside the filesystem will be stopped. If the sysctl option
56493+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
56494+
56495+config GRKERNSEC_CHROOT_MKNOD
56496+ bool "Deny mknod"
56497+ depends on GRKERNSEC_CHROOT
56498+ help
56499+ If you say Y here, processes inside a chroot will not be allowed to
56500+ mknod. The problem with using mknod inside a chroot is that it
56501+ would allow an attacker to create a device entry that is the same
56502+ as one on the physical root of your system, which could range from
56503+ anything from the console device to a device for your hard drive (which
56504+ they could then use to wipe the drive or steal data). It is recommended
56505+ that you say Y here, unless you run into software incompatibilities.
56506+ If the sysctl option is enabled, a sysctl option with name
56507+ "chroot_deny_mknod" is created.
56508+
56509+config GRKERNSEC_CHROOT_SHMAT
56510+ bool "Deny shmat() out of chroot"
56511+ depends on GRKERNSEC_CHROOT
56512+ help
56513+ If you say Y here, processes inside a chroot will not be able to attach
56514+ to shared memory segments that were created outside of the chroot jail.
56515+ It is recommended that you say Y here. If the sysctl option is enabled,
56516+ a sysctl option with name "chroot_deny_shmat" is created.
56517+
56518+config GRKERNSEC_CHROOT_UNIX
56519+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
56520+ depends on GRKERNSEC_CHROOT
56521+ help
56522+ If you say Y here, processes inside a chroot will not be able to
56523+ connect to abstract (meaning not belonging to a filesystem) Unix
56524+ domain sockets that were bound outside of a chroot. It is recommended
56525+ that you say Y here. If the sysctl option is enabled, a sysctl option
56526+ with name "chroot_deny_unix" is created.
56527+
56528+config GRKERNSEC_CHROOT_FINDTASK
56529+ bool "Protect outside processes"
56530+ depends on GRKERNSEC_CHROOT
56531+ help
56532+ If you say Y here, processes inside a chroot will not be able to
56533+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
56534+ getsid, or view any process outside of the chroot. If the sysctl
56535+ option is enabled, a sysctl option with name "chroot_findtask" is
56536+ created.
56537+
56538+config GRKERNSEC_CHROOT_NICE
56539+ bool "Restrict priority changes"
56540+ depends on GRKERNSEC_CHROOT
56541+ help
56542+ If you say Y here, processes inside a chroot will not be able to raise
56543+ the priority of processes in the chroot, or alter the priority of
56544+ processes outside the chroot. This provides more security than simply
56545+ removing CAP_SYS_NICE from the process' capability set. If the
56546+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
56547+ is created.
56548+
56549+config GRKERNSEC_CHROOT_SYSCTL
56550+ bool "Deny sysctl writes"
56551+ depends on GRKERNSEC_CHROOT
56552+ help
56553+ If you say Y here, an attacker in a chroot will not be able to
56554+ write to sysctl entries, either by sysctl(2) or through a /proc
56555+ interface. It is strongly recommended that you say Y here. If the
56556+ sysctl option is enabled, a sysctl option with name
56557+ "chroot_deny_sysctl" is created.
56558+
56559+config GRKERNSEC_CHROOT_CAPS
56560+ bool "Capability restrictions"
56561+ depends on GRKERNSEC_CHROOT
56562+ help
56563+ If you say Y here, the capabilities on all processes within a
56564+ chroot jail will be lowered to stop module insertion, raw i/o,
56565+ system and net admin tasks, rebooting the system, modifying immutable
56566+ files, modifying IPC owned by another, and changing the system time.
56567+ This is left an option because it can break some apps. Disable this
56568+ if your chrooted apps are having problems performing those kinds of
56569+ tasks. If the sysctl option is enabled, a sysctl option with
56570+ name "chroot_caps" is created.
56571+
56572+endmenu
56573+menu "Kernel Auditing"
56574+depends on GRKERNSEC
56575+
56576+config GRKERNSEC_AUDIT_GROUP
56577+ bool "Single group for auditing"
56578+ help
56579+ If you say Y here, the exec, chdir, and (un)mount logging features
56580+ will only operate on a group you specify. This option is recommended
56581+ if you only want to watch certain users instead of having a large
56582+ amount of logs from the entire system. If the sysctl option is enabled,
56583+ a sysctl option with name "audit_group" is created.
56584+
56585+config GRKERNSEC_AUDIT_GID
56586+ int "GID for auditing"
56587+ depends on GRKERNSEC_AUDIT_GROUP
56588+ default 1007
56589+
56590+config GRKERNSEC_EXECLOG
56591+ bool "Exec logging"
56592+ help
56593+ If you say Y here, all execve() calls will be logged (since the
56594+ other exec*() calls are frontends to execve(), all execution
56595+ will be logged). Useful for shell-servers that like to keep track
56596+ of their users. If the sysctl option is enabled, a sysctl option with
56597+ name "exec_logging" is created.
56598+ WARNING: This option when enabled will produce a LOT of logs, especially
56599+ on an active system.
56600+
56601+config GRKERNSEC_RESLOG
56602+ bool "Resource logging"
56603+ help
56604+ If you say Y here, all attempts to overstep resource limits will
56605+ be logged with the resource name, the requested size, and the current
56606+ limit. It is highly recommended that you say Y here. If the sysctl
56607+ option is enabled, a sysctl option with name "resource_logging" is
56608+ created. If the RBAC system is enabled, the sysctl value is ignored.
56609+
56610+config GRKERNSEC_CHROOT_EXECLOG
56611+ bool "Log execs within chroot"
56612+ help
56613+ If you say Y here, all executions inside a chroot jail will be logged
56614+ to syslog. This can cause a large amount of logs if certain
56615+ applications (eg. djb's daemontools) are installed on the system, and
56616+ is therefore left as an option. If the sysctl option is enabled, a
56617+ sysctl option with name "chroot_execlog" is created.
56618+
56619+config GRKERNSEC_AUDIT_PTRACE
56620+ bool "Ptrace logging"
56621+ help
56622+ If you say Y here, all attempts to attach to a process via ptrace
56623+ will be logged. If the sysctl option is enabled, a sysctl option
56624+ with name "audit_ptrace" is created.
56625+
56626+config GRKERNSEC_AUDIT_CHDIR
56627+ bool "Chdir logging"
56628+ help
56629+ If you say Y here, all chdir() calls will be logged. If the sysctl
56630+ option is enabled, a sysctl option with name "audit_chdir" is created.
56631+
56632+config GRKERNSEC_AUDIT_MOUNT
56633+ bool "(Un)Mount logging"
56634+ help
56635+ If you say Y here, all mounts and unmounts will be logged. If the
56636+ sysctl option is enabled, a sysctl option with name "audit_mount" is
56637+ created.
56638+
56639+config GRKERNSEC_SIGNAL
56640+ bool "Signal logging"
56641+ help
56642+ If you say Y here, certain important signals will be logged, such as
56643+ SIGSEGV, which will as a result inform you of when an error in a program
56644+ occurred, which in some cases could mean a possible exploit attempt.
56645+ If the sysctl option is enabled, a sysctl option with name
56646+ "signal_logging" is created.
56647+
56648+config GRKERNSEC_FORKFAIL
56649+ bool "Fork failure logging"
56650+ help
56651+ If you say Y here, all failed fork() attempts will be logged.
56652+ This could suggest a fork bomb, or someone attempting to overstep
56653+ their process limit. If the sysctl option is enabled, a sysctl option
56654+ with name "forkfail_logging" is created.
56655+
56656+config GRKERNSEC_TIME
56657+ bool "Time change logging"
56658+ help
56659+ If you say Y here, any changes of the system clock will be logged.
56660+ If the sysctl option is enabled, a sysctl option with name
56661+ "timechange_logging" is created.
56662+
56663+config GRKERNSEC_PROC_IPADDR
56664+ bool "/proc/<pid>/ipaddr support"
56665+ help
56666+ If you say Y here, a new entry will be added to each /proc/<pid>
56667+ directory that contains the IP address of the person using the task.
56668+ The IP is carried across local TCP and AF_UNIX stream sockets.
56669+ This information can be useful for IDS/IPSes to perform remote response
56670+ to a local attack. The entry is readable by only the owner of the
56671+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
56672+ the RBAC system), and thus does not create privacy concerns.
56673+
56674+config GRKERNSEC_RWXMAP_LOG
56675+ bool 'Denied RWX mmap/mprotect logging'
56676+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
56677+ help
56678+ If you say Y here, calls to mmap() and mprotect() with explicit
56679+ usage of PROT_WRITE and PROT_EXEC together will be logged when
56680+ denied by the PAX_MPROTECT feature. If the sysctl option is
56681+ enabled, a sysctl option with name "rwxmap_logging" is created.
56682+
56683+config GRKERNSEC_AUDIT_TEXTREL
56684+ bool 'ELF text relocations logging (READ HELP)'
56685+ depends on PAX_MPROTECT
56686+ help
56687+ If you say Y here, text relocations will be logged with the filename
56688+ of the offending library or binary. The purpose of the feature is
56689+ to help Linux distribution developers get rid of libraries and
56690+ binaries that need text relocations which hinder the future progress
56691+ of PaX. Only Linux distribution developers should say Y here, and
56692+ never on a production machine, as this option creates an information
56693+ leak that could aid an attacker in defeating the randomization of
56694+ a single memory region. If the sysctl option is enabled, a sysctl
56695+ option with name "audit_textrel" is created.
56696+
56697+endmenu
56698+
56699+menu "Executable Protections"
56700+depends on GRKERNSEC
56701+
56702+config GRKERNSEC_DMESG
56703+ bool "Dmesg(8) restriction"
56704+ help
56705+ If you say Y here, non-root users will not be able to use dmesg(8)
56706+ to view up to the last 4kb of messages in the kernel's log buffer.
56707+ The kernel's log buffer often contains kernel addresses and other
56708+ identifying information useful to an attacker in fingerprinting a
56709+ system for a targeted exploit.
56710+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
56711+ created.
56712+
56713+config GRKERNSEC_HARDEN_PTRACE
56714+ bool "Deter ptrace-based process snooping"
56715+ help
56716+ If you say Y here, TTY sniffers and other malicious monitoring
56717+ programs implemented through ptrace will be defeated. If you
56718+ have been using the RBAC system, this option has already been
56719+ enabled for several years for all users, with the ability to make
56720+ fine-grained exceptions.
56721+
56722+ This option only affects the ability of non-root users to ptrace
56723+ processes that are not a descendent of the ptracing process.
56724+ This means that strace ./binary and gdb ./binary will still work,
56725+ but attaching to arbitrary processes will not. If the sysctl
56726+ option is enabled, a sysctl option with name "harden_ptrace" is
56727+ created.
56728+
56729+config GRKERNSEC_TPE
56730+ bool "Trusted Path Execution (TPE)"
56731+ help
56732+ If you say Y here, you will be able to choose a gid to add to the
56733+ supplementary groups of users you want to mark as "untrusted."
56734+ These users will not be able to execute any files that are not in
56735+ root-owned directories writable only by root. If the sysctl option
56736+ is enabled, a sysctl option with name "tpe" is created.
56737+
56738+config GRKERNSEC_TPE_ALL
56739+ bool "Partially restrict all non-root users"
56740+ depends on GRKERNSEC_TPE
56741+ help
56742+ If you say Y here, all non-root users will be covered under
56743+ a weaker TPE restriction. This is separate from, and in addition to,
56744+ the main TPE options that you have selected elsewhere. Thus, if a
56745+ "trusted" GID is chosen, this restriction applies to even that GID.
56746+ Under this restriction, all non-root users will only be allowed to
56747+ execute files in directories they own that are not group or
56748+ world-writable, or in directories owned by root and writable only by
56749+ root. If the sysctl option is enabled, a sysctl option with name
56750+ "tpe_restrict_all" is created.
56751+
56752+config GRKERNSEC_TPE_INVERT
56753+ bool "Invert GID option"
56754+ depends on GRKERNSEC_TPE
56755+ help
56756+ If you say Y here, the group you specify in the TPE configuration will
56757+ decide what group TPE restrictions will be *disabled* for. This
56758+ option is useful if you want TPE restrictions to be applied to most
56759+ users on the system. If the sysctl option is enabled, a sysctl option
56760+ with name "tpe_invert" is created. Unlike other sysctl options, this
56761+ entry will default to on for backward-compatibility.
56762+
56763+config GRKERNSEC_TPE_GID
56764+ int "GID for untrusted users"
56765+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
56766+ default 1005
56767+ help
56768+ Setting this GID determines what group TPE restrictions will be
56769+ *enabled* for. If the sysctl option is enabled, a sysctl option
56770+ with name "tpe_gid" is created.
56771+
56772+config GRKERNSEC_TPE_GID
56773+ int "GID for trusted users"
56774+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
56775+ default 1005
56776+ help
56777+ Setting this GID determines what group TPE restrictions will be
56778+ *disabled* for. If the sysctl option is enabled, a sysctl option
56779+ with name "tpe_gid" is created.
56780+
56781+endmenu
56782+menu "Network Protections"
56783+depends on GRKERNSEC
56784+
56785+config GRKERNSEC_RANDNET
56786+ bool "Larger entropy pools"
56787+ help
56788+ If you say Y here, the entropy pools used for many features of Linux
56789+ and grsecurity will be doubled in size. Since several grsecurity
56790+ features use additional randomness, it is recommended that you say Y
56791+ here. Saying Y here has a similar effect as modifying
56792+ /proc/sys/kernel/random/poolsize.
56793+
56794+config GRKERNSEC_BLACKHOLE
56795+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
56796+ depends on NET
56797+ help
56798+ If you say Y here, neither TCP resets nor ICMP
56799+ destination-unreachable packets will be sent in response to packets
56800+ sent to ports for which no associated listening process exists.
56801+ This feature supports both IPV4 and IPV6 and exempts the
56802+ loopback interface from blackholing. Enabling this feature
56803+ makes a host more resilient to DoS attacks and reduces network
56804+ visibility against scanners.
56805+
56806+ The blackhole feature as-implemented is equivalent to the FreeBSD
56807+ blackhole feature, as it prevents RST responses to all packets, not
56808+ just SYNs. Under most application behavior this causes no
56809+ problems, but applications (like haproxy) may not close certain
56810+ connections in a way that cleanly terminates them on the remote
56811+ end, leaving the remote host in LAST_ACK state. Because of this
56812+ side-effect and to prevent intentional LAST_ACK DoSes, this
56813+ feature also adds automatic mitigation against such attacks.
56814+ The mitigation drastically reduces the amount of time a socket
56815+ can spend in LAST_ACK state. If you're using haproxy and not
56816+ all servers it connects to have this option enabled, consider
56817+ disabling this feature on the haproxy host.
56818+
56819+ If the sysctl option is enabled, two sysctl options with names
56820+ "ip_blackhole" and "lastack_retries" will be created.
56821+ While "ip_blackhole" takes the standard zero/non-zero on/off
56822+ toggle, "lastack_retries" uses the same kinds of values as
56823+ "tcp_retries1" and "tcp_retries2". The default value of 4
56824+ prevents a socket from lasting more than 45 seconds in LAST_ACK
56825+ state.
56826+
56827+config GRKERNSEC_SOCKET
56828+ bool "Socket restrictions"
56829+ depends on NET
56830+ help
56831+ If you say Y here, you will be able to choose from several options.
56832+ If you assign a GID on your system and add it to the supplementary
56833+ groups of users you want to restrict socket access to, this patch
56834+ will perform up to three things, based on the option(s) you choose.
56835+
56836+config GRKERNSEC_SOCKET_ALL
56837+ bool "Deny any sockets to group"
56838+ depends on GRKERNSEC_SOCKET
56839+ help
56840+ If you say Y here, you will be able to choose a GID whose users will
56841+ be unable to connect to other hosts from your machine or run server
56842+ applications from your machine. If the sysctl option is enabled, a
56843+ sysctl option with name "socket_all" is created.
56844+
56845+config GRKERNSEC_SOCKET_ALL_GID
56846+ int "GID to deny all sockets for"
56847+ depends on GRKERNSEC_SOCKET_ALL
56848+ default 1004
56849+ help
56850+ Here you can choose the GID to disable socket access for. Remember to
56851+ add the users you want socket access disabled for to the GID
56852+ specified here. If the sysctl option is enabled, a sysctl option
56853+ with name "socket_all_gid" is created.
56854+
56855+config GRKERNSEC_SOCKET_CLIENT
56856+ bool "Deny client sockets to group"
56857+ depends on GRKERNSEC_SOCKET
56858+ help
56859+ If you say Y here, you will be able to choose a GID whose users will
56860+ be unable to connect to other hosts from your machine, but will be
56861+ able to run servers. If this option is enabled, all users in the group
56862+ you specify will have to use passive mode when initiating ftp transfers
56863+ from the shell on your machine. If the sysctl option is enabled, a
56864+ sysctl option with name "socket_client" is created.
56865+
56866+config GRKERNSEC_SOCKET_CLIENT_GID
56867+ int "GID to deny client sockets for"
56868+ depends on GRKERNSEC_SOCKET_CLIENT
56869+ default 1003
56870+ help
56871+ Here you can choose the GID to disable client socket access for.
56872+ Remember to add the users you want client socket access disabled for to
56873+ the GID specified here. If the sysctl option is enabled, a sysctl
56874+ option with name "socket_client_gid" is created.
56875+
56876+config GRKERNSEC_SOCKET_SERVER
56877+ bool "Deny server sockets to group"
56878+ depends on GRKERNSEC_SOCKET
56879+ help
56880+ If you say Y here, you will be able to choose a GID whose users will
56881+ be unable to run server applications from your machine. If the sysctl
56882+ option is enabled, a sysctl option with name "socket_server" is created.
56883+
56884+config GRKERNSEC_SOCKET_SERVER_GID
56885+ int "GID to deny server sockets for"
56886+ depends on GRKERNSEC_SOCKET_SERVER
56887+ default 1002
56888+ help
56889+ Here you can choose the GID to disable server socket access for.
56890+ Remember to add the users you want server socket access disabled for to
56891+ the GID specified here. If the sysctl option is enabled, a sysctl
56892+ option with name "socket_server_gid" is created.
56893+
56894+endmenu
56895+menu "Sysctl support"
56896+depends on GRKERNSEC && SYSCTL
56897+
56898+config GRKERNSEC_SYSCTL
56899+ bool "Sysctl support"
56900+ help
56901+ If you say Y here, you will be able to change the options that
56902+ grsecurity runs with at bootup, without having to recompile your
56903+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
56904+ to enable (1) or disable (0) various features. All the sysctl entries
56905+ are mutable until the "grsec_lock" entry is set to a non-zero value.
56906+ All features enabled in the kernel configuration are disabled at boot
56907+ if you do not say Y to the "Turn on features by default" option.
56908+ All options should be set at startup, and the grsec_lock entry should
56909+ be set to a non-zero value after all the options are set.
56910+ *THIS IS EXTREMELY IMPORTANT*
56911+
56912+config GRKERNSEC_SYSCTL_DISTRO
56913+ bool "Extra sysctl support for distro makers (READ HELP)"
56914+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
56915+ help
56916+ If you say Y here, additional sysctl options will be created
56917+ for features that affect processes running as root. Therefore,
56918+ it is critical when using this option that the grsec_lock entry be
56919+ enabled after boot. Only distros with prebuilt kernel packages
56920+ with this option enabled that can ensure grsec_lock is enabled
56921+ after boot should use this option.
56922+ *Failure to set grsec_lock after boot makes all grsec features
56923+ this option covers useless*
56924+
56925+ Currently this option creates the following sysctl entries:
56926+ "Disable Privileged I/O": "disable_priv_io"
56927+
56928+config GRKERNSEC_SYSCTL_ON
56929+ bool "Turn on features by default"
56930+ depends on GRKERNSEC_SYSCTL
56931+ help
56932+ If you say Y here, instead of having all features enabled in the
56933+ kernel configuration disabled at boot time, the features will be
56934+ enabled at boot time. It is recommended you say Y here unless
56935+ there is some reason you would want all sysctl-tunable features to
56936+ be disabled by default. As mentioned elsewhere, it is important
56937+ to enable the grsec_lock entry once you have finished modifying
56938+ the sysctl entries.
56939+
56940+endmenu
56941+menu "Logging Options"
56942+depends on GRKERNSEC
56943+
56944+config GRKERNSEC_FLOODTIME
56945+ int "Seconds in between log messages (minimum)"
56946+ default 10
56947+ help
56948+ This option allows you to enforce the number of seconds between
56949+ grsecurity log messages. The default should be suitable for most
56950+ people, however, if you choose to change it, choose a value small enough
56951+ to allow informative logs to be produced, but large enough to
56952+ prevent flooding.
56953+
56954+config GRKERNSEC_FLOODBURST
56955+ int "Number of messages in a burst (maximum)"
56956+ default 6
56957+ help
56958+ This option allows you to choose the maximum number of messages allowed
56959+ within the flood time interval you chose in a separate option. The
56960+ default should be suitable for most people, however if you find that
56961+ many of your logs are being interpreted as flooding, you may want to
56962+ raise this value.
56963+
56964+endmenu
56965+
56966+endmenu
56967diff -urNp linux-3.1.1/grsecurity/Makefile linux-3.1.1/grsecurity/Makefile
56968--- linux-3.1.1/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
56969+++ linux-3.1.1/grsecurity/Makefile 2011-11-16 18:40:31.000000000 -0500
56970@@ -0,0 +1,36 @@
56971+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
56972+# during 2001-2009 it has been completely redesigned by Brad Spengler
56973+# into an RBAC system
56974+#
56975+# All code in this directory and various hooks inserted throughout the kernel
56976+# are copyright Brad Spengler - Open Source Security, Inc., and released
56977+# under the GPL v2 or higher
56978+
56979+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
56980+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
56981+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
56982+
56983+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
56984+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
56985+ gracl_learn.o grsec_log.o
56986+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
56987+
56988+ifdef CONFIG_NET
56989+obj-y += grsec_sock.o
56990+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
56991+endif
56992+
56993+ifndef CONFIG_GRKERNSEC
56994+obj-y += grsec_disabled.o
56995+endif
56996+
56997+ifdef CONFIG_GRKERNSEC_HIDESYM
56998+extra-y := grsec_hidesym.o
56999+$(obj)/grsec_hidesym.o:
57000+ @-chmod -f 500 /boot
57001+ @-chmod -f 500 /lib/modules
57002+ @-chmod -f 500 /lib64/modules
57003+ @-chmod -f 500 /lib32/modules
57004+ @-chmod -f 700 .
57005+ @echo ' grsec: protected kernel image paths'
57006+endif
57007diff -urNp linux-3.1.1/include/acpi/acpi_bus.h linux-3.1.1/include/acpi/acpi_bus.h
57008--- linux-3.1.1/include/acpi/acpi_bus.h 2011-11-11 15:19:27.000000000 -0500
57009+++ linux-3.1.1/include/acpi/acpi_bus.h 2011-11-16 18:39:08.000000000 -0500
57010@@ -107,7 +107,7 @@ struct acpi_device_ops {
57011 acpi_op_bind bind;
57012 acpi_op_unbind unbind;
57013 acpi_op_notify notify;
57014-};
57015+} __no_const;
57016
57017 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
57018
57019diff -urNp linux-3.1.1/include/asm-generic/atomic-long.h linux-3.1.1/include/asm-generic/atomic-long.h
57020--- linux-3.1.1/include/asm-generic/atomic-long.h 2011-11-11 15:19:27.000000000 -0500
57021+++ linux-3.1.1/include/asm-generic/atomic-long.h 2011-11-16 18:39:08.000000000 -0500
57022@@ -22,6 +22,12 @@
57023
57024 typedef atomic64_t atomic_long_t;
57025
57026+#ifdef CONFIG_PAX_REFCOUNT
57027+typedef atomic64_unchecked_t atomic_long_unchecked_t;
57028+#else
57029+typedef atomic64_t atomic_long_unchecked_t;
57030+#endif
57031+
57032 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
57033
57034 static inline long atomic_long_read(atomic_long_t *l)
57035@@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
57036 return (long)atomic64_read(v);
57037 }
57038
57039+#ifdef CONFIG_PAX_REFCOUNT
57040+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
57041+{
57042+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57043+
57044+ return (long)atomic64_read_unchecked(v);
57045+}
57046+#endif
57047+
57048 static inline void atomic_long_set(atomic_long_t *l, long i)
57049 {
57050 atomic64_t *v = (atomic64_t *)l;
57051@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
57052 atomic64_set(v, i);
57053 }
57054
57055+#ifdef CONFIG_PAX_REFCOUNT
57056+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
57057+{
57058+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57059+
57060+ atomic64_set_unchecked(v, i);
57061+}
57062+#endif
57063+
57064 static inline void atomic_long_inc(atomic_long_t *l)
57065 {
57066 atomic64_t *v = (atomic64_t *)l;
57067@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
57068 atomic64_inc(v);
57069 }
57070
57071+#ifdef CONFIG_PAX_REFCOUNT
57072+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
57073+{
57074+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57075+
57076+ atomic64_inc_unchecked(v);
57077+}
57078+#endif
57079+
57080 static inline void atomic_long_dec(atomic_long_t *l)
57081 {
57082 atomic64_t *v = (atomic64_t *)l;
57083@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
57084 atomic64_dec(v);
57085 }
57086
57087+#ifdef CONFIG_PAX_REFCOUNT
57088+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
57089+{
57090+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57091+
57092+ atomic64_dec_unchecked(v);
57093+}
57094+#endif
57095+
57096 static inline void atomic_long_add(long i, atomic_long_t *l)
57097 {
57098 atomic64_t *v = (atomic64_t *)l;
57099@@ -59,6 +101,15 @@ static inline void atomic_long_add(long
57100 atomic64_add(i, v);
57101 }
57102
57103+#ifdef CONFIG_PAX_REFCOUNT
57104+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
57105+{
57106+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57107+
57108+ atomic64_add_unchecked(i, v);
57109+}
57110+#endif
57111+
57112 static inline void atomic_long_sub(long i, atomic_long_t *l)
57113 {
57114 atomic64_t *v = (atomic64_t *)l;
57115@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long
57116 atomic64_sub(i, v);
57117 }
57118
57119+#ifdef CONFIG_PAX_REFCOUNT
57120+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
57121+{
57122+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57123+
57124+ atomic64_sub_unchecked(i, v);
57125+}
57126+#endif
57127+
57128 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
57129 {
57130 atomic64_t *v = (atomic64_t *)l;
57131@@ -115,6 +175,15 @@ static inline long atomic_long_inc_retur
57132 return (long)atomic64_inc_return(v);
57133 }
57134
57135+#ifdef CONFIG_PAX_REFCOUNT
57136+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
57137+{
57138+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57139+
57140+ return (long)atomic64_inc_return_unchecked(v);
57141+}
57142+#endif
57143+
57144 static inline long atomic_long_dec_return(atomic_long_t *l)
57145 {
57146 atomic64_t *v = (atomic64_t *)l;
57147@@ -140,6 +209,12 @@ static inline long atomic_long_add_unles
57148
57149 typedef atomic_t atomic_long_t;
57150
57151+#ifdef CONFIG_PAX_REFCOUNT
57152+typedef atomic_unchecked_t atomic_long_unchecked_t;
57153+#else
57154+typedef atomic_t atomic_long_unchecked_t;
57155+#endif
57156+
57157 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
57158 static inline long atomic_long_read(atomic_long_t *l)
57159 {
57160@@ -148,6 +223,15 @@ static inline long atomic_long_read(atom
57161 return (long)atomic_read(v);
57162 }
57163
57164+#ifdef CONFIG_PAX_REFCOUNT
57165+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
57166+{
57167+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57168+
57169+ return (long)atomic_read_unchecked(v);
57170+}
57171+#endif
57172+
57173 static inline void atomic_long_set(atomic_long_t *l, long i)
57174 {
57175 atomic_t *v = (atomic_t *)l;
57176@@ -155,6 +239,15 @@ static inline void atomic_long_set(atomi
57177 atomic_set(v, i);
57178 }
57179
57180+#ifdef CONFIG_PAX_REFCOUNT
57181+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
57182+{
57183+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57184+
57185+ atomic_set_unchecked(v, i);
57186+}
57187+#endif
57188+
57189 static inline void atomic_long_inc(atomic_long_t *l)
57190 {
57191 atomic_t *v = (atomic_t *)l;
57192@@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomi
57193 atomic_inc(v);
57194 }
57195
57196+#ifdef CONFIG_PAX_REFCOUNT
57197+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
57198+{
57199+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57200+
57201+ atomic_inc_unchecked(v);
57202+}
57203+#endif
57204+
57205 static inline void atomic_long_dec(atomic_long_t *l)
57206 {
57207 atomic_t *v = (atomic_t *)l;
57208@@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomi
57209 atomic_dec(v);
57210 }
57211
57212+#ifdef CONFIG_PAX_REFCOUNT
57213+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
57214+{
57215+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57216+
57217+ atomic_dec_unchecked(v);
57218+}
57219+#endif
57220+
57221 static inline void atomic_long_add(long i, atomic_long_t *l)
57222 {
57223 atomic_t *v = (atomic_t *)l;
57224@@ -176,6 +287,15 @@ static inline void atomic_long_add(long
57225 atomic_add(i, v);
57226 }
57227
57228+#ifdef CONFIG_PAX_REFCOUNT
57229+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
57230+{
57231+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57232+
57233+ atomic_add_unchecked(i, v);
57234+}
57235+#endif
57236+
57237 static inline void atomic_long_sub(long i, atomic_long_t *l)
57238 {
57239 atomic_t *v = (atomic_t *)l;
57240@@ -183,6 +303,15 @@ static inline void atomic_long_sub(long
57241 atomic_sub(i, v);
57242 }
57243
57244+#ifdef CONFIG_PAX_REFCOUNT
57245+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
57246+{
57247+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57248+
57249+ atomic_sub_unchecked(i, v);
57250+}
57251+#endif
57252+
57253 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
57254 {
57255 atomic_t *v = (atomic_t *)l;
57256@@ -232,6 +361,15 @@ static inline long atomic_long_inc_retur
57257 return (long)atomic_inc_return(v);
57258 }
57259
57260+#ifdef CONFIG_PAX_REFCOUNT
57261+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
57262+{
57263+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57264+
57265+ return (long)atomic_inc_return_unchecked(v);
57266+}
57267+#endif
57268+
57269 static inline long atomic_long_dec_return(atomic_long_t *l)
57270 {
57271 atomic_t *v = (atomic_t *)l;
57272@@ -255,4 +393,49 @@ static inline long atomic_long_add_unles
57273
57274 #endif /* BITS_PER_LONG == 64 */
57275
57276+#ifdef CONFIG_PAX_REFCOUNT
57277+static inline void pax_refcount_needs_these_functions(void)
57278+{
57279+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
57280+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
57281+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
57282+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
57283+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
57284+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
57285+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
57286+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
57287+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
57288+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
57289+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
57290+
57291+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
57292+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
57293+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
57294+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
57295+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
57296+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
57297+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
57298+}
57299+#else
57300+#define atomic_read_unchecked(v) atomic_read(v)
57301+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
57302+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
57303+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
57304+#define atomic_inc_unchecked(v) atomic_inc(v)
57305+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
57306+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
57307+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
57308+#define atomic_dec_unchecked(v) atomic_dec(v)
57309+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
57310+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
57311+
57312+#define atomic_long_read_unchecked(v) atomic_long_read(v)
57313+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
57314+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
57315+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
57316+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
57317+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
57318+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
57319+#endif
57320+
57321 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
57322diff -urNp linux-3.1.1/include/asm-generic/cache.h linux-3.1.1/include/asm-generic/cache.h
57323--- linux-3.1.1/include/asm-generic/cache.h 2011-11-11 15:19:27.000000000 -0500
57324+++ linux-3.1.1/include/asm-generic/cache.h 2011-11-16 18:39:08.000000000 -0500
57325@@ -6,7 +6,7 @@
57326 * cache lines need to provide their own cache.h.
57327 */
57328
57329-#define L1_CACHE_SHIFT 5
57330-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
57331+#define L1_CACHE_SHIFT 5UL
57332+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
57333
57334 #endif /* __ASM_GENERIC_CACHE_H */
57335diff -urNp linux-3.1.1/include/asm-generic/int-l64.h linux-3.1.1/include/asm-generic/int-l64.h
57336--- linux-3.1.1/include/asm-generic/int-l64.h 2011-11-11 15:19:27.000000000 -0500
57337+++ linux-3.1.1/include/asm-generic/int-l64.h 2011-11-16 18:39:08.000000000 -0500
57338@@ -46,6 +46,8 @@ typedef unsigned int u32;
57339 typedef signed long s64;
57340 typedef unsigned long u64;
57341
57342+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
57343+
57344 #define S8_C(x) x
57345 #define U8_C(x) x ## U
57346 #define S16_C(x) x
57347diff -urNp linux-3.1.1/include/asm-generic/int-ll64.h linux-3.1.1/include/asm-generic/int-ll64.h
57348--- linux-3.1.1/include/asm-generic/int-ll64.h 2011-11-11 15:19:27.000000000 -0500
57349+++ linux-3.1.1/include/asm-generic/int-ll64.h 2011-11-16 18:39:08.000000000 -0500
57350@@ -51,6 +51,8 @@ typedef unsigned int u32;
57351 typedef signed long long s64;
57352 typedef unsigned long long u64;
57353
57354+typedef unsigned long long intoverflow_t;
57355+
57356 #define S8_C(x) x
57357 #define U8_C(x) x ## U
57358 #define S16_C(x) x
57359diff -urNp linux-3.1.1/include/asm-generic/kmap_types.h linux-3.1.1/include/asm-generic/kmap_types.h
57360--- linux-3.1.1/include/asm-generic/kmap_types.h 2011-11-11 15:19:27.000000000 -0500
57361+++ linux-3.1.1/include/asm-generic/kmap_types.h 2011-11-16 18:39:08.000000000 -0500
57362@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
57363 KMAP_D(17) KM_NMI,
57364 KMAP_D(18) KM_NMI_PTE,
57365 KMAP_D(19) KM_KDB,
57366+KMAP_D(20) KM_CLEARPAGE,
57367 /*
57368 * Remember to update debug_kmap_atomic() when adding new kmap types!
57369 */
57370-KMAP_D(20) KM_TYPE_NR
57371+KMAP_D(21) KM_TYPE_NR
57372 };
57373
57374 #undef KMAP_D
57375diff -urNp linux-3.1.1/include/asm-generic/pgtable.h linux-3.1.1/include/asm-generic/pgtable.h
57376--- linux-3.1.1/include/asm-generic/pgtable.h 2011-11-11 15:19:27.000000000 -0500
57377+++ linux-3.1.1/include/asm-generic/pgtable.h 2011-11-16 18:39:08.000000000 -0500
57378@@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
57379 #endif /* __HAVE_ARCH_PMD_WRITE */
57380 #endif
57381
57382+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
57383+static inline unsigned long pax_open_kernel(void) { return 0; }
57384+#endif
57385+
57386+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
57387+static inline unsigned long pax_close_kernel(void) { return 0; }
57388+#endif
57389+
57390 #endif /* !__ASSEMBLY__ */
57391
57392 #endif /* _ASM_GENERIC_PGTABLE_H */
57393diff -urNp linux-3.1.1/include/asm-generic/pgtable-nopmd.h linux-3.1.1/include/asm-generic/pgtable-nopmd.h
57394--- linux-3.1.1/include/asm-generic/pgtable-nopmd.h 2011-11-11 15:19:27.000000000 -0500
57395+++ linux-3.1.1/include/asm-generic/pgtable-nopmd.h 2011-11-16 18:39:08.000000000 -0500
57396@@ -1,14 +1,19 @@
57397 #ifndef _PGTABLE_NOPMD_H
57398 #define _PGTABLE_NOPMD_H
57399
57400-#ifndef __ASSEMBLY__
57401-
57402 #include <asm-generic/pgtable-nopud.h>
57403
57404-struct mm_struct;
57405-
57406 #define __PAGETABLE_PMD_FOLDED
57407
57408+#define PMD_SHIFT PUD_SHIFT
57409+#define PTRS_PER_PMD 1
57410+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
57411+#define PMD_MASK (~(PMD_SIZE-1))
57412+
57413+#ifndef __ASSEMBLY__
57414+
57415+struct mm_struct;
57416+
57417 /*
57418 * Having the pmd type consist of a pud gets the size right, and allows
57419 * us to conceptually access the pud entry that this pmd is folded into
57420@@ -16,11 +21,6 @@ struct mm_struct;
57421 */
57422 typedef struct { pud_t pud; } pmd_t;
57423
57424-#define PMD_SHIFT PUD_SHIFT
57425-#define PTRS_PER_PMD 1
57426-#define PMD_SIZE (1UL << PMD_SHIFT)
57427-#define PMD_MASK (~(PMD_SIZE-1))
57428-
57429 /*
57430 * The "pud_xxx()" functions here are trivial for a folded two-level
57431 * setup: the pmd is never bad, and a pmd always exists (as it's folded
57432diff -urNp linux-3.1.1/include/asm-generic/pgtable-nopud.h linux-3.1.1/include/asm-generic/pgtable-nopud.h
57433--- linux-3.1.1/include/asm-generic/pgtable-nopud.h 2011-11-11 15:19:27.000000000 -0500
57434+++ linux-3.1.1/include/asm-generic/pgtable-nopud.h 2011-11-16 18:39:08.000000000 -0500
57435@@ -1,10 +1,15 @@
57436 #ifndef _PGTABLE_NOPUD_H
57437 #define _PGTABLE_NOPUD_H
57438
57439-#ifndef __ASSEMBLY__
57440-
57441 #define __PAGETABLE_PUD_FOLDED
57442
57443+#define PUD_SHIFT PGDIR_SHIFT
57444+#define PTRS_PER_PUD 1
57445+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
57446+#define PUD_MASK (~(PUD_SIZE-1))
57447+
57448+#ifndef __ASSEMBLY__
57449+
57450 /*
57451 * Having the pud type consist of a pgd gets the size right, and allows
57452 * us to conceptually access the pgd entry that this pud is folded into
57453@@ -12,11 +17,6 @@
57454 */
57455 typedef struct { pgd_t pgd; } pud_t;
57456
57457-#define PUD_SHIFT PGDIR_SHIFT
57458-#define PTRS_PER_PUD 1
57459-#define PUD_SIZE (1UL << PUD_SHIFT)
57460-#define PUD_MASK (~(PUD_SIZE-1))
57461-
57462 /*
57463 * The "pgd_xxx()" functions here are trivial for a folded two-level
57464 * setup: the pud is never bad, and a pud always exists (as it's folded
57465diff -urNp linux-3.1.1/include/asm-generic/vmlinux.lds.h linux-3.1.1/include/asm-generic/vmlinux.lds.h
57466--- linux-3.1.1/include/asm-generic/vmlinux.lds.h 2011-11-11 15:19:27.000000000 -0500
57467+++ linux-3.1.1/include/asm-generic/vmlinux.lds.h 2011-11-16 18:39:08.000000000 -0500
57468@@ -217,6 +217,7 @@
57469 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
57470 VMLINUX_SYMBOL(__start_rodata) = .; \
57471 *(.rodata) *(.rodata.*) \
57472+ *(.data..read_only) \
57473 *(__vermagic) /* Kernel version magic */ \
57474 . = ALIGN(8); \
57475 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
57476@@ -723,17 +724,18 @@
57477 * section in the linker script will go there too. @phdr should have
57478 * a leading colon.
57479 *
57480- * Note that this macros defines __per_cpu_load as an absolute symbol.
57481+ * Note that this macros defines per_cpu_load as an absolute symbol.
57482 * If there is no need to put the percpu section at a predetermined
57483 * address, use PERCPU_SECTION.
57484 */
57485 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
57486- VMLINUX_SYMBOL(__per_cpu_load) = .; \
57487- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
57488+ per_cpu_load = .; \
57489+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
57490 - LOAD_OFFSET) { \
57491+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
57492 PERCPU_INPUT(cacheline) \
57493 } phdr \
57494- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
57495+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
57496
57497 /**
57498 * PERCPU_SECTION - define output section for percpu area, simple version
57499diff -urNp linux-3.1.1/include/drm/drm_crtc_helper.h linux-3.1.1/include/drm/drm_crtc_helper.h
57500--- linux-3.1.1/include/drm/drm_crtc_helper.h 2011-11-11 15:19:27.000000000 -0500
57501+++ linux-3.1.1/include/drm/drm_crtc_helper.h 2011-11-16 18:39:08.000000000 -0500
57502@@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
57503
57504 /* disable crtc when not in use - more explicit than dpms off */
57505 void (*disable)(struct drm_crtc *crtc);
57506-};
57507+} __no_const;
57508
57509 struct drm_encoder_helper_funcs {
57510 void (*dpms)(struct drm_encoder *encoder, int mode);
57511@@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
57512 struct drm_connector *connector);
57513 /* disable encoder when not in use - more explicit than dpms off */
57514 void (*disable)(struct drm_encoder *encoder);
57515-};
57516+} __no_const;
57517
57518 struct drm_connector_helper_funcs {
57519 int (*get_modes)(struct drm_connector *connector);
57520diff -urNp linux-3.1.1/include/drm/drmP.h linux-3.1.1/include/drm/drmP.h
57521--- linux-3.1.1/include/drm/drmP.h 2011-11-11 15:19:27.000000000 -0500
57522+++ linux-3.1.1/include/drm/drmP.h 2011-11-16 18:39:08.000000000 -0500
57523@@ -73,6 +73,7 @@
57524 #include <linux/workqueue.h>
57525 #include <linux/poll.h>
57526 #include <asm/pgalloc.h>
57527+#include <asm/local.h>
57528 #include "drm.h"
57529
57530 #include <linux/idr.h>
57531@@ -1035,7 +1036,7 @@ struct drm_device {
57532
57533 /** \name Usage Counters */
57534 /*@{ */
57535- int open_count; /**< Outstanding files open */
57536+ local_t open_count; /**< Outstanding files open */
57537 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
57538 atomic_t vma_count; /**< Outstanding vma areas open */
57539 int buf_use; /**< Buffers in use -- cannot alloc */
57540@@ -1046,7 +1047,7 @@ struct drm_device {
57541 /*@{ */
57542 unsigned long counters;
57543 enum drm_stat_type types[15];
57544- atomic_t counts[15];
57545+ atomic_unchecked_t counts[15];
57546 /*@} */
57547
57548 struct list_head filelist;
57549diff -urNp linux-3.1.1/include/drm/ttm/ttm_memory.h linux-3.1.1/include/drm/ttm/ttm_memory.h
57550--- linux-3.1.1/include/drm/ttm/ttm_memory.h 2011-11-11 15:19:27.000000000 -0500
57551+++ linux-3.1.1/include/drm/ttm/ttm_memory.h 2011-11-16 18:39:08.000000000 -0500
57552@@ -47,7 +47,7 @@
57553
57554 struct ttm_mem_shrink {
57555 int (*do_shrink) (struct ttm_mem_shrink *);
57556-};
57557+} __no_const;
57558
57559 /**
57560 * struct ttm_mem_global - Global memory accounting structure.
57561diff -urNp linux-3.1.1/include/linux/a.out.h linux-3.1.1/include/linux/a.out.h
57562--- linux-3.1.1/include/linux/a.out.h 2011-11-11 15:19:27.000000000 -0500
57563+++ linux-3.1.1/include/linux/a.out.h 2011-11-16 18:39:08.000000000 -0500
57564@@ -39,6 +39,14 @@ enum machine_type {
57565 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
57566 };
57567
57568+/* Constants for the N_FLAGS field */
57569+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57570+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
57571+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
57572+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
57573+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57574+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57575+
57576 #if !defined (N_MAGIC)
57577 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
57578 #endif
57579diff -urNp linux-3.1.1/include/linux/atmdev.h linux-3.1.1/include/linux/atmdev.h
57580--- linux-3.1.1/include/linux/atmdev.h 2011-11-11 15:19:27.000000000 -0500
57581+++ linux-3.1.1/include/linux/atmdev.h 2011-11-16 18:39:08.000000000 -0500
57582@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
57583 #endif
57584
57585 struct k_atm_aal_stats {
57586-#define __HANDLE_ITEM(i) atomic_t i
57587+#define __HANDLE_ITEM(i) atomic_unchecked_t i
57588 __AAL_STAT_ITEMS
57589 #undef __HANDLE_ITEM
57590 };
57591diff -urNp linux-3.1.1/include/linux/binfmts.h linux-3.1.1/include/linux/binfmts.h
57592--- linux-3.1.1/include/linux/binfmts.h 2011-11-11 15:19:27.000000000 -0500
57593+++ linux-3.1.1/include/linux/binfmts.h 2011-11-16 18:39:08.000000000 -0500
57594@@ -88,6 +88,7 @@ struct linux_binfmt {
57595 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
57596 int (*load_shlib)(struct file *);
57597 int (*core_dump)(struct coredump_params *cprm);
57598+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
57599 unsigned long min_coredump; /* minimal dump size */
57600 };
57601
57602diff -urNp linux-3.1.1/include/linux/blkdev.h linux-3.1.1/include/linux/blkdev.h
57603--- linux-3.1.1/include/linux/blkdev.h 2011-11-11 15:19:27.000000000 -0500
57604+++ linux-3.1.1/include/linux/blkdev.h 2011-11-16 18:39:08.000000000 -0500
57605@@ -1321,7 +1321,7 @@ struct block_device_operations {
57606 /* this callback is with swap_lock and sometimes page table lock held */
57607 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
57608 struct module *owner;
57609-};
57610+} __do_const;
57611
57612 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
57613 unsigned long);
57614diff -urNp linux-3.1.1/include/linux/blktrace_api.h linux-3.1.1/include/linux/blktrace_api.h
57615--- linux-3.1.1/include/linux/blktrace_api.h 2011-11-11 15:19:27.000000000 -0500
57616+++ linux-3.1.1/include/linux/blktrace_api.h 2011-11-16 18:39:08.000000000 -0500
57617@@ -162,7 +162,7 @@ struct blk_trace {
57618 struct dentry *dir;
57619 struct dentry *dropped_file;
57620 struct dentry *msg_file;
57621- atomic_t dropped;
57622+ atomic_unchecked_t dropped;
57623 };
57624
57625 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
57626diff -urNp linux-3.1.1/include/linux/byteorder/little_endian.h linux-3.1.1/include/linux/byteorder/little_endian.h
57627--- linux-3.1.1/include/linux/byteorder/little_endian.h 2011-11-11 15:19:27.000000000 -0500
57628+++ linux-3.1.1/include/linux/byteorder/little_endian.h 2011-11-16 18:39:08.000000000 -0500
57629@@ -42,51 +42,51 @@
57630
57631 static inline __le64 __cpu_to_le64p(const __u64 *p)
57632 {
57633- return (__force __le64)*p;
57634+ return (__force const __le64)*p;
57635 }
57636 static inline __u64 __le64_to_cpup(const __le64 *p)
57637 {
57638- return (__force __u64)*p;
57639+ return (__force const __u64)*p;
57640 }
57641 static inline __le32 __cpu_to_le32p(const __u32 *p)
57642 {
57643- return (__force __le32)*p;
57644+ return (__force const __le32)*p;
57645 }
57646 static inline __u32 __le32_to_cpup(const __le32 *p)
57647 {
57648- return (__force __u32)*p;
57649+ return (__force const __u32)*p;
57650 }
57651 static inline __le16 __cpu_to_le16p(const __u16 *p)
57652 {
57653- return (__force __le16)*p;
57654+ return (__force const __le16)*p;
57655 }
57656 static inline __u16 __le16_to_cpup(const __le16 *p)
57657 {
57658- return (__force __u16)*p;
57659+ return (__force const __u16)*p;
57660 }
57661 static inline __be64 __cpu_to_be64p(const __u64 *p)
57662 {
57663- return (__force __be64)__swab64p(p);
57664+ return (__force const __be64)__swab64p(p);
57665 }
57666 static inline __u64 __be64_to_cpup(const __be64 *p)
57667 {
57668- return __swab64p((__u64 *)p);
57669+ return __swab64p((const __u64 *)p);
57670 }
57671 static inline __be32 __cpu_to_be32p(const __u32 *p)
57672 {
57673- return (__force __be32)__swab32p(p);
57674+ return (__force const __be32)__swab32p(p);
57675 }
57676 static inline __u32 __be32_to_cpup(const __be32 *p)
57677 {
57678- return __swab32p((__u32 *)p);
57679+ return __swab32p((const __u32 *)p);
57680 }
57681 static inline __be16 __cpu_to_be16p(const __u16 *p)
57682 {
57683- return (__force __be16)__swab16p(p);
57684+ return (__force const __be16)__swab16p(p);
57685 }
57686 static inline __u16 __be16_to_cpup(const __be16 *p)
57687 {
57688- return __swab16p((__u16 *)p);
57689+ return __swab16p((const __u16 *)p);
57690 }
57691 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
57692 #define __le64_to_cpus(x) do { (void)(x); } while (0)
57693diff -urNp linux-3.1.1/include/linux/cache.h linux-3.1.1/include/linux/cache.h
57694--- linux-3.1.1/include/linux/cache.h 2011-11-11 15:19:27.000000000 -0500
57695+++ linux-3.1.1/include/linux/cache.h 2011-11-16 18:39:08.000000000 -0500
57696@@ -16,6 +16,10 @@
57697 #define __read_mostly
57698 #endif
57699
57700+#ifndef __read_only
57701+#define __read_only __read_mostly
57702+#endif
57703+
57704 #ifndef ____cacheline_aligned
57705 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
57706 #endif
57707diff -urNp linux-3.1.1/include/linux/capability.h linux-3.1.1/include/linux/capability.h
57708--- linux-3.1.1/include/linux/capability.h 2011-11-11 15:19:27.000000000 -0500
57709+++ linux-3.1.1/include/linux/capability.h 2011-11-16 18:40:31.000000000 -0500
57710@@ -547,6 +547,9 @@ extern bool capable(int cap);
57711 extern bool ns_capable(struct user_namespace *ns, int cap);
57712 extern bool task_ns_capable(struct task_struct *t, int cap);
57713 extern bool nsown_capable(int cap);
57714+extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
57715+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
57716+extern bool capable_nolog(int cap);
57717
57718 /* audit system wants to get cap info from files as well */
57719 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
57720diff -urNp linux-3.1.1/include/linux/cleancache.h linux-3.1.1/include/linux/cleancache.h
57721--- linux-3.1.1/include/linux/cleancache.h 2011-11-11 15:19:27.000000000 -0500
57722+++ linux-3.1.1/include/linux/cleancache.h 2011-11-16 18:39:08.000000000 -0500
57723@@ -31,7 +31,7 @@ struct cleancache_ops {
57724 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
57725 void (*flush_inode)(int, struct cleancache_filekey);
57726 void (*flush_fs)(int);
57727-};
57728+} __no_const;
57729
57730 extern struct cleancache_ops
57731 cleancache_register_ops(struct cleancache_ops *ops);
57732diff -urNp linux-3.1.1/include/linux/compiler-gcc4.h linux-3.1.1/include/linux/compiler-gcc4.h
57733--- linux-3.1.1/include/linux/compiler-gcc4.h 2011-11-11 15:19:27.000000000 -0500
57734+++ linux-3.1.1/include/linux/compiler-gcc4.h 2011-11-16 18:39:08.000000000 -0500
57735@@ -31,6 +31,12 @@
57736
57737
57738 #if __GNUC_MINOR__ >= 5
57739+
57740+#ifdef CONSTIFY_PLUGIN
57741+#define __no_const __attribute__((no_const))
57742+#define __do_const __attribute__((do_const))
57743+#endif
57744+
57745 /*
57746 * Mark a position in code as unreachable. This can be used to
57747 * suppress control flow warnings after asm blocks that transfer
57748@@ -46,6 +52,11 @@
57749 #define __noclone __attribute__((__noclone__))
57750
57751 #endif
57752+
57753+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
57754+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
57755+#define __bos0(ptr) __bos((ptr), 0)
57756+#define __bos1(ptr) __bos((ptr), 1)
57757 #endif
57758
57759 #if __GNUC_MINOR__ > 0
57760diff -urNp linux-3.1.1/include/linux/compiler.h linux-3.1.1/include/linux/compiler.h
57761--- linux-3.1.1/include/linux/compiler.h 2011-11-11 15:19:27.000000000 -0500
57762+++ linux-3.1.1/include/linux/compiler.h 2011-11-16 18:39:08.000000000 -0500
57763@@ -5,31 +5,62 @@
57764
57765 #ifdef __CHECKER__
57766 # define __user __attribute__((noderef, address_space(1)))
57767+# define __force_user __force __user
57768 # define __kernel __attribute__((address_space(0)))
57769+# define __force_kernel __force __kernel
57770 # define __safe __attribute__((safe))
57771 # define __force __attribute__((force))
57772 # define __nocast __attribute__((nocast))
57773 # define __iomem __attribute__((noderef, address_space(2)))
57774+# define __force_iomem __force __iomem
57775 # define __acquires(x) __attribute__((context(x,0,1)))
57776 # define __releases(x) __attribute__((context(x,1,0)))
57777 # define __acquire(x) __context__(x,1)
57778 # define __release(x) __context__(x,-1)
57779 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
57780 # define __percpu __attribute__((noderef, address_space(3)))
57781+# define __force_percpu __force __percpu
57782 #ifdef CONFIG_SPARSE_RCU_POINTER
57783 # define __rcu __attribute__((noderef, address_space(4)))
57784+# define __force_rcu __force __rcu
57785 #else
57786 # define __rcu
57787+# define __force_rcu
57788 #endif
57789 extern void __chk_user_ptr(const volatile void __user *);
57790 extern void __chk_io_ptr(const volatile void __iomem *);
57791+#elif defined(CHECKER_PLUGIN)
57792+//# define __user
57793+//# define __force_user
57794+//# define __kernel
57795+//# define __force_kernel
57796+# define __safe
57797+# define __force
57798+# define __nocast
57799+# define __iomem
57800+# define __force_iomem
57801+# define __chk_user_ptr(x) (void)0
57802+# define __chk_io_ptr(x) (void)0
57803+# define __builtin_warning(x, y...) (1)
57804+# define __acquires(x)
57805+# define __releases(x)
57806+# define __acquire(x) (void)0
57807+# define __release(x) (void)0
57808+# define __cond_lock(x,c) (c)
57809+# define __percpu
57810+# define __force_percpu
57811+# define __rcu
57812+# define __force_rcu
57813 #else
57814 # define __user
57815+# define __force_user
57816 # define __kernel
57817+# define __force_kernel
57818 # define __safe
57819 # define __force
57820 # define __nocast
57821 # define __iomem
57822+# define __force_iomem
57823 # define __chk_user_ptr(x) (void)0
57824 # define __chk_io_ptr(x) (void)0
57825 # define __builtin_warning(x, y...) (1)
57826@@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile
57827 # define __release(x) (void)0
57828 # define __cond_lock(x,c) (c)
57829 # define __percpu
57830+# define __force_percpu
57831 # define __rcu
57832+# define __force_rcu
57833 #endif
57834
57835 #ifdef __KERNEL__
57836@@ -264,6 +297,14 @@ void ftrace_likely_update(struct ftrace_
57837 # define __attribute_const__ /* unimplemented */
57838 #endif
57839
57840+#ifndef __no_const
57841+# define __no_const
57842+#endif
57843+
57844+#ifndef __do_const
57845+# define __do_const
57846+#endif
57847+
57848 /*
57849 * Tell gcc if a function is cold. The compiler will assume any path
57850 * directly leading to the call is unlikely.
57851@@ -273,6 +314,22 @@ void ftrace_likely_update(struct ftrace_
57852 #define __cold
57853 #endif
57854
57855+#ifndef __alloc_size
57856+#define __alloc_size(...)
57857+#endif
57858+
57859+#ifndef __bos
57860+#define __bos(ptr, arg)
57861+#endif
57862+
57863+#ifndef __bos0
57864+#define __bos0(ptr)
57865+#endif
57866+
57867+#ifndef __bos1
57868+#define __bos1(ptr)
57869+#endif
57870+
57871 /* Simple shorthand for a section definition */
57872 #ifndef __section
57873 # define __section(S) __attribute__ ((__section__(#S)))
57874@@ -306,6 +363,7 @@ void ftrace_likely_update(struct ftrace_
57875 * use is to mediate communication between process-level code and irq/NMI
57876 * handlers, all running on the same CPU.
57877 */
57878-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
57879+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
57880+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
57881
57882 #endif /* __LINUX_COMPILER_H */
57883diff -urNp linux-3.1.1/include/linux/cpuset.h linux-3.1.1/include/linux/cpuset.h
57884--- linux-3.1.1/include/linux/cpuset.h 2011-11-11 15:19:27.000000000 -0500
57885+++ linux-3.1.1/include/linux/cpuset.h 2011-11-16 18:39:08.000000000 -0500
57886@@ -118,7 +118,7 @@ static inline void put_mems_allowed(void
57887 * nodemask.
57888 */
57889 smp_mb();
57890- --ACCESS_ONCE(current->mems_allowed_change_disable);
57891+ --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
57892 }
57893
57894 static inline void set_mems_allowed(nodemask_t nodemask)
57895diff -urNp linux-3.1.1/include/linux/crypto.h linux-3.1.1/include/linux/crypto.h
57896--- linux-3.1.1/include/linux/crypto.h 2011-11-11 15:19:27.000000000 -0500
57897+++ linux-3.1.1/include/linux/crypto.h 2011-11-16 18:39:08.000000000 -0500
57898@@ -361,7 +361,7 @@ struct cipher_tfm {
57899 const u8 *key, unsigned int keylen);
57900 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57901 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57902-};
57903+} __no_const;
57904
57905 struct hash_tfm {
57906 int (*init)(struct hash_desc *desc);
57907@@ -382,13 +382,13 @@ struct compress_tfm {
57908 int (*cot_decompress)(struct crypto_tfm *tfm,
57909 const u8 *src, unsigned int slen,
57910 u8 *dst, unsigned int *dlen);
57911-};
57912+} __no_const;
57913
57914 struct rng_tfm {
57915 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
57916 unsigned int dlen);
57917 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
57918-};
57919+} __no_const;
57920
57921 #define crt_ablkcipher crt_u.ablkcipher
57922 #define crt_aead crt_u.aead
57923diff -urNp linux-3.1.1/include/linux/decompress/mm.h linux-3.1.1/include/linux/decompress/mm.h
57924--- linux-3.1.1/include/linux/decompress/mm.h 2011-11-11 15:19:27.000000000 -0500
57925+++ linux-3.1.1/include/linux/decompress/mm.h 2011-11-16 18:39:08.000000000 -0500
57926@@ -77,7 +77,7 @@ static void free(void *where)
57927 * warnings when not needed (indeed large_malloc / large_free are not
57928 * needed by inflate */
57929
57930-#define malloc(a) kmalloc(a, GFP_KERNEL)
57931+#define malloc(a) kmalloc((a), GFP_KERNEL)
57932 #define free(a) kfree(a)
57933
57934 #define large_malloc(a) vmalloc(a)
57935diff -urNp linux-3.1.1/include/linux/dma-mapping.h linux-3.1.1/include/linux/dma-mapping.h
57936--- linux-3.1.1/include/linux/dma-mapping.h 2011-11-11 15:19:27.000000000 -0500
57937+++ linux-3.1.1/include/linux/dma-mapping.h 2011-11-16 18:39:08.000000000 -0500
57938@@ -42,7 +42,7 @@ struct dma_map_ops {
57939 int (*dma_supported)(struct device *dev, u64 mask);
57940 int (*set_dma_mask)(struct device *dev, u64 mask);
57941 int is_phys;
57942-};
57943+} __do_const;
57944
57945 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
57946
57947diff -urNp linux-3.1.1/include/linux/efi.h linux-3.1.1/include/linux/efi.h
57948--- linux-3.1.1/include/linux/efi.h 2011-11-11 15:19:27.000000000 -0500
57949+++ linux-3.1.1/include/linux/efi.h 2011-11-16 18:39:08.000000000 -0500
57950@@ -446,7 +446,7 @@ struct efivar_operations {
57951 efi_get_variable_t *get_variable;
57952 efi_get_next_variable_t *get_next_variable;
57953 efi_set_variable_t *set_variable;
57954-};
57955+} __no_const;
57956
57957 struct efivars {
57958 /*
57959diff -urNp linux-3.1.1/include/linux/elf.h linux-3.1.1/include/linux/elf.h
57960--- linux-3.1.1/include/linux/elf.h 2011-11-11 15:19:27.000000000 -0500
57961+++ linux-3.1.1/include/linux/elf.h 2011-11-16 18:39:08.000000000 -0500
57962@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
57963 #define PT_GNU_EH_FRAME 0x6474e550
57964
57965 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
57966+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
57967+
57968+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
57969+
57970+/* Constants for the e_flags field */
57971+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57972+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
57973+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
57974+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
57975+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57976+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57977
57978 /*
57979 * Extended Numbering
57980@@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
57981 #define DT_DEBUG 21
57982 #define DT_TEXTREL 22
57983 #define DT_JMPREL 23
57984+#define DT_FLAGS 30
57985+ #define DF_TEXTREL 0x00000004
57986 #define DT_ENCODING 32
57987 #define OLD_DT_LOOS 0x60000000
57988 #define DT_LOOS 0x6000000d
57989@@ -252,6 +265,19 @@ typedef struct elf64_hdr {
57990 #define PF_W 0x2
57991 #define PF_X 0x1
57992
57993+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
57994+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
57995+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
57996+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
57997+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
57998+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
57999+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
58000+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
58001+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
58002+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
58003+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
58004+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
58005+
58006 typedef struct elf32_phdr{
58007 Elf32_Word p_type;
58008 Elf32_Off p_offset;
58009@@ -344,6 +370,8 @@ typedef struct elf64_shdr {
58010 #define EI_OSABI 7
58011 #define EI_PAD 8
58012
58013+#define EI_PAX 14
58014+
58015 #define ELFMAG0 0x7f /* EI_MAG */
58016 #define ELFMAG1 'E'
58017 #define ELFMAG2 'L'
58018@@ -422,6 +450,7 @@ extern Elf32_Dyn _DYNAMIC [];
58019 #define elf_note elf32_note
58020 #define elf_addr_t Elf32_Off
58021 #define Elf_Half Elf32_Half
58022+#define elf_dyn Elf32_Dyn
58023
58024 #else
58025
58026@@ -432,6 +461,7 @@ extern Elf64_Dyn _DYNAMIC [];
58027 #define elf_note elf64_note
58028 #define elf_addr_t Elf64_Off
58029 #define Elf_Half Elf64_Half
58030+#define elf_dyn Elf64_Dyn
58031
58032 #endif
58033
58034diff -urNp linux-3.1.1/include/linux/filter.h linux-3.1.1/include/linux/filter.h
58035--- linux-3.1.1/include/linux/filter.h 2011-11-11 15:19:27.000000000 -0500
58036+++ linux-3.1.1/include/linux/filter.h 2011-11-20 19:21:53.000000000 -0500
58037@@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_A
58038
58039 struct sk_buff;
58040 struct sock;
58041+struct bpf_jit_work;
58042
58043 struct sk_filter
58044 {
58045@@ -141,6 +142,9 @@ struct sk_filter
58046 unsigned int len; /* Number of filter blocks */
58047 unsigned int (*bpf_func)(const struct sk_buff *skb,
58048 const struct sock_filter *filter);
58049+#ifdef CONFIG_BPF_JIT
58050+ struct bpf_jit_work *work;
58051+#endif
58052 struct rcu_head rcu;
58053 struct sock_filter insns[0];
58054 };
58055diff -urNp linux-3.1.1/include/linux/firewire.h linux-3.1.1/include/linux/firewire.h
58056--- linux-3.1.1/include/linux/firewire.h 2011-11-11 15:19:27.000000000 -0500
58057+++ linux-3.1.1/include/linux/firewire.h 2011-11-16 18:39:08.000000000 -0500
58058@@ -428,7 +428,7 @@ struct fw_iso_context {
58059 union {
58060 fw_iso_callback_t sc;
58061 fw_iso_mc_callback_t mc;
58062- } callback;
58063+ } __no_const callback;
58064 void *callback_data;
58065 };
58066
58067diff -urNp linux-3.1.1/include/linux/fscache-cache.h linux-3.1.1/include/linux/fscache-cache.h
58068--- linux-3.1.1/include/linux/fscache-cache.h 2011-11-11 15:19:27.000000000 -0500
58069+++ linux-3.1.1/include/linux/fscache-cache.h 2011-11-16 18:39:08.000000000 -0500
58070@@ -102,7 +102,7 @@ struct fscache_operation {
58071 fscache_operation_release_t release;
58072 };
58073
58074-extern atomic_t fscache_op_debug_id;
58075+extern atomic_unchecked_t fscache_op_debug_id;
58076 extern void fscache_op_work_func(struct work_struct *work);
58077
58078 extern void fscache_enqueue_operation(struct fscache_operation *);
58079@@ -122,7 +122,7 @@ static inline void fscache_operation_ini
58080 {
58081 INIT_WORK(&op->work, fscache_op_work_func);
58082 atomic_set(&op->usage, 1);
58083- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
58084+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
58085 op->processor = processor;
58086 op->release = release;
58087 INIT_LIST_HEAD(&op->pend_link);
58088diff -urNp linux-3.1.1/include/linux/fs.h linux-3.1.1/include/linux/fs.h
58089--- linux-3.1.1/include/linux/fs.h 2011-11-11 15:19:27.000000000 -0500
58090+++ linux-3.1.1/include/linux/fs.h 2011-11-16 23:39:39.000000000 -0500
58091@@ -1588,7 +1588,8 @@ struct file_operations {
58092 int (*setlease)(struct file *, long, struct file_lock **);
58093 long (*fallocate)(struct file *file, int mode, loff_t offset,
58094 loff_t len);
58095-};
58096+} __do_const;
58097+typedef struct file_operations __no_const file_operations_no_const;
58098
58099 struct inode_operations {
58100 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
58101diff -urNp linux-3.1.1/include/linux/fsnotify.h linux-3.1.1/include/linux/fsnotify.h
58102--- linux-3.1.1/include/linux/fsnotify.h 2011-11-11 15:19:27.000000000 -0500
58103+++ linux-3.1.1/include/linux/fsnotify.h 2011-11-16 18:39:08.000000000 -0500
58104@@ -314,7 +314,7 @@ static inline void fsnotify_change(struc
58105 */
58106 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
58107 {
58108- return kstrdup(name, GFP_KERNEL);
58109+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
58110 }
58111
58112 /*
58113diff -urNp linux-3.1.1/include/linux/fs_struct.h linux-3.1.1/include/linux/fs_struct.h
58114--- linux-3.1.1/include/linux/fs_struct.h 2011-11-11 15:19:27.000000000 -0500
58115+++ linux-3.1.1/include/linux/fs_struct.h 2011-11-16 18:39:08.000000000 -0500
58116@@ -6,7 +6,7 @@
58117 #include <linux/seqlock.h>
58118
58119 struct fs_struct {
58120- int users;
58121+ atomic_t users;
58122 spinlock_t lock;
58123 seqcount_t seq;
58124 int umask;
58125diff -urNp linux-3.1.1/include/linux/ftrace_event.h linux-3.1.1/include/linux/ftrace_event.h
58126--- linux-3.1.1/include/linux/ftrace_event.h 2011-11-11 15:19:27.000000000 -0500
58127+++ linux-3.1.1/include/linux/ftrace_event.h 2011-11-16 18:39:08.000000000 -0500
58128@@ -97,7 +97,7 @@ struct trace_event_functions {
58129 trace_print_func raw;
58130 trace_print_func hex;
58131 trace_print_func binary;
58132-};
58133+} __no_const;
58134
58135 struct trace_event {
58136 struct hlist_node node;
58137@@ -252,7 +252,7 @@ extern int trace_define_field(struct ftr
58138 extern int trace_add_event_call(struct ftrace_event_call *call);
58139 extern void trace_remove_event_call(struct ftrace_event_call *call);
58140
58141-#define is_signed_type(type) (((type)(-1)) < 0)
58142+#define is_signed_type(type) (((type)(-1)) < (type)1)
58143
58144 int trace_set_clr_event(const char *system, const char *event, int set);
58145
58146diff -urNp linux-3.1.1/include/linux/genhd.h linux-3.1.1/include/linux/genhd.h
58147--- linux-3.1.1/include/linux/genhd.h 2011-11-11 15:19:27.000000000 -0500
58148+++ linux-3.1.1/include/linux/genhd.h 2011-11-16 18:39:08.000000000 -0500
58149@@ -184,7 +184,7 @@ struct gendisk {
58150 struct kobject *slave_dir;
58151
58152 struct timer_rand_state *random;
58153- atomic_t sync_io; /* RAID */
58154+ atomic_unchecked_t sync_io; /* RAID */
58155 struct disk_events *ev;
58156 #ifdef CONFIG_BLK_DEV_INTEGRITY
58157 struct blk_integrity *integrity;
58158diff -urNp linux-3.1.1/include/linux/gracl.h linux-3.1.1/include/linux/gracl.h
58159--- linux-3.1.1/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
58160+++ linux-3.1.1/include/linux/gracl.h 2011-11-16 18:40:31.000000000 -0500
58161@@ -0,0 +1,317 @@
58162+#ifndef GR_ACL_H
58163+#define GR_ACL_H
58164+
58165+#include <linux/grdefs.h>
58166+#include <linux/resource.h>
58167+#include <linux/capability.h>
58168+#include <linux/dcache.h>
58169+#include <asm/resource.h>
58170+
58171+/* Major status information */
58172+
58173+#define GR_VERSION "grsecurity 2.2.2"
58174+#define GRSECURITY_VERSION 0x2202
58175+
58176+enum {
58177+ GR_SHUTDOWN = 0,
58178+ GR_ENABLE = 1,
58179+ GR_SPROLE = 2,
58180+ GR_RELOAD = 3,
58181+ GR_SEGVMOD = 4,
58182+ GR_STATUS = 5,
58183+ GR_UNSPROLE = 6,
58184+ GR_PASSSET = 7,
58185+ GR_SPROLEPAM = 8,
58186+};
58187+
58188+/* Password setup definitions
58189+ * kernel/grhash.c */
58190+enum {
58191+ GR_PW_LEN = 128,
58192+ GR_SALT_LEN = 16,
58193+ GR_SHA_LEN = 32,
58194+};
58195+
58196+enum {
58197+ GR_SPROLE_LEN = 64,
58198+};
58199+
58200+enum {
58201+ GR_NO_GLOB = 0,
58202+ GR_REG_GLOB,
58203+ GR_CREATE_GLOB
58204+};
58205+
58206+#define GR_NLIMITS 32
58207+
58208+/* Begin Data Structures */
58209+
58210+struct sprole_pw {
58211+ unsigned char *rolename;
58212+ unsigned char salt[GR_SALT_LEN];
58213+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
58214+};
58215+
58216+struct name_entry {
58217+ __u32 key;
58218+ ino_t inode;
58219+ dev_t device;
58220+ char *name;
58221+ __u16 len;
58222+ __u8 deleted;
58223+ struct name_entry *prev;
58224+ struct name_entry *next;
58225+};
58226+
58227+struct inodev_entry {
58228+ struct name_entry *nentry;
58229+ struct inodev_entry *prev;
58230+ struct inodev_entry *next;
58231+};
58232+
58233+struct acl_role_db {
58234+ struct acl_role_label **r_hash;
58235+ __u32 r_size;
58236+};
58237+
58238+struct inodev_db {
58239+ struct inodev_entry **i_hash;
58240+ __u32 i_size;
58241+};
58242+
58243+struct name_db {
58244+ struct name_entry **n_hash;
58245+ __u32 n_size;
58246+};
58247+
58248+struct crash_uid {
58249+ uid_t uid;
58250+ unsigned long expires;
58251+};
58252+
58253+struct gr_hash_struct {
58254+ void **table;
58255+ void **nametable;
58256+ void *first;
58257+ __u32 table_size;
58258+ __u32 used_size;
58259+ int type;
58260+};
58261+
58262+/* Userspace Grsecurity ACL data structures */
58263+
58264+struct acl_subject_label {
58265+ char *filename;
58266+ ino_t inode;
58267+ dev_t device;
58268+ __u32 mode;
58269+ kernel_cap_t cap_mask;
58270+ kernel_cap_t cap_lower;
58271+ kernel_cap_t cap_invert_audit;
58272+
58273+ struct rlimit res[GR_NLIMITS];
58274+ __u32 resmask;
58275+
58276+ __u8 user_trans_type;
58277+ __u8 group_trans_type;
58278+ uid_t *user_transitions;
58279+ gid_t *group_transitions;
58280+ __u16 user_trans_num;
58281+ __u16 group_trans_num;
58282+
58283+ __u32 sock_families[2];
58284+ __u32 ip_proto[8];
58285+ __u32 ip_type;
58286+ struct acl_ip_label **ips;
58287+ __u32 ip_num;
58288+ __u32 inaddr_any_override;
58289+
58290+ __u32 crashes;
58291+ unsigned long expires;
58292+
58293+ struct acl_subject_label *parent_subject;
58294+ struct gr_hash_struct *hash;
58295+ struct acl_subject_label *prev;
58296+ struct acl_subject_label *next;
58297+
58298+ struct acl_object_label **obj_hash;
58299+ __u32 obj_hash_size;
58300+ __u16 pax_flags;
58301+};
58302+
58303+struct role_allowed_ip {
58304+ __u32 addr;
58305+ __u32 netmask;
58306+
58307+ struct role_allowed_ip *prev;
58308+ struct role_allowed_ip *next;
58309+};
58310+
58311+struct role_transition {
58312+ char *rolename;
58313+
58314+ struct role_transition *prev;
58315+ struct role_transition *next;
58316+};
58317+
58318+struct acl_role_label {
58319+ char *rolename;
58320+ uid_t uidgid;
58321+ __u16 roletype;
58322+
58323+ __u16 auth_attempts;
58324+ unsigned long expires;
58325+
58326+ struct acl_subject_label *root_label;
58327+ struct gr_hash_struct *hash;
58328+
58329+ struct acl_role_label *prev;
58330+ struct acl_role_label *next;
58331+
58332+ struct role_transition *transitions;
58333+ struct role_allowed_ip *allowed_ips;
58334+ uid_t *domain_children;
58335+ __u16 domain_child_num;
58336+
58337+ struct acl_subject_label **subj_hash;
58338+ __u32 subj_hash_size;
58339+};
58340+
58341+struct user_acl_role_db {
58342+ struct acl_role_label **r_table;
58343+ __u32 num_pointers; /* Number of allocations to track */
58344+ __u32 num_roles; /* Number of roles */
58345+ __u32 num_domain_children; /* Number of domain children */
58346+ __u32 num_subjects; /* Number of subjects */
58347+ __u32 num_objects; /* Number of objects */
58348+};
58349+
58350+struct acl_object_label {
58351+ char *filename;
58352+ ino_t inode;
58353+ dev_t device;
58354+ __u32 mode;
58355+
58356+ struct acl_subject_label *nested;
58357+ struct acl_object_label *globbed;
58358+
58359+ /* next two structures not used */
58360+
58361+ struct acl_object_label *prev;
58362+ struct acl_object_label *next;
58363+};
58364+
58365+struct acl_ip_label {
58366+ char *iface;
58367+ __u32 addr;
58368+ __u32 netmask;
58369+ __u16 low, high;
58370+ __u8 mode;
58371+ __u32 type;
58372+ __u32 proto[8];
58373+
58374+ /* next two structures not used */
58375+
58376+ struct acl_ip_label *prev;
58377+ struct acl_ip_label *next;
58378+};
58379+
58380+struct gr_arg {
58381+ struct user_acl_role_db role_db;
58382+ unsigned char pw[GR_PW_LEN];
58383+ unsigned char salt[GR_SALT_LEN];
58384+ unsigned char sum[GR_SHA_LEN];
58385+ unsigned char sp_role[GR_SPROLE_LEN];
58386+ struct sprole_pw *sprole_pws;
58387+ dev_t segv_device;
58388+ ino_t segv_inode;
58389+ uid_t segv_uid;
58390+ __u16 num_sprole_pws;
58391+ __u16 mode;
58392+};
58393+
58394+struct gr_arg_wrapper {
58395+ struct gr_arg *arg;
58396+ __u32 version;
58397+ __u32 size;
58398+};
58399+
58400+struct subject_map {
58401+ struct acl_subject_label *user;
58402+ struct acl_subject_label *kernel;
58403+ struct subject_map *prev;
58404+ struct subject_map *next;
58405+};
58406+
58407+struct acl_subj_map_db {
58408+ struct subject_map **s_hash;
58409+ __u32 s_size;
58410+};
58411+
58412+/* End Data Structures Section */
58413+
58414+/* Hash functions generated by empirical testing by Brad Spengler
58415+ Makes good use of the low bits of the inode. Generally 0-1 times
58416+ in loop for successful match. 0-3 for unsuccessful match.
58417+ Shift/add algorithm with modulus of table size and an XOR*/
58418+
58419+static __inline__ unsigned int
58420+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
58421+{
58422+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
58423+}
58424+
58425+ static __inline__ unsigned int
58426+shash(const struct acl_subject_label *userp, const unsigned int sz)
58427+{
58428+ return ((const unsigned long)userp % sz);
58429+}
58430+
58431+static __inline__ unsigned int
58432+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
58433+{
58434+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
58435+}
58436+
58437+static __inline__ unsigned int
58438+nhash(const char *name, const __u16 len, const unsigned int sz)
58439+{
58440+ return full_name_hash((const unsigned char *)name, len) % sz;
58441+}
58442+
58443+#define FOR_EACH_ROLE_START(role) \
58444+ role = role_list; \
58445+ while (role) {
58446+
58447+#define FOR_EACH_ROLE_END(role) \
58448+ role = role->prev; \
58449+ }
58450+
58451+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
58452+ subj = NULL; \
58453+ iter = 0; \
58454+ while (iter < role->subj_hash_size) { \
58455+ if (subj == NULL) \
58456+ subj = role->subj_hash[iter]; \
58457+ if (subj == NULL) { \
58458+ iter++; \
58459+ continue; \
58460+ }
58461+
58462+#define FOR_EACH_SUBJECT_END(subj,iter) \
58463+ subj = subj->next; \
58464+ if (subj == NULL) \
58465+ iter++; \
58466+ }
58467+
58468+
58469+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
58470+ subj = role->hash->first; \
58471+ while (subj != NULL) {
58472+
58473+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
58474+ subj = subj->next; \
58475+ }
58476+
58477+#endif
58478+
58479diff -urNp linux-3.1.1/include/linux/gralloc.h linux-3.1.1/include/linux/gralloc.h
58480--- linux-3.1.1/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
58481+++ linux-3.1.1/include/linux/gralloc.h 2011-11-16 18:40:31.000000000 -0500
58482@@ -0,0 +1,9 @@
58483+#ifndef __GRALLOC_H
58484+#define __GRALLOC_H
58485+
58486+void acl_free_all(void);
58487+int acl_alloc_stack_init(unsigned long size);
58488+void *acl_alloc(unsigned long len);
58489+void *acl_alloc_num(unsigned long num, unsigned long len);
58490+
58491+#endif
58492diff -urNp linux-3.1.1/include/linux/grdefs.h linux-3.1.1/include/linux/grdefs.h
58493--- linux-3.1.1/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
58494+++ linux-3.1.1/include/linux/grdefs.h 2011-11-16 18:40:31.000000000 -0500
58495@@ -0,0 +1,140 @@
58496+#ifndef GRDEFS_H
58497+#define GRDEFS_H
58498+
58499+/* Begin grsecurity status declarations */
58500+
58501+enum {
58502+ GR_READY = 0x01,
58503+ GR_STATUS_INIT = 0x00 // disabled state
58504+};
58505+
58506+/* Begin ACL declarations */
58507+
58508+/* Role flags */
58509+
58510+enum {
58511+ GR_ROLE_USER = 0x0001,
58512+ GR_ROLE_GROUP = 0x0002,
58513+ GR_ROLE_DEFAULT = 0x0004,
58514+ GR_ROLE_SPECIAL = 0x0008,
58515+ GR_ROLE_AUTH = 0x0010,
58516+ GR_ROLE_NOPW = 0x0020,
58517+ GR_ROLE_GOD = 0x0040,
58518+ GR_ROLE_LEARN = 0x0080,
58519+ GR_ROLE_TPE = 0x0100,
58520+ GR_ROLE_DOMAIN = 0x0200,
58521+ GR_ROLE_PAM = 0x0400,
58522+ GR_ROLE_PERSIST = 0x0800
58523+};
58524+
58525+/* ACL Subject and Object mode flags */
58526+enum {
58527+ GR_DELETED = 0x80000000
58528+};
58529+
58530+/* ACL Object-only mode flags */
58531+enum {
58532+ GR_READ = 0x00000001,
58533+ GR_APPEND = 0x00000002,
58534+ GR_WRITE = 0x00000004,
58535+ GR_EXEC = 0x00000008,
58536+ GR_FIND = 0x00000010,
58537+ GR_INHERIT = 0x00000020,
58538+ GR_SETID = 0x00000040,
58539+ GR_CREATE = 0x00000080,
58540+ GR_DELETE = 0x00000100,
58541+ GR_LINK = 0x00000200,
58542+ GR_AUDIT_READ = 0x00000400,
58543+ GR_AUDIT_APPEND = 0x00000800,
58544+ GR_AUDIT_WRITE = 0x00001000,
58545+ GR_AUDIT_EXEC = 0x00002000,
58546+ GR_AUDIT_FIND = 0x00004000,
58547+ GR_AUDIT_INHERIT= 0x00008000,
58548+ GR_AUDIT_SETID = 0x00010000,
58549+ GR_AUDIT_CREATE = 0x00020000,
58550+ GR_AUDIT_DELETE = 0x00040000,
58551+ GR_AUDIT_LINK = 0x00080000,
58552+ GR_PTRACERD = 0x00100000,
58553+ GR_NOPTRACE = 0x00200000,
58554+ GR_SUPPRESS = 0x00400000,
58555+ GR_NOLEARN = 0x00800000,
58556+ GR_INIT_TRANSFER= 0x01000000
58557+};
58558+
58559+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
58560+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
58561+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
58562+
58563+/* ACL subject-only mode flags */
58564+enum {
58565+ GR_KILL = 0x00000001,
58566+ GR_VIEW = 0x00000002,
58567+ GR_PROTECTED = 0x00000004,
58568+ GR_LEARN = 0x00000008,
58569+ GR_OVERRIDE = 0x00000010,
58570+ /* just a placeholder, this mode is only used in userspace */
58571+ GR_DUMMY = 0x00000020,
58572+ GR_PROTSHM = 0x00000040,
58573+ GR_KILLPROC = 0x00000080,
58574+ GR_KILLIPPROC = 0x00000100,
58575+ /* just a placeholder, this mode is only used in userspace */
58576+ GR_NOTROJAN = 0x00000200,
58577+ GR_PROTPROCFD = 0x00000400,
58578+ GR_PROCACCT = 0x00000800,
58579+ GR_RELAXPTRACE = 0x00001000,
58580+ GR_NESTED = 0x00002000,
58581+ GR_INHERITLEARN = 0x00004000,
58582+ GR_PROCFIND = 0x00008000,
58583+ GR_POVERRIDE = 0x00010000,
58584+ GR_KERNELAUTH = 0x00020000,
58585+ GR_ATSECURE = 0x00040000,
58586+ GR_SHMEXEC = 0x00080000
58587+};
58588+
58589+enum {
58590+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
58591+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
58592+ GR_PAX_ENABLE_MPROTECT = 0x0004,
58593+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
58594+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
58595+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
58596+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
58597+ GR_PAX_DISABLE_MPROTECT = 0x0400,
58598+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
58599+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
58600+};
58601+
58602+enum {
58603+ GR_ID_USER = 0x01,
58604+ GR_ID_GROUP = 0x02,
58605+};
58606+
58607+enum {
58608+ GR_ID_ALLOW = 0x01,
58609+ GR_ID_DENY = 0x02,
58610+};
58611+
58612+#define GR_CRASH_RES 31
58613+#define GR_UIDTABLE_MAX 500
58614+
58615+/* begin resource learning section */
58616+enum {
58617+ GR_RLIM_CPU_BUMP = 60,
58618+ GR_RLIM_FSIZE_BUMP = 50000,
58619+ GR_RLIM_DATA_BUMP = 10000,
58620+ GR_RLIM_STACK_BUMP = 1000,
58621+ GR_RLIM_CORE_BUMP = 10000,
58622+ GR_RLIM_RSS_BUMP = 500000,
58623+ GR_RLIM_NPROC_BUMP = 1,
58624+ GR_RLIM_NOFILE_BUMP = 5,
58625+ GR_RLIM_MEMLOCK_BUMP = 50000,
58626+ GR_RLIM_AS_BUMP = 500000,
58627+ GR_RLIM_LOCKS_BUMP = 2,
58628+ GR_RLIM_SIGPENDING_BUMP = 5,
58629+ GR_RLIM_MSGQUEUE_BUMP = 10000,
58630+ GR_RLIM_NICE_BUMP = 1,
58631+ GR_RLIM_RTPRIO_BUMP = 1,
58632+ GR_RLIM_RTTIME_BUMP = 1000000
58633+};
58634+
58635+#endif
58636diff -urNp linux-3.1.1/include/linux/grinternal.h linux-3.1.1/include/linux/grinternal.h
58637--- linux-3.1.1/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
58638+++ linux-3.1.1/include/linux/grinternal.h 2011-11-16 18:40:31.000000000 -0500
58639@@ -0,0 +1,220 @@
58640+#ifndef __GRINTERNAL_H
58641+#define __GRINTERNAL_H
58642+
58643+#ifdef CONFIG_GRKERNSEC
58644+
58645+#include <linux/fs.h>
58646+#include <linux/mnt_namespace.h>
58647+#include <linux/nsproxy.h>
58648+#include <linux/gracl.h>
58649+#include <linux/grdefs.h>
58650+#include <linux/grmsg.h>
58651+
58652+void gr_add_learn_entry(const char *fmt, ...)
58653+ __attribute__ ((format (printf, 1, 2)));
58654+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
58655+ const struct vfsmount *mnt);
58656+__u32 gr_check_create(const struct dentry *new_dentry,
58657+ const struct dentry *parent,
58658+ const struct vfsmount *mnt, const __u32 mode);
58659+int gr_check_protected_task(const struct task_struct *task);
58660+__u32 to_gr_audit(const __u32 reqmode);
58661+int gr_set_acls(const int type);
58662+int gr_apply_subject_to_task(struct task_struct *task);
58663+int gr_acl_is_enabled(void);
58664+char gr_roletype_to_char(void);
58665+
58666+void gr_handle_alertkill(struct task_struct *task);
58667+char *gr_to_filename(const struct dentry *dentry,
58668+ const struct vfsmount *mnt);
58669+char *gr_to_filename1(const struct dentry *dentry,
58670+ const struct vfsmount *mnt);
58671+char *gr_to_filename2(const struct dentry *dentry,
58672+ const struct vfsmount *mnt);
58673+char *gr_to_filename3(const struct dentry *dentry,
58674+ const struct vfsmount *mnt);
58675+
58676+extern int grsec_enable_harden_ptrace;
58677+extern int grsec_enable_link;
58678+extern int grsec_enable_fifo;
58679+extern int grsec_enable_execve;
58680+extern int grsec_enable_shm;
58681+extern int grsec_enable_execlog;
58682+extern int grsec_enable_signal;
58683+extern int grsec_enable_audit_ptrace;
58684+extern int grsec_enable_forkfail;
58685+extern int grsec_enable_time;
58686+extern int grsec_enable_rofs;
58687+extern int grsec_enable_chroot_shmat;
58688+extern int grsec_enable_chroot_mount;
58689+extern int grsec_enable_chroot_double;
58690+extern int grsec_enable_chroot_pivot;
58691+extern int grsec_enable_chroot_chdir;
58692+extern int grsec_enable_chroot_chmod;
58693+extern int grsec_enable_chroot_mknod;
58694+extern int grsec_enable_chroot_fchdir;
58695+extern int grsec_enable_chroot_nice;
58696+extern int grsec_enable_chroot_execlog;
58697+extern int grsec_enable_chroot_caps;
58698+extern int grsec_enable_chroot_sysctl;
58699+extern int grsec_enable_chroot_unix;
58700+extern int grsec_enable_tpe;
58701+extern int grsec_tpe_gid;
58702+extern int grsec_enable_tpe_all;
58703+extern int grsec_enable_tpe_invert;
58704+extern int grsec_enable_socket_all;
58705+extern int grsec_socket_all_gid;
58706+extern int grsec_enable_socket_client;
58707+extern int grsec_socket_client_gid;
58708+extern int grsec_enable_socket_server;
58709+extern int grsec_socket_server_gid;
58710+extern int grsec_audit_gid;
58711+extern int grsec_enable_group;
58712+extern int grsec_enable_audit_textrel;
58713+extern int grsec_enable_log_rwxmaps;
58714+extern int grsec_enable_mount;
58715+extern int grsec_enable_chdir;
58716+extern int grsec_resource_logging;
58717+extern int grsec_enable_blackhole;
58718+extern int grsec_lastack_retries;
58719+extern int grsec_enable_brute;
58720+extern int grsec_lock;
58721+
58722+extern spinlock_t grsec_alert_lock;
58723+extern unsigned long grsec_alert_wtime;
58724+extern unsigned long grsec_alert_fyet;
58725+
58726+extern spinlock_t grsec_audit_lock;
58727+
58728+extern rwlock_t grsec_exec_file_lock;
58729+
58730+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
58731+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
58732+ (tsk)->exec_file->f_vfsmnt) : "/")
58733+
58734+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
58735+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
58736+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58737+
58738+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
58739+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
58740+ (tsk)->exec_file->f_vfsmnt) : "/")
58741+
58742+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
58743+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
58744+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58745+
58746+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
58747+
58748+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
58749+
58750+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
58751+ (task)->pid, (cred)->uid, \
58752+ (cred)->euid, (cred)->gid, (cred)->egid, \
58753+ gr_parent_task_fullpath(task), \
58754+ (task)->real_parent->comm, (task)->real_parent->pid, \
58755+ (pcred)->uid, (pcred)->euid, \
58756+ (pcred)->gid, (pcred)->egid
58757+
58758+#define GR_CHROOT_CAPS {{ \
58759+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
58760+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
58761+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
58762+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
58763+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
58764+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
58765+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
58766+
58767+#define security_learn(normal_msg,args...) \
58768+({ \
58769+ read_lock(&grsec_exec_file_lock); \
58770+ gr_add_learn_entry(normal_msg "\n", ## args); \
58771+ read_unlock(&grsec_exec_file_lock); \
58772+})
58773+
58774+enum {
58775+ GR_DO_AUDIT,
58776+ GR_DONT_AUDIT,
58777+ /* used for non-audit messages that we shouldn't kill the task on */
58778+ GR_DONT_AUDIT_GOOD
58779+};
58780+
58781+enum {
58782+ GR_TTYSNIFF,
58783+ GR_RBAC,
58784+ GR_RBAC_STR,
58785+ GR_STR_RBAC,
58786+ GR_RBAC_MODE2,
58787+ GR_RBAC_MODE3,
58788+ GR_FILENAME,
58789+ GR_SYSCTL_HIDDEN,
58790+ GR_NOARGS,
58791+ GR_ONE_INT,
58792+ GR_ONE_INT_TWO_STR,
58793+ GR_ONE_STR,
58794+ GR_STR_INT,
58795+ GR_TWO_STR_INT,
58796+ GR_TWO_INT,
58797+ GR_TWO_U64,
58798+ GR_THREE_INT,
58799+ GR_FIVE_INT_TWO_STR,
58800+ GR_TWO_STR,
58801+ GR_THREE_STR,
58802+ GR_FOUR_STR,
58803+ GR_STR_FILENAME,
58804+ GR_FILENAME_STR,
58805+ GR_FILENAME_TWO_INT,
58806+ GR_FILENAME_TWO_INT_STR,
58807+ GR_TEXTREL,
58808+ GR_PTRACE,
58809+ GR_RESOURCE,
58810+ GR_CAP,
58811+ GR_SIG,
58812+ GR_SIG2,
58813+ GR_CRASH1,
58814+ GR_CRASH2,
58815+ GR_PSACCT,
58816+ GR_RWXMAP
58817+};
58818+
58819+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
58820+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
58821+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
58822+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
58823+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
58824+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
58825+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
58826+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
58827+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
58828+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
58829+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
58830+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
58831+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
58832+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
58833+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
58834+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
58835+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
58836+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
58837+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
58838+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
58839+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
58840+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
58841+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
58842+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
58843+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
58844+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
58845+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
58846+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
58847+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
58848+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
58849+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
58850+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
58851+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
58852+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
58853+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
58854+
58855+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
58856+
58857+#endif
58858+
58859+#endif
58860diff -urNp linux-3.1.1/include/linux/grmsg.h linux-3.1.1/include/linux/grmsg.h
58861--- linux-3.1.1/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
58862+++ linux-3.1.1/include/linux/grmsg.h 2011-11-16 18:40:31.000000000 -0500
58863@@ -0,0 +1,108 @@
58864+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
58865+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
58866+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
58867+#define GR_STOPMOD_MSG "denied modification of module state by "
58868+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
58869+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
58870+#define GR_IOPERM_MSG "denied use of ioperm() by "
58871+#define GR_IOPL_MSG "denied use of iopl() by "
58872+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
58873+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
58874+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
58875+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
58876+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
58877+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
58878+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
58879+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
58880+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
58881+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
58882+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
58883+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
58884+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
58885+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
58886+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
58887+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
58888+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
58889+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
58890+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
58891+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
58892+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
58893+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
58894+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
58895+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
58896+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
58897+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
58898+#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
58899+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
58900+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
58901+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
58902+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
58903+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
58904+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
58905+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
58906+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
58907+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
58908+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
58909+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
58910+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
58911+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
58912+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
58913+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
58914+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
58915+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
58916+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
58917+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
58918+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
58919+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
58920+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
58921+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
58922+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
58923+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
58924+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
58925+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
58926+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
58927+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
58928+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
58929+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
58930+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
58931+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
58932+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
58933+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
58934+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
58935+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
58936+#define GR_FAILFORK_MSG "failed fork with errno %s by "
58937+#define GR_NICE_CHROOT_MSG "denied priority change by "
58938+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
58939+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
58940+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
58941+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
58942+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
58943+#define GR_TIME_MSG "time set by "
58944+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
58945+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
58946+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
58947+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
58948+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
58949+#define GR_BIND_MSG "denied bind() by "
58950+#define GR_CONNECT_MSG "denied connect() by "
58951+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
58952+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
58953+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
58954+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
58955+#define GR_CAP_ACL_MSG "use of %s denied for "
58956+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
58957+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
58958+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
58959+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
58960+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
58961+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
58962+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
58963+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
58964+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
58965+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
58966+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
58967+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
58968+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
58969+#define GR_VM86_MSG "denied use of vm86 by "
58970+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
58971+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
58972diff -urNp linux-3.1.1/include/linux/grsecurity.h linux-3.1.1/include/linux/grsecurity.h
58973--- linux-3.1.1/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
58974+++ linux-3.1.1/include/linux/grsecurity.h 2011-11-17 00:16:10.000000000 -0500
58975@@ -0,0 +1,228 @@
58976+#ifndef GR_SECURITY_H
58977+#define GR_SECURITY_H
58978+#include <linux/fs.h>
58979+#include <linux/fs_struct.h>
58980+#include <linux/binfmts.h>
58981+#include <linux/gracl.h>
58982+
58983+/* notify of brain-dead configs */
58984+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58985+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
58986+#endif
58987+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
58988+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
58989+#endif
58990+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
58991+#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
58992+#endif
58993+#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
58994+#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
58995+#endif
58996+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
58997+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
58998+#endif
58999+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
59000+#error "CONFIG_PAX enabled, but no PaX options are enabled."
59001+#endif
59002+
59003+#include <linux/compat.h>
59004+
59005+struct user_arg_ptr {
59006+#ifdef CONFIG_COMPAT
59007+ bool is_compat;
59008+#endif
59009+ union {
59010+ const char __user *const __user *native;
59011+#ifdef CONFIG_COMPAT
59012+ compat_uptr_t __user *compat;
59013+#endif
59014+ } ptr;
59015+};
59016+
59017+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
59018+void gr_handle_brute_check(void);
59019+void gr_handle_kernel_exploit(void);
59020+int gr_process_user_ban(void);
59021+
59022+char gr_roletype_to_char(void);
59023+
59024+int gr_acl_enable_at_secure(void);
59025+
59026+int gr_check_user_change(int real, int effective, int fs);
59027+int gr_check_group_change(int real, int effective, int fs);
59028+
59029+void gr_del_task_from_ip_table(struct task_struct *p);
59030+
59031+int gr_pid_is_chrooted(struct task_struct *p);
59032+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
59033+int gr_handle_chroot_nice(void);
59034+int gr_handle_chroot_sysctl(const int op);
59035+int gr_handle_chroot_setpriority(struct task_struct *p,
59036+ const int niceval);
59037+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
59038+int gr_handle_chroot_chroot(const struct dentry *dentry,
59039+ const struct vfsmount *mnt);
59040+void gr_handle_chroot_chdir(struct path *path);
59041+int gr_handle_chroot_chmod(const struct dentry *dentry,
59042+ const struct vfsmount *mnt, const int mode);
59043+int gr_handle_chroot_mknod(const struct dentry *dentry,
59044+ const struct vfsmount *mnt, const int mode);
59045+int gr_handle_chroot_mount(const struct dentry *dentry,
59046+ const struct vfsmount *mnt,
59047+ const char *dev_name);
59048+int gr_handle_chroot_pivot(void);
59049+int gr_handle_chroot_unix(const pid_t pid);
59050+
59051+int gr_handle_rawio(const struct inode *inode);
59052+
59053+void gr_handle_ioperm(void);
59054+void gr_handle_iopl(void);
59055+
59056+int gr_tpe_allow(const struct file *file);
59057+
59058+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
59059+void gr_clear_chroot_entries(struct task_struct *task);
59060+
59061+void gr_log_forkfail(const int retval);
59062+void gr_log_timechange(void);
59063+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
59064+void gr_log_chdir(const struct dentry *dentry,
59065+ const struct vfsmount *mnt);
59066+void gr_log_chroot_exec(const struct dentry *dentry,
59067+ const struct vfsmount *mnt);
59068+void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
59069+void gr_log_remount(const char *devname, const int retval);
59070+void gr_log_unmount(const char *devname, const int retval);
59071+void gr_log_mount(const char *from, const char *to, const int retval);
59072+void gr_log_textrel(struct vm_area_struct *vma);
59073+void gr_log_rwxmmap(struct file *file);
59074+void gr_log_rwxmprotect(struct file *file);
59075+
59076+int gr_handle_follow_link(const struct inode *parent,
59077+ const struct inode *inode,
59078+ const struct dentry *dentry,
59079+ const struct vfsmount *mnt);
59080+int gr_handle_fifo(const struct dentry *dentry,
59081+ const struct vfsmount *mnt,
59082+ const struct dentry *dir, const int flag,
59083+ const int acc_mode);
59084+int gr_handle_hardlink(const struct dentry *dentry,
59085+ const struct vfsmount *mnt,
59086+ struct inode *inode,
59087+ const int mode, const char *to);
59088+
59089+int gr_is_capable(const int cap);
59090+int gr_is_capable_nolog(const int cap);
59091+void gr_learn_resource(const struct task_struct *task, const int limit,
59092+ const unsigned long wanted, const int gt);
59093+void gr_copy_label(struct task_struct *tsk);
59094+void gr_handle_crash(struct task_struct *task, const int sig);
59095+int gr_handle_signal(const struct task_struct *p, const int sig);
59096+int gr_check_crash_uid(const uid_t uid);
59097+int gr_check_protected_task(const struct task_struct *task);
59098+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
59099+int gr_acl_handle_mmap(const struct file *file,
59100+ const unsigned long prot);
59101+int gr_acl_handle_mprotect(const struct file *file,
59102+ const unsigned long prot);
59103+int gr_check_hidden_task(const struct task_struct *tsk);
59104+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
59105+ const struct vfsmount *mnt);
59106+__u32 gr_acl_handle_utime(const struct dentry *dentry,
59107+ const struct vfsmount *mnt);
59108+__u32 gr_acl_handle_access(const struct dentry *dentry,
59109+ const struct vfsmount *mnt, const int fmode);
59110+__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
59111+ const struct vfsmount *mnt, mode_t mode);
59112+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
59113+ const struct vfsmount *mnt, mode_t mode);
59114+__u32 gr_acl_handle_chown(const struct dentry *dentry,
59115+ const struct vfsmount *mnt);
59116+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
59117+ const struct vfsmount *mnt);
59118+int gr_handle_ptrace(struct task_struct *task, const long request);
59119+int gr_handle_proc_ptrace(struct task_struct *task);
59120+__u32 gr_acl_handle_execve(const struct dentry *dentry,
59121+ const struct vfsmount *mnt);
59122+int gr_check_crash_exec(const struct file *filp);
59123+int gr_acl_is_enabled(void);
59124+void gr_set_kernel_label(struct task_struct *task);
59125+void gr_set_role_label(struct task_struct *task, const uid_t uid,
59126+ const gid_t gid);
59127+int gr_set_proc_label(const struct dentry *dentry,
59128+ const struct vfsmount *mnt,
59129+ const int unsafe_share);
59130+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
59131+ const struct vfsmount *mnt);
59132+__u32 gr_acl_handle_open(const struct dentry *dentry,
59133+ const struct vfsmount *mnt, int acc_mode);
59134+__u32 gr_acl_handle_creat(const struct dentry *dentry,
59135+ const struct dentry *p_dentry,
59136+ const struct vfsmount *p_mnt,
59137+ int open_flags, int acc_mode, const int imode);
59138+void gr_handle_create(const struct dentry *dentry,
59139+ const struct vfsmount *mnt);
59140+void gr_handle_proc_create(const struct dentry *dentry,
59141+ const struct inode *inode);
59142+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
59143+ const struct dentry *parent_dentry,
59144+ const struct vfsmount *parent_mnt,
59145+ const int mode);
59146+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
59147+ const struct dentry *parent_dentry,
59148+ const struct vfsmount *parent_mnt);
59149+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
59150+ const struct vfsmount *mnt);
59151+void gr_handle_delete(const ino_t ino, const dev_t dev);
59152+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
59153+ const struct vfsmount *mnt);
59154+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
59155+ const struct dentry *parent_dentry,
59156+ const struct vfsmount *parent_mnt,
59157+ const char *from);
59158+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
59159+ const struct dentry *parent_dentry,
59160+ const struct vfsmount *parent_mnt,
59161+ const struct dentry *old_dentry,
59162+ const struct vfsmount *old_mnt, const char *to);
59163+int gr_acl_handle_rename(struct dentry *new_dentry,
59164+ struct dentry *parent_dentry,
59165+ const struct vfsmount *parent_mnt,
59166+ struct dentry *old_dentry,
59167+ struct inode *old_parent_inode,
59168+ struct vfsmount *old_mnt, const char *newname);
59169+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
59170+ struct dentry *old_dentry,
59171+ struct dentry *new_dentry,
59172+ struct vfsmount *mnt, const __u8 replace);
59173+__u32 gr_check_link(const struct dentry *new_dentry,
59174+ const struct dentry *parent_dentry,
59175+ const struct vfsmount *parent_mnt,
59176+ const struct dentry *old_dentry,
59177+ const struct vfsmount *old_mnt);
59178+int gr_acl_handle_filldir(const struct file *file, const char *name,
59179+ const unsigned int namelen, const ino_t ino);
59180+
59181+__u32 gr_acl_handle_unix(const struct dentry *dentry,
59182+ const struct vfsmount *mnt);
59183+void gr_acl_handle_exit(void);
59184+void gr_acl_handle_psacct(struct task_struct *task, const long code);
59185+int gr_acl_handle_procpidmem(const struct task_struct *task);
59186+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
59187+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
59188+void gr_audit_ptrace(struct task_struct *task);
59189+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
59190+
59191+#ifdef CONFIG_GRKERNSEC
59192+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
59193+void gr_handle_vm86(void);
59194+void gr_handle_mem_readwrite(u64 from, u64 to);
59195+
59196+extern int grsec_enable_dmesg;
59197+extern int grsec_disable_privio;
59198+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59199+extern int grsec_enable_chroot_findtask;
59200+#endif
59201+#endif
59202+
59203+#endif
59204diff -urNp linux-3.1.1/include/linux/grsock.h linux-3.1.1/include/linux/grsock.h
59205--- linux-3.1.1/include/linux/grsock.h 1969-12-31 19:00:00.000000000 -0500
59206+++ linux-3.1.1/include/linux/grsock.h 2011-11-16 18:40:31.000000000 -0500
59207@@ -0,0 +1,19 @@
59208+#ifndef __GRSOCK_H
59209+#define __GRSOCK_H
59210+
59211+extern void gr_attach_curr_ip(const struct sock *sk);
59212+extern int gr_handle_sock_all(const int family, const int type,
59213+ const int protocol);
59214+extern int gr_handle_sock_server(const struct sockaddr *sck);
59215+extern int gr_handle_sock_server_other(const struct sock *sck);
59216+extern int gr_handle_sock_client(const struct sockaddr *sck);
59217+extern int gr_search_connect(struct socket * sock,
59218+ struct sockaddr_in * addr);
59219+extern int gr_search_bind(struct socket * sock,
59220+ struct sockaddr_in * addr);
59221+extern int gr_search_listen(struct socket * sock);
59222+extern int gr_search_accept(struct socket * sock);
59223+extern int gr_search_socket(const int domain, const int type,
59224+ const int protocol);
59225+
59226+#endif
59227diff -urNp linux-3.1.1/include/linux/hid.h linux-3.1.1/include/linux/hid.h
59228--- linux-3.1.1/include/linux/hid.h 2011-11-11 15:19:27.000000000 -0500
59229+++ linux-3.1.1/include/linux/hid.h 2011-11-16 18:39:08.000000000 -0500
59230@@ -676,7 +676,7 @@ struct hid_ll_driver {
59231 unsigned int code, int value);
59232
59233 int (*parse)(struct hid_device *hdev);
59234-};
59235+} __no_const;
59236
59237 #define PM_HINT_FULLON 1<<5
59238 #define PM_HINT_NORMAL 1<<1
59239diff -urNp linux-3.1.1/include/linux/highmem.h linux-3.1.1/include/linux/highmem.h
59240--- linux-3.1.1/include/linux/highmem.h 2011-11-11 15:19:27.000000000 -0500
59241+++ linux-3.1.1/include/linux/highmem.h 2011-11-16 18:39:08.000000000 -0500
59242@@ -185,6 +185,18 @@ static inline void clear_highpage(struct
59243 kunmap_atomic(kaddr, KM_USER0);
59244 }
59245
59246+static inline void sanitize_highpage(struct page *page)
59247+{
59248+ void *kaddr;
59249+ unsigned long flags;
59250+
59251+ local_irq_save(flags);
59252+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
59253+ clear_page(kaddr);
59254+ kunmap_atomic(kaddr, KM_CLEARPAGE);
59255+ local_irq_restore(flags);
59256+}
59257+
59258 static inline void zero_user_segments(struct page *page,
59259 unsigned start1, unsigned end1,
59260 unsigned start2, unsigned end2)
59261diff -urNp linux-3.1.1/include/linux/i2c.h linux-3.1.1/include/linux/i2c.h
59262--- linux-3.1.1/include/linux/i2c.h 2011-11-11 15:19:27.000000000 -0500
59263+++ linux-3.1.1/include/linux/i2c.h 2011-11-16 18:39:08.000000000 -0500
59264@@ -346,6 +346,7 @@ struct i2c_algorithm {
59265 /* To determine what the adapter supports */
59266 u32 (*functionality) (struct i2c_adapter *);
59267 };
59268+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
59269
59270 /*
59271 * i2c_adapter is the structure used to identify a physical i2c bus along
59272diff -urNp linux-3.1.1/include/linux/i2o.h linux-3.1.1/include/linux/i2o.h
59273--- linux-3.1.1/include/linux/i2o.h 2011-11-11 15:19:27.000000000 -0500
59274+++ linux-3.1.1/include/linux/i2o.h 2011-11-16 18:39:08.000000000 -0500
59275@@ -564,7 +564,7 @@ struct i2o_controller {
59276 struct i2o_device *exec; /* Executive */
59277 #if BITS_PER_LONG == 64
59278 spinlock_t context_list_lock; /* lock for context_list */
59279- atomic_t context_list_counter; /* needed for unique contexts */
59280+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
59281 struct list_head context_list; /* list of context id's
59282 and pointers */
59283 #endif
59284diff -urNp linux-3.1.1/include/linux/init.h linux-3.1.1/include/linux/init.h
59285--- linux-3.1.1/include/linux/init.h 2011-11-11 15:19:27.000000000 -0500
59286+++ linux-3.1.1/include/linux/init.h 2011-11-16 18:39:08.000000000 -0500
59287@@ -293,13 +293,13 @@ void __init parse_early_options(char *cm
59288
59289 /* Each module must use one module_init(). */
59290 #define module_init(initfn) \
59291- static inline initcall_t __inittest(void) \
59292+ static inline __used initcall_t __inittest(void) \
59293 { return initfn; } \
59294 int init_module(void) __attribute__((alias(#initfn)));
59295
59296 /* This is only required if you want to be unloadable. */
59297 #define module_exit(exitfn) \
59298- static inline exitcall_t __exittest(void) \
59299+ static inline __used exitcall_t __exittest(void) \
59300 { return exitfn; } \
59301 void cleanup_module(void) __attribute__((alias(#exitfn)));
59302
59303diff -urNp linux-3.1.1/include/linux/init_task.h linux-3.1.1/include/linux/init_task.h
59304--- linux-3.1.1/include/linux/init_task.h 2011-11-11 15:19:27.000000000 -0500
59305+++ linux-3.1.1/include/linux/init_task.h 2011-11-16 18:39:08.000000000 -0500
59306@@ -126,6 +126,12 @@ extern struct cred init_cred;
59307 # define INIT_PERF_EVENTS(tsk)
59308 #endif
59309
59310+#ifdef CONFIG_X86
59311+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
59312+#else
59313+#define INIT_TASK_THREAD_INFO
59314+#endif
59315+
59316 /*
59317 * INIT_TASK is used to set up the first task table, touch at
59318 * your own risk!. Base=0, limit=0x1fffff (=2MB)
59319@@ -164,6 +170,7 @@ extern struct cred init_cred;
59320 RCU_INIT_POINTER(.cred, &init_cred), \
59321 .comm = "swapper", \
59322 .thread = INIT_THREAD, \
59323+ INIT_TASK_THREAD_INFO \
59324 .fs = &init_fs, \
59325 .files = &init_files, \
59326 .signal = &init_signals, \
59327diff -urNp linux-3.1.1/include/linux/intel-iommu.h linux-3.1.1/include/linux/intel-iommu.h
59328--- linux-3.1.1/include/linux/intel-iommu.h 2011-11-11 15:19:27.000000000 -0500
59329+++ linux-3.1.1/include/linux/intel-iommu.h 2011-11-16 18:39:08.000000000 -0500
59330@@ -296,7 +296,7 @@ struct iommu_flush {
59331 u8 fm, u64 type);
59332 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
59333 unsigned int size_order, u64 type);
59334-};
59335+} __no_const;
59336
59337 enum {
59338 SR_DMAR_FECTL_REG,
59339diff -urNp linux-3.1.1/include/linux/interrupt.h linux-3.1.1/include/linux/interrupt.h
59340--- linux-3.1.1/include/linux/interrupt.h 2011-11-11 15:19:27.000000000 -0500
59341+++ linux-3.1.1/include/linux/interrupt.h 2011-11-16 18:39:08.000000000 -0500
59342@@ -425,7 +425,7 @@ enum
59343 /* map softirq index to softirq name. update 'softirq_to_name' in
59344 * kernel/softirq.c when adding a new softirq.
59345 */
59346-extern char *softirq_to_name[NR_SOFTIRQS];
59347+extern const char * const softirq_to_name[NR_SOFTIRQS];
59348
59349 /* softirq mask and active fields moved to irq_cpustat_t in
59350 * asm/hardirq.h to get better cache usage. KAO
59351@@ -433,12 +433,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
59352
59353 struct softirq_action
59354 {
59355- void (*action)(struct softirq_action *);
59356+ void (*action)(void);
59357 };
59358
59359 asmlinkage void do_softirq(void);
59360 asmlinkage void __do_softirq(void);
59361-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
59362+extern void open_softirq(int nr, void (*action)(void));
59363 extern void softirq_init(void);
59364 static inline void __raise_softirq_irqoff(unsigned int nr)
59365 {
59366diff -urNp linux-3.1.1/include/linux/kallsyms.h linux-3.1.1/include/linux/kallsyms.h
59367--- linux-3.1.1/include/linux/kallsyms.h 2011-11-11 15:19:27.000000000 -0500
59368+++ linux-3.1.1/include/linux/kallsyms.h 2011-11-16 18:40:31.000000000 -0500
59369@@ -15,7 +15,8 @@
59370
59371 struct module;
59372
59373-#ifdef CONFIG_KALLSYMS
59374+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
59375+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59376 /* Lookup the address for a symbol. Returns 0 if not found. */
59377 unsigned long kallsyms_lookup_name(const char *name);
59378
59379@@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(un
59380 /* Stupid that this does nothing, but I didn't create this mess. */
59381 #define __print_symbol(fmt, addr)
59382 #endif /*CONFIG_KALLSYMS*/
59383+#else /* when included by kallsyms.c, vsnprintf.c, or
59384+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
59385+extern void __print_symbol(const char *fmt, unsigned long address);
59386+extern int sprint_backtrace(char *buffer, unsigned long address);
59387+extern int sprint_symbol(char *buffer, unsigned long address);
59388+const char *kallsyms_lookup(unsigned long addr,
59389+ unsigned long *symbolsize,
59390+ unsigned long *offset,
59391+ char **modname, char *namebuf);
59392+#endif
59393
59394 /* This macro allows us to keep printk typechecking */
59395 static void __check_printsym_format(const char *fmt, ...)
59396diff -urNp linux-3.1.1/include/linux/kgdb.h linux-3.1.1/include/linux/kgdb.h
59397--- linux-3.1.1/include/linux/kgdb.h 2011-11-11 15:19:27.000000000 -0500
59398+++ linux-3.1.1/include/linux/kgdb.h 2011-11-16 18:39:08.000000000 -0500
59399@@ -53,7 +53,7 @@ extern int kgdb_connected;
59400 extern int kgdb_io_module_registered;
59401
59402 extern atomic_t kgdb_setting_breakpoint;
59403-extern atomic_t kgdb_cpu_doing_single_step;
59404+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
59405
59406 extern struct task_struct *kgdb_usethread;
59407 extern struct task_struct *kgdb_contthread;
59408@@ -251,7 +251,7 @@ struct kgdb_arch {
59409 void (*disable_hw_break)(struct pt_regs *regs);
59410 void (*remove_all_hw_break)(void);
59411 void (*correct_hw_break)(void);
59412-};
59413+} __do_const;
59414
59415 /**
59416 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
59417@@ -276,7 +276,7 @@ struct kgdb_io {
59418 void (*pre_exception) (void);
59419 void (*post_exception) (void);
59420 int is_console;
59421-};
59422+} __do_const;
59423
59424 extern struct kgdb_arch arch_kgdb_ops;
59425
59426diff -urNp linux-3.1.1/include/linux/kmod.h linux-3.1.1/include/linux/kmod.h
59427--- linux-3.1.1/include/linux/kmod.h 2011-11-11 15:19:27.000000000 -0500
59428+++ linux-3.1.1/include/linux/kmod.h 2011-11-16 18:40:31.000000000 -0500
59429@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysc
59430 * usually useless though. */
59431 extern int __request_module(bool wait, const char *name, ...) \
59432 __attribute__((format(printf, 2, 3)));
59433+extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
59434+ __attribute__((format(printf, 3, 4)));
59435 #define request_module(mod...) __request_module(true, mod)
59436 #define request_module_nowait(mod...) __request_module(false, mod)
59437 #define try_then_request_module(x, mod...) \
59438diff -urNp linux-3.1.1/include/linux/kvm_host.h linux-3.1.1/include/linux/kvm_host.h
59439--- linux-3.1.1/include/linux/kvm_host.h 2011-11-11 15:19:27.000000000 -0500
59440+++ linux-3.1.1/include/linux/kvm_host.h 2011-11-16 18:39:08.000000000 -0500
59441@@ -308,7 +308,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
59442 void vcpu_load(struct kvm_vcpu *vcpu);
59443 void vcpu_put(struct kvm_vcpu *vcpu);
59444
59445-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
59446+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
59447 struct module *module);
59448 void kvm_exit(void);
59449
59450@@ -454,7 +454,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
59451 struct kvm_guest_debug *dbg);
59452 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
59453
59454-int kvm_arch_init(void *opaque);
59455+int kvm_arch_init(const void *opaque);
59456 void kvm_arch_exit(void);
59457
59458 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
59459diff -urNp linux-3.1.1/include/linux/libata.h linux-3.1.1/include/linux/libata.h
59460--- linux-3.1.1/include/linux/libata.h 2011-11-11 15:19:27.000000000 -0500
59461+++ linux-3.1.1/include/linux/libata.h 2011-11-16 18:39:08.000000000 -0500
59462@@ -909,7 +909,7 @@ struct ata_port_operations {
59463 * fields must be pointers.
59464 */
59465 const struct ata_port_operations *inherits;
59466-};
59467+} __do_const;
59468
59469 struct ata_port_info {
59470 unsigned long flags;
59471diff -urNp linux-3.1.1/include/linux/mca.h linux-3.1.1/include/linux/mca.h
59472--- linux-3.1.1/include/linux/mca.h 2011-11-11 15:19:27.000000000 -0500
59473+++ linux-3.1.1/include/linux/mca.h 2011-11-16 18:39:08.000000000 -0500
59474@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
59475 int region);
59476 void * (*mca_transform_memory)(struct mca_device *,
59477 void *memory);
59478-};
59479+} __no_const;
59480
59481 struct mca_bus {
59482 u64 default_dma_mask;
59483diff -urNp linux-3.1.1/include/linux/memory.h linux-3.1.1/include/linux/memory.h
59484--- linux-3.1.1/include/linux/memory.h 2011-11-11 15:19:27.000000000 -0500
59485+++ linux-3.1.1/include/linux/memory.h 2011-11-16 18:39:08.000000000 -0500
59486@@ -144,7 +144,7 @@ struct memory_accessor {
59487 size_t count);
59488 ssize_t (*write)(struct memory_accessor *, const char *buf,
59489 off_t offset, size_t count);
59490-};
59491+} __no_const;
59492
59493 /*
59494 * Kernel text modification mutex, used for code patching. Users of this lock
59495diff -urNp linux-3.1.1/include/linux/mfd/abx500.h linux-3.1.1/include/linux/mfd/abx500.h
59496--- linux-3.1.1/include/linux/mfd/abx500.h 2011-11-11 15:19:27.000000000 -0500
59497+++ linux-3.1.1/include/linux/mfd/abx500.h 2011-11-16 18:39:08.000000000 -0500
59498@@ -234,6 +234,7 @@ struct abx500_ops {
59499 int (*event_registers_startup_state_get) (struct device *, u8 *);
59500 int (*startup_irq_enabled) (struct device *, unsigned int);
59501 };
59502+typedef struct abx500_ops __no_const abx500_ops_no_const;
59503
59504 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
59505 void abx500_remove_ops(struct device *dev);
59506diff -urNp linux-3.1.1/include/linux/mm.h linux-3.1.1/include/linux/mm.h
59507--- linux-3.1.1/include/linux/mm.h 2011-11-11 15:19:27.000000000 -0500
59508+++ linux-3.1.1/include/linux/mm.h 2011-11-16 18:39:08.000000000 -0500
59509@@ -114,7 +114,14 @@ extern unsigned int kobjsize(const void
59510
59511 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
59512 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
59513+
59514+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
59515+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
59516+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
59517+#else
59518 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
59519+#endif
59520+
59521 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
59522 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
59523
59524@@ -1011,34 +1018,6 @@ int set_page_dirty(struct page *page);
59525 int set_page_dirty_lock(struct page *page);
59526 int clear_page_dirty_for_io(struct page *page);
59527
59528-/* Is the vma a continuation of the stack vma above it? */
59529-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
59530-{
59531- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
59532-}
59533-
59534-static inline int stack_guard_page_start(struct vm_area_struct *vma,
59535- unsigned long addr)
59536-{
59537- return (vma->vm_flags & VM_GROWSDOWN) &&
59538- (vma->vm_start == addr) &&
59539- !vma_growsdown(vma->vm_prev, addr);
59540-}
59541-
59542-/* Is the vma a continuation of the stack vma below it? */
59543-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
59544-{
59545- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
59546-}
59547-
59548-static inline int stack_guard_page_end(struct vm_area_struct *vma,
59549- unsigned long addr)
59550-{
59551- return (vma->vm_flags & VM_GROWSUP) &&
59552- (vma->vm_end == addr) &&
59553- !vma_growsup(vma->vm_next, addr);
59554-}
59555-
59556 extern unsigned long move_page_tables(struct vm_area_struct *vma,
59557 unsigned long old_addr, struct vm_area_struct *new_vma,
59558 unsigned long new_addr, unsigned long len);
59559@@ -1133,6 +1112,15 @@ static inline void sync_mm_rss(struct ta
59560 }
59561 #endif
59562
59563+#ifdef CONFIG_MMU
59564+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
59565+#else
59566+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
59567+{
59568+ return __pgprot(0);
59569+}
59570+#endif
59571+
59572 int vma_wants_writenotify(struct vm_area_struct *vma);
59573
59574 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
59575@@ -1417,6 +1405,7 @@ out:
59576 }
59577
59578 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
59579+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
59580
59581 extern unsigned long do_brk(unsigned long, unsigned long);
59582
59583@@ -1474,6 +1463,10 @@ extern struct vm_area_struct * find_vma(
59584 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
59585 struct vm_area_struct **pprev);
59586
59587+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
59588+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
59589+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
59590+
59591 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
59592 NULL if none. Assume start_addr < end_addr. */
59593 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
59594@@ -1490,15 +1483,6 @@ static inline unsigned long vma_pages(st
59595 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
59596 }
59597
59598-#ifdef CONFIG_MMU
59599-pgprot_t vm_get_page_prot(unsigned long vm_flags);
59600-#else
59601-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
59602-{
59603- return __pgprot(0);
59604-}
59605-#endif
59606-
59607 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
59608 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
59609 unsigned long pfn, unsigned long size, pgprot_t);
59610@@ -1612,7 +1596,7 @@ extern int unpoison_memory(unsigned long
59611 extern int sysctl_memory_failure_early_kill;
59612 extern int sysctl_memory_failure_recovery;
59613 extern void shake_page(struct page *p, int access);
59614-extern atomic_long_t mce_bad_pages;
59615+extern atomic_long_unchecked_t mce_bad_pages;
59616 extern int soft_offline_page(struct page *page, int flags);
59617
59618 extern void dump_page(struct page *page);
59619@@ -1626,5 +1610,11 @@ extern void copy_user_huge_page(struct p
59620 unsigned int pages_per_huge_page);
59621 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
59622
59623+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
59624+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
59625+#else
59626+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
59627+#endif
59628+
59629 #endif /* __KERNEL__ */
59630 #endif /* _LINUX_MM_H */
59631diff -urNp linux-3.1.1/include/linux/mm_types.h linux-3.1.1/include/linux/mm_types.h
59632--- linux-3.1.1/include/linux/mm_types.h 2011-11-11 15:19:27.000000000 -0500
59633+++ linux-3.1.1/include/linux/mm_types.h 2011-11-16 18:39:08.000000000 -0500
59634@@ -230,6 +230,8 @@ struct vm_area_struct {
59635 #ifdef CONFIG_NUMA
59636 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
59637 #endif
59638+
59639+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
59640 };
59641
59642 struct core_thread {
59643@@ -362,6 +364,24 @@ struct mm_struct {
59644 #ifdef CONFIG_CPUMASK_OFFSTACK
59645 struct cpumask cpumask_allocation;
59646 #endif
59647+
59648+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59649+ unsigned long pax_flags;
59650+#endif
59651+
59652+#ifdef CONFIG_PAX_DLRESOLVE
59653+ unsigned long call_dl_resolve;
59654+#endif
59655+
59656+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
59657+ unsigned long call_syscall;
59658+#endif
59659+
59660+#ifdef CONFIG_PAX_ASLR
59661+ unsigned long delta_mmap; /* randomized offset */
59662+ unsigned long delta_stack; /* randomized offset */
59663+#endif
59664+
59665 };
59666
59667 static inline void mm_init_cpumask(struct mm_struct *mm)
59668diff -urNp linux-3.1.1/include/linux/mmu_notifier.h linux-3.1.1/include/linux/mmu_notifier.h
59669--- linux-3.1.1/include/linux/mmu_notifier.h 2011-11-11 15:19:27.000000000 -0500
59670+++ linux-3.1.1/include/linux/mmu_notifier.h 2011-11-16 18:39:08.000000000 -0500
59671@@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destr
59672 */
59673 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
59674 ({ \
59675- pte_t __pte; \
59676+ pte_t ___pte; \
59677 struct vm_area_struct *___vma = __vma; \
59678 unsigned long ___address = __address; \
59679- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
59680+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
59681 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
59682- __pte; \
59683+ ___pte; \
59684 })
59685
59686 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
59687diff -urNp linux-3.1.1/include/linux/mmzone.h linux-3.1.1/include/linux/mmzone.h
59688--- linux-3.1.1/include/linux/mmzone.h 2011-11-11 15:19:27.000000000 -0500
59689+++ linux-3.1.1/include/linux/mmzone.h 2011-11-16 18:39:08.000000000 -0500
59690@@ -356,7 +356,7 @@ struct zone {
59691 unsigned long flags; /* zone flags, see below */
59692
59693 /* Zone statistics */
59694- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59695+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59696
59697 /*
59698 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
59699diff -urNp linux-3.1.1/include/linux/mod_devicetable.h linux-3.1.1/include/linux/mod_devicetable.h
59700--- linux-3.1.1/include/linux/mod_devicetable.h 2011-11-11 15:19:27.000000000 -0500
59701+++ linux-3.1.1/include/linux/mod_devicetable.h 2011-11-16 18:39:08.000000000 -0500
59702@@ -12,7 +12,7 @@
59703 typedef unsigned long kernel_ulong_t;
59704 #endif
59705
59706-#define PCI_ANY_ID (~0)
59707+#define PCI_ANY_ID ((__u16)~0)
59708
59709 struct pci_device_id {
59710 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
59711@@ -131,7 +131,7 @@ struct usb_device_id {
59712 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
59713 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
59714
59715-#define HID_ANY_ID (~0)
59716+#define HID_ANY_ID (~0U)
59717
59718 struct hid_device_id {
59719 __u16 bus;
59720diff -urNp linux-3.1.1/include/linux/module.h linux-3.1.1/include/linux/module.h
59721--- linux-3.1.1/include/linux/module.h 2011-11-11 15:19:27.000000000 -0500
59722+++ linux-3.1.1/include/linux/module.h 2011-11-16 18:39:08.000000000 -0500
59723@@ -16,6 +16,7 @@
59724 #include <linux/kobject.h>
59725 #include <linux/moduleparam.h>
59726 #include <linux/tracepoint.h>
59727+#include <linux/fs.h>
59728
59729 #include <linux/percpu.h>
59730 #include <asm/module.h>
59731@@ -327,19 +328,16 @@ struct module
59732 int (*init)(void);
59733
59734 /* If this is non-NULL, vfree after init() returns */
59735- void *module_init;
59736+ void *module_init_rx, *module_init_rw;
59737
59738 /* Here is the actual code + data, vfree'd on unload. */
59739- void *module_core;
59740+ void *module_core_rx, *module_core_rw;
59741
59742 /* Here are the sizes of the init and core sections */
59743- unsigned int init_size, core_size;
59744+ unsigned int init_size_rw, core_size_rw;
59745
59746 /* The size of the executable code in each section. */
59747- unsigned int init_text_size, core_text_size;
59748-
59749- /* Size of RO sections of the module (text+rodata) */
59750- unsigned int init_ro_size, core_ro_size;
59751+ unsigned int init_size_rx, core_size_rx;
59752
59753 /* Arch-specific module values */
59754 struct mod_arch_specific arch;
59755@@ -395,6 +393,10 @@ struct module
59756 #ifdef CONFIG_EVENT_TRACING
59757 struct ftrace_event_call **trace_events;
59758 unsigned int num_trace_events;
59759+ struct file_operations trace_id;
59760+ struct file_operations trace_enable;
59761+ struct file_operations trace_format;
59762+ struct file_operations trace_filter;
59763 #endif
59764 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
59765 unsigned int num_ftrace_callsites;
59766@@ -445,16 +447,46 @@ bool is_module_address(unsigned long add
59767 bool is_module_percpu_address(unsigned long addr);
59768 bool is_module_text_address(unsigned long addr);
59769
59770+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
59771+{
59772+
59773+#ifdef CONFIG_PAX_KERNEXEC
59774+ if (ktla_ktva(addr) >= (unsigned long)start &&
59775+ ktla_ktva(addr) < (unsigned long)start + size)
59776+ return 1;
59777+#endif
59778+
59779+ return ((void *)addr >= start && (void *)addr < start + size);
59780+}
59781+
59782+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
59783+{
59784+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
59785+}
59786+
59787+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
59788+{
59789+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
59790+}
59791+
59792+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
59793+{
59794+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
59795+}
59796+
59797+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
59798+{
59799+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
59800+}
59801+
59802 static inline int within_module_core(unsigned long addr, struct module *mod)
59803 {
59804- return (unsigned long)mod->module_core <= addr &&
59805- addr < (unsigned long)mod->module_core + mod->core_size;
59806+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
59807 }
59808
59809 static inline int within_module_init(unsigned long addr, struct module *mod)
59810 {
59811- return (unsigned long)mod->module_init <= addr &&
59812- addr < (unsigned long)mod->module_init + mod->init_size;
59813+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
59814 }
59815
59816 /* Search for module by name: must hold module_mutex. */
59817diff -urNp linux-3.1.1/include/linux/moduleloader.h linux-3.1.1/include/linux/moduleloader.h
59818--- linux-3.1.1/include/linux/moduleloader.h 2011-11-11 15:19:27.000000000 -0500
59819+++ linux-3.1.1/include/linux/moduleloader.h 2011-11-16 18:39:08.000000000 -0500
59820@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(st
59821 sections. Returns NULL on failure. */
59822 void *module_alloc(unsigned long size);
59823
59824+#ifdef CONFIG_PAX_KERNEXEC
59825+void *module_alloc_exec(unsigned long size);
59826+#else
59827+#define module_alloc_exec(x) module_alloc(x)
59828+#endif
59829+
59830 /* Free memory returned from module_alloc. */
59831 void module_free(struct module *mod, void *module_region);
59832
59833+#ifdef CONFIG_PAX_KERNEXEC
59834+void module_free_exec(struct module *mod, void *module_region);
59835+#else
59836+#define module_free_exec(x, y) module_free((x), (y))
59837+#endif
59838+
59839 /* Apply the given relocation to the (simplified) ELF. Return -error
59840 or 0. */
59841 int apply_relocate(Elf_Shdr *sechdrs,
59842diff -urNp linux-3.1.1/include/linux/moduleparam.h linux-3.1.1/include/linux/moduleparam.h
59843--- linux-3.1.1/include/linux/moduleparam.h 2011-11-11 15:19:27.000000000 -0500
59844+++ linux-3.1.1/include/linux/moduleparam.h 2011-11-16 18:39:08.000000000 -0500
59845@@ -255,7 +255,7 @@ static inline void __kernel_param_unlock
59846 * @len is usually just sizeof(string).
59847 */
59848 #define module_param_string(name, string, len, perm) \
59849- static const struct kparam_string __param_string_##name \
59850+ static const struct kparam_string __param_string_##name __used \
59851 = { len, string }; \
59852 __module_param_call(MODULE_PARAM_PREFIX, name, \
59853 &param_ops_string, \
59854@@ -370,7 +370,7 @@ extern int param_get_invbool(char *buffe
59855 * module_param_named() for why this might be necessary.
59856 */
59857 #define module_param_array_named(name, array, type, nump, perm) \
59858- static const struct kparam_array __param_arr_##name \
59859+ static const struct kparam_array __param_arr_##name __used \
59860 = { .max = ARRAY_SIZE(array), .num = nump, \
59861 .ops = &param_ops_##type, \
59862 .elemsize = sizeof(array[0]), .elem = array }; \
59863diff -urNp linux-3.1.1/include/linux/namei.h linux-3.1.1/include/linux/namei.h
59864--- linux-3.1.1/include/linux/namei.h 2011-11-11 15:19:27.000000000 -0500
59865+++ linux-3.1.1/include/linux/namei.h 2011-11-16 18:39:08.000000000 -0500
59866@@ -24,7 +24,7 @@ struct nameidata {
59867 unsigned seq;
59868 int last_type;
59869 unsigned depth;
59870- char *saved_names[MAX_NESTED_LINKS + 1];
59871+ const char *saved_names[MAX_NESTED_LINKS + 1];
59872
59873 /* Intent data */
59874 union {
59875@@ -94,12 +94,12 @@ extern int follow_up(struct path *);
59876 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
59877 extern void unlock_rename(struct dentry *, struct dentry *);
59878
59879-static inline void nd_set_link(struct nameidata *nd, char *path)
59880+static inline void nd_set_link(struct nameidata *nd, const char *path)
59881 {
59882 nd->saved_names[nd->depth] = path;
59883 }
59884
59885-static inline char *nd_get_link(struct nameidata *nd)
59886+static inline const char *nd_get_link(const struct nameidata *nd)
59887 {
59888 return nd->saved_names[nd->depth];
59889 }
59890diff -urNp linux-3.1.1/include/linux/netdevice.h linux-3.1.1/include/linux/netdevice.h
59891--- linux-3.1.1/include/linux/netdevice.h 2011-11-11 15:19:27.000000000 -0500
59892+++ linux-3.1.1/include/linux/netdevice.h 2011-11-16 18:39:08.000000000 -0500
59893@@ -944,6 +944,7 @@ struct net_device_ops {
59894 int (*ndo_set_features)(struct net_device *dev,
59895 u32 features);
59896 };
59897+typedef struct net_device_ops __no_const net_device_ops_no_const;
59898
59899 /*
59900 * The DEVICE structure.
59901diff -urNp linux-3.1.1/include/linux/netfilter/xt_gradm.h linux-3.1.1/include/linux/netfilter/xt_gradm.h
59902--- linux-3.1.1/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
59903+++ linux-3.1.1/include/linux/netfilter/xt_gradm.h 2011-11-16 18:40:31.000000000 -0500
59904@@ -0,0 +1,9 @@
59905+#ifndef _LINUX_NETFILTER_XT_GRADM_H
59906+#define _LINUX_NETFILTER_XT_GRADM_H 1
59907+
59908+struct xt_gradm_mtinfo {
59909+ __u16 flags;
59910+ __u16 invflags;
59911+};
59912+
59913+#endif
59914diff -urNp linux-3.1.1/include/linux/of_pdt.h linux-3.1.1/include/linux/of_pdt.h
59915--- linux-3.1.1/include/linux/of_pdt.h 2011-11-11 15:19:27.000000000 -0500
59916+++ linux-3.1.1/include/linux/of_pdt.h 2011-11-16 18:39:08.000000000 -0500
59917@@ -32,7 +32,7 @@ struct of_pdt_ops {
59918
59919 /* return 0 on success; fill in 'len' with number of bytes in path */
59920 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
59921-};
59922+} __no_const;
59923
59924 extern void *prom_early_alloc(unsigned long size);
59925
59926diff -urNp linux-3.1.1/include/linux/oprofile.h linux-3.1.1/include/linux/oprofile.h
59927--- linux-3.1.1/include/linux/oprofile.h 2011-11-11 15:19:27.000000000 -0500
59928+++ linux-3.1.1/include/linux/oprofile.h 2011-11-16 18:39:08.000000000 -0500
59929@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super
59930 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
59931 char const * name, ulong * val);
59932
59933-/** Create a file for read-only access to an atomic_t. */
59934+/** Create a file for read-only access to an atomic_unchecked_t. */
59935 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
59936- char const * name, atomic_t * val);
59937+ char const * name, atomic_unchecked_t * val);
59938
59939 /** create a directory */
59940 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
59941diff -urNp linux-3.1.1/include/linux/padata.h linux-3.1.1/include/linux/padata.h
59942--- linux-3.1.1/include/linux/padata.h 2011-11-11 15:19:27.000000000 -0500
59943+++ linux-3.1.1/include/linux/padata.h 2011-11-16 18:39:08.000000000 -0500
59944@@ -129,7 +129,7 @@ struct parallel_data {
59945 struct padata_instance *pinst;
59946 struct padata_parallel_queue __percpu *pqueue;
59947 struct padata_serial_queue __percpu *squeue;
59948- atomic_t seq_nr;
59949+ atomic_unchecked_t seq_nr;
59950 atomic_t reorder_objects;
59951 atomic_t refcnt;
59952 unsigned int max_seq_nr;
59953diff -urNp linux-3.1.1/include/linux/perf_event.h linux-3.1.1/include/linux/perf_event.h
59954--- linux-3.1.1/include/linux/perf_event.h 2011-11-11 15:19:27.000000000 -0500
59955+++ linux-3.1.1/include/linux/perf_event.h 2011-11-16 18:39:08.000000000 -0500
59956@@ -745,8 +745,8 @@ struct perf_event {
59957
59958 enum perf_event_active_state state;
59959 unsigned int attach_state;
59960- local64_t count;
59961- atomic64_t child_count;
59962+ local64_t count; /* PaX: fix it one day */
59963+ atomic64_unchecked_t child_count;
59964
59965 /*
59966 * These are the total time in nanoseconds that the event
59967@@ -797,8 +797,8 @@ struct perf_event {
59968 * These accumulate total time (in nanoseconds) that children
59969 * events have been enabled and running, respectively.
59970 */
59971- atomic64_t child_total_time_enabled;
59972- atomic64_t child_total_time_running;
59973+ atomic64_unchecked_t child_total_time_enabled;
59974+ atomic64_unchecked_t child_total_time_running;
59975
59976 /*
59977 * Protect attach/detach and child_list:
59978diff -urNp linux-3.1.1/include/linux/pipe_fs_i.h linux-3.1.1/include/linux/pipe_fs_i.h
59979--- linux-3.1.1/include/linux/pipe_fs_i.h 2011-11-11 15:19:27.000000000 -0500
59980+++ linux-3.1.1/include/linux/pipe_fs_i.h 2011-11-16 18:39:08.000000000 -0500
59981@@ -46,9 +46,9 @@ struct pipe_buffer {
59982 struct pipe_inode_info {
59983 wait_queue_head_t wait;
59984 unsigned int nrbufs, curbuf, buffers;
59985- unsigned int readers;
59986- unsigned int writers;
59987- unsigned int waiting_writers;
59988+ atomic_t readers;
59989+ atomic_t writers;
59990+ atomic_t waiting_writers;
59991 unsigned int r_counter;
59992 unsigned int w_counter;
59993 struct page *tmp_page;
59994diff -urNp linux-3.1.1/include/linux/pm_runtime.h linux-3.1.1/include/linux/pm_runtime.h
59995--- linux-3.1.1/include/linux/pm_runtime.h 2011-11-11 15:19:27.000000000 -0500
59996+++ linux-3.1.1/include/linux/pm_runtime.h 2011-11-16 18:39:08.000000000 -0500
59997@@ -99,7 +99,7 @@ static inline bool pm_runtime_callbacks_
59998
59999 static inline void pm_runtime_mark_last_busy(struct device *dev)
60000 {
60001- ACCESS_ONCE(dev->power.last_busy) = jiffies;
60002+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
60003 }
60004
60005 #else /* !CONFIG_PM_RUNTIME */
60006diff -urNp linux-3.1.1/include/linux/poison.h linux-3.1.1/include/linux/poison.h
60007--- linux-3.1.1/include/linux/poison.h 2011-11-11 15:19:27.000000000 -0500
60008+++ linux-3.1.1/include/linux/poison.h 2011-11-16 18:39:08.000000000 -0500
60009@@ -19,8 +19,8 @@
60010 * under normal circumstances, used to verify that nobody uses
60011 * non-initialized list entries.
60012 */
60013-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
60014-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
60015+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
60016+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
60017
60018 /********** include/linux/timer.h **********/
60019 /*
60020diff -urNp linux-3.1.1/include/linux/preempt.h linux-3.1.1/include/linux/preempt.h
60021--- linux-3.1.1/include/linux/preempt.h 2011-11-11 15:19:27.000000000 -0500
60022+++ linux-3.1.1/include/linux/preempt.h 2011-11-16 18:39:08.000000000 -0500
60023@@ -123,7 +123,7 @@ struct preempt_ops {
60024 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
60025 void (*sched_out)(struct preempt_notifier *notifier,
60026 struct task_struct *next);
60027-};
60028+} __no_const;
60029
60030 /**
60031 * preempt_notifier - key for installing preemption notifiers
60032diff -urNp linux-3.1.1/include/linux/proc_fs.h linux-3.1.1/include/linux/proc_fs.h
60033--- linux-3.1.1/include/linux/proc_fs.h 2011-11-11 15:19:27.000000000 -0500
60034+++ linux-3.1.1/include/linux/proc_fs.h 2011-11-16 18:40:31.000000000 -0500
60035@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
60036 return proc_create_data(name, mode, parent, proc_fops, NULL);
60037 }
60038
60039+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
60040+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
60041+{
60042+#ifdef CONFIG_GRKERNSEC_PROC_USER
60043+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
60044+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60045+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
60046+#else
60047+ return proc_create_data(name, mode, parent, proc_fops, NULL);
60048+#endif
60049+}
60050+
60051+
60052 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
60053 mode_t mode, struct proc_dir_entry *base,
60054 read_proc_t *read_proc, void * data)
60055@@ -258,7 +271,7 @@ union proc_op {
60056 int (*proc_show)(struct seq_file *m,
60057 struct pid_namespace *ns, struct pid *pid,
60058 struct task_struct *task);
60059-};
60060+} __no_const;
60061
60062 struct ctl_table_header;
60063 struct ctl_table;
60064diff -urNp linux-3.1.1/include/linux/ptrace.h linux-3.1.1/include/linux/ptrace.h
60065--- linux-3.1.1/include/linux/ptrace.h 2011-11-11 15:19:27.000000000 -0500
60066+++ linux-3.1.1/include/linux/ptrace.h 2011-11-16 18:40:31.000000000 -0500
60067@@ -129,10 +129,10 @@ extern void __ptrace_unlink(struct task_
60068 extern void exit_ptrace(struct task_struct *tracer);
60069 #define PTRACE_MODE_READ 1
60070 #define PTRACE_MODE_ATTACH 2
60071-/* Returns 0 on success, -errno on denial. */
60072-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
60073 /* Returns true on success, false on denial. */
60074 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
60075+/* Returns true on success, false on denial. */
60076+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
60077
60078 static inline int ptrace_reparented(struct task_struct *child)
60079 {
60080diff -urNp linux-3.1.1/include/linux/random.h linux-3.1.1/include/linux/random.h
60081--- linux-3.1.1/include/linux/random.h 2011-11-11 15:19:27.000000000 -0500
60082+++ linux-3.1.1/include/linux/random.h 2011-11-16 18:39:08.000000000 -0500
60083@@ -69,12 +69,17 @@ void srandom32(u32 seed);
60084
60085 u32 prandom32(struct rnd_state *);
60086
60087+static inline unsigned long pax_get_random_long(void)
60088+{
60089+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
60090+}
60091+
60092 /*
60093 * Handle minimum values for seeds
60094 */
60095 static inline u32 __seed(u32 x, u32 m)
60096 {
60097- return (x < m) ? x + m : x;
60098+ return (x <= m) ? x + m + 1 : x;
60099 }
60100
60101 /**
60102diff -urNp linux-3.1.1/include/linux/reboot.h linux-3.1.1/include/linux/reboot.h
60103--- linux-3.1.1/include/linux/reboot.h 2011-11-11 15:19:27.000000000 -0500
60104+++ linux-3.1.1/include/linux/reboot.h 2011-11-16 18:39:08.000000000 -0500
60105@@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(st
60106 * Architecture-specific implementations of sys_reboot commands.
60107 */
60108
60109-extern void machine_restart(char *cmd);
60110-extern void machine_halt(void);
60111-extern void machine_power_off(void);
60112+extern void machine_restart(char *cmd) __noreturn;
60113+extern void machine_halt(void) __noreturn;
60114+extern void machine_power_off(void) __noreturn;
60115
60116 extern void machine_shutdown(void);
60117 struct pt_regs;
60118@@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struc
60119 */
60120
60121 extern void kernel_restart_prepare(char *cmd);
60122-extern void kernel_restart(char *cmd);
60123-extern void kernel_halt(void);
60124-extern void kernel_power_off(void);
60125+extern void kernel_restart(char *cmd) __noreturn;
60126+extern void kernel_halt(void) __noreturn;
60127+extern void kernel_power_off(void) __noreturn;
60128
60129 extern int C_A_D; /* for sysctl */
60130 void ctrl_alt_del(void);
60131@@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
60132 * Emergency restart, callable from an interrupt handler.
60133 */
60134
60135-extern void emergency_restart(void);
60136+extern void emergency_restart(void) __noreturn;
60137 #include <asm/emergency-restart.h>
60138
60139 #endif
60140diff -urNp linux-3.1.1/include/linux/reiserfs_fs.h linux-3.1.1/include/linux/reiserfs_fs.h
60141--- linux-3.1.1/include/linux/reiserfs_fs.h 2011-11-11 15:19:27.000000000 -0500
60142+++ linux-3.1.1/include/linux/reiserfs_fs.h 2011-11-16 18:39:08.000000000 -0500
60143@@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset
60144 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
60145
60146 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
60147-#define get_generation(s) atomic_read (&fs_generation(s))
60148+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
60149 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
60150 #define __fs_changed(gen,s) (gen != get_generation (s))
60151 #define fs_changed(gen,s) \
60152diff -urNp linux-3.1.1/include/linux/reiserfs_fs_sb.h linux-3.1.1/include/linux/reiserfs_fs_sb.h
60153--- linux-3.1.1/include/linux/reiserfs_fs_sb.h 2011-11-11 15:19:27.000000000 -0500
60154+++ linux-3.1.1/include/linux/reiserfs_fs_sb.h 2011-11-16 18:39:08.000000000 -0500
60155@@ -386,7 +386,7 @@ struct reiserfs_sb_info {
60156 /* Comment? -Hans */
60157 wait_queue_head_t s_wait;
60158 /* To be obsoleted soon by per buffer seals.. -Hans */
60159- atomic_t s_generation_counter; // increased by one every time the
60160+ atomic_unchecked_t s_generation_counter; // increased by one every time the
60161 // tree gets re-balanced
60162 unsigned long s_properties; /* File system properties. Currently holds
60163 on-disk FS format */
60164diff -urNp linux-3.1.1/include/linux/relay.h linux-3.1.1/include/linux/relay.h
60165--- linux-3.1.1/include/linux/relay.h 2011-11-11 15:19:27.000000000 -0500
60166+++ linux-3.1.1/include/linux/relay.h 2011-11-16 18:39:08.000000000 -0500
60167@@ -159,7 +159,7 @@ struct rchan_callbacks
60168 * The callback should return 0 if successful, negative if not.
60169 */
60170 int (*remove_buf_file)(struct dentry *dentry);
60171-};
60172+} __no_const;
60173
60174 /*
60175 * CONFIG_RELAY kernel API, kernel/relay.c
60176diff -urNp linux-3.1.1/include/linux/rfkill.h linux-3.1.1/include/linux/rfkill.h
60177--- linux-3.1.1/include/linux/rfkill.h 2011-11-11 15:19:27.000000000 -0500
60178+++ linux-3.1.1/include/linux/rfkill.h 2011-11-16 18:39:08.000000000 -0500
60179@@ -147,6 +147,7 @@ struct rfkill_ops {
60180 void (*query)(struct rfkill *rfkill, void *data);
60181 int (*set_block)(void *data, bool blocked);
60182 };
60183+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
60184
60185 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
60186 /**
60187diff -urNp linux-3.1.1/include/linux/rmap.h linux-3.1.1/include/linux/rmap.h
60188--- linux-3.1.1/include/linux/rmap.h 2011-11-11 15:19:27.000000000 -0500
60189+++ linux-3.1.1/include/linux/rmap.h 2011-11-16 18:39:08.000000000 -0500
60190@@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struc
60191 void anon_vma_init(void); /* create anon_vma_cachep */
60192 int anon_vma_prepare(struct vm_area_struct *);
60193 void unlink_anon_vmas(struct vm_area_struct *);
60194-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
60195-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
60196+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
60197+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
60198 void __anon_vma_link(struct vm_area_struct *);
60199
60200 static inline void anon_vma_merge(struct vm_area_struct *vma,
60201diff -urNp linux-3.1.1/include/linux/sched.h linux-3.1.1/include/linux/sched.h
60202--- linux-3.1.1/include/linux/sched.h 2011-11-11 15:19:27.000000000 -0500
60203+++ linux-3.1.1/include/linux/sched.h 2011-11-16 18:40:31.000000000 -0500
60204@@ -100,6 +100,7 @@ struct bio_list;
60205 struct fs_struct;
60206 struct perf_event_context;
60207 struct blk_plug;
60208+struct linux_binprm;
60209
60210 /*
60211 * List of flags we want to share for kernel threads,
60212@@ -380,10 +381,13 @@ struct user_namespace;
60213 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
60214
60215 extern int sysctl_max_map_count;
60216+extern unsigned long sysctl_heap_stack_gap;
60217
60218 #include <linux/aio.h>
60219
60220 #ifdef CONFIG_MMU
60221+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
60222+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
60223 extern void arch_pick_mmap_layout(struct mm_struct *mm);
60224 extern unsigned long
60225 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
60226@@ -629,6 +633,17 @@ struct signal_struct {
60227 #ifdef CONFIG_TASKSTATS
60228 struct taskstats *stats;
60229 #endif
60230+
60231+#ifdef CONFIG_GRKERNSEC
60232+ u32 curr_ip;
60233+ u32 saved_ip;
60234+ u32 gr_saddr;
60235+ u32 gr_daddr;
60236+ u16 gr_sport;
60237+ u16 gr_dport;
60238+ u8 used_accept:1;
60239+#endif
60240+
60241 #ifdef CONFIG_AUDIT
60242 unsigned audit_tty;
60243 struct tty_audit_buf *tty_audit_buf;
60244@@ -710,6 +725,11 @@ struct user_struct {
60245 struct key *session_keyring; /* UID's default session keyring */
60246 #endif
60247
60248+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
60249+ unsigned int banned;
60250+ unsigned long ban_expires;
60251+#endif
60252+
60253 /* Hash table maintenance information */
60254 struct hlist_node uidhash_node;
60255 uid_t uid;
60256@@ -1340,8 +1360,8 @@ struct task_struct {
60257 struct list_head thread_group;
60258
60259 struct completion *vfork_done; /* for vfork() */
60260- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
60261- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60262+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
60263+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60264
60265 cputime_t utime, stime, utimescaled, stimescaled;
60266 cputime_t gtime;
60267@@ -1357,13 +1377,6 @@ struct task_struct {
60268 struct task_cputime cputime_expires;
60269 struct list_head cpu_timers[3];
60270
60271-/* process credentials */
60272- const struct cred __rcu *real_cred; /* objective and real subjective task
60273- * credentials (COW) */
60274- const struct cred __rcu *cred; /* effective (overridable) subjective task
60275- * credentials (COW) */
60276- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60277-
60278 char comm[TASK_COMM_LEN]; /* executable name excluding path
60279 - access with [gs]et_task_comm (which lock
60280 it with task_lock())
60281@@ -1380,8 +1393,16 @@ struct task_struct {
60282 #endif
60283 /* CPU-specific state of this task */
60284 struct thread_struct thread;
60285+/* thread_info moved to task_struct */
60286+#ifdef CONFIG_X86
60287+ struct thread_info tinfo;
60288+#endif
60289 /* filesystem information */
60290 struct fs_struct *fs;
60291+
60292+ const struct cred __rcu *cred; /* effective (overridable) subjective task
60293+ * credentials (COW) */
60294+
60295 /* open file information */
60296 struct files_struct *files;
60297 /* namespaces */
60298@@ -1428,6 +1449,11 @@ struct task_struct {
60299 struct rt_mutex_waiter *pi_blocked_on;
60300 #endif
60301
60302+/* process credentials */
60303+ const struct cred __rcu *real_cred; /* objective and real subjective task
60304+ * credentials (COW) */
60305+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60306+
60307 #ifdef CONFIG_DEBUG_MUTEXES
60308 /* mutex deadlock detection */
60309 struct mutex_waiter *blocked_on;
60310@@ -1537,6 +1563,21 @@ struct task_struct {
60311 unsigned long default_timer_slack_ns;
60312
60313 struct list_head *scm_work_list;
60314+
60315+#ifdef CONFIG_GRKERNSEC
60316+ /* grsecurity */
60317+ struct dentry *gr_chroot_dentry;
60318+ struct acl_subject_label *acl;
60319+ struct acl_role_label *role;
60320+ struct file *exec_file;
60321+ u16 acl_role_id;
60322+ /* is this the task that authenticated to the special role */
60323+ u8 acl_sp_role;
60324+ u8 is_writable;
60325+ u8 brute;
60326+ u8 gr_is_chrooted;
60327+#endif
60328+
60329 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
60330 /* Index of current stored address in ret_stack */
60331 int curr_ret_stack;
60332@@ -1571,6 +1612,57 @@ struct task_struct {
60333 #endif
60334 };
60335
60336+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
60337+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
60338+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
60339+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
60340+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
60341+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
60342+
60343+#ifdef CONFIG_PAX_SOFTMODE
60344+extern int pax_softmode;
60345+#endif
60346+
60347+extern int pax_check_flags(unsigned long *);
60348+
60349+/* if tsk != current then task_lock must be held on it */
60350+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60351+static inline unsigned long pax_get_flags(struct task_struct *tsk)
60352+{
60353+ if (likely(tsk->mm))
60354+ return tsk->mm->pax_flags;
60355+ else
60356+ return 0UL;
60357+}
60358+
60359+/* if tsk != current then task_lock must be held on it */
60360+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
60361+{
60362+ if (likely(tsk->mm)) {
60363+ tsk->mm->pax_flags = flags;
60364+ return 0;
60365+ }
60366+ return -EINVAL;
60367+}
60368+#endif
60369+
60370+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60371+extern void pax_set_initial_flags(struct linux_binprm *bprm);
60372+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
60373+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
60374+#endif
60375+
60376+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
60377+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
60378+extern void pax_report_refcount_overflow(struct pt_regs *regs);
60379+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
60380+
60381+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
60382+extern void pax_track_stack(void);
60383+#else
60384+static inline void pax_track_stack(void) {}
60385+#endif
60386+
60387 /* Future-safe accessor for struct task_struct's cpus_allowed. */
60388 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
60389
60390@@ -2074,7 +2166,9 @@ void yield(void);
60391 extern struct exec_domain default_exec_domain;
60392
60393 union thread_union {
60394+#ifndef CONFIG_X86
60395 struct thread_info thread_info;
60396+#endif
60397 unsigned long stack[THREAD_SIZE/sizeof(long)];
60398 };
60399
60400@@ -2107,6 +2201,7 @@ extern struct pid_namespace init_pid_ns;
60401 */
60402
60403 extern struct task_struct *find_task_by_vpid(pid_t nr);
60404+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
60405 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
60406 struct pid_namespace *ns);
60407
60408@@ -2243,7 +2338,7 @@ extern void __cleanup_sighand(struct sig
60409 extern void exit_itimers(struct signal_struct *);
60410 extern void flush_itimer_signals(void);
60411
60412-extern NORET_TYPE void do_group_exit(int);
60413+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
60414
60415 extern void daemonize(const char *, ...);
60416 extern int allow_signal(int);
60417@@ -2408,13 +2503,17 @@ static inline unsigned long *end_of_stac
60418
60419 #endif
60420
60421-static inline int object_is_on_stack(void *obj)
60422+static inline int object_starts_on_stack(void *obj)
60423 {
60424- void *stack = task_stack_page(current);
60425+ const void *stack = task_stack_page(current);
60426
60427 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
60428 }
60429
60430+#ifdef CONFIG_PAX_USERCOPY
60431+extern int object_is_on_stack(const void *obj, unsigned long len);
60432+#endif
60433+
60434 extern void thread_info_cache_init(void);
60435
60436 #ifdef CONFIG_DEBUG_STACK_USAGE
60437diff -urNp linux-3.1.1/include/linux/screen_info.h linux-3.1.1/include/linux/screen_info.h
60438--- linux-3.1.1/include/linux/screen_info.h 2011-11-11 15:19:27.000000000 -0500
60439+++ linux-3.1.1/include/linux/screen_info.h 2011-11-16 18:39:08.000000000 -0500
60440@@ -43,7 +43,8 @@ struct screen_info {
60441 __u16 pages; /* 0x32 */
60442 __u16 vesa_attributes; /* 0x34 */
60443 __u32 capabilities; /* 0x36 */
60444- __u8 _reserved[6]; /* 0x3a */
60445+ __u16 vesapm_size; /* 0x3a */
60446+ __u8 _reserved[4]; /* 0x3c */
60447 } __attribute__((packed));
60448
60449 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
60450diff -urNp linux-3.1.1/include/linux/security.h linux-3.1.1/include/linux/security.h
60451--- linux-3.1.1/include/linux/security.h 2011-11-11 15:19:27.000000000 -0500
60452+++ linux-3.1.1/include/linux/security.h 2011-11-16 18:40:31.000000000 -0500
60453@@ -36,6 +36,7 @@
60454 #include <linux/key.h>
60455 #include <linux/xfrm.h>
60456 #include <linux/slab.h>
60457+#include <linux/grsecurity.h>
60458 #include <net/flow.h>
60459
60460 /* Maximum number of letters for an LSM name string */
60461diff -urNp linux-3.1.1/include/linux/seq_file.h linux-3.1.1/include/linux/seq_file.h
60462--- linux-3.1.1/include/linux/seq_file.h 2011-11-11 15:19:27.000000000 -0500
60463+++ linux-3.1.1/include/linux/seq_file.h 2011-11-16 18:39:08.000000000 -0500
60464@@ -33,6 +33,7 @@ struct seq_operations {
60465 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
60466 int (*show) (struct seq_file *m, void *v);
60467 };
60468+typedef struct seq_operations __no_const seq_operations_no_const;
60469
60470 #define SEQ_SKIP 1
60471
60472diff -urNp linux-3.1.1/include/linux/shm.h linux-3.1.1/include/linux/shm.h
60473--- linux-3.1.1/include/linux/shm.h 2011-11-11 15:19:27.000000000 -0500
60474+++ linux-3.1.1/include/linux/shm.h 2011-11-16 18:59:58.000000000 -0500
60475@@ -98,6 +98,10 @@ struct shmid_kernel /* private to the ke
60476
60477 /* The task created the shm object. NULL if the task is dead. */
60478 struct task_struct *shm_creator;
60479+#ifdef CONFIG_GRKERNSEC
60480+ time_t shm_createtime;
60481+ pid_t shm_lapid;
60482+#endif
60483 };
60484
60485 /* shm_mode upper byte flags */
60486diff -urNp linux-3.1.1/include/linux/skbuff.h linux-3.1.1/include/linux/skbuff.h
60487--- linux-3.1.1/include/linux/skbuff.h 2011-11-11 15:19:27.000000000 -0500
60488+++ linux-3.1.1/include/linux/skbuff.h 2011-11-16 18:39:08.000000000 -0500
60489@@ -610,7 +610,7 @@ static inline struct skb_shared_hwtstamp
60490 */
60491 static inline int skb_queue_empty(const struct sk_buff_head *list)
60492 {
60493- return list->next == (struct sk_buff *)list;
60494+ return list->next == (const struct sk_buff *)list;
60495 }
60496
60497 /**
60498@@ -623,7 +623,7 @@ static inline int skb_queue_empty(const
60499 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
60500 const struct sk_buff *skb)
60501 {
60502- return skb->next == (struct sk_buff *)list;
60503+ return skb->next == (const struct sk_buff *)list;
60504 }
60505
60506 /**
60507@@ -636,7 +636,7 @@ static inline bool skb_queue_is_last(con
60508 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
60509 const struct sk_buff *skb)
60510 {
60511- return skb->prev == (struct sk_buff *)list;
60512+ return skb->prev == (const struct sk_buff *)list;
60513 }
60514
60515 /**
60516@@ -1458,7 +1458,7 @@ static inline int pskb_network_may_pull(
60517 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
60518 */
60519 #ifndef NET_SKB_PAD
60520-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
60521+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
60522 #endif
60523
60524 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
60525diff -urNp linux-3.1.1/include/linux/slab_def.h linux-3.1.1/include/linux/slab_def.h
60526--- linux-3.1.1/include/linux/slab_def.h 2011-11-11 15:19:27.000000000 -0500
60527+++ linux-3.1.1/include/linux/slab_def.h 2011-11-16 18:39:08.000000000 -0500
60528@@ -68,10 +68,10 @@ struct kmem_cache {
60529 unsigned long node_allocs;
60530 unsigned long node_frees;
60531 unsigned long node_overflow;
60532- atomic_t allochit;
60533- atomic_t allocmiss;
60534- atomic_t freehit;
60535- atomic_t freemiss;
60536+ atomic_unchecked_t allochit;
60537+ atomic_unchecked_t allocmiss;
60538+ atomic_unchecked_t freehit;
60539+ atomic_unchecked_t freemiss;
60540
60541 /*
60542 * If debugging is enabled, then the allocator can add additional
60543diff -urNp linux-3.1.1/include/linux/slab.h linux-3.1.1/include/linux/slab.h
60544--- linux-3.1.1/include/linux/slab.h 2011-11-11 15:19:27.000000000 -0500
60545+++ linux-3.1.1/include/linux/slab.h 2011-11-16 18:39:08.000000000 -0500
60546@@ -11,12 +11,20 @@
60547
60548 #include <linux/gfp.h>
60549 #include <linux/types.h>
60550+#include <linux/err.h>
60551
60552 /*
60553 * Flags to pass to kmem_cache_create().
60554 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
60555 */
60556 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
60557+
60558+#ifdef CONFIG_PAX_USERCOPY
60559+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
60560+#else
60561+#define SLAB_USERCOPY 0x00000000UL
60562+#endif
60563+
60564 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
60565 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
60566 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
60567@@ -87,10 +95,13 @@
60568 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
60569 * Both make kfree a no-op.
60570 */
60571-#define ZERO_SIZE_PTR ((void *)16)
60572+#define ZERO_SIZE_PTR \
60573+({ \
60574+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
60575+ (void *)(-MAX_ERRNO-1L); \
60576+})
60577
60578-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
60579- (unsigned long)ZERO_SIZE_PTR)
60580+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
60581
60582 /*
60583 * struct kmem_cache related prototypes
60584@@ -161,6 +172,7 @@ void * __must_check krealloc(const void
60585 void kfree(const void *);
60586 void kzfree(const void *);
60587 size_t ksize(const void *);
60588+void check_object_size(const void *ptr, unsigned long n, bool to);
60589
60590 /*
60591 * Allocator specific definitions. These are mainly used to establish optimized
60592@@ -353,4 +365,59 @@ static inline void *kzalloc_node(size_t
60593
60594 void __init kmem_cache_init_late(void);
60595
60596+#define kmalloc(x, y) \
60597+({ \
60598+ void *___retval; \
60599+ intoverflow_t ___x = (intoverflow_t)x; \
60600+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
60601+ ___retval = NULL; \
60602+ else \
60603+ ___retval = kmalloc((size_t)___x, (y)); \
60604+ ___retval; \
60605+})
60606+
60607+#define kmalloc_node(x, y, z) \
60608+({ \
60609+ void *___retval; \
60610+ intoverflow_t ___x = (intoverflow_t)x; \
60611+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
60612+ ___retval = NULL; \
60613+ else \
60614+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
60615+ ___retval; \
60616+})
60617+
60618+#define kzalloc(x, y) \
60619+({ \
60620+ void *___retval; \
60621+ intoverflow_t ___x = (intoverflow_t)x; \
60622+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
60623+ ___retval = NULL; \
60624+ else \
60625+ ___retval = kzalloc((size_t)___x, (y)); \
60626+ ___retval; \
60627+})
60628+
60629+#define __krealloc(x, y, z) \
60630+({ \
60631+ void *___retval; \
60632+ intoverflow_t ___y = (intoverflow_t)y; \
60633+ if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
60634+ ___retval = NULL; \
60635+ else \
60636+ ___retval = __krealloc((x), (size_t)___y, (z)); \
60637+ ___retval; \
60638+})
60639+
60640+#define krealloc(x, y, z) \
60641+({ \
60642+ void *___retval; \
60643+ intoverflow_t ___y = (intoverflow_t)y; \
60644+ if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
60645+ ___retval = NULL; \
60646+ else \
60647+ ___retval = krealloc((x), (size_t)___y, (z)); \
60648+ ___retval; \
60649+})
60650+
60651 #endif /* _LINUX_SLAB_H */
60652diff -urNp linux-3.1.1/include/linux/slub_def.h linux-3.1.1/include/linux/slub_def.h
60653--- linux-3.1.1/include/linux/slub_def.h 2011-11-11 15:19:27.000000000 -0500
60654+++ linux-3.1.1/include/linux/slub_def.h 2011-11-16 18:39:08.000000000 -0500
60655@@ -85,7 +85,7 @@ struct kmem_cache {
60656 struct kmem_cache_order_objects max;
60657 struct kmem_cache_order_objects min;
60658 gfp_t allocflags; /* gfp flags to use on each alloc */
60659- int refcount; /* Refcount for slab cache destroy */
60660+ atomic_t refcount; /* Refcount for slab cache destroy */
60661 void (*ctor)(void *);
60662 int inuse; /* Offset to metadata */
60663 int align; /* Alignment */
60664@@ -211,7 +211,7 @@ static __always_inline struct kmem_cache
60665 }
60666
60667 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
60668-void *__kmalloc(size_t size, gfp_t flags);
60669+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
60670
60671 static __always_inline void *
60672 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
60673diff -urNp linux-3.1.1/include/linux/sonet.h linux-3.1.1/include/linux/sonet.h
60674--- linux-3.1.1/include/linux/sonet.h 2011-11-11 15:19:27.000000000 -0500
60675+++ linux-3.1.1/include/linux/sonet.h 2011-11-16 18:39:08.000000000 -0500
60676@@ -61,7 +61,7 @@ struct sonet_stats {
60677 #include <linux/atomic.h>
60678
60679 struct k_sonet_stats {
60680-#define __HANDLE_ITEM(i) atomic_t i
60681+#define __HANDLE_ITEM(i) atomic_unchecked_t i
60682 __SONET_ITEMS
60683 #undef __HANDLE_ITEM
60684 };
60685diff -urNp linux-3.1.1/include/linux/sunrpc/clnt.h linux-3.1.1/include/linux/sunrpc/clnt.h
60686--- linux-3.1.1/include/linux/sunrpc/clnt.h 2011-11-11 15:19:27.000000000 -0500
60687+++ linux-3.1.1/include/linux/sunrpc/clnt.h 2011-11-16 18:39:08.000000000 -0500
60688@@ -169,9 +169,9 @@ static inline unsigned short rpc_get_por
60689 {
60690 switch (sap->sa_family) {
60691 case AF_INET:
60692- return ntohs(((struct sockaddr_in *)sap)->sin_port);
60693+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
60694 case AF_INET6:
60695- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
60696+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
60697 }
60698 return 0;
60699 }
60700@@ -204,7 +204,7 @@ static inline bool __rpc_cmp_addr4(const
60701 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
60702 const struct sockaddr *src)
60703 {
60704- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
60705+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
60706 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
60707
60708 dsin->sin_family = ssin->sin_family;
60709@@ -301,7 +301,7 @@ static inline u32 rpc_get_scope_id(const
60710 if (sa->sa_family != AF_INET6)
60711 return 0;
60712
60713- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
60714+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
60715 }
60716
60717 #endif /* __KERNEL__ */
60718diff -urNp linux-3.1.1/include/linux/sunrpc/sched.h linux-3.1.1/include/linux/sunrpc/sched.h
60719--- linux-3.1.1/include/linux/sunrpc/sched.h 2011-11-11 15:19:27.000000000 -0500
60720+++ linux-3.1.1/include/linux/sunrpc/sched.h 2011-11-16 18:39:08.000000000 -0500
60721@@ -105,6 +105,7 @@ struct rpc_call_ops {
60722 void (*rpc_call_done)(struct rpc_task *, void *);
60723 void (*rpc_release)(void *);
60724 };
60725+typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
60726
60727 struct rpc_task_setup {
60728 struct rpc_task *task;
60729diff -urNp linux-3.1.1/include/linux/sunrpc/svc_rdma.h linux-3.1.1/include/linux/sunrpc/svc_rdma.h
60730--- linux-3.1.1/include/linux/sunrpc/svc_rdma.h 2011-11-11 15:19:27.000000000 -0500
60731+++ linux-3.1.1/include/linux/sunrpc/svc_rdma.h 2011-11-16 18:39:08.000000000 -0500
60732@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
60733 extern unsigned int svcrdma_max_requests;
60734 extern unsigned int svcrdma_max_req_size;
60735
60736-extern atomic_t rdma_stat_recv;
60737-extern atomic_t rdma_stat_read;
60738-extern atomic_t rdma_stat_write;
60739-extern atomic_t rdma_stat_sq_starve;
60740-extern atomic_t rdma_stat_rq_starve;
60741-extern atomic_t rdma_stat_rq_poll;
60742-extern atomic_t rdma_stat_rq_prod;
60743-extern atomic_t rdma_stat_sq_poll;
60744-extern atomic_t rdma_stat_sq_prod;
60745+extern atomic_unchecked_t rdma_stat_recv;
60746+extern atomic_unchecked_t rdma_stat_read;
60747+extern atomic_unchecked_t rdma_stat_write;
60748+extern atomic_unchecked_t rdma_stat_sq_starve;
60749+extern atomic_unchecked_t rdma_stat_rq_starve;
60750+extern atomic_unchecked_t rdma_stat_rq_poll;
60751+extern atomic_unchecked_t rdma_stat_rq_prod;
60752+extern atomic_unchecked_t rdma_stat_sq_poll;
60753+extern atomic_unchecked_t rdma_stat_sq_prod;
60754
60755 #define RPCRDMA_VERSION 1
60756
60757diff -urNp linux-3.1.1/include/linux/sysctl.h linux-3.1.1/include/linux/sysctl.h
60758--- linux-3.1.1/include/linux/sysctl.h 2011-11-11 15:19:27.000000000 -0500
60759+++ linux-3.1.1/include/linux/sysctl.h 2011-11-16 18:40:31.000000000 -0500
60760@@ -155,7 +155,11 @@ enum
60761 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
60762 };
60763
60764-
60765+#ifdef CONFIG_PAX_SOFTMODE
60766+enum {
60767+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
60768+};
60769+#endif
60770
60771 /* CTL_VM names: */
60772 enum
60773@@ -967,6 +971,8 @@ typedef int proc_handler (struct ctl_tab
60774
60775 extern int proc_dostring(struct ctl_table *, int,
60776 void __user *, size_t *, loff_t *);
60777+extern int proc_dostring_modpriv(struct ctl_table *, int,
60778+ void __user *, size_t *, loff_t *);
60779 extern int proc_dointvec(struct ctl_table *, int,
60780 void __user *, size_t *, loff_t *);
60781 extern int proc_dointvec_minmax(struct ctl_table *, int,
60782diff -urNp linux-3.1.1/include/linux/tty_ldisc.h linux-3.1.1/include/linux/tty_ldisc.h
60783--- linux-3.1.1/include/linux/tty_ldisc.h 2011-11-11 15:19:27.000000000 -0500
60784+++ linux-3.1.1/include/linux/tty_ldisc.h 2011-11-16 18:39:08.000000000 -0500
60785@@ -148,7 +148,7 @@ struct tty_ldisc_ops {
60786
60787 struct module *owner;
60788
60789- int refcount;
60790+ atomic_t refcount;
60791 };
60792
60793 struct tty_ldisc {
60794diff -urNp linux-3.1.1/include/linux/types.h linux-3.1.1/include/linux/types.h
60795--- linux-3.1.1/include/linux/types.h 2011-11-11 15:19:27.000000000 -0500
60796+++ linux-3.1.1/include/linux/types.h 2011-11-16 18:39:08.000000000 -0500
60797@@ -213,10 +213,26 @@ typedef struct {
60798 int counter;
60799 } atomic_t;
60800
60801+#ifdef CONFIG_PAX_REFCOUNT
60802+typedef struct {
60803+ int counter;
60804+} atomic_unchecked_t;
60805+#else
60806+typedef atomic_t atomic_unchecked_t;
60807+#endif
60808+
60809 #ifdef CONFIG_64BIT
60810 typedef struct {
60811 long counter;
60812 } atomic64_t;
60813+
60814+#ifdef CONFIG_PAX_REFCOUNT
60815+typedef struct {
60816+ long counter;
60817+} atomic64_unchecked_t;
60818+#else
60819+typedef atomic64_t atomic64_unchecked_t;
60820+#endif
60821 #endif
60822
60823 struct list_head {
60824diff -urNp linux-3.1.1/include/linux/uaccess.h linux-3.1.1/include/linux/uaccess.h
60825--- linux-3.1.1/include/linux/uaccess.h 2011-11-11 15:19:27.000000000 -0500
60826+++ linux-3.1.1/include/linux/uaccess.h 2011-11-16 18:39:08.000000000 -0500
60827@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
60828 long ret; \
60829 mm_segment_t old_fs = get_fs(); \
60830 \
60831- set_fs(KERNEL_DS); \
60832 pagefault_disable(); \
60833- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
60834- pagefault_enable(); \
60835+ set_fs(KERNEL_DS); \
60836+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
60837 set_fs(old_fs); \
60838+ pagefault_enable(); \
60839 ret; \
60840 })
60841
60842diff -urNp linux-3.1.1/include/linux/unaligned/access_ok.h linux-3.1.1/include/linux/unaligned/access_ok.h
60843--- linux-3.1.1/include/linux/unaligned/access_ok.h 2011-11-11 15:19:27.000000000 -0500
60844+++ linux-3.1.1/include/linux/unaligned/access_ok.h 2011-11-16 18:39:08.000000000 -0500
60845@@ -6,32 +6,32 @@
60846
60847 static inline u16 get_unaligned_le16(const void *p)
60848 {
60849- return le16_to_cpup((__le16 *)p);
60850+ return le16_to_cpup((const __le16 *)p);
60851 }
60852
60853 static inline u32 get_unaligned_le32(const void *p)
60854 {
60855- return le32_to_cpup((__le32 *)p);
60856+ return le32_to_cpup((const __le32 *)p);
60857 }
60858
60859 static inline u64 get_unaligned_le64(const void *p)
60860 {
60861- return le64_to_cpup((__le64 *)p);
60862+ return le64_to_cpup((const __le64 *)p);
60863 }
60864
60865 static inline u16 get_unaligned_be16(const void *p)
60866 {
60867- return be16_to_cpup((__be16 *)p);
60868+ return be16_to_cpup((const __be16 *)p);
60869 }
60870
60871 static inline u32 get_unaligned_be32(const void *p)
60872 {
60873- return be32_to_cpup((__be32 *)p);
60874+ return be32_to_cpup((const __be32 *)p);
60875 }
60876
60877 static inline u64 get_unaligned_be64(const void *p)
60878 {
60879- return be64_to_cpup((__be64 *)p);
60880+ return be64_to_cpup((const __be64 *)p);
60881 }
60882
60883 static inline void put_unaligned_le16(u16 val, void *p)
60884diff -urNp linux-3.1.1/include/linux/vermagic.h linux-3.1.1/include/linux/vermagic.h
60885--- linux-3.1.1/include/linux/vermagic.h 2011-11-11 15:19:27.000000000 -0500
60886+++ linux-3.1.1/include/linux/vermagic.h 2011-11-16 18:54:54.000000000 -0500
60887@@ -26,9 +26,35 @@
60888 #define MODULE_ARCH_VERMAGIC ""
60889 #endif
60890
60891+#ifdef CONFIG_PAX_REFCOUNT
60892+#define MODULE_PAX_REFCOUNT "REFCOUNT "
60893+#else
60894+#define MODULE_PAX_REFCOUNT ""
60895+#endif
60896+
60897+#ifdef CONSTIFY_PLUGIN
60898+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
60899+#else
60900+#define MODULE_CONSTIFY_PLUGIN ""
60901+#endif
60902+
60903+#ifdef STACKLEAK_PLUGIN
60904+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
60905+#else
60906+#define MODULE_STACKLEAK_PLUGIN ""
60907+#endif
60908+
60909+#ifdef CONFIG_GRKERNSEC
60910+#define MODULE_GRSEC "GRSEC "
60911+#else
60912+#define MODULE_GRSEC ""
60913+#endif
60914+
60915 #define VERMAGIC_STRING \
60916 UTS_RELEASE " " \
60917 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
60918 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
60919- MODULE_ARCH_VERMAGIC
60920+ MODULE_ARCH_VERMAGIC \
60921+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
60922+ MODULE_GRSEC
60923
60924diff -urNp linux-3.1.1/include/linux/vmalloc.h linux-3.1.1/include/linux/vmalloc.h
60925--- linux-3.1.1/include/linux/vmalloc.h 2011-11-11 15:19:27.000000000 -0500
60926+++ linux-3.1.1/include/linux/vmalloc.h 2011-11-16 18:39:08.000000000 -0500
60927@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining
60928 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
60929 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
60930 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
60931+
60932+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
60933+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
60934+#endif
60935+
60936 /* bits [20..32] reserved for arch specific ioremap internals */
60937
60938 /*
60939@@ -156,4 +161,103 @@ pcpu_free_vm_areas(struct vm_struct **vm
60940 # endif
60941 #endif
60942
60943+#define vmalloc(x) \
60944+({ \
60945+ void *___retval; \
60946+ intoverflow_t ___x = (intoverflow_t)x; \
60947+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
60948+ ___retval = NULL; \
60949+ else \
60950+ ___retval = vmalloc((unsigned long)___x); \
60951+ ___retval; \
60952+})
60953+
60954+#define vzalloc(x) \
60955+({ \
60956+ void *___retval; \
60957+ intoverflow_t ___x = (intoverflow_t)x; \
60958+ if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
60959+ ___retval = NULL; \
60960+ else \
60961+ ___retval = vzalloc((unsigned long)___x); \
60962+ ___retval; \
60963+})
60964+
60965+#define __vmalloc(x, y, z) \
60966+({ \
60967+ void *___retval; \
60968+ intoverflow_t ___x = (intoverflow_t)x; \
60969+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
60970+ ___retval = NULL; \
60971+ else \
60972+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
60973+ ___retval; \
60974+})
60975+
60976+#define vmalloc_user(x) \
60977+({ \
60978+ void *___retval; \
60979+ intoverflow_t ___x = (intoverflow_t)x; \
60980+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
60981+ ___retval = NULL; \
60982+ else \
60983+ ___retval = vmalloc_user((unsigned long)___x); \
60984+ ___retval; \
60985+})
60986+
60987+#define vmalloc_exec(x) \
60988+({ \
60989+ void *___retval; \
60990+ intoverflow_t ___x = (intoverflow_t)x; \
60991+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
60992+ ___retval = NULL; \
60993+ else \
60994+ ___retval = vmalloc_exec((unsigned long)___x); \
60995+ ___retval; \
60996+})
60997+
60998+#define vmalloc_node(x, y) \
60999+({ \
61000+ void *___retval; \
61001+ intoverflow_t ___x = (intoverflow_t)x; \
61002+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
61003+ ___retval = NULL; \
61004+ else \
61005+ ___retval = vmalloc_node((unsigned long)___x, (y));\
61006+ ___retval; \
61007+})
61008+
61009+#define vzalloc_node(x, y) \
61010+({ \
61011+ void *___retval; \
61012+ intoverflow_t ___x = (intoverflow_t)x; \
61013+ if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
61014+ ___retval = NULL; \
61015+ else \
61016+ ___retval = vzalloc_node((unsigned long)___x, (y));\
61017+ ___retval; \
61018+})
61019+
61020+#define vmalloc_32(x) \
61021+({ \
61022+ void *___retval; \
61023+ intoverflow_t ___x = (intoverflow_t)x; \
61024+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
61025+ ___retval = NULL; \
61026+ else \
61027+ ___retval = vmalloc_32((unsigned long)___x); \
61028+ ___retval; \
61029+})
61030+
61031+#define vmalloc_32_user(x) \
61032+({ \
61033+	void *___retval;					\
61034+ intoverflow_t ___x = (intoverflow_t)x; \
61035+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
61036+ ___retval = NULL; \
61037+ else \
61038+ ___retval = vmalloc_32_user((unsigned long)___x);\
61039+ ___retval; \
61040+})
61041+
61042 #endif /* _LINUX_VMALLOC_H */
61043diff -urNp linux-3.1.1/include/linux/vmstat.h linux-3.1.1/include/linux/vmstat.h
61044--- linux-3.1.1/include/linux/vmstat.h 2011-11-11 15:19:27.000000000 -0500
61045+++ linux-3.1.1/include/linux/vmstat.h 2011-11-16 18:39:08.000000000 -0500
61046@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(in
61047 /*
61048 * Zone based page accounting with per cpu differentials.
61049 */
61050-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61051+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61052
61053 static inline void zone_page_state_add(long x, struct zone *zone,
61054 enum zone_stat_item item)
61055 {
61056- atomic_long_add(x, &zone->vm_stat[item]);
61057- atomic_long_add(x, &vm_stat[item]);
61058+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
61059+ atomic_long_add_unchecked(x, &vm_stat[item]);
61060 }
61061
61062 static inline unsigned long global_page_state(enum zone_stat_item item)
61063 {
61064- long x = atomic_long_read(&vm_stat[item]);
61065+ long x = atomic_long_read_unchecked(&vm_stat[item]);
61066 #ifdef CONFIG_SMP
61067 if (x < 0)
61068 x = 0;
61069@@ -109,7 +109,7 @@ static inline unsigned long global_page_
61070 static inline unsigned long zone_page_state(struct zone *zone,
61071 enum zone_stat_item item)
61072 {
61073- long x = atomic_long_read(&zone->vm_stat[item]);
61074+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61075 #ifdef CONFIG_SMP
61076 if (x < 0)
61077 x = 0;
61078@@ -126,7 +126,7 @@ static inline unsigned long zone_page_st
61079 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
61080 enum zone_stat_item item)
61081 {
61082- long x = atomic_long_read(&zone->vm_stat[item]);
61083+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61084
61085 #ifdef CONFIG_SMP
61086 int cpu;
61087@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state
61088
61089 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
61090 {
61091- atomic_long_inc(&zone->vm_stat[item]);
61092- atomic_long_inc(&vm_stat[item]);
61093+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
61094+ atomic_long_inc_unchecked(&vm_stat[item]);
61095 }
61096
61097 static inline void __inc_zone_page_state(struct page *page,
61098@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state
61099
61100 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
61101 {
61102- atomic_long_dec(&zone->vm_stat[item]);
61103- atomic_long_dec(&vm_stat[item]);
61104+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
61105+ atomic_long_dec_unchecked(&vm_stat[item]);
61106 }
61107
61108 static inline void __dec_zone_page_state(struct page *page,
61109diff -urNp linux-3.1.1/include/media/saa7146_vv.h linux-3.1.1/include/media/saa7146_vv.h
61110--- linux-3.1.1/include/media/saa7146_vv.h 2011-11-11 15:19:27.000000000 -0500
61111+++ linux-3.1.1/include/media/saa7146_vv.h 2011-11-16 18:39:08.000000000 -0500
61112@@ -163,7 +163,7 @@ struct saa7146_ext_vv
61113 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
61114
61115 /* the extension can override this */
61116- struct v4l2_ioctl_ops ops;
61117+ v4l2_ioctl_ops_no_const ops;
61118 /* pointer to the saa7146 core ops */
61119 const struct v4l2_ioctl_ops *core_ops;
61120
61121diff -urNp linux-3.1.1/include/media/v4l2-dev.h linux-3.1.1/include/media/v4l2-dev.h
61122--- linux-3.1.1/include/media/v4l2-dev.h 2011-11-11 15:19:27.000000000 -0500
61123+++ linux-3.1.1/include/media/v4l2-dev.h 2011-11-16 18:39:08.000000000 -0500
61124@@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_sta
61125
61126
61127 struct v4l2_file_operations {
61128- struct module *owner;
61129+ struct module * const owner;
61130 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
61131 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
61132 unsigned int (*poll) (struct file *, struct poll_table_struct *);
61133@@ -68,6 +68,7 @@ struct v4l2_file_operations {
61134 int (*open) (struct file *);
61135 int (*release) (struct file *);
61136 };
61137+typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
61138
61139 /*
61140 * Newer version of video_device, handled by videodev2.c
61141diff -urNp linux-3.1.1/include/media/v4l2-ioctl.h linux-3.1.1/include/media/v4l2-ioctl.h
61142--- linux-3.1.1/include/media/v4l2-ioctl.h 2011-11-11 15:19:27.000000000 -0500
61143+++ linux-3.1.1/include/media/v4l2-ioctl.h 2011-11-17 18:44:20.000000000 -0500
61144@@ -272,7 +272,7 @@ struct v4l2_ioctl_ops {
61145 long (*vidioc_default) (struct file *file, void *fh,
61146 bool valid_prio, int cmd, void *arg);
61147 };
61148-
61149+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
61150
61151 /* v4l debugging and diagnostics */
61152
61153diff -urNp linux-3.1.1/include/net/caif/caif_hsi.h linux-3.1.1/include/net/caif/caif_hsi.h
61154--- linux-3.1.1/include/net/caif/caif_hsi.h 2011-11-11 15:19:27.000000000 -0500
61155+++ linux-3.1.1/include/net/caif/caif_hsi.h 2011-11-16 18:39:08.000000000 -0500
61156@@ -94,7 +94,7 @@ struct cfhsi_drv {
61157 void (*rx_done_cb) (struct cfhsi_drv *drv);
61158 void (*wake_up_cb) (struct cfhsi_drv *drv);
61159 void (*wake_down_cb) (struct cfhsi_drv *drv);
61160-};
61161+} __no_const;
61162
61163 /* Structure implemented by HSI device. */
61164 struct cfhsi_dev {
61165diff -urNp linux-3.1.1/include/net/caif/cfctrl.h linux-3.1.1/include/net/caif/cfctrl.h
61166--- linux-3.1.1/include/net/caif/cfctrl.h 2011-11-11 15:19:27.000000000 -0500
61167+++ linux-3.1.1/include/net/caif/cfctrl.h 2011-11-16 18:39:08.000000000 -0500
61168@@ -52,7 +52,7 @@ struct cfctrl_rsp {
61169 void (*radioset_rsp)(void);
61170 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
61171 struct cflayer *client_layer);
61172-};
61173+} __no_const;
61174
61175 /* Link Setup Parameters for CAIF-Links. */
61176 struct cfctrl_link_param {
61177@@ -101,8 +101,8 @@ struct cfctrl_request_info {
61178 struct cfctrl {
61179 struct cfsrvl serv;
61180 struct cfctrl_rsp res;
61181- atomic_t req_seq_no;
61182- atomic_t rsp_seq_no;
61183+ atomic_unchecked_t req_seq_no;
61184+ atomic_unchecked_t rsp_seq_no;
61185 struct list_head list;
61186 /* Protects from simultaneous access to first_req list */
61187 spinlock_t info_list_lock;
61188diff -urNp linux-3.1.1/include/net/flow.h linux-3.1.1/include/net/flow.h
61189--- linux-3.1.1/include/net/flow.h 2011-11-11 15:19:27.000000000 -0500
61190+++ linux-3.1.1/include/net/flow.h 2011-11-16 18:39:08.000000000 -0500
61191@@ -207,6 +207,6 @@ extern struct flow_cache_object *flow_ca
61192 u8 dir, flow_resolve_t resolver, void *ctx);
61193
61194 extern void flow_cache_flush(void);
61195-extern atomic_t flow_cache_genid;
61196+extern atomic_unchecked_t flow_cache_genid;
61197
61198 #endif
61199diff -urNp linux-3.1.1/include/net/inetpeer.h linux-3.1.1/include/net/inetpeer.h
61200--- linux-3.1.1/include/net/inetpeer.h 2011-11-11 15:19:27.000000000 -0500
61201+++ linux-3.1.1/include/net/inetpeer.h 2011-11-16 18:39:08.000000000 -0500
61202@@ -47,8 +47,8 @@ struct inet_peer {
61203 */
61204 union {
61205 struct {
61206- atomic_t rid; /* Frag reception counter */
61207- atomic_t ip_id_count; /* IP ID for the next packet */
61208+ atomic_unchecked_t rid; /* Frag reception counter */
61209+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
61210 __u32 tcp_ts;
61211 __u32 tcp_ts_stamp;
61212 };
61213@@ -112,11 +112,11 @@ static inline int inet_getid(struct inet
61214 more++;
61215 inet_peer_refcheck(p);
61216 do {
61217- old = atomic_read(&p->ip_id_count);
61218+ old = atomic_read_unchecked(&p->ip_id_count);
61219 new = old + more;
61220 if (!new)
61221 new = 1;
61222- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
61223+ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
61224 return new;
61225 }
61226
61227diff -urNp linux-3.1.1/include/net/ip_fib.h linux-3.1.1/include/net/ip_fib.h
61228--- linux-3.1.1/include/net/ip_fib.h 2011-11-11 15:19:27.000000000 -0500
61229+++ linux-3.1.1/include/net/ip_fib.h 2011-11-16 18:39:08.000000000 -0500
61230@@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(s
61231
61232 #define FIB_RES_SADDR(net, res) \
61233 ((FIB_RES_NH(res).nh_saddr_genid == \
61234- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
61235+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
61236 FIB_RES_NH(res).nh_saddr : \
61237 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
61238 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
61239diff -urNp linux-3.1.1/include/net/ip_vs.h linux-3.1.1/include/net/ip_vs.h
61240--- linux-3.1.1/include/net/ip_vs.h 2011-11-11 15:19:27.000000000 -0500
61241+++ linux-3.1.1/include/net/ip_vs.h 2011-11-16 18:39:08.000000000 -0500
61242@@ -509,7 +509,7 @@ struct ip_vs_conn {
61243 struct ip_vs_conn *control; /* Master control connection */
61244 atomic_t n_control; /* Number of controlled ones */
61245 struct ip_vs_dest *dest; /* real server */
61246- atomic_t in_pkts; /* incoming packet counter */
61247+ atomic_unchecked_t in_pkts; /* incoming packet counter */
61248
61249 /* packet transmitter for different forwarding methods. If it
61250 mangles the packet, it must return NF_DROP or better NF_STOLEN,
61251@@ -647,7 +647,7 @@ struct ip_vs_dest {
61252 __be16 port; /* port number of the server */
61253 union nf_inet_addr addr; /* IP address of the server */
61254 volatile unsigned flags; /* dest status flags */
61255- atomic_t conn_flags; /* flags to copy to conn */
61256+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
61257 atomic_t weight; /* server weight */
61258
61259 atomic_t refcnt; /* reference counter */
61260diff -urNp linux-3.1.1/include/net/irda/ircomm_core.h linux-3.1.1/include/net/irda/ircomm_core.h
61261--- linux-3.1.1/include/net/irda/ircomm_core.h 2011-11-11 15:19:27.000000000 -0500
61262+++ linux-3.1.1/include/net/irda/ircomm_core.h 2011-11-16 18:39:08.000000000 -0500
61263@@ -51,7 +51,7 @@ typedef struct {
61264 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
61265 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
61266 struct ircomm_info *);
61267-} call_t;
61268+} __no_const call_t;
61269
61270 struct ircomm_cb {
61271 irda_queue_t queue;
61272diff -urNp linux-3.1.1/include/net/irda/ircomm_tty.h linux-3.1.1/include/net/irda/ircomm_tty.h
61273--- linux-3.1.1/include/net/irda/ircomm_tty.h 2011-11-11 15:19:27.000000000 -0500
61274+++ linux-3.1.1/include/net/irda/ircomm_tty.h 2011-11-16 18:39:08.000000000 -0500
61275@@ -35,6 +35,7 @@
61276 #include <linux/termios.h>
61277 #include <linux/timer.h>
61278 #include <linux/tty.h> /* struct tty_struct */
61279+#include <asm/local.h>
61280
61281 #include <net/irda/irias_object.h>
61282 #include <net/irda/ircomm_core.h>
61283@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
61284 unsigned short close_delay;
61285 unsigned short closing_wait; /* time to wait before closing */
61286
61287- int open_count;
61288- int blocked_open; /* # of blocked opens */
61289+ local_t open_count;
61290+ local_t blocked_open; /* # of blocked opens */
61291
61292 /* Protect concurent access to :
61293 * o self->open_count
61294diff -urNp linux-3.1.1/include/net/iucv/af_iucv.h linux-3.1.1/include/net/iucv/af_iucv.h
61295--- linux-3.1.1/include/net/iucv/af_iucv.h 2011-11-11 15:19:27.000000000 -0500
61296+++ linux-3.1.1/include/net/iucv/af_iucv.h 2011-11-16 18:39:08.000000000 -0500
61297@@ -87,7 +87,7 @@ struct iucv_sock {
61298 struct iucv_sock_list {
61299 struct hlist_head head;
61300 rwlock_t lock;
61301- atomic_t autobind_name;
61302+ atomic_unchecked_t autobind_name;
61303 };
61304
61305 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
61306diff -urNp linux-3.1.1/include/net/lapb.h linux-3.1.1/include/net/lapb.h
61307--- linux-3.1.1/include/net/lapb.h 2011-11-11 15:19:27.000000000 -0500
61308+++ linux-3.1.1/include/net/lapb.h 2011-11-16 18:39:08.000000000 -0500
61309@@ -95,7 +95,7 @@ struct lapb_cb {
61310 struct sk_buff_head write_queue;
61311 struct sk_buff_head ack_queue;
61312 unsigned char window;
61313- struct lapb_register_struct callbacks;
61314+ struct lapb_register_struct *callbacks;
61315
61316 /* FRMR control information */
61317 struct lapb_frame frmr_data;
61318diff -urNp linux-3.1.1/include/net/neighbour.h linux-3.1.1/include/net/neighbour.h
61319--- linux-3.1.1/include/net/neighbour.h 2011-11-11 15:19:27.000000000 -0500
61320+++ linux-3.1.1/include/net/neighbour.h 2011-11-16 18:39:08.000000000 -0500
61321@@ -122,7 +122,7 @@ struct neigh_ops {
61322 void (*error_report)(struct neighbour *, struct sk_buff *);
61323 int (*output)(struct neighbour *, struct sk_buff *);
61324 int (*connected_output)(struct neighbour *, struct sk_buff *);
61325-};
61326+} __do_const;
61327
61328 struct pneigh_entry {
61329 struct pneigh_entry *next;
61330diff -urNp linux-3.1.1/include/net/netlink.h linux-3.1.1/include/net/netlink.h
61331--- linux-3.1.1/include/net/netlink.h 2011-11-11 15:19:27.000000000 -0500
61332+++ linux-3.1.1/include/net/netlink.h 2011-11-16 18:39:08.000000000 -0500
61333@@ -562,7 +562,7 @@ static inline void *nlmsg_get_pos(struct
61334 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
61335 {
61336 if (mark)
61337- skb_trim(skb, (unsigned char *) mark - skb->data);
61338+ skb_trim(skb, (const unsigned char *) mark - skb->data);
61339 }
61340
61341 /**
61342diff -urNp linux-3.1.1/include/net/netns/ipv4.h linux-3.1.1/include/net/netns/ipv4.h
61343--- linux-3.1.1/include/net/netns/ipv4.h 2011-11-11 15:19:27.000000000 -0500
61344+++ linux-3.1.1/include/net/netns/ipv4.h 2011-11-16 18:39:08.000000000 -0500
61345@@ -56,8 +56,8 @@ struct netns_ipv4 {
61346
61347 unsigned int sysctl_ping_group_range[2];
61348
61349- atomic_t rt_genid;
61350- atomic_t dev_addr_genid;
61351+ atomic_unchecked_t rt_genid;
61352+ atomic_unchecked_t dev_addr_genid;
61353
61354 #ifdef CONFIG_IP_MROUTE
61355 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
61356diff -urNp linux-3.1.1/include/net/sctp/sctp.h linux-3.1.1/include/net/sctp/sctp.h
61357--- linux-3.1.1/include/net/sctp/sctp.h 2011-11-11 15:19:27.000000000 -0500
61358+++ linux-3.1.1/include/net/sctp/sctp.h 2011-11-16 18:39:08.000000000 -0500
61359@@ -318,9 +318,9 @@ do { \
61360
61361 #else /* SCTP_DEBUG */
61362
61363-#define SCTP_DEBUG_PRINTK(whatever...)
61364-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
61365-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
61366+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
61367+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
61368+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
61369 #define SCTP_ENABLE_DEBUG
61370 #define SCTP_DISABLE_DEBUG
61371 #define SCTP_ASSERT(expr, str, func)
61372diff -urNp linux-3.1.1/include/net/sock.h linux-3.1.1/include/net/sock.h
61373--- linux-3.1.1/include/net/sock.h 2011-11-11 15:19:27.000000000 -0500
61374+++ linux-3.1.1/include/net/sock.h 2011-11-16 18:39:08.000000000 -0500
61375@@ -278,7 +278,7 @@ struct sock {
61376 #ifdef CONFIG_RPS
61377 __u32 sk_rxhash;
61378 #endif
61379- atomic_t sk_drops;
61380+ atomic_unchecked_t sk_drops;
61381 int sk_rcvbuf;
61382
61383 struct sk_filter __rcu *sk_filter;
61384@@ -1391,7 +1391,7 @@ static inline void sk_nocaps_add(struct
61385 }
61386
61387 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
61388- char __user *from, char *to,
61389+ char __user *from, unsigned char *to,
61390 int copy, int offset)
61391 {
61392 if (skb->ip_summed == CHECKSUM_NONE) {
61393diff -urNp linux-3.1.1/include/net/tcp.h linux-3.1.1/include/net/tcp.h
61394--- linux-3.1.1/include/net/tcp.h 2011-11-11 15:19:27.000000000 -0500
61395+++ linux-3.1.1/include/net/tcp.h 2011-11-16 18:39:08.000000000 -0500
61396@@ -1401,8 +1401,8 @@ enum tcp_seq_states {
61397 struct tcp_seq_afinfo {
61398 char *name;
61399 sa_family_t family;
61400- struct file_operations seq_fops;
61401- struct seq_operations seq_ops;
61402+ file_operations_no_const seq_fops;
61403+ seq_operations_no_const seq_ops;
61404 };
61405
61406 struct tcp_iter_state {
61407diff -urNp linux-3.1.1/include/net/udp.h linux-3.1.1/include/net/udp.h
61408--- linux-3.1.1/include/net/udp.h 2011-11-11 15:19:27.000000000 -0500
61409+++ linux-3.1.1/include/net/udp.h 2011-11-16 18:39:08.000000000 -0500
61410@@ -234,8 +234,8 @@ struct udp_seq_afinfo {
61411 char *name;
61412 sa_family_t family;
61413 struct udp_table *udp_table;
61414- struct file_operations seq_fops;
61415- struct seq_operations seq_ops;
61416+ file_operations_no_const seq_fops;
61417+ seq_operations_no_const seq_ops;
61418 };
61419
61420 struct udp_iter_state {
61421diff -urNp linux-3.1.1/include/net/xfrm.h linux-3.1.1/include/net/xfrm.h
61422--- linux-3.1.1/include/net/xfrm.h 2011-11-11 15:19:27.000000000 -0500
61423+++ linux-3.1.1/include/net/xfrm.h 2011-11-16 18:39:08.000000000 -0500
61424@@ -505,7 +505,7 @@ struct xfrm_policy {
61425 struct timer_list timer;
61426
61427 struct flow_cache_object flo;
61428- atomic_t genid;
61429+ atomic_unchecked_t genid;
61430 u32 priority;
61431 u32 index;
61432 struct xfrm_mark mark;
61433diff -urNp linux-3.1.1/include/rdma/iw_cm.h linux-3.1.1/include/rdma/iw_cm.h
61434--- linux-3.1.1/include/rdma/iw_cm.h 2011-11-11 15:19:27.000000000 -0500
61435+++ linux-3.1.1/include/rdma/iw_cm.h 2011-11-16 18:39:08.000000000 -0500
61436@@ -120,7 +120,7 @@ struct iw_cm_verbs {
61437 int backlog);
61438
61439 int (*destroy_listen)(struct iw_cm_id *cm_id);
61440-};
61441+} __no_const;
61442
61443 /**
61444 * iw_create_cm_id - Create an IW CM identifier.
61445diff -urNp linux-3.1.1/include/scsi/libfc.h linux-3.1.1/include/scsi/libfc.h
61446--- linux-3.1.1/include/scsi/libfc.h 2011-11-11 15:19:27.000000000 -0500
61447+++ linux-3.1.1/include/scsi/libfc.h 2011-11-16 18:39:08.000000000 -0500
61448@@ -758,6 +758,7 @@ struct libfc_function_template {
61449 */
61450 void (*disc_stop_final) (struct fc_lport *);
61451 };
61452+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
61453
61454 /**
61455 * struct fc_disc - Discovery context
61456@@ -861,7 +862,7 @@ struct fc_lport {
61457 struct fc_vport *vport;
61458
61459 /* Operational Information */
61460- struct libfc_function_template tt;
61461+ libfc_function_template_no_const tt;
61462 u8 link_up;
61463 u8 qfull;
61464 enum fc_lport_state state;
61465diff -urNp linux-3.1.1/include/scsi/scsi_device.h linux-3.1.1/include/scsi/scsi_device.h
61466--- linux-3.1.1/include/scsi/scsi_device.h 2011-11-11 15:19:27.000000000 -0500
61467+++ linux-3.1.1/include/scsi/scsi_device.h 2011-11-16 18:39:08.000000000 -0500
61468@@ -161,9 +161,9 @@ struct scsi_device {
61469 unsigned int max_device_blocked; /* what device_blocked counts down from */
61470 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
61471
61472- atomic_t iorequest_cnt;
61473- atomic_t iodone_cnt;
61474- atomic_t ioerr_cnt;
61475+ atomic_unchecked_t iorequest_cnt;
61476+ atomic_unchecked_t iodone_cnt;
61477+ atomic_unchecked_t ioerr_cnt;
61478
61479 struct device sdev_gendev,
61480 sdev_dev;
61481diff -urNp linux-3.1.1/include/scsi/scsi_transport_fc.h linux-3.1.1/include/scsi/scsi_transport_fc.h
61482--- linux-3.1.1/include/scsi/scsi_transport_fc.h 2011-11-11 15:19:27.000000000 -0500
61483+++ linux-3.1.1/include/scsi/scsi_transport_fc.h 2011-11-16 18:39:08.000000000 -0500
61484@@ -711,7 +711,7 @@ struct fc_function_template {
61485 unsigned long show_host_system_hostname:1;
61486
61487 unsigned long disable_target_scan:1;
61488-};
61489+} __do_const;
61490
61491
61492 /**
61493diff -urNp linux-3.1.1/include/sound/ak4xxx-adda.h linux-3.1.1/include/sound/ak4xxx-adda.h
61494--- linux-3.1.1/include/sound/ak4xxx-adda.h 2011-11-11 15:19:27.000000000 -0500
61495+++ linux-3.1.1/include/sound/ak4xxx-adda.h 2011-11-16 18:39:08.000000000 -0500
61496@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
61497 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
61498 unsigned char val);
61499 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
61500-};
61501+} __no_const;
61502
61503 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
61504
61505diff -urNp linux-3.1.1/include/sound/hwdep.h linux-3.1.1/include/sound/hwdep.h
61506--- linux-3.1.1/include/sound/hwdep.h 2011-11-11 15:19:27.000000000 -0500
61507+++ linux-3.1.1/include/sound/hwdep.h 2011-11-16 18:39:08.000000000 -0500
61508@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
61509 struct snd_hwdep_dsp_status *status);
61510 int (*dsp_load)(struct snd_hwdep *hw,
61511 struct snd_hwdep_dsp_image *image);
61512-};
61513+} __no_const;
61514
61515 struct snd_hwdep {
61516 struct snd_card *card;
61517diff -urNp linux-3.1.1/include/sound/info.h linux-3.1.1/include/sound/info.h
61518--- linux-3.1.1/include/sound/info.h 2011-11-11 15:19:27.000000000 -0500
61519+++ linux-3.1.1/include/sound/info.h 2011-11-16 18:39:08.000000000 -0500
61520@@ -44,7 +44,7 @@ struct snd_info_entry_text {
61521 struct snd_info_buffer *buffer);
61522 void (*write)(struct snd_info_entry *entry,
61523 struct snd_info_buffer *buffer);
61524-};
61525+} __no_const;
61526
61527 struct snd_info_entry_ops {
61528 int (*open)(struct snd_info_entry *entry,
61529diff -urNp linux-3.1.1/include/sound/pcm.h linux-3.1.1/include/sound/pcm.h
61530--- linux-3.1.1/include/sound/pcm.h 2011-11-11 15:19:27.000000000 -0500
61531+++ linux-3.1.1/include/sound/pcm.h 2011-11-16 18:39:08.000000000 -0500
61532@@ -81,6 +81,7 @@ struct snd_pcm_ops {
61533 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
61534 int (*ack)(struct snd_pcm_substream *substream);
61535 };
61536+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
61537
61538 /*
61539 *
61540diff -urNp linux-3.1.1/include/sound/sb16_csp.h linux-3.1.1/include/sound/sb16_csp.h
61541--- linux-3.1.1/include/sound/sb16_csp.h 2011-11-11 15:19:27.000000000 -0500
61542+++ linux-3.1.1/include/sound/sb16_csp.h 2011-11-16 18:39:08.000000000 -0500
61543@@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
61544 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
61545 int (*csp_stop) (struct snd_sb_csp * p);
61546 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
61547-};
61548+} __no_const;
61549
61550 /*
61551 * CSP private data
61552diff -urNp linux-3.1.1/include/sound/soc.h linux-3.1.1/include/sound/soc.h
61553--- linux-3.1.1/include/sound/soc.h 2011-11-11 15:19:27.000000000 -0500
61554+++ linux-3.1.1/include/sound/soc.h 2011-11-16 18:39:08.000000000 -0500
61555@@ -676,7 +676,7 @@ struct snd_soc_platform_driver {
61556 /* platform IO - used for platform DAPM */
61557 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
61558 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
61559-};
61560+} __do_const;
61561
61562 struct snd_soc_platform {
61563 const char *name;
61564diff -urNp linux-3.1.1/include/sound/ymfpci.h linux-3.1.1/include/sound/ymfpci.h
61565--- linux-3.1.1/include/sound/ymfpci.h 2011-11-11 15:19:27.000000000 -0500
61566+++ linux-3.1.1/include/sound/ymfpci.h 2011-11-16 18:39:08.000000000 -0500
61567@@ -358,7 +358,7 @@ struct snd_ymfpci {
61568 spinlock_t reg_lock;
61569 spinlock_t voice_lock;
61570 wait_queue_head_t interrupt_sleep;
61571- atomic_t interrupt_sleep_count;
61572+ atomic_unchecked_t interrupt_sleep_count;
61573 struct snd_info_entry *proc_entry;
61574 const struct firmware *dsp_microcode;
61575 const struct firmware *controller_microcode;
61576diff -urNp linux-3.1.1/include/target/target_core_base.h linux-3.1.1/include/target/target_core_base.h
61577--- linux-3.1.1/include/target/target_core_base.h 2011-11-11 15:19:27.000000000 -0500
61578+++ linux-3.1.1/include/target/target_core_base.h 2011-11-16 18:39:08.000000000 -0500
61579@@ -356,7 +356,7 @@ struct t10_reservation_ops {
61580 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
61581 int (*t10_pr_register)(struct se_cmd *);
61582 int (*t10_pr_clear)(struct se_cmd *);
61583-};
61584+} __no_const;
61585
61586 struct t10_reservation {
61587 /* Reservation effects all target ports */
61588@@ -496,8 +496,8 @@ struct se_cmd {
61589 atomic_t t_task_cdbs_left;
61590 atomic_t t_task_cdbs_ex_left;
61591 atomic_t t_task_cdbs_timeout_left;
61592- atomic_t t_task_cdbs_sent;
61593- atomic_t t_transport_aborted;
61594+ atomic_unchecked_t t_task_cdbs_sent;
61595+ atomic_unchecked_t t_transport_aborted;
61596 atomic_t t_transport_active;
61597 atomic_t t_transport_complete;
61598 atomic_t t_transport_queue_active;
61599@@ -744,7 +744,7 @@ struct se_device {
61600 atomic_t active_cmds;
61601 atomic_t simple_cmds;
61602 atomic_t depth_left;
61603- atomic_t dev_ordered_id;
61604+ atomic_unchecked_t dev_ordered_id;
61605 atomic_t dev_tur_active;
61606 atomic_t execute_tasks;
61607 atomic_t dev_status_thr_count;
61608diff -urNp linux-3.1.1/include/trace/events/irq.h linux-3.1.1/include/trace/events/irq.h
61609--- linux-3.1.1/include/trace/events/irq.h 2011-11-11 15:19:27.000000000 -0500
61610+++ linux-3.1.1/include/trace/events/irq.h 2011-11-16 18:39:08.000000000 -0500
61611@@ -36,7 +36,7 @@ struct softirq_action;
61612 */
61613 TRACE_EVENT(irq_handler_entry,
61614
61615- TP_PROTO(int irq, struct irqaction *action),
61616+ TP_PROTO(int irq, const struct irqaction *action),
61617
61618 TP_ARGS(irq, action),
61619
61620@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
61621 */
61622 TRACE_EVENT(irq_handler_exit,
61623
61624- TP_PROTO(int irq, struct irqaction *action, int ret),
61625+ TP_PROTO(int irq, const struct irqaction *action, int ret),
61626
61627 TP_ARGS(irq, action, ret),
61628
61629diff -urNp linux-3.1.1/include/video/udlfb.h linux-3.1.1/include/video/udlfb.h
61630--- linux-3.1.1/include/video/udlfb.h 2011-11-11 15:19:27.000000000 -0500
61631+++ linux-3.1.1/include/video/udlfb.h 2011-11-16 18:39:08.000000000 -0500
61632@@ -51,10 +51,10 @@ struct dlfb_data {
61633 int base8;
61634 u32 pseudo_palette[256];
61635 /* blit-only rendering path metrics, exposed through sysfs */
61636- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
61637- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
61638- atomic_t bytes_sent; /* to usb, after compression including overhead */
61639- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
61640+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
61641+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
61642+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
61643+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
61644 };
61645
61646 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
61647diff -urNp linux-3.1.1/include/video/uvesafb.h linux-3.1.1/include/video/uvesafb.h
61648--- linux-3.1.1/include/video/uvesafb.h 2011-11-11 15:19:27.000000000 -0500
61649+++ linux-3.1.1/include/video/uvesafb.h 2011-11-16 18:39:08.000000000 -0500
61650@@ -177,6 +177,7 @@ struct uvesafb_par {
61651 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
61652 u8 pmi_setpal; /* PMI for palette changes */
61653 u16 *pmi_base; /* protected mode interface location */
61654+ u8 *pmi_code; /* protected mode code location */
61655 void *pmi_start;
61656 void *pmi_pal;
61657 u8 *vbe_state_orig; /*
61658diff -urNp linux-3.1.1/init/do_mounts.c linux-3.1.1/init/do_mounts.c
61659--- linux-3.1.1/init/do_mounts.c 2011-11-11 15:19:27.000000000 -0500
61660+++ linux-3.1.1/init/do_mounts.c 2011-11-16 18:39:08.000000000 -0500
61661@@ -287,11 +287,11 @@ static void __init get_fs_names(char *pa
61662
61663 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
61664 {
61665- int err = sys_mount(name, "/root", fs, flags, data);
61666+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
61667 if (err)
61668 return err;
61669
61670- sys_chdir((const char __user __force *)"/root");
61671+ sys_chdir((const char __force_user*)"/root");
61672 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
61673 printk(KERN_INFO
61674 "VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
61675@@ -383,18 +383,18 @@ void __init change_floppy(char *fmt, ...
61676 va_start(args, fmt);
61677 vsprintf(buf, fmt, args);
61678 va_end(args);
61679- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
61680+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
61681 if (fd >= 0) {
61682 sys_ioctl(fd, FDEJECT, 0);
61683 sys_close(fd);
61684 }
61685 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
61686- fd = sys_open("/dev/console", O_RDWR, 0);
61687+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
61688 if (fd >= 0) {
61689 sys_ioctl(fd, TCGETS, (long)&termios);
61690 termios.c_lflag &= ~ICANON;
61691 sys_ioctl(fd, TCSETSF, (long)&termios);
61692- sys_read(fd, &c, 1);
61693+ sys_read(fd, (char __user *)&c, 1);
61694 termios.c_lflag |= ICANON;
61695 sys_ioctl(fd, TCSETSF, (long)&termios);
61696 sys_close(fd);
61697@@ -488,6 +488,6 @@ void __init prepare_namespace(void)
61698 mount_root();
61699 out:
61700 devtmpfs_mount("dev");
61701- sys_mount(".", "/", NULL, MS_MOVE, NULL);
61702- sys_chroot((const char __user __force *)".");
61703+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
61704+ sys_chroot((const char __force_user *)".");
61705 }
61706diff -urNp linux-3.1.1/init/do_mounts.h linux-3.1.1/init/do_mounts.h
61707--- linux-3.1.1/init/do_mounts.h 2011-11-11 15:19:27.000000000 -0500
61708+++ linux-3.1.1/init/do_mounts.h 2011-11-16 18:39:08.000000000 -0500
61709@@ -15,15 +15,15 @@ extern int root_mountflags;
61710
61711 static inline int create_dev(char *name, dev_t dev)
61712 {
61713- sys_unlink(name);
61714- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
61715+ sys_unlink((char __force_user *)name);
61716+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
61717 }
61718
61719 #if BITS_PER_LONG == 32
61720 static inline u32 bstat(char *name)
61721 {
61722 struct stat64 stat;
61723- if (sys_stat64(name, &stat) != 0)
61724+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
61725 return 0;
61726 if (!S_ISBLK(stat.st_mode))
61727 return 0;
61728@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
61729 static inline u32 bstat(char *name)
61730 {
61731 struct stat stat;
61732- if (sys_newstat(name, &stat) != 0)
61733+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
61734 return 0;
61735 if (!S_ISBLK(stat.st_mode))
61736 return 0;
61737diff -urNp linux-3.1.1/init/do_mounts_initrd.c linux-3.1.1/init/do_mounts_initrd.c
61738--- linux-3.1.1/init/do_mounts_initrd.c 2011-11-11 15:19:27.000000000 -0500
61739+++ linux-3.1.1/init/do_mounts_initrd.c 2011-11-16 18:39:08.000000000 -0500
61740@@ -44,13 +44,13 @@ static void __init handle_initrd(void)
61741 create_dev("/dev/root.old", Root_RAM0);
61742 /* mount initrd on rootfs' /root */
61743 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
61744- sys_mkdir("/old", 0700);
61745- root_fd = sys_open("/", 0, 0);
61746- old_fd = sys_open("/old", 0, 0);
61747+ sys_mkdir((const char __force_user *)"/old", 0700);
61748+ root_fd = sys_open((const char __force_user *)"/", 0, 0);
61749+ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
61750 /* move initrd over / and chdir/chroot in initrd root */
61751- sys_chdir("/root");
61752- sys_mount(".", "/", NULL, MS_MOVE, NULL);
61753- sys_chroot(".");
61754+ sys_chdir((const char __force_user *)"/root");
61755+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
61756+ sys_chroot((const char __force_user *)".");
61757
61758 /*
61759 * In case that a resume from disk is carried out by linuxrc or one of
61760@@ -67,15 +67,15 @@ static void __init handle_initrd(void)
61761
61762 /* move initrd to rootfs' /old */
61763 sys_fchdir(old_fd);
61764- sys_mount("/", ".", NULL, MS_MOVE, NULL);
61765+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
61766 /* switch root and cwd back to / of rootfs */
61767 sys_fchdir(root_fd);
61768- sys_chroot(".");
61769+ sys_chroot((const char __force_user *)".");
61770 sys_close(old_fd);
61771 sys_close(root_fd);
61772
61773 if (new_decode_dev(real_root_dev) == Root_RAM0) {
61774- sys_chdir("/old");
61775+ sys_chdir((const char __force_user *)"/old");
61776 return;
61777 }
61778
61779@@ -83,17 +83,17 @@ static void __init handle_initrd(void)
61780 mount_root();
61781
61782 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
61783- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
61784+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
61785 if (!error)
61786 printk("okay\n");
61787 else {
61788- int fd = sys_open("/dev/root.old", O_RDWR, 0);
61789+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
61790 if (error == -ENOENT)
61791 printk("/initrd does not exist. Ignored.\n");
61792 else
61793 printk("failed\n");
61794 printk(KERN_NOTICE "Unmounting old root\n");
61795- sys_umount("/old", MNT_DETACH);
61796+ sys_umount((char __force_user *)"/old", MNT_DETACH);
61797 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
61798 if (fd < 0) {
61799 error = fd;
61800@@ -116,11 +116,11 @@ int __init initrd_load(void)
61801 * mounted in the normal path.
61802 */
61803 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
61804- sys_unlink("/initrd.image");
61805+ sys_unlink((const char __force_user *)"/initrd.image");
61806 handle_initrd();
61807 return 1;
61808 }
61809 }
61810- sys_unlink("/initrd.image");
61811+ sys_unlink((const char __force_user *)"/initrd.image");
61812 return 0;
61813 }
61814diff -urNp linux-3.1.1/init/do_mounts_md.c linux-3.1.1/init/do_mounts_md.c
61815--- linux-3.1.1/init/do_mounts_md.c 2011-11-11 15:19:27.000000000 -0500
61816+++ linux-3.1.1/init/do_mounts_md.c 2011-11-16 18:39:08.000000000 -0500
61817@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
61818 partitioned ? "_d" : "", minor,
61819 md_setup_args[ent].device_names);
61820
61821- fd = sys_open(name, 0, 0);
61822+ fd = sys_open((char __force_user *)name, 0, 0);
61823 if (fd < 0) {
61824 printk(KERN_ERR "md: open failed - cannot start "
61825 "array %s\n", name);
61826@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
61827 * array without it
61828 */
61829 sys_close(fd);
61830- fd = sys_open(name, 0, 0);
61831+ fd = sys_open((char __force_user *)name, 0, 0);
61832 sys_ioctl(fd, BLKRRPART, 0);
61833 }
61834 sys_close(fd);
61835@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
61836
61837 wait_for_device_probe();
61838
61839- fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
61840+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
61841 if (fd >= 0) {
61842 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
61843 sys_close(fd);
61844diff -urNp linux-3.1.1/init/initramfs.c linux-3.1.1/init/initramfs.c
61845--- linux-3.1.1/init/initramfs.c 2011-11-11 15:19:27.000000000 -0500
61846+++ linux-3.1.1/init/initramfs.c 2011-11-16 18:39:08.000000000 -0500
61847@@ -74,7 +74,7 @@ static void __init free_hash(void)
61848 }
61849 }
61850
61851-static long __init do_utime(char __user *filename, time_t mtime)
61852+static long __init do_utime(__force char __user *filename, time_t mtime)
61853 {
61854 struct timespec t[2];
61855
61856@@ -109,7 +109,7 @@ static void __init dir_utime(void)
61857 struct dir_entry *de, *tmp;
61858 list_for_each_entry_safe(de, tmp, &dir_list, list) {
61859 list_del(&de->list);
61860- do_utime(de->name, de->mtime);
61861+ do_utime((char __force_user *)de->name, de->mtime);
61862 kfree(de->name);
61863 kfree(de);
61864 }
61865@@ -271,7 +271,7 @@ static int __init maybe_link(void)
61866 if (nlink >= 2) {
61867 char *old = find_link(major, minor, ino, mode, collected);
61868 if (old)
61869- return (sys_link(old, collected) < 0) ? -1 : 1;
61870+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
61871 }
61872 return 0;
61873 }
61874@@ -280,11 +280,11 @@ static void __init clean_path(char *path
61875 {
61876 struct stat st;
61877
61878- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
61879+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
61880 if (S_ISDIR(st.st_mode))
61881- sys_rmdir(path);
61882+ sys_rmdir((char __force_user *)path);
61883 else
61884- sys_unlink(path);
61885+ sys_unlink((char __force_user *)path);
61886 }
61887 }
61888
61889@@ -305,7 +305,7 @@ static int __init do_name(void)
61890 int openflags = O_WRONLY|O_CREAT;
61891 if (ml != 1)
61892 openflags |= O_TRUNC;
61893- wfd = sys_open(collected, openflags, mode);
61894+ wfd = sys_open((char __force_user *)collected, openflags, mode);
61895
61896 if (wfd >= 0) {
61897 sys_fchown(wfd, uid, gid);
61898@@ -317,17 +317,17 @@ static int __init do_name(void)
61899 }
61900 }
61901 } else if (S_ISDIR(mode)) {
61902- sys_mkdir(collected, mode);
61903- sys_chown(collected, uid, gid);
61904- sys_chmod(collected, mode);
61905+ sys_mkdir((char __force_user *)collected, mode);
61906+ sys_chown((char __force_user *)collected, uid, gid);
61907+ sys_chmod((char __force_user *)collected, mode);
61908 dir_add(collected, mtime);
61909 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
61910 S_ISFIFO(mode) || S_ISSOCK(mode)) {
61911 if (maybe_link() == 0) {
61912- sys_mknod(collected, mode, rdev);
61913- sys_chown(collected, uid, gid);
61914- sys_chmod(collected, mode);
61915- do_utime(collected, mtime);
61916+ sys_mknod((char __force_user *)collected, mode, rdev);
61917+ sys_chown((char __force_user *)collected, uid, gid);
61918+ sys_chmod((char __force_user *)collected, mode);
61919+ do_utime((char __force_user *)collected, mtime);
61920 }
61921 }
61922 return 0;
61923@@ -336,15 +336,15 @@ static int __init do_name(void)
61924 static int __init do_copy(void)
61925 {
61926 if (count >= body_len) {
61927- sys_write(wfd, victim, body_len);
61928+ sys_write(wfd, (char __force_user *)victim, body_len);
61929 sys_close(wfd);
61930- do_utime(vcollected, mtime);
61931+ do_utime((char __force_user *)vcollected, mtime);
61932 kfree(vcollected);
61933 eat(body_len);
61934 state = SkipIt;
61935 return 0;
61936 } else {
61937- sys_write(wfd, victim, count);
61938+ sys_write(wfd, (char __force_user *)victim, count);
61939 body_len -= count;
61940 eat(count);
61941 return 1;
61942@@ -355,9 +355,9 @@ static int __init do_symlink(void)
61943 {
61944 collected[N_ALIGN(name_len) + body_len] = '\0';
61945 clean_path(collected, 0);
61946- sys_symlink(collected + N_ALIGN(name_len), collected);
61947- sys_lchown(collected, uid, gid);
61948- do_utime(collected, mtime);
61949+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
61950+ sys_lchown((char __force_user *)collected, uid, gid);
61951+ do_utime((char __force_user *)collected, mtime);
61952 state = SkipIt;
61953 next_state = Reset;
61954 return 0;
61955diff -urNp linux-3.1.1/init/Kconfig linux-3.1.1/init/Kconfig
61956--- linux-3.1.1/init/Kconfig 2011-11-11 15:19:27.000000000 -0500
61957+++ linux-3.1.1/init/Kconfig 2011-11-16 18:39:08.000000000 -0500
61958@@ -1202,7 +1202,7 @@ config SLUB_DEBUG
61959
61960 config COMPAT_BRK
61961 bool "Disable heap randomization"
61962- default y
61963+ default n
61964 help
61965 Randomizing heap placement makes heap exploits harder, but it
61966 also breaks ancient binaries (including anything libc5 based).
61967diff -urNp linux-3.1.1/init/main.c linux-3.1.1/init/main.c
61968--- linux-3.1.1/init/main.c 2011-11-11 15:19:27.000000000 -0500
61969+++ linux-3.1.1/init/main.c 2011-11-16 18:40:44.000000000 -0500
61970@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void)
61971 extern void tc_init(void);
61972 #endif
61973
61974+extern void grsecurity_init(void);
61975+
61976 /*
61977 * Debug helper: via this flag we know that we are in 'early bootup code'
61978 * where only the boot processor is running with IRQ disabled. This means
61979@@ -149,6 +151,49 @@ static int __init set_reset_devices(char
61980
61981 __setup("reset_devices", set_reset_devices);
61982
61983+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
61984+extern char pax_enter_kernel_user[];
61985+extern char pax_exit_kernel_user[];
61986+extern pgdval_t clone_pgd_mask;
61987+#endif
61988+
61989+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
61990+static int __init setup_pax_nouderef(char *str)
61991+{
61992+#ifdef CONFIG_X86_32
61993+ unsigned int cpu;
61994+ struct desc_struct *gdt;
61995+
61996+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
61997+ gdt = get_cpu_gdt_table(cpu);
61998+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
61999+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
62000+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
62001+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
62002+ }
62003+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
62004+#else
62005+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
62006+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
62007+ clone_pgd_mask = ~(pgdval_t)0UL;
62008+#endif
62009+
62010+ return 0;
62011+}
62012+early_param("pax_nouderef", setup_pax_nouderef);
62013+#endif
62014+
62015+#ifdef CONFIG_PAX_SOFTMODE
62016+int pax_softmode;
62017+
62018+static int __init setup_pax_softmode(char *str)
62019+{
62020+ get_option(&str, &pax_softmode);
62021+ return 1;
62022+}
62023+__setup("pax_softmode=", setup_pax_softmode);
62024+#endif
62025+
62026 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
62027 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
62028 static const char *panic_later, *panic_param;
62029@@ -678,6 +723,7 @@ int __init_or_module do_one_initcall(ini
62030 {
62031 int count = preempt_count();
62032 int ret;
62033+ const char *msg1 = "", *msg2 = "";
62034
62035 if (initcall_debug)
62036 ret = do_one_initcall_debug(fn);
62037@@ -690,15 +736,15 @@ int __init_or_module do_one_initcall(ini
62038 sprintf(msgbuf, "error code %d ", ret);
62039
62040 if (preempt_count() != count) {
62041- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
62042+ msg1 = " preemption imbalance";
62043 preempt_count() = count;
62044 }
62045 if (irqs_disabled()) {
62046- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
62047+ msg2 = " disabled interrupts";
62048 local_irq_enable();
62049 }
62050- if (msgbuf[0]) {
62051- printk("initcall %pF returned with %s\n", fn, msgbuf);
62052+ if (msgbuf[0] || *msg1 || *msg2) {
62053+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
62054 }
62055
62056 return ret;
62057@@ -817,7 +863,7 @@ static int __init kernel_init(void * unu
62058 do_basic_setup();
62059
62060 /* Open the /dev/console on the rootfs, this should never fail */
62061- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
62062+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
62063 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
62064
62065 (void) sys_dup(0);
62066@@ -830,11 +876,13 @@ static int __init kernel_init(void * unu
62067 if (!ramdisk_execute_command)
62068 ramdisk_execute_command = "/init";
62069
62070- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
62071+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
62072 ramdisk_execute_command = NULL;
62073 prepare_namespace();
62074 }
62075
62076+ grsecurity_init();
62077+
62078 /*
62079 * Ok, we have completed the initial bootup, and
62080 * we're essentially up and running. Get rid of the
62081diff -urNp linux-3.1.1/ipc/mqueue.c linux-3.1.1/ipc/mqueue.c
62082--- linux-3.1.1/ipc/mqueue.c 2011-11-11 15:19:27.000000000 -0500
62083+++ linux-3.1.1/ipc/mqueue.c 2011-11-16 18:40:44.000000000 -0500
62084@@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(st
62085 mq_bytes = (mq_msg_tblsz +
62086 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
62087
62088+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
62089 spin_lock(&mq_lock);
62090 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
62091 u->mq_bytes + mq_bytes > task_rlimit(p, RLIMIT_MSGQUEUE)) {
62092diff -urNp linux-3.1.1/ipc/msg.c linux-3.1.1/ipc/msg.c
62093--- linux-3.1.1/ipc/msg.c 2011-11-11 15:19:27.000000000 -0500
62094+++ linux-3.1.1/ipc/msg.c 2011-11-16 18:39:08.000000000 -0500
62095@@ -309,18 +309,19 @@ static inline int msg_security(struct ke
62096 return security_msg_queue_associate(msq, msgflg);
62097 }
62098
62099+static struct ipc_ops msg_ops = {
62100+ .getnew = newque,
62101+ .associate = msg_security,
62102+ .more_checks = NULL
62103+};
62104+
62105 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
62106 {
62107 struct ipc_namespace *ns;
62108- struct ipc_ops msg_ops;
62109 struct ipc_params msg_params;
62110
62111 ns = current->nsproxy->ipc_ns;
62112
62113- msg_ops.getnew = newque;
62114- msg_ops.associate = msg_security;
62115- msg_ops.more_checks = NULL;
62116-
62117 msg_params.key = key;
62118 msg_params.flg = msgflg;
62119
62120diff -urNp linux-3.1.1/ipc/sem.c linux-3.1.1/ipc/sem.c
62121--- linux-3.1.1/ipc/sem.c 2011-11-11 15:19:27.000000000 -0500
62122+++ linux-3.1.1/ipc/sem.c 2011-11-16 18:40:44.000000000 -0500
62123@@ -318,10 +318,15 @@ static inline int sem_more_checks(struct
62124 return 0;
62125 }
62126
62127+static struct ipc_ops sem_ops = {
62128+ .getnew = newary,
62129+ .associate = sem_security,
62130+ .more_checks = sem_more_checks
62131+};
62132+
62133 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
62134 {
62135 struct ipc_namespace *ns;
62136- struct ipc_ops sem_ops;
62137 struct ipc_params sem_params;
62138
62139 ns = current->nsproxy->ipc_ns;
62140@@ -329,10 +334,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
62141 if (nsems < 0 || nsems > ns->sc_semmsl)
62142 return -EINVAL;
62143
62144- sem_ops.getnew = newary;
62145- sem_ops.associate = sem_security;
62146- sem_ops.more_checks = sem_more_checks;
62147-
62148 sem_params.key = key;
62149 sem_params.flg = semflg;
62150 sem_params.u.nsems = nsems;
62151@@ -848,6 +849,8 @@ static int semctl_main(struct ipc_namesp
62152 int nsems;
62153 struct list_head tasks;
62154
62155+ pax_track_stack();
62156+
62157 sma = sem_lock_check(ns, semid);
62158 if (IS_ERR(sma))
62159 return PTR_ERR(sma);
62160@@ -1295,6 +1298,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
62161 struct ipc_namespace *ns;
62162 struct list_head tasks;
62163
62164+ pax_track_stack();
62165+
62166 ns = current->nsproxy->ipc_ns;
62167
62168 if (nsops < 1 || semid < 0)
62169diff -urNp linux-3.1.1/ipc/shm.c linux-3.1.1/ipc/shm.c
62170--- linux-3.1.1/ipc/shm.c 2011-11-11 15:19:27.000000000 -0500
62171+++ linux-3.1.1/ipc/shm.c 2011-11-16 18:40:44.000000000 -0500
62172@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_name
62173 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
62174 #endif
62175
62176+#ifdef CONFIG_GRKERNSEC
62177+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62178+ const time_t shm_createtime, const uid_t cuid,
62179+ const int shmid);
62180+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62181+ const time_t shm_createtime);
62182+#endif
62183+
62184 void shm_init_ns(struct ipc_namespace *ns)
62185 {
62186 ns->shm_ctlmax = SHMMAX;
62187@@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *
62188 shp->shm_lprid = 0;
62189 shp->shm_atim = shp->shm_dtim = 0;
62190 shp->shm_ctim = get_seconds();
62191+#ifdef CONFIG_GRKERNSEC
62192+ {
62193+ struct timespec timeval;
62194+ do_posix_clock_monotonic_gettime(&timeval);
62195+
62196+ shp->shm_createtime = timeval.tv_sec;
62197+ }
62198+#endif
62199 shp->shm_segsz = size;
62200 shp->shm_nattch = 0;
62201 shp->shm_file = file;
62202@@ -559,18 +575,19 @@ static inline int shm_more_checks(struct
62203 return 0;
62204 }
62205
62206+static struct ipc_ops shm_ops = {
62207+ .getnew = newseg,
62208+ .associate = shm_security,
62209+ .more_checks = shm_more_checks
62210+};
62211+
62212 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
62213 {
62214 struct ipc_namespace *ns;
62215- struct ipc_ops shm_ops;
62216 struct ipc_params shm_params;
62217
62218 ns = current->nsproxy->ipc_ns;
62219
62220- shm_ops.getnew = newseg;
62221- shm_ops.associate = shm_security;
62222- shm_ops.more_checks = shm_more_checks;
62223-
62224 shm_params.key = key;
62225 shm_params.flg = shmflg;
62226 shm_params.u.size = size;
62227@@ -870,8 +887,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int,
62228 case SHM_LOCK:
62229 case SHM_UNLOCK:
62230 {
62231- struct file *uninitialized_var(shm_file);
62232-
62233 lru_add_drain_all(); /* drain pagevecs to lru lists */
62234
62235 shp = shm_lock_check(ns, shmid);
62236@@ -1004,9 +1019,21 @@ long do_shmat(int shmid, char __user *sh
62237 if (err)
62238 goto out_unlock;
62239
62240+#ifdef CONFIG_GRKERNSEC
62241+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
62242+ shp->shm_perm.cuid, shmid) ||
62243+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
62244+ err = -EACCES;
62245+ goto out_unlock;
62246+ }
62247+#endif
62248+
62249 path = shp->shm_file->f_path;
62250 path_get(&path);
62251 shp->shm_nattch++;
62252+#ifdef CONFIG_GRKERNSEC
62253+ shp->shm_lapid = current->pid;
62254+#endif
62255 size = i_size_read(path.dentry->d_inode);
62256 shm_unlock(shp);
62257
62258diff -urNp linux-3.1.1/kernel/acct.c linux-3.1.1/kernel/acct.c
62259--- linux-3.1.1/kernel/acct.c 2011-11-11 15:19:27.000000000 -0500
62260+++ linux-3.1.1/kernel/acct.c 2011-11-16 18:39:08.000000000 -0500
62261@@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_a
62262 */
62263 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
62264 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
62265- file->f_op->write(file, (char *)&ac,
62266+ file->f_op->write(file, (char __force_user *)&ac,
62267 sizeof(acct_t), &file->f_pos);
62268 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
62269 set_fs(fs);
62270diff -urNp linux-3.1.1/kernel/audit.c linux-3.1.1/kernel/audit.c
62271--- linux-3.1.1/kernel/audit.c 2011-11-11 15:19:27.000000000 -0500
62272+++ linux-3.1.1/kernel/audit.c 2011-11-16 18:39:08.000000000 -0500
62273@@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
62274 3) suppressed due to audit_rate_limit
62275 4) suppressed due to audit_backlog_limit
62276 */
62277-static atomic_t audit_lost = ATOMIC_INIT(0);
62278+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
62279
62280 /* The netlink socket. */
62281 static struct sock *audit_sock;
62282@@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
62283 unsigned long now;
62284 int print;
62285
62286- atomic_inc(&audit_lost);
62287+ atomic_inc_unchecked(&audit_lost);
62288
62289 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
62290
62291@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
62292 printk(KERN_WARNING
62293 "audit: audit_lost=%d audit_rate_limit=%d "
62294 "audit_backlog_limit=%d\n",
62295- atomic_read(&audit_lost),
62296+ atomic_read_unchecked(&audit_lost),
62297 audit_rate_limit,
62298 audit_backlog_limit);
62299 audit_panic(message);
62300@@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_b
62301 status_set.pid = audit_pid;
62302 status_set.rate_limit = audit_rate_limit;
62303 status_set.backlog_limit = audit_backlog_limit;
62304- status_set.lost = atomic_read(&audit_lost);
62305+ status_set.lost = atomic_read_unchecked(&audit_lost);
62306 status_set.backlog = skb_queue_len(&audit_skb_queue);
62307 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
62308 &status_set, sizeof(status_set));
62309diff -urNp linux-3.1.1/kernel/auditsc.c linux-3.1.1/kernel/auditsc.c
62310--- linux-3.1.1/kernel/auditsc.c 2011-11-11 15:19:27.000000000 -0500
62311+++ linux-3.1.1/kernel/auditsc.c 2011-11-16 18:39:08.000000000 -0500
62312@@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_conte
62313 }
62314
62315 /* global counter which is incremented every time something logs in */
62316-static atomic_t session_id = ATOMIC_INIT(0);
62317+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
62318
62319 /**
62320 * audit_set_loginuid - set a task's audit_context loginuid
62321@@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT
62322 */
62323 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
62324 {
62325- unsigned int sessionid = atomic_inc_return(&session_id);
62326+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
62327 struct audit_context *context = task->audit_context;
62328
62329 if (context && context->in_syscall) {
62330diff -urNp linux-3.1.1/kernel/capability.c linux-3.1.1/kernel/capability.c
62331--- linux-3.1.1/kernel/capability.c 2011-11-11 15:19:27.000000000 -0500
62332+++ linux-3.1.1/kernel/capability.c 2011-11-16 18:40:44.000000000 -0500
62333@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_
62334 * before modification is attempted and the application
62335 * fails.
62336 */
62337+ if (tocopy > ARRAY_SIZE(kdata))
62338+ return -EFAULT;
62339+
62340 if (copy_to_user(dataptr, kdata, tocopy
62341 * sizeof(struct __user_cap_data_struct))) {
62342 return -EFAULT;
62343@@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *n
62344 BUG();
62345 }
62346
62347- if (security_capable(ns, current_cred(), cap) == 0) {
62348+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
62349 current->flags |= PF_SUPERPRIV;
62350 return true;
62351 }
62352@@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *n
62353 }
62354 EXPORT_SYMBOL(ns_capable);
62355
62356+bool ns_capable_nolog(struct user_namespace *ns, int cap)
62357+{
62358+ if (unlikely(!cap_valid(cap))) {
62359+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
62360+ BUG();
62361+ }
62362+
62363+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
62364+ current->flags |= PF_SUPERPRIV;
62365+ return true;
62366+ }
62367+ return false;
62368+}
62369+EXPORT_SYMBOL(ns_capable_nolog);
62370+
62371+bool capable_nolog(int cap)
62372+{
62373+ return ns_capable_nolog(&init_user_ns, cap);
62374+}
62375+EXPORT_SYMBOL(capable_nolog);
62376+
62377 /**
62378 * task_ns_capable - Determine whether current task has a superior
62379 * capability targeted at a specific task's user namespace.
62380@@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct
62381 }
62382 EXPORT_SYMBOL(task_ns_capable);
62383
62384+bool task_ns_capable_nolog(struct task_struct *t, int cap)
62385+{
62386+ return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
62387+}
62388+EXPORT_SYMBOL(task_ns_capable_nolog);
62389+
62390 /**
62391 * nsown_capable - Check superior capability to one's own user_ns
62392 * @cap: The capability in question
62393diff -urNp linux-3.1.1/kernel/cgroup.c linux-3.1.1/kernel/cgroup.c
62394--- linux-3.1.1/kernel/cgroup.c 2011-11-11 15:19:27.000000000 -0500
62395+++ linux-3.1.1/kernel/cgroup.c 2011-11-16 18:40:44.000000000 -0500
62396@@ -595,6 +595,8 @@ static struct css_set *find_css_set(
62397 struct hlist_head *hhead;
62398 struct cg_cgroup_link *link;
62399
62400+ pax_track_stack();
62401+
62402 /* First see if we already have a cgroup group that matches
62403 * the desired set */
62404 read_lock(&css_set_lock);
62405diff -urNp linux-3.1.1/kernel/compat.c linux-3.1.1/kernel/compat.c
62406--- linux-3.1.1/kernel/compat.c 2011-11-11 15:19:27.000000000 -0500
62407+++ linux-3.1.1/kernel/compat.c 2011-11-16 18:40:44.000000000 -0500
62408@@ -13,6 +13,7 @@
62409
62410 #include <linux/linkage.h>
62411 #include <linux/compat.h>
62412+#include <linux/module.h>
62413 #include <linux/errno.h>
62414 #include <linux/time.h>
62415 #include <linux/signal.h>
62416@@ -167,7 +168,7 @@ static long compat_nanosleep_restart(str
62417 mm_segment_t oldfs;
62418 long ret;
62419
62420- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
62421+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
62422 oldfs = get_fs();
62423 set_fs(KERNEL_DS);
62424 ret = hrtimer_nanosleep_restart(restart);
62425@@ -199,7 +200,7 @@ asmlinkage long compat_sys_nanosleep(str
62426 oldfs = get_fs();
62427 set_fs(KERNEL_DS);
62428 ret = hrtimer_nanosleep(&tu,
62429- rmtp ? (struct timespec __user *)&rmt : NULL,
62430+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
62431 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
62432 set_fs(oldfs);
62433
62434@@ -308,7 +309,7 @@ asmlinkage long compat_sys_sigpending(co
62435 mm_segment_t old_fs = get_fs();
62436
62437 set_fs(KERNEL_DS);
62438- ret = sys_sigpending((old_sigset_t __user *) &s);
62439+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
62440 set_fs(old_fs);
62441 if (ret == 0)
62442 ret = put_user(s, set);
62443@@ -331,8 +332,8 @@ asmlinkage long compat_sys_sigprocmask(i
62444 old_fs = get_fs();
62445 set_fs(KERNEL_DS);
62446 ret = sys_sigprocmask(how,
62447- set ? (old_sigset_t __user *) &s : NULL,
62448- oset ? (old_sigset_t __user *) &s : NULL);
62449+ set ? (old_sigset_t __force_user *) &s : NULL,
62450+ oset ? (old_sigset_t __force_user *) &s : NULL);
62451 set_fs(old_fs);
62452 if (ret == 0)
62453 if (oset)
62454@@ -369,7 +370,7 @@ asmlinkage long compat_sys_old_getrlimit
62455 mm_segment_t old_fs = get_fs();
62456
62457 set_fs(KERNEL_DS);
62458- ret = sys_old_getrlimit(resource, &r);
62459+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
62460 set_fs(old_fs);
62461
62462 if (!ret) {
62463@@ -441,7 +442,7 @@ asmlinkage long compat_sys_getrusage(int
62464 mm_segment_t old_fs = get_fs();
62465
62466 set_fs(KERNEL_DS);
62467- ret = sys_getrusage(who, (struct rusage __user *) &r);
62468+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
62469 set_fs(old_fs);
62470
62471 if (ret)
62472@@ -468,8 +469,8 @@ compat_sys_wait4(compat_pid_t pid, compa
62473 set_fs (KERNEL_DS);
62474 ret = sys_wait4(pid,
62475 (stat_addr ?
62476- (unsigned int __user *) &status : NULL),
62477- options, (struct rusage __user *) &r);
62478+ (unsigned int __force_user *) &status : NULL),
62479+ options, (struct rusage __force_user *) &r);
62480 set_fs (old_fs);
62481
62482 if (ret > 0) {
62483@@ -494,8 +495,8 @@ asmlinkage long compat_sys_waitid(int wh
62484 memset(&info, 0, sizeof(info));
62485
62486 set_fs(KERNEL_DS);
62487- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
62488- uru ? (struct rusage __user *)&ru : NULL);
62489+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
62490+ uru ? (struct rusage __force_user *)&ru : NULL);
62491 set_fs(old_fs);
62492
62493 if ((ret < 0) || (info.si_signo == 0))
62494@@ -625,8 +626,8 @@ long compat_sys_timer_settime(timer_t ti
62495 oldfs = get_fs();
62496 set_fs(KERNEL_DS);
62497 err = sys_timer_settime(timer_id, flags,
62498- (struct itimerspec __user *) &newts,
62499- (struct itimerspec __user *) &oldts);
62500+ (struct itimerspec __force_user *) &newts,
62501+ (struct itimerspec __force_user *) &oldts);
62502 set_fs(oldfs);
62503 if (!err && old && put_compat_itimerspec(old, &oldts))
62504 return -EFAULT;
62505@@ -643,7 +644,7 @@ long compat_sys_timer_gettime(timer_t ti
62506 oldfs = get_fs();
62507 set_fs(KERNEL_DS);
62508 err = sys_timer_gettime(timer_id,
62509- (struct itimerspec __user *) &ts);
62510+ (struct itimerspec __force_user *) &ts);
62511 set_fs(oldfs);
62512 if (!err && put_compat_itimerspec(setting, &ts))
62513 return -EFAULT;
62514@@ -662,7 +663,7 @@ long compat_sys_clock_settime(clockid_t
62515 oldfs = get_fs();
62516 set_fs(KERNEL_DS);
62517 err = sys_clock_settime(which_clock,
62518- (struct timespec __user *) &ts);
62519+ (struct timespec __force_user *) &ts);
62520 set_fs(oldfs);
62521 return err;
62522 }
62523@@ -677,7 +678,7 @@ long compat_sys_clock_gettime(clockid_t
62524 oldfs = get_fs();
62525 set_fs(KERNEL_DS);
62526 err = sys_clock_gettime(which_clock,
62527- (struct timespec __user *) &ts);
62528+ (struct timespec __force_user *) &ts);
62529 set_fs(oldfs);
62530 if (!err && put_compat_timespec(&ts, tp))
62531 return -EFAULT;
62532@@ -697,7 +698,7 @@ long compat_sys_clock_adjtime(clockid_t
62533
62534 oldfs = get_fs();
62535 set_fs(KERNEL_DS);
62536- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
62537+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
62538 set_fs(oldfs);
62539
62540 err = compat_put_timex(utp, &txc);
62541@@ -717,7 +718,7 @@ long compat_sys_clock_getres(clockid_t w
62542 oldfs = get_fs();
62543 set_fs(KERNEL_DS);
62544 err = sys_clock_getres(which_clock,
62545- (struct timespec __user *) &ts);
62546+ (struct timespec __force_user *) &ts);
62547 set_fs(oldfs);
62548 if (!err && tp && put_compat_timespec(&ts, tp))
62549 return -EFAULT;
62550@@ -729,9 +730,9 @@ static long compat_clock_nanosleep_resta
62551 long err;
62552 mm_segment_t oldfs;
62553 struct timespec tu;
62554- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
62555+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
62556
62557- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
62558+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
62559 oldfs = get_fs();
62560 set_fs(KERNEL_DS);
62561 err = clock_nanosleep_restart(restart);
62562@@ -763,8 +764,8 @@ long compat_sys_clock_nanosleep(clockid_
62563 oldfs = get_fs();
62564 set_fs(KERNEL_DS);
62565 err = sys_clock_nanosleep(which_clock, flags,
62566- (struct timespec __user *) &in,
62567- (struct timespec __user *) &out);
62568+ (struct timespec __force_user *) &in,
62569+ (struct timespec __force_user *) &out);
62570 set_fs(oldfs);
62571
62572 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
62573diff -urNp linux-3.1.1/kernel/configs.c linux-3.1.1/kernel/configs.c
62574--- linux-3.1.1/kernel/configs.c 2011-11-11 15:19:27.000000000 -0500
62575+++ linux-3.1.1/kernel/configs.c 2011-11-16 18:40:44.000000000 -0500
62576@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
62577 struct proc_dir_entry *entry;
62578
62579 /* create the current config file */
62580+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
62581+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
62582+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
62583+ &ikconfig_file_ops);
62584+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62585+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
62586+ &ikconfig_file_ops);
62587+#endif
62588+#else
62589 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
62590 &ikconfig_file_ops);
62591+#endif
62592+
62593 if (!entry)
62594 return -ENOMEM;
62595
62596diff -urNp linux-3.1.1/kernel/cred.c linux-3.1.1/kernel/cred.c
62597--- linux-3.1.1/kernel/cred.c 2011-11-11 15:19:27.000000000 -0500
62598+++ linux-3.1.1/kernel/cred.c 2011-11-16 18:40:44.000000000 -0500
62599@@ -158,6 +158,8 @@ static void put_cred_rcu(struct rcu_head
62600 */
62601 void __put_cred(struct cred *cred)
62602 {
62603+ pax_track_stack();
62604+
62605 kdebug("__put_cred(%p{%d,%d})", cred,
62606 atomic_read(&cred->usage),
62607 read_cred_subscribers(cred));
62608@@ -182,6 +184,8 @@ void exit_creds(struct task_struct *tsk)
62609 {
62610 struct cred *cred;
62611
62612+ pax_track_stack();
62613+
62614 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
62615 atomic_read(&tsk->cred->usage),
62616 read_cred_subscribers(tsk->cred));
62617@@ -220,6 +224,8 @@ const struct cred *get_task_cred(struct
62618 {
62619 const struct cred *cred;
62620
62621+ pax_track_stack();
62622+
62623 rcu_read_lock();
62624
62625 do {
62626@@ -239,6 +245,8 @@ struct cred *cred_alloc_blank(void)
62627 {
62628 struct cred *new;
62629
62630+ pax_track_stack();
62631+
62632 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
62633 if (!new)
62634 return NULL;
62635@@ -287,6 +295,8 @@ struct cred *prepare_creds(void)
62636 const struct cred *old;
62637 struct cred *new;
62638
62639+ pax_track_stack();
62640+
62641 validate_process_creds();
62642
62643 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
62644@@ -333,6 +343,8 @@ struct cred *prepare_exec_creds(void)
62645 struct thread_group_cred *tgcred = NULL;
62646 struct cred *new;
62647
62648+ pax_track_stack();
62649+
62650 #ifdef CONFIG_KEYS
62651 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
62652 if (!tgcred)
62653@@ -385,6 +397,8 @@ int copy_creds(struct task_struct *p, un
62654 struct cred *new;
62655 int ret;
62656
62657+ pax_track_stack();
62658+
62659 if (
62660 #ifdef CONFIG_KEYS
62661 !p->cred->thread_keyring &&
62662@@ -475,6 +489,8 @@ int commit_creds(struct cred *new)
62663 struct task_struct *task = current;
62664 const struct cred *old = task->real_cred;
62665
62666+ pax_track_stack();
62667+
62668 kdebug("commit_creds(%p{%d,%d})", new,
62669 atomic_read(&new->usage),
62670 read_cred_subscribers(new));
62671@@ -489,6 +505,8 @@ int commit_creds(struct cred *new)
62672
62673 get_cred(new); /* we will require a ref for the subj creds too */
62674
62675+ gr_set_role_label(task, new->uid, new->gid);
62676+
62677 /* dumpability changes */
62678 if (old->euid != new->euid ||
62679 old->egid != new->egid ||
62680@@ -549,6 +567,8 @@ EXPORT_SYMBOL(commit_creds);
62681 */
62682 void abort_creds(struct cred *new)
62683 {
62684+ pax_track_stack();
62685+
62686 kdebug("abort_creds(%p{%d,%d})", new,
62687 atomic_read(&new->usage),
62688 read_cred_subscribers(new));
62689@@ -572,6 +592,8 @@ const struct cred *override_creds(const
62690 {
62691 const struct cred *old = current->cred;
62692
62693+ pax_track_stack();
62694+
62695 kdebug("override_creds(%p{%d,%d})", new,
62696 atomic_read(&new->usage),
62697 read_cred_subscribers(new));
62698@@ -601,6 +623,8 @@ void revert_creds(const struct cred *old
62699 {
62700 const struct cred *override = current->cred;
62701
62702+ pax_track_stack();
62703+
62704 kdebug("revert_creds(%p{%d,%d})", old,
62705 atomic_read(&old->usage),
62706 read_cred_subscribers(old));
62707@@ -647,6 +671,8 @@ struct cred *prepare_kernel_cred(struct
62708 const struct cred *old;
62709 struct cred *new;
62710
62711+ pax_track_stack();
62712+
62713 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
62714 if (!new)
62715 return NULL;
62716@@ -701,6 +727,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
62717 */
62718 int set_security_override(struct cred *new, u32 secid)
62719 {
62720+ pax_track_stack();
62721+
62722 return security_kernel_act_as(new, secid);
62723 }
62724 EXPORT_SYMBOL(set_security_override);
62725@@ -720,6 +748,8 @@ int set_security_override_from_ctx(struc
62726 u32 secid;
62727 int ret;
62728
62729+ pax_track_stack();
62730+
62731 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
62732 if (ret < 0)
62733 return ret;
62734diff -urNp linux-3.1.1/kernel/debug/debug_core.c linux-3.1.1/kernel/debug/debug_core.c
62735--- linux-3.1.1/kernel/debug/debug_core.c 2011-11-11 15:19:27.000000000 -0500
62736+++ linux-3.1.1/kernel/debug/debug_core.c 2011-11-16 18:39:08.000000000 -0500
62737@@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_loc
62738 */
62739 static atomic_t masters_in_kgdb;
62740 static atomic_t slaves_in_kgdb;
62741-static atomic_t kgdb_break_tasklet_var;
62742+static atomic_unchecked_t kgdb_break_tasklet_var;
62743 atomic_t kgdb_setting_breakpoint;
62744
62745 struct task_struct *kgdb_usethread;
62746@@ -129,7 +129,7 @@ int kgdb_single_step;
62747 static pid_t kgdb_sstep_pid;
62748
62749 /* to keep track of the CPU which is doing the single stepping*/
62750-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
62751+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
62752
62753 /*
62754 * If you are debugging a problem where roundup (the collection of
62755@@ -542,7 +542,7 @@ return_normal:
62756 * kernel will only try for the value of sstep_tries before
62757 * giving up and continuing on.
62758 */
62759- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
62760+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
62761 (kgdb_info[cpu].task &&
62762 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
62763 atomic_set(&kgdb_active, -1);
62764@@ -636,8 +636,8 @@ cpu_master_loop:
62765 }
62766
62767 kgdb_restore:
62768- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
62769- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
62770+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
62771+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
62772 if (kgdb_info[sstep_cpu].task)
62773 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
62774 else
62775@@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(vo
62776 static void kgdb_tasklet_bpt(unsigned long ing)
62777 {
62778 kgdb_breakpoint();
62779- atomic_set(&kgdb_break_tasklet_var, 0);
62780+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
62781 }
62782
62783 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
62784
62785 void kgdb_schedule_breakpoint(void)
62786 {
62787- if (atomic_read(&kgdb_break_tasklet_var) ||
62788+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
62789 atomic_read(&kgdb_active) != -1 ||
62790 atomic_read(&kgdb_setting_breakpoint))
62791 return;
62792- atomic_inc(&kgdb_break_tasklet_var);
62793+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
62794 tasklet_schedule(&kgdb_tasklet_breakpoint);
62795 }
62796 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
62797diff -urNp linux-3.1.1/kernel/debug/kdb/kdb_main.c linux-3.1.1/kernel/debug/kdb/kdb_main.c
62798--- linux-3.1.1/kernel/debug/kdb/kdb_main.c 2011-11-11 15:19:27.000000000 -0500
62799+++ linux-3.1.1/kernel/debug/kdb/kdb_main.c 2011-11-16 18:39:08.000000000 -0500
62800@@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const cha
62801 list_for_each_entry(mod, kdb_modules, list) {
62802
62803 kdb_printf("%-20s%8u 0x%p ", mod->name,
62804- mod->core_size, (void *)mod);
62805+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
62806 #ifdef CONFIG_MODULE_UNLOAD
62807 kdb_printf("%4d ", module_refcount(mod));
62808 #endif
62809@@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const cha
62810 kdb_printf(" (Loading)");
62811 else
62812 kdb_printf(" (Live)");
62813- kdb_printf(" 0x%p", mod->module_core);
62814+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
62815
62816 #ifdef CONFIG_MODULE_UNLOAD
62817 {
62818diff -urNp linux-3.1.1/kernel/events/core.c linux-3.1.1/kernel/events/core.c
62819--- linux-3.1.1/kernel/events/core.c 2011-11-11 15:19:27.000000000 -0500
62820+++ linux-3.1.1/kernel/events/core.c 2011-11-16 18:39:08.000000000 -0500
62821@@ -172,7 +172,7 @@ int perf_proc_update_handler(struct ctl_
62822 return 0;
62823 }
62824
62825-static atomic64_t perf_event_id;
62826+static atomic64_unchecked_t perf_event_id;
62827
62828 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
62829 enum event_type_t event_type);
62830@@ -2535,7 +2535,7 @@ static void __perf_event_read(void *info
62831
62832 static inline u64 perf_event_count(struct perf_event *event)
62833 {
62834- return local64_read(&event->count) + atomic64_read(&event->child_count);
62835+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
62836 }
62837
62838 static u64 perf_event_read(struct perf_event *event)
62839@@ -3060,9 +3060,9 @@ u64 perf_event_read_value(struct perf_ev
62840 mutex_lock(&event->child_mutex);
62841 total += perf_event_read(event);
62842 *enabled += event->total_time_enabled +
62843- atomic64_read(&event->child_total_time_enabled);
62844+ atomic64_read_unchecked(&event->child_total_time_enabled);
62845 *running += event->total_time_running +
62846- atomic64_read(&event->child_total_time_running);
62847+ atomic64_read_unchecked(&event->child_total_time_running);
62848
62849 list_for_each_entry(child, &event->child_list, child_list) {
62850 total += perf_event_read(child);
62851@@ -3448,10 +3448,10 @@ void perf_event_update_userpage(struct p
62852 userpg->offset -= local64_read(&event->hw.prev_count);
62853
62854 userpg->time_enabled = enabled +
62855- atomic64_read(&event->child_total_time_enabled);
62856+ atomic64_read_unchecked(&event->child_total_time_enabled);
62857
62858 userpg->time_running = running +
62859- atomic64_read(&event->child_total_time_running);
62860+ atomic64_read_unchecked(&event->child_total_time_running);
62861
62862 barrier();
62863 ++userpg->lock;
62864@@ -3822,11 +3822,11 @@ static void perf_output_read_one(struct
62865 values[n++] = perf_event_count(event);
62866 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
62867 values[n++] = enabled +
62868- atomic64_read(&event->child_total_time_enabled);
62869+ atomic64_read_unchecked(&event->child_total_time_enabled);
62870 }
62871 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
62872 values[n++] = running +
62873- atomic64_read(&event->child_total_time_running);
62874+ atomic64_read_unchecked(&event->child_total_time_running);
62875 }
62876 if (read_format & PERF_FORMAT_ID)
62877 values[n++] = primary_event_id(event);
62878@@ -4477,12 +4477,12 @@ static void perf_event_mmap_event(struct
62879 * need to add enough zero bytes after the string to handle
62880 * the 64bit alignment we do later.
62881 */
62882- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
62883+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
62884 if (!buf) {
62885 name = strncpy(tmp, "//enomem", sizeof(tmp));
62886 goto got_name;
62887 }
62888- name = d_path(&file->f_path, buf, PATH_MAX);
62889+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
62890 if (IS_ERR(name)) {
62891 name = strncpy(tmp, "//toolong", sizeof(tmp));
62892 goto got_name;
62893@@ -5833,7 +5833,7 @@ perf_event_alloc(struct perf_event_attr
62894 event->parent = parent_event;
62895
62896 event->ns = get_pid_ns(current->nsproxy->pid_ns);
62897- event->id = atomic64_inc_return(&perf_event_id);
62898+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
62899
62900 event->state = PERF_EVENT_STATE_INACTIVE;
62901
62902@@ -6355,10 +6355,10 @@ static void sync_child_event(struct perf
62903 /*
62904 * Add back the child's count to the parent's count:
62905 */
62906- atomic64_add(child_val, &parent_event->child_count);
62907- atomic64_add(child_event->total_time_enabled,
62908+ atomic64_add_unchecked(child_val, &parent_event->child_count);
62909+ atomic64_add_unchecked(child_event->total_time_enabled,
62910 &parent_event->child_total_time_enabled);
62911- atomic64_add(child_event->total_time_running,
62912+ atomic64_add_unchecked(child_event->total_time_running,
62913 &parent_event->child_total_time_running);
62914
62915 /*
62916diff -urNp linux-3.1.1/kernel/exit.c linux-3.1.1/kernel/exit.c
62917--- linux-3.1.1/kernel/exit.c 2011-11-11 15:19:27.000000000 -0500
62918+++ linux-3.1.1/kernel/exit.c 2011-11-16 19:33:48.000000000 -0500
62919@@ -57,6 +57,10 @@
62920 #include <asm/pgtable.h>
62921 #include <asm/mmu_context.h>
62922
62923+#ifdef CONFIG_GRKERNSEC
62924+extern rwlock_t grsec_exec_file_lock;
62925+#endif
62926+
62927 static void exit_mm(struct task_struct * tsk);
62928
62929 static void __unhash_process(struct task_struct *p, bool group_dead)
62930@@ -168,6 +172,10 @@ void release_task(struct task_struct * p
62931 struct task_struct *leader;
62932 int zap_leader;
62933 repeat:
62934+#ifdef CONFIG_NET
62935+ gr_del_task_from_ip_table(p);
62936+#endif
62937+
62938 /* don't need to get the RCU readlock here - the process is dead and
62939 * can't be modifying its own credentials. But shut RCU-lockdep up */
62940 rcu_read_lock();
62941@@ -324,11 +332,22 @@ static void reparent_to_kthreadd(void)
62942 {
62943 write_lock_irq(&tasklist_lock);
62944
62945+#ifdef CONFIG_GRKERNSEC
62946+ write_lock(&grsec_exec_file_lock);
62947+ if (current->exec_file) {
62948+ fput(current->exec_file);
62949+ current->exec_file = NULL;
62950+ }
62951+ write_unlock(&grsec_exec_file_lock);
62952+#endif
62953+
62954 ptrace_unlink(current);
62955 /* Reparent to init */
62956 current->real_parent = current->parent = kthreadd_task;
62957 list_move_tail(&current->sibling, &current->real_parent->children);
62958
62959+ gr_set_kernel_label(current);
62960+
62961 /* Set the exit signal to SIGCHLD so we signal init on exit */
62962 current->exit_signal = SIGCHLD;
62963
62964@@ -380,7 +399,7 @@ int allow_signal(int sig)
62965 * know it'll be handled, so that they don't get converted to
62966 * SIGKILL or just silently dropped.
62967 */
62968- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
62969+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
62970 recalc_sigpending();
62971 spin_unlock_irq(&current->sighand->siglock);
62972 return 0;
62973@@ -416,6 +435,17 @@ void daemonize(const char *name, ...)
62974 vsnprintf(current->comm, sizeof(current->comm), name, args);
62975 va_end(args);
62976
62977+#ifdef CONFIG_GRKERNSEC
62978+ write_lock(&grsec_exec_file_lock);
62979+ if (current->exec_file) {
62980+ fput(current->exec_file);
62981+ current->exec_file = NULL;
62982+ }
62983+ write_unlock(&grsec_exec_file_lock);
62984+#endif
62985+
62986+ gr_set_kernel_label(current);
62987+
62988 /*
62989 * If we were started as result of loading a module, close all of the
62990 * user space pages. We don't need them, and if we didn't close them
62991@@ -895,6 +925,8 @@ NORET_TYPE void do_exit(long code)
62992 struct task_struct *tsk = current;
62993 int group_dead;
62994
62995+ set_fs(USER_DS);
62996+
62997 profile_task_exit(tsk);
62998
62999 WARN_ON(blk_needs_flush_plug(tsk));
63000@@ -911,7 +943,6 @@ NORET_TYPE void do_exit(long code)
63001 * mm_release()->clear_child_tid() from writing to a user-controlled
63002 * kernel address.
63003 */
63004- set_fs(USER_DS);
63005
63006 ptrace_event(PTRACE_EVENT_EXIT, code);
63007
63008@@ -973,6 +1004,9 @@ NORET_TYPE void do_exit(long code)
63009 tsk->exit_code = code;
63010 taskstats_exit(tsk, group_dead);
63011
63012+ gr_acl_handle_psacct(tsk, code);
63013+ gr_acl_handle_exit();
63014+
63015 exit_mm(tsk);
63016
63017 if (group_dead)
63018diff -urNp linux-3.1.1/kernel/fork.c linux-3.1.1/kernel/fork.c
63019--- linux-3.1.1/kernel/fork.c 2011-11-11 15:19:27.000000000 -0500
63020+++ linux-3.1.1/kernel/fork.c 2011-11-16 19:36:31.000000000 -0500
63021@@ -285,7 +285,7 @@ static struct task_struct *dup_task_stru
63022 *stackend = STACK_END_MAGIC; /* for overflow detection */
63023
63024 #ifdef CONFIG_CC_STACKPROTECTOR
63025- tsk->stack_canary = get_random_int();
63026+ tsk->stack_canary = pax_get_random_long();
63027 #endif
63028
63029 /*
63030@@ -309,13 +309,77 @@ out:
63031 }
63032
63033 #ifdef CONFIG_MMU
63034+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
63035+{
63036+ struct vm_area_struct *tmp;
63037+ unsigned long charge;
63038+ struct mempolicy *pol;
63039+ struct file *file;
63040+
63041+ charge = 0;
63042+ if (mpnt->vm_flags & VM_ACCOUNT) {
63043+ unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
63044+ if (security_vm_enough_memory(len))
63045+ goto fail_nomem;
63046+ charge = len;
63047+ }
63048+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
63049+ if (!tmp)
63050+ goto fail_nomem;
63051+ *tmp = *mpnt;
63052+ tmp->vm_mm = mm;
63053+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
63054+ pol = mpol_dup(vma_policy(mpnt));
63055+ if (IS_ERR(pol))
63056+ goto fail_nomem_policy;
63057+ vma_set_policy(tmp, pol);
63058+ if (anon_vma_fork(tmp, mpnt))
63059+ goto fail_nomem_anon_vma_fork;
63060+ tmp->vm_flags &= ~VM_LOCKED;
63061+ tmp->vm_next = tmp->vm_prev = NULL;
63062+ tmp->vm_mirror = NULL;
63063+ file = tmp->vm_file;
63064+ if (file) {
63065+ struct inode *inode = file->f_path.dentry->d_inode;
63066+ struct address_space *mapping = file->f_mapping;
63067+
63068+ get_file(file);
63069+ if (tmp->vm_flags & VM_DENYWRITE)
63070+ atomic_dec(&inode->i_writecount);
63071+ mutex_lock(&mapping->i_mmap_mutex);
63072+ if (tmp->vm_flags & VM_SHARED)
63073+ mapping->i_mmap_writable++;
63074+ flush_dcache_mmap_lock(mapping);
63075+ /* insert tmp into the share list, just after mpnt */
63076+ vma_prio_tree_add(tmp, mpnt);
63077+ flush_dcache_mmap_unlock(mapping);
63078+ mutex_unlock(&mapping->i_mmap_mutex);
63079+ }
63080+
63081+ /*
63082+ * Clear hugetlb-related page reserves for children. This only
63083+ * affects MAP_PRIVATE mappings. Faults generated by the child
63084+ * are not guaranteed to succeed, even if read-only
63085+ */
63086+ if (is_vm_hugetlb_page(tmp))
63087+ reset_vma_resv_huge_pages(tmp);
63088+
63089+ return tmp;
63090+
63091+fail_nomem_anon_vma_fork:
63092+ mpol_put(pol);
63093+fail_nomem_policy:
63094+ kmem_cache_free(vm_area_cachep, tmp);
63095+fail_nomem:
63096+ vm_unacct_memory(charge);
63097+ return NULL;
63098+}
63099+
63100 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63101 {
63102 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
63103 struct rb_node **rb_link, *rb_parent;
63104 int retval;
63105- unsigned long charge;
63106- struct mempolicy *pol;
63107
63108 down_write(&oldmm->mmap_sem);
63109 flush_cache_dup_mm(oldmm);
63110@@ -327,8 +391,8 @@ static int dup_mmap(struct mm_struct *mm
63111 mm->locked_vm = 0;
63112 mm->mmap = NULL;
63113 mm->mmap_cache = NULL;
63114- mm->free_area_cache = oldmm->mmap_base;
63115- mm->cached_hole_size = ~0UL;
63116+ mm->free_area_cache = oldmm->free_area_cache;
63117+ mm->cached_hole_size = oldmm->cached_hole_size;
63118 mm->map_count = 0;
63119 cpumask_clear(mm_cpumask(mm));
63120 mm->mm_rb = RB_ROOT;
63121@@ -344,8 +408,6 @@ static int dup_mmap(struct mm_struct *mm
63122
63123 prev = NULL;
63124 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
63125- struct file *file;
63126-
63127 if (mpnt->vm_flags & VM_DONTCOPY) {
63128 long pages = vma_pages(mpnt);
63129 mm->total_vm -= pages;
63130@@ -353,55 +415,13 @@ static int dup_mmap(struct mm_struct *mm
63131 -pages);
63132 continue;
63133 }
63134- charge = 0;
63135- if (mpnt->vm_flags & VM_ACCOUNT) {
63136- unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
63137- if (security_vm_enough_memory(len))
63138- goto fail_nomem;
63139- charge = len;
63140- }
63141- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
63142- if (!tmp)
63143- goto fail_nomem;
63144- *tmp = *mpnt;
63145- INIT_LIST_HEAD(&tmp->anon_vma_chain);
63146- pol = mpol_dup(vma_policy(mpnt));
63147- retval = PTR_ERR(pol);
63148- if (IS_ERR(pol))
63149- goto fail_nomem_policy;
63150- vma_set_policy(tmp, pol);
63151- tmp->vm_mm = mm;
63152- if (anon_vma_fork(tmp, mpnt))
63153- goto fail_nomem_anon_vma_fork;
63154- tmp->vm_flags &= ~VM_LOCKED;
63155- tmp->vm_next = tmp->vm_prev = NULL;
63156- file = tmp->vm_file;
63157- if (file) {
63158- struct inode *inode = file->f_path.dentry->d_inode;
63159- struct address_space *mapping = file->f_mapping;
63160-
63161- get_file(file);
63162- if (tmp->vm_flags & VM_DENYWRITE)
63163- atomic_dec(&inode->i_writecount);
63164- mutex_lock(&mapping->i_mmap_mutex);
63165- if (tmp->vm_flags & VM_SHARED)
63166- mapping->i_mmap_writable++;
63167- flush_dcache_mmap_lock(mapping);
63168- /* insert tmp into the share list, just after mpnt */
63169- vma_prio_tree_add(tmp, mpnt);
63170- flush_dcache_mmap_unlock(mapping);
63171- mutex_unlock(&mapping->i_mmap_mutex);
63172+ tmp = dup_vma(mm, mpnt);
63173+ if (!tmp) {
63174+ retval = -ENOMEM;
63175+ goto out;
63176 }
63177
63178 /*
63179- * Clear hugetlb-related page reserves for children. This only
63180- * affects MAP_PRIVATE mappings. Faults generated by the child
63181- * are not guaranteed to succeed, even if read-only
63182- */
63183- if (is_vm_hugetlb_page(tmp))
63184- reset_vma_resv_huge_pages(tmp);
63185-
63186- /*
63187 * Link in the new vma and copy the page table entries.
63188 */
63189 *pprev = tmp;
63190@@ -422,6 +442,31 @@ static int dup_mmap(struct mm_struct *mm
63191 if (retval)
63192 goto out;
63193 }
63194+
63195+#ifdef CONFIG_PAX_SEGMEXEC
63196+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
63197+ struct vm_area_struct *mpnt_m;
63198+
63199+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
63200+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
63201+
63202+ if (!mpnt->vm_mirror)
63203+ continue;
63204+
63205+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
63206+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
63207+ mpnt->vm_mirror = mpnt_m;
63208+ } else {
63209+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
63210+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
63211+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
63212+ mpnt->vm_mirror->vm_mirror = mpnt;
63213+ }
63214+ }
63215+ BUG_ON(mpnt_m);
63216+ }
63217+#endif
63218+
63219 /* a new mm has just been created */
63220 arch_dup_mmap(oldmm, mm);
63221 retval = 0;
63222@@ -430,14 +475,6 @@ out:
63223 flush_tlb_mm(oldmm);
63224 up_write(&oldmm->mmap_sem);
63225 return retval;
63226-fail_nomem_anon_vma_fork:
63227- mpol_put(pol);
63228-fail_nomem_policy:
63229- kmem_cache_free(vm_area_cachep, tmp);
63230-fail_nomem:
63231- retval = -ENOMEM;
63232- vm_unacct_memory(charge);
63233- goto out;
63234 }
63235
63236 static inline int mm_alloc_pgd(struct mm_struct *mm)
63237@@ -837,13 +874,14 @@ static int copy_fs(unsigned long clone_f
63238 spin_unlock(&fs->lock);
63239 return -EAGAIN;
63240 }
63241- fs->users++;
63242+ atomic_inc(&fs->users);
63243 spin_unlock(&fs->lock);
63244 return 0;
63245 }
63246 tsk->fs = copy_fs_struct(fs);
63247 if (!tsk->fs)
63248 return -ENOMEM;
63249+ gr_set_chroot_entries(tsk, &tsk->fs->root);
63250 return 0;
63251 }
63252
63253@@ -1105,6 +1143,9 @@ static struct task_struct *copy_process(
63254 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
63255 #endif
63256 retval = -EAGAIN;
63257+
63258+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
63259+
63260 if (atomic_read(&p->real_cred->user->processes) >=
63261 task_rlimit(p, RLIMIT_NPROC)) {
63262 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
63263@@ -1264,6 +1305,8 @@ static struct task_struct *copy_process(
63264 if (clone_flags & CLONE_THREAD)
63265 p->tgid = current->tgid;
63266
63267+ gr_copy_label(p);
63268+
63269 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
63270 /*
63271 * Clear TID on mm_release()?
63272@@ -1428,6 +1471,8 @@ bad_fork_cleanup_count:
63273 bad_fork_free:
63274 free_task(p);
63275 fork_out:
63276+ gr_log_forkfail(retval);
63277+
63278 return ERR_PTR(retval);
63279 }
63280
63281@@ -1528,6 +1573,8 @@ long do_fork(unsigned long clone_flags,
63282 if (clone_flags & CLONE_PARENT_SETTID)
63283 put_user(nr, parent_tidptr);
63284
63285+ gr_handle_brute_check();
63286+
63287 if (clone_flags & CLONE_VFORK) {
63288 p->vfork_done = &vfork;
63289 init_completion(&vfork);
63290@@ -1637,7 +1684,7 @@ static int unshare_fs(unsigned long unsh
63291 return 0;
63292
63293 /* don't need lock here; in the worst case we'll do useless copy */
63294- if (fs->users == 1)
63295+ if (atomic_read(&fs->users) == 1)
63296 return 0;
63297
63298 *new_fsp = copy_fs_struct(fs);
63299@@ -1726,7 +1773,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
63300 fs = current->fs;
63301 spin_lock(&fs->lock);
63302 current->fs = new_fs;
63303- if (--fs->users)
63304+ gr_set_chroot_entries(current, &current->fs->root);
63305+ if (atomic_dec_return(&fs->users))
63306 new_fs = NULL;
63307 else
63308 new_fs = fs;
63309diff -urNp linux-3.1.1/kernel/futex.c linux-3.1.1/kernel/futex.c
63310--- linux-3.1.1/kernel/futex.c 2011-11-11 15:19:27.000000000 -0500
63311+++ linux-3.1.1/kernel/futex.c 2011-11-16 18:40:44.000000000 -0500
63312@@ -54,6 +54,7 @@
63313 #include <linux/mount.h>
63314 #include <linux/pagemap.h>
63315 #include <linux/syscalls.h>
63316+#include <linux/ptrace.h>
63317 #include <linux/signal.h>
63318 #include <linux/module.h>
63319 #include <linux/magic.h>
63320@@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fsh
63321 struct page *page, *page_head;
63322 int err, ro = 0;
63323
63324+#ifdef CONFIG_PAX_SEGMEXEC
63325+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
63326+ return -EFAULT;
63327+#endif
63328+
63329 /*
63330 * The futex address must be "naturally" aligned.
63331 */
63332@@ -1863,6 +1869,8 @@ static int futex_wait(u32 __user *uaddr,
63333 struct futex_q q = futex_q_init;
63334 int ret;
63335
63336+ pax_track_stack();
63337+
63338 if (!bitset)
63339 return -EINVAL;
63340 q.bitset = bitset;
63341@@ -2259,6 +2267,8 @@ static int futex_wait_requeue_pi(u32 __u
63342 struct futex_q q = futex_q_init;
63343 int res, ret;
63344
63345+ pax_track_stack();
63346+
63347 if (!bitset)
63348 return -EINVAL;
63349
63350@@ -2431,7 +2441,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
63351 {
63352 struct robust_list_head __user *head;
63353 unsigned long ret;
63354+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
63355 const struct cred *cred = current_cred(), *pcred;
63356+#endif
63357
63358 if (!futex_cmpxchg_enabled)
63359 return -ENOSYS;
63360@@ -2447,6 +2459,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
63361 if (!p)
63362 goto err_unlock;
63363 ret = -EPERM;
63364+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63365+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
63366+ goto err_unlock;
63367+#else
63368 pcred = __task_cred(p);
63369 /* If victim is in different user_ns, then uids are not
63370 comparable, so we must have CAP_SYS_PTRACE */
63371@@ -2461,6 +2477,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
63372 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
63373 goto err_unlock;
63374 ok:
63375+#endif
63376 head = p->robust_list;
63377 rcu_read_unlock();
63378 }
63379@@ -2712,6 +2729,7 @@ static int __init futex_init(void)
63380 {
63381 u32 curval;
63382 int i;
63383+ mm_segment_t oldfs;
63384
63385 /*
63386 * This will fail and we want it. Some arch implementations do
63387@@ -2723,8 +2741,11 @@ static int __init futex_init(void)
63388 * implementation, the non-functional ones will return
63389 * -ENOSYS.
63390 */
63391+ oldfs = get_fs();
63392+ set_fs(USER_DS);
63393 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
63394 futex_cmpxchg_enabled = 1;
63395+ set_fs(oldfs);
63396
63397 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
63398 plist_head_init(&futex_queues[i].chain);
63399diff -urNp linux-3.1.1/kernel/futex_compat.c linux-3.1.1/kernel/futex_compat.c
63400--- linux-3.1.1/kernel/futex_compat.c 2011-11-11 15:19:27.000000000 -0500
63401+++ linux-3.1.1/kernel/futex_compat.c 2011-11-16 18:40:44.000000000 -0500
63402@@ -10,6 +10,7 @@
63403 #include <linux/compat.h>
63404 #include <linux/nsproxy.h>
63405 #include <linux/futex.h>
63406+#include <linux/ptrace.h>
63407
63408 #include <asm/uaccess.h>
63409
63410@@ -136,7 +137,10 @@ compat_sys_get_robust_list(int pid, comp
63411 {
63412 struct compat_robust_list_head __user *head;
63413 unsigned long ret;
63414- const struct cred *cred = current_cred(), *pcred;
63415+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
63416+ const struct cred *cred = current_cred();
63417+ const struct cred *pcred;
63418+#endif
63419
63420 if (!futex_cmpxchg_enabled)
63421 return -ENOSYS;
63422@@ -152,6 +156,10 @@ compat_sys_get_robust_list(int pid, comp
63423 if (!p)
63424 goto err_unlock;
63425 ret = -EPERM;
63426+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63427+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
63428+ goto err_unlock;
63429+#else
63430 pcred = __task_cred(p);
63431 /* If victim is in different user_ns, then uids are not
63432 comparable, so we must have CAP_SYS_PTRACE */
63433@@ -166,6 +174,7 @@ compat_sys_get_robust_list(int pid, comp
63434 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
63435 goto err_unlock;
63436 ok:
63437+#endif
63438 head = p->compat_robust_list;
63439 rcu_read_unlock();
63440 }
63441diff -urNp linux-3.1.1/kernel/gcov/base.c linux-3.1.1/kernel/gcov/base.c
63442--- linux-3.1.1/kernel/gcov/base.c 2011-11-11 15:19:27.000000000 -0500
63443+++ linux-3.1.1/kernel/gcov/base.c 2011-11-16 18:39:08.000000000 -0500
63444@@ -102,11 +102,6 @@ void gcov_enable_events(void)
63445 }
63446
63447 #ifdef CONFIG_MODULES
63448-static inline int within(void *addr, void *start, unsigned long size)
63449-{
63450- return ((addr >= start) && (addr < start + size));
63451-}
63452-
63453 /* Update list and generate events when modules are unloaded. */
63454 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
63455 void *data)
63456@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
63457 prev = NULL;
63458 /* Remove entries located in module from linked list. */
63459 for (info = gcov_info_head; info; info = info->next) {
63460- if (within(info, mod->module_core, mod->core_size)) {
63461+ if (within_module_core_rw((unsigned long)info, mod)) {
63462 if (prev)
63463 prev->next = info->next;
63464 else
63465diff -urNp linux-3.1.1/kernel/hrtimer.c linux-3.1.1/kernel/hrtimer.c
63466--- linux-3.1.1/kernel/hrtimer.c 2011-11-11 15:19:27.000000000 -0500
63467+++ linux-3.1.1/kernel/hrtimer.c 2011-11-16 18:39:08.000000000 -0500
63468@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
63469 local_irq_restore(flags);
63470 }
63471
63472-static void run_hrtimer_softirq(struct softirq_action *h)
63473+static void run_hrtimer_softirq(void)
63474 {
63475 hrtimer_peek_ahead_timers();
63476 }
63477diff -urNp linux-3.1.1/kernel/jump_label.c linux-3.1.1/kernel/jump_label.c
63478--- linux-3.1.1/kernel/jump_label.c 2011-11-11 15:19:27.000000000 -0500
63479+++ linux-3.1.1/kernel/jump_label.c 2011-11-16 18:39:08.000000000 -0500
63480@@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entr
63481
63482 size = (((unsigned long)stop - (unsigned long)start)
63483 / sizeof(struct jump_entry));
63484+ pax_open_kernel();
63485 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
63486+ pax_close_kernel();
63487 }
63488
63489 static void jump_label_update(struct jump_label_key *key, int enable);
63490@@ -297,10 +299,12 @@ static void jump_label_invalidate_module
63491 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
63492 struct jump_entry *iter;
63493
63494+ pax_open_kernel();
63495 for (iter = iter_start; iter < iter_stop; iter++) {
63496 if (within_module_init(iter->code, mod))
63497 iter->code = 0;
63498 }
63499+ pax_close_kernel();
63500 }
63501
63502 static int
63503diff -urNp linux-3.1.1/kernel/kallsyms.c linux-3.1.1/kernel/kallsyms.c
63504--- linux-3.1.1/kernel/kallsyms.c 2011-11-11 15:19:27.000000000 -0500
63505+++ linux-3.1.1/kernel/kallsyms.c 2011-11-16 18:40:44.000000000 -0500
63506@@ -11,6 +11,9 @@
63507 * Changed the compression method from stem compression to "table lookup"
63508 * compression (see scripts/kallsyms.c for a more complete description)
63509 */
63510+#ifdef CONFIG_GRKERNSEC_HIDESYM
63511+#define __INCLUDED_BY_HIDESYM 1
63512+#endif
63513 #include <linux/kallsyms.h>
63514 #include <linux/module.h>
63515 #include <linux/init.h>
63516@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_mark
63517
63518 static inline int is_kernel_inittext(unsigned long addr)
63519 {
63520+ if (system_state != SYSTEM_BOOTING)
63521+ return 0;
63522+
63523 if (addr >= (unsigned long)_sinittext
63524 && addr <= (unsigned long)_einittext)
63525 return 1;
63526 return 0;
63527 }
63528
63529+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63530+#ifdef CONFIG_MODULES
63531+static inline int is_module_text(unsigned long addr)
63532+{
63533+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
63534+ return 1;
63535+
63536+ addr = ktla_ktva(addr);
63537+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
63538+}
63539+#else
63540+static inline int is_module_text(unsigned long addr)
63541+{
63542+ return 0;
63543+}
63544+#endif
63545+#endif
63546+
63547 static inline int is_kernel_text(unsigned long addr)
63548 {
63549 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
63550@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigne
63551
63552 static inline int is_kernel(unsigned long addr)
63553 {
63554+
63555+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63556+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
63557+ return 1;
63558+
63559+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
63560+#else
63561 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
63562+#endif
63563+
63564 return 1;
63565 return in_gate_area_no_mm(addr);
63566 }
63567
63568 static int is_ksym_addr(unsigned long addr)
63569 {
63570+
63571+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63572+ if (is_module_text(addr))
63573+ return 0;
63574+#endif
63575+
63576 if (all_var)
63577 return is_kernel(addr);
63578
63579@@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(st
63580
63581 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
63582 {
63583- iter->name[0] = '\0';
63584 iter->nameoff = get_symbol_offset(new_pos);
63585 iter->pos = new_pos;
63586 }
63587@@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, vo
63588 {
63589 struct kallsym_iter *iter = m->private;
63590
63591+#ifdef CONFIG_GRKERNSEC_HIDESYM
63592+ if (current_uid())
63593+ return 0;
63594+#endif
63595+
63596 /* Some debugging symbols have no name. Ignore them. */
63597 if (!iter->name[0])
63598 return 0;
63599@@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *i
63600 struct kallsym_iter *iter;
63601 int ret;
63602
63603- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
63604+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
63605 if (!iter)
63606 return -ENOMEM;
63607 reset_iter(iter, 0);
63608diff -urNp linux-3.1.1/kernel/kexec.c linux-3.1.1/kernel/kexec.c
63609--- linux-3.1.1/kernel/kexec.c 2011-11-11 15:19:27.000000000 -0500
63610+++ linux-3.1.1/kernel/kexec.c 2011-11-16 18:39:08.000000000 -0500
63611@@ -1033,7 +1033,8 @@ asmlinkage long compat_sys_kexec_load(un
63612 unsigned long flags)
63613 {
63614 struct compat_kexec_segment in;
63615- struct kexec_segment out, __user *ksegments;
63616+ struct kexec_segment out;
63617+ struct kexec_segment __user *ksegments;
63618 unsigned long i, result;
63619
63620 /* Don't allow clients that don't understand the native
63621diff -urNp linux-3.1.1/kernel/kmod.c linux-3.1.1/kernel/kmod.c
63622--- linux-3.1.1/kernel/kmod.c 2011-11-11 15:19:27.000000000 -0500
63623+++ linux-3.1.1/kernel/kmod.c 2011-11-16 18:40:44.000000000 -0500
63624@@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
63625 * If module auto-loading support is disabled then this function
63626 * becomes a no-operation.
63627 */
63628-int __request_module(bool wait, const char *fmt, ...)
63629+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
63630 {
63631- va_list args;
63632 char module_name[MODULE_NAME_LEN];
63633 unsigned int max_modprobes;
63634 int ret;
63635- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
63636+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
63637 static char *envp[] = { "HOME=/",
63638 "TERM=linux",
63639 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
63640@@ -88,9 +87,7 @@ int __request_module(bool wait, const ch
63641 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
63642 static int kmod_loop_msg;
63643
63644- va_start(args, fmt);
63645- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
63646- va_end(args);
63647+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
63648 if (ret >= MODULE_NAME_LEN)
63649 return -ENAMETOOLONG;
63650
63651@@ -98,6 +95,20 @@ int __request_module(bool wait, const ch
63652 if (ret)
63653 return ret;
63654
63655+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63656+ if (!current_uid()) {
63657+ /* hack to workaround consolekit/udisks stupidity */
63658+ read_lock(&tasklist_lock);
63659+ if (!strcmp(current->comm, "mount") &&
63660+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
63661+ read_unlock(&tasklist_lock);
63662+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
63663+ return -EPERM;
63664+ }
63665+ read_unlock(&tasklist_lock);
63666+ }
63667+#endif
63668+
63669 /* If modprobe needs a service that is in a module, we get a recursive
63670 * loop. Limit the number of running kmod threads to max_threads/2 or
63671 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
63672@@ -133,6 +144,47 @@ int __request_module(bool wait, const ch
63673 atomic_dec(&kmod_concurrent);
63674 return ret;
63675 }
63676+
63677+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
63678+{
63679+ va_list args;
63680+ int ret;
63681+
63682+ va_start(args, fmt);
63683+ ret = ____request_module(wait, module_param, fmt, args);
63684+ va_end(args);
63685+
63686+ return ret;
63687+}
63688+
63689+int __request_module(bool wait, const char *fmt, ...)
63690+{
63691+ va_list args;
63692+ int ret;
63693+
63694+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63695+ if (current_uid()) {
63696+ char module_param[MODULE_NAME_LEN];
63697+
63698+ memset(module_param, 0, sizeof(module_param));
63699+
63700+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
63701+
63702+ va_start(args, fmt);
63703+ ret = ____request_module(wait, module_param, fmt, args);
63704+ va_end(args);
63705+
63706+ return ret;
63707+ }
63708+#endif
63709+
63710+ va_start(args, fmt);
63711+ ret = ____request_module(wait, NULL, fmt, args);
63712+ va_end(args);
63713+
63714+ return ret;
63715+}
63716+
63717 EXPORT_SYMBOL(__request_module);
63718 #endif /* CONFIG_MODULES */
63719
63720@@ -222,7 +274,7 @@ static int wait_for_helper(void *data)
63721 *
63722 * Thus the __user pointer cast is valid here.
63723 */
63724- sys_wait4(pid, (int __user *)&ret, 0, NULL);
63725+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
63726
63727 /*
63728 * If ret is 0, either ____call_usermodehelper failed and the
63729diff -urNp linux-3.1.1/kernel/kprobes.c linux-3.1.1/kernel/kprobes.c
63730--- linux-3.1.1/kernel/kprobes.c 2011-11-11 15:19:27.000000000 -0500
63731+++ linux-3.1.1/kernel/kprobes.c 2011-11-16 18:39:08.000000000 -0500
63732@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_
63733 * kernel image and loaded module images reside. This is required
63734 * so x86_64 can correctly handle the %rip-relative fixups.
63735 */
63736- kip->insns = module_alloc(PAGE_SIZE);
63737+ kip->insns = module_alloc_exec(PAGE_SIZE);
63738 if (!kip->insns) {
63739 kfree(kip);
63740 return NULL;
63741@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(st
63742 */
63743 if (!list_is_singular(&kip->list)) {
63744 list_del(&kip->list);
63745- module_free(NULL, kip->insns);
63746+ module_free_exec(NULL, kip->insns);
63747 kfree(kip);
63748 }
63749 return 1;
63750@@ -1949,7 +1949,7 @@ static int __init init_kprobes(void)
63751 {
63752 int i, err = 0;
63753 unsigned long offset = 0, size = 0;
63754- char *modname, namebuf[128];
63755+ char *modname, namebuf[KSYM_NAME_LEN];
63756 const char *symbol_name;
63757 void *addr;
63758 struct kprobe_blackpoint *kb;
63759@@ -2075,7 +2075,7 @@ static int __kprobes show_kprobe_addr(st
63760 const char *sym = NULL;
63761 unsigned int i = *(loff_t *) v;
63762 unsigned long offset = 0;
63763- char *modname, namebuf[128];
63764+ char *modname, namebuf[KSYM_NAME_LEN];
63765
63766 head = &kprobe_table[i];
63767 preempt_disable();
63768diff -urNp linux-3.1.1/kernel/lockdep.c linux-3.1.1/kernel/lockdep.c
63769--- linux-3.1.1/kernel/lockdep.c 2011-11-11 15:19:27.000000000 -0500
63770+++ linux-3.1.1/kernel/lockdep.c 2011-11-16 18:39:08.000000000 -0500
63771@@ -583,6 +583,10 @@ static int static_obj(void *obj)
63772 end = (unsigned long) &_end,
63773 addr = (unsigned long) obj;
63774
63775+#ifdef CONFIG_PAX_KERNEXEC
63776+ start = ktla_ktva(start);
63777+#endif
63778+
63779 /*
63780 * static variable?
63781 */
63782@@ -718,6 +722,7 @@ register_lock_class(struct lockdep_map *
63783 if (!static_obj(lock->key)) {
63784 debug_locks_off();
63785 printk("INFO: trying to register non-static key.\n");
63786+ printk("lock:%pS key:%pS.\n", lock, lock->key);
63787 printk("the code is fine but needs lockdep annotation.\n");
63788 printk("turning off the locking correctness validator.\n");
63789 dump_stack();
63790@@ -2948,7 +2953,7 @@ static int __lock_acquire(struct lockdep
63791 if (!class)
63792 return 0;
63793 }
63794- atomic_inc((atomic_t *)&class->ops);
63795+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
63796 if (very_verbose(class)) {
63797 printk("\nacquire class [%p] %s", class->key, class->name);
63798 if (class->name_version > 1)
63799diff -urNp linux-3.1.1/kernel/lockdep_proc.c linux-3.1.1/kernel/lockdep_proc.c
63800--- linux-3.1.1/kernel/lockdep_proc.c 2011-11-11 15:19:27.000000000 -0500
63801+++ linux-3.1.1/kernel/lockdep_proc.c 2011-11-16 18:39:08.000000000 -0500
63802@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
63803
63804 static void print_name(struct seq_file *m, struct lock_class *class)
63805 {
63806- char str[128];
63807+ char str[KSYM_NAME_LEN];
63808 const char *name = class->name;
63809
63810 if (!name) {
63811diff -urNp linux-3.1.1/kernel/module.c linux-3.1.1/kernel/module.c
63812--- linux-3.1.1/kernel/module.c 2011-11-11 15:19:27.000000000 -0500
63813+++ linux-3.1.1/kernel/module.c 2011-11-16 18:40:44.000000000 -0500
63814@@ -58,6 +58,7 @@
63815 #include <linux/jump_label.h>
63816 #include <linux/pfn.h>
63817 #include <linux/bsearch.h>
63818+#include <linux/grsecurity.h>
63819
63820 #define CREATE_TRACE_POINTS
63821 #include <trace/events/module.h>
63822@@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_not
63823
63824 /* Bounds of module allocation, for speeding __module_address.
63825 * Protected by module_mutex. */
63826-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
63827+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
63828+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
63829
63830 int register_module_notifier(struct notifier_block * nb)
63831 {
63832@@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(cons
63833 return true;
63834
63835 list_for_each_entry_rcu(mod, &modules, list) {
63836- struct symsearch arr[] = {
63837+ struct symsearch modarr[] = {
63838 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
63839 NOT_GPL_ONLY, false },
63840 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
63841@@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(cons
63842 #endif
63843 };
63844
63845- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
63846+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
63847 return true;
63848 }
63849 return false;
63850@@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(
63851 static int percpu_modalloc(struct module *mod,
63852 unsigned long size, unsigned long align)
63853 {
63854- if (align > PAGE_SIZE) {
63855+ if (align-1 >= PAGE_SIZE) {
63856 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
63857 mod->name, align, PAGE_SIZE);
63858 align = PAGE_SIZE;
63859@@ -1183,7 +1185,7 @@ resolve_symbol_wait(struct module *mod,
63860 */
63861 #ifdef CONFIG_SYSFS
63862
63863-#ifdef CONFIG_KALLSYMS
63864+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
63865 static inline bool sect_empty(const Elf_Shdr *sect)
63866 {
63867 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
63868@@ -1649,21 +1651,21 @@ static void set_section_ro_nx(void *base
63869
63870 static void unset_module_core_ro_nx(struct module *mod)
63871 {
63872- set_page_attributes(mod->module_core + mod->core_text_size,
63873- mod->module_core + mod->core_size,
63874+ set_page_attributes(mod->module_core_rw,
63875+ mod->module_core_rw + mod->core_size_rw,
63876 set_memory_x);
63877- set_page_attributes(mod->module_core,
63878- mod->module_core + mod->core_ro_size,
63879+ set_page_attributes(mod->module_core_rx,
63880+ mod->module_core_rx + mod->core_size_rx,
63881 set_memory_rw);
63882 }
63883
63884 static void unset_module_init_ro_nx(struct module *mod)
63885 {
63886- set_page_attributes(mod->module_init + mod->init_text_size,
63887- mod->module_init + mod->init_size,
63888+ set_page_attributes(mod->module_init_rw,
63889+ mod->module_init_rw + mod->init_size_rw,
63890 set_memory_x);
63891- set_page_attributes(mod->module_init,
63892- mod->module_init + mod->init_ro_size,
63893+ set_page_attributes(mod->module_init_rx,
63894+ mod->module_init_rx + mod->init_size_rx,
63895 set_memory_rw);
63896 }
63897
63898@@ -1674,14 +1676,14 @@ void set_all_modules_text_rw(void)
63899
63900 mutex_lock(&module_mutex);
63901 list_for_each_entry_rcu(mod, &modules, list) {
63902- if ((mod->module_core) && (mod->core_text_size)) {
63903- set_page_attributes(mod->module_core,
63904- mod->module_core + mod->core_text_size,
63905+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
63906+ set_page_attributes(mod->module_core_rx,
63907+ mod->module_core_rx + mod->core_size_rx,
63908 set_memory_rw);
63909 }
63910- if ((mod->module_init) && (mod->init_text_size)) {
63911- set_page_attributes(mod->module_init,
63912- mod->module_init + mod->init_text_size,
63913+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
63914+ set_page_attributes(mod->module_init_rx,
63915+ mod->module_init_rx + mod->init_size_rx,
63916 set_memory_rw);
63917 }
63918 }
63919@@ -1695,14 +1697,14 @@ void set_all_modules_text_ro(void)
63920
63921 mutex_lock(&module_mutex);
63922 list_for_each_entry_rcu(mod, &modules, list) {
63923- if ((mod->module_core) && (mod->core_text_size)) {
63924- set_page_attributes(mod->module_core,
63925- mod->module_core + mod->core_text_size,
63926+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
63927+ set_page_attributes(mod->module_core_rx,
63928+ mod->module_core_rx + mod->core_size_rx,
63929 set_memory_ro);
63930 }
63931- if ((mod->module_init) && (mod->init_text_size)) {
63932- set_page_attributes(mod->module_init,
63933- mod->module_init + mod->init_text_size,
63934+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
63935+ set_page_attributes(mod->module_init_rx,
63936+ mod->module_init_rx + mod->init_size_rx,
63937 set_memory_ro);
63938 }
63939 }
63940@@ -1748,16 +1750,19 @@ static void free_module(struct module *m
63941
63942 /* This may be NULL, but that's OK */
63943 unset_module_init_ro_nx(mod);
63944- module_free(mod, mod->module_init);
63945+ module_free(mod, mod->module_init_rw);
63946+ module_free_exec(mod, mod->module_init_rx);
63947 kfree(mod->args);
63948 percpu_modfree(mod);
63949
63950 /* Free lock-classes: */
63951- lockdep_free_key_range(mod->module_core, mod->core_size);
63952+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
63953+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
63954
63955 /* Finally, free the core (containing the module structure) */
63956 unset_module_core_ro_nx(mod);
63957- module_free(mod, mod->module_core);
63958+ module_free_exec(mod, mod->module_core_rx);
63959+ module_free(mod, mod->module_core_rw);
63960
63961 #ifdef CONFIG_MPU
63962 update_protections(current->mm);
63963@@ -1826,10 +1831,31 @@ static int simplify_symbols(struct modul
63964 unsigned int i;
63965 int ret = 0;
63966 const struct kernel_symbol *ksym;
63967+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63968+ int is_fs_load = 0;
63969+ int register_filesystem_found = 0;
63970+ char *p;
63971+
63972+ p = strstr(mod->args, "grsec_modharden_fs");
63973+ if (p) {
63974+ char *endptr = p + strlen("grsec_modharden_fs");
63975+ /* copy \0 as well */
63976+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
63977+ is_fs_load = 1;
63978+ }
63979+#endif
63980
63981 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
63982 const char *name = info->strtab + sym[i].st_name;
63983
63984+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63985+ /* it's a real shame this will never get ripped and copied
63986+ upstream! ;(
63987+ */
63988+ if (is_fs_load && !strcmp(name, "register_filesystem"))
63989+ register_filesystem_found = 1;
63990+#endif
63991+
63992 switch (sym[i].st_shndx) {
63993 case SHN_COMMON:
63994 /* We compiled with -fno-common. These are not
63995@@ -1850,7 +1876,9 @@ static int simplify_symbols(struct modul
63996 ksym = resolve_symbol_wait(mod, info, name);
63997 /* Ok if resolved. */
63998 if (ksym && !IS_ERR(ksym)) {
63999+ pax_open_kernel();
64000 sym[i].st_value = ksym->value;
64001+ pax_close_kernel();
64002 break;
64003 }
64004
64005@@ -1869,11 +1897,20 @@ static int simplify_symbols(struct modul
64006 secbase = (unsigned long)mod_percpu(mod);
64007 else
64008 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
64009+ pax_open_kernel();
64010 sym[i].st_value += secbase;
64011+ pax_close_kernel();
64012 break;
64013 }
64014 }
64015
64016+#ifdef CONFIG_GRKERNSEC_MODHARDEN
64017+ if (is_fs_load && !register_filesystem_found) {
64018+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
64019+ ret = -EPERM;
64020+ }
64021+#endif
64022+
64023 return ret;
64024 }
64025
64026@@ -1977,22 +2014,12 @@ static void layout_sections(struct modul
64027 || s->sh_entsize != ~0UL
64028 || strstarts(sname, ".init"))
64029 continue;
64030- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
64031+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
64032+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
64033+ else
64034+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
64035 DEBUGP("\t%s\n", name);
64036 }
64037- switch (m) {
64038- case 0: /* executable */
64039- mod->core_size = debug_align(mod->core_size);
64040- mod->core_text_size = mod->core_size;
64041- break;
64042- case 1: /* RO: text and ro-data */
64043- mod->core_size = debug_align(mod->core_size);
64044- mod->core_ro_size = mod->core_size;
64045- break;
64046- case 3: /* whole core */
64047- mod->core_size = debug_align(mod->core_size);
64048- break;
64049- }
64050 }
64051
64052 DEBUGP("Init section allocation order:\n");
64053@@ -2006,23 +2033,13 @@ static void layout_sections(struct modul
64054 || s->sh_entsize != ~0UL
64055 || !strstarts(sname, ".init"))
64056 continue;
64057- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
64058- | INIT_OFFSET_MASK);
64059+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
64060+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
64061+ else
64062+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
64063+ s->sh_entsize |= INIT_OFFSET_MASK;
64064 DEBUGP("\t%s\n", sname);
64065 }
64066- switch (m) {
64067- case 0: /* executable */
64068- mod->init_size = debug_align(mod->init_size);
64069- mod->init_text_size = mod->init_size;
64070- break;
64071- case 1: /* RO: text and ro-data */
64072- mod->init_size = debug_align(mod->init_size);
64073- mod->init_ro_size = mod->init_size;
64074- break;
64075- case 3: /* whole init */
64076- mod->init_size = debug_align(mod->init_size);
64077- break;
64078- }
64079 }
64080 }
64081
64082@@ -2187,7 +2204,7 @@ static void layout_symtab(struct module
64083
64084 /* Put symbol section at end of init part of module. */
64085 symsect->sh_flags |= SHF_ALLOC;
64086- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
64087+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
64088 info->index.sym) | INIT_OFFSET_MASK;
64089 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
64090
64091@@ -2204,19 +2221,19 @@ static void layout_symtab(struct module
64092 }
64093
64094 /* Append room for core symbols at end of core part. */
64095- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
64096- mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
64097+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
64098+ mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
64099
64100 /* Put string table section at end of init part of module. */
64101 strsect->sh_flags |= SHF_ALLOC;
64102- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
64103+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
64104 info->index.str) | INIT_OFFSET_MASK;
64105 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
64106
64107 /* Append room for core symbols' strings at end of core part. */
64108- info->stroffs = mod->core_size;
64109+ info->stroffs = mod->core_size_rx;
64110 __set_bit(0, info->strmap);
64111- mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
64112+ mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
64113 }
64114
64115 static void add_kallsyms(struct module *mod, const struct load_info *info)
64116@@ -2232,11 +2249,13 @@ static void add_kallsyms(struct module *
64117 /* Make sure we get permanent strtab: don't use info->strtab. */
64118 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
64119
64120+ pax_open_kernel();
64121+
64122 /* Set types up while we still have access to sections. */
64123 for (i = 0; i < mod->num_symtab; i++)
64124 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
64125
64126- mod->core_symtab = dst = mod->module_core + info->symoffs;
64127+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
64128 src = mod->symtab;
64129 *dst = *src;
64130 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
64131@@ -2249,10 +2268,12 @@ static void add_kallsyms(struct module *
64132 }
64133 mod->core_num_syms = ndst;
64134
64135- mod->core_strtab = s = mod->module_core + info->stroffs;
64136+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
64137 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
64138 if (test_bit(i, info->strmap))
64139 *++s = mod->strtab[i];
64140+
64141+ pax_close_kernel();
64142 }
64143 #else
64144 static inline void layout_symtab(struct module *mod, struct load_info *info)
64145@@ -2286,17 +2307,33 @@ void * __weak module_alloc(unsigned long
64146 return size == 0 ? NULL : vmalloc_exec(size);
64147 }
64148
64149-static void *module_alloc_update_bounds(unsigned long size)
64150+static void *module_alloc_update_bounds_rw(unsigned long size)
64151 {
64152 void *ret = module_alloc(size);
64153
64154 if (ret) {
64155 mutex_lock(&module_mutex);
64156 /* Update module bounds. */
64157- if ((unsigned long)ret < module_addr_min)
64158- module_addr_min = (unsigned long)ret;
64159- if ((unsigned long)ret + size > module_addr_max)
64160- module_addr_max = (unsigned long)ret + size;
64161+ if ((unsigned long)ret < module_addr_min_rw)
64162+ module_addr_min_rw = (unsigned long)ret;
64163+ if ((unsigned long)ret + size > module_addr_max_rw)
64164+ module_addr_max_rw = (unsigned long)ret + size;
64165+ mutex_unlock(&module_mutex);
64166+ }
64167+ return ret;
64168+}
64169+
64170+static void *module_alloc_update_bounds_rx(unsigned long size)
64171+{
64172+ void *ret = module_alloc_exec(size);
64173+
64174+ if (ret) {
64175+ mutex_lock(&module_mutex);
64176+ /* Update module bounds. */
64177+ if ((unsigned long)ret < module_addr_min_rx)
64178+ module_addr_min_rx = (unsigned long)ret;
64179+ if ((unsigned long)ret + size > module_addr_max_rx)
64180+ module_addr_max_rx = (unsigned long)ret + size;
64181 mutex_unlock(&module_mutex);
64182 }
64183 return ret;
64184@@ -2589,7 +2626,7 @@ static int move_module(struct module *mo
64185 void *ptr;
64186
64187 /* Do the allocs. */
64188- ptr = module_alloc_update_bounds(mod->core_size);
64189+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
64190 /*
64191 * The pointer to this block is stored in the module structure
64192 * which is inside the block. Just mark it as not being a
64193@@ -2599,23 +2636,50 @@ static int move_module(struct module *mo
64194 if (!ptr)
64195 return -ENOMEM;
64196
64197- memset(ptr, 0, mod->core_size);
64198- mod->module_core = ptr;
64199+ memset(ptr, 0, mod->core_size_rw);
64200+ mod->module_core_rw = ptr;
64201
64202- ptr = module_alloc_update_bounds(mod->init_size);
64203+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
64204 /*
64205 * The pointer to this block is stored in the module structure
64206 * which is inside the block. This block doesn't need to be
64207 * scanned as it contains data and code that will be freed
64208 * after the module is initialized.
64209 */
64210- kmemleak_ignore(ptr);
64211- if (!ptr && mod->init_size) {
64212- module_free(mod, mod->module_core);
64213+ kmemleak_not_leak(ptr);
64214+ if (!ptr && mod->init_size_rw) {
64215+ module_free(mod, mod->module_core_rw);
64216 return -ENOMEM;
64217 }
64218- memset(ptr, 0, mod->init_size);
64219- mod->module_init = ptr;
64220+ memset(ptr, 0, mod->init_size_rw);
64221+ mod->module_init_rw = ptr;
64222+
64223+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
64224+ kmemleak_not_leak(ptr);
64225+ if (!ptr) {
64226+ module_free(mod, mod->module_init_rw);
64227+ module_free(mod, mod->module_core_rw);
64228+ return -ENOMEM;
64229+ }
64230+
64231+ pax_open_kernel();
64232+ memset(ptr, 0, mod->core_size_rx);
64233+ pax_close_kernel();
64234+ mod->module_core_rx = ptr;
64235+
64236+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
64237+ kmemleak_not_leak(ptr);
64238+ if (!ptr && mod->init_size_rx) {
64239+ module_free_exec(mod, mod->module_core_rx);
64240+ module_free(mod, mod->module_init_rw);
64241+ module_free(mod, mod->module_core_rw);
64242+ return -ENOMEM;
64243+ }
64244+
64245+ pax_open_kernel();
64246+ memset(ptr, 0, mod->init_size_rx);
64247+ pax_close_kernel();
64248+ mod->module_init_rx = ptr;
64249
64250 /* Transfer each section which specifies SHF_ALLOC */
64251 DEBUGP("final section addresses:\n");
64252@@ -2626,16 +2690,45 @@ static int move_module(struct module *mo
64253 if (!(shdr->sh_flags & SHF_ALLOC))
64254 continue;
64255
64256- if (shdr->sh_entsize & INIT_OFFSET_MASK)
64257- dest = mod->module_init
64258- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64259- else
64260- dest = mod->module_core + shdr->sh_entsize;
64261+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
64262+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
64263+ dest = mod->module_init_rw
64264+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64265+ else
64266+ dest = mod->module_init_rx
64267+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64268+ } else {
64269+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
64270+ dest = mod->module_core_rw + shdr->sh_entsize;
64271+ else
64272+ dest = mod->module_core_rx + shdr->sh_entsize;
64273+ }
64274+
64275+ if (shdr->sh_type != SHT_NOBITS) {
64276+
64277+#ifdef CONFIG_PAX_KERNEXEC
64278+#ifdef CONFIG_X86_64
64279+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
64280+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
64281+#endif
64282+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
64283+ pax_open_kernel();
64284+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
64285+ pax_close_kernel();
64286+ } else
64287+#endif
64288
64289- if (shdr->sh_type != SHT_NOBITS)
64290 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
64291+ }
64292 /* Update sh_addr to point to copy in image. */
64293- shdr->sh_addr = (unsigned long)dest;
64294+
64295+#ifdef CONFIG_PAX_KERNEXEC
64296+ if (shdr->sh_flags & SHF_EXECINSTR)
64297+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
64298+ else
64299+#endif
64300+
64301+ shdr->sh_addr = (unsigned long)dest;
64302 DEBUGP("\t0x%lx %s\n",
64303 shdr->sh_addr, info->secstrings + shdr->sh_name);
64304 }
64305@@ -2686,12 +2779,12 @@ static void flush_module_icache(const st
64306 * Do it before processing of module parameters, so the module
64307 * can provide parameter accessor functions of its own.
64308 */
64309- if (mod->module_init)
64310- flush_icache_range((unsigned long)mod->module_init,
64311- (unsigned long)mod->module_init
64312- + mod->init_size);
64313- flush_icache_range((unsigned long)mod->module_core,
64314- (unsigned long)mod->module_core + mod->core_size);
64315+ if (mod->module_init_rx)
64316+ flush_icache_range((unsigned long)mod->module_init_rx,
64317+ (unsigned long)mod->module_init_rx
64318+ + mod->init_size_rx);
64319+ flush_icache_range((unsigned long)mod->module_core_rx,
64320+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
64321
64322 set_fs(old_fs);
64323 }
64324@@ -2771,8 +2864,10 @@ static void module_deallocate(struct mod
64325 {
64326 kfree(info->strmap);
64327 percpu_modfree(mod);
64328- module_free(mod, mod->module_init);
64329- module_free(mod, mod->module_core);
64330+ module_free_exec(mod, mod->module_init_rx);
64331+ module_free_exec(mod, mod->module_core_rx);
64332+ module_free(mod, mod->module_init_rw);
64333+ module_free(mod, mod->module_core_rw);
64334 }
64335
64336 int __weak module_finalize(const Elf_Ehdr *hdr,
64337@@ -2836,9 +2931,38 @@ static struct module *load_module(void _
64338 if (err)
64339 goto free_unload;
64340
64341+ /* Now copy in args */
64342+ mod->args = strndup_user(uargs, ~0UL >> 1);
64343+ if (IS_ERR(mod->args)) {
64344+ err = PTR_ERR(mod->args);
64345+ goto free_unload;
64346+ }
64347+
64348 /* Set up MODINFO_ATTR fields */
64349 setup_modinfo(mod, &info);
64350
64351+#ifdef CONFIG_GRKERNSEC_MODHARDEN
64352+ {
64353+ char *p, *p2;
64354+
64355+ if (strstr(mod->args, "grsec_modharden_netdev")) {
64356+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
64357+ err = -EPERM;
64358+ goto free_modinfo;
64359+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
64360+ p += strlen("grsec_modharden_normal");
64361+ p2 = strstr(p, "_");
64362+ if (p2) {
64363+ *p2 = '\0';
64364+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
64365+ *p2 = '_';
64366+ }
64367+ err = -EPERM;
64368+ goto free_modinfo;
64369+ }
64370+ }
64371+#endif
64372+
64373 /* Fix up syms, so that st_value is a pointer to location. */
64374 err = simplify_symbols(mod, &info);
64375 if (err < 0)
64376@@ -2854,13 +2978,6 @@ static struct module *load_module(void _
64377
64378 flush_module_icache(mod);
64379
64380- /* Now copy in args */
64381- mod->args = strndup_user(uargs, ~0UL >> 1);
64382- if (IS_ERR(mod->args)) {
64383- err = PTR_ERR(mod->args);
64384- goto free_arch_cleanup;
64385- }
64386-
64387 /* Mark state as coming so strong_try_module_get() ignores us. */
64388 mod->state = MODULE_STATE_COMING;
64389
64390@@ -2920,11 +3037,10 @@ static struct module *load_module(void _
64391 unlock:
64392 mutex_unlock(&module_mutex);
64393 synchronize_sched();
64394- kfree(mod->args);
64395- free_arch_cleanup:
64396 module_arch_cleanup(mod);
64397 free_modinfo:
64398 free_modinfo(mod);
64399+ kfree(mod->args);
64400 free_unload:
64401 module_unload_free(mod);
64402 free_module:
64403@@ -2965,16 +3081,16 @@ SYSCALL_DEFINE3(init_module, void __user
64404 MODULE_STATE_COMING, mod);
64405
64406 /* Set RO and NX regions for core */
64407- set_section_ro_nx(mod->module_core,
64408- mod->core_text_size,
64409- mod->core_ro_size,
64410- mod->core_size);
64411+ set_section_ro_nx(mod->module_core_rx,
64412+ mod->core_size_rx,
64413+ mod->core_size_rx,
64414+ mod->core_size_rx);
64415
64416 /* Set RO and NX regions for init */
64417- set_section_ro_nx(mod->module_init,
64418- mod->init_text_size,
64419- mod->init_ro_size,
64420- mod->init_size);
64421+ set_section_ro_nx(mod->module_init_rx,
64422+ mod->init_size_rx,
64423+ mod->init_size_rx,
64424+ mod->init_size_rx);
64425
64426 do_mod_ctors(mod);
64427 /* Start the module */
64428@@ -3020,11 +3136,12 @@ SYSCALL_DEFINE3(init_module, void __user
64429 mod->strtab = mod->core_strtab;
64430 #endif
64431 unset_module_init_ro_nx(mod);
64432- module_free(mod, mod->module_init);
64433- mod->module_init = NULL;
64434- mod->init_size = 0;
64435- mod->init_ro_size = 0;
64436- mod->init_text_size = 0;
64437+ module_free(mod, mod->module_init_rw);
64438+ module_free_exec(mod, mod->module_init_rx);
64439+ mod->module_init_rw = NULL;
64440+ mod->module_init_rx = NULL;
64441+ mod->init_size_rw = 0;
64442+ mod->init_size_rx = 0;
64443 mutex_unlock(&module_mutex);
64444
64445 return 0;
64446@@ -3055,10 +3172,16 @@ static const char *get_ksymbol(struct mo
64447 unsigned long nextval;
64448
64449 /* At worse, next value is at end of module */
64450- if (within_module_init(addr, mod))
64451- nextval = (unsigned long)mod->module_init+mod->init_text_size;
64452+ if (within_module_init_rx(addr, mod))
64453+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
64454+ else if (within_module_init_rw(addr, mod))
64455+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
64456+ else if (within_module_core_rx(addr, mod))
64457+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
64458+ else if (within_module_core_rw(addr, mod))
64459+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
64460 else
64461- nextval = (unsigned long)mod->module_core+mod->core_text_size;
64462+ return NULL;
64463
64464 /* Scan for closest preceding symbol, and next symbol. (ELF
64465 starts real symbols at 1). */
64466@@ -3304,7 +3427,7 @@ static int m_show(struct seq_file *m, vo
64467 char buf[8];
64468
64469 seq_printf(m, "%s %u",
64470- mod->name, mod->init_size + mod->core_size);
64471+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
64472 print_unload_info(m, mod);
64473
64474 /* Informative for users. */
64475@@ -3313,7 +3436,7 @@ static int m_show(struct seq_file *m, vo
64476 mod->state == MODULE_STATE_COMING ? "Loading":
64477 "Live");
64478 /* Used by oprofile and other similar tools. */
64479- seq_printf(m, " 0x%pK", mod->module_core);
64480+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
64481
64482 /* Taints info */
64483 if (mod->taints)
64484@@ -3349,7 +3472,17 @@ static const struct file_operations proc
64485
64486 static int __init proc_modules_init(void)
64487 {
64488+#ifndef CONFIG_GRKERNSEC_HIDESYM
64489+#ifdef CONFIG_GRKERNSEC_PROC_USER
64490+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64491+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64492+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
64493+#else
64494 proc_create("modules", 0, NULL, &proc_modules_operations);
64495+#endif
64496+#else
64497+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64498+#endif
64499 return 0;
64500 }
64501 module_init(proc_modules_init);
64502@@ -3408,12 +3541,12 @@ struct module *__module_address(unsigned
64503 {
64504 struct module *mod;
64505
64506- if (addr < module_addr_min || addr > module_addr_max)
64507+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
64508+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
64509 return NULL;
64510
64511 list_for_each_entry_rcu(mod, &modules, list)
64512- if (within_module_core(addr, mod)
64513- || within_module_init(addr, mod))
64514+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
64515 return mod;
64516 return NULL;
64517 }
64518@@ -3447,11 +3580,20 @@ bool is_module_text_address(unsigned lon
64519 */
64520 struct module *__module_text_address(unsigned long addr)
64521 {
64522- struct module *mod = __module_address(addr);
64523+ struct module *mod;
64524+
64525+#ifdef CONFIG_X86_32
64526+ addr = ktla_ktva(addr);
64527+#endif
64528+
64529+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
64530+ return NULL;
64531+
64532+ mod = __module_address(addr);
64533+
64534 if (mod) {
64535 /* Make sure it's within the text section. */
64536- if (!within(addr, mod->module_init, mod->init_text_size)
64537- && !within(addr, mod->module_core, mod->core_text_size))
64538+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
64539 mod = NULL;
64540 }
64541 return mod;
64542diff -urNp linux-3.1.1/kernel/mutex.c linux-3.1.1/kernel/mutex.c
64543--- linux-3.1.1/kernel/mutex.c 2011-11-11 15:19:27.000000000 -0500
64544+++ linux-3.1.1/kernel/mutex.c 2011-11-16 18:39:08.000000000 -0500
64545@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock,
64546 spin_lock_mutex(&lock->wait_lock, flags);
64547
64548 debug_mutex_lock_common(lock, &waiter);
64549- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
64550+ debug_mutex_add_waiter(lock, &waiter, task);
64551
64552 /* add waiting tasks to the end of the waitqueue (FIFO): */
64553 list_add_tail(&waiter.list, &lock->wait_list);
64554@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock,
64555 * TASK_UNINTERRUPTIBLE case.)
64556 */
64557 if (unlikely(signal_pending_state(state, task))) {
64558- mutex_remove_waiter(lock, &waiter,
64559- task_thread_info(task));
64560+ mutex_remove_waiter(lock, &waiter, task);
64561 mutex_release(&lock->dep_map, 1, ip);
64562 spin_unlock_mutex(&lock->wait_lock, flags);
64563
64564@@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock,
64565 done:
64566 lock_acquired(&lock->dep_map, ip);
64567 /* got the lock - rejoice! */
64568- mutex_remove_waiter(lock, &waiter, current_thread_info());
64569+ mutex_remove_waiter(lock, &waiter, task);
64570 mutex_set_owner(lock);
64571
64572 /* set it to 0 if there are no waiters left: */
64573diff -urNp linux-3.1.1/kernel/mutex-debug.c linux-3.1.1/kernel/mutex-debug.c
64574--- linux-3.1.1/kernel/mutex-debug.c 2011-11-11 15:19:27.000000000 -0500
64575+++ linux-3.1.1/kernel/mutex-debug.c 2011-11-16 18:39:08.000000000 -0500
64576@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
64577 }
64578
64579 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64580- struct thread_info *ti)
64581+ struct task_struct *task)
64582 {
64583 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
64584
64585 /* Mark the current thread as blocked on the lock: */
64586- ti->task->blocked_on = waiter;
64587+ task->blocked_on = waiter;
64588 }
64589
64590 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64591- struct thread_info *ti)
64592+ struct task_struct *task)
64593 {
64594 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
64595- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
64596- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
64597- ti->task->blocked_on = NULL;
64598+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
64599+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
64600+ task->blocked_on = NULL;
64601
64602 list_del_init(&waiter->list);
64603 waiter->task = NULL;
64604diff -urNp linux-3.1.1/kernel/mutex-debug.h linux-3.1.1/kernel/mutex-debug.h
64605--- linux-3.1.1/kernel/mutex-debug.h 2011-11-11 15:19:27.000000000 -0500
64606+++ linux-3.1.1/kernel/mutex-debug.h 2011-11-16 18:39:08.000000000 -0500
64607@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(stru
64608 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
64609 extern void debug_mutex_add_waiter(struct mutex *lock,
64610 struct mutex_waiter *waiter,
64611- struct thread_info *ti);
64612+ struct task_struct *task);
64613 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64614- struct thread_info *ti);
64615+ struct task_struct *task);
64616 extern void debug_mutex_unlock(struct mutex *lock);
64617 extern void debug_mutex_init(struct mutex *lock, const char *name,
64618 struct lock_class_key *key);
64619diff -urNp linux-3.1.1/kernel/padata.c linux-3.1.1/kernel/padata.c
64620--- linux-3.1.1/kernel/padata.c 2011-11-11 15:19:27.000000000 -0500
64621+++ linux-3.1.1/kernel/padata.c 2011-11-16 18:39:08.000000000 -0500
64622@@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_ins
64623 padata->pd = pd;
64624 padata->cb_cpu = cb_cpu;
64625
64626- if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
64627- atomic_set(&pd->seq_nr, -1);
64628+ if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
64629+ atomic_set_unchecked(&pd->seq_nr, -1);
64630
64631- padata->seq_nr = atomic_inc_return(&pd->seq_nr);
64632+ padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
64633
64634 target_cpu = padata_cpu_hash(padata);
64635 queue = per_cpu_ptr(pd->pqueue, target_cpu);
64636@@ -444,7 +444,7 @@ static struct parallel_data *padata_allo
64637 padata_init_pqueues(pd);
64638 padata_init_squeues(pd);
64639 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
64640- atomic_set(&pd->seq_nr, -1);
64641+ atomic_set_unchecked(&pd->seq_nr, -1);
64642 atomic_set(&pd->reorder_objects, 0);
64643 atomic_set(&pd->refcnt, 0);
64644 pd->pinst = pinst;
64645diff -urNp linux-3.1.1/kernel/panic.c linux-3.1.1/kernel/panic.c
64646--- linux-3.1.1/kernel/panic.c 2011-11-11 15:19:27.000000000 -0500
64647+++ linux-3.1.1/kernel/panic.c 2011-11-16 18:40:44.000000000 -0500
64648@@ -371,7 +371,7 @@ static void warn_slowpath_common(const c
64649 const char *board;
64650
64651 printk(KERN_WARNING "------------[ cut here ]------------\n");
64652- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
64653+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
64654 board = dmi_get_system_info(DMI_PRODUCT_NAME);
64655 if (board)
64656 printk(KERN_WARNING "Hardware name: %s\n", board);
64657@@ -426,7 +426,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
64658 */
64659 void __stack_chk_fail(void)
64660 {
64661- panic("stack-protector: Kernel stack is corrupted in: %p\n",
64662+ dump_stack();
64663+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
64664 __builtin_return_address(0));
64665 }
64666 EXPORT_SYMBOL(__stack_chk_fail);
64667diff -urNp linux-3.1.1/kernel/pid.c linux-3.1.1/kernel/pid.c
64668--- linux-3.1.1/kernel/pid.c 2011-11-11 15:19:27.000000000 -0500
64669+++ linux-3.1.1/kernel/pid.c 2011-11-16 18:40:44.000000000 -0500
64670@@ -33,6 +33,7 @@
64671 #include <linux/rculist.h>
64672 #include <linux/bootmem.h>
64673 #include <linux/hash.h>
64674+#include <linux/security.h>
64675 #include <linux/pid_namespace.h>
64676 #include <linux/init_task.h>
64677 #include <linux/syscalls.h>
64678@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
64679
64680 int pid_max = PID_MAX_DEFAULT;
64681
64682-#define RESERVED_PIDS 300
64683+#define RESERVED_PIDS 500
64684
64685 int pid_max_min = RESERVED_PIDS + 1;
64686 int pid_max_max = PID_MAX_LIMIT;
64687@@ -418,8 +419,15 @@ EXPORT_SYMBOL(pid_task);
64688 */
64689 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
64690 {
64691+ struct task_struct *task;
64692+
64693 rcu_lockdep_assert(rcu_read_lock_held());
64694- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64695+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64696+
64697+ if (gr_pid_is_chrooted(task))
64698+ return NULL;
64699+
64700+ return task;
64701 }
64702
64703 struct task_struct *find_task_by_vpid(pid_t vnr)
64704@@ -427,6 +435,12 @@ struct task_struct *find_task_by_vpid(pi
64705 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
64706 }
64707
64708+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
64709+{
64710+ rcu_lockdep_assert(rcu_read_lock_held());
64711+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
64712+}
64713+
64714 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
64715 {
64716 struct pid *pid;
64717diff -urNp linux-3.1.1/kernel/posix-cpu-timers.c linux-3.1.1/kernel/posix-cpu-timers.c
64718--- linux-3.1.1/kernel/posix-cpu-timers.c 2011-11-11 15:19:27.000000000 -0500
64719+++ linux-3.1.1/kernel/posix-cpu-timers.c 2011-11-16 18:40:44.000000000 -0500
64720@@ -6,6 +6,7 @@
64721 #include <linux/posix-timers.h>
64722 #include <linux/errno.h>
64723 #include <linux/math64.h>
64724+#include <linux/security.h>
64725 #include <asm/uaccess.h>
64726 #include <linux/kernel_stat.h>
64727 #include <trace/events/timer.h>
64728@@ -1606,14 +1607,14 @@ struct k_clock clock_posix_cpu = {
64729
64730 static __init int init_posix_cpu_timers(void)
64731 {
64732- struct k_clock process = {
64733+ static struct k_clock process = {
64734 .clock_getres = process_cpu_clock_getres,
64735 .clock_get = process_cpu_clock_get,
64736 .timer_create = process_cpu_timer_create,
64737 .nsleep = process_cpu_nsleep,
64738 .nsleep_restart = process_cpu_nsleep_restart,
64739 };
64740- struct k_clock thread = {
64741+ static struct k_clock thread = {
64742 .clock_getres = thread_cpu_clock_getres,
64743 .clock_get = thread_cpu_clock_get,
64744 .timer_create = thread_cpu_timer_create,
64745diff -urNp linux-3.1.1/kernel/posix-timers.c linux-3.1.1/kernel/posix-timers.c
64746--- linux-3.1.1/kernel/posix-timers.c 2011-11-11 15:19:27.000000000 -0500
64747+++ linux-3.1.1/kernel/posix-timers.c 2011-11-16 18:40:44.000000000 -0500
64748@@ -43,6 +43,7 @@
64749 #include <linux/idr.h>
64750 #include <linux/posix-clock.h>
64751 #include <linux/posix-timers.h>
64752+#include <linux/grsecurity.h>
64753 #include <linux/syscalls.h>
64754 #include <linux/wait.h>
64755 #include <linux/workqueue.h>
64756@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
64757 * which we beg off on and pass to do_sys_settimeofday().
64758 */
64759
64760-static struct k_clock posix_clocks[MAX_CLOCKS];
64761+static struct k_clock *posix_clocks[MAX_CLOCKS];
64762
64763 /*
64764 * These ones are defined below.
64765@@ -227,7 +228,7 @@ static int posix_get_boottime(const cloc
64766 */
64767 static __init int init_posix_timers(void)
64768 {
64769- struct k_clock clock_realtime = {
64770+ static struct k_clock clock_realtime = {
64771 .clock_getres = hrtimer_get_res,
64772 .clock_get = posix_clock_realtime_get,
64773 .clock_set = posix_clock_realtime_set,
64774@@ -239,7 +240,7 @@ static __init int init_posix_timers(void
64775 .timer_get = common_timer_get,
64776 .timer_del = common_timer_del,
64777 };
64778- struct k_clock clock_monotonic = {
64779+ static struct k_clock clock_monotonic = {
64780 .clock_getres = hrtimer_get_res,
64781 .clock_get = posix_ktime_get_ts,
64782 .nsleep = common_nsleep,
64783@@ -249,19 +250,19 @@ static __init int init_posix_timers(void
64784 .timer_get = common_timer_get,
64785 .timer_del = common_timer_del,
64786 };
64787- struct k_clock clock_monotonic_raw = {
64788+ static struct k_clock clock_monotonic_raw = {
64789 .clock_getres = hrtimer_get_res,
64790 .clock_get = posix_get_monotonic_raw,
64791 };
64792- struct k_clock clock_realtime_coarse = {
64793+ static struct k_clock clock_realtime_coarse = {
64794 .clock_getres = posix_get_coarse_res,
64795 .clock_get = posix_get_realtime_coarse,
64796 };
64797- struct k_clock clock_monotonic_coarse = {
64798+ static struct k_clock clock_monotonic_coarse = {
64799 .clock_getres = posix_get_coarse_res,
64800 .clock_get = posix_get_monotonic_coarse,
64801 };
64802- struct k_clock clock_boottime = {
64803+ static struct k_clock clock_boottime = {
64804 .clock_getres = hrtimer_get_res,
64805 .clock_get = posix_get_boottime,
64806 .nsleep = common_nsleep,
64807@@ -272,6 +273,8 @@ static __init int init_posix_timers(void
64808 .timer_del = common_timer_del,
64809 };
64810
64811+ pax_track_stack();
64812+
64813 posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
64814 posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
64815 posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
64816@@ -473,7 +476,7 @@ void posix_timers_register_clock(const c
64817 return;
64818 }
64819
64820- posix_clocks[clock_id] = *new_clock;
64821+ posix_clocks[clock_id] = new_clock;
64822 }
64823 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
64824
64825@@ -519,9 +522,9 @@ static struct k_clock *clockid_to_kclock
64826 return (id & CLOCKFD_MASK) == CLOCKFD ?
64827 &clock_posix_dynamic : &clock_posix_cpu;
64828
64829- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
64830+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
64831 return NULL;
64832- return &posix_clocks[id];
64833+ return posix_clocks[id];
64834 }
64835
64836 static int common_timer_create(struct k_itimer *new_timer)
64837@@ -959,6 +962,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
64838 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
64839 return -EFAULT;
64840
64841+ /* only the CLOCK_REALTIME clock can be set, all other clocks
64842+ have their clock_set fptr set to a nosettime dummy function
64843+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
64844+ call common_clock_set, which calls do_sys_settimeofday, which
64845+ we hook
64846+ */
64847+
64848 return kc->clock_set(which_clock, &new_tp);
64849 }
64850
64851diff -urNp linux-3.1.1/kernel/power/poweroff.c linux-3.1.1/kernel/power/poweroff.c
64852--- linux-3.1.1/kernel/power/poweroff.c 2011-11-11 15:19:27.000000000 -0500
64853+++ linux-3.1.1/kernel/power/poweroff.c 2011-11-16 18:39:08.000000000 -0500
64854@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
64855 .enable_mask = SYSRQ_ENABLE_BOOT,
64856 };
64857
64858-static int pm_sysrq_init(void)
64859+static int __init pm_sysrq_init(void)
64860 {
64861 register_sysrq_key('o', &sysrq_poweroff_op);
64862 return 0;
64863diff -urNp linux-3.1.1/kernel/power/process.c linux-3.1.1/kernel/power/process.c
64864--- linux-3.1.1/kernel/power/process.c 2011-11-11 15:19:27.000000000 -0500
64865+++ linux-3.1.1/kernel/power/process.c 2011-11-16 18:39:08.000000000 -0500
64866@@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_
64867 u64 elapsed_csecs64;
64868 unsigned int elapsed_csecs;
64869 bool wakeup = false;
64870+ bool timedout = false;
64871
64872 do_gettimeofday(&start);
64873
64874@@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_
64875
64876 while (true) {
64877 todo = 0;
64878+ if (time_after(jiffies, end_time))
64879+ timedout = true;
64880 read_lock(&tasklist_lock);
64881 do_each_thread(g, p) {
64882 if (frozen(p) || !freezable(p))
64883@@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_
64884 * try_to_stop() after schedule() in ptrace/signal
64885 * stop sees TIF_FREEZE.
64886 */
64887- if (!task_is_stopped_or_traced(p) &&
64888- !freezer_should_skip(p))
64889+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
64890 todo++;
64891+ if (timedout) {
64892+ printk(KERN_ERR "Task refusing to freeze:\n");
64893+ sched_show_task(p);
64894+ }
64895+ }
64896 } while_each_thread(g, p);
64897 read_unlock(&tasklist_lock);
64898
64899@@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_
64900 todo += wq_busy;
64901 }
64902
64903- if (!todo || time_after(jiffies, end_time))
64904+ if (!todo || timedout)
64905 break;
64906
64907 if (pm_wakeup_pending()) {
64908diff -urNp linux-3.1.1/kernel/printk.c linux-3.1.1/kernel/printk.c
64909--- linux-3.1.1/kernel/printk.c 2011-11-11 15:19:27.000000000 -0500
64910+++ linux-3.1.1/kernel/printk.c 2011-11-16 19:38:11.000000000 -0500
64911@@ -313,6 +313,11 @@ static int check_syslog_permissions(int
64912 if (from_file && type != SYSLOG_ACTION_OPEN)
64913 return 0;
64914
64915+#ifdef CONFIG_GRKERNSEC_DMESG
64916+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
64917+ return -EPERM;
64918+#endif
64919+
64920 if (syslog_action_restricted(type)) {
64921 if (capable(CAP_SYSLOG))
64922 return 0;
64923diff -urNp linux-3.1.1/kernel/profile.c linux-3.1.1/kernel/profile.c
64924--- linux-3.1.1/kernel/profile.c 2011-11-11 15:19:27.000000000 -0500
64925+++ linux-3.1.1/kernel/profile.c 2011-11-16 18:39:08.000000000 -0500
64926@@ -39,7 +39,7 @@ struct profile_hit {
64927 /* Oprofile timer tick hook */
64928 static int (*timer_hook)(struct pt_regs *) __read_mostly;
64929
64930-static atomic_t *prof_buffer;
64931+static atomic_unchecked_t *prof_buffer;
64932 static unsigned long prof_len, prof_shift;
64933
64934 int prof_on __read_mostly;
64935@@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
64936 hits[i].pc = 0;
64937 continue;
64938 }
64939- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
64940+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
64941 hits[i].hits = hits[i].pc = 0;
64942 }
64943 }
64944@@ -342,9 +342,9 @@ static void do_profile_hits(int type, vo
64945 * Add the current hit(s) and flush the write-queue out
64946 * to the global buffer:
64947 */
64948- atomic_add(nr_hits, &prof_buffer[pc]);
64949+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
64950 for (i = 0; i < NR_PROFILE_HIT; ++i) {
64951- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
64952+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
64953 hits[i].pc = hits[i].hits = 0;
64954 }
64955 out:
64956@@ -419,7 +419,7 @@ static void do_profile_hits(int type, vo
64957 {
64958 unsigned long pc;
64959 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
64960- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
64961+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
64962 }
64963 #endif /* !CONFIG_SMP */
64964
64965@@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
64966 return -EFAULT;
64967 buf++; p++; count--; read++;
64968 }
64969- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
64970+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
64971 if (copy_to_user(buf, (void *)pnt, count))
64972 return -EFAULT;
64973 read += count;
64974@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
64975 }
64976 #endif
64977 profile_discard_flip_buffers();
64978- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
64979+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
64980 return count;
64981 }
64982
64983diff -urNp linux-3.1.1/kernel/ptrace.c linux-3.1.1/kernel/ptrace.c
64984--- linux-3.1.1/kernel/ptrace.c 2011-11-11 15:19:27.000000000 -0500
64985+++ linux-3.1.1/kernel/ptrace.c 2011-11-16 19:50:22.000000000 -0500
64986@@ -161,7 +161,8 @@ int ptrace_check_attach(struct task_stru
64987 return ret;
64988 }
64989
64990-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
64991+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
64992+ unsigned int log)
64993 {
64994 const struct cred *cred = current_cred(), *tcred;
64995
64996@@ -187,7 +188,8 @@ int __ptrace_may_access(struct task_stru
64997 cred->gid == tcred->sgid &&
64998 cred->gid == tcred->gid))
64999 goto ok;
65000- if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
65001+ if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
65002+ (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
65003 goto ok;
65004 rcu_read_unlock();
65005 return -EPERM;
65006@@ -196,7 +198,9 @@ ok:
65007 smp_rmb();
65008 if (task->mm)
65009 dumpable = get_dumpable(task->mm);
65010- if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
65011+ if (!dumpable &&
65012+ ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
65013+ (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
65014 return -EPERM;
65015
65016 return security_ptrace_access_check(task, mode);
65017@@ -206,7 +210,16 @@ bool ptrace_may_access(struct task_struc
65018 {
65019 int err;
65020 task_lock(task);
65021- err = __ptrace_may_access(task, mode);
65022+ err = __ptrace_may_access(task, mode, 0);
65023+ task_unlock(task);
65024+ return !err;
65025+}
65026+
65027+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
65028+{
65029+ int err;
65030+ task_lock(task);
65031+ err = __ptrace_may_access(task, mode, 1);
65032 task_unlock(task);
65033 return !err;
65034 }
65035@@ -251,7 +264,7 @@ static int ptrace_attach(struct task_str
65036 goto out;
65037
65038 task_lock(task);
65039- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
65040+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
65041 task_unlock(task);
65042 if (retval)
65043 goto unlock_creds;
65044@@ -266,7 +279,7 @@ static int ptrace_attach(struct task_str
65045 task->ptrace = PT_PTRACED;
65046 if (seize)
65047 task->ptrace |= PT_SEIZED;
65048- if (task_ns_capable(task, CAP_SYS_PTRACE))
65049+ if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
65050 task->ptrace |= PT_PTRACE_CAP;
65051
65052 __ptrace_link(task, current);
65053@@ -461,6 +474,8 @@ int ptrace_readdata(struct task_struct *
65054 {
65055 int copied = 0;
65056
65057+ pax_track_stack();
65058+
65059 while (len > 0) {
65060 char buf[128];
65061 int this_len, retval;
65062@@ -472,7 +487,7 @@ int ptrace_readdata(struct task_struct *
65063 break;
65064 return -EIO;
65065 }
65066- if (copy_to_user(dst, buf, retval))
65067+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
65068 return -EFAULT;
65069 copied += retval;
65070 src += retval;
65071@@ -486,6 +501,8 @@ int ptrace_writedata(struct task_struct
65072 {
65073 int copied = 0;
65074
65075+ pax_track_stack();
65076+
65077 while (len > 0) {
65078 char buf[128];
65079 int this_len, retval;
65080@@ -669,10 +686,12 @@ int ptrace_request(struct task_struct *c
65081 bool seized = child->ptrace & PT_SEIZED;
65082 int ret = -EIO;
65083 siginfo_t siginfo, *si;
65084- void __user *datavp = (void __user *) data;
65085+ void __user *datavp = (__force void __user *) data;
65086 unsigned long __user *datalp = datavp;
65087 unsigned long flags;
65088
65089+ pax_track_stack();
65090+
65091 switch (request) {
65092 case PTRACE_PEEKTEXT:
65093 case PTRACE_PEEKDATA:
65094@@ -871,14 +890,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
65095 goto out;
65096 }
65097
65098+ if (gr_handle_ptrace(child, request)) {
65099+ ret = -EPERM;
65100+ goto out_put_task_struct;
65101+ }
65102+
65103 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
65104 ret = ptrace_attach(child, request, data);
65105 /*
65106 * Some architectures need to do book-keeping after
65107 * a ptrace attach.
65108 */
65109- if (!ret)
65110+ if (!ret) {
65111 arch_ptrace_attach(child);
65112+ gr_audit_ptrace(child);
65113+ }
65114 goto out_put_task_struct;
65115 }
65116
65117@@ -904,7 +930,7 @@ int generic_ptrace_peekdata(struct task_
65118 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
65119 if (copied != sizeof(tmp))
65120 return -EIO;
65121- return put_user(tmp, (unsigned long __user *)data);
65122+ return put_user(tmp, (__force unsigned long __user *)data);
65123 }
65124
65125 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
65126@@ -927,6 +953,8 @@ int compat_ptrace_request(struct task_st
65127 siginfo_t siginfo;
65128 int ret;
65129
65130+ pax_track_stack();
65131+
65132 switch (request) {
65133 case PTRACE_PEEKTEXT:
65134 case PTRACE_PEEKDATA:
65135@@ -1014,14 +1042,21 @@ asmlinkage long compat_sys_ptrace(compat
65136 goto out;
65137 }
65138
65139+ if (gr_handle_ptrace(child, request)) {
65140+ ret = -EPERM;
65141+ goto out_put_task_struct;
65142+ }
65143+
65144 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
65145 ret = ptrace_attach(child, request, data);
65146 /*
65147 * Some architectures need to do book-keeping after
65148 * a ptrace attach.
65149 */
65150- if (!ret)
65151+ if (!ret) {
65152 arch_ptrace_attach(child);
65153+ gr_audit_ptrace(child);
65154+ }
65155 goto out_put_task_struct;
65156 }
65157
65158diff -urNp linux-3.1.1/kernel/rcutorture.c linux-3.1.1/kernel/rcutorture.c
65159--- linux-3.1.1/kernel/rcutorture.c 2011-11-11 15:19:27.000000000 -0500
65160+++ linux-3.1.1/kernel/rcutorture.c 2011-11-16 18:39:08.000000000 -0500
65161@@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
65162 { 0 };
65163 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
65164 { 0 };
65165-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65166-static atomic_t n_rcu_torture_alloc;
65167-static atomic_t n_rcu_torture_alloc_fail;
65168-static atomic_t n_rcu_torture_free;
65169-static atomic_t n_rcu_torture_mberror;
65170-static atomic_t n_rcu_torture_error;
65171+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65172+static atomic_unchecked_t n_rcu_torture_alloc;
65173+static atomic_unchecked_t n_rcu_torture_alloc_fail;
65174+static atomic_unchecked_t n_rcu_torture_free;
65175+static atomic_unchecked_t n_rcu_torture_mberror;
65176+static atomic_unchecked_t n_rcu_torture_error;
65177 static long n_rcu_torture_boost_ktrerror;
65178 static long n_rcu_torture_boost_rterror;
65179 static long n_rcu_torture_boost_failure;
65180@@ -223,11 +223,11 @@ rcu_torture_alloc(void)
65181
65182 spin_lock_bh(&rcu_torture_lock);
65183 if (list_empty(&rcu_torture_freelist)) {
65184- atomic_inc(&n_rcu_torture_alloc_fail);
65185+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
65186 spin_unlock_bh(&rcu_torture_lock);
65187 return NULL;
65188 }
65189- atomic_inc(&n_rcu_torture_alloc);
65190+ atomic_inc_unchecked(&n_rcu_torture_alloc);
65191 p = rcu_torture_freelist.next;
65192 list_del_init(p);
65193 spin_unlock_bh(&rcu_torture_lock);
65194@@ -240,7 +240,7 @@ rcu_torture_alloc(void)
65195 static void
65196 rcu_torture_free(struct rcu_torture *p)
65197 {
65198- atomic_inc(&n_rcu_torture_free);
65199+ atomic_inc_unchecked(&n_rcu_torture_free);
65200 spin_lock_bh(&rcu_torture_lock);
65201 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
65202 spin_unlock_bh(&rcu_torture_lock);
65203@@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
65204 i = rp->rtort_pipe_count;
65205 if (i > RCU_TORTURE_PIPE_LEN)
65206 i = RCU_TORTURE_PIPE_LEN;
65207- atomic_inc(&rcu_torture_wcount[i]);
65208+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65209 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65210 rp->rtort_mbtest = 0;
65211 rcu_torture_free(rp);
65212@@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_fr
65213 i = rp->rtort_pipe_count;
65214 if (i > RCU_TORTURE_PIPE_LEN)
65215 i = RCU_TORTURE_PIPE_LEN;
65216- atomic_inc(&rcu_torture_wcount[i]);
65217+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65218 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65219 rp->rtort_mbtest = 0;
65220 list_del(&rp->rtort_free);
65221@@ -882,7 +882,7 @@ rcu_torture_writer(void *arg)
65222 i = old_rp->rtort_pipe_count;
65223 if (i > RCU_TORTURE_PIPE_LEN)
65224 i = RCU_TORTURE_PIPE_LEN;
65225- atomic_inc(&rcu_torture_wcount[i]);
65226+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65227 old_rp->rtort_pipe_count++;
65228 cur_ops->deferred_free(old_rp);
65229 }
65230@@ -950,7 +950,7 @@ static void rcu_torture_timer(unsigned l
65231 return;
65232 }
65233 if (p->rtort_mbtest == 0)
65234- atomic_inc(&n_rcu_torture_mberror);
65235+ atomic_inc_unchecked(&n_rcu_torture_mberror);
65236 spin_lock(&rand_lock);
65237 cur_ops->read_delay(&rand);
65238 n_rcu_torture_timers++;
65239@@ -1011,7 +1011,7 @@ rcu_torture_reader(void *arg)
65240 continue;
65241 }
65242 if (p->rtort_mbtest == 0)
65243- atomic_inc(&n_rcu_torture_mberror);
65244+ atomic_inc_unchecked(&n_rcu_torture_mberror);
65245 cur_ops->read_delay(&rand);
65246 preempt_disable();
65247 pipe_count = p->rtort_pipe_count;
65248@@ -1070,16 +1070,16 @@ rcu_torture_printk(char *page)
65249 rcu_torture_current,
65250 rcu_torture_current_version,
65251 list_empty(&rcu_torture_freelist),
65252- atomic_read(&n_rcu_torture_alloc),
65253- atomic_read(&n_rcu_torture_alloc_fail),
65254- atomic_read(&n_rcu_torture_free),
65255- atomic_read(&n_rcu_torture_mberror),
65256+ atomic_read_unchecked(&n_rcu_torture_alloc),
65257+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
65258+ atomic_read_unchecked(&n_rcu_torture_free),
65259+ atomic_read_unchecked(&n_rcu_torture_mberror),
65260 n_rcu_torture_boost_ktrerror,
65261 n_rcu_torture_boost_rterror,
65262 n_rcu_torture_boost_failure,
65263 n_rcu_torture_boosts,
65264 n_rcu_torture_timers);
65265- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
65266+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
65267 n_rcu_torture_boost_ktrerror != 0 ||
65268 n_rcu_torture_boost_rterror != 0 ||
65269 n_rcu_torture_boost_failure != 0)
65270@@ -1087,7 +1087,7 @@ rcu_torture_printk(char *page)
65271 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
65272 if (i > 1) {
65273 cnt += sprintf(&page[cnt], "!!! ");
65274- atomic_inc(&n_rcu_torture_error);
65275+ atomic_inc_unchecked(&n_rcu_torture_error);
65276 WARN_ON_ONCE(1);
65277 }
65278 cnt += sprintf(&page[cnt], "Reader Pipe: ");
65279@@ -1101,7 +1101,7 @@ rcu_torture_printk(char *page)
65280 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
65281 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65282 cnt += sprintf(&page[cnt], " %d",
65283- atomic_read(&rcu_torture_wcount[i]));
65284+ atomic_read_unchecked(&rcu_torture_wcount[i]));
65285 }
65286 cnt += sprintf(&page[cnt], "\n");
65287 if (cur_ops->stats)
65288@@ -1410,7 +1410,7 @@ rcu_torture_cleanup(void)
65289
65290 if (cur_ops->cleanup)
65291 cur_ops->cleanup();
65292- if (atomic_read(&n_rcu_torture_error))
65293+ if (atomic_read_unchecked(&n_rcu_torture_error))
65294 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
65295 else
65296 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
65297@@ -1474,17 +1474,17 @@ rcu_torture_init(void)
65298
65299 rcu_torture_current = NULL;
65300 rcu_torture_current_version = 0;
65301- atomic_set(&n_rcu_torture_alloc, 0);
65302- atomic_set(&n_rcu_torture_alloc_fail, 0);
65303- atomic_set(&n_rcu_torture_free, 0);
65304- atomic_set(&n_rcu_torture_mberror, 0);
65305- atomic_set(&n_rcu_torture_error, 0);
65306+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
65307+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
65308+ atomic_set_unchecked(&n_rcu_torture_free, 0);
65309+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
65310+ atomic_set_unchecked(&n_rcu_torture_error, 0);
65311 n_rcu_torture_boost_ktrerror = 0;
65312 n_rcu_torture_boost_rterror = 0;
65313 n_rcu_torture_boost_failure = 0;
65314 n_rcu_torture_boosts = 0;
65315 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
65316- atomic_set(&rcu_torture_wcount[i], 0);
65317+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
65318 for_each_possible_cpu(cpu) {
65319 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65320 per_cpu(rcu_torture_count, cpu)[i] = 0;
65321diff -urNp linux-3.1.1/kernel/rcutree.c linux-3.1.1/kernel/rcutree.c
65322--- linux-3.1.1/kernel/rcutree.c 2011-11-11 15:19:27.000000000 -0500
65323+++ linux-3.1.1/kernel/rcutree.c 2011-11-16 18:39:08.000000000 -0500
65324@@ -356,9 +356,9 @@ void rcu_enter_nohz(void)
65325 }
65326 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
65327 smp_mb__before_atomic_inc(); /* See above. */
65328- atomic_inc(&rdtp->dynticks);
65329+ atomic_inc_unchecked(&rdtp->dynticks);
65330 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
65331- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
65332+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
65333 local_irq_restore(flags);
65334
65335 /* If the interrupt queued a callback, get out of dyntick mode. */
65336@@ -387,10 +387,10 @@ void rcu_exit_nohz(void)
65337 return;
65338 }
65339 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
65340- atomic_inc(&rdtp->dynticks);
65341+ atomic_inc_unchecked(&rdtp->dynticks);
65342 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
65343 smp_mb__after_atomic_inc(); /* See above. */
65344- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
65345+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
65346 local_irq_restore(flags);
65347 }
65348
65349@@ -406,14 +406,14 @@ void rcu_nmi_enter(void)
65350 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
65351
65352 if (rdtp->dynticks_nmi_nesting == 0 &&
65353- (atomic_read(&rdtp->dynticks) & 0x1))
65354+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
65355 return;
65356 rdtp->dynticks_nmi_nesting++;
65357 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
65358- atomic_inc(&rdtp->dynticks);
65359+ atomic_inc_unchecked(&rdtp->dynticks);
65360 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
65361 smp_mb__after_atomic_inc(); /* See above. */
65362- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
65363+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
65364 }
65365
65366 /**
65367@@ -432,9 +432,9 @@ void rcu_nmi_exit(void)
65368 return;
65369 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
65370 smp_mb__before_atomic_inc(); /* See above. */
65371- atomic_inc(&rdtp->dynticks);
65372+ atomic_inc_unchecked(&rdtp->dynticks);
65373 smp_mb__after_atomic_inc(); /* Force delay to next write. */
65374- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
65375+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
65376 }
65377
65378 /**
65379@@ -469,7 +469,7 @@ void rcu_irq_exit(void)
65380 */
65381 static int dyntick_save_progress_counter(struct rcu_data *rdp)
65382 {
65383- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
65384+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
65385 return 0;
65386 }
65387
65388@@ -484,7 +484,7 @@ static int rcu_implicit_dynticks_qs(stru
65389 unsigned long curr;
65390 unsigned long snap;
65391
65392- curr = (unsigned long)atomic_add_return(0, &rdp->dynticks->dynticks);
65393+ curr = (unsigned long)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
65394 snap = (unsigned long)rdp->dynticks_snap;
65395
65396 /*
65397@@ -1470,7 +1470,7 @@ __rcu_process_callbacks(struct rcu_state
65398 /*
65399 * Do softirq processing for the current CPU.
65400 */
65401-static void rcu_process_callbacks(struct softirq_action *unused)
65402+static void rcu_process_callbacks(void)
65403 {
65404 __rcu_process_callbacks(&rcu_sched_state,
65405 &__get_cpu_var(rcu_sched_data));
65406diff -urNp linux-3.1.1/kernel/rcutree.h linux-3.1.1/kernel/rcutree.h
65407--- linux-3.1.1/kernel/rcutree.h 2011-11-11 15:19:27.000000000 -0500
65408+++ linux-3.1.1/kernel/rcutree.h 2011-11-16 18:39:08.000000000 -0500
65409@@ -86,7 +86,7 @@
65410 struct rcu_dynticks {
65411 int dynticks_nesting; /* Track irq/process nesting level. */
65412 int dynticks_nmi_nesting; /* Track NMI nesting level. */
65413- atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
65414+ atomic_unchecked_t dynticks; /* Even value for dynticks-idle, else odd. */
65415 };
65416
65417 /* RCU's kthread states for tracing. */
65418diff -urNp linux-3.1.1/kernel/rcutree_plugin.h linux-3.1.1/kernel/rcutree_plugin.h
65419--- linux-3.1.1/kernel/rcutree_plugin.h 2011-11-11 15:19:27.000000000 -0500
65420+++ linux-3.1.1/kernel/rcutree_plugin.h 2011-11-16 18:39:08.000000000 -0500
65421@@ -822,7 +822,7 @@ void synchronize_rcu_expedited(void)
65422
65423 /* Clean up and exit. */
65424 smp_mb(); /* ensure expedited GP seen before counter increment. */
65425- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
65426+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
65427 unlock_mb_ret:
65428 mutex_unlock(&sync_rcu_preempt_exp_mutex);
65429 mb_ret:
65430@@ -1774,8 +1774,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expe
65431
65432 #else /* #ifndef CONFIG_SMP */
65433
65434-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
65435-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
65436+static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
65437+static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
65438
65439 static int synchronize_sched_expedited_cpu_stop(void *data)
65440 {
65441@@ -1830,7 +1830,7 @@ void synchronize_sched_expedited(void)
65442 int firstsnap, s, snap, trycount = 0;
65443
65444 /* Note that atomic_inc_return() implies full memory barrier. */
65445- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
65446+ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
65447 get_online_cpus();
65448
65449 /*
65450@@ -1851,7 +1851,7 @@ void synchronize_sched_expedited(void)
65451 }
65452
65453 /* Check to see if someone else did our work for us. */
65454- s = atomic_read(&sync_sched_expedited_done);
65455+ s = atomic_read_unchecked(&sync_sched_expedited_done);
65456 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
65457 smp_mb(); /* ensure test happens before caller kfree */
65458 return;
65459@@ -1866,7 +1866,7 @@ void synchronize_sched_expedited(void)
65460 * grace period works for us.
65461 */
65462 get_online_cpus();
65463- snap = atomic_read(&sync_sched_expedited_started) - 1;
65464+ snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
65465 smp_mb(); /* ensure read is before try_stop_cpus(). */
65466 }
65467
65468@@ -1877,12 +1877,12 @@ void synchronize_sched_expedited(void)
65469 * than we did beat us to the punch.
65470 */
65471 do {
65472- s = atomic_read(&sync_sched_expedited_done);
65473+ s = atomic_read_unchecked(&sync_sched_expedited_done);
65474 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
65475 smp_mb(); /* ensure test happens before caller kfree */
65476 break;
65477 }
65478- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
65479+ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
65480
65481 put_online_cpus();
65482 }
65483diff -urNp linux-3.1.1/kernel/relay.c linux-3.1.1/kernel/relay.c
65484--- linux-3.1.1/kernel/relay.c 2011-11-11 15:19:27.000000000 -0500
65485+++ linux-3.1.1/kernel/relay.c 2011-11-16 18:40:44.000000000 -0500
65486@@ -1236,6 +1236,8 @@ static ssize_t subbuf_splice_actor(struc
65487 };
65488 ssize_t ret;
65489
65490+ pax_track_stack();
65491+
65492 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
65493 return 0;
65494 if (splice_grow_spd(pipe, &spd))
65495diff -urNp linux-3.1.1/kernel/resource.c linux-3.1.1/kernel/resource.c
65496--- linux-3.1.1/kernel/resource.c 2011-11-11 15:19:27.000000000 -0500
65497+++ linux-3.1.1/kernel/resource.c 2011-11-16 18:40:44.000000000 -0500
65498@@ -141,8 +141,18 @@ static const struct file_operations proc
65499
65500 static int __init ioresources_init(void)
65501 {
65502+#ifdef CONFIG_GRKERNSEC_PROC_ADD
65503+#ifdef CONFIG_GRKERNSEC_PROC_USER
65504+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
65505+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
65506+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65507+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
65508+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
65509+#endif
65510+#else
65511 proc_create("ioports", 0, NULL, &proc_ioports_operations);
65512 proc_create("iomem", 0, NULL, &proc_iomem_operations);
65513+#endif
65514 return 0;
65515 }
65516 __initcall(ioresources_init);
65517diff -urNp linux-3.1.1/kernel/rtmutex-tester.c linux-3.1.1/kernel/rtmutex-tester.c
65518--- linux-3.1.1/kernel/rtmutex-tester.c 2011-11-11 15:19:27.000000000 -0500
65519+++ linux-3.1.1/kernel/rtmutex-tester.c 2011-11-16 18:39:08.000000000 -0500
65520@@ -20,7 +20,7 @@
65521 #define MAX_RT_TEST_MUTEXES 8
65522
65523 static spinlock_t rttest_lock;
65524-static atomic_t rttest_event;
65525+static atomic_unchecked_t rttest_event;
65526
65527 struct test_thread_data {
65528 int opcode;
65529@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_
65530
65531 case RTTEST_LOCKCONT:
65532 td->mutexes[td->opdata] = 1;
65533- td->event = atomic_add_return(1, &rttest_event);
65534+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65535 return 0;
65536
65537 case RTTEST_RESET:
65538@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_
65539 return 0;
65540
65541 case RTTEST_RESETEVENT:
65542- atomic_set(&rttest_event, 0);
65543+ atomic_set_unchecked(&rttest_event, 0);
65544 return 0;
65545
65546 default:
65547@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_
65548 return ret;
65549
65550 td->mutexes[id] = 1;
65551- td->event = atomic_add_return(1, &rttest_event);
65552+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65553 rt_mutex_lock(&mutexes[id]);
65554- td->event = atomic_add_return(1, &rttest_event);
65555+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65556 td->mutexes[id] = 4;
65557 return 0;
65558
65559@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_
65560 return ret;
65561
65562 td->mutexes[id] = 1;
65563- td->event = atomic_add_return(1, &rttest_event);
65564+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65565 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
65566- td->event = atomic_add_return(1, &rttest_event);
65567+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65568 td->mutexes[id] = ret ? 0 : 4;
65569 return ret ? -EINTR : 0;
65570
65571@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_
65572 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
65573 return ret;
65574
65575- td->event = atomic_add_return(1, &rttest_event);
65576+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65577 rt_mutex_unlock(&mutexes[id]);
65578- td->event = atomic_add_return(1, &rttest_event);
65579+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65580 td->mutexes[id] = 0;
65581 return 0;
65582
65583@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mu
65584 break;
65585
65586 td->mutexes[dat] = 2;
65587- td->event = atomic_add_return(1, &rttest_event);
65588+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65589 break;
65590
65591 default:
65592@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mu
65593 return;
65594
65595 td->mutexes[dat] = 3;
65596- td->event = atomic_add_return(1, &rttest_event);
65597+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65598 break;
65599
65600 case RTTEST_LOCKNOWAIT:
65601@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mu
65602 return;
65603
65604 td->mutexes[dat] = 1;
65605- td->event = atomic_add_return(1, &rttest_event);
65606+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65607 return;
65608
65609 default:
65610diff -urNp linux-3.1.1/kernel/sched_autogroup.c linux-3.1.1/kernel/sched_autogroup.c
65611--- linux-3.1.1/kernel/sched_autogroup.c 2011-11-11 15:19:27.000000000 -0500
65612+++ linux-3.1.1/kernel/sched_autogroup.c 2011-11-16 18:39:08.000000000 -0500
65613@@ -7,7 +7,7 @@
65614
65615 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
65616 static struct autogroup autogroup_default;
65617-static atomic_t autogroup_seq_nr;
65618+static atomic_unchecked_t autogroup_seq_nr;
65619
65620 static void __init autogroup_init(struct task_struct *init_task)
65621 {
65622@@ -78,7 +78,7 @@ static inline struct autogroup *autogrou
65623
65624 kref_init(&ag->kref);
65625 init_rwsem(&ag->lock);
65626- ag->id = atomic_inc_return(&autogroup_seq_nr);
65627+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
65628 ag->tg = tg;
65629 #ifdef CONFIG_RT_GROUP_SCHED
65630 /*
65631diff -urNp linux-3.1.1/kernel/sched.c linux-3.1.1/kernel/sched.c
65632--- linux-3.1.1/kernel/sched.c 2011-11-11 15:19:27.000000000 -0500
65633+++ linux-3.1.1/kernel/sched.c 2011-11-16 18:40:44.000000000 -0500
65634@@ -4264,6 +4264,8 @@ static void __sched __schedule(void)
65635 struct rq *rq;
65636 int cpu;
65637
65638+ pax_track_stack();
65639+
65640 need_resched:
65641 preempt_disable();
65642 cpu = smp_processor_id();
65643@@ -4950,6 +4952,8 @@ int can_nice(const struct task_struct *p
65644 /* convert nice value [19,-20] to rlimit style value [1,40] */
65645 int nice_rlim = 20 - nice;
65646
65647+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
65648+
65649 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
65650 capable(CAP_SYS_NICE));
65651 }
65652@@ -4983,7 +4987,8 @@ SYSCALL_DEFINE1(nice, int, increment)
65653 if (nice > 19)
65654 nice = 19;
65655
65656- if (increment < 0 && !can_nice(current, nice))
65657+ if (increment < 0 && (!can_nice(current, nice) ||
65658+ gr_handle_chroot_nice()))
65659 return -EPERM;
65660
65661 retval = security_task_setnice(current, nice);
65662@@ -5127,6 +5132,7 @@ recheck:
65663 unsigned long rlim_rtprio =
65664 task_rlimit(p, RLIMIT_RTPRIO);
65665
65666+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
65667 /* can't set/change the rt policy */
65668 if (policy != p->policy && !rlim_rtprio)
65669 return -EPERM;
65670diff -urNp linux-3.1.1/kernel/sched_fair.c linux-3.1.1/kernel/sched_fair.c
65671--- linux-3.1.1/kernel/sched_fair.c 2011-11-11 15:19:27.000000000 -0500
65672+++ linux-3.1.1/kernel/sched_fair.c 2011-11-16 18:39:08.000000000 -0500
65673@@ -4062,7 +4062,7 @@ static void nohz_idle_balance(int this_c
65674 * run_rebalance_domains is triggered when needed from the scheduler tick.
65675 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
65676 */
65677-static void run_rebalance_domains(struct softirq_action *h)
65678+static void run_rebalance_domains(void)
65679 {
65680 int this_cpu = smp_processor_id();
65681 struct rq *this_rq = cpu_rq(this_cpu);
65682diff -urNp linux-3.1.1/kernel/signal.c linux-3.1.1/kernel/signal.c
65683--- linux-3.1.1/kernel/signal.c 2011-11-11 15:19:27.000000000 -0500
65684+++ linux-3.1.1/kernel/signal.c 2011-11-16 19:30:04.000000000 -0500
65685@@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cache
65686
65687 int print_fatal_signals __read_mostly;
65688
65689-static void __user *sig_handler(struct task_struct *t, int sig)
65690+static __sighandler_t sig_handler(struct task_struct *t, int sig)
65691 {
65692 return t->sighand->action[sig - 1].sa.sa_handler;
65693 }
65694
65695-static int sig_handler_ignored(void __user *handler, int sig)
65696+static int sig_handler_ignored(__sighandler_t handler, int sig)
65697 {
65698 /* Is it explicitly or implicitly ignored? */
65699 return handler == SIG_IGN ||
65700@@ -60,7 +60,7 @@ static int sig_handler_ignored(void __us
65701 static int sig_task_ignored(struct task_struct *t, int sig,
65702 int from_ancestor_ns)
65703 {
65704- void __user *handler;
65705+ __sighandler_t handler;
65706
65707 handler = sig_handler(t, sig);
65708
65709@@ -364,6 +364,9 @@ __sigqueue_alloc(int sig, struct task_st
65710 atomic_inc(&user->sigpending);
65711 rcu_read_unlock();
65712
65713+ if (!override_rlimit)
65714+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
65715+
65716 if (override_rlimit ||
65717 atomic_read(&user->sigpending) <=
65718 task_rlimit(t, RLIMIT_SIGPENDING)) {
65719@@ -488,7 +491,7 @@ flush_signal_handlers(struct task_struct
65720
65721 int unhandled_signal(struct task_struct *tsk, int sig)
65722 {
65723- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
65724+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
65725 if (is_global_init(tsk))
65726 return 1;
65727 if (handler != SIG_IGN && handler != SIG_DFL)
65728@@ -815,6 +818,13 @@ static int check_kill_permission(int sig
65729 }
65730 }
65731
65732+ /* allow glibc communication via tgkill to other threads in our
65733+ thread group */
65734+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
65735+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
65736+ && gr_handle_signal(t, sig))
65737+ return -EPERM;
65738+
65739 return security_task_kill(t, info, sig, 0);
65740 }
65741
65742@@ -1165,7 +1175,7 @@ __group_send_sig_info(int sig, struct si
65743 return send_signal(sig, info, p, 1);
65744 }
65745
65746-static int
65747+int
65748 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
65749 {
65750 return send_signal(sig, info, t, 0);
65751@@ -1202,6 +1212,7 @@ force_sig_info(int sig, struct siginfo *
65752 unsigned long int flags;
65753 int ret, blocked, ignored;
65754 struct k_sigaction *action;
65755+ int is_unhandled = 0;
65756
65757 spin_lock_irqsave(&t->sighand->siglock, flags);
65758 action = &t->sighand->action[sig-1];
65759@@ -1216,9 +1227,18 @@ force_sig_info(int sig, struct siginfo *
65760 }
65761 if (action->sa.sa_handler == SIG_DFL)
65762 t->signal->flags &= ~SIGNAL_UNKILLABLE;
65763+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
65764+ is_unhandled = 1;
65765 ret = specific_send_sig_info(sig, info, t);
65766 spin_unlock_irqrestore(&t->sighand->siglock, flags);
65767
65768+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
65769+ normal operation */
65770+ if (is_unhandled) {
65771+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
65772+ gr_handle_crash(t, sig);
65773+ }
65774+
65775 return ret;
65776 }
65777
65778@@ -1285,8 +1305,11 @@ int group_send_sig_info(int sig, struct
65779 ret = check_kill_permission(sig, info, p);
65780 rcu_read_unlock();
65781
65782- if (!ret && sig)
65783+ if (!ret && sig) {
65784 ret = do_send_sig_info(sig, info, p, true);
65785+ if (!ret)
65786+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
65787+ }
65788
65789 return ret;
65790 }
65791@@ -1909,6 +1932,8 @@ static void ptrace_do_notify(int signr,
65792 {
65793 siginfo_t info;
65794
65795+ pax_track_stack();
65796+
65797 memset(&info, 0, sizeof info);
65798 info.si_signo = signr;
65799 info.si_code = exit_code;
65800@@ -2748,7 +2773,15 @@ do_send_specific(pid_t tgid, pid_t pid,
65801 int error = -ESRCH;
65802
65803 rcu_read_lock();
65804- p = find_task_by_vpid(pid);
65805+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
65806+ /* allow glibc communication via tgkill to other threads in our
65807+ thread group */
65808+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
65809+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
65810+ p = find_task_by_vpid_unrestricted(pid);
65811+ else
65812+#endif
65813+ p = find_task_by_vpid(pid);
65814 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
65815 error = check_kill_permission(sig, info, p);
65816 /*
65817diff -urNp linux-3.1.1/kernel/smp.c linux-3.1.1/kernel/smp.c
65818--- linux-3.1.1/kernel/smp.c 2011-11-11 15:19:27.000000000 -0500
65819+++ linux-3.1.1/kernel/smp.c 2011-11-16 18:39:08.000000000 -0500
65820@@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t fu
65821 }
65822 EXPORT_SYMBOL(smp_call_function);
65823
65824-void ipi_call_lock(void)
65825+void ipi_call_lock(void) __acquires(call_function.lock)
65826 {
65827 raw_spin_lock(&call_function.lock);
65828 }
65829
65830-void ipi_call_unlock(void)
65831+void ipi_call_unlock(void) __releases(call_function.lock)
65832 {
65833 raw_spin_unlock(&call_function.lock);
65834 }
65835
65836-void ipi_call_lock_irq(void)
65837+void ipi_call_lock_irq(void) __acquires(call_function.lock)
65838 {
65839 raw_spin_lock_irq(&call_function.lock);
65840 }
65841
65842-void ipi_call_unlock_irq(void)
65843+void ipi_call_unlock_irq(void) __releases(call_function.lock)
65844 {
65845 raw_spin_unlock_irq(&call_function.lock);
65846 }
65847diff -urNp linux-3.1.1/kernel/softirq.c linux-3.1.1/kernel/softirq.c
65848--- linux-3.1.1/kernel/softirq.c 2011-11-11 15:19:27.000000000 -0500
65849+++ linux-3.1.1/kernel/softirq.c 2011-11-16 18:39:08.000000000 -0500
65850@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
65851
65852 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
65853
65854-char *softirq_to_name[NR_SOFTIRQS] = {
65855+const char * const softirq_to_name[NR_SOFTIRQS] = {
65856 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
65857 "TASKLET", "SCHED", "HRTIMER", "RCU"
65858 };
65859@@ -235,7 +235,7 @@ restart:
65860 kstat_incr_softirqs_this_cpu(vec_nr);
65861
65862 trace_softirq_entry(vec_nr);
65863- h->action(h);
65864+ h->action();
65865 trace_softirq_exit(vec_nr);
65866 if (unlikely(prev_count != preempt_count())) {
65867 printk(KERN_ERR "huh, entered softirq %u %s %p"
65868@@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
65869 local_irq_restore(flags);
65870 }
65871
65872-void open_softirq(int nr, void (*action)(struct softirq_action *))
65873+void open_softirq(int nr, void (*action)(void))
65874 {
65875- softirq_vec[nr].action = action;
65876+ pax_open_kernel();
65877+ *(void **)&softirq_vec[nr].action = action;
65878+ pax_close_kernel();
65879 }
65880
65881 /*
65882@@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct
65883
65884 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
65885
65886-static void tasklet_action(struct softirq_action *a)
65887+static void tasklet_action(void)
65888 {
65889 struct tasklet_struct *list;
65890
65891@@ -476,7 +478,7 @@ static void tasklet_action(struct softir
65892 }
65893 }
65894
65895-static void tasklet_hi_action(struct softirq_action *a)
65896+static void tasklet_hi_action(void)
65897 {
65898 struct tasklet_struct *list;
65899
65900diff -urNp linux-3.1.1/kernel/sys.c linux-3.1.1/kernel/sys.c
65901--- linux-3.1.1/kernel/sys.c 2011-11-11 15:19:27.000000000 -0500
65902+++ linux-3.1.1/kernel/sys.c 2011-11-16 18:40:44.000000000 -0500
65903@@ -157,6 +157,12 @@ static int set_one_prio(struct task_stru
65904 error = -EACCES;
65905 goto out;
65906 }
65907+
65908+ if (gr_handle_chroot_setpriority(p, niceval)) {
65909+ error = -EACCES;
65910+ goto out;
65911+ }
65912+
65913 no_nice = security_task_setnice(p, niceval);
65914 if (no_nice) {
65915 error = no_nice;
65916@@ -571,6 +577,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
65917 goto error;
65918 }
65919
65920+ if (gr_check_group_change(new->gid, new->egid, -1))
65921+ goto error;
65922+
65923 if (rgid != (gid_t) -1 ||
65924 (egid != (gid_t) -1 && egid != old->gid))
65925 new->sgid = new->egid;
65926@@ -600,6 +609,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
65927 old = current_cred();
65928
65929 retval = -EPERM;
65930+
65931+ if (gr_check_group_change(gid, gid, gid))
65932+ goto error;
65933+
65934 if (nsown_capable(CAP_SETGID))
65935 new->gid = new->egid = new->sgid = new->fsgid = gid;
65936 else if (gid == old->gid || gid == old->sgid)
65937@@ -687,6 +700,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
65938 goto error;
65939 }
65940
65941+ if (gr_check_user_change(new->uid, new->euid, -1))
65942+ goto error;
65943+
65944 if (new->uid != old->uid) {
65945 retval = set_user(new);
65946 if (retval < 0)
65947@@ -731,6 +747,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
65948 old = current_cred();
65949
65950 retval = -EPERM;
65951+
65952+ if (gr_check_crash_uid(uid))
65953+ goto error;
65954+ if (gr_check_user_change(uid, uid, uid))
65955+ goto error;
65956+
65957 if (nsown_capable(CAP_SETUID)) {
65958 new->suid = new->uid = uid;
65959 if (uid != old->uid) {
65960@@ -785,6 +807,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
65961 goto error;
65962 }
65963
65964+ if (gr_check_user_change(ruid, euid, -1))
65965+ goto error;
65966+
65967 if (ruid != (uid_t) -1) {
65968 new->uid = ruid;
65969 if (ruid != old->uid) {
65970@@ -849,6 +874,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
65971 goto error;
65972 }
65973
65974+ if (gr_check_group_change(rgid, egid, -1))
65975+ goto error;
65976+
65977 if (rgid != (gid_t) -1)
65978 new->gid = rgid;
65979 if (egid != (gid_t) -1)
65980@@ -895,6 +923,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
65981 old = current_cred();
65982 old_fsuid = old->fsuid;
65983
65984+ if (gr_check_user_change(-1, -1, uid))
65985+ goto error;
65986+
65987 if (uid == old->uid || uid == old->euid ||
65988 uid == old->suid || uid == old->fsuid ||
65989 nsown_capable(CAP_SETUID)) {
65990@@ -905,6 +936,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
65991 }
65992 }
65993
65994+error:
65995 abort_creds(new);
65996 return old_fsuid;
65997
65998@@ -931,12 +963,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
65999 if (gid == old->gid || gid == old->egid ||
66000 gid == old->sgid || gid == old->fsgid ||
66001 nsown_capable(CAP_SETGID)) {
66002+ if (gr_check_group_change(-1, -1, gid))
66003+ goto error;
66004+
66005 if (gid != old_fsgid) {
66006 new->fsgid = gid;
66007 goto change_okay;
66008 }
66009 }
66010
66011+error:
66012 abort_creds(new);
66013 return old_fsgid;
66014
66015@@ -1242,19 +1278,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_
66016 return -EFAULT;
66017
66018 down_read(&uts_sem);
66019- error = __copy_to_user(&name->sysname, &utsname()->sysname,
66020+ error = __copy_to_user(name->sysname, &utsname()->sysname,
66021 __OLD_UTS_LEN);
66022 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
66023- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
66024+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
66025 __OLD_UTS_LEN);
66026 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
66027- error |= __copy_to_user(&name->release, &utsname()->release,
66028+ error |= __copy_to_user(name->release, &utsname()->release,
66029 __OLD_UTS_LEN);
66030 error |= __put_user(0, name->release + __OLD_UTS_LEN);
66031- error |= __copy_to_user(&name->version, &utsname()->version,
66032+ error |= __copy_to_user(name->version, &utsname()->version,
66033 __OLD_UTS_LEN);
66034 error |= __put_user(0, name->version + __OLD_UTS_LEN);
66035- error |= __copy_to_user(&name->machine, &utsname()->machine,
66036+ error |= __copy_to_user(name->machine, &utsname()->machine,
66037 __OLD_UTS_LEN);
66038 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
66039 up_read(&uts_sem);
66040@@ -1717,7 +1753,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
66041 error = get_dumpable(me->mm);
66042 break;
66043 case PR_SET_DUMPABLE:
66044- if (arg2 < 0 || arg2 > 1) {
66045+ if (arg2 > 1) {
66046 error = -EINVAL;
66047 break;
66048 }
66049diff -urNp linux-3.1.1/kernel/sysctl_binary.c linux-3.1.1/kernel/sysctl_binary.c
66050--- linux-3.1.1/kernel/sysctl_binary.c 2011-11-11 15:19:27.000000000 -0500
66051+++ linux-3.1.1/kernel/sysctl_binary.c 2011-11-16 18:39:08.000000000 -0500
66052@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *f
66053 int i;
66054
66055 set_fs(KERNEL_DS);
66056- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
66057+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
66058 set_fs(old_fs);
66059 if (result < 0)
66060 goto out_kfree;
66061@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *f
66062 }
66063
66064 set_fs(KERNEL_DS);
66065- result = vfs_write(file, buffer, str - buffer, &pos);
66066+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
66067 set_fs(old_fs);
66068 if (result < 0)
66069 goto out_kfree;
66070@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file
66071 int i;
66072
66073 set_fs(KERNEL_DS);
66074- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
66075+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
66076 set_fs(old_fs);
66077 if (result < 0)
66078 goto out_kfree;
66079@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file
66080 }
66081
66082 set_fs(KERNEL_DS);
66083- result = vfs_write(file, buffer, str - buffer, &pos);
66084+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
66085 set_fs(old_fs);
66086 if (result < 0)
66087 goto out_kfree;
66088@@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *fil
66089 int i;
66090
66091 set_fs(KERNEL_DS);
66092- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
66093+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
66094 set_fs(old_fs);
66095 if (result < 0)
66096 goto out;
66097@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struc
66098 __le16 dnaddr;
66099
66100 set_fs(KERNEL_DS);
66101- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
66102+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
66103 set_fs(old_fs);
66104 if (result < 0)
66105 goto out;
66106@@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struc
66107 le16_to_cpu(dnaddr) & 0x3ff);
66108
66109 set_fs(KERNEL_DS);
66110- result = vfs_write(file, buf, len, &pos);
66111+ result = vfs_write(file, (const char __force_user *)buf, len, &pos);
66112 set_fs(old_fs);
66113 if (result < 0)
66114 goto out;
66115diff -urNp linux-3.1.1/kernel/sysctl.c linux-3.1.1/kernel/sysctl.c
66116--- linux-3.1.1/kernel/sysctl.c 2011-11-11 15:19:27.000000000 -0500
66117+++ linux-3.1.1/kernel/sysctl.c 2011-11-16 18:40:44.000000000 -0500
66118@@ -85,6 +85,13 @@
66119
66120
66121 #if defined(CONFIG_SYSCTL)
66122+#include <linux/grsecurity.h>
66123+#include <linux/grinternal.h>
66124+
66125+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
66126+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
66127+ const int op);
66128+extern int gr_handle_chroot_sysctl(const int op);
66129
66130 /* External variables not in a header file. */
66131 extern int sysctl_overcommit_memory;
66132@@ -197,6 +204,7 @@ static int sysrq_sysctl_handler(ctl_tabl
66133 }
66134
66135 #endif
66136+extern struct ctl_table grsecurity_table[];
66137
66138 static struct ctl_table root_table[];
66139 static struct ctl_table_root sysctl_table_root;
66140@@ -226,6 +234,20 @@ extern struct ctl_table epoll_table[];
66141 int sysctl_legacy_va_layout;
66142 #endif
66143
66144+#ifdef CONFIG_PAX_SOFTMODE
66145+static ctl_table pax_table[] = {
66146+ {
66147+ .procname = "softmode",
66148+ .data = &pax_softmode,
66149+ .maxlen = sizeof(unsigned int),
66150+ .mode = 0600,
66151+ .proc_handler = &proc_dointvec,
66152+ },
66153+
66154+ { }
66155+};
66156+#endif
66157+
66158 /* The default sysctl tables: */
66159
66160 static struct ctl_table root_table[] = {
66161@@ -272,6 +294,22 @@ static int max_extfrag_threshold = 1000;
66162 #endif
66163
66164 static struct ctl_table kern_table[] = {
66165+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
66166+ {
66167+ .procname = "grsecurity",
66168+ .mode = 0500,
66169+ .child = grsecurity_table,
66170+ },
66171+#endif
66172+
66173+#ifdef CONFIG_PAX_SOFTMODE
66174+ {
66175+ .procname = "pax",
66176+ .mode = 0500,
66177+ .child = pax_table,
66178+ },
66179+#endif
66180+
66181 {
66182 .procname = "sched_child_runs_first",
66183 .data = &sysctl_sched_child_runs_first,
66184@@ -546,7 +584,7 @@ static struct ctl_table kern_table[] = {
66185 .data = &modprobe_path,
66186 .maxlen = KMOD_PATH_LEN,
66187 .mode = 0644,
66188- .proc_handler = proc_dostring,
66189+ .proc_handler = proc_dostring_modpriv,
66190 },
66191 {
66192 .procname = "modules_disabled",
66193@@ -713,16 +751,20 @@ static struct ctl_table kern_table[] = {
66194 .extra1 = &zero,
66195 .extra2 = &one,
66196 },
66197+#endif
66198 {
66199 .procname = "kptr_restrict",
66200 .data = &kptr_restrict,
66201 .maxlen = sizeof(int),
66202 .mode = 0644,
66203 .proc_handler = proc_dmesg_restrict,
66204+#ifdef CONFIG_GRKERNSEC_HIDESYM
66205+ .extra1 = &two,
66206+#else
66207 .extra1 = &zero,
66208+#endif
66209 .extra2 = &two,
66210 },
66211-#endif
66212 {
66213 .procname = "ngroups_max",
66214 .data = &ngroups_max,
66215@@ -1205,6 +1247,13 @@ static struct ctl_table vm_table[] = {
66216 .proc_handler = proc_dointvec_minmax,
66217 .extra1 = &zero,
66218 },
66219+ {
66220+ .procname = "heap_stack_gap",
66221+ .data = &sysctl_heap_stack_gap,
66222+ .maxlen = sizeof(sysctl_heap_stack_gap),
66223+ .mode = 0644,
66224+ .proc_handler = proc_doulongvec_minmax,
66225+ },
66226 #else
66227 {
66228 .procname = "nr_trim_pages",
66229@@ -1709,6 +1758,17 @@ static int test_perm(int mode, int op)
66230 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
66231 {
66232 int mode;
66233+ int error;
66234+
66235+ if (table->parent != NULL && table->parent->procname != NULL &&
66236+ table->procname != NULL &&
66237+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
66238+ return -EACCES;
66239+ if (gr_handle_chroot_sysctl(op))
66240+ return -EACCES;
66241+ error = gr_handle_sysctl(table, op);
66242+ if (error)
66243+ return error;
66244
66245 if (root->permissions)
66246 mode = root->permissions(root, current->nsproxy, table);
66247@@ -2113,6 +2173,16 @@ int proc_dostring(struct ctl_table *tabl
66248 buffer, lenp, ppos);
66249 }
66250
66251+int proc_dostring_modpriv(struct ctl_table *table, int write,
66252+ void __user *buffer, size_t *lenp, loff_t *ppos)
66253+{
66254+ if (write && !capable(CAP_SYS_MODULE))
66255+ return -EPERM;
66256+
66257+ return _proc_do_string(table->data, table->maxlen, write,
66258+ buffer, lenp, ppos);
66259+}
66260+
66261 static size_t proc_skip_spaces(char **buf)
66262 {
66263 size_t ret;
66264@@ -2218,6 +2288,8 @@ static int proc_put_long(void __user **b
66265 len = strlen(tmp);
66266 if (len > *size)
66267 len = *size;
66268+ if (len > sizeof(tmp))
66269+ len = sizeof(tmp);
66270 if (copy_to_user(*buf, tmp, len))
66271 return -EFAULT;
66272 *size -= len;
66273@@ -2534,8 +2606,11 @@ static int __do_proc_doulongvec_minmax(v
66274 *i = val;
66275 } else {
66276 val = convdiv * (*i) / convmul;
66277- if (!first)
66278+ if (!first) {
66279 err = proc_put_char(&buffer, &left, '\t');
66280+ if (err)
66281+ break;
66282+ }
66283 err = proc_put_long(&buffer, &left, val, false);
66284 if (err)
66285 break;
66286@@ -2930,6 +3005,12 @@ int proc_dostring(struct ctl_table *tabl
66287 return -ENOSYS;
66288 }
66289
66290+int proc_dostring_modpriv(struct ctl_table *table, int write,
66291+ void __user *buffer, size_t *lenp, loff_t *ppos)
66292+{
66293+ return -ENOSYS;
66294+}
66295+
66296 int proc_dointvec(struct ctl_table *table, int write,
66297 void __user *buffer, size_t *lenp, loff_t *ppos)
66298 {
66299@@ -2986,6 +3067,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
66300 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
66301 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
66302 EXPORT_SYMBOL(proc_dostring);
66303+EXPORT_SYMBOL(proc_dostring_modpriv);
66304 EXPORT_SYMBOL(proc_doulongvec_minmax);
66305 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
66306 EXPORT_SYMBOL(register_sysctl_table);
66307diff -urNp linux-3.1.1/kernel/sysctl_check.c linux-3.1.1/kernel/sysctl_check.c
66308--- linux-3.1.1/kernel/sysctl_check.c 2011-11-11 15:19:27.000000000 -0500
66309+++ linux-3.1.1/kernel/sysctl_check.c 2011-11-16 18:40:44.000000000 -0500
66310@@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *n
66311 set_fail(&fail, table, "Directory with extra2");
66312 } else {
66313 if ((table->proc_handler == proc_dostring) ||
66314+ (table->proc_handler == proc_dostring_modpriv) ||
66315 (table->proc_handler == proc_dointvec) ||
66316 (table->proc_handler == proc_dointvec_minmax) ||
66317 (table->proc_handler == proc_dointvec_jiffies) ||
66318diff -urNp linux-3.1.1/kernel/taskstats.c linux-3.1.1/kernel/taskstats.c
66319--- linux-3.1.1/kernel/taskstats.c 2011-11-11 15:19:27.000000000 -0500
66320+++ linux-3.1.1/kernel/taskstats.c 2011-11-16 19:35:09.000000000 -0500
66321@@ -27,9 +27,12 @@
66322 #include <linux/cgroup.h>
66323 #include <linux/fs.h>
66324 #include <linux/file.h>
66325+#include <linux/grsecurity.h>
66326 #include <net/genetlink.h>
66327 #include <linux/atomic.h>
66328
66329+extern int gr_is_taskstats_denied(int pid);
66330+
66331 /*
66332 * Maximum length of a cpumask that can be specified in
66333 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
66334@@ -556,6 +559,9 @@ err:
66335
66336 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
66337 {
66338+ if (gr_is_taskstats_denied(current->pid))
66339+ return -EACCES;
66340+
66341 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
66342 return cmd_attr_register_cpumask(info);
66343 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
66344diff -urNp linux-3.1.1/kernel/time/alarmtimer.c linux-3.1.1/kernel/time/alarmtimer.c
66345--- linux-3.1.1/kernel/time/alarmtimer.c 2011-11-11 15:19:27.000000000 -0500
66346+++ linux-3.1.1/kernel/time/alarmtimer.c 2011-11-16 18:39:08.000000000 -0500
66347@@ -693,7 +693,7 @@ static int __init alarmtimer_init(void)
66348 {
66349 int error = 0;
66350 int i;
66351- struct k_clock alarm_clock = {
66352+ static struct k_clock alarm_clock = {
66353 .clock_getres = alarm_clock_getres,
66354 .clock_get = alarm_clock_get,
66355 .timer_create = alarm_timer_create,
66356diff -urNp linux-3.1.1/kernel/time/tick-broadcast.c linux-3.1.1/kernel/time/tick-broadcast.c
66357--- linux-3.1.1/kernel/time/tick-broadcast.c 2011-11-11 15:19:27.000000000 -0500
66358+++ linux-3.1.1/kernel/time/tick-broadcast.c 2011-11-16 18:39:08.000000000 -0500
66359@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct cl
66360 * then clear the broadcast bit.
66361 */
66362 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
66363- int cpu = smp_processor_id();
66364+ cpu = smp_processor_id();
66365
66366 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
66367 tick_broadcast_clear_oneshot(cpu);
66368diff -urNp linux-3.1.1/kernel/time/timekeeping.c linux-3.1.1/kernel/time/timekeeping.c
66369--- linux-3.1.1/kernel/time/timekeeping.c 2011-11-11 15:19:27.000000000 -0500
66370+++ linux-3.1.1/kernel/time/timekeeping.c 2011-11-16 18:40:44.000000000 -0500
66371@@ -14,6 +14,7 @@
66372 #include <linux/init.h>
66373 #include <linux/mm.h>
66374 #include <linux/sched.h>
66375+#include <linux/grsecurity.h>
66376 #include <linux/syscore_ops.h>
66377 #include <linux/clocksource.h>
66378 #include <linux/jiffies.h>
66379@@ -361,6 +362,8 @@ int do_settimeofday(const struct timespe
66380 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
66381 return -EINVAL;
66382
66383+ gr_log_timechange();
66384+
66385 write_seqlock_irqsave(&xtime_lock, flags);
66386
66387 timekeeping_forward_now();
66388diff -urNp linux-3.1.1/kernel/time/timer_list.c linux-3.1.1/kernel/time/timer_list.c
66389--- linux-3.1.1/kernel/time/timer_list.c 2011-11-11 15:19:27.000000000 -0500
66390+++ linux-3.1.1/kernel/time/timer_list.c 2011-11-16 18:40:44.000000000 -0500
66391@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
66392
66393 static void print_name_offset(struct seq_file *m, void *sym)
66394 {
66395+#ifdef CONFIG_GRKERNSEC_HIDESYM
66396+ SEQ_printf(m, "<%p>", NULL);
66397+#else
66398 char symname[KSYM_NAME_LEN];
66399
66400 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
66401 SEQ_printf(m, "<%pK>", sym);
66402 else
66403 SEQ_printf(m, "%s", symname);
66404+#endif
66405 }
66406
66407 static void
66408@@ -112,7 +116,11 @@ next_one:
66409 static void
66410 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
66411 {
66412+#ifdef CONFIG_GRKERNSEC_HIDESYM
66413+ SEQ_printf(m, " .base: %p\n", NULL);
66414+#else
66415 SEQ_printf(m, " .base: %pK\n", base);
66416+#endif
66417 SEQ_printf(m, " .index: %d\n",
66418 base->index);
66419 SEQ_printf(m, " .resolution: %Lu nsecs\n",
66420@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs
66421 {
66422 struct proc_dir_entry *pe;
66423
66424+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66425+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
66426+#else
66427 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
66428+#endif
66429 if (!pe)
66430 return -ENOMEM;
66431 return 0;
66432diff -urNp linux-3.1.1/kernel/time/timer_stats.c linux-3.1.1/kernel/time/timer_stats.c
66433--- linux-3.1.1/kernel/time/timer_stats.c 2011-11-11 15:19:27.000000000 -0500
66434+++ linux-3.1.1/kernel/time/timer_stats.c 2011-11-16 18:40:44.000000000 -0500
66435@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
66436 static unsigned long nr_entries;
66437 static struct entry entries[MAX_ENTRIES];
66438
66439-static atomic_t overflow_count;
66440+static atomic_unchecked_t overflow_count;
66441
66442 /*
66443 * The entries are in a hash-table, for fast lookup:
66444@@ -140,7 +140,7 @@ static void reset_entries(void)
66445 nr_entries = 0;
66446 memset(entries, 0, sizeof(entries));
66447 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
66448- atomic_set(&overflow_count, 0);
66449+ atomic_set_unchecked(&overflow_count, 0);
66450 }
66451
66452 static struct entry *alloc_entry(void)
66453@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
66454 if (likely(entry))
66455 entry->count++;
66456 else
66457- atomic_inc(&overflow_count);
66458+ atomic_inc_unchecked(&overflow_count);
66459
66460 out_unlock:
66461 raw_spin_unlock_irqrestore(lock, flags);
66462@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
66463
66464 static void print_name_offset(struct seq_file *m, unsigned long addr)
66465 {
66466+#ifdef CONFIG_GRKERNSEC_HIDESYM
66467+ seq_printf(m, "<%p>", NULL);
66468+#else
66469 char symname[KSYM_NAME_LEN];
66470
66471 if (lookup_symbol_name(addr, symname) < 0)
66472 seq_printf(m, "<%p>", (void *)addr);
66473 else
66474 seq_printf(m, "%s", symname);
66475+#endif
66476 }
66477
66478 static int tstats_show(struct seq_file *m, void *v)
66479@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
66480
66481 seq_puts(m, "Timer Stats Version: v0.2\n");
66482 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
66483- if (atomic_read(&overflow_count))
66484+ if (atomic_read_unchecked(&overflow_count))
66485 seq_printf(m, "Overflow: %d entries\n",
66486- atomic_read(&overflow_count));
66487+ atomic_read_unchecked(&overflow_count));
66488
66489 for (i = 0; i < nr_entries; i++) {
66490 entry = entries + i;
66491@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(voi
66492 {
66493 struct proc_dir_entry *pe;
66494
66495+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66496+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
66497+#else
66498 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
66499+#endif
66500 if (!pe)
66501 return -ENOMEM;
66502 return 0;
66503diff -urNp linux-3.1.1/kernel/time.c linux-3.1.1/kernel/time.c
66504--- linux-3.1.1/kernel/time.c 2011-11-11 15:19:27.000000000 -0500
66505+++ linux-3.1.1/kernel/time.c 2011-11-16 18:40:44.000000000 -0500
66506@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct tim
66507 return error;
66508
66509 if (tz) {
66510+ /* we log in do_settimeofday called below, so don't log twice
66511+ */
66512+ if (!tv)
66513+ gr_log_timechange();
66514+
66515 /* SMP safe, global irq locking makes it work. */
66516 sys_tz = *tz;
66517 update_vsyscall_tz();
66518diff -urNp linux-3.1.1/kernel/timer.c linux-3.1.1/kernel/timer.c
66519--- linux-3.1.1/kernel/timer.c 2011-11-11 15:19:27.000000000 -0500
66520+++ linux-3.1.1/kernel/timer.c 2011-11-16 18:39:08.000000000 -0500
66521@@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
66522 /*
66523 * This function runs timers and the timer-tq in bottom half context.
66524 */
66525-static void run_timer_softirq(struct softirq_action *h)
66526+static void run_timer_softirq(void)
66527 {
66528 struct tvec_base *base = __this_cpu_read(tvec_bases);
66529
66530diff -urNp linux-3.1.1/kernel/trace/blktrace.c linux-3.1.1/kernel/trace/blktrace.c
66531--- linux-3.1.1/kernel/trace/blktrace.c 2011-11-11 15:19:27.000000000 -0500
66532+++ linux-3.1.1/kernel/trace/blktrace.c 2011-11-16 18:39:08.000000000 -0500
66533@@ -323,7 +323,7 @@ static ssize_t blk_dropped_read(struct f
66534 struct blk_trace *bt = filp->private_data;
66535 char buf[16];
66536
66537- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
66538+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
66539
66540 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
66541 }
66542@@ -388,7 +388,7 @@ static int blk_subbuf_start_callback(str
66543 return 1;
66544
66545 bt = buf->chan->private_data;
66546- atomic_inc(&bt->dropped);
66547+ atomic_inc_unchecked(&bt->dropped);
66548 return 0;
66549 }
66550
66551@@ -489,7 +489,7 @@ int do_blk_trace_setup(struct request_qu
66552
66553 bt->dir = dir;
66554 bt->dev = dev;
66555- atomic_set(&bt->dropped, 0);
66556+ atomic_set_unchecked(&bt->dropped, 0);
66557
66558 ret = -EIO;
66559 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
66560diff -urNp linux-3.1.1/kernel/trace/ftrace.c linux-3.1.1/kernel/trace/ftrace.c
66561--- linux-3.1.1/kernel/trace/ftrace.c 2011-11-11 15:19:27.000000000 -0500
66562+++ linux-3.1.1/kernel/trace/ftrace.c 2011-11-16 18:39:08.000000000 -0500
66563@@ -1585,12 +1585,17 @@ ftrace_code_disable(struct module *mod,
66564 if (unlikely(ftrace_disabled))
66565 return 0;
66566
66567+ ret = ftrace_arch_code_modify_prepare();
66568+ FTRACE_WARN_ON(ret);
66569+ if (ret)
66570+ return 0;
66571+
66572 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
66573+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
66574 if (ret) {
66575 ftrace_bug(ret, ip);
66576- return 0;
66577 }
66578- return 1;
66579+ return ret ? 0 : 1;
66580 }
66581
66582 /*
66583@@ -2607,7 +2612,7 @@ static void ftrace_free_entry_rcu(struct
66584
66585 int
66586 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
66587- void *data)
66588+ void *data)
66589 {
66590 struct ftrace_func_probe *entry;
66591 struct ftrace_page *pg;
66592diff -urNp linux-3.1.1/kernel/trace/trace.c linux-3.1.1/kernel/trace/trace.c
66593--- linux-3.1.1/kernel/trace/trace.c 2011-11-11 15:19:27.000000000 -0500
66594+++ linux-3.1.1/kernel/trace/trace.c 2011-11-16 18:40:44.000000000 -0500
66595@@ -3451,6 +3451,8 @@ static ssize_t tracing_splice_read_pipe(
66596 size_t rem;
66597 unsigned int i;
66598
66599+ pax_track_stack();
66600+
66601 if (splice_grow_spd(pipe, &spd))
66602 return -ENOMEM;
66603
66604@@ -3926,6 +3928,8 @@ tracing_buffers_splice_read(struct file
66605 int entries, size, i;
66606 size_t ret;
66607
66608+ pax_track_stack();
66609+
66610 if (splice_grow_spd(pipe, &spd))
66611 return -ENOMEM;
66612
66613@@ -4093,10 +4097,9 @@ static const struct file_operations trac
66614 };
66615 #endif
66616
66617-static struct dentry *d_tracer;
66618-
66619 struct dentry *tracing_init_dentry(void)
66620 {
66621+ static struct dentry *d_tracer;
66622 static int once;
66623
66624 if (d_tracer)
66625@@ -4116,10 +4119,9 @@ struct dentry *tracing_init_dentry(void)
66626 return d_tracer;
66627 }
66628
66629-static struct dentry *d_percpu;
66630-
66631 struct dentry *tracing_dentry_percpu(void)
66632 {
66633+ static struct dentry *d_percpu;
66634 static int once;
66635 struct dentry *d_tracer;
66636
66637diff -urNp linux-3.1.1/kernel/trace/trace_events.c linux-3.1.1/kernel/trace/trace_events.c
66638--- linux-3.1.1/kernel/trace/trace_events.c 2011-11-11 15:19:27.000000000 -0500
66639+++ linux-3.1.1/kernel/trace/trace_events.c 2011-11-16 18:39:08.000000000 -0500
66640@@ -1300,10 +1300,6 @@ static LIST_HEAD(ftrace_module_file_list
66641 struct ftrace_module_file_ops {
66642 struct list_head list;
66643 struct module *mod;
66644- struct file_operations id;
66645- struct file_operations enable;
66646- struct file_operations format;
66647- struct file_operations filter;
66648 };
66649
66650 static struct ftrace_module_file_ops *
66651@@ -1324,17 +1320,12 @@ trace_create_file_ops(struct module *mod
66652
66653 file_ops->mod = mod;
66654
66655- file_ops->id = ftrace_event_id_fops;
66656- file_ops->id.owner = mod;
66657-
66658- file_ops->enable = ftrace_enable_fops;
66659- file_ops->enable.owner = mod;
66660-
66661- file_ops->filter = ftrace_event_filter_fops;
66662- file_ops->filter.owner = mod;
66663-
66664- file_ops->format = ftrace_event_format_fops;
66665- file_ops->format.owner = mod;
66666+ pax_open_kernel();
66667+ *(void **)&mod->trace_id.owner = mod;
66668+ *(void **)&mod->trace_enable.owner = mod;
66669+ *(void **)&mod->trace_filter.owner = mod;
66670+ *(void **)&mod->trace_format.owner = mod;
66671+ pax_close_kernel();
66672
66673 list_add(&file_ops->list, &ftrace_module_file_list);
66674
66675@@ -1358,8 +1349,8 @@ static void trace_module_add_events(stru
66676
66677 for_each_event(call, start, end) {
66678 __trace_add_event_call(*call, mod,
66679- &file_ops->id, &file_ops->enable,
66680- &file_ops->filter, &file_ops->format);
66681+ &mod->trace_id, &mod->trace_enable,
66682+ &mod->trace_filter, &mod->trace_format);
66683 }
66684 }
66685
66686diff -urNp linux-3.1.1/kernel/trace/trace_kprobe.c linux-3.1.1/kernel/trace/trace_kprobe.c
66687--- linux-3.1.1/kernel/trace/trace_kprobe.c 2011-11-11 15:19:27.000000000 -0500
66688+++ linux-3.1.1/kernel/trace/trace_kprobe.c 2011-11-16 18:39:08.000000000 -0500
66689@@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(me
66690 long ret;
66691 int maxlen = get_rloc_len(*(u32 *)dest);
66692 u8 *dst = get_rloc_data(dest);
66693- u8 *src = addr;
66694+ const u8 __user *src = (const u8 __force_user *)addr;
66695 mm_segment_t old_fs = get_fs();
66696 if (!maxlen)
66697 return;
66698@@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(me
66699 pagefault_disable();
66700 do
66701 ret = __copy_from_user_inatomic(dst++, src++, 1);
66702- while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
66703+ while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
66704 dst[-1] = '\0';
66705 pagefault_enable();
66706 set_fs(old_fs);
66707@@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(me
66708 ((u8 *)get_rloc_data(dest))[0] = '\0';
66709 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
66710 } else
66711- *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
66712+ *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
66713 get_rloc_offs(*(u32 *)dest));
66714 }
66715 /* Return the length of string -- including null terminal byte */
66716@@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(me
66717 set_fs(KERNEL_DS);
66718 pagefault_disable();
66719 do {
66720- ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
66721+ ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
66722 len++;
66723 } while (c && ret == 0 && len < MAX_STRING_SIZE);
66724 pagefault_enable();
66725diff -urNp linux-3.1.1/kernel/trace/trace_mmiotrace.c linux-3.1.1/kernel/trace/trace_mmiotrace.c
66726--- linux-3.1.1/kernel/trace/trace_mmiotrace.c 2011-11-11 15:19:27.000000000 -0500
66727+++ linux-3.1.1/kernel/trace/trace_mmiotrace.c 2011-11-16 18:39:08.000000000 -0500
66728@@ -24,7 +24,7 @@ struct header_iter {
66729 static struct trace_array *mmio_trace_array;
66730 static bool overrun_detected;
66731 static unsigned long prev_overruns;
66732-static atomic_t dropped_count;
66733+static atomic_unchecked_t dropped_count;
66734
66735 static void mmio_reset_data(struct trace_array *tr)
66736 {
66737@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iter
66738
66739 static unsigned long count_overruns(struct trace_iterator *iter)
66740 {
66741- unsigned long cnt = atomic_xchg(&dropped_count, 0);
66742+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
66743 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
66744
66745 if (over > prev_overruns)
66746@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct
66747 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
66748 sizeof(*entry), 0, pc);
66749 if (!event) {
66750- atomic_inc(&dropped_count);
66751+ atomic_inc_unchecked(&dropped_count);
66752 return;
66753 }
66754 entry = ring_buffer_event_data(event);
66755@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct
66756 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
66757 sizeof(*entry), 0, pc);
66758 if (!event) {
66759- atomic_inc(&dropped_count);
66760+ atomic_inc_unchecked(&dropped_count);
66761 return;
66762 }
66763 entry = ring_buffer_event_data(event);
66764diff -urNp linux-3.1.1/kernel/trace/trace_output.c linux-3.1.1/kernel/trace/trace_output.c
66765--- linux-3.1.1/kernel/trace/trace_output.c 2011-11-11 15:19:27.000000000 -0500
66766+++ linux-3.1.1/kernel/trace/trace_output.c 2011-11-16 18:39:08.000000000 -0500
66767@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s,
66768
66769 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
66770 if (!IS_ERR(p)) {
66771- p = mangle_path(s->buffer + s->len, p, "\n");
66772+ p = mangle_path(s->buffer + s->len, p, "\n\\");
66773 if (p) {
66774 s->len = p - s->buffer;
66775 return 1;
66776diff -urNp linux-3.1.1/kernel/trace/trace_stack.c linux-3.1.1/kernel/trace/trace_stack.c
66777--- linux-3.1.1/kernel/trace/trace_stack.c 2011-11-11 15:19:27.000000000 -0500
66778+++ linux-3.1.1/kernel/trace/trace_stack.c 2011-11-16 18:39:08.000000000 -0500
66779@@ -50,7 +50,7 @@ static inline void check_stack(void)
66780 return;
66781
66782 /* we do not handle interrupt stacks yet */
66783- if (!object_is_on_stack(&this_size))
66784+ if (!object_starts_on_stack(&this_size))
66785 return;
66786
66787 local_irq_save(flags);
66788diff -urNp linux-3.1.1/kernel/trace/trace_workqueue.c linux-3.1.1/kernel/trace/trace_workqueue.c
66789--- linux-3.1.1/kernel/trace/trace_workqueue.c 2011-11-11 15:19:27.000000000 -0500
66790+++ linux-3.1.1/kernel/trace/trace_workqueue.c 2011-11-16 18:39:08.000000000 -0500
66791@@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
66792 int cpu;
66793 pid_t pid;
66794 /* Can be inserted from interrupt or user context, need to be atomic */
66795- atomic_t inserted;
66796+ atomic_unchecked_t inserted;
66797 /*
66798 * Don't need to be atomic, works are serialized in a single workqueue thread
66799 * on a single CPU.
66800@@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
66801 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
66802 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
66803 if (node->pid == wq_thread->pid) {
66804- atomic_inc(&node->inserted);
66805+ atomic_inc_unchecked(&node->inserted);
66806 goto found;
66807 }
66808 }
66809@@ -210,7 +210,7 @@ static int workqueue_stat_show(struct se
66810 tsk = get_pid_task(pid, PIDTYPE_PID);
66811 if (tsk) {
66812 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
66813- atomic_read(&cws->inserted), cws->executed,
66814+ atomic_read_unchecked(&cws->inserted), cws->executed,
66815 tsk->comm);
66816 put_task_struct(tsk);
66817 }
66818diff -urNp linux-3.1.1/lib/bitmap.c linux-3.1.1/lib/bitmap.c
66819--- linux-3.1.1/lib/bitmap.c 2011-11-11 15:19:27.000000000 -0500
66820+++ linux-3.1.1/lib/bitmap.c 2011-11-16 18:39:08.000000000 -0500
66821@@ -419,7 +419,7 @@ int __bitmap_parse(const char *buf, unsi
66822 {
66823 int c, old_c, totaldigits, ndigits, nchunks, nbits;
66824 u32 chunk;
66825- const char __user *ubuf = buf;
66826+ const char __user *ubuf = (const char __force_user *)buf;
66827
66828 bitmap_zero(maskp, nmaskbits);
66829
66830@@ -504,7 +504,7 @@ int bitmap_parse_user(const char __user
66831 {
66832 if (!access_ok(VERIFY_READ, ubuf, ulen))
66833 return -EFAULT;
66834- return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
66835+ return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1, maskp, nmaskbits);
66836 }
66837 EXPORT_SYMBOL(bitmap_parse_user);
66838
66839@@ -594,7 +594,7 @@ static int __bitmap_parselist(const char
66840 {
66841 unsigned a, b;
66842 int c, old_c, totaldigits;
66843- const char __user *ubuf = buf;
66844+ const char __user *ubuf = (const char __force_user *)buf;
66845 int exp_digit, in_range;
66846
66847 totaldigits = c = 0;
66848@@ -694,7 +694,7 @@ int bitmap_parselist_user(const char __u
66849 {
66850 if (!access_ok(VERIFY_READ, ubuf, ulen))
66851 return -EFAULT;
66852- return __bitmap_parselist((const char *)ubuf,
66853+ return __bitmap_parselist((const char __force_kernel *)ubuf,
66854 ulen, 1, maskp, nmaskbits);
66855 }
66856 EXPORT_SYMBOL(bitmap_parselist_user);
66857diff -urNp linux-3.1.1/lib/bug.c linux-3.1.1/lib/bug.c
66858--- linux-3.1.1/lib/bug.c 2011-11-11 15:19:27.000000000 -0500
66859+++ linux-3.1.1/lib/bug.c 2011-11-16 18:39:08.000000000 -0500
66860@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned l
66861 return BUG_TRAP_TYPE_NONE;
66862
66863 bug = find_bug(bugaddr);
66864+ if (!bug)
66865+ return BUG_TRAP_TYPE_NONE;
66866
66867 file = NULL;
66868 line = 0;
66869diff -urNp linux-3.1.1/lib/debugobjects.c linux-3.1.1/lib/debugobjects.c
66870--- linux-3.1.1/lib/debugobjects.c 2011-11-11 15:19:27.000000000 -0500
66871+++ linux-3.1.1/lib/debugobjects.c 2011-11-16 18:39:08.000000000 -0500
66872@@ -284,7 +284,7 @@ static void debug_object_is_on_stack(voi
66873 if (limit > 4)
66874 return;
66875
66876- is_on_stack = object_is_on_stack(addr);
66877+ is_on_stack = object_starts_on_stack(addr);
66878 if (is_on_stack == onstack)
66879 return;
66880
66881diff -urNp linux-3.1.1/lib/devres.c linux-3.1.1/lib/devres.c
66882--- linux-3.1.1/lib/devres.c 2011-11-11 15:19:27.000000000 -0500
66883+++ linux-3.1.1/lib/devres.c 2011-11-16 18:39:08.000000000 -0500
66884@@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
66885 void devm_iounmap(struct device *dev, void __iomem *addr)
66886 {
66887 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
66888- (void *)addr));
66889+ (void __force *)addr));
66890 iounmap(addr);
66891 }
66892 EXPORT_SYMBOL(devm_iounmap);
66893@@ -141,7 +141,7 @@ void devm_ioport_unmap(struct device *de
66894 {
66895 ioport_unmap(addr);
66896 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
66897- devm_ioport_map_match, (void *)addr));
66898+ devm_ioport_map_match, (void __force *)addr));
66899 }
66900 EXPORT_SYMBOL(devm_ioport_unmap);
66901
66902diff -urNp linux-3.1.1/lib/dma-debug.c linux-3.1.1/lib/dma-debug.c
66903--- linux-3.1.1/lib/dma-debug.c 2011-11-11 15:19:27.000000000 -0500
66904+++ linux-3.1.1/lib/dma-debug.c 2011-11-16 18:39:08.000000000 -0500
66905@@ -870,7 +870,7 @@ out:
66906
66907 static void check_for_stack(struct device *dev, void *addr)
66908 {
66909- if (object_is_on_stack(addr))
66910+ if (object_starts_on_stack(addr))
66911 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
66912 "stack [addr=%p]\n", addr);
66913 }
66914diff -urNp linux-3.1.1/lib/extable.c linux-3.1.1/lib/extable.c
66915--- linux-3.1.1/lib/extable.c 2011-11-11 15:19:27.000000000 -0500
66916+++ linux-3.1.1/lib/extable.c 2011-11-16 18:39:08.000000000 -0500
66917@@ -13,6 +13,7 @@
66918 #include <linux/init.h>
66919 #include <linux/sort.h>
66920 #include <asm/uaccess.h>
66921+#include <asm/pgtable.h>
66922
66923 #ifndef ARCH_HAS_SORT_EXTABLE
66924 /*
66925@@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const v
66926 void sort_extable(struct exception_table_entry *start,
66927 struct exception_table_entry *finish)
66928 {
66929+ pax_open_kernel();
66930 sort(start, finish - start, sizeof(struct exception_table_entry),
66931 cmp_ex, NULL);
66932+ pax_close_kernel();
66933 }
66934
66935 #ifdef CONFIG_MODULES
66936diff -urNp linux-3.1.1/lib/inflate.c linux-3.1.1/lib/inflate.c
66937--- linux-3.1.1/lib/inflate.c 2011-11-11 15:19:27.000000000 -0500
66938+++ linux-3.1.1/lib/inflate.c 2011-11-16 18:39:08.000000000 -0500
66939@@ -269,7 +269,7 @@ static void free(void *where)
66940 malloc_ptr = free_mem_ptr;
66941 }
66942 #else
66943-#define malloc(a) kmalloc(a, GFP_KERNEL)
66944+#define malloc(a) kmalloc((a), GFP_KERNEL)
66945 #define free(a) kfree(a)
66946 #endif
66947
66948diff -urNp linux-3.1.1/lib/Kconfig.debug linux-3.1.1/lib/Kconfig.debug
66949--- linux-3.1.1/lib/Kconfig.debug 2011-11-11 15:19:27.000000000 -0500
66950+++ linux-3.1.1/lib/Kconfig.debug 2011-11-16 18:40:44.000000000 -0500
66951@@ -1091,6 +1091,7 @@ config LATENCYTOP
66952 depends on DEBUG_KERNEL
66953 depends on STACKTRACE_SUPPORT
66954 depends on PROC_FS
66955+ depends on !GRKERNSEC_HIDESYM
66956 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
66957 select KALLSYMS
66958 select KALLSYMS_ALL
66959diff -urNp linux-3.1.1/lib/kref.c linux-3.1.1/lib/kref.c
66960--- linux-3.1.1/lib/kref.c 2011-11-11 15:19:27.000000000 -0500
66961+++ linux-3.1.1/lib/kref.c 2011-11-16 18:39:08.000000000 -0500
66962@@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
66963 */
66964 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
66965 {
66966- WARN_ON(release == NULL);
66967+ BUG_ON(release == NULL);
66968 WARN_ON(release == (void (*)(struct kref *))kfree);
66969
66970 if (atomic_dec_and_test(&kref->refcount)) {
66971diff -urNp linux-3.1.1/lib/radix-tree.c linux-3.1.1/lib/radix-tree.c
66972--- linux-3.1.1/lib/radix-tree.c 2011-11-11 15:19:27.000000000 -0500
66973+++ linux-3.1.1/lib/radix-tree.c 2011-11-16 18:39:08.000000000 -0500
66974@@ -80,7 +80,7 @@ struct radix_tree_preload {
66975 int nr;
66976 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
66977 };
66978-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
66979+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
66980
66981 static inline void *ptr_to_indirect(void *ptr)
66982 {
66983diff -urNp linux-3.1.1/lib/vsprintf.c linux-3.1.1/lib/vsprintf.c
66984--- linux-3.1.1/lib/vsprintf.c 2011-11-11 15:19:27.000000000 -0500
66985+++ linux-3.1.1/lib/vsprintf.c 2011-11-16 18:40:44.000000000 -0500
66986@@ -16,6 +16,9 @@
66987 * - scnprintf and vscnprintf
66988 */
66989
66990+#ifdef CONFIG_GRKERNSEC_HIDESYM
66991+#define __INCLUDED_BY_HIDESYM 1
66992+#endif
66993 #include <stdarg.h>
66994 #include <linux/module.h>
66995 #include <linux/types.h>
66996@@ -432,7 +435,7 @@ char *symbol_string(char *buf, char *end
66997 char sym[KSYM_SYMBOL_LEN];
66998 if (ext == 'B')
66999 sprint_backtrace(sym, value);
67000- else if (ext != 'f' && ext != 's')
67001+ else if (ext != 'f' && ext != 's' && ext != 'a')
67002 sprint_symbol(sym, value);
67003 else
67004 kallsyms_lookup(value, NULL, NULL, NULL, sym);
67005@@ -796,7 +799,11 @@ char *uuid_string(char *buf, char *end,
67006 return string(buf, end, uuid, spec);
67007 }
67008
67009+#ifdef CONFIG_GRKERNSEC_HIDESYM
67010+int kptr_restrict __read_mostly = 2;
67011+#else
67012 int kptr_restrict __read_mostly;
67013+#endif
67014
67015 /*
67016 * Show a '%p' thing. A kernel extension is that the '%p' is followed
67017@@ -810,6 +817,8 @@ int kptr_restrict __read_mostly;
67018 * - 'S' For symbolic direct pointers with offset
67019 * - 's' For symbolic direct pointers without offset
67020 * - 'B' For backtraced symbolic direct pointers with offset
67021+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
67022+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
67023 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
67024 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
67025 * - 'M' For a 6-byte MAC address, it prints the address in the
67026@@ -854,12 +863,12 @@ char *pointer(const char *fmt, char *buf
67027 {
67028 if (!ptr && *fmt != 'K') {
67029 /*
67030- * Print (null) with the same width as a pointer so it makes
67031+ * Print (nil) with the same width as a pointer so it makes
67032 * tabular output look nice.
67033 */
67034 if (spec.field_width == -1)
67035 spec.field_width = 2 * sizeof(void *);
67036- return string(buf, end, "(null)", spec);
67037+ return string(buf, end, "(nil)", spec);
67038 }
67039
67040 switch (*fmt) {
67041@@ -869,6 +878,13 @@ char *pointer(const char *fmt, char *buf
67042 /* Fallthrough */
67043 case 'S':
67044 case 's':
67045+#ifdef CONFIG_GRKERNSEC_HIDESYM
67046+ break;
67047+#else
67048+ return symbol_string(buf, end, ptr, spec, *fmt);
67049+#endif
67050+ case 'A':
67051+ case 'a':
67052 case 'B':
67053 return symbol_string(buf, end, ptr, spec, *fmt);
67054 case 'R':
67055@@ -1627,11 +1643,11 @@ int bstr_printf(char *buf, size_t size,
67056 typeof(type) value; \
67057 if (sizeof(type) == 8) { \
67058 args = PTR_ALIGN(args, sizeof(u32)); \
67059- *(u32 *)&value = *(u32 *)args; \
67060- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
67061+ *(u32 *)&value = *(const u32 *)args; \
67062+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
67063 } else { \
67064 args = PTR_ALIGN(args, sizeof(type)); \
67065- value = *(typeof(type) *)args; \
67066+ value = *(const typeof(type) *)args; \
67067 } \
67068 args += sizeof(type); \
67069 value; \
67070@@ -1694,7 +1710,7 @@ int bstr_printf(char *buf, size_t size,
67071 case FORMAT_TYPE_STR: {
67072 const char *str_arg = args;
67073 args += strlen(str_arg) + 1;
67074- str = string(str, end, (char *)str_arg, spec);
67075+ str = string(str, end, str_arg, spec);
67076 break;
67077 }
67078
67079diff -urNp linux-3.1.1/localversion-grsec linux-3.1.1/localversion-grsec
67080--- linux-3.1.1/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
67081+++ linux-3.1.1/localversion-grsec 2011-11-16 18:40:44.000000000 -0500
67082@@ -0,0 +1 @@
67083+-grsec
67084diff -urNp linux-3.1.1/Makefile linux-3.1.1/Makefile
67085--- linux-3.1.1/Makefile 2011-11-11 15:19:27.000000000 -0500
67086+++ linux-3.1.1/Makefile 2011-11-20 19:43:17.000000000 -0500
67087@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
67088
67089 HOSTCC = gcc
67090 HOSTCXX = g++
67091-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
67092-HOSTCXXFLAGS = -O2
67093+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
67094+HOSTCLFAGS += $(call cc-option, -Wno-empty-body)
67095+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
67096
67097 # Decide whether to build built-in, modular, or both.
67098 # Normally, just do built-in.
67099@@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS
67100 # Rules shared between *config targets and build targets
67101
67102 # Basic helpers built in scripts/
67103-PHONY += scripts_basic
67104-scripts_basic:
67105+PHONY += scripts_basic gcc-plugins
67106+scripts_basic: gcc-plugins
67107 $(Q)$(MAKE) $(build)=scripts/basic
67108 $(Q)rm -f .tmp_quiet_recordmcount
67109
67110@@ -564,6 +565,41 @@ else
67111 KBUILD_CFLAGS += -O2
67112 endif
67113
67114+ifndef DISABLE_PAX_PLUGINS
67115+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
67116+ifndef DISABLE_PAX_CONSTIFY_PLUGIN
67117+CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
67118+endif
67119+ifdef CONFIG_PAX_MEMORY_STACKLEAK
67120+STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
67121+STACKLEAK_PLUGIN += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
67122+endif
67123+ifdef CONFIG_KALLOCSTAT_PLUGIN
67124+KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
67125+endif
67126+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
67127+KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
67128+endif
67129+ifdef CONFIG_CHECKER_PLUGIN
67130+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
67131+CHECKER_PLUGIN := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
67132+endif
67133+endif
67134+GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN) $(CHECKER_PLUGIN)
67135+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
67136+gcc-plugins:
67137+ $(Q)$(MAKE) $(build)=tools/gcc
67138+else
67139+gcc-plugins:
67140+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
67141+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
67142+else
67143+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
67144+endif
67145+ $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
67146+endif
67147+endif
67148+
67149 include $(srctree)/arch/$(SRCARCH)/Makefile
67150
67151 ifneq ($(CONFIG_FRAME_WARN),0)
67152@@ -708,7 +744,7 @@ export mod_strip_cmd
67153
67154
67155 ifeq ($(KBUILD_EXTMOD),)
67156-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
67157+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
67158
67159 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
67160 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
67161@@ -932,6 +968,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-mai
67162
67163 # The actual objects are generated when descending,
67164 # make sure no implicit rule kicks in
67165+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS)
67166 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
67167
67168 # Handle descending into subdirectories listed in $(vmlinux-dirs)
67169@@ -941,7 +978,7 @@ $(sort $(vmlinux-init) $(vmlinux-main))
67170 # Error messages still appears in the original language
67171
67172 PHONY += $(vmlinux-dirs)
67173-$(vmlinux-dirs): prepare scripts
67174+$(vmlinux-dirs): gcc-plugins prepare scripts
67175 $(Q)$(MAKE) $(build)=$@
67176
67177 # Store (new) KERNELRELASE string in include/config/kernel.release
67178@@ -986,6 +1023,7 @@ prepare0: archprepare FORCE
67179 $(Q)$(MAKE) $(build)=. missing-syscalls
67180
67181 # All the preparing..
67182+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS))
67183 prepare: prepare0
67184
67185 # Generate some files
67186@@ -1087,6 +1125,7 @@ all: modules
67187 # using awk while concatenating to the final file.
67188
67189 PHONY += modules
67190+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
67191 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
67192 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
67193 @$(kecho) ' Building modules, stage 2.';
67194@@ -1102,7 +1141,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modu
67195
67196 # Target to prepare building external modules
67197 PHONY += modules_prepare
67198-modules_prepare: prepare scripts
67199+modules_prepare: gcc-plugins prepare scripts
67200
67201 # Target to install modules
67202 PHONY += modules_install
67203@@ -1198,7 +1237,7 @@ distclean: mrproper
67204 @find $(srctree) $(RCS_FIND_IGNORE) \
67205 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
67206 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
67207- -o -name '.*.rej' -o -size 0 \
67208+ -o -name '.*.rej' -o -name '*.so' -o -size 0 \
67209 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
67210 -type f -print | xargs rm -f
67211
67212@@ -1360,6 +1399,7 @@ PHONY += $(module-dirs) modules
67213 $(module-dirs): crmodverdir $(objtree)/Module.symvers
67214 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
67215
67216+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
67217 modules: $(module-dirs)
67218 @$(kecho) ' Building modules, stage 2.';
67219 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
67220@@ -1486,17 +1526,19 @@ else
67221 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
67222 endif
67223
67224-%.s: %.c prepare scripts FORCE
67225+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS)
67226+%.s: %.c gcc-plugins prepare scripts FORCE
67227 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
67228 %.i: %.c prepare scripts FORCE
67229 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
67230-%.o: %.c prepare scripts FORCE
67231+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS)
67232+%.o: %.c gcc-plugins prepare scripts FORCE
67233 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
67234 %.lst: %.c prepare scripts FORCE
67235 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
67236-%.s: %.S prepare scripts FORCE
67237+%.s: %.S gcc-plugins prepare scripts FORCE
67238 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
67239-%.o: %.S prepare scripts FORCE
67240+%.o: %.S gcc-plugins prepare scripts FORCE
67241 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
67242 %.symtypes: %.c prepare scripts FORCE
67243 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
67244@@ -1506,11 +1548,13 @@ endif
67245 $(cmd_crmodverdir)
67246 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
67247 $(build)=$(build-dir)
67248-%/: prepare scripts FORCE
67249+%/: KBUILD_CFLAGS += $(GCC_PLUGINS)
67250+%/: gcc-plugins prepare scripts FORCE
67251 $(cmd_crmodverdir)
67252 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
67253 $(build)=$(build-dir)
67254-%.ko: prepare scripts FORCE
67255+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS)
67256+%.ko: gcc-plugins prepare scripts FORCE
67257 $(cmd_crmodverdir)
67258 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
67259 $(build)=$(build-dir) $(@:.ko=.o)
67260diff -urNp linux-3.1.1/mm/filemap.c linux-3.1.1/mm/filemap.c
67261--- linux-3.1.1/mm/filemap.c 2011-11-11 15:19:27.000000000 -0500
67262+++ linux-3.1.1/mm/filemap.c 2011-11-16 18:40:44.000000000 -0500
67263@@ -1784,7 +1784,7 @@ int generic_file_mmap(struct file * file
67264 struct address_space *mapping = file->f_mapping;
67265
67266 if (!mapping->a_ops->readpage)
67267- return -ENOEXEC;
67268+ return -ENODEV;
67269 file_accessed(file);
67270 vma->vm_ops = &generic_file_vm_ops;
67271 vma->vm_flags |= VM_CAN_NONLINEAR;
67272@@ -2190,6 +2190,7 @@ inline int generic_write_checks(struct f
67273 *pos = i_size_read(inode);
67274
67275 if (limit != RLIM_INFINITY) {
67276+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
67277 if (*pos >= limit) {
67278 send_sig(SIGXFSZ, current, 0);
67279 return -EFBIG;
67280diff -urNp linux-3.1.1/mm/fremap.c linux-3.1.1/mm/fremap.c
67281--- linux-3.1.1/mm/fremap.c 2011-11-11 15:19:27.000000000 -0500
67282+++ linux-3.1.1/mm/fremap.c 2011-11-16 18:39:08.000000000 -0500
67283@@ -156,6 +156,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
67284 retry:
67285 vma = find_vma(mm, start);
67286
67287+#ifdef CONFIG_PAX_SEGMEXEC
67288+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
67289+ goto out;
67290+#endif
67291+
67292 /*
67293 * Make sure the vma is shared, that it supports prefaulting,
67294 * and that the remapped range is valid and fully within
67295diff -urNp linux-3.1.1/mm/highmem.c linux-3.1.1/mm/highmem.c
67296--- linux-3.1.1/mm/highmem.c 2011-11-11 15:19:27.000000000 -0500
67297+++ linux-3.1.1/mm/highmem.c 2011-11-16 18:39:08.000000000 -0500
67298@@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
67299 * So no dangers, even with speculative execution.
67300 */
67301 page = pte_page(pkmap_page_table[i]);
67302+ pax_open_kernel();
67303 pte_clear(&init_mm, (unsigned long)page_address(page),
67304 &pkmap_page_table[i]);
67305-
67306+ pax_close_kernel();
67307 set_page_address(page, NULL);
67308 need_flush = 1;
67309 }
67310@@ -186,9 +187,11 @@ start:
67311 }
67312 }
67313 vaddr = PKMAP_ADDR(last_pkmap_nr);
67314+
67315+ pax_open_kernel();
67316 set_pte_at(&init_mm, vaddr,
67317 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
67318-
67319+ pax_close_kernel();
67320 pkmap_count[last_pkmap_nr] = 1;
67321 set_page_address(page, (void *)vaddr);
67322
67323diff -urNp linux-3.1.1/mm/huge_memory.c linux-3.1.1/mm/huge_memory.c
67324--- linux-3.1.1/mm/huge_memory.c 2011-11-11 15:19:27.000000000 -0500
67325+++ linux-3.1.1/mm/huge_memory.c 2011-11-16 18:39:08.000000000 -0500
67326@@ -702,7 +702,7 @@ out:
67327 * run pte_offset_map on the pmd, if an huge pmd could
67328 * materialize from under us from a different thread.
67329 */
67330- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
67331+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
67332 return VM_FAULT_OOM;
67333 /* if an huge pmd materialized from under us just retry later */
67334 if (unlikely(pmd_trans_huge(*pmd)))
67335diff -urNp linux-3.1.1/mm/hugetlb.c linux-3.1.1/mm/hugetlb.c
67336--- linux-3.1.1/mm/hugetlb.c 2011-11-11 15:19:27.000000000 -0500
67337+++ linux-3.1.1/mm/hugetlb.c 2011-11-16 18:39:08.000000000 -0500
67338@@ -2346,6 +2346,27 @@ static int unmap_ref_private(struct mm_s
67339 return 1;
67340 }
67341
67342+#ifdef CONFIG_PAX_SEGMEXEC
67343+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
67344+{
67345+ struct mm_struct *mm = vma->vm_mm;
67346+ struct vm_area_struct *vma_m;
67347+ unsigned long address_m;
67348+ pte_t *ptep_m;
67349+
67350+ vma_m = pax_find_mirror_vma(vma);
67351+ if (!vma_m)
67352+ return;
67353+
67354+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67355+ address_m = address + SEGMEXEC_TASK_SIZE;
67356+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
67357+ get_page(page_m);
67358+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
67359+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
67360+}
67361+#endif
67362+
67363 /*
67364 * Hugetlb_cow() should be called with page lock of the original hugepage held.
67365 */
67366@@ -2447,6 +2468,11 @@ retry_avoidcopy:
67367 make_huge_pte(vma, new_page, 1));
67368 page_remove_rmap(old_page);
67369 hugepage_add_new_anon_rmap(new_page, vma, address);
67370+
67371+#ifdef CONFIG_PAX_SEGMEXEC
67372+ pax_mirror_huge_pte(vma, address, new_page);
67373+#endif
67374+
67375 /* Make the old page be freed below */
67376 new_page = old_page;
67377 mmu_notifier_invalidate_range_end(mm,
67378@@ -2598,6 +2624,10 @@ retry:
67379 && (vma->vm_flags & VM_SHARED)));
67380 set_huge_pte_at(mm, address, ptep, new_pte);
67381
67382+#ifdef CONFIG_PAX_SEGMEXEC
67383+ pax_mirror_huge_pte(vma, address, page);
67384+#endif
67385+
67386 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
67387 /* Optimization, do the COW without a second fault */
67388 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
67389@@ -2627,6 +2657,10 @@ int hugetlb_fault(struct mm_struct *mm,
67390 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
67391 struct hstate *h = hstate_vma(vma);
67392
67393+#ifdef CONFIG_PAX_SEGMEXEC
67394+ struct vm_area_struct *vma_m;
67395+#endif
67396+
67397 ptep = huge_pte_offset(mm, address);
67398 if (ptep) {
67399 entry = huge_ptep_get(ptep);
67400@@ -2638,6 +2672,26 @@ int hugetlb_fault(struct mm_struct *mm,
67401 VM_FAULT_SET_HINDEX(h - hstates);
67402 }
67403
67404+#ifdef CONFIG_PAX_SEGMEXEC
67405+ vma_m = pax_find_mirror_vma(vma);
67406+ if (vma_m) {
67407+ unsigned long address_m;
67408+
67409+ if (vma->vm_start > vma_m->vm_start) {
67410+ address_m = address;
67411+ address -= SEGMEXEC_TASK_SIZE;
67412+ vma = vma_m;
67413+ h = hstate_vma(vma);
67414+ } else
67415+ address_m = address + SEGMEXEC_TASK_SIZE;
67416+
67417+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
67418+ return VM_FAULT_OOM;
67419+ address_m &= HPAGE_MASK;
67420+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
67421+ }
67422+#endif
67423+
67424 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
67425 if (!ptep)
67426 return VM_FAULT_OOM;
67427diff -urNp linux-3.1.1/mm/internal.h linux-3.1.1/mm/internal.h
67428--- linux-3.1.1/mm/internal.h 2011-11-11 15:19:27.000000000 -0500
67429+++ linux-3.1.1/mm/internal.h 2011-11-16 18:39:08.000000000 -0500
67430@@ -95,6 +95,7 @@ extern void putback_lru_page(struct page
67431 * in mm/page_alloc.c
67432 */
67433 extern void __free_pages_bootmem(struct page *page, unsigned int order);
67434+extern void free_compound_page(struct page *page);
67435 extern void prep_compound_page(struct page *page, unsigned long order);
67436 #ifdef CONFIG_MEMORY_FAILURE
67437 extern bool is_free_buddy_page(struct page *page);
67438diff -urNp linux-3.1.1/mm/Kconfig linux-3.1.1/mm/Kconfig
67439--- linux-3.1.1/mm/Kconfig 2011-11-11 15:19:27.000000000 -0500
67440+++ linux-3.1.1/mm/Kconfig 2011-11-17 18:57:00.000000000 -0500
67441@@ -238,10 +238,10 @@ config KSM
67442 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
67443
67444 config DEFAULT_MMAP_MIN_ADDR
67445- int "Low address space to protect from user allocation"
67446+ int "Low address space to protect from user allocation"
67447 depends on MMU
67448- default 4096
67449- help
67450+ default 65536
67451+ help
67452 This is the portion of low virtual memory which should be protected
67453 from userspace allocation. Keeping a user from writing to low pages
67454 can help reduce the impact of kernel NULL pointer bugs.
67455diff -urNp linux-3.1.1/mm/kmemleak.c linux-3.1.1/mm/kmemleak.c
67456--- linux-3.1.1/mm/kmemleak.c 2011-11-11 15:19:27.000000000 -0500
67457+++ linux-3.1.1/mm/kmemleak.c 2011-11-16 18:40:44.000000000 -0500
67458@@ -357,7 +357,7 @@ static void print_unreferenced(struct se
67459
67460 for (i = 0; i < object->trace_len; i++) {
67461 void *ptr = (void *)object->trace[i];
67462- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
67463+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
67464 }
67465 }
67466
67467diff -urNp linux-3.1.1/mm/maccess.c linux-3.1.1/mm/maccess.c
67468--- linux-3.1.1/mm/maccess.c 2011-11-11 15:19:27.000000000 -0500
67469+++ linux-3.1.1/mm/maccess.c 2011-11-16 18:39:08.000000000 -0500
67470@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, cons
67471 set_fs(KERNEL_DS);
67472 pagefault_disable();
67473 ret = __copy_from_user_inatomic(dst,
67474- (__force const void __user *)src, size);
67475+ (const void __force_user *)src, size);
67476 pagefault_enable();
67477 set_fs(old_fs);
67478
67479@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, con
67480
67481 set_fs(KERNEL_DS);
67482 pagefault_disable();
67483- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
67484+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
67485 pagefault_enable();
67486 set_fs(old_fs);
67487
67488diff -urNp linux-3.1.1/mm/madvise.c linux-3.1.1/mm/madvise.c
67489--- linux-3.1.1/mm/madvise.c 2011-11-11 15:19:27.000000000 -0500
67490+++ linux-3.1.1/mm/madvise.c 2011-11-16 18:39:08.000000000 -0500
67491@@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_a
67492 pgoff_t pgoff;
67493 unsigned long new_flags = vma->vm_flags;
67494
67495+#ifdef CONFIG_PAX_SEGMEXEC
67496+ struct vm_area_struct *vma_m;
67497+#endif
67498+
67499 switch (behavior) {
67500 case MADV_NORMAL:
67501 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
67502@@ -110,6 +114,13 @@ success:
67503 /*
67504 * vm_flags is protected by the mmap_sem held in write mode.
67505 */
67506+
67507+#ifdef CONFIG_PAX_SEGMEXEC
67508+ vma_m = pax_find_mirror_vma(vma);
67509+ if (vma_m)
67510+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
67511+#endif
67512+
67513 vma->vm_flags = new_flags;
67514
67515 out:
67516@@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_a
67517 struct vm_area_struct ** prev,
67518 unsigned long start, unsigned long end)
67519 {
67520+
67521+#ifdef CONFIG_PAX_SEGMEXEC
67522+ struct vm_area_struct *vma_m;
67523+#endif
67524+
67525 *prev = vma;
67526 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
67527 return -EINVAL;
67528@@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_a
67529 zap_page_range(vma, start, end - start, &details);
67530 } else
67531 zap_page_range(vma, start, end - start, NULL);
67532+
67533+#ifdef CONFIG_PAX_SEGMEXEC
67534+ vma_m = pax_find_mirror_vma(vma);
67535+ if (vma_m) {
67536+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
67537+ struct zap_details details = {
67538+ .nonlinear_vma = vma_m,
67539+ .last_index = ULONG_MAX,
67540+ };
67541+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
67542+ } else
67543+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
67544+ }
67545+#endif
67546+
67547 return 0;
67548 }
67549
67550@@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
67551 if (end < start)
67552 goto out;
67553
67554+#ifdef CONFIG_PAX_SEGMEXEC
67555+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
67556+ if (end > SEGMEXEC_TASK_SIZE)
67557+ goto out;
67558+ } else
67559+#endif
67560+
67561+ if (end > TASK_SIZE)
67562+ goto out;
67563+
67564 error = 0;
67565 if (end == start)
67566 goto out;
67567diff -urNp linux-3.1.1/mm/memory.c linux-3.1.1/mm/memory.c
67568--- linux-3.1.1/mm/memory.c 2011-11-11 15:19:27.000000000 -0500
67569+++ linux-3.1.1/mm/memory.c 2011-11-16 18:39:08.000000000 -0500
67570@@ -457,8 +457,12 @@ static inline void free_pmd_range(struct
67571 return;
67572
67573 pmd = pmd_offset(pud, start);
67574+
67575+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
67576 pud_clear(pud);
67577 pmd_free_tlb(tlb, pmd, start);
67578+#endif
67579+
67580 }
67581
67582 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
67583@@ -489,9 +493,12 @@ static inline void free_pud_range(struct
67584 if (end - 1 > ceiling - 1)
67585 return;
67586
67587+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
67588 pud = pud_offset(pgd, start);
67589 pgd_clear(pgd);
67590 pud_free_tlb(tlb, pud, start);
67591+#endif
67592+
67593 }
67594
67595 /*
67596@@ -1566,12 +1573,6 @@ no_page_table:
67597 return page;
67598 }
67599
67600-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
67601-{
67602- return stack_guard_page_start(vma, addr) ||
67603- stack_guard_page_end(vma, addr+PAGE_SIZE);
67604-}
67605-
67606 /**
67607 * __get_user_pages() - pin user pages in memory
67608 * @tsk: task_struct of target task
67609@@ -1644,10 +1645,10 @@ int __get_user_pages(struct task_struct
67610 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
67611 i = 0;
67612
67613- do {
67614+ while (nr_pages) {
67615 struct vm_area_struct *vma;
67616
67617- vma = find_extend_vma(mm, start);
67618+ vma = find_vma(mm, start);
67619 if (!vma && in_gate_area(mm, start)) {
67620 unsigned long pg = start & PAGE_MASK;
67621 pgd_t *pgd;
67622@@ -1695,7 +1696,7 @@ int __get_user_pages(struct task_struct
67623 goto next_page;
67624 }
67625
67626- if (!vma ||
67627+ if (!vma || start < vma->vm_start ||
67628 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
67629 !(vm_flags & vma->vm_flags))
67630 return i ? : -EFAULT;
67631@@ -1722,11 +1723,6 @@ int __get_user_pages(struct task_struct
67632 int ret;
67633 unsigned int fault_flags = 0;
67634
67635- /* For mlock, just skip the stack guard page. */
67636- if (foll_flags & FOLL_MLOCK) {
67637- if (stack_guard_page(vma, start))
67638- goto next_page;
67639- }
67640 if (foll_flags & FOLL_WRITE)
67641 fault_flags |= FAULT_FLAG_WRITE;
67642 if (nonblocking)
67643@@ -1800,7 +1796,7 @@ next_page:
67644 start += PAGE_SIZE;
67645 nr_pages--;
67646 } while (nr_pages && start < vma->vm_end);
67647- } while (nr_pages);
67648+ }
67649 return i;
67650 }
67651 EXPORT_SYMBOL(__get_user_pages);
67652@@ -2007,6 +2003,10 @@ static int insert_page(struct vm_area_st
67653 page_add_file_rmap(page);
67654 set_pte_at(mm, addr, pte, mk_pte(page, prot));
67655
67656+#ifdef CONFIG_PAX_SEGMEXEC
67657+ pax_mirror_file_pte(vma, addr, page, ptl);
67658+#endif
67659+
67660 retval = 0;
67661 pte_unmap_unlock(pte, ptl);
67662 return retval;
67663@@ -2041,10 +2041,22 @@ out:
67664 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
67665 struct page *page)
67666 {
67667+
67668+#ifdef CONFIG_PAX_SEGMEXEC
67669+ struct vm_area_struct *vma_m;
67670+#endif
67671+
67672 if (addr < vma->vm_start || addr >= vma->vm_end)
67673 return -EFAULT;
67674 if (!page_count(page))
67675 return -EINVAL;
67676+
67677+#ifdef CONFIG_PAX_SEGMEXEC
67678+ vma_m = pax_find_mirror_vma(vma);
67679+ if (vma_m)
67680+ vma_m->vm_flags |= VM_INSERTPAGE;
67681+#endif
67682+
67683 vma->vm_flags |= VM_INSERTPAGE;
67684 return insert_page(vma, addr, page, vma->vm_page_prot);
67685 }
67686@@ -2130,6 +2142,7 @@ int vm_insert_mixed(struct vm_area_struc
67687 unsigned long pfn)
67688 {
67689 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
67690+ BUG_ON(vma->vm_mirror);
67691
67692 if (addr < vma->vm_start || addr >= vma->vm_end)
67693 return -EFAULT;
67694@@ -2445,6 +2458,186 @@ static inline void cow_user_page(struct
67695 copy_user_highpage(dst, src, va, vma);
67696 }
67697
67698+#ifdef CONFIG_PAX_SEGMEXEC
67699+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
67700+{
67701+ struct mm_struct *mm = vma->vm_mm;
67702+ spinlock_t *ptl;
67703+ pte_t *pte, entry;
67704+
67705+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
67706+ entry = *pte;
67707+ if (!pte_present(entry)) {
67708+ if (!pte_none(entry)) {
67709+ BUG_ON(pte_file(entry));
67710+ free_swap_and_cache(pte_to_swp_entry(entry));
67711+ pte_clear_not_present_full(mm, address, pte, 0);
67712+ }
67713+ } else {
67714+ struct page *page;
67715+
67716+ flush_cache_page(vma, address, pte_pfn(entry));
67717+ entry = ptep_clear_flush(vma, address, pte);
67718+ BUG_ON(pte_dirty(entry));
67719+ page = vm_normal_page(vma, address, entry);
67720+ if (page) {
67721+ update_hiwater_rss(mm);
67722+ if (PageAnon(page))
67723+ dec_mm_counter_fast(mm, MM_ANONPAGES);
67724+ else
67725+ dec_mm_counter_fast(mm, MM_FILEPAGES);
67726+ page_remove_rmap(page);
67727+ page_cache_release(page);
67728+ }
67729+ }
67730+ pte_unmap_unlock(pte, ptl);
67731+}
67732+
67733+/* PaX: if vma is mirrored, synchronize the mirror's PTE
67734+ *
67735+ * the ptl of the lower mapped page is held on entry and is not released on exit
67736+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
67737+ */
67738+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
67739+{
67740+ struct mm_struct *mm = vma->vm_mm;
67741+ unsigned long address_m;
67742+ spinlock_t *ptl_m;
67743+ struct vm_area_struct *vma_m;
67744+ pmd_t *pmd_m;
67745+ pte_t *pte_m, entry_m;
67746+
67747+ BUG_ON(!page_m || !PageAnon(page_m));
67748+
67749+ vma_m = pax_find_mirror_vma(vma);
67750+ if (!vma_m)
67751+ return;
67752+
67753+ BUG_ON(!PageLocked(page_m));
67754+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67755+ address_m = address + SEGMEXEC_TASK_SIZE;
67756+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67757+ pte_m = pte_offset_map(pmd_m, address_m);
67758+ ptl_m = pte_lockptr(mm, pmd_m);
67759+ if (ptl != ptl_m) {
67760+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67761+ if (!pte_none(*pte_m))
67762+ goto out;
67763+ }
67764+
67765+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
67766+ page_cache_get(page_m);
67767+ page_add_anon_rmap(page_m, vma_m, address_m);
67768+ inc_mm_counter_fast(mm, MM_ANONPAGES);
67769+ set_pte_at(mm, address_m, pte_m, entry_m);
67770+ update_mmu_cache(vma_m, address_m, entry_m);
67771+out:
67772+ if (ptl != ptl_m)
67773+ spin_unlock(ptl_m);
67774+ pte_unmap(pte_m);
67775+ unlock_page(page_m);
67776+}
67777+
67778+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
67779+{
67780+ struct mm_struct *mm = vma->vm_mm;
67781+ unsigned long address_m;
67782+ spinlock_t *ptl_m;
67783+ struct vm_area_struct *vma_m;
67784+ pmd_t *pmd_m;
67785+ pte_t *pte_m, entry_m;
67786+
67787+ BUG_ON(!page_m || PageAnon(page_m));
67788+
67789+ vma_m = pax_find_mirror_vma(vma);
67790+ if (!vma_m)
67791+ return;
67792+
67793+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67794+ address_m = address + SEGMEXEC_TASK_SIZE;
67795+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67796+ pte_m = pte_offset_map(pmd_m, address_m);
67797+ ptl_m = pte_lockptr(mm, pmd_m);
67798+ if (ptl != ptl_m) {
67799+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67800+ if (!pte_none(*pte_m))
67801+ goto out;
67802+ }
67803+
67804+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
67805+ page_cache_get(page_m);
67806+ page_add_file_rmap(page_m);
67807+ inc_mm_counter_fast(mm, MM_FILEPAGES);
67808+ set_pte_at(mm, address_m, pte_m, entry_m);
67809+ update_mmu_cache(vma_m, address_m, entry_m);
67810+out:
67811+ if (ptl != ptl_m)
67812+ spin_unlock(ptl_m);
67813+ pte_unmap(pte_m);
67814+}
67815+
67816+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
67817+{
67818+ struct mm_struct *mm = vma->vm_mm;
67819+ unsigned long address_m;
67820+ spinlock_t *ptl_m;
67821+ struct vm_area_struct *vma_m;
67822+ pmd_t *pmd_m;
67823+ pte_t *pte_m, entry_m;
67824+
67825+ vma_m = pax_find_mirror_vma(vma);
67826+ if (!vma_m)
67827+ return;
67828+
67829+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67830+ address_m = address + SEGMEXEC_TASK_SIZE;
67831+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67832+ pte_m = pte_offset_map(pmd_m, address_m);
67833+ ptl_m = pte_lockptr(mm, pmd_m);
67834+ if (ptl != ptl_m) {
67835+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67836+ if (!pte_none(*pte_m))
67837+ goto out;
67838+ }
67839+
67840+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
67841+ set_pte_at(mm, address_m, pte_m, entry_m);
67842+out:
67843+ if (ptl != ptl_m)
67844+ spin_unlock(ptl_m);
67845+ pte_unmap(pte_m);
67846+}
67847+
67848+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
67849+{
67850+ struct page *page_m;
67851+ pte_t entry;
67852+
67853+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
67854+ goto out;
67855+
67856+ entry = *pte;
67857+ page_m = vm_normal_page(vma, address, entry);
67858+ if (!page_m)
67859+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
67860+ else if (PageAnon(page_m)) {
67861+ if (pax_find_mirror_vma(vma)) {
67862+ pte_unmap_unlock(pte, ptl);
67863+ lock_page(page_m);
67864+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
67865+ if (pte_same(entry, *pte))
67866+ pax_mirror_anon_pte(vma, address, page_m, ptl);
67867+ else
67868+ unlock_page(page_m);
67869+ }
67870+ } else
67871+ pax_mirror_file_pte(vma, address, page_m, ptl);
67872+
67873+out:
67874+ pte_unmap_unlock(pte, ptl);
67875+}
67876+#endif
67877+
67878 /*
67879 * This routine handles present pages, when users try to write
67880 * to a shared page. It is done by copying the page to a new address
67881@@ -2656,6 +2849,12 @@ gotten:
67882 */
67883 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
67884 if (likely(pte_same(*page_table, orig_pte))) {
67885+
67886+#ifdef CONFIG_PAX_SEGMEXEC
67887+ if (pax_find_mirror_vma(vma))
67888+ BUG_ON(!trylock_page(new_page));
67889+#endif
67890+
67891 if (old_page) {
67892 if (!PageAnon(old_page)) {
67893 dec_mm_counter_fast(mm, MM_FILEPAGES);
67894@@ -2707,6 +2906,10 @@ gotten:
67895 page_remove_rmap(old_page);
67896 }
67897
67898+#ifdef CONFIG_PAX_SEGMEXEC
67899+ pax_mirror_anon_pte(vma, address, new_page, ptl);
67900+#endif
67901+
67902 /* Free the old page.. */
67903 new_page = old_page;
67904 ret |= VM_FAULT_WRITE;
67905@@ -2986,6 +3189,11 @@ static int do_swap_page(struct mm_struct
67906 swap_free(entry);
67907 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
67908 try_to_free_swap(page);
67909+
67910+#ifdef CONFIG_PAX_SEGMEXEC
67911+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
67912+#endif
67913+
67914 unlock_page(page);
67915 if (swapcache) {
67916 /*
67917@@ -3009,6 +3217,11 @@ static int do_swap_page(struct mm_struct
67918
67919 /* No need to invalidate - it was non-present before */
67920 update_mmu_cache(vma, address, page_table);
67921+
67922+#ifdef CONFIG_PAX_SEGMEXEC
67923+ pax_mirror_anon_pte(vma, address, page, ptl);
67924+#endif
67925+
67926 unlock:
67927 pte_unmap_unlock(page_table, ptl);
67928 out:
67929@@ -3028,40 +3241,6 @@ out_release:
67930 }
67931
67932 /*
67933- * This is like a special single-page "expand_{down|up}wards()",
67934- * except we must first make sure that 'address{-|+}PAGE_SIZE'
67935- * doesn't hit another vma.
67936- */
67937-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
67938-{
67939- address &= PAGE_MASK;
67940- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
67941- struct vm_area_struct *prev = vma->vm_prev;
67942-
67943- /*
67944- * Is there a mapping abutting this one below?
67945- *
67946- * That's only ok if it's the same stack mapping
67947- * that has gotten split..
67948- */
67949- if (prev && prev->vm_end == address)
67950- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
67951-
67952- expand_downwards(vma, address - PAGE_SIZE);
67953- }
67954- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
67955- struct vm_area_struct *next = vma->vm_next;
67956-
67957- /* As VM_GROWSDOWN but s/below/above/ */
67958- if (next && next->vm_start == address + PAGE_SIZE)
67959- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
67960-
67961- expand_upwards(vma, address + PAGE_SIZE);
67962- }
67963- return 0;
67964-}
67965-
67966-/*
67967 * We enter with non-exclusive mmap_sem (to exclude vma changes,
67968 * but allow concurrent faults), and pte mapped but not yet locked.
67969 * We return with mmap_sem still held, but pte unmapped and unlocked.
67970@@ -3070,27 +3249,23 @@ static int do_anonymous_page(struct mm_s
67971 unsigned long address, pte_t *page_table, pmd_t *pmd,
67972 unsigned int flags)
67973 {
67974- struct page *page;
67975+ struct page *page = NULL;
67976 spinlock_t *ptl;
67977 pte_t entry;
67978
67979- pte_unmap(page_table);
67980-
67981- /* Check if we need to add a guard page to the stack */
67982- if (check_stack_guard_page(vma, address) < 0)
67983- return VM_FAULT_SIGBUS;
67984-
67985- /* Use the zero-page for reads */
67986 if (!(flags & FAULT_FLAG_WRITE)) {
67987 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
67988 vma->vm_page_prot));
67989- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
67990+ ptl = pte_lockptr(mm, pmd);
67991+ spin_lock(ptl);
67992 if (!pte_none(*page_table))
67993 goto unlock;
67994 goto setpte;
67995 }
67996
67997 /* Allocate our own private page. */
67998+ pte_unmap(page_table);
67999+
68000 if (unlikely(anon_vma_prepare(vma)))
68001 goto oom;
68002 page = alloc_zeroed_user_highpage_movable(vma, address);
68003@@ -3109,6 +3284,11 @@ static int do_anonymous_page(struct mm_s
68004 if (!pte_none(*page_table))
68005 goto release;
68006
68007+#ifdef CONFIG_PAX_SEGMEXEC
68008+ if (pax_find_mirror_vma(vma))
68009+ BUG_ON(!trylock_page(page));
68010+#endif
68011+
68012 inc_mm_counter_fast(mm, MM_ANONPAGES);
68013 page_add_new_anon_rmap(page, vma, address);
68014 setpte:
68015@@ -3116,6 +3296,12 @@ setpte:
68016
68017 /* No need to invalidate - it was non-present before */
68018 update_mmu_cache(vma, address, page_table);
68019+
68020+#ifdef CONFIG_PAX_SEGMEXEC
68021+ if (page)
68022+ pax_mirror_anon_pte(vma, address, page, ptl);
68023+#endif
68024+
68025 unlock:
68026 pte_unmap_unlock(page_table, ptl);
68027 return 0;
68028@@ -3259,6 +3445,12 @@ static int __do_fault(struct mm_struct *
68029 */
68030 /* Only go through if we didn't race with anybody else... */
68031 if (likely(pte_same(*page_table, orig_pte))) {
68032+
68033+#ifdef CONFIG_PAX_SEGMEXEC
68034+ if (anon && pax_find_mirror_vma(vma))
68035+ BUG_ON(!trylock_page(page));
68036+#endif
68037+
68038 flush_icache_page(vma, page);
68039 entry = mk_pte(page, vma->vm_page_prot);
68040 if (flags & FAULT_FLAG_WRITE)
68041@@ -3278,6 +3470,14 @@ static int __do_fault(struct mm_struct *
68042
68043 /* no need to invalidate: a not-present page won't be cached */
68044 update_mmu_cache(vma, address, page_table);
68045+
68046+#ifdef CONFIG_PAX_SEGMEXEC
68047+ if (anon)
68048+ pax_mirror_anon_pte(vma, address, page, ptl);
68049+ else
68050+ pax_mirror_file_pte(vma, address, page, ptl);
68051+#endif
68052+
68053 } else {
68054 if (cow_page)
68055 mem_cgroup_uncharge_page(cow_page);
68056@@ -3431,6 +3631,12 @@ int handle_pte_fault(struct mm_struct *m
68057 if (flags & FAULT_FLAG_WRITE)
68058 flush_tlb_fix_spurious_fault(vma, address);
68059 }
68060+
68061+#ifdef CONFIG_PAX_SEGMEXEC
68062+ pax_mirror_pte(vma, address, pte, pmd, ptl);
68063+ return 0;
68064+#endif
68065+
68066 unlock:
68067 pte_unmap_unlock(pte, ptl);
68068 return 0;
68069@@ -3447,6 +3653,10 @@ int handle_mm_fault(struct mm_struct *mm
68070 pmd_t *pmd;
68071 pte_t *pte;
68072
68073+#ifdef CONFIG_PAX_SEGMEXEC
68074+ struct vm_area_struct *vma_m;
68075+#endif
68076+
68077 __set_current_state(TASK_RUNNING);
68078
68079 count_vm_event(PGFAULT);
68080@@ -3458,6 +3668,34 @@ int handle_mm_fault(struct mm_struct *mm
68081 if (unlikely(is_vm_hugetlb_page(vma)))
68082 return hugetlb_fault(mm, vma, address, flags);
68083
68084+#ifdef CONFIG_PAX_SEGMEXEC
68085+ vma_m = pax_find_mirror_vma(vma);
68086+ if (vma_m) {
68087+ unsigned long address_m;
68088+ pgd_t *pgd_m;
68089+ pud_t *pud_m;
68090+ pmd_t *pmd_m;
68091+
68092+ if (vma->vm_start > vma_m->vm_start) {
68093+ address_m = address;
68094+ address -= SEGMEXEC_TASK_SIZE;
68095+ vma = vma_m;
68096+ } else
68097+ address_m = address + SEGMEXEC_TASK_SIZE;
68098+
68099+ pgd_m = pgd_offset(mm, address_m);
68100+ pud_m = pud_alloc(mm, pgd_m, address_m);
68101+ if (!pud_m)
68102+ return VM_FAULT_OOM;
68103+ pmd_m = pmd_alloc(mm, pud_m, address_m);
68104+ if (!pmd_m)
68105+ return VM_FAULT_OOM;
68106+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
68107+ return VM_FAULT_OOM;
68108+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
68109+ }
68110+#endif
68111+
68112 pgd = pgd_offset(mm, address);
68113 pud = pud_alloc(mm, pgd, address);
68114 if (!pud)
68115@@ -3487,7 +3725,7 @@ int handle_mm_fault(struct mm_struct *mm
68116 * run pte_offset_map on the pmd, if an huge pmd could
68117 * materialize from under us from a different thread.
68118 */
68119- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
68120+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
68121 return VM_FAULT_OOM;
68122 /* if an huge pmd materialized from under us just retry later */
68123 if (unlikely(pmd_trans_huge(*pmd)))
68124@@ -3591,7 +3829,7 @@ static int __init gate_vma_init(void)
68125 gate_vma.vm_start = FIXADDR_USER_START;
68126 gate_vma.vm_end = FIXADDR_USER_END;
68127 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
68128- gate_vma.vm_page_prot = __P101;
68129+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
68130 /*
68131 * Make sure the vDSO gets into every core dump.
68132 * Dumping its contents makes post-mortem fully interpretable later
68133diff -urNp linux-3.1.1/mm/memory-failure.c linux-3.1.1/mm/memory-failure.c
68134--- linux-3.1.1/mm/memory-failure.c 2011-11-11 15:19:27.000000000 -0500
68135+++ linux-3.1.1/mm/memory-failure.c 2011-11-16 18:39:08.000000000 -0500
68136@@ -60,7 +60,7 @@ int sysctl_memory_failure_early_kill __r
68137
68138 int sysctl_memory_failure_recovery __read_mostly = 1;
68139
68140-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
68141+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
68142
68143 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
68144
68145@@ -201,7 +201,7 @@ static int kill_proc_ao(struct task_stru
68146 si.si_signo = SIGBUS;
68147 si.si_errno = 0;
68148 si.si_code = BUS_MCEERR_AO;
68149- si.si_addr = (void *)addr;
68150+ si.si_addr = (void __user *)addr;
68151 #ifdef __ARCH_SI_TRAPNO
68152 si.si_trapno = trapno;
68153 #endif
68154@@ -1009,7 +1009,7 @@ int __memory_failure(unsigned long pfn,
68155 }
68156
68157 nr_pages = 1 << compound_trans_order(hpage);
68158- atomic_long_add(nr_pages, &mce_bad_pages);
68159+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
68160
68161 /*
68162 * We need/can do nothing about count=0 pages.
68163@@ -1039,7 +1039,7 @@ int __memory_failure(unsigned long pfn,
68164 if (!PageHWPoison(hpage)
68165 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
68166 || (p != hpage && TestSetPageHWPoison(hpage))) {
68167- atomic_long_sub(nr_pages, &mce_bad_pages);
68168+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
68169 return 0;
68170 }
68171 set_page_hwpoison_huge_page(hpage);
68172@@ -1097,7 +1097,7 @@ int __memory_failure(unsigned long pfn,
68173 }
68174 if (hwpoison_filter(p)) {
68175 if (TestClearPageHWPoison(p))
68176- atomic_long_sub(nr_pages, &mce_bad_pages);
68177+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
68178 unlock_page(hpage);
68179 put_page(hpage);
68180 return 0;
68181@@ -1314,7 +1314,7 @@ int unpoison_memory(unsigned long pfn)
68182 return 0;
68183 }
68184 if (TestClearPageHWPoison(p))
68185- atomic_long_sub(nr_pages, &mce_bad_pages);
68186+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
68187 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
68188 return 0;
68189 }
68190@@ -1328,7 +1328,7 @@ int unpoison_memory(unsigned long pfn)
68191 */
68192 if (TestClearPageHWPoison(page)) {
68193 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
68194- atomic_long_sub(nr_pages, &mce_bad_pages);
68195+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
68196 freeit = 1;
68197 if (PageHuge(page))
68198 clear_page_hwpoison_huge_page(page);
68199@@ -1441,7 +1441,7 @@ static int soft_offline_huge_page(struct
68200 }
68201 done:
68202 if (!PageHWPoison(hpage))
68203- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
68204+ atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
68205 set_page_hwpoison_huge_page(hpage);
68206 dequeue_hwpoisoned_huge_page(hpage);
68207 /* keep elevated page count for bad page */
68208@@ -1572,7 +1572,7 @@ int soft_offline_page(struct page *page,
68209 return ret;
68210
68211 done:
68212- atomic_long_add(1, &mce_bad_pages);
68213+ atomic_long_add_unchecked(1, &mce_bad_pages);
68214 SetPageHWPoison(page);
68215 /* keep elevated page count for bad page */
68216 return ret;
68217diff -urNp linux-3.1.1/mm/mempolicy.c linux-3.1.1/mm/mempolicy.c
68218--- linux-3.1.1/mm/mempolicy.c 2011-11-11 15:19:27.000000000 -0500
68219+++ linux-3.1.1/mm/mempolicy.c 2011-11-16 18:40:44.000000000 -0500
68220@@ -639,6 +639,10 @@ static int mbind_range(struct mm_struct
68221 unsigned long vmstart;
68222 unsigned long vmend;
68223
68224+#ifdef CONFIG_PAX_SEGMEXEC
68225+ struct vm_area_struct *vma_m;
68226+#endif
68227+
68228 vma = find_vma_prev(mm, start, &prev);
68229 if (!vma || vma->vm_start > start)
68230 return -EFAULT;
68231@@ -669,6 +673,16 @@ static int mbind_range(struct mm_struct
68232 err = policy_vma(vma, new_pol);
68233 if (err)
68234 goto out;
68235+
68236+#ifdef CONFIG_PAX_SEGMEXEC
68237+ vma_m = pax_find_mirror_vma(vma);
68238+ if (vma_m) {
68239+ err = policy_vma(vma_m, new_pol);
68240+ if (err)
68241+ goto out;
68242+ }
68243+#endif
68244+
68245 }
68246
68247 out:
68248@@ -1102,6 +1116,17 @@ static long do_mbind(unsigned long start
68249
68250 if (end < start)
68251 return -EINVAL;
68252+
68253+#ifdef CONFIG_PAX_SEGMEXEC
68254+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
68255+ if (end > SEGMEXEC_TASK_SIZE)
68256+ return -EINVAL;
68257+ } else
68258+#endif
68259+
68260+ if (end > TASK_SIZE)
68261+ return -EINVAL;
68262+
68263 if (end == start)
68264 return 0;
68265
68266@@ -1320,6 +1345,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
68267 if (!mm)
68268 goto out;
68269
68270+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68271+ if (mm != current->mm &&
68272+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68273+ err = -EPERM;
68274+ goto out;
68275+ }
68276+#endif
68277+
68278 /*
68279 * Check if this process has the right to modify the specified
68280 * process. The right exists if the process has administrative
68281@@ -1329,8 +1362,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
68282 rcu_read_lock();
68283 tcred = __task_cred(task);
68284 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68285- cred->uid != tcred->suid && cred->uid != tcred->uid &&
68286- !capable(CAP_SYS_NICE)) {
68287+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68288 rcu_read_unlock();
68289 err = -EPERM;
68290 goto out;
68291diff -urNp linux-3.1.1/mm/migrate.c linux-3.1.1/mm/migrate.c
68292--- linux-3.1.1/mm/migrate.c 2011-11-11 15:19:27.000000000 -0500
68293+++ linux-3.1.1/mm/migrate.c 2011-11-16 18:40:44.000000000 -0500
68294@@ -1124,6 +1124,8 @@ static int do_pages_move(struct mm_struc
68295 unsigned long chunk_start;
68296 int err;
68297
68298+ pax_track_stack();
68299+
68300 task_nodes = cpuset_mems_allowed(task);
68301
68302 err = -ENOMEM;
68303@@ -1308,6 +1310,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
68304 if (!mm)
68305 return -EINVAL;
68306
68307+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68308+ if (mm != current->mm &&
68309+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68310+ err = -EPERM;
68311+ goto out;
68312+ }
68313+#endif
68314+
68315 /*
68316 * Check if this process has the right to modify the specified
68317 * process. The right exists if the process has administrative
68318@@ -1317,8 +1327,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
68319 rcu_read_lock();
68320 tcred = __task_cred(task);
68321 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68322- cred->uid != tcred->suid && cred->uid != tcred->uid &&
68323- !capable(CAP_SYS_NICE)) {
68324+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68325 rcu_read_unlock();
68326 err = -EPERM;
68327 goto out;
68328diff -urNp linux-3.1.1/mm/mlock.c linux-3.1.1/mm/mlock.c
68329--- linux-3.1.1/mm/mlock.c 2011-11-11 15:19:27.000000000 -0500
68330+++ linux-3.1.1/mm/mlock.c 2011-11-16 18:40:44.000000000 -0500
68331@@ -13,6 +13,7 @@
68332 #include <linux/pagemap.h>
68333 #include <linux/mempolicy.h>
68334 #include <linux/syscalls.h>
68335+#include <linux/security.h>
68336 #include <linux/sched.h>
68337 #include <linux/module.h>
68338 #include <linux/rmap.h>
68339@@ -377,6 +378,9 @@ static int do_mlock(unsigned long start,
68340 return -EINVAL;
68341 if (end == start)
68342 return 0;
68343+ if (end > TASK_SIZE)
68344+ return -EINVAL;
68345+
68346 vma = find_vma_prev(current->mm, start, &prev);
68347 if (!vma || vma->vm_start > start)
68348 return -ENOMEM;
68349@@ -387,6 +391,11 @@ static int do_mlock(unsigned long start,
68350 for (nstart = start ; ; ) {
68351 vm_flags_t newflags;
68352
68353+#ifdef CONFIG_PAX_SEGMEXEC
68354+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68355+ break;
68356+#endif
68357+
68358 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
68359
68360 newflags = vma->vm_flags | VM_LOCKED;
68361@@ -492,6 +501,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
68362 lock_limit >>= PAGE_SHIFT;
68363
68364 /* check against resource limits */
68365+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
68366 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
68367 error = do_mlock(start, len, 1);
68368 up_write(&current->mm->mmap_sem);
68369@@ -515,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
68370 static int do_mlockall(int flags)
68371 {
68372 struct vm_area_struct * vma, * prev = NULL;
68373- unsigned int def_flags = 0;
68374
68375 if (flags & MCL_FUTURE)
68376- def_flags = VM_LOCKED;
68377- current->mm->def_flags = def_flags;
68378+ current->mm->def_flags |= VM_LOCKED;
68379+ else
68380+ current->mm->def_flags &= ~VM_LOCKED;
68381 if (flags == MCL_FUTURE)
68382 goto out;
68383
68384 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
68385 vm_flags_t newflags;
68386
68387+#ifdef CONFIG_PAX_SEGMEXEC
68388+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68389+ break;
68390+#endif
68391+
68392+ BUG_ON(vma->vm_end > TASK_SIZE);
68393 newflags = vma->vm_flags | VM_LOCKED;
68394 if (!(flags & MCL_CURRENT))
68395 newflags &= ~VM_LOCKED;
68396@@ -557,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
68397 lock_limit >>= PAGE_SHIFT;
68398
68399 ret = -ENOMEM;
68400+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
68401 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
68402 capable(CAP_IPC_LOCK))
68403 ret = do_mlockall(flags);
68404diff -urNp linux-3.1.1/mm/mmap.c linux-3.1.1/mm/mmap.c
68405--- linux-3.1.1/mm/mmap.c 2011-11-11 15:19:27.000000000 -0500
68406+++ linux-3.1.1/mm/mmap.c 2011-11-16 18:40:44.000000000 -0500
68407@@ -46,6 +46,16 @@
68408 #define arch_rebalance_pgtables(addr, len) (addr)
68409 #endif
68410
68411+static inline void verify_mm_writelocked(struct mm_struct *mm)
68412+{
68413+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
68414+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
68415+ up_read(&mm->mmap_sem);
68416+ BUG();
68417+ }
68418+#endif
68419+}
68420+
68421 static void unmap_region(struct mm_struct *mm,
68422 struct vm_area_struct *vma, struct vm_area_struct *prev,
68423 unsigned long start, unsigned long end);
68424@@ -71,22 +81,32 @@ static void unmap_region(struct mm_struc
68425 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
68426 *
68427 */
68428-pgprot_t protection_map[16] = {
68429+pgprot_t protection_map[16] __read_only = {
68430 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
68431 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
68432 };
68433
68434-pgprot_t vm_get_page_prot(unsigned long vm_flags)
68435+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
68436 {
68437- return __pgprot(pgprot_val(protection_map[vm_flags &
68438+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
68439 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
68440 pgprot_val(arch_vm_get_page_prot(vm_flags)));
68441+
68442+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68443+ if (!(__supported_pte_mask & _PAGE_NX) &&
68444+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
68445+ (vm_flags & (VM_READ | VM_WRITE)))
68446+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
68447+#endif
68448+
68449+ return prot;
68450 }
68451 EXPORT_SYMBOL(vm_get_page_prot);
68452
68453 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
68454 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
68455 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
68456+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
68457 /*
68458 * Make sure vm_committed_as in one cacheline and not cacheline shared with
68459 * other variables. It can be updated by several CPUs frequently.
68460@@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma
68461 struct vm_area_struct *next = vma->vm_next;
68462
68463 might_sleep();
68464+ BUG_ON(vma->vm_mirror);
68465 if (vma->vm_ops && vma->vm_ops->close)
68466 vma->vm_ops->close(vma);
68467 if (vma->vm_file) {
68468@@ -272,6 +293,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
68469 * not page aligned -Ram Gupta
68470 */
68471 rlim = rlimit(RLIMIT_DATA);
68472+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
68473 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
68474 (mm->end_data - mm->start_data) > rlim)
68475 goto out;
68476@@ -689,6 +711,12 @@ static int
68477 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
68478 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68479 {
68480+
68481+#ifdef CONFIG_PAX_SEGMEXEC
68482+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
68483+ return 0;
68484+#endif
68485+
68486 if (is_mergeable_vma(vma, file, vm_flags) &&
68487 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
68488 if (vma->vm_pgoff == vm_pgoff)
68489@@ -708,6 +736,12 @@ static int
68490 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
68491 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68492 {
68493+
68494+#ifdef CONFIG_PAX_SEGMEXEC
68495+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
68496+ return 0;
68497+#endif
68498+
68499 if (is_mergeable_vma(vma, file, vm_flags) &&
68500 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
68501 pgoff_t vm_pglen;
68502@@ -750,13 +784,20 @@ can_vma_merge_after(struct vm_area_struc
68503 struct vm_area_struct *vma_merge(struct mm_struct *mm,
68504 struct vm_area_struct *prev, unsigned long addr,
68505 unsigned long end, unsigned long vm_flags,
68506- struct anon_vma *anon_vma, struct file *file,
68507+ struct anon_vma *anon_vma, struct file *file,
68508 pgoff_t pgoff, struct mempolicy *policy)
68509 {
68510 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
68511 struct vm_area_struct *area, *next;
68512 int err;
68513
68514+#ifdef CONFIG_PAX_SEGMEXEC
68515+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
68516+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
68517+
68518+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
68519+#endif
68520+
68521 /*
68522 * We later require that vma->vm_flags == vm_flags,
68523 * so this tests vma->vm_flags & VM_SPECIAL, too.
68524@@ -772,6 +813,15 @@ struct vm_area_struct *vma_merge(struct
68525 if (next && next->vm_end == end) /* cases 6, 7, 8 */
68526 next = next->vm_next;
68527
68528+#ifdef CONFIG_PAX_SEGMEXEC
68529+ if (prev)
68530+ prev_m = pax_find_mirror_vma(prev);
68531+ if (area)
68532+ area_m = pax_find_mirror_vma(area);
68533+ if (next)
68534+ next_m = pax_find_mirror_vma(next);
68535+#endif
68536+
68537 /*
68538 * Can it merge with the predecessor?
68539 */
68540@@ -791,9 +841,24 @@ struct vm_area_struct *vma_merge(struct
68541 /* cases 1, 6 */
68542 err = vma_adjust(prev, prev->vm_start,
68543 next->vm_end, prev->vm_pgoff, NULL);
68544- } else /* cases 2, 5, 7 */
68545+
68546+#ifdef CONFIG_PAX_SEGMEXEC
68547+ if (!err && prev_m)
68548+ err = vma_adjust(prev_m, prev_m->vm_start,
68549+ next_m->vm_end, prev_m->vm_pgoff, NULL);
68550+#endif
68551+
68552+ } else { /* cases 2, 5, 7 */
68553 err = vma_adjust(prev, prev->vm_start,
68554 end, prev->vm_pgoff, NULL);
68555+
68556+#ifdef CONFIG_PAX_SEGMEXEC
68557+ if (!err && prev_m)
68558+ err = vma_adjust(prev_m, prev_m->vm_start,
68559+ end_m, prev_m->vm_pgoff, NULL);
68560+#endif
68561+
68562+ }
68563 if (err)
68564 return NULL;
68565 khugepaged_enter_vma_merge(prev);
68566@@ -807,12 +872,27 @@ struct vm_area_struct *vma_merge(struct
68567 mpol_equal(policy, vma_policy(next)) &&
68568 can_vma_merge_before(next, vm_flags,
68569 anon_vma, file, pgoff+pglen)) {
68570- if (prev && addr < prev->vm_end) /* case 4 */
68571+ if (prev && addr < prev->vm_end) { /* case 4 */
68572 err = vma_adjust(prev, prev->vm_start,
68573 addr, prev->vm_pgoff, NULL);
68574- else /* cases 3, 8 */
68575+
68576+#ifdef CONFIG_PAX_SEGMEXEC
68577+ if (!err && prev_m)
68578+ err = vma_adjust(prev_m, prev_m->vm_start,
68579+ addr_m, prev_m->vm_pgoff, NULL);
68580+#endif
68581+
68582+ } else { /* cases 3, 8 */
68583 err = vma_adjust(area, addr, next->vm_end,
68584 next->vm_pgoff - pglen, NULL);
68585+
68586+#ifdef CONFIG_PAX_SEGMEXEC
68587+ if (!err && area_m)
68588+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
68589+ next_m->vm_pgoff - pglen, NULL);
68590+#endif
68591+
68592+ }
68593 if (err)
68594 return NULL;
68595 khugepaged_enter_vma_merge(area);
68596@@ -921,14 +1001,11 @@ none:
68597 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
68598 struct file *file, long pages)
68599 {
68600- const unsigned long stack_flags
68601- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
68602-
68603 if (file) {
68604 mm->shared_vm += pages;
68605 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
68606 mm->exec_vm += pages;
68607- } else if (flags & stack_flags)
68608+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
68609 mm->stack_vm += pages;
68610 if (flags & (VM_RESERVED|VM_IO))
68611 mm->reserved_vm += pages;
68612@@ -955,7 +1032,7 @@ unsigned long do_mmap_pgoff(struct file
68613 * (the exception is when the underlying filesystem is noexec
68614 * mounted, in which case we dont add PROT_EXEC.)
68615 */
68616- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
68617+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
68618 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
68619 prot |= PROT_EXEC;
68620
68621@@ -981,7 +1058,7 @@ unsigned long do_mmap_pgoff(struct file
68622 /* Obtain the address to map to. we verify (or select) it and ensure
68623 * that it represents a valid section of the address space.
68624 */
68625- addr = get_unmapped_area(file, addr, len, pgoff, flags);
68626+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
68627 if (addr & ~PAGE_MASK)
68628 return addr;
68629
68630@@ -992,6 +1069,36 @@ unsigned long do_mmap_pgoff(struct file
68631 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
68632 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
68633
68634+#ifdef CONFIG_PAX_MPROTECT
68635+ if (mm->pax_flags & MF_PAX_MPROTECT) {
68636+#ifndef CONFIG_PAX_MPROTECT_COMPAT
68637+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
68638+ gr_log_rwxmmap(file);
68639+
68640+#ifdef CONFIG_PAX_EMUPLT
68641+ vm_flags &= ~VM_EXEC;
68642+#else
68643+ return -EPERM;
68644+#endif
68645+
68646+ }
68647+
68648+ if (!(vm_flags & VM_EXEC))
68649+ vm_flags &= ~VM_MAYEXEC;
68650+#else
68651+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
68652+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
68653+#endif
68654+ else
68655+ vm_flags &= ~VM_MAYWRITE;
68656+ }
68657+#endif
68658+
68659+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68660+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
68661+ vm_flags &= ~VM_PAGEEXEC;
68662+#endif
68663+
68664 if (flags & MAP_LOCKED)
68665 if (!can_do_mlock())
68666 return -EPERM;
68667@@ -1003,6 +1110,7 @@ unsigned long do_mmap_pgoff(struct file
68668 locked += mm->locked_vm;
68669 lock_limit = rlimit(RLIMIT_MEMLOCK);
68670 lock_limit >>= PAGE_SHIFT;
68671+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68672 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
68673 return -EAGAIN;
68674 }
68675@@ -1073,6 +1181,9 @@ unsigned long do_mmap_pgoff(struct file
68676 if (error)
68677 return error;
68678
68679+ if (!gr_acl_handle_mmap(file, prot))
68680+ return -EACCES;
68681+
68682 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
68683 }
68684 EXPORT_SYMBOL(do_mmap_pgoff);
68685@@ -1153,7 +1264,7 @@ int vma_wants_writenotify(struct vm_area
68686 vm_flags_t vm_flags = vma->vm_flags;
68687
68688 /* If it was private or non-writable, the write bit is already clear */
68689- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
68690+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
68691 return 0;
68692
68693 /* The backer wishes to know when pages are first written to? */
68694@@ -1202,14 +1313,24 @@ unsigned long mmap_region(struct file *f
68695 unsigned long charged = 0;
68696 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
68697
68698+#ifdef CONFIG_PAX_SEGMEXEC
68699+ struct vm_area_struct *vma_m = NULL;
68700+#endif
68701+
68702+ /*
68703+ * mm->mmap_sem is required to protect against another thread
68704+ * changing the mappings in case we sleep.
68705+ */
68706+ verify_mm_writelocked(mm);
68707+
68708 /* Clear old maps */
68709 error = -ENOMEM;
68710-munmap_back:
68711 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68712 if (vma && vma->vm_start < addr + len) {
68713 if (do_munmap(mm, addr, len))
68714 return -ENOMEM;
68715- goto munmap_back;
68716+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68717+ BUG_ON(vma && vma->vm_start < addr + len);
68718 }
68719
68720 /* Check against address space limit. */
68721@@ -1258,6 +1379,16 @@ munmap_back:
68722 goto unacct_error;
68723 }
68724
68725+#ifdef CONFIG_PAX_SEGMEXEC
68726+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
68727+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
68728+ if (!vma_m) {
68729+ error = -ENOMEM;
68730+ goto free_vma;
68731+ }
68732+ }
68733+#endif
68734+
68735 vma->vm_mm = mm;
68736 vma->vm_start = addr;
68737 vma->vm_end = addr + len;
68738@@ -1281,6 +1412,19 @@ munmap_back:
68739 error = file->f_op->mmap(file, vma);
68740 if (error)
68741 goto unmap_and_free_vma;
68742+
68743+#ifdef CONFIG_PAX_SEGMEXEC
68744+ if (vma_m && (vm_flags & VM_EXECUTABLE))
68745+ added_exe_file_vma(mm);
68746+#endif
68747+
68748+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68749+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
68750+ vma->vm_flags |= VM_PAGEEXEC;
68751+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
68752+ }
68753+#endif
68754+
68755 if (vm_flags & VM_EXECUTABLE)
68756 added_exe_file_vma(mm);
68757
68758@@ -1316,6 +1460,11 @@ munmap_back:
68759 vma_link(mm, vma, prev, rb_link, rb_parent);
68760 file = vma->vm_file;
68761
68762+#ifdef CONFIG_PAX_SEGMEXEC
68763+ if (vma_m)
68764+ BUG_ON(pax_mirror_vma(vma_m, vma));
68765+#endif
68766+
68767 /* Once vma denies write, undo our temporary denial count */
68768 if (correct_wcount)
68769 atomic_inc(&inode->i_writecount);
68770@@ -1324,6 +1473,7 @@ out:
68771
68772 mm->total_vm += len >> PAGE_SHIFT;
68773 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
68774+ track_exec_limit(mm, addr, addr + len, vm_flags);
68775 if (vm_flags & VM_LOCKED) {
68776 if (!mlock_vma_pages_range(vma, addr, addr + len))
68777 mm->locked_vm += (len >> PAGE_SHIFT);
68778@@ -1341,6 +1491,12 @@ unmap_and_free_vma:
68779 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
68780 charged = 0;
68781 free_vma:
68782+
68783+#ifdef CONFIG_PAX_SEGMEXEC
68784+ if (vma_m)
68785+ kmem_cache_free(vm_area_cachep, vma_m);
68786+#endif
68787+
68788 kmem_cache_free(vm_area_cachep, vma);
68789 unacct_error:
68790 if (charged)
68791@@ -1348,6 +1504,44 @@ unacct_error:
68792 return error;
68793 }
68794
68795+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
68796+{
68797+ if (!vma) {
68798+#ifdef CONFIG_STACK_GROWSUP
68799+ if (addr > sysctl_heap_stack_gap)
68800+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
68801+ else
68802+ vma = find_vma(current->mm, 0);
68803+ if (vma && (vma->vm_flags & VM_GROWSUP))
68804+ return false;
68805+#endif
68806+ return true;
68807+ }
68808+
68809+ if (addr + len > vma->vm_start)
68810+ return false;
68811+
68812+ if (vma->vm_flags & VM_GROWSDOWN)
68813+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
68814+#ifdef CONFIG_STACK_GROWSUP
68815+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
68816+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
68817+#endif
68818+
68819+ return true;
68820+}
68821+
68822+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
68823+{
68824+ if (vma->vm_start < len)
68825+ return -ENOMEM;
68826+ if (!(vma->vm_flags & VM_GROWSDOWN))
68827+ return vma->vm_start - len;
68828+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
68829+ return vma->vm_start - len - sysctl_heap_stack_gap;
68830+ return -ENOMEM;
68831+}
68832+
68833 /* Get an address range which is currently unmapped.
68834 * For shmat() with addr=0.
68835 *
68836@@ -1374,18 +1568,23 @@ arch_get_unmapped_area(struct file *filp
68837 if (flags & MAP_FIXED)
68838 return addr;
68839
68840+#ifdef CONFIG_PAX_RANDMMAP
68841+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
68842+#endif
68843+
68844 if (addr) {
68845 addr = PAGE_ALIGN(addr);
68846- vma = find_vma(mm, addr);
68847- if (TASK_SIZE - len >= addr &&
68848- (!vma || addr + len <= vma->vm_start))
68849- return addr;
68850+ if (TASK_SIZE - len >= addr) {
68851+ vma = find_vma(mm, addr);
68852+ if (check_heap_stack_gap(vma, addr, len))
68853+ return addr;
68854+ }
68855 }
68856 if (len > mm->cached_hole_size) {
68857- start_addr = addr = mm->free_area_cache;
68858+ start_addr = addr = mm->free_area_cache;
68859 } else {
68860- start_addr = addr = TASK_UNMAPPED_BASE;
68861- mm->cached_hole_size = 0;
68862+ start_addr = addr = mm->mmap_base;
68863+ mm->cached_hole_size = 0;
68864 }
68865
68866 full_search:
68867@@ -1396,34 +1595,40 @@ full_search:
68868 * Start a new search - just in case we missed
68869 * some holes.
68870 */
68871- if (start_addr != TASK_UNMAPPED_BASE) {
68872- addr = TASK_UNMAPPED_BASE;
68873- start_addr = addr;
68874+ if (start_addr != mm->mmap_base) {
68875+ start_addr = addr = mm->mmap_base;
68876 mm->cached_hole_size = 0;
68877 goto full_search;
68878 }
68879 return -ENOMEM;
68880 }
68881- if (!vma || addr + len <= vma->vm_start) {
68882- /*
68883- * Remember the place where we stopped the search:
68884- */
68885- mm->free_area_cache = addr + len;
68886- return addr;
68887- }
68888+ if (check_heap_stack_gap(vma, addr, len))
68889+ break;
68890 if (addr + mm->cached_hole_size < vma->vm_start)
68891 mm->cached_hole_size = vma->vm_start - addr;
68892 addr = vma->vm_end;
68893 }
68894+
68895+ /*
68896+ * Remember the place where we stopped the search:
68897+ */
68898+ mm->free_area_cache = addr + len;
68899+ return addr;
68900 }
68901 #endif
68902
68903 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
68904 {
68905+
68906+#ifdef CONFIG_PAX_SEGMEXEC
68907+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
68908+ return;
68909+#endif
68910+
68911 /*
68912 * Is this a new hole at the lowest possible address?
68913 */
68914- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
68915+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
68916 mm->free_area_cache = addr;
68917 mm->cached_hole_size = ~0UL;
68918 }
68919@@ -1441,7 +1646,7 @@ arch_get_unmapped_area_topdown(struct fi
68920 {
68921 struct vm_area_struct *vma;
68922 struct mm_struct *mm = current->mm;
68923- unsigned long addr = addr0;
68924+ unsigned long base = mm->mmap_base, addr = addr0;
68925
68926 /* requested length too big for entire address space */
68927 if (len > TASK_SIZE)
68928@@ -1450,13 +1655,18 @@ arch_get_unmapped_area_topdown(struct fi
68929 if (flags & MAP_FIXED)
68930 return addr;
68931
68932+#ifdef CONFIG_PAX_RANDMMAP
68933+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
68934+#endif
68935+
68936 /* requesting a specific address */
68937 if (addr) {
68938 addr = PAGE_ALIGN(addr);
68939- vma = find_vma(mm, addr);
68940- if (TASK_SIZE - len >= addr &&
68941- (!vma || addr + len <= vma->vm_start))
68942- return addr;
68943+ if (TASK_SIZE - len >= addr) {
68944+ vma = find_vma(mm, addr);
68945+ if (check_heap_stack_gap(vma, addr, len))
68946+ return addr;
68947+ }
68948 }
68949
68950 /* check if free_area_cache is useful for us */
68951@@ -1471,7 +1681,7 @@ arch_get_unmapped_area_topdown(struct fi
68952 /* make sure it can fit in the remaining address space */
68953 if (addr > len) {
68954 vma = find_vma(mm, addr-len);
68955- if (!vma || addr <= vma->vm_start)
68956+ if (check_heap_stack_gap(vma, addr - len, len))
68957 /* remember the address as a hint for next time */
68958 return (mm->free_area_cache = addr-len);
68959 }
68960@@ -1488,7 +1698,7 @@ arch_get_unmapped_area_topdown(struct fi
68961 * return with success:
68962 */
68963 vma = find_vma(mm, addr);
68964- if (!vma || addr+len <= vma->vm_start)
68965+ if (check_heap_stack_gap(vma, addr, len))
68966 /* remember the address as a hint for next time */
68967 return (mm->free_area_cache = addr);
68968
68969@@ -1497,8 +1707,8 @@ arch_get_unmapped_area_topdown(struct fi
68970 mm->cached_hole_size = vma->vm_start - addr;
68971
68972 /* try just below the current vma->vm_start */
68973- addr = vma->vm_start-len;
68974- } while (len < vma->vm_start);
68975+ addr = skip_heap_stack_gap(vma, len);
68976+ } while (!IS_ERR_VALUE(addr));
68977
68978 bottomup:
68979 /*
68980@@ -1507,13 +1717,21 @@ bottomup:
68981 * can happen with large stack limits and large mmap()
68982 * allocations.
68983 */
68984+ mm->mmap_base = TASK_UNMAPPED_BASE;
68985+
68986+#ifdef CONFIG_PAX_RANDMMAP
68987+ if (mm->pax_flags & MF_PAX_RANDMMAP)
68988+ mm->mmap_base += mm->delta_mmap;
68989+#endif
68990+
68991+ mm->free_area_cache = mm->mmap_base;
68992 mm->cached_hole_size = ~0UL;
68993- mm->free_area_cache = TASK_UNMAPPED_BASE;
68994 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
68995 /*
68996 * Restore the topdown base:
68997 */
68998- mm->free_area_cache = mm->mmap_base;
68999+ mm->mmap_base = base;
69000+ mm->free_area_cache = base;
69001 mm->cached_hole_size = ~0UL;
69002
69003 return addr;
69004@@ -1522,6 +1740,12 @@ bottomup:
69005
69006 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
69007 {
69008+
69009+#ifdef CONFIG_PAX_SEGMEXEC
69010+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
69011+ return;
69012+#endif
69013+
69014 /*
69015 * Is this a new hole at the highest possible address?
69016 */
69017@@ -1529,8 +1753,10 @@ void arch_unmap_area_topdown(struct mm_s
69018 mm->free_area_cache = addr;
69019
69020 /* dont allow allocations above current base */
69021- if (mm->free_area_cache > mm->mmap_base)
69022+ if (mm->free_area_cache > mm->mmap_base) {
69023 mm->free_area_cache = mm->mmap_base;
69024+ mm->cached_hole_size = ~0UL;
69025+ }
69026 }
69027
69028 unsigned long
69029@@ -1638,6 +1864,28 @@ out:
69030 return prev ? prev->vm_next : vma;
69031 }
69032
69033+#ifdef CONFIG_PAX_SEGMEXEC
69034+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
69035+{
69036+ struct vm_area_struct *vma_m;
69037+
69038+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
69039+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
69040+ BUG_ON(vma->vm_mirror);
69041+ return NULL;
69042+ }
69043+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
69044+ vma_m = vma->vm_mirror;
69045+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
69046+ BUG_ON(vma->vm_file != vma_m->vm_file);
69047+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
69048+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
69049+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
69050+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
69051+ return vma_m;
69052+}
69053+#endif
69054+
69055 /*
69056 * Verify that the stack growth is acceptable and
69057 * update accounting. This is shared with both the
69058@@ -1654,6 +1902,7 @@ static int acct_stack_growth(struct vm_a
69059 return -ENOMEM;
69060
69061 /* Stack limit test */
69062+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
69063 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
69064 return -ENOMEM;
69065
69066@@ -1664,6 +1913,7 @@ static int acct_stack_growth(struct vm_a
69067 locked = mm->locked_vm + grow;
69068 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
69069 limit >>= PAGE_SHIFT;
69070+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
69071 if (locked > limit && !capable(CAP_IPC_LOCK))
69072 return -ENOMEM;
69073 }
69074@@ -1694,37 +1944,48 @@ static int acct_stack_growth(struct vm_a
69075 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
69076 * vma is the last one with address > vma->vm_end. Have to extend vma.
69077 */
69078+#ifndef CONFIG_IA64
69079+static
69080+#endif
69081 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
69082 {
69083 int error;
69084+ bool locknext;
69085
69086 if (!(vma->vm_flags & VM_GROWSUP))
69087 return -EFAULT;
69088
69089+ /* Also guard against wrapping around to address 0. */
69090+ if (address < PAGE_ALIGN(address+1))
69091+ address = PAGE_ALIGN(address+1);
69092+ else
69093+ return -ENOMEM;
69094+
69095 /*
69096 * We must make sure the anon_vma is allocated
69097 * so that the anon_vma locking is not a noop.
69098 */
69099 if (unlikely(anon_vma_prepare(vma)))
69100 return -ENOMEM;
69101+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
69102+ if (locknext && anon_vma_prepare(vma->vm_next))
69103+ return -ENOMEM;
69104 vma_lock_anon_vma(vma);
69105+ if (locknext)
69106+ vma_lock_anon_vma(vma->vm_next);
69107
69108 /*
69109 * vma->vm_start/vm_end cannot change under us because the caller
69110 * is required to hold the mmap_sem in read mode. We need the
69111- * anon_vma lock to serialize against concurrent expand_stacks.
69112- * Also guard against wrapping around to address 0.
69113+ * anon_vma locks to serialize against concurrent expand_stacks
69114+ * and expand_upwards.
69115 */
69116- if (address < PAGE_ALIGN(address+4))
69117- address = PAGE_ALIGN(address+4);
69118- else {
69119- vma_unlock_anon_vma(vma);
69120- return -ENOMEM;
69121- }
69122 error = 0;
69123
69124 /* Somebody else might have raced and expanded it already */
69125- if (address > vma->vm_end) {
69126+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
69127+ error = -ENOMEM;
69128+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
69129 unsigned long size, grow;
69130
69131 size = address - vma->vm_start;
69132@@ -1739,6 +2000,8 @@ int expand_upwards(struct vm_area_struct
69133 }
69134 }
69135 }
69136+ if (locknext)
69137+ vma_unlock_anon_vma(vma->vm_next);
69138 vma_unlock_anon_vma(vma);
69139 khugepaged_enter_vma_merge(vma);
69140 return error;
69141@@ -1752,6 +2015,8 @@ int expand_downwards(struct vm_area_stru
69142 unsigned long address)
69143 {
69144 int error;
69145+ bool lockprev = false;
69146+ struct vm_area_struct *prev;
69147
69148 /*
69149 * We must make sure the anon_vma is allocated
69150@@ -1765,6 +2030,15 @@ int expand_downwards(struct vm_area_stru
69151 if (error)
69152 return error;
69153
69154+ prev = vma->vm_prev;
69155+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
69156+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
69157+#endif
69158+ if (lockprev && anon_vma_prepare(prev))
69159+ return -ENOMEM;
69160+ if (lockprev)
69161+ vma_lock_anon_vma(prev);
69162+
69163 vma_lock_anon_vma(vma);
69164
69165 /*
69166@@ -1774,9 +2048,17 @@ int expand_downwards(struct vm_area_stru
69167 */
69168
69169 /* Somebody else might have raced and expanded it already */
69170- if (address < vma->vm_start) {
69171+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
69172+ error = -ENOMEM;
69173+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
69174 unsigned long size, grow;
69175
69176+#ifdef CONFIG_PAX_SEGMEXEC
69177+ struct vm_area_struct *vma_m;
69178+
69179+ vma_m = pax_find_mirror_vma(vma);
69180+#endif
69181+
69182 size = vma->vm_end - address;
69183 grow = (vma->vm_start - address) >> PAGE_SHIFT;
69184
69185@@ -1786,11 +2068,22 @@ int expand_downwards(struct vm_area_stru
69186 if (!error) {
69187 vma->vm_start = address;
69188 vma->vm_pgoff -= grow;
69189+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
69190+
69191+#ifdef CONFIG_PAX_SEGMEXEC
69192+ if (vma_m) {
69193+ vma_m->vm_start -= grow << PAGE_SHIFT;
69194+ vma_m->vm_pgoff -= grow;
69195+ }
69196+#endif
69197+
69198 perf_event_mmap(vma);
69199 }
69200 }
69201 }
69202 vma_unlock_anon_vma(vma);
69203+ if (lockprev)
69204+ vma_unlock_anon_vma(prev);
69205 khugepaged_enter_vma_merge(vma);
69206 return error;
69207 }
69208@@ -1860,6 +2153,13 @@ static void remove_vma_list(struct mm_st
69209 do {
69210 long nrpages = vma_pages(vma);
69211
69212+#ifdef CONFIG_PAX_SEGMEXEC
69213+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
69214+ vma = remove_vma(vma);
69215+ continue;
69216+ }
69217+#endif
69218+
69219 mm->total_vm -= nrpages;
69220 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
69221 vma = remove_vma(vma);
69222@@ -1905,6 +2205,16 @@ detach_vmas_to_be_unmapped(struct mm_str
69223 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
69224 vma->vm_prev = NULL;
69225 do {
69226+
69227+#ifdef CONFIG_PAX_SEGMEXEC
69228+ if (vma->vm_mirror) {
69229+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
69230+ vma->vm_mirror->vm_mirror = NULL;
69231+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
69232+ vma->vm_mirror = NULL;
69233+ }
69234+#endif
69235+
69236 rb_erase(&vma->vm_rb, &mm->mm_rb);
69237 mm->map_count--;
69238 tail_vma = vma;
69239@@ -1933,14 +2243,33 @@ static int __split_vma(struct mm_struct
69240 struct vm_area_struct *new;
69241 int err = -ENOMEM;
69242
69243+#ifdef CONFIG_PAX_SEGMEXEC
69244+ struct vm_area_struct *vma_m, *new_m = NULL;
69245+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
69246+#endif
69247+
69248 if (is_vm_hugetlb_page(vma) && (addr &
69249 ~(huge_page_mask(hstate_vma(vma)))))
69250 return -EINVAL;
69251
69252+#ifdef CONFIG_PAX_SEGMEXEC
69253+ vma_m = pax_find_mirror_vma(vma);
69254+#endif
69255+
69256 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69257 if (!new)
69258 goto out_err;
69259
69260+#ifdef CONFIG_PAX_SEGMEXEC
69261+ if (vma_m) {
69262+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69263+ if (!new_m) {
69264+ kmem_cache_free(vm_area_cachep, new);
69265+ goto out_err;
69266+ }
69267+ }
69268+#endif
69269+
69270 /* most fields are the same, copy all, and then fixup */
69271 *new = *vma;
69272
69273@@ -1953,6 +2282,22 @@ static int __split_vma(struct mm_struct
69274 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
69275 }
69276
69277+#ifdef CONFIG_PAX_SEGMEXEC
69278+ if (vma_m) {
69279+ *new_m = *vma_m;
69280+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
69281+ new_m->vm_mirror = new;
69282+ new->vm_mirror = new_m;
69283+
69284+ if (new_below)
69285+ new_m->vm_end = addr_m;
69286+ else {
69287+ new_m->vm_start = addr_m;
69288+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
69289+ }
69290+ }
69291+#endif
69292+
69293 pol = mpol_dup(vma_policy(vma));
69294 if (IS_ERR(pol)) {
69295 err = PTR_ERR(pol);
69296@@ -1978,6 +2323,42 @@ static int __split_vma(struct mm_struct
69297 else
69298 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
69299
69300+#ifdef CONFIG_PAX_SEGMEXEC
69301+ if (!err && vma_m) {
69302+ if (anon_vma_clone(new_m, vma_m))
69303+ goto out_free_mpol;
69304+
69305+ mpol_get(pol);
69306+ vma_set_policy(new_m, pol);
69307+
69308+ if (new_m->vm_file) {
69309+ get_file(new_m->vm_file);
69310+ if (vma_m->vm_flags & VM_EXECUTABLE)
69311+ added_exe_file_vma(mm);
69312+ }
69313+
69314+ if (new_m->vm_ops && new_m->vm_ops->open)
69315+ new_m->vm_ops->open(new_m);
69316+
69317+ if (new_below)
69318+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
69319+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
69320+ else
69321+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
69322+
69323+ if (err) {
69324+ if (new_m->vm_ops && new_m->vm_ops->close)
69325+ new_m->vm_ops->close(new_m);
69326+ if (new_m->vm_file) {
69327+ if (vma_m->vm_flags & VM_EXECUTABLE)
69328+ removed_exe_file_vma(mm);
69329+ fput(new_m->vm_file);
69330+ }
69331+ mpol_put(pol);
69332+ }
69333+ }
69334+#endif
69335+
69336 /* Success. */
69337 if (!err)
69338 return 0;
69339@@ -1990,10 +2371,18 @@ static int __split_vma(struct mm_struct
69340 removed_exe_file_vma(mm);
69341 fput(new->vm_file);
69342 }
69343- unlink_anon_vmas(new);
69344 out_free_mpol:
69345 mpol_put(pol);
69346 out_free_vma:
69347+
69348+#ifdef CONFIG_PAX_SEGMEXEC
69349+ if (new_m) {
69350+ unlink_anon_vmas(new_m);
69351+ kmem_cache_free(vm_area_cachep, new_m);
69352+ }
69353+#endif
69354+
69355+ unlink_anon_vmas(new);
69356 kmem_cache_free(vm_area_cachep, new);
69357 out_err:
69358 return err;
69359@@ -2006,6 +2395,15 @@ static int __split_vma(struct mm_struct
69360 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
69361 unsigned long addr, int new_below)
69362 {
69363+
69364+#ifdef CONFIG_PAX_SEGMEXEC
69365+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
69366+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
69367+ if (mm->map_count >= sysctl_max_map_count-1)
69368+ return -ENOMEM;
69369+ } else
69370+#endif
69371+
69372 if (mm->map_count >= sysctl_max_map_count)
69373 return -ENOMEM;
69374
69375@@ -2017,11 +2415,30 @@ int split_vma(struct mm_struct *mm, stru
69376 * work. This now handles partial unmappings.
69377 * Jeremy Fitzhardinge <jeremy@goop.org>
69378 */
69379+#ifdef CONFIG_PAX_SEGMEXEC
69380 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69381 {
69382+ int ret = __do_munmap(mm, start, len);
69383+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
69384+ return ret;
69385+
69386+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
69387+}
69388+
69389+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69390+#else
69391+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69392+#endif
69393+{
69394 unsigned long end;
69395 struct vm_area_struct *vma, *prev, *last;
69396
69397+ /*
69398+ * mm->mmap_sem is required to protect against another thread
69399+ * changing the mappings in case we sleep.
69400+ */
69401+ verify_mm_writelocked(mm);
69402+
69403 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
69404 return -EINVAL;
69405
69406@@ -2096,6 +2513,8 @@ int do_munmap(struct mm_struct *mm, unsi
69407 /* Fix up all other VM information */
69408 remove_vma_list(mm, vma);
69409
69410+ track_exec_limit(mm, start, end, 0UL);
69411+
69412 return 0;
69413 }
69414
69415@@ -2108,22 +2527,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
69416
69417 profile_munmap(addr);
69418
69419+#ifdef CONFIG_PAX_SEGMEXEC
69420+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
69421+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
69422+ return -EINVAL;
69423+#endif
69424+
69425 down_write(&mm->mmap_sem);
69426 ret = do_munmap(mm, addr, len);
69427 up_write(&mm->mmap_sem);
69428 return ret;
69429 }
69430
69431-static inline void verify_mm_writelocked(struct mm_struct *mm)
69432-{
69433-#ifdef CONFIG_DEBUG_VM
69434- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
69435- WARN_ON(1);
69436- up_read(&mm->mmap_sem);
69437- }
69438-#endif
69439-}
69440-
69441 /*
69442 * this is really a simplified "do_mmap". it only handles
69443 * anonymous maps. eventually we may be able to do some
69444@@ -2137,6 +2552,7 @@ unsigned long do_brk(unsigned long addr,
69445 struct rb_node ** rb_link, * rb_parent;
69446 pgoff_t pgoff = addr >> PAGE_SHIFT;
69447 int error;
69448+ unsigned long charged;
69449
69450 len = PAGE_ALIGN(len);
69451 if (!len)
69452@@ -2148,16 +2564,30 @@ unsigned long do_brk(unsigned long addr,
69453
69454 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
69455
69456+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
69457+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
69458+ flags &= ~VM_EXEC;
69459+
69460+#ifdef CONFIG_PAX_MPROTECT
69461+ if (mm->pax_flags & MF_PAX_MPROTECT)
69462+ flags &= ~VM_MAYEXEC;
69463+#endif
69464+
69465+ }
69466+#endif
69467+
69468 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
69469 if (error & ~PAGE_MASK)
69470 return error;
69471
69472+ charged = len >> PAGE_SHIFT;
69473+
69474 /*
69475 * mlock MCL_FUTURE?
69476 */
69477 if (mm->def_flags & VM_LOCKED) {
69478 unsigned long locked, lock_limit;
69479- locked = len >> PAGE_SHIFT;
69480+ locked = charged;
69481 locked += mm->locked_vm;
69482 lock_limit = rlimit(RLIMIT_MEMLOCK);
69483 lock_limit >>= PAGE_SHIFT;
69484@@ -2174,22 +2604,22 @@ unsigned long do_brk(unsigned long addr,
69485 /*
69486 * Clear old maps. this also does some error checking for us
69487 */
69488- munmap_back:
69489 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69490 if (vma && vma->vm_start < addr + len) {
69491 if (do_munmap(mm, addr, len))
69492 return -ENOMEM;
69493- goto munmap_back;
69494+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69495+ BUG_ON(vma && vma->vm_start < addr + len);
69496 }
69497
69498 /* Check against address space limits *after* clearing old maps... */
69499- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
69500+ if (!may_expand_vm(mm, charged))
69501 return -ENOMEM;
69502
69503 if (mm->map_count > sysctl_max_map_count)
69504 return -ENOMEM;
69505
69506- if (security_vm_enough_memory(len >> PAGE_SHIFT))
69507+ if (security_vm_enough_memory(charged))
69508 return -ENOMEM;
69509
69510 /* Can we just expand an old private anonymous mapping? */
69511@@ -2203,7 +2633,7 @@ unsigned long do_brk(unsigned long addr,
69512 */
69513 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69514 if (!vma) {
69515- vm_unacct_memory(len >> PAGE_SHIFT);
69516+ vm_unacct_memory(charged);
69517 return -ENOMEM;
69518 }
69519
69520@@ -2217,11 +2647,12 @@ unsigned long do_brk(unsigned long addr,
69521 vma_link(mm, vma, prev, rb_link, rb_parent);
69522 out:
69523 perf_event_mmap(vma);
69524- mm->total_vm += len >> PAGE_SHIFT;
69525+ mm->total_vm += charged;
69526 if (flags & VM_LOCKED) {
69527 if (!mlock_vma_pages_range(vma, addr, addr + len))
69528- mm->locked_vm += (len >> PAGE_SHIFT);
69529+ mm->locked_vm += charged;
69530 }
69531+ track_exec_limit(mm, addr, addr + len, flags);
69532 return addr;
69533 }
69534
69535@@ -2268,8 +2699,10 @@ void exit_mmap(struct mm_struct *mm)
69536 * Walk the list again, actually closing and freeing it,
69537 * with preemption enabled, without holding any MM locks.
69538 */
69539- while (vma)
69540+ while (vma) {
69541+ vma->vm_mirror = NULL;
69542 vma = remove_vma(vma);
69543+ }
69544
69545 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
69546 }
69547@@ -2283,6 +2716,13 @@ int insert_vm_struct(struct mm_struct *
69548 struct vm_area_struct * __vma, * prev;
69549 struct rb_node ** rb_link, * rb_parent;
69550
69551+#ifdef CONFIG_PAX_SEGMEXEC
69552+ struct vm_area_struct *vma_m = NULL;
69553+#endif
69554+
69555+ if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
69556+ return -EPERM;
69557+
69558 /*
69559 * The vm_pgoff of a purely anonymous vma should be irrelevant
69560 * until its first write fault, when page's anon_vma and index
69561@@ -2305,7 +2745,22 @@ int insert_vm_struct(struct mm_struct *
69562 if ((vma->vm_flags & VM_ACCOUNT) &&
69563 security_vm_enough_memory_mm(mm, vma_pages(vma)))
69564 return -ENOMEM;
69565+
69566+#ifdef CONFIG_PAX_SEGMEXEC
69567+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
69568+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69569+ if (!vma_m)
69570+ return -ENOMEM;
69571+ }
69572+#endif
69573+
69574 vma_link(mm, vma, prev, rb_link, rb_parent);
69575+
69576+#ifdef CONFIG_PAX_SEGMEXEC
69577+ if (vma_m)
69578+ BUG_ON(pax_mirror_vma(vma_m, vma));
69579+#endif
69580+
69581 return 0;
69582 }
69583
69584@@ -2323,6 +2778,8 @@ struct vm_area_struct *copy_vma(struct v
69585 struct rb_node **rb_link, *rb_parent;
69586 struct mempolicy *pol;
69587
69588+ BUG_ON(vma->vm_mirror);
69589+
69590 /*
69591 * If anonymous vma has not yet been faulted, update new pgoff
69592 * to match new location, to increase its chance of merging.
69593@@ -2373,6 +2830,39 @@ struct vm_area_struct *copy_vma(struct v
69594 return NULL;
69595 }
69596
69597+#ifdef CONFIG_PAX_SEGMEXEC
69598+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
69599+{
69600+ struct vm_area_struct *prev_m;
69601+ struct rb_node **rb_link_m, *rb_parent_m;
69602+ struct mempolicy *pol_m;
69603+
69604+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
69605+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
69606+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
69607+ *vma_m = *vma;
69608+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
69609+ if (anon_vma_clone(vma_m, vma))
69610+ return -ENOMEM;
69611+ pol_m = vma_policy(vma_m);
69612+ mpol_get(pol_m);
69613+ vma_set_policy(vma_m, pol_m);
69614+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
69615+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
69616+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
69617+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
69618+ if (vma_m->vm_file)
69619+ get_file(vma_m->vm_file);
69620+ if (vma_m->vm_ops && vma_m->vm_ops->open)
69621+ vma_m->vm_ops->open(vma_m);
69622+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
69623+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
69624+ vma_m->vm_mirror = vma;
69625+ vma->vm_mirror = vma_m;
69626+ return 0;
69627+}
69628+#endif
69629+
69630 /*
69631 * Return true if the calling process may expand its vm space by the passed
69632 * number of pages
69633@@ -2383,7 +2873,7 @@ int may_expand_vm(struct mm_struct *mm,
69634 unsigned long lim;
69635
69636 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
69637-
69638+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
69639 if (cur + npages > lim)
69640 return 0;
69641 return 1;
69642@@ -2454,6 +2944,22 @@ int install_special_mapping(struct mm_st
69643 vma->vm_start = addr;
69644 vma->vm_end = addr + len;
69645
69646+#ifdef CONFIG_PAX_MPROTECT
69647+ if (mm->pax_flags & MF_PAX_MPROTECT) {
69648+#ifndef CONFIG_PAX_MPROTECT_COMPAT
69649+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
69650+ return -EPERM;
69651+ if (!(vm_flags & VM_EXEC))
69652+ vm_flags &= ~VM_MAYEXEC;
69653+#else
69654+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
69655+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
69656+#endif
69657+ else
69658+ vm_flags &= ~VM_MAYWRITE;
69659+ }
69660+#endif
69661+
69662 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
69663 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
69664
69665diff -urNp linux-3.1.1/mm/mprotect.c linux-3.1.1/mm/mprotect.c
69666--- linux-3.1.1/mm/mprotect.c 2011-11-11 15:19:27.000000000 -0500
69667+++ linux-3.1.1/mm/mprotect.c 2011-11-16 18:40:44.000000000 -0500
69668@@ -23,10 +23,16 @@
69669 #include <linux/mmu_notifier.h>
69670 #include <linux/migrate.h>
69671 #include <linux/perf_event.h>
69672+
69673+#ifdef CONFIG_PAX_MPROTECT
69674+#include <linux/elf.h>
69675+#endif
69676+
69677 #include <asm/uaccess.h>
69678 #include <asm/pgtable.h>
69679 #include <asm/cacheflush.h>
69680 #include <asm/tlbflush.h>
69681+#include <asm/mmu_context.h>
69682
69683 #ifndef pgprot_modify
69684 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
69685@@ -141,6 +147,48 @@ static void change_protection(struct vm_
69686 flush_tlb_range(vma, start, end);
69687 }
69688
69689+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69690+/* called while holding the mmap semaphor for writing except stack expansion */
69691+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
69692+{
69693+ unsigned long oldlimit, newlimit = 0UL;
69694+
69695+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
69696+ return;
69697+
69698+ spin_lock(&mm->page_table_lock);
69699+ oldlimit = mm->context.user_cs_limit;
69700+ if ((prot & VM_EXEC) && oldlimit < end)
69701+ /* USER_CS limit moved up */
69702+ newlimit = end;
69703+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
69704+ /* USER_CS limit moved down */
69705+ newlimit = start;
69706+
69707+ if (newlimit) {
69708+ mm->context.user_cs_limit = newlimit;
69709+
69710+#ifdef CONFIG_SMP
69711+ wmb();
69712+ cpus_clear(mm->context.cpu_user_cs_mask);
69713+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
69714+#endif
69715+
69716+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
69717+ }
69718+ spin_unlock(&mm->page_table_lock);
69719+ if (newlimit == end) {
69720+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
69721+
69722+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
69723+ if (is_vm_hugetlb_page(vma))
69724+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
69725+ else
69726+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
69727+ }
69728+}
69729+#endif
69730+
69731 int
69732 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
69733 unsigned long start, unsigned long end, unsigned long newflags)
69734@@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vm
69735 int error;
69736 int dirty_accountable = 0;
69737
69738+#ifdef CONFIG_PAX_SEGMEXEC
69739+ struct vm_area_struct *vma_m = NULL;
69740+ unsigned long start_m, end_m;
69741+
69742+ start_m = start + SEGMEXEC_TASK_SIZE;
69743+ end_m = end + SEGMEXEC_TASK_SIZE;
69744+#endif
69745+
69746 if (newflags == oldflags) {
69747 *pprev = vma;
69748 return 0;
69749 }
69750
69751+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
69752+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
69753+
69754+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
69755+ return -ENOMEM;
69756+
69757+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
69758+ return -ENOMEM;
69759+ }
69760+
69761 /*
69762 * If we make a private mapping writable we increase our commit;
69763 * but (without finer accounting) cannot reduce our commit if we
69764@@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vm
69765 }
69766 }
69767
69768+#ifdef CONFIG_PAX_SEGMEXEC
69769+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
69770+ if (start != vma->vm_start) {
69771+ error = split_vma(mm, vma, start, 1);
69772+ if (error)
69773+ goto fail;
69774+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
69775+ *pprev = (*pprev)->vm_next;
69776+ }
69777+
69778+ if (end != vma->vm_end) {
69779+ error = split_vma(mm, vma, end, 0);
69780+ if (error)
69781+ goto fail;
69782+ }
69783+
69784+ if (pax_find_mirror_vma(vma)) {
69785+ error = __do_munmap(mm, start_m, end_m - start_m);
69786+ if (error)
69787+ goto fail;
69788+ } else {
69789+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69790+ if (!vma_m) {
69791+ error = -ENOMEM;
69792+ goto fail;
69793+ }
69794+ vma->vm_flags = newflags;
69795+ error = pax_mirror_vma(vma_m, vma);
69796+ if (error) {
69797+ vma->vm_flags = oldflags;
69798+ goto fail;
69799+ }
69800+ }
69801+ }
69802+#endif
69803+
69804 /*
69805 * First try to merge with previous and/or next vma.
69806 */
69807@@ -204,9 +306,21 @@ success:
69808 * vm_flags and vm_page_prot are protected by the mmap_sem
69809 * held in write mode.
69810 */
69811+
69812+#ifdef CONFIG_PAX_SEGMEXEC
69813+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
69814+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
69815+#endif
69816+
69817 vma->vm_flags = newflags;
69818+
69819+#ifdef CONFIG_PAX_MPROTECT
69820+ if (mm->binfmt && mm->binfmt->handle_mprotect)
69821+ mm->binfmt->handle_mprotect(vma, newflags);
69822+#endif
69823+
69824 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
69825- vm_get_page_prot(newflags));
69826+ vm_get_page_prot(vma->vm_flags));
69827
69828 if (vma_wants_writenotify(vma)) {
69829 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
69830@@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69831 end = start + len;
69832 if (end <= start)
69833 return -ENOMEM;
69834+
69835+#ifdef CONFIG_PAX_SEGMEXEC
69836+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
69837+ if (end > SEGMEXEC_TASK_SIZE)
69838+ return -EINVAL;
69839+ } else
69840+#endif
69841+
69842+ if (end > TASK_SIZE)
69843+ return -EINVAL;
69844+
69845 if (!arch_validate_prot(prot))
69846 return -EINVAL;
69847
69848@@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69849 /*
69850 * Does the application expect PROT_READ to imply PROT_EXEC:
69851 */
69852- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
69853+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
69854 prot |= PROT_EXEC;
69855
69856 vm_flags = calc_vm_prot_bits(prot);
69857@@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69858 if (start > vma->vm_start)
69859 prev = vma;
69860
69861+#ifdef CONFIG_PAX_MPROTECT
69862+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
69863+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
69864+#endif
69865+
69866 for (nstart = start ; ; ) {
69867 unsigned long newflags;
69868
69869@@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69870
69871 /* newflags >> 4 shift VM_MAY% in place of VM_% */
69872 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
69873+ if (prot & (PROT_WRITE | PROT_EXEC))
69874+ gr_log_rwxmprotect(vma->vm_file);
69875+
69876+ error = -EACCES;
69877+ goto out;
69878+ }
69879+
69880+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
69881 error = -EACCES;
69882 goto out;
69883 }
69884@@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69885 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
69886 if (error)
69887 goto out;
69888+
69889+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
69890+
69891 nstart = tmp;
69892
69893 if (nstart < prev->vm_end)
69894diff -urNp linux-3.1.1/mm/mremap.c linux-3.1.1/mm/mremap.c
69895--- linux-3.1.1/mm/mremap.c 2011-11-11 15:19:27.000000000 -0500
69896+++ linux-3.1.1/mm/mremap.c 2011-11-16 18:39:08.000000000 -0500
69897@@ -113,6 +113,12 @@ static void move_ptes(struct vm_area_str
69898 continue;
69899 pte = ptep_clear_flush(vma, old_addr, old_pte);
69900 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
69901+
69902+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69903+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
69904+ pte = pte_exprotect(pte);
69905+#endif
69906+
69907 set_pte_at(mm, new_addr, new_pte, pte);
69908 }
69909
69910@@ -272,6 +278,11 @@ static struct vm_area_struct *vma_to_res
69911 if (is_vm_hugetlb_page(vma))
69912 goto Einval;
69913
69914+#ifdef CONFIG_PAX_SEGMEXEC
69915+ if (pax_find_mirror_vma(vma))
69916+ goto Einval;
69917+#endif
69918+
69919 /* We can't remap across vm area boundaries */
69920 if (old_len > vma->vm_end - addr)
69921 goto Efault;
69922@@ -328,20 +339,25 @@ static unsigned long mremap_to(unsigned
69923 unsigned long ret = -EINVAL;
69924 unsigned long charged = 0;
69925 unsigned long map_flags;
69926+ unsigned long pax_task_size = TASK_SIZE;
69927
69928 if (new_addr & ~PAGE_MASK)
69929 goto out;
69930
69931- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
69932+#ifdef CONFIG_PAX_SEGMEXEC
69933+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
69934+ pax_task_size = SEGMEXEC_TASK_SIZE;
69935+#endif
69936+
69937+ pax_task_size -= PAGE_SIZE;
69938+
69939+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
69940 goto out;
69941
69942 /* Check if the location we're moving into overlaps the
69943 * old location at all, and fail if it does.
69944 */
69945- if ((new_addr <= addr) && (new_addr+new_len) > addr)
69946- goto out;
69947-
69948- if ((addr <= new_addr) && (addr+old_len) > new_addr)
69949+ if (addr + old_len > new_addr && new_addr + new_len > addr)
69950 goto out;
69951
69952 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
69953@@ -413,6 +429,7 @@ unsigned long do_mremap(unsigned long ad
69954 struct vm_area_struct *vma;
69955 unsigned long ret = -EINVAL;
69956 unsigned long charged = 0;
69957+ unsigned long pax_task_size = TASK_SIZE;
69958
69959 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
69960 goto out;
69961@@ -431,6 +448,17 @@ unsigned long do_mremap(unsigned long ad
69962 if (!new_len)
69963 goto out;
69964
69965+#ifdef CONFIG_PAX_SEGMEXEC
69966+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
69967+ pax_task_size = SEGMEXEC_TASK_SIZE;
69968+#endif
69969+
69970+ pax_task_size -= PAGE_SIZE;
69971+
69972+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
69973+ old_len > pax_task_size || addr > pax_task_size-old_len)
69974+ goto out;
69975+
69976 if (flags & MREMAP_FIXED) {
69977 if (flags & MREMAP_MAYMOVE)
69978 ret = mremap_to(addr, old_len, new_addr, new_len);
69979@@ -480,6 +508,7 @@ unsigned long do_mremap(unsigned long ad
69980 addr + new_len);
69981 }
69982 ret = addr;
69983+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
69984 goto out;
69985 }
69986 }
69987@@ -506,7 +535,13 @@ unsigned long do_mremap(unsigned long ad
69988 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
69989 if (ret)
69990 goto out;
69991+
69992+ map_flags = vma->vm_flags;
69993 ret = move_vma(vma, addr, old_len, new_len, new_addr);
69994+ if (!(ret & ~PAGE_MASK)) {
69995+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
69996+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
69997+ }
69998 }
69999 out:
70000 if (ret & ~PAGE_MASK)
70001diff -urNp linux-3.1.1/mm/nobootmem.c linux-3.1.1/mm/nobootmem.c
70002--- linux-3.1.1/mm/nobootmem.c 2011-11-11 15:19:27.000000000 -0500
70003+++ linux-3.1.1/mm/nobootmem.c 2011-11-16 18:39:08.000000000 -0500
70004@@ -110,19 +110,30 @@ static void __init __free_pages_memory(u
70005 unsigned long __init free_all_memory_core_early(int nodeid)
70006 {
70007 int i;
70008- u64 start, end;
70009+ u64 start, end, startrange, endrange;
70010 unsigned long count = 0;
70011- struct range *range = NULL;
70012+ struct range *range = NULL, rangerange = { 0, 0 };
70013 int nr_range;
70014
70015 nr_range = get_free_all_memory_range(&range, nodeid);
70016+ startrange = __pa(range) >> PAGE_SHIFT;
70017+ endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
70018
70019 for (i = 0; i < nr_range; i++) {
70020 start = range[i].start;
70021 end = range[i].end;
70022+ if (start <= endrange && startrange < end) {
70023+ BUG_ON(rangerange.start | rangerange.end);
70024+ rangerange = range[i];
70025+ continue;
70026+ }
70027 count += end - start;
70028 __free_pages_memory(start, end);
70029 }
70030+ start = rangerange.start;
70031+ end = rangerange.end;
70032+ count += end - start;
70033+ __free_pages_memory(start, end);
70034
70035 return count;
70036 }
70037diff -urNp linux-3.1.1/mm/nommu.c linux-3.1.1/mm/nommu.c
70038--- linux-3.1.1/mm/nommu.c 2011-11-11 15:19:27.000000000 -0500
70039+++ linux-3.1.1/mm/nommu.c 2011-11-16 18:39:08.000000000 -0500
70040@@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMI
70041 int sysctl_overcommit_ratio = 50; /* default is 50% */
70042 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
70043 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
70044-int heap_stack_gap = 0;
70045
70046 atomic_long_t mmap_pages_allocated;
70047
70048@@ -825,15 +824,6 @@ struct vm_area_struct *find_vma(struct m
70049 EXPORT_SYMBOL(find_vma);
70050
70051 /*
70052- * find a VMA
70053- * - we don't extend stack VMAs under NOMMU conditions
70054- */
70055-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
70056-{
70057- return find_vma(mm, addr);
70058-}
70059-
70060-/*
70061 * expand a stack to a given address
70062 * - not supported under NOMMU conditions
70063 */
70064@@ -1553,6 +1543,7 @@ int split_vma(struct mm_struct *mm, stru
70065
70066 /* most fields are the same, copy all, and then fixup */
70067 *new = *vma;
70068+ INIT_LIST_HEAD(&new->anon_vma_chain);
70069 *region = *vma->vm_region;
70070 new->vm_region = region;
70071
70072diff -urNp linux-3.1.1/mm/oom_kill.c linux-3.1.1/mm/oom_kill.c
70073--- linux-3.1.1/mm/oom_kill.c 2011-11-11 15:19:27.000000000 -0500
70074+++ linux-3.1.1/mm/oom_kill.c 2011-11-18 18:44:21.000000000 -0500
70075@@ -162,7 +162,7 @@ static bool oom_unkillable_task(struct t
70076 unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
70077 const nodemask_t *nodemask, unsigned long totalpages)
70078 {
70079- int points;
70080+ long points;
70081
70082 if (oom_unkillable_task(p, mem, nodemask))
70083 return 0;
70084diff -urNp linux-3.1.1/mm/page_alloc.c linux-3.1.1/mm/page_alloc.c
70085--- linux-3.1.1/mm/page_alloc.c 2011-11-11 15:19:27.000000000 -0500
70086+++ linux-3.1.1/mm/page_alloc.c 2011-11-16 18:40:44.000000000 -0500
70087@@ -340,7 +340,7 @@ out:
70088 * This usage means that zero-order pages may not be compound.
70089 */
70090
70091-static void free_compound_page(struct page *page)
70092+void free_compound_page(struct page *page)
70093 {
70094 __free_pages_ok(page, compound_order(page));
70095 }
70096@@ -653,6 +653,10 @@ static bool free_pages_prepare(struct pa
70097 int i;
70098 int bad = 0;
70099
70100+#ifdef CONFIG_PAX_MEMORY_SANITIZE
70101+ unsigned long index = 1UL << order;
70102+#endif
70103+
70104 trace_mm_page_free_direct(page, order);
70105 kmemcheck_free_shadow(page, order);
70106
70107@@ -668,6 +672,12 @@ static bool free_pages_prepare(struct pa
70108 debug_check_no_obj_freed(page_address(page),
70109 PAGE_SIZE << order);
70110 }
70111+
70112+#ifdef CONFIG_PAX_MEMORY_SANITIZE
70113+ for (; index; --index)
70114+ sanitize_highpage(page + index - 1);
70115+#endif
70116+
70117 arch_free_page(page, order);
70118 kernel_map_pages(page, 1 << order, 0);
70119
70120@@ -783,8 +793,10 @@ static int prep_new_page(struct page *pa
70121 arch_alloc_page(page, order);
70122 kernel_map_pages(page, 1 << order, 1);
70123
70124+#ifndef CONFIG_PAX_MEMORY_SANITIZE
70125 if (gfp_flags & __GFP_ZERO)
70126 prep_zero_page(page, order, gfp_flags);
70127+#endif
70128
70129 if (order && (gfp_flags & __GFP_COMP))
70130 prep_compound_page(page, order);
70131@@ -2539,6 +2551,8 @@ void show_free_areas(unsigned int filter
70132 int cpu;
70133 struct zone *zone;
70134
70135+ pax_track_stack();
70136+
70137 for_each_populated_zone(zone) {
70138 if (skip_free_areas_node(filter, zone_to_nid(zone)))
70139 continue;
70140@@ -3350,7 +3364,13 @@ static int pageblock_is_reserved(unsigne
70141 unsigned long pfn;
70142
70143 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
70144+#ifdef CONFIG_X86_32
70145+ /* boot failures in VMware 8 on 32bit vanilla since
70146+ this change */
70147+ if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
70148+#else
70149 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
70150+#endif
70151 return 1;
70152 }
70153 return 0;
70154diff -urNp linux-3.1.1/mm/percpu.c linux-3.1.1/mm/percpu.c
70155--- linux-3.1.1/mm/percpu.c 2011-11-11 15:19:27.000000000 -0500
70156+++ linux-3.1.1/mm/percpu.c 2011-11-16 18:39:08.000000000 -0500
70157@@ -121,7 +121,7 @@ static unsigned int pcpu_first_unit_cpu
70158 static unsigned int pcpu_last_unit_cpu __read_mostly;
70159
70160 /* the address of the first chunk which starts with the kernel static area */
70161-void *pcpu_base_addr __read_mostly;
70162+void *pcpu_base_addr __read_only;
70163 EXPORT_SYMBOL_GPL(pcpu_base_addr);
70164
70165 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
70166diff -urNp linux-3.1.1/mm/rmap.c linux-3.1.1/mm/rmap.c
70167--- linux-3.1.1/mm/rmap.c 2011-11-11 15:19:27.000000000 -0500
70168+++ linux-3.1.1/mm/rmap.c 2011-11-16 18:39:08.000000000 -0500
70169@@ -152,6 +152,10 @@ int anon_vma_prepare(struct vm_area_stru
70170 struct anon_vma *anon_vma = vma->anon_vma;
70171 struct anon_vma_chain *avc;
70172
70173+#ifdef CONFIG_PAX_SEGMEXEC
70174+ struct anon_vma_chain *avc_m = NULL;
70175+#endif
70176+
70177 might_sleep();
70178 if (unlikely(!anon_vma)) {
70179 struct mm_struct *mm = vma->vm_mm;
70180@@ -161,6 +165,12 @@ int anon_vma_prepare(struct vm_area_stru
70181 if (!avc)
70182 goto out_enomem;
70183
70184+#ifdef CONFIG_PAX_SEGMEXEC
70185+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
70186+ if (!avc_m)
70187+ goto out_enomem_free_avc;
70188+#endif
70189+
70190 anon_vma = find_mergeable_anon_vma(vma);
70191 allocated = NULL;
70192 if (!anon_vma) {
70193@@ -174,6 +184,21 @@ int anon_vma_prepare(struct vm_area_stru
70194 /* page_table_lock to protect against threads */
70195 spin_lock(&mm->page_table_lock);
70196 if (likely(!vma->anon_vma)) {
70197+
70198+#ifdef CONFIG_PAX_SEGMEXEC
70199+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
70200+
70201+ if (vma_m) {
70202+ BUG_ON(vma_m->anon_vma);
70203+ vma_m->anon_vma = anon_vma;
70204+ avc_m->anon_vma = anon_vma;
70205+ avc_m->vma = vma;
70206+ list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
70207+ list_add(&avc_m->same_anon_vma, &anon_vma->head);
70208+ avc_m = NULL;
70209+ }
70210+#endif
70211+
70212 vma->anon_vma = anon_vma;
70213 avc->anon_vma = anon_vma;
70214 avc->vma = vma;
70215@@ -187,12 +212,24 @@ int anon_vma_prepare(struct vm_area_stru
70216
70217 if (unlikely(allocated))
70218 put_anon_vma(allocated);
70219+
70220+#ifdef CONFIG_PAX_SEGMEXEC
70221+ if (unlikely(avc_m))
70222+ anon_vma_chain_free(avc_m);
70223+#endif
70224+
70225 if (unlikely(avc))
70226 anon_vma_chain_free(avc);
70227 }
70228 return 0;
70229
70230 out_enomem_free_avc:
70231+
70232+#ifdef CONFIG_PAX_SEGMEXEC
70233+ if (avc_m)
70234+ anon_vma_chain_free(avc_m);
70235+#endif
70236+
70237 anon_vma_chain_free(avc);
70238 out_enomem:
70239 return -ENOMEM;
70240@@ -243,7 +280,7 @@ static void anon_vma_chain_link(struct v
70241 * Attach the anon_vmas from src to dst.
70242 * Returns 0 on success, -ENOMEM on failure.
70243 */
70244-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
70245+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
70246 {
70247 struct anon_vma_chain *avc, *pavc;
70248 struct anon_vma *root = NULL;
70249@@ -276,7 +313,7 @@ int anon_vma_clone(struct vm_area_struct
70250 * the corresponding VMA in the parent process is attached to.
70251 * Returns 0 on success, non-zero on failure.
70252 */
70253-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
70254+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
70255 {
70256 struct anon_vma_chain *avc;
70257 struct anon_vma *anon_vma;
70258diff -urNp linux-3.1.1/mm/shmem.c linux-3.1.1/mm/shmem.c
70259--- linux-3.1.1/mm/shmem.c 2011-11-11 15:19:27.000000000 -0500
70260+++ linux-3.1.1/mm/shmem.c 2011-11-16 19:28:28.000000000 -0500
70261@@ -31,7 +31,7 @@
70262 #include <linux/module.h>
70263 #include <linux/swap.h>
70264
70265-static struct vfsmount *shm_mnt;
70266+struct vfsmount *shm_mnt;
70267
70268 #ifdef CONFIG_SHMEM
70269 /*
70270@@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
70271 #define BOGO_DIRENT_SIZE 20
70272
70273 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
70274-#define SHORT_SYMLINK_LEN 128
70275+#define SHORT_SYMLINK_LEN 64
70276
70277 struct shmem_xattr {
70278 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
70279@@ -769,6 +769,8 @@ static struct page *shmem_swapin(swp_ent
70280 struct mempolicy mpol, *spol;
70281 struct vm_area_struct pvma;
70282
70283+ pax_track_stack();
70284+
70285 spol = mpol_cond_copy(&mpol,
70286 mpol_shared_policy_lookup(&info->policy, index));
70287
70288@@ -2149,8 +2151,7 @@ int shmem_fill_super(struct super_block
70289 int err = -ENOMEM;
70290
70291 /* Round up to L1_CACHE_BYTES to resist false sharing */
70292- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
70293- L1_CACHE_BYTES), GFP_KERNEL);
70294+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
70295 if (!sbinfo)
70296 return -ENOMEM;
70297
70298diff -urNp linux-3.1.1/mm/slab.c linux-3.1.1/mm/slab.c
70299--- linux-3.1.1/mm/slab.c 2011-11-11 15:19:27.000000000 -0500
70300+++ linux-3.1.1/mm/slab.c 2011-11-16 18:40:44.000000000 -0500
70301@@ -151,7 +151,7 @@
70302
70303 /* Legal flag mask for kmem_cache_create(). */
70304 #if DEBUG
70305-# define CREATE_MASK (SLAB_RED_ZONE | \
70306+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
70307 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
70308 SLAB_CACHE_DMA | \
70309 SLAB_STORE_USER | \
70310@@ -159,7 +159,7 @@
70311 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70312 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
70313 #else
70314-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
70315+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
70316 SLAB_CACHE_DMA | \
70317 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
70318 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70319@@ -288,7 +288,7 @@ struct kmem_list3 {
70320 * Need this for bootstrapping a per node allocator.
70321 */
70322 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
70323-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
70324+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
70325 #define CACHE_CACHE 0
70326 #define SIZE_AC MAX_NUMNODES
70327 #define SIZE_L3 (2 * MAX_NUMNODES)
70328@@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_
70329 if ((x)->max_freeable < i) \
70330 (x)->max_freeable = i; \
70331 } while (0)
70332-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
70333-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
70334-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
70335-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
70336+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
70337+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
70338+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
70339+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
70340 #else
70341 #define STATS_INC_ACTIVE(x) do { } while (0)
70342 #define STATS_DEC_ACTIVE(x) do { } while (0)
70343@@ -538,7 +538,7 @@ static inline void *index_to_obj(struct
70344 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
70345 */
70346 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
70347- const struct slab *slab, void *obj)
70348+ const struct slab *slab, const void *obj)
70349 {
70350 u32 offset = (obj - slab->s_mem);
70351 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
70352@@ -564,7 +564,7 @@ struct cache_names {
70353 static struct cache_names __initdata cache_names[] = {
70354 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
70355 #include <linux/kmalloc_sizes.h>
70356- {NULL,}
70357+ {NULL}
70358 #undef CACHE
70359 };
70360
70361@@ -1571,7 +1571,7 @@ void __init kmem_cache_init(void)
70362 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
70363 sizes[INDEX_AC].cs_size,
70364 ARCH_KMALLOC_MINALIGN,
70365- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70366+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70367 NULL);
70368
70369 if (INDEX_AC != INDEX_L3) {
70370@@ -1579,7 +1579,7 @@ void __init kmem_cache_init(void)
70371 kmem_cache_create(names[INDEX_L3].name,
70372 sizes[INDEX_L3].cs_size,
70373 ARCH_KMALLOC_MINALIGN,
70374- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70375+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70376 NULL);
70377 }
70378
70379@@ -1597,7 +1597,7 @@ void __init kmem_cache_init(void)
70380 sizes->cs_cachep = kmem_cache_create(names->name,
70381 sizes->cs_size,
70382 ARCH_KMALLOC_MINALIGN,
70383- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70384+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70385 NULL);
70386 }
70387 #ifdef CONFIG_ZONE_DMA
70388@@ -4324,10 +4324,10 @@ static int s_show(struct seq_file *m, vo
70389 }
70390 /* cpu stats */
70391 {
70392- unsigned long allochit = atomic_read(&cachep->allochit);
70393- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
70394- unsigned long freehit = atomic_read(&cachep->freehit);
70395- unsigned long freemiss = atomic_read(&cachep->freemiss);
70396+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
70397+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
70398+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
70399+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
70400
70401 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
70402 allochit, allocmiss, freehit, freemiss);
70403@@ -4584,15 +4584,70 @@ static const struct file_operations proc
70404
70405 static int __init slab_proc_init(void)
70406 {
70407- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
70408+ mode_t gr_mode = S_IRUGO;
70409+
70410+#ifdef CONFIG_GRKERNSEC_PROC_ADD
70411+ gr_mode = S_IRUSR;
70412+#endif
70413+
70414+ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
70415 #ifdef CONFIG_DEBUG_SLAB_LEAK
70416- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
70417+ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
70418 #endif
70419 return 0;
70420 }
70421 module_init(slab_proc_init);
70422 #endif
70423
70424+void check_object_size(const void *ptr, unsigned long n, bool to)
70425+{
70426+
70427+#ifdef CONFIG_PAX_USERCOPY
70428+ struct page *page;
70429+ struct kmem_cache *cachep = NULL;
70430+ struct slab *slabp;
70431+ unsigned int objnr;
70432+ unsigned long offset;
70433+ const char *type;
70434+
70435+ if (!n)
70436+ return;
70437+
70438+ type = "<null>";
70439+ if (ZERO_OR_NULL_PTR(ptr))
70440+ goto report;
70441+
70442+ if (!virt_addr_valid(ptr))
70443+ return;
70444+
70445+ page = virt_to_head_page(ptr);
70446+
70447+ type = "<process stack>";
70448+ if (!PageSlab(page)) {
70449+ if (object_is_on_stack(ptr, n) == -1)
70450+ goto report;
70451+ return;
70452+ }
70453+
70454+ cachep = page_get_cache(page);
70455+ type = cachep->name;
70456+ if (!(cachep->flags & SLAB_USERCOPY))
70457+ goto report;
70458+
70459+ slabp = page_get_slab(page);
70460+ objnr = obj_to_index(cachep, slabp, ptr);
70461+ BUG_ON(objnr >= cachep->num);
70462+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
70463+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
70464+ return;
70465+
70466+report:
70467+ pax_report_usercopy(ptr, n, to, type);
70468+#endif
70469+
70470+}
70471+EXPORT_SYMBOL(check_object_size);
70472+
70473 /**
70474 * ksize - get the actual amount of memory allocated for a given object
70475 * @objp: Pointer to the object
70476diff -urNp linux-3.1.1/mm/slob.c linux-3.1.1/mm/slob.c
70477--- linux-3.1.1/mm/slob.c 2011-11-11 15:19:27.000000000 -0500
70478+++ linux-3.1.1/mm/slob.c 2011-11-16 18:39:08.000000000 -0500
70479@@ -29,7 +29,7 @@
70480 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
70481 * alloc_pages() directly, allocating compound pages so the page order
70482 * does not have to be separately tracked, and also stores the exact
70483- * allocation size in page->private so that it can be used to accurately
70484+ * allocation size in slob_page->size so that it can be used to accurately
70485 * provide ksize(). These objects are detected in kfree() because slob_page()
70486 * is false for them.
70487 *
70488@@ -58,6 +58,7 @@
70489 */
70490
70491 #include <linux/kernel.h>
70492+#include <linux/sched.h>
70493 #include <linux/slab.h>
70494 #include <linux/mm.h>
70495 #include <linux/swap.h> /* struct reclaim_state */
70496@@ -102,7 +103,8 @@ struct slob_page {
70497 unsigned long flags; /* mandatory */
70498 atomic_t _count; /* mandatory */
70499 slobidx_t units; /* free units left in page */
70500- unsigned long pad[2];
70501+ unsigned long pad[1];
70502+ unsigned long size; /* size when >=PAGE_SIZE */
70503 slob_t *free; /* first free slob_t in page */
70504 struct list_head list; /* linked list of free pages */
70505 };
70506@@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
70507 */
70508 static inline int is_slob_page(struct slob_page *sp)
70509 {
70510- return PageSlab((struct page *)sp);
70511+ return PageSlab((struct page *)sp) && !sp->size;
70512 }
70513
70514 static inline void set_slob_page(struct slob_page *sp)
70515@@ -150,7 +152,7 @@ static inline void clear_slob_page(struc
70516
70517 static inline struct slob_page *slob_page(const void *addr)
70518 {
70519- return (struct slob_page *)virt_to_page(addr);
70520+ return (struct slob_page *)virt_to_head_page(addr);
70521 }
70522
70523 /*
70524@@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_
70525 /*
70526 * Return the size of a slob block.
70527 */
70528-static slobidx_t slob_units(slob_t *s)
70529+static slobidx_t slob_units(const slob_t *s)
70530 {
70531 if (s->units > 0)
70532 return s->units;
70533@@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
70534 /*
70535 * Return the next free slob block pointer after this one.
70536 */
70537-static slob_t *slob_next(slob_t *s)
70538+static slob_t *slob_next(const slob_t *s)
70539 {
70540 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
70541 slobidx_t next;
70542@@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
70543 /*
70544 * Returns true if s is the last free block in its page.
70545 */
70546-static int slob_last(slob_t *s)
70547+static int slob_last(const slob_t *s)
70548 {
70549 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
70550 }
70551@@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, i
70552 if (!page)
70553 return NULL;
70554
70555+ set_slob_page(page);
70556 return page_address(page);
70557 }
70558
70559@@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp
70560 if (!b)
70561 return NULL;
70562 sp = slob_page(b);
70563- set_slob_page(sp);
70564
70565 spin_lock_irqsave(&slob_lock, flags);
70566 sp->units = SLOB_UNITS(PAGE_SIZE);
70567 sp->free = b;
70568+ sp->size = 0;
70569 INIT_LIST_HEAD(&sp->list);
70570 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
70571 set_slob_page_free(sp, slob_list);
70572@@ -476,10 +479,9 @@ out:
70573 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
70574 */
70575
70576-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70577+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
70578 {
70579- unsigned int *m;
70580- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70581+ slob_t *m;
70582 void *ret;
70583
70584 gfp &= gfp_allowed_mask;
70585@@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t
70586
70587 if (!m)
70588 return NULL;
70589- *m = size;
70590+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
70591+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
70592+ m[0].units = size;
70593+ m[1].units = align;
70594 ret = (void *)m + align;
70595
70596 trace_kmalloc_node(_RET_IP_, ret,
70597@@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t
70598 gfp |= __GFP_COMP;
70599 ret = slob_new_pages(gfp, order, node);
70600 if (ret) {
70601- struct page *page;
70602- page = virt_to_page(ret);
70603- page->private = size;
70604+ struct slob_page *sp;
70605+ sp = slob_page(ret);
70606+ sp->size = size;
70607 }
70608
70609 trace_kmalloc_node(_RET_IP_, ret,
70610 size, PAGE_SIZE << order, gfp, node);
70611 }
70612
70613- kmemleak_alloc(ret, size, 1, gfp);
70614+ return ret;
70615+}
70616+
70617+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70618+{
70619+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70620+ void *ret = __kmalloc_node_align(size, gfp, node, align);
70621+
70622+ if (!ZERO_OR_NULL_PTR(ret))
70623+ kmemleak_alloc(ret, size, 1, gfp);
70624 return ret;
70625 }
70626 EXPORT_SYMBOL(__kmalloc_node);
70627@@ -533,13 +547,92 @@ void kfree(const void *block)
70628 sp = slob_page(block);
70629 if (is_slob_page(sp)) {
70630 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70631- unsigned int *m = (unsigned int *)(block - align);
70632- slob_free(m, *m + align);
70633- } else
70634+ slob_t *m = (slob_t *)(block - align);
70635+ slob_free(m, m[0].units + align);
70636+ } else {
70637+ clear_slob_page(sp);
70638+ free_slob_page(sp);
70639+ sp->size = 0;
70640 put_page(&sp->page);
70641+ }
70642 }
70643 EXPORT_SYMBOL(kfree);
70644
70645+void check_object_size(const void *ptr, unsigned long n, bool to)
70646+{
70647+
70648+#ifdef CONFIG_PAX_USERCOPY
70649+ struct slob_page *sp;
70650+ const slob_t *free;
70651+ const void *base;
70652+ unsigned long flags;
70653+ const char *type;
70654+
70655+ if (!n)
70656+ return;
70657+
70658+ type = "<null>";
70659+ if (ZERO_OR_NULL_PTR(ptr))
70660+ goto report;
70661+
70662+ if (!virt_addr_valid(ptr))
70663+ return;
70664+
70665+ type = "<process stack>";
70666+ sp = slob_page(ptr);
70667+ if (!PageSlab((struct page*)sp)) {
70668+ if (object_is_on_stack(ptr, n) == -1)
70669+ goto report;
70670+ return;
70671+ }
70672+
70673+ type = "<slob>";
70674+ if (sp->size) {
70675+ base = page_address(&sp->page);
70676+ if (base <= ptr && n <= sp->size - (ptr - base))
70677+ return;
70678+ goto report;
70679+ }
70680+
70681+ /* some tricky double walking to find the chunk */
70682+ spin_lock_irqsave(&slob_lock, flags);
70683+ base = (void *)((unsigned long)ptr & PAGE_MASK);
70684+ free = sp->free;
70685+
70686+ while (!slob_last(free) && (void *)free <= ptr) {
70687+ base = free + slob_units(free);
70688+ free = slob_next(free);
70689+ }
70690+
70691+ while (base < (void *)free) {
70692+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
70693+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
70694+ int offset;
70695+
70696+ if (ptr < base + align)
70697+ break;
70698+
70699+ offset = ptr - base - align;
70700+ if (offset >= m) {
70701+ base += size;
70702+ continue;
70703+ }
70704+
70705+ if (n > m - offset)
70706+ break;
70707+
70708+ spin_unlock_irqrestore(&slob_lock, flags);
70709+ return;
70710+ }
70711+
70712+ spin_unlock_irqrestore(&slob_lock, flags);
70713+report:
70714+ pax_report_usercopy(ptr, n, to, type);
70715+#endif
70716+
70717+}
70718+EXPORT_SYMBOL(check_object_size);
70719+
70720 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
70721 size_t ksize(const void *block)
70722 {
70723@@ -552,10 +645,10 @@ size_t ksize(const void *block)
70724 sp = slob_page(block);
70725 if (is_slob_page(sp)) {
70726 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70727- unsigned int *m = (unsigned int *)(block - align);
70728- return SLOB_UNITS(*m) * SLOB_UNIT;
70729+ slob_t *m = (slob_t *)(block - align);
70730+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
70731 } else
70732- return sp->page.private;
70733+ return sp->size;
70734 }
70735 EXPORT_SYMBOL(ksize);
70736
70737@@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(con
70738 {
70739 struct kmem_cache *c;
70740
70741+#ifdef CONFIG_PAX_USERCOPY
70742+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
70743+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
70744+#else
70745 c = slob_alloc(sizeof(struct kmem_cache),
70746 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
70747+#endif
70748
70749 if (c) {
70750 c->name = name;
70751@@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_
70752
70753 lockdep_trace_alloc(flags);
70754
70755+#ifdef CONFIG_PAX_USERCOPY
70756+ b = __kmalloc_node_align(c->size, flags, node, c->align);
70757+#else
70758 if (c->size < PAGE_SIZE) {
70759 b = slob_alloc(c->size, flags, c->align, node);
70760 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70761 SLOB_UNITS(c->size) * SLOB_UNIT,
70762 flags, node);
70763 } else {
70764+ struct slob_page *sp;
70765+
70766 b = slob_new_pages(flags, get_order(c->size), node);
70767+ sp = slob_page(b);
70768+ sp->size = c->size;
70769 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70770 PAGE_SIZE << get_order(c->size),
70771 flags, node);
70772 }
70773+#endif
70774
70775 if (c->ctor)
70776 c->ctor(b);
70777@@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
70778
70779 static void __kmem_cache_free(void *b, int size)
70780 {
70781- if (size < PAGE_SIZE)
70782+ struct slob_page *sp = slob_page(b);
70783+
70784+ if (is_slob_page(sp))
70785 slob_free(b, size);
70786- else
70787+ else {
70788+ clear_slob_page(sp);
70789+ free_slob_page(sp);
70790+ sp->size = 0;
70791 slob_free_pages(b, get_order(size));
70792+ }
70793 }
70794
70795 static void kmem_rcu_free(struct rcu_head *head)
70796@@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_hea
70797
70798 void kmem_cache_free(struct kmem_cache *c, void *b)
70799 {
70800+ int size = c->size;
70801+
70802+#ifdef CONFIG_PAX_USERCOPY
70803+ if (size + c->align < PAGE_SIZE) {
70804+ size += c->align;
70805+ b -= c->align;
70806+ }
70807+#endif
70808+
70809 kmemleak_free_recursive(b, c->flags);
70810 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
70811 struct slob_rcu *slob_rcu;
70812- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
70813- slob_rcu->size = c->size;
70814+ slob_rcu = b + (size - sizeof(struct slob_rcu));
70815+ slob_rcu->size = size;
70816 call_rcu(&slob_rcu->head, kmem_rcu_free);
70817 } else {
70818- __kmem_cache_free(b, c->size);
70819+ __kmem_cache_free(b, size);
70820 }
70821
70822+#ifdef CONFIG_PAX_USERCOPY
70823+ trace_kfree(_RET_IP_, b);
70824+#else
70825 trace_kmem_cache_free(_RET_IP_, b);
70826+#endif
70827+
70828 }
70829 EXPORT_SYMBOL(kmem_cache_free);
70830
70831diff -urNp linux-3.1.1/mm/slub.c linux-3.1.1/mm/slub.c
70832--- linux-3.1.1/mm/slub.c 2011-11-11 15:19:27.000000000 -0500
70833+++ linux-3.1.1/mm/slub.c 2011-11-16 19:27:25.000000000 -0500
70834@@ -208,7 +208,7 @@ struct track {
70835
70836 enum track_item { TRACK_ALLOC, TRACK_FREE };
70837
70838-#ifdef CONFIG_SYSFS
70839+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70840 static int sysfs_slab_add(struct kmem_cache *);
70841 static int sysfs_slab_alias(struct kmem_cache *, const char *);
70842 static void sysfs_slab_remove(struct kmem_cache *);
70843@@ -556,7 +556,7 @@ static void print_track(const char *s, s
70844 if (!t->addr)
70845 return;
70846
70847- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
70848+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
70849 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
70850 #ifdef CONFIG_STACKTRACE
70851 {
70852@@ -2456,6 +2456,8 @@ void kmem_cache_free(struct kmem_cache *
70853
70854 page = virt_to_head_page(x);
70855
70856+ BUG_ON(!PageSlab(page));
70857+
70858 slab_free(s, page, x, _RET_IP_);
70859
70860 trace_kmem_cache_free(_RET_IP_, x);
70861@@ -2489,7 +2491,7 @@ static int slub_min_objects;
70862 * Merge control. If this is set then no merging of slab caches will occur.
70863 * (Could be removed. This was introduced to pacify the merge skeptics.)
70864 */
70865-static int slub_nomerge;
70866+static int slub_nomerge = 1;
70867
70868 /*
70869 * Calculate the order of allocation given an slab object size.
70870@@ -2912,7 +2914,7 @@ static int kmem_cache_open(struct kmem_c
70871 * list to avoid pounding the page allocator excessively.
70872 */
70873 set_min_partial(s, ilog2(s->size));
70874- s->refcount = 1;
70875+ atomic_set(&s->refcount, 1);
70876 #ifdef CONFIG_NUMA
70877 s->remote_node_defrag_ratio = 1000;
70878 #endif
70879@@ -3017,8 +3019,7 @@ static inline int kmem_cache_close(struc
70880 void kmem_cache_destroy(struct kmem_cache *s)
70881 {
70882 down_write(&slub_lock);
70883- s->refcount--;
70884- if (!s->refcount) {
70885+ if (atomic_dec_and_test(&s->refcount)) {
70886 list_del(&s->list);
70887 if (kmem_cache_close(s)) {
70888 printk(KERN_ERR "SLUB %s: %s called for cache that "
70889@@ -3228,6 +3229,50 @@ void *__kmalloc_node(size_t size, gfp_t
70890 EXPORT_SYMBOL(__kmalloc_node);
70891 #endif
70892
70893+void check_object_size(const void *ptr, unsigned long n, bool to)
70894+{
70895+
70896+#ifdef CONFIG_PAX_USERCOPY
70897+ struct page *page;
70898+ struct kmem_cache *s = NULL;
70899+ unsigned long offset;
70900+ const char *type;
70901+
70902+ if (!n)
70903+ return;
70904+
70905+ type = "<null>";
70906+ if (ZERO_OR_NULL_PTR(ptr))
70907+ goto report;
70908+
70909+ if (!virt_addr_valid(ptr))
70910+ return;
70911+
70912+ page = virt_to_head_page(ptr);
70913+
70914+ type = "<process stack>";
70915+ if (!PageSlab(page)) {
70916+ if (object_is_on_stack(ptr, n) == -1)
70917+ goto report;
70918+ return;
70919+ }
70920+
70921+ s = page->slab;
70922+ type = s->name;
70923+ if (!(s->flags & SLAB_USERCOPY))
70924+ goto report;
70925+
70926+ offset = (ptr - page_address(page)) % s->size;
70927+ if (offset <= s->objsize && n <= s->objsize - offset)
70928+ return;
70929+
70930+report:
70931+ pax_report_usercopy(ptr, n, to, type);
70932+#endif
70933+
70934+}
70935+EXPORT_SYMBOL(check_object_size);
70936+
70937 size_t ksize(const void *object)
70938 {
70939 struct page *page;
70940@@ -3502,7 +3547,7 @@ static void __init kmem_cache_bootstrap_
70941 int node;
70942
70943 list_add(&s->list, &slab_caches);
70944- s->refcount = -1;
70945+ atomic_set(&s->refcount, -1);
70946
70947 for_each_node_state(node, N_NORMAL_MEMORY) {
70948 struct kmem_cache_node *n = get_node(s, node);
70949@@ -3619,17 +3664,17 @@ void __init kmem_cache_init(void)
70950
70951 /* Caches that are not of the two-to-the-power-of size */
70952 if (KMALLOC_MIN_SIZE <= 32) {
70953- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
70954+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
70955 caches++;
70956 }
70957
70958 if (KMALLOC_MIN_SIZE <= 64) {
70959- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
70960+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
70961 caches++;
70962 }
70963
70964 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
70965- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
70966+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
70967 caches++;
70968 }
70969
70970@@ -3697,7 +3742,7 @@ static int slab_unmergeable(struct kmem_
70971 /*
70972 * We may have set a slab to be unmergeable during bootstrap.
70973 */
70974- if (s->refcount < 0)
70975+ if (atomic_read(&s->refcount) < 0)
70976 return 1;
70977
70978 return 0;
70979@@ -3756,7 +3801,7 @@ struct kmem_cache *kmem_cache_create(con
70980 down_write(&slub_lock);
70981 s = find_mergeable(size, align, flags, name, ctor);
70982 if (s) {
70983- s->refcount++;
70984+ atomic_inc(&s->refcount);
70985 /*
70986 * Adjust the object sizes so that we clear
70987 * the complete object on kzalloc.
70988@@ -3765,7 +3810,7 @@ struct kmem_cache *kmem_cache_create(con
70989 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
70990
70991 if (sysfs_slab_alias(s, name)) {
70992- s->refcount--;
70993+ atomic_dec(&s->refcount);
70994 goto err;
70995 }
70996 up_write(&slub_lock);
70997@@ -3893,7 +3938,7 @@ void *__kmalloc_node_track_caller(size_t
70998 }
70999 #endif
71000
71001-#ifdef CONFIG_SYSFS
71002+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71003 static int count_inuse(struct page *page)
71004 {
71005 return page->inuse;
71006@@ -4280,12 +4325,12 @@ static void resiliency_test(void)
71007 validate_slab_cache(kmalloc_caches[9]);
71008 }
71009 #else
71010-#ifdef CONFIG_SYSFS
71011+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71012 static void resiliency_test(void) {};
71013 #endif
71014 #endif
71015
71016-#ifdef CONFIG_SYSFS
71017+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71018 enum slab_stat_type {
71019 SL_ALL, /* All slabs */
71020 SL_PARTIAL, /* Only partially allocated slabs */
71021@@ -4495,7 +4540,7 @@ SLAB_ATTR_RO(ctor);
71022
71023 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
71024 {
71025- return sprintf(buf, "%d\n", s->refcount - 1);
71026+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
71027 }
71028 SLAB_ATTR_RO(aliases);
71029
71030@@ -5025,6 +5070,7 @@ static char *create_unique_id(struct kme
71031 return name;
71032 }
71033
71034+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71035 static int sysfs_slab_add(struct kmem_cache *s)
71036 {
71037 int err;
71038@@ -5087,6 +5133,7 @@ static void sysfs_slab_remove(struct kme
71039 kobject_del(&s->kobj);
71040 kobject_put(&s->kobj);
71041 }
71042+#endif
71043
71044 /*
71045 * Need to buffer aliases during bootup until sysfs becomes
71046@@ -5100,6 +5147,7 @@ struct saved_alias {
71047
71048 static struct saved_alias *alias_list;
71049
71050+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71051 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
71052 {
71053 struct saved_alias *al;
71054@@ -5122,6 +5170,7 @@ static int sysfs_slab_alias(struct kmem_
71055 alias_list = al;
71056 return 0;
71057 }
71058+#endif
71059
71060 static int __init slab_sysfs_init(void)
71061 {
71062@@ -5257,7 +5306,13 @@ static const struct file_operations proc
71063
71064 static int __init slab_proc_init(void)
71065 {
71066- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
71067+ mode_t gr_mode = S_IRUGO;
71068+
71069+#ifdef CONFIG_GRKERNSEC_PROC_ADD
71070+ gr_mode = S_IRUSR;
71071+#endif
71072+
71073+ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
71074 return 0;
71075 }
71076 module_init(slab_proc_init);
71077diff -urNp linux-3.1.1/mm/swap.c linux-3.1.1/mm/swap.c
71078--- linux-3.1.1/mm/swap.c 2011-11-11 15:19:27.000000000 -0500
71079+++ linux-3.1.1/mm/swap.c 2011-11-16 18:39:08.000000000 -0500
71080@@ -31,6 +31,7 @@
71081 #include <linux/backing-dev.h>
71082 #include <linux/memcontrol.h>
71083 #include <linux/gfp.h>
71084+#include <linux/hugetlb.h>
71085
71086 #include "internal.h"
71087
71088@@ -71,6 +72,8 @@ static void __put_compound_page(struct p
71089
71090 __page_cache_release(page);
71091 dtor = get_compound_page_dtor(page);
71092+ if (!PageHuge(page))
71093+ BUG_ON(dtor != free_compound_page);
71094 (*dtor)(page);
71095 }
71096
71097diff -urNp linux-3.1.1/mm/swapfile.c linux-3.1.1/mm/swapfile.c
71098--- linux-3.1.1/mm/swapfile.c 2011-11-11 15:19:27.000000000 -0500
71099+++ linux-3.1.1/mm/swapfile.c 2011-11-16 18:39:08.000000000 -0500
71100@@ -62,7 +62,7 @@ static DEFINE_MUTEX(swapon_mutex);
71101
71102 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
71103 /* Activity counter to indicate that a swapon or swapoff has occurred */
71104-static atomic_t proc_poll_event = ATOMIC_INIT(0);
71105+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
71106
71107 static inline unsigned char swap_count(unsigned char ent)
71108 {
71109@@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __us
71110 }
71111 filp_close(swap_file, NULL);
71112 err = 0;
71113- atomic_inc(&proc_poll_event);
71114+ atomic_inc_unchecked(&proc_poll_event);
71115 wake_up_interruptible(&proc_poll_wait);
71116
71117 out_dput:
71118@@ -1687,8 +1687,8 @@ static unsigned swaps_poll(struct file *
71119
71120 poll_wait(file, &proc_poll_wait, wait);
71121
71122- if (seq->poll_event != atomic_read(&proc_poll_event)) {
71123- seq->poll_event = atomic_read(&proc_poll_event);
71124+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
71125+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
71126 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
71127 }
71128
71129@@ -1786,7 +1786,7 @@ static int swaps_open(struct inode *inod
71130 return ret;
71131
71132 seq = file->private_data;
71133- seq->poll_event = atomic_read(&proc_poll_event);
71134+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
71135 return 0;
71136 }
71137
71138@@ -2124,7 +2124,7 @@ SYSCALL_DEFINE2(swapon, const char __use
71139 (p->flags & SWP_DISCARDABLE) ? "D" : "");
71140
71141 mutex_unlock(&swapon_mutex);
71142- atomic_inc(&proc_poll_event);
71143+ atomic_inc_unchecked(&proc_poll_event);
71144 wake_up_interruptible(&proc_poll_wait);
71145
71146 if (S_ISREG(inode->i_mode))
71147diff -urNp linux-3.1.1/mm/util.c linux-3.1.1/mm/util.c
71148--- linux-3.1.1/mm/util.c 2011-11-11 15:19:27.000000000 -0500
71149+++ linux-3.1.1/mm/util.c 2011-11-16 18:39:08.000000000 -0500
71150@@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
71151 * allocated buffer. Use this if you don't want to free the buffer immediately
71152 * like, for example, with RCU.
71153 */
71154+#undef __krealloc
71155 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
71156 {
71157 void *ret;
71158@@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
71159 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
71160 * %NULL pointer, the object pointed to is freed.
71161 */
71162+#undef krealloc
71163 void *krealloc(const void *p, size_t new_size, gfp_t flags)
71164 {
71165 void *ret;
71166@@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *m
71167 void arch_pick_mmap_layout(struct mm_struct *mm)
71168 {
71169 mm->mmap_base = TASK_UNMAPPED_BASE;
71170+
71171+#ifdef CONFIG_PAX_RANDMMAP
71172+ if (mm->pax_flags & MF_PAX_RANDMMAP)
71173+ mm->mmap_base += mm->delta_mmap;
71174+#endif
71175+
71176 mm->get_unmapped_area = arch_get_unmapped_area;
71177 mm->unmap_area = arch_unmap_area;
71178 }
71179diff -urNp linux-3.1.1/mm/vmalloc.c linux-3.1.1/mm/vmalloc.c
71180--- linux-3.1.1/mm/vmalloc.c 2011-11-11 15:19:27.000000000 -0500
71181+++ linux-3.1.1/mm/vmalloc.c 2011-11-16 18:40:44.000000000 -0500
71182@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd,
71183
71184 pte = pte_offset_kernel(pmd, addr);
71185 do {
71186- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
71187- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
71188+
71189+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71190+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
71191+ BUG_ON(!pte_exec(*pte));
71192+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
71193+ continue;
71194+ }
71195+#endif
71196+
71197+ {
71198+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
71199+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
71200+ }
71201 } while (pte++, addr += PAGE_SIZE, addr != end);
71202 }
71203
71204@@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, un
71205 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
71206 {
71207 pte_t *pte;
71208+ int ret = -ENOMEM;
71209
71210 /*
71211 * nr is a running index into the array which helps higher level
71212@@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, un
71213 pte = pte_alloc_kernel(pmd, addr);
71214 if (!pte)
71215 return -ENOMEM;
71216+
71217+ pax_open_kernel();
71218 do {
71219 struct page *page = pages[*nr];
71220
71221- if (WARN_ON(!pte_none(*pte)))
71222- return -EBUSY;
71223- if (WARN_ON(!page))
71224- return -ENOMEM;
71225+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71226+ if (pgprot_val(prot) & _PAGE_NX)
71227+#endif
71228+
71229+ if (WARN_ON(!pte_none(*pte))) {
71230+ ret = -EBUSY;
71231+ goto out;
71232+ }
71233+ if (WARN_ON(!page)) {
71234+ ret = -ENOMEM;
71235+ goto out;
71236+ }
71237 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
71238 (*nr)++;
71239 } while (pte++, addr += PAGE_SIZE, addr != end);
71240- return 0;
71241+ ret = 0;
71242+out:
71243+ pax_close_kernel();
71244+ return ret;
71245 }
71246
71247 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
71248@@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void
71249 * and fall back on vmalloc() if that fails. Others
71250 * just put it in the vmalloc space.
71251 */
71252-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
71253+#ifdef CONFIG_MODULES
71254+#ifdef MODULES_VADDR
71255 unsigned long addr = (unsigned long)x;
71256 if (addr >= MODULES_VADDR && addr < MODULES_END)
71257 return 1;
71258 #endif
71259+
71260+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71261+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
71262+ return 1;
71263+#endif
71264+
71265+#endif
71266+
71267 return is_vmalloc_addr(x);
71268 }
71269
71270@@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void
71271
71272 if (!pgd_none(*pgd)) {
71273 pud_t *pud = pud_offset(pgd, addr);
71274+#ifdef CONFIG_X86
71275+ if (!pud_large(*pud))
71276+#endif
71277 if (!pud_none(*pud)) {
71278 pmd_t *pmd = pmd_offset(pud, addr);
71279+#ifdef CONFIG_X86
71280+ if (!pmd_large(*pmd))
71281+#endif
71282 if (!pmd_none(*pmd)) {
71283 pte_t *ptep, pte;
71284
71285@@ -1294,6 +1334,16 @@ static struct vm_struct *__get_vm_area_n
71286 struct vm_struct *area;
71287
71288 BUG_ON(in_interrupt());
71289+
71290+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71291+ if (flags & VM_KERNEXEC) {
71292+ if (start != VMALLOC_START || end != VMALLOC_END)
71293+ return NULL;
71294+ start = (unsigned long)MODULES_EXEC_VADDR;
71295+ end = (unsigned long)MODULES_EXEC_END;
71296+ }
71297+#endif
71298+
71299 if (flags & VM_IOREMAP) {
71300 int bit = fls(size);
71301
71302@@ -1526,6 +1576,11 @@ void *vmap(struct page **pages, unsigned
71303 if (count > totalram_pages)
71304 return NULL;
71305
71306+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71307+ if (!(pgprot_val(prot) & _PAGE_NX))
71308+ flags |= VM_KERNEXEC;
71309+#endif
71310+
71311 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
71312 __builtin_return_address(0));
71313 if (!area)
71314@@ -1627,6 +1682,13 @@ void *__vmalloc_node_range(unsigned long
71315 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
71316 return NULL;
71317
71318+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71319+ if (!(pgprot_val(prot) & _PAGE_NX))
71320+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
71321+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
71322+ else
71323+#endif
71324+
71325 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
71326 start, end, node, gfp_mask, caller);
71327
71328@@ -1672,6 +1734,7 @@ static void *__vmalloc_node(unsigned lon
71329 gfp_mask, prot, node, caller);
71330 }
71331
71332+#undef __vmalloc
71333 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
71334 {
71335 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
71336@@ -1695,6 +1758,7 @@ static inline void *__vmalloc_node_flags
71337 * For tight control over page level allocator and protection flags
71338 * use __vmalloc() instead.
71339 */
71340+#undef vmalloc
71341 void *vmalloc(unsigned long size)
71342 {
71343 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
71344@@ -1711,6 +1775,7 @@ EXPORT_SYMBOL(vmalloc);
71345 * For tight control over page level allocator and protection flags
71346 * use __vmalloc() instead.
71347 */
71348+#undef vzalloc
71349 void *vzalloc(unsigned long size)
71350 {
71351 return __vmalloc_node_flags(size, -1,
71352@@ -1725,6 +1790,7 @@ EXPORT_SYMBOL(vzalloc);
71353 * The resulting memory area is zeroed so it can be mapped to userspace
71354 * without leaking data.
71355 */
71356+#undef vmalloc_user
71357 void *vmalloc_user(unsigned long size)
71358 {
71359 struct vm_struct *area;
71360@@ -1752,6 +1818,7 @@ EXPORT_SYMBOL(vmalloc_user);
71361 * For tight control over page level allocator and protection flags
71362 * use __vmalloc() instead.
71363 */
71364+#undef vmalloc_node
71365 void *vmalloc_node(unsigned long size, int node)
71366 {
71367 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
71368@@ -1771,6 +1838,7 @@ EXPORT_SYMBOL(vmalloc_node);
71369 * For tight control over page level allocator and protection flags
71370 * use __vmalloc_node() instead.
71371 */
71372+#undef vzalloc_node
71373 void *vzalloc_node(unsigned long size, int node)
71374 {
71375 return __vmalloc_node_flags(size, node,
71376@@ -1793,10 +1861,10 @@ EXPORT_SYMBOL(vzalloc_node);
71377 * For tight control over page level allocator and protection flags
71378 * use __vmalloc() instead.
71379 */
71380-
71381+#undef vmalloc_exec
71382 void *vmalloc_exec(unsigned long size)
71383 {
71384- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
71385+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
71386 -1, __builtin_return_address(0));
71387 }
71388
71389@@ -1815,6 +1883,7 @@ void *vmalloc_exec(unsigned long size)
71390 * Allocate enough 32bit PA addressable pages to cover @size from the
71391 * page level allocator and map them into contiguous kernel virtual space.
71392 */
71393+#undef vmalloc_32
71394 void *vmalloc_32(unsigned long size)
71395 {
71396 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
71397@@ -1829,6 +1898,7 @@ EXPORT_SYMBOL(vmalloc_32);
71398 * The resulting memory area is 32bit addressable and zeroed so it can be
71399 * mapped to userspace without leaking data.
71400 */
71401+#undef vmalloc_32_user
71402 void *vmalloc_32_user(unsigned long size)
71403 {
71404 struct vm_struct *area;
71405@@ -2091,6 +2161,8 @@ int remap_vmalloc_range(struct vm_area_s
71406 unsigned long uaddr = vma->vm_start;
71407 unsigned long usize = vma->vm_end - vma->vm_start;
71408
71409+ BUG_ON(vma->vm_mirror);
71410+
71411 if ((PAGE_SIZE-1) & (unsigned long)addr)
71412 return -EINVAL;
71413
71414diff -urNp linux-3.1.1/mm/vmstat.c linux-3.1.1/mm/vmstat.c
71415--- linux-3.1.1/mm/vmstat.c 2011-11-11 15:19:27.000000000 -0500
71416+++ linux-3.1.1/mm/vmstat.c 2011-11-16 18:40:44.000000000 -0500
71417@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
71418 *
71419 * vm_stat contains the global counters
71420 */
71421-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
71422+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
71423 EXPORT_SYMBOL(vm_stat);
71424
71425 #ifdef CONFIG_SMP
71426@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
71427 v = p->vm_stat_diff[i];
71428 p->vm_stat_diff[i] = 0;
71429 local_irq_restore(flags);
71430- atomic_long_add(v, &zone->vm_stat[i]);
71431+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
71432 global_diff[i] += v;
71433 #ifdef CONFIG_NUMA
71434 /* 3 seconds idle till flush */
71435@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
71436
71437 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
71438 if (global_diff[i])
71439- atomic_long_add(global_diff[i], &vm_stat[i]);
71440+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
71441 }
71442
71443 #endif
71444@@ -1207,10 +1207,20 @@ static int __init setup_vmstat(void)
71445 start_cpu_timer(cpu);
71446 #endif
71447 #ifdef CONFIG_PROC_FS
71448- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
71449- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
71450- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
71451- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
71452+ {
71453+ mode_t gr_mode = S_IRUGO;
71454+#ifdef CONFIG_GRKERNSEC_PROC_ADD
71455+ gr_mode = S_IRUSR;
71456+#endif
71457+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
71458+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
71459+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
71460+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
71461+#else
71462+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
71463+#endif
71464+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
71465+ }
71466 #endif
71467 return 0;
71468 }
71469diff -urNp linux-3.1.1/net/8021q/vlan.c linux-3.1.1/net/8021q/vlan.c
71470--- linux-3.1.1/net/8021q/vlan.c 2011-11-11 15:19:27.000000000 -0500
71471+++ linux-3.1.1/net/8021q/vlan.c 2011-11-16 18:39:08.000000000 -0500
71472@@ -588,8 +588,7 @@ static int vlan_ioctl_handler(struct net
71473 err = -EPERM;
71474 if (!capable(CAP_NET_ADMIN))
71475 break;
71476- if ((args.u.name_type >= 0) &&
71477- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
71478+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
71479 struct vlan_net *vn;
71480
71481 vn = net_generic(net, vlan_net_id);
71482diff -urNp linux-3.1.1/net/9p/trans_fd.c linux-3.1.1/net/9p/trans_fd.c
71483--- linux-3.1.1/net/9p/trans_fd.c 2011-11-11 15:19:27.000000000 -0500
71484+++ linux-3.1.1/net/9p/trans_fd.c 2011-11-16 18:39:08.000000000 -0500
71485@@ -423,7 +423,7 @@ static int p9_fd_write(struct p9_client
71486 oldfs = get_fs();
71487 set_fs(get_ds());
71488 /* The cast to a user pointer is valid due to the set_fs() */
71489- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
71490+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
71491 set_fs(oldfs);
71492
71493 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
71494diff -urNp linux-3.1.1/net/9p/trans_virtio.c linux-3.1.1/net/9p/trans_virtio.c
71495--- linux-3.1.1/net/9p/trans_virtio.c 2011-11-11 15:19:27.000000000 -0500
71496+++ linux-3.1.1/net/9p/trans_virtio.c 2011-11-16 18:39:08.000000000 -0500
71497@@ -327,7 +327,7 @@ req_retry_pinned:
71498 } else {
71499 char *pbuf;
71500 if (req->tc->pubuf)
71501- pbuf = (__force char *) req->tc->pubuf;
71502+ pbuf = (char __force_kernel *) req->tc->pubuf;
71503 else
71504 pbuf = req->tc->pkbuf;
71505 outp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, pbuf,
71506@@ -357,7 +357,7 @@ req_retry_pinned:
71507 } else {
71508 char *pbuf;
71509 if (req->tc->pubuf)
71510- pbuf = (__force char *) req->tc->pubuf;
71511+ pbuf = (char __force_kernel *) req->tc->pubuf;
71512 else
71513 pbuf = req->tc->pkbuf;
71514
71515diff -urNp linux-3.1.1/net/atm/atm_misc.c linux-3.1.1/net/atm/atm_misc.c
71516--- linux-3.1.1/net/atm/atm_misc.c 2011-11-11 15:19:27.000000000 -0500
71517+++ linux-3.1.1/net/atm/atm_misc.c 2011-11-16 18:39:08.000000000 -0500
71518@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int
71519 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
71520 return 1;
71521 atm_return(vcc, truesize);
71522- atomic_inc(&vcc->stats->rx_drop);
71523+ atomic_inc_unchecked(&vcc->stats->rx_drop);
71524 return 0;
71525 }
71526 EXPORT_SYMBOL(atm_charge);
71527@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct
71528 }
71529 }
71530 atm_return(vcc, guess);
71531- atomic_inc(&vcc->stats->rx_drop);
71532+ atomic_inc_unchecked(&vcc->stats->rx_drop);
71533 return NULL;
71534 }
71535 EXPORT_SYMBOL(atm_alloc_charge);
71536@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
71537
71538 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
71539 {
71540-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71541+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71542 __SONET_ITEMS
71543 #undef __HANDLE_ITEM
71544 }
71545@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
71546
71547 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
71548 {
71549-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
71550+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
71551 __SONET_ITEMS
71552 #undef __HANDLE_ITEM
71553 }
71554diff -urNp linux-3.1.1/net/atm/lec.h linux-3.1.1/net/atm/lec.h
71555--- linux-3.1.1/net/atm/lec.h 2011-11-11 15:19:27.000000000 -0500
71556+++ linux-3.1.1/net/atm/lec.h 2011-11-16 18:39:08.000000000 -0500
71557@@ -48,7 +48,7 @@ struct lane2_ops {
71558 const u8 *tlvs, u32 sizeoftlvs);
71559 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
71560 const u8 *tlvs, u32 sizeoftlvs);
71561-};
71562+} __no_const;
71563
71564 /*
71565 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
71566diff -urNp linux-3.1.1/net/atm/mpc.h linux-3.1.1/net/atm/mpc.h
71567--- linux-3.1.1/net/atm/mpc.h 2011-11-11 15:19:27.000000000 -0500
71568+++ linux-3.1.1/net/atm/mpc.h 2011-11-16 18:39:08.000000000 -0500
71569@@ -33,7 +33,7 @@ struct mpoa_client {
71570 struct mpc_parameters parameters; /* parameters for this client */
71571
71572 const struct net_device_ops *old_ops;
71573- struct net_device_ops new_ops;
71574+ net_device_ops_no_const new_ops;
71575 };
71576
71577
71578diff -urNp linux-3.1.1/net/atm/mpoa_caches.c linux-3.1.1/net/atm/mpoa_caches.c
71579--- linux-3.1.1/net/atm/mpoa_caches.c 2011-11-11 15:19:27.000000000 -0500
71580+++ linux-3.1.1/net/atm/mpoa_caches.c 2011-11-16 18:40:44.000000000 -0500
71581@@ -255,6 +255,8 @@ static void check_resolving_entries(stru
71582 struct timeval now;
71583 struct k_message msg;
71584
71585+ pax_track_stack();
71586+
71587 do_gettimeofday(&now);
71588
71589 read_lock_bh(&client->ingress_lock);
71590diff -urNp linux-3.1.1/net/atm/proc.c linux-3.1.1/net/atm/proc.c
71591--- linux-3.1.1/net/atm/proc.c 2011-11-11 15:19:27.000000000 -0500
71592+++ linux-3.1.1/net/atm/proc.c 2011-11-16 18:39:08.000000000 -0500
71593@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *s
71594 const struct k_atm_aal_stats *stats)
71595 {
71596 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
71597- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
71598- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
71599- atomic_read(&stats->rx_drop));
71600+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
71601+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
71602+ atomic_read_unchecked(&stats->rx_drop));
71603 }
71604
71605 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
71606diff -urNp linux-3.1.1/net/atm/resources.c linux-3.1.1/net/atm/resources.c
71607--- linux-3.1.1/net/atm/resources.c 2011-11-11 15:19:27.000000000 -0500
71608+++ linux-3.1.1/net/atm/resources.c 2011-11-16 18:39:08.000000000 -0500
71609@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
71610 static void copy_aal_stats(struct k_atm_aal_stats *from,
71611 struct atm_aal_stats *to)
71612 {
71613-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71614+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71615 __AAL_STAT_ITEMS
71616 #undef __HANDLE_ITEM
71617 }
71618@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_
71619 static void subtract_aal_stats(struct k_atm_aal_stats *from,
71620 struct atm_aal_stats *to)
71621 {
71622-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
71623+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
71624 __AAL_STAT_ITEMS
71625 #undef __HANDLE_ITEM
71626 }
71627diff -urNp linux-3.1.1/net/batman-adv/hard-interface.c linux-3.1.1/net/batman-adv/hard-interface.c
71628--- linux-3.1.1/net/batman-adv/hard-interface.c 2011-11-11 15:19:27.000000000 -0500
71629+++ linux-3.1.1/net/batman-adv/hard-interface.c 2011-11-16 18:39:08.000000000 -0500
71630@@ -347,8 +347,8 @@ int hardif_enable_interface(struct hard_
71631 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
71632 dev_add_pack(&hard_iface->batman_adv_ptype);
71633
71634- atomic_set(&hard_iface->seqno, 1);
71635- atomic_set(&hard_iface->frag_seqno, 1);
71636+ atomic_set_unchecked(&hard_iface->seqno, 1);
71637+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
71638 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
71639 hard_iface->net_dev->name);
71640
71641diff -urNp linux-3.1.1/net/batman-adv/routing.c linux-3.1.1/net/batman-adv/routing.c
71642--- linux-3.1.1/net/batman-adv/routing.c 2011-11-11 15:19:27.000000000 -0500
71643+++ linux-3.1.1/net/batman-adv/routing.c 2011-11-16 18:39:08.000000000 -0500
71644@@ -656,7 +656,7 @@ void receive_bat_packet(const struct eth
71645 return;
71646
71647 /* could be changed by schedule_own_packet() */
71648- if_incoming_seqno = atomic_read(&if_incoming->seqno);
71649+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
71650
71651 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
71652
71653diff -urNp linux-3.1.1/net/batman-adv/send.c linux-3.1.1/net/batman-adv/send.c
71654--- linux-3.1.1/net/batman-adv/send.c 2011-11-11 15:19:27.000000000 -0500
71655+++ linux-3.1.1/net/batman-adv/send.c 2011-11-16 18:39:08.000000000 -0500
71656@@ -326,7 +326,7 @@ void schedule_own_packet(struct hard_ifa
71657
71658 /* change sequence number to network order */
71659 batman_packet->seqno =
71660- htonl((uint32_t)atomic_read(&hard_iface->seqno));
71661+ htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
71662
71663 batman_packet->ttvn = atomic_read(&bat_priv->ttvn);
71664 batman_packet->tt_crc = htons((uint16_t)atomic_read(&bat_priv->tt_crc));
71665@@ -343,7 +343,7 @@ void schedule_own_packet(struct hard_ifa
71666 else
71667 batman_packet->gw_flags = NO_FLAGS;
71668
71669- atomic_inc(&hard_iface->seqno);
71670+ atomic_inc_unchecked(&hard_iface->seqno);
71671
71672 slide_own_bcast_window(hard_iface);
71673 send_time = own_send_time(bat_priv);
71674diff -urNp linux-3.1.1/net/batman-adv/soft-interface.c linux-3.1.1/net/batman-adv/soft-interface.c
71675--- linux-3.1.1/net/batman-adv/soft-interface.c 2011-11-11 15:19:27.000000000 -0500
71676+++ linux-3.1.1/net/batman-adv/soft-interface.c 2011-11-16 18:39:08.000000000 -0500
71677@@ -632,7 +632,7 @@ static int interface_tx(struct sk_buff *
71678
71679 /* set broadcast sequence number */
71680 bcast_packet->seqno =
71681- htonl(atomic_inc_return(&bat_priv->bcast_seqno));
71682+ htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
71683
71684 add_bcast_packet_to_list(bat_priv, skb, 1);
71685
71686@@ -824,7 +824,7 @@ struct net_device *softif_create(const c
71687 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
71688
71689 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
71690- atomic_set(&bat_priv->bcast_seqno, 1);
71691+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
71692 atomic_set(&bat_priv->ttvn, 0);
71693 atomic_set(&bat_priv->tt_local_changes, 0);
71694 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
71695diff -urNp linux-3.1.1/net/batman-adv/types.h linux-3.1.1/net/batman-adv/types.h
71696--- linux-3.1.1/net/batman-adv/types.h 2011-11-11 15:19:27.000000000 -0500
71697+++ linux-3.1.1/net/batman-adv/types.h 2011-11-16 18:39:08.000000000 -0500
71698@@ -38,8 +38,8 @@ struct hard_iface {
71699 int16_t if_num;
71700 char if_status;
71701 struct net_device *net_dev;
71702- atomic_t seqno;
71703- atomic_t frag_seqno;
71704+ atomic_unchecked_t seqno;
71705+ atomic_unchecked_t frag_seqno;
71706 unsigned char *packet_buff;
71707 int packet_len;
71708 struct kobject *hardif_obj;
71709@@ -153,7 +153,7 @@ struct bat_priv {
71710 atomic_t orig_interval; /* uint */
71711 atomic_t hop_penalty; /* uint */
71712 atomic_t log_level; /* uint */
71713- atomic_t bcast_seqno;
71714+ atomic_unchecked_t bcast_seqno;
71715 atomic_t bcast_queue_left;
71716 atomic_t batman_queue_left;
71717 atomic_t ttvn; /* tranlation table version number */
71718diff -urNp linux-3.1.1/net/batman-adv/unicast.c linux-3.1.1/net/batman-adv/unicast.c
71719--- linux-3.1.1/net/batman-adv/unicast.c 2011-11-11 15:19:27.000000000 -0500
71720+++ linux-3.1.1/net/batman-adv/unicast.c 2011-11-16 18:39:08.000000000 -0500
71721@@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, s
71722 frag1->flags = UNI_FRAG_HEAD | large_tail;
71723 frag2->flags = large_tail;
71724
71725- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
71726+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
71727 frag1->seqno = htons(seqno - 1);
71728 frag2->seqno = htons(seqno);
71729
71730diff -urNp linux-3.1.1/net/bluetooth/hci_conn.c linux-3.1.1/net/bluetooth/hci_conn.c
71731--- linux-3.1.1/net/bluetooth/hci_conn.c 2011-11-11 15:19:27.000000000 -0500
71732+++ linux-3.1.1/net/bluetooth/hci_conn.c 2011-11-16 18:39:08.000000000 -0500
71733@@ -218,7 +218,7 @@ void hci_le_start_enc(struct hci_conn *c
71734 cp.handle = cpu_to_le16(conn->handle);
71735 memcpy(cp.ltk, ltk, sizeof(cp.ltk));
71736 cp.ediv = ediv;
71737- memcpy(cp.rand, rand, sizeof(rand));
71738+ memcpy(cp.rand, rand, sizeof(cp.rand));
71739
71740 hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
71741 }
71742@@ -234,7 +234,7 @@ void hci_le_ltk_reply(struct hci_conn *c
71743 memset(&cp, 0, sizeof(cp));
71744
71745 cp.handle = cpu_to_le16(conn->handle);
71746- memcpy(cp.ltk, ltk, sizeof(ltk));
71747+ memcpy(cp.ltk, ltk, sizeof(cp.ltk));
71748
71749 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
71750 }
71751diff -urNp linux-3.1.1/net/bridge/br_multicast.c linux-3.1.1/net/bridge/br_multicast.c
71752--- linux-3.1.1/net/bridge/br_multicast.c 2011-11-11 15:19:27.000000000 -0500
71753+++ linux-3.1.1/net/bridge/br_multicast.c 2011-11-16 18:39:08.000000000 -0500
71754@@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct
71755 nexthdr = ip6h->nexthdr;
71756 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
71757
71758- if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
71759+ if (nexthdr != IPPROTO_ICMPV6)
71760 return 0;
71761
71762 /* Okay, we found ICMPv6 header */
71763diff -urNp linux-3.1.1/net/bridge/netfilter/ebtables.c linux-3.1.1/net/bridge/netfilter/ebtables.c
71764--- linux-3.1.1/net/bridge/netfilter/ebtables.c 2011-11-11 15:19:27.000000000 -0500
71765+++ linux-3.1.1/net/bridge/netfilter/ebtables.c 2011-11-16 18:40:44.000000000 -0500
71766@@ -1513,7 +1513,7 @@ static int do_ebt_get_ctl(struct sock *s
71767 tmp.valid_hooks = t->table->valid_hooks;
71768 }
71769 mutex_unlock(&ebt_mutex);
71770- if (copy_to_user(user, &tmp, *len) != 0){
71771+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
71772 BUGPRINT("c2u Didn't work\n");
71773 ret = -EFAULT;
71774 break;
71775@@ -1781,6 +1781,8 @@ static int compat_copy_everything_to_use
71776 int ret;
71777 void __user *pos;
71778
71779+ pax_track_stack();
71780+
71781 memset(&tinfo, 0, sizeof(tinfo));
71782
71783 if (cmd == EBT_SO_GET_ENTRIES) {
71784diff -urNp linux-3.1.1/net/caif/caif_socket.c linux-3.1.1/net/caif/caif_socket.c
71785--- linux-3.1.1/net/caif/caif_socket.c 2011-11-11 15:19:27.000000000 -0500
71786+++ linux-3.1.1/net/caif/caif_socket.c 2011-11-16 18:39:08.000000000 -0500
71787@@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
71788 #ifdef CONFIG_DEBUG_FS
71789 struct debug_fs_counter {
71790 atomic_t caif_nr_socks;
71791- atomic_t caif_sock_create;
71792- atomic_t num_connect_req;
71793- atomic_t num_connect_resp;
71794- atomic_t num_connect_fail_resp;
71795- atomic_t num_disconnect;
71796- atomic_t num_remote_shutdown_ind;
71797- atomic_t num_tx_flow_off_ind;
71798- atomic_t num_tx_flow_on_ind;
71799- atomic_t num_rx_flow_off;
71800- atomic_t num_rx_flow_on;
71801+ atomic_unchecked_t caif_sock_create;
71802+ atomic_unchecked_t num_connect_req;
71803+ atomic_unchecked_t num_connect_resp;
71804+ atomic_unchecked_t num_connect_fail_resp;
71805+ atomic_unchecked_t num_disconnect;
71806+ atomic_unchecked_t num_remote_shutdown_ind;
71807+ atomic_unchecked_t num_tx_flow_off_ind;
71808+ atomic_unchecked_t num_tx_flow_on_ind;
71809+ atomic_unchecked_t num_rx_flow_off;
71810+ atomic_unchecked_t num_rx_flow_on;
71811 };
71812 static struct debug_fs_counter cnt;
71813 #define dbfs_atomic_inc(v) atomic_inc_return(v)
71814+#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
71815 #define dbfs_atomic_dec(v) atomic_dec_return(v)
71816 #else
71817 #define dbfs_atomic_inc(v) 0
71818@@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct soc
71819 atomic_read(&cf_sk->sk.sk_rmem_alloc),
71820 sk_rcvbuf_lowwater(cf_sk));
71821 set_rx_flow_off(cf_sk);
71822- dbfs_atomic_inc(&cnt.num_rx_flow_off);
71823+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
71824 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
71825 }
71826
71827@@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct soc
71828 set_rx_flow_off(cf_sk);
71829 if (net_ratelimit())
71830 pr_debug("sending flow OFF due to rmem_schedule\n");
71831- dbfs_atomic_inc(&cnt.num_rx_flow_off);
71832+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
71833 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
71834 }
71835 skb->dev = NULL;
71836@@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer
71837 switch (flow) {
71838 case CAIF_CTRLCMD_FLOW_ON_IND:
71839 /* OK from modem to start sending again */
71840- dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
71841+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
71842 set_tx_flow_on(cf_sk);
71843 cf_sk->sk.sk_state_change(&cf_sk->sk);
71844 break;
71845
71846 case CAIF_CTRLCMD_FLOW_OFF_IND:
71847 /* Modem asks us to shut up */
71848- dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
71849+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
71850 set_tx_flow_off(cf_sk);
71851 cf_sk->sk.sk_state_change(&cf_sk->sk);
71852 break;
71853@@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer
71854 /* We're now connected */
71855 caif_client_register_refcnt(&cf_sk->layer,
71856 cfsk_hold, cfsk_put);
71857- dbfs_atomic_inc(&cnt.num_connect_resp);
71858+ dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
71859 cf_sk->sk.sk_state = CAIF_CONNECTED;
71860 set_tx_flow_on(cf_sk);
71861 cf_sk->sk.sk_state_change(&cf_sk->sk);
71862@@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer
71863
71864 case CAIF_CTRLCMD_INIT_FAIL_RSP:
71865 /* Connect request failed */
71866- dbfs_atomic_inc(&cnt.num_connect_fail_resp);
71867+ dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
71868 cf_sk->sk.sk_err = ECONNREFUSED;
71869 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
71870 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
71871@@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer
71872
71873 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
71874 /* Modem has closed this connection, or device is down. */
71875- dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
71876+ dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
71877 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
71878 cf_sk->sk.sk_err = ECONNRESET;
71879 set_rx_flow_on(cf_sk);
71880@@ -297,7 +298,7 @@ static void caif_check_flow_release(stru
71881 return;
71882
71883 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
71884- dbfs_atomic_inc(&cnt.num_rx_flow_on);
71885+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
71886 set_rx_flow_on(cf_sk);
71887 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
71888 }
71889@@ -854,7 +855,7 @@ static int caif_connect(struct socket *s
71890 /*ifindex = id of the interface.*/
71891 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
71892
71893- dbfs_atomic_inc(&cnt.num_connect_req);
71894+ dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
71895 cf_sk->layer.receive = caif_sktrecv_cb;
71896
71897 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
71898@@ -943,7 +944,7 @@ static int caif_release(struct socket *s
71899 spin_unlock_bh(&sk->sk_receive_queue.lock);
71900 sock->sk = NULL;
71901
71902- dbfs_atomic_inc(&cnt.num_disconnect);
71903+ dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
71904
71905 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
71906 if (cf_sk->debugfs_socket_dir != NULL)
71907@@ -1122,7 +1123,7 @@ static int caif_create(struct net *net,
71908 cf_sk->conn_req.protocol = protocol;
71909 /* Increase the number of sockets created. */
71910 dbfs_atomic_inc(&cnt.caif_nr_socks);
71911- num = dbfs_atomic_inc(&cnt.caif_sock_create);
71912+ num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
71913 #ifdef CONFIG_DEBUG_FS
71914 if (!IS_ERR(debugfsdir)) {
71915
71916diff -urNp linux-3.1.1/net/caif/cfctrl.c linux-3.1.1/net/caif/cfctrl.c
71917--- linux-3.1.1/net/caif/cfctrl.c 2011-11-11 15:19:27.000000000 -0500
71918+++ linux-3.1.1/net/caif/cfctrl.c 2011-11-16 18:40:44.000000000 -0500
71919@@ -9,6 +9,7 @@
71920 #include <linux/stddef.h>
71921 #include <linux/spinlock.h>
71922 #include <linux/slab.h>
71923+#include <linux/sched.h>
71924 #include <net/caif/caif_layer.h>
71925 #include <net/caif/cfpkt.h>
71926 #include <net/caif/cfctrl.h>
71927@@ -45,8 +46,8 @@ struct cflayer *cfctrl_create(void)
71928 dev_info.id = 0xff;
71929 memset(this, 0, sizeof(*this));
71930 cfsrvl_init(&this->serv, 0, &dev_info, false);
71931- atomic_set(&this->req_seq_no, 1);
71932- atomic_set(&this->rsp_seq_no, 1);
71933+ atomic_set_unchecked(&this->req_seq_no, 1);
71934+ atomic_set_unchecked(&this->rsp_seq_no, 1);
71935 this->serv.layer.receive = cfctrl_recv;
71936 sprintf(this->serv.layer.name, "ctrl");
71937 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
71938@@ -132,8 +133,8 @@ static void cfctrl_insert_req(struct cfc
71939 struct cfctrl_request_info *req)
71940 {
71941 spin_lock_bh(&ctrl->info_list_lock);
71942- atomic_inc(&ctrl->req_seq_no);
71943- req->sequence_no = atomic_read(&ctrl->req_seq_no);
71944+ atomic_inc_unchecked(&ctrl->req_seq_no);
71945+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
71946 list_add_tail(&req->list, &ctrl->list);
71947 spin_unlock_bh(&ctrl->info_list_lock);
71948 }
71949@@ -151,7 +152,7 @@ static struct cfctrl_request_info *cfctr
71950 if (p != first)
71951 pr_warn("Requests are not received in order\n");
71952
71953- atomic_set(&ctrl->rsp_seq_no,
71954+ atomic_set_unchecked(&ctrl->rsp_seq_no,
71955 p->sequence_no);
71956 list_del(&p->list);
71957 goto out;
71958@@ -364,6 +365,7 @@ static int cfctrl_recv(struct cflayer *l
71959 struct cfctrl *cfctrl = container_obj(layer);
71960 struct cfctrl_request_info rsp, *req;
71961
71962+ pax_track_stack();
71963
71964 cfpkt_extr_head(pkt, &cmdrsp, 1);
71965 cmd = cmdrsp & CFCTRL_CMD_MASK;
71966diff -urNp linux-3.1.1/net/compat.c linux-3.1.1/net/compat.c
71967--- linux-3.1.1/net/compat.c 2011-11-11 15:19:27.000000000 -0500
71968+++ linux-3.1.1/net/compat.c 2011-11-16 18:39:08.000000000 -0500
71969@@ -70,9 +70,9 @@ int get_compat_msghdr(struct msghdr *kms
71970 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
71971 __get_user(kmsg->msg_flags, &umsg->msg_flags))
71972 return -EFAULT;
71973- kmsg->msg_name = compat_ptr(tmp1);
71974- kmsg->msg_iov = compat_ptr(tmp2);
71975- kmsg->msg_control = compat_ptr(tmp3);
71976+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
71977+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
71978+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
71979 return 0;
71980 }
71981
71982@@ -84,7 +84,7 @@ int verify_compat_iovec(struct msghdr *k
71983
71984 if (kern_msg->msg_namelen) {
71985 if (mode == VERIFY_READ) {
71986- int err = move_addr_to_kernel(kern_msg->msg_name,
71987+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
71988 kern_msg->msg_namelen,
71989 kern_address);
71990 if (err < 0)
71991@@ -95,7 +95,7 @@ int verify_compat_iovec(struct msghdr *k
71992 kern_msg->msg_name = NULL;
71993
71994 tot_len = iov_from_user_compat_to_kern(kern_iov,
71995- (struct compat_iovec __user *)kern_msg->msg_iov,
71996+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
71997 kern_msg->msg_iovlen);
71998 if (tot_len >= 0)
71999 kern_msg->msg_iov = kern_iov;
72000@@ -115,20 +115,20 @@ int verify_compat_iovec(struct msghdr *k
72001
72002 #define CMSG_COMPAT_FIRSTHDR(msg) \
72003 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
72004- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
72005+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
72006 (struct compat_cmsghdr __user *)NULL)
72007
72008 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
72009 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
72010 (ucmlen) <= (unsigned long) \
72011 ((mhdr)->msg_controllen - \
72012- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
72013+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
72014
72015 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
72016 struct compat_cmsghdr __user *cmsg, int cmsg_len)
72017 {
72018 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
72019- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
72020+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
72021 msg->msg_controllen)
72022 return NULL;
72023 return (struct compat_cmsghdr __user *)ptr;
72024@@ -220,7 +220,7 @@ int put_cmsg_compat(struct msghdr *kmsg,
72025 {
72026 struct compat_timeval ctv;
72027 struct compat_timespec cts[3];
72028- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
72029+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
72030 struct compat_cmsghdr cmhdr;
72031 int cmlen;
72032
72033@@ -272,7 +272,7 @@ int put_cmsg_compat(struct msghdr *kmsg,
72034
72035 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
72036 {
72037- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
72038+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
72039 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
72040 int fdnum = scm->fp->count;
72041 struct file **fp = scm->fp->fp;
72042@@ -369,7 +369,7 @@ static int do_set_sock_timeout(struct so
72043 return -EFAULT;
72044 old_fs = get_fs();
72045 set_fs(KERNEL_DS);
72046- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
72047+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
72048 set_fs(old_fs);
72049
72050 return err;
72051@@ -430,7 +430,7 @@ static int do_get_sock_timeout(struct so
72052 len = sizeof(ktime);
72053 old_fs = get_fs();
72054 set_fs(KERNEL_DS);
72055- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
72056+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
72057 set_fs(old_fs);
72058
72059 if (!err) {
72060@@ -565,7 +565,7 @@ int compat_mc_setsockopt(struct sock *so
72061 case MCAST_JOIN_GROUP:
72062 case MCAST_LEAVE_GROUP:
72063 {
72064- struct compat_group_req __user *gr32 = (void *)optval;
72065+ struct compat_group_req __user *gr32 = (void __user *)optval;
72066 struct group_req __user *kgr =
72067 compat_alloc_user_space(sizeof(struct group_req));
72068 u32 interface;
72069@@ -586,7 +586,7 @@ int compat_mc_setsockopt(struct sock *so
72070 case MCAST_BLOCK_SOURCE:
72071 case MCAST_UNBLOCK_SOURCE:
72072 {
72073- struct compat_group_source_req __user *gsr32 = (void *)optval;
72074+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
72075 struct group_source_req __user *kgsr = compat_alloc_user_space(
72076 sizeof(struct group_source_req));
72077 u32 interface;
72078@@ -607,7 +607,7 @@ int compat_mc_setsockopt(struct sock *so
72079 }
72080 case MCAST_MSFILTER:
72081 {
72082- struct compat_group_filter __user *gf32 = (void *)optval;
72083+ struct compat_group_filter __user *gf32 = (void __user *)optval;
72084 struct group_filter __user *kgf;
72085 u32 interface, fmode, numsrc;
72086
72087@@ -645,7 +645,7 @@ int compat_mc_getsockopt(struct sock *so
72088 char __user *optval, int __user *optlen,
72089 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
72090 {
72091- struct compat_group_filter __user *gf32 = (void *)optval;
72092+ struct compat_group_filter __user *gf32 = (void __user *)optval;
72093 struct group_filter __user *kgf;
72094 int __user *koptlen;
72095 u32 interface, fmode, numsrc;
72096diff -urNp linux-3.1.1/net/core/datagram.c linux-3.1.1/net/core/datagram.c
72097--- linux-3.1.1/net/core/datagram.c 2011-11-11 15:19:27.000000000 -0500
72098+++ linux-3.1.1/net/core/datagram.c 2011-11-16 18:39:08.000000000 -0500
72099@@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, s
72100 }
72101
72102 kfree_skb(skb);
72103- atomic_inc(&sk->sk_drops);
72104+ atomic_inc_unchecked(&sk->sk_drops);
72105 sk_mem_reclaim_partial(sk);
72106
72107 return err;
72108diff -urNp linux-3.1.1/net/core/dev.c linux-3.1.1/net/core/dev.c
72109--- linux-3.1.1/net/core/dev.c 2011-11-11 15:19:27.000000000 -0500
72110+++ linux-3.1.1/net/core/dev.c 2011-11-16 18:40:44.000000000 -0500
72111@@ -1135,10 +1135,14 @@ void dev_load(struct net *net, const cha
72112 if (no_module && capable(CAP_NET_ADMIN))
72113 no_module = request_module("netdev-%s", name);
72114 if (no_module && capable(CAP_SYS_MODULE)) {
72115+#ifdef CONFIG_GRKERNSEC_MODHARDEN
72116+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
72117+#else
72118 if (!request_module("%s", name))
72119 pr_err("Loading kernel module for a network device "
72120 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
72121 "instead\n", name);
72122+#endif
72123 }
72124 }
72125 EXPORT_SYMBOL(dev_load);
72126@@ -1977,7 +1981,7 @@ static int illegal_highdma(struct net_de
72127
72128 struct dev_gso_cb {
72129 void (*destructor)(struct sk_buff *skb);
72130-};
72131+} __no_const;
72132
72133 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
72134
72135@@ -2930,7 +2934,7 @@ int netif_rx_ni(struct sk_buff *skb)
72136 }
72137 EXPORT_SYMBOL(netif_rx_ni);
72138
72139-static void net_tx_action(struct softirq_action *h)
72140+static void net_tx_action(void)
72141 {
72142 struct softnet_data *sd = &__get_cpu_var(softnet_data);
72143
72144@@ -3779,7 +3783,7 @@ void netif_napi_del(struct napi_struct *
72145 }
72146 EXPORT_SYMBOL(netif_napi_del);
72147
72148-static void net_rx_action(struct softirq_action *h)
72149+static void net_rx_action(void)
72150 {
72151 struct softnet_data *sd = &__get_cpu_var(softnet_data);
72152 unsigned long time_limit = jiffies + 2;
72153diff -urNp linux-3.1.1/net/core/flow.c linux-3.1.1/net/core/flow.c
72154--- linux-3.1.1/net/core/flow.c 2011-11-11 15:19:27.000000000 -0500
72155+++ linux-3.1.1/net/core/flow.c 2011-11-16 18:39:08.000000000 -0500
72156@@ -61,7 +61,7 @@ struct flow_cache {
72157 struct timer_list rnd_timer;
72158 };
72159
72160-atomic_t flow_cache_genid = ATOMIC_INIT(0);
72161+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
72162 EXPORT_SYMBOL(flow_cache_genid);
72163 static struct flow_cache flow_cache_global;
72164 static struct kmem_cache *flow_cachep __read_mostly;
72165@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsig
72166
72167 static int flow_entry_valid(struct flow_cache_entry *fle)
72168 {
72169- if (atomic_read(&flow_cache_genid) != fle->genid)
72170+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
72171 return 0;
72172 if (fle->object && !fle->object->ops->check(fle->object))
72173 return 0;
72174@@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const
72175 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
72176 fcp->hash_count++;
72177 }
72178- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
72179+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
72180 flo = fle->object;
72181 if (!flo)
72182 goto ret_object;
72183@@ -280,7 +280,7 @@ nocache:
72184 }
72185 flo = resolver(net, key, family, dir, flo, ctx);
72186 if (fle) {
72187- fle->genid = atomic_read(&flow_cache_genid);
72188+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
72189 if (!IS_ERR(flo))
72190 fle->object = flo;
72191 else
72192diff -urNp linux-3.1.1/net/core/iovec.c linux-3.1.1/net/core/iovec.c
72193--- linux-3.1.1/net/core/iovec.c 2011-11-11 15:19:27.000000000 -0500
72194+++ linux-3.1.1/net/core/iovec.c 2011-11-16 18:39:08.000000000 -0500
72195@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struc
72196 if (m->msg_namelen) {
72197 if (mode == VERIFY_READ) {
72198 void __user *namep;
72199- namep = (void __user __force *) m->msg_name;
72200+ namep = (void __force_user *) m->msg_name;
72201 err = move_addr_to_kernel(namep, m->msg_namelen,
72202 address);
72203 if (err < 0)
72204@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struc
72205 }
72206
72207 size = m->msg_iovlen * sizeof(struct iovec);
72208- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
72209+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
72210 return -EFAULT;
72211
72212 m->msg_iov = iov;
72213diff -urNp linux-3.1.1/net/core/rtnetlink.c linux-3.1.1/net/core/rtnetlink.c
72214--- linux-3.1.1/net/core/rtnetlink.c 2011-11-11 15:19:27.000000000 -0500
72215+++ linux-3.1.1/net/core/rtnetlink.c 2011-11-16 18:39:08.000000000 -0500
72216@@ -57,7 +57,7 @@ struct rtnl_link {
72217 rtnl_doit_func doit;
72218 rtnl_dumpit_func dumpit;
72219 rtnl_calcit_func calcit;
72220-};
72221+} __no_const;
72222
72223 static DEFINE_MUTEX(rtnl_mutex);
72224 static u16 min_ifinfo_dump_size;
72225diff -urNp linux-3.1.1/net/core/scm.c linux-3.1.1/net/core/scm.c
72226--- linux-3.1.1/net/core/scm.c 2011-11-11 15:19:27.000000000 -0500
72227+++ linux-3.1.1/net/core/scm.c 2011-11-16 18:39:08.000000000 -0500
72228@@ -218,7 +218,7 @@ EXPORT_SYMBOL(__scm_send);
72229 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
72230 {
72231 struct cmsghdr __user *cm
72232- = (__force struct cmsghdr __user *)msg->msg_control;
72233+ = (struct cmsghdr __force_user *)msg->msg_control;
72234 struct cmsghdr cmhdr;
72235 int cmlen = CMSG_LEN(len);
72236 int err;
72237@@ -241,7 +241,7 @@ int put_cmsg(struct msghdr * msg, int le
72238 err = -EFAULT;
72239 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
72240 goto out;
72241- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
72242+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
72243 goto out;
72244 cmlen = CMSG_SPACE(len);
72245 if (msg->msg_controllen < cmlen)
72246@@ -257,7 +257,7 @@ EXPORT_SYMBOL(put_cmsg);
72247 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
72248 {
72249 struct cmsghdr __user *cm
72250- = (__force struct cmsghdr __user*)msg->msg_control;
72251+ = (struct cmsghdr __force_user *)msg->msg_control;
72252
72253 int fdmax = 0;
72254 int fdnum = scm->fp->count;
72255@@ -277,7 +277,7 @@ void scm_detach_fds(struct msghdr *msg,
72256 if (fdnum < fdmax)
72257 fdmax = fdnum;
72258
72259- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
72260+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
72261 i++, cmfptr++)
72262 {
72263 int new_fd;
72264diff -urNp linux-3.1.1/net/core/skbuff.c linux-3.1.1/net/core/skbuff.c
72265--- linux-3.1.1/net/core/skbuff.c 2011-11-11 15:19:27.000000000 -0500
72266+++ linux-3.1.1/net/core/skbuff.c 2011-11-16 18:40:44.000000000 -0500
72267@@ -1650,6 +1650,8 @@ int skb_splice_bits(struct sk_buff *skb,
72268 struct sock *sk = skb->sk;
72269 int ret = 0;
72270
72271+ pax_track_stack();
72272+
72273 if (splice_grow_spd(pipe, &spd))
72274 return -ENOMEM;
72275
72276diff -urNp linux-3.1.1/net/core/sock.c linux-3.1.1/net/core/sock.c
72277--- linux-3.1.1/net/core/sock.c 2011-11-11 15:19:27.000000000 -0500
72278+++ linux-3.1.1/net/core/sock.c 2011-11-16 18:40:44.000000000 -0500
72279@@ -293,7 +293,7 @@ int sock_queue_rcv_skb(struct sock *sk,
72280 */
72281 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
72282 (unsigned)sk->sk_rcvbuf) {
72283- atomic_inc(&sk->sk_drops);
72284+ atomic_inc_unchecked(&sk->sk_drops);
72285 trace_sock_rcvqueue_full(sk, skb);
72286 return -ENOMEM;
72287 }
72288@@ -303,7 +303,7 @@ int sock_queue_rcv_skb(struct sock *sk,
72289 return err;
72290
72291 if (!sk_rmem_schedule(sk, skb->truesize)) {
72292- atomic_inc(&sk->sk_drops);
72293+ atomic_inc_unchecked(&sk->sk_drops);
72294 return -ENOBUFS;
72295 }
72296
72297@@ -323,7 +323,7 @@ int sock_queue_rcv_skb(struct sock *sk,
72298 skb_dst_force(skb);
72299
72300 spin_lock_irqsave(&list->lock, flags);
72301- skb->dropcount = atomic_read(&sk->sk_drops);
72302+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
72303 __skb_queue_tail(list, skb);
72304 spin_unlock_irqrestore(&list->lock, flags);
72305
72306@@ -343,7 +343,7 @@ int sk_receive_skb(struct sock *sk, stru
72307 skb->dev = NULL;
72308
72309 if (sk_rcvqueues_full(sk, skb)) {
72310- atomic_inc(&sk->sk_drops);
72311+ atomic_inc_unchecked(&sk->sk_drops);
72312 goto discard_and_relse;
72313 }
72314 if (nested)
72315@@ -361,7 +361,7 @@ int sk_receive_skb(struct sock *sk, stru
72316 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
72317 } else if (sk_add_backlog(sk, skb)) {
72318 bh_unlock_sock(sk);
72319- atomic_inc(&sk->sk_drops);
72320+ atomic_inc_unchecked(&sk->sk_drops);
72321 goto discard_and_relse;
72322 }
72323
72324@@ -924,7 +924,7 @@ int sock_getsockopt(struct socket *sock,
72325 if (len > sizeof(peercred))
72326 len = sizeof(peercred);
72327 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
72328- if (copy_to_user(optval, &peercred, len))
72329+ if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
72330 return -EFAULT;
72331 goto lenout;
72332 }
72333@@ -937,7 +937,7 @@ int sock_getsockopt(struct socket *sock,
72334 return -ENOTCONN;
72335 if (lv < len)
72336 return -EINVAL;
72337- if (copy_to_user(optval, address, len))
72338+ if (len > sizeof(address) || copy_to_user(optval, address, len))
72339 return -EFAULT;
72340 goto lenout;
72341 }
72342@@ -970,7 +970,7 @@ int sock_getsockopt(struct socket *sock,
72343
72344 if (len > lv)
72345 len = lv;
72346- if (copy_to_user(optval, &v, len))
72347+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
72348 return -EFAULT;
72349 lenout:
72350 if (put_user(len, optlen))
72351@@ -2029,7 +2029,7 @@ void sock_init_data(struct socket *sock,
72352 */
72353 smp_wmb();
72354 atomic_set(&sk->sk_refcnt, 1);
72355- atomic_set(&sk->sk_drops, 0);
72356+ atomic_set_unchecked(&sk->sk_drops, 0);
72357 }
72358 EXPORT_SYMBOL(sock_init_data);
72359
72360diff -urNp linux-3.1.1/net/decnet/sysctl_net_decnet.c linux-3.1.1/net/decnet/sysctl_net_decnet.c
72361--- linux-3.1.1/net/decnet/sysctl_net_decnet.c 2011-11-11 15:19:27.000000000 -0500
72362+++ linux-3.1.1/net/decnet/sysctl_net_decnet.c 2011-11-16 18:39:08.000000000 -0500
72363@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_t
72364
72365 if (len > *lenp) len = *lenp;
72366
72367- if (copy_to_user(buffer, addr, len))
72368+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
72369 return -EFAULT;
72370
72371 *lenp = len;
72372@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table
72373
72374 if (len > *lenp) len = *lenp;
72375
72376- if (copy_to_user(buffer, devname, len))
72377+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
72378 return -EFAULT;
72379
72380 *lenp = len;
72381diff -urNp linux-3.1.1/net/econet/Kconfig linux-3.1.1/net/econet/Kconfig
72382--- linux-3.1.1/net/econet/Kconfig 2011-11-11 15:19:27.000000000 -0500
72383+++ linux-3.1.1/net/econet/Kconfig 2011-11-16 18:40:44.000000000 -0500
72384@@ -4,7 +4,7 @@
72385
72386 config ECONET
72387 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
72388- depends on EXPERIMENTAL && INET
72389+ depends on EXPERIMENTAL && INET && BROKEN
72390 ---help---
72391 Econet is a fairly old and slow networking protocol mainly used by
72392 Acorn computers to access file and print servers. It uses native
72393diff -urNp linux-3.1.1/net/ipv4/fib_frontend.c linux-3.1.1/net/ipv4/fib_frontend.c
72394--- linux-3.1.1/net/ipv4/fib_frontend.c 2011-11-11 15:19:27.000000000 -0500
72395+++ linux-3.1.1/net/ipv4/fib_frontend.c 2011-11-16 18:39:08.000000000 -0500
72396@@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct not
72397 #ifdef CONFIG_IP_ROUTE_MULTIPATH
72398 fib_sync_up(dev);
72399 #endif
72400- atomic_inc(&net->ipv4.dev_addr_genid);
72401+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
72402 rt_cache_flush(dev_net(dev), -1);
72403 break;
72404 case NETDEV_DOWN:
72405 fib_del_ifaddr(ifa, NULL);
72406- atomic_inc(&net->ipv4.dev_addr_genid);
72407+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
72408 if (ifa->ifa_dev->ifa_list == NULL) {
72409 /* Last address was deleted from this interface.
72410 * Disable IP.
72411@@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notif
72412 #ifdef CONFIG_IP_ROUTE_MULTIPATH
72413 fib_sync_up(dev);
72414 #endif
72415- atomic_inc(&net->ipv4.dev_addr_genid);
72416+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
72417 rt_cache_flush(dev_net(dev), -1);
72418 break;
72419 case NETDEV_DOWN:
72420diff -urNp linux-3.1.1/net/ipv4/fib_semantics.c linux-3.1.1/net/ipv4/fib_semantics.c
72421--- linux-3.1.1/net/ipv4/fib_semantics.c 2011-11-11 15:19:27.000000000 -0500
72422+++ linux-3.1.1/net/ipv4/fib_semantics.c 2011-11-16 18:39:08.000000000 -0500
72423@@ -699,7 +699,7 @@ __be32 fib_info_update_nh_saddr(struct n
72424 nh->nh_saddr = inet_select_addr(nh->nh_dev,
72425 nh->nh_gw,
72426 nh->nh_parent->fib_scope);
72427- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
72428+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
72429
72430 return nh->nh_saddr;
72431 }
72432diff -urNp linux-3.1.1/net/ipv4/inet_diag.c linux-3.1.1/net/ipv4/inet_diag.c
72433--- linux-3.1.1/net/ipv4/inet_diag.c 2011-11-11 15:19:27.000000000 -0500
72434+++ linux-3.1.1/net/ipv4/inet_diag.c 2011-11-16 18:40:44.000000000 -0500
72435@@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct soc
72436 r->idiag_retrans = 0;
72437
72438 r->id.idiag_if = sk->sk_bound_dev_if;
72439+
72440+#ifdef CONFIG_GRKERNSEC_HIDESYM
72441+ r->id.idiag_cookie[0] = 0;
72442+ r->id.idiag_cookie[1] = 0;
72443+#else
72444 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
72445 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
72446+#endif
72447
72448 r->id.idiag_sport = inet->inet_sport;
72449 r->id.idiag_dport = inet->inet_dport;
72450@@ -201,8 +207,15 @@ static int inet_twsk_diag_fill(struct in
72451 r->idiag_family = tw->tw_family;
72452 r->idiag_retrans = 0;
72453 r->id.idiag_if = tw->tw_bound_dev_if;
72454+
72455+#ifdef CONFIG_GRKERNSEC_HIDESYM
72456+ r->id.idiag_cookie[0] = 0;
72457+ r->id.idiag_cookie[1] = 0;
72458+#else
72459 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
72460 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
72461+#endif
72462+
72463 r->id.idiag_sport = tw->tw_sport;
72464 r->id.idiag_dport = tw->tw_dport;
72465 r->id.idiag_src[0] = tw->tw_rcv_saddr;
72466@@ -285,12 +298,14 @@ static int inet_diag_get_exact(struct sk
72467 if (sk == NULL)
72468 goto unlock;
72469
72470+#ifndef CONFIG_GRKERNSEC_HIDESYM
72471 err = -ESTALE;
72472 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
72473 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
72474 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
72475 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
72476 goto out;
72477+#endif
72478
72479 err = -ENOMEM;
72480 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
72481@@ -580,8 +595,14 @@ static int inet_diag_fill_req(struct sk_
72482 r->idiag_retrans = req->retrans;
72483
72484 r->id.idiag_if = sk->sk_bound_dev_if;
72485+
72486+#ifdef CONFIG_GRKERNSEC_HIDESYM
72487+ r->id.idiag_cookie[0] = 0;
72488+ r->id.idiag_cookie[1] = 0;
72489+#else
72490 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
72491 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
72492+#endif
72493
72494 tmo = req->expires - jiffies;
72495 if (tmo < 0)
72496diff -urNp linux-3.1.1/net/ipv4/inet_hashtables.c linux-3.1.1/net/ipv4/inet_hashtables.c
72497--- linux-3.1.1/net/ipv4/inet_hashtables.c 2011-11-11 15:19:27.000000000 -0500
72498+++ linux-3.1.1/net/ipv4/inet_hashtables.c 2011-11-16 18:40:44.000000000 -0500
72499@@ -18,12 +18,15 @@
72500 #include <linux/sched.h>
72501 #include <linux/slab.h>
72502 #include <linux/wait.h>
72503+#include <linux/security.h>
72504
72505 #include <net/inet_connection_sock.h>
72506 #include <net/inet_hashtables.h>
72507 #include <net/secure_seq.h>
72508 #include <net/ip.h>
72509
72510+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
72511+
72512 /*
72513 * Allocate and initialize a new local port bind bucket.
72514 * The bindhash mutex for snum's hash chain must be held here.
72515@@ -530,6 +533,8 @@ ok:
72516 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
72517 spin_unlock(&head->lock);
72518
72519+ gr_update_task_in_ip_table(current, inet_sk(sk));
72520+
72521 if (tw) {
72522 inet_twsk_deschedule(tw, death_row);
72523 while (twrefcnt) {
72524diff -urNp linux-3.1.1/net/ipv4/inetpeer.c linux-3.1.1/net/ipv4/inetpeer.c
72525--- linux-3.1.1/net/ipv4/inetpeer.c 2011-11-11 15:19:27.000000000 -0500
72526+++ linux-3.1.1/net/ipv4/inetpeer.c 2011-11-16 19:18:22.000000000 -0500
72527@@ -400,6 +400,8 @@ struct inet_peer *inet_getpeer(const str
72528 unsigned int sequence;
72529 int invalidated, gccnt = 0;
72530
72531+ pax_track_stack();
72532+
72533 /* Attempt a lockless lookup first.
72534 * Because of a concurrent writer, we might not find an existing entry.
72535 */
72536@@ -436,8 +438,8 @@ relookup:
72537 if (p) {
72538 p->daddr = *daddr;
72539 atomic_set(&p->refcnt, 1);
72540- atomic_set(&p->rid, 0);
72541- atomic_set(&p->ip_id_count,
72542+ atomic_set_unchecked(&p->rid, 0);
72543+ atomic_set_unchecked(&p->ip_id_count,
72544 (daddr->family == AF_INET) ?
72545 secure_ip_id(daddr->addr.a4) :
72546 secure_ipv6_id(daddr->addr.a6));
72547diff -urNp linux-3.1.1/net/ipv4/ipconfig.c linux-3.1.1/net/ipv4/ipconfig.c
72548--- linux-3.1.1/net/ipv4/ipconfig.c 2011-11-11 15:19:27.000000000 -0500
72549+++ linux-3.1.1/net/ipv4/ipconfig.c 2011-11-16 18:39:08.000000000 -0500
72550@@ -313,7 +313,7 @@ static int __init ic_devinet_ioctl(unsig
72551
72552 mm_segment_t oldfs = get_fs();
72553 set_fs(get_ds());
72554- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
72555+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
72556 set_fs(oldfs);
72557 return res;
72558 }
72559@@ -324,7 +324,7 @@ static int __init ic_dev_ioctl(unsigned
72560
72561 mm_segment_t oldfs = get_fs();
72562 set_fs(get_ds());
72563- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
72564+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
72565 set_fs(oldfs);
72566 return res;
72567 }
72568@@ -335,7 +335,7 @@ static int __init ic_route_ioctl(unsigne
72569
72570 mm_segment_t oldfs = get_fs();
72571 set_fs(get_ds());
72572- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
72573+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
72574 set_fs(oldfs);
72575 return res;
72576 }
72577diff -urNp linux-3.1.1/net/ipv4/ip_fragment.c linux-3.1.1/net/ipv4/ip_fragment.c
72578--- linux-3.1.1/net/ipv4/ip_fragment.c 2011-11-11 15:19:27.000000000 -0500
72579+++ linux-3.1.1/net/ipv4/ip_fragment.c 2011-11-16 18:39:08.000000000 -0500
72580@@ -316,7 +316,7 @@ static inline int ip_frag_too_far(struct
72581 return 0;
72582
72583 start = qp->rid;
72584- end = atomic_inc_return(&peer->rid);
72585+ end = atomic_inc_return_unchecked(&peer->rid);
72586 qp->rid = end;
72587
72588 rc = qp->q.fragments && (end - start) > max;
72589diff -urNp linux-3.1.1/net/ipv4/ip_sockglue.c linux-3.1.1/net/ipv4/ip_sockglue.c
72590--- linux-3.1.1/net/ipv4/ip_sockglue.c 2011-11-11 15:19:27.000000000 -0500
72591+++ linux-3.1.1/net/ipv4/ip_sockglue.c 2011-11-16 18:40:44.000000000 -0500
72592@@ -1073,6 +1073,8 @@ static int do_ip_getsockopt(struct sock
72593 int val;
72594 int len;
72595
72596+ pax_track_stack();
72597+
72598 if (level != SOL_IP)
72599 return -EOPNOTSUPP;
72600
72601@@ -1110,7 +1112,8 @@ static int do_ip_getsockopt(struct sock
72602 len = min_t(unsigned int, len, opt->optlen);
72603 if (put_user(len, optlen))
72604 return -EFAULT;
72605- if (copy_to_user(optval, opt->__data, len))
72606+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
72607+ copy_to_user(optval, opt->__data, len))
72608 return -EFAULT;
72609 return 0;
72610 }
72611@@ -1238,7 +1241,7 @@ static int do_ip_getsockopt(struct sock
72612 if (sk->sk_type != SOCK_STREAM)
72613 return -ENOPROTOOPT;
72614
72615- msg.msg_control = optval;
72616+ msg.msg_control = (void __force_kernel *)optval;
72617 msg.msg_controllen = len;
72618 msg.msg_flags = flags;
72619
72620diff -urNp linux-3.1.1/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-3.1.1/net/ipv4/netfilter/nf_nat_snmp_basic.c
72621--- linux-3.1.1/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-11-11 15:19:27.000000000 -0500
72622+++ linux-3.1.1/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-11-16 18:39:08.000000000 -0500
72623@@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(
72624
72625 *len = 0;
72626
72627- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
72628+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
72629 if (*octets == NULL) {
72630 if (net_ratelimit())
72631 pr_notice("OOM in bsalg (%d)\n", __LINE__);
72632diff -urNp linux-3.1.1/net/ipv4/ping.c linux-3.1.1/net/ipv4/ping.c
72633--- linux-3.1.1/net/ipv4/ping.c 2011-11-11 15:19:27.000000000 -0500
72634+++ linux-3.1.1/net/ipv4/ping.c 2011-11-16 18:39:08.000000000 -0500
72635@@ -837,7 +837,7 @@ static void ping_format_sock(struct sock
72636 sk_rmem_alloc_get(sp),
72637 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72638 atomic_read(&sp->sk_refcnt), sp,
72639- atomic_read(&sp->sk_drops), len);
72640+ atomic_read_unchecked(&sp->sk_drops), len);
72641 }
72642
72643 static int ping_seq_show(struct seq_file *seq, void *v)
72644diff -urNp linux-3.1.1/net/ipv4/raw.c linux-3.1.1/net/ipv4/raw.c
72645--- linux-3.1.1/net/ipv4/raw.c 2011-11-11 15:19:27.000000000 -0500
72646+++ linux-3.1.1/net/ipv4/raw.c 2011-11-17 18:58:40.000000000 -0500
72647@@ -302,7 +302,7 @@ static int raw_rcv_skb(struct sock * sk,
72648 int raw_rcv(struct sock *sk, struct sk_buff *skb)
72649 {
72650 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
72651- atomic_inc(&sk->sk_drops);
72652+ atomic_inc_unchecked(&sk->sk_drops);
72653 kfree_skb(skb);
72654 return NET_RX_DROP;
72655 }
72656@@ -737,16 +737,20 @@ static int raw_init(struct sock *sk)
72657
72658 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
72659 {
72660+ struct icmp_filter filter;
72661+
72662 if (optlen > sizeof(struct icmp_filter))
72663 optlen = sizeof(struct icmp_filter);
72664- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
72665+ if (copy_from_user(&filter, optval, optlen))
72666 return -EFAULT;
72667+ raw_sk(sk)->filter = filter;
72668 return 0;
72669 }
72670
72671 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
72672 {
72673 int len, ret = -EFAULT;
72674+ struct icmp_filter filter;
72675
72676 if (get_user(len, optlen))
72677 goto out;
72678@@ -756,8 +760,8 @@ static int raw_geticmpfilter(struct sock
72679 if (len > sizeof(struct icmp_filter))
72680 len = sizeof(struct icmp_filter);
72681 ret = -EFAULT;
72682- if (put_user(len, optlen) ||
72683- copy_to_user(optval, &raw_sk(sk)->filter, len))
72684+ filter = raw_sk(sk)->filter;
72685+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
72686 goto out;
72687 ret = 0;
72688 out: return ret;
72689@@ -985,7 +989,13 @@ static void raw_sock_seq_show(struct seq
72690 sk_wmem_alloc_get(sp),
72691 sk_rmem_alloc_get(sp),
72692 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72693- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
72694+ atomic_read(&sp->sk_refcnt),
72695+#ifdef CONFIG_GRKERNSEC_HIDESYM
72696+ NULL,
72697+#else
72698+ sp,
72699+#endif
72700+ atomic_read_unchecked(&sp->sk_drops));
72701 }
72702
72703 static int raw_seq_show(struct seq_file *seq, void *v)
72704diff -urNp linux-3.1.1/net/ipv4/route.c linux-3.1.1/net/ipv4/route.c
72705--- linux-3.1.1/net/ipv4/route.c 2011-11-11 15:19:27.000000000 -0500
72706+++ linux-3.1.1/net/ipv4/route.c 2011-11-16 18:39:08.000000000 -0500
72707@@ -308,7 +308,7 @@ static inline unsigned int rt_hash(__be3
72708
72709 static inline int rt_genid(struct net *net)
72710 {
72711- return atomic_read(&net->ipv4.rt_genid);
72712+ return atomic_read_unchecked(&net->ipv4.rt_genid);
72713 }
72714
72715 #ifdef CONFIG_PROC_FS
72716@@ -837,7 +837,7 @@ static void rt_cache_invalidate(struct n
72717 unsigned char shuffle;
72718
72719 get_random_bytes(&shuffle, sizeof(shuffle));
72720- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
72721+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
72722 }
72723
72724 /*
72725@@ -2872,7 +2872,7 @@ static int rt_fill_info(struct net *net,
72726 error = rt->dst.error;
72727 if (peer) {
72728 inet_peer_refcheck(rt->peer);
72729- id = atomic_read(&peer->ip_id_count) & 0xffff;
72730+ id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
72731 if (peer->tcp_ts_stamp) {
72732 ts = peer->tcp_ts;
72733 tsage = get_seconds() - peer->tcp_ts_stamp;
72734diff -urNp linux-3.1.1/net/ipv4/tcp.c linux-3.1.1/net/ipv4/tcp.c
72735--- linux-3.1.1/net/ipv4/tcp.c 2011-11-11 15:19:27.000000000 -0500
72736+++ linux-3.1.1/net/ipv4/tcp.c 2011-11-16 18:40:44.000000000 -0500
72737@@ -2122,6 +2122,8 @@ static int do_tcp_setsockopt(struct sock
72738 int val;
72739 int err = 0;
72740
72741+ pax_track_stack();
72742+
72743 /* These are data/string values, all the others are ints */
72744 switch (optname) {
72745 case TCP_CONGESTION: {
72746@@ -2501,6 +2503,8 @@ static int do_tcp_getsockopt(struct sock
72747 struct tcp_sock *tp = tcp_sk(sk);
72748 int val, len;
72749
72750+ pax_track_stack();
72751+
72752 if (get_user(len, optlen))
72753 return -EFAULT;
72754
72755diff -urNp linux-3.1.1/net/ipv4/tcp_ipv4.c linux-3.1.1/net/ipv4/tcp_ipv4.c
72756--- linux-3.1.1/net/ipv4/tcp_ipv4.c 2011-11-11 15:19:27.000000000 -0500
72757+++ linux-3.1.1/net/ipv4/tcp_ipv4.c 2011-11-16 18:40:44.000000000 -0500
72758@@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
72759 int sysctl_tcp_low_latency __read_mostly;
72760 EXPORT_SYMBOL(sysctl_tcp_low_latency);
72761
72762+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72763+extern int grsec_enable_blackhole;
72764+#endif
72765
72766 #ifdef CONFIG_TCP_MD5SIG
72767 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
72768@@ -1622,6 +1625,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
72769 return 0;
72770
72771 reset:
72772+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72773+ if (!grsec_enable_blackhole)
72774+#endif
72775 tcp_v4_send_reset(rsk, skb);
72776 discard:
72777 kfree_skb(skb);
72778@@ -1684,12 +1690,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
72779 TCP_SKB_CB(skb)->sacked = 0;
72780
72781 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
72782- if (!sk)
72783+ if (!sk) {
72784+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72785+ ret = 1;
72786+#endif
72787 goto no_tcp_socket;
72788-
72789+ }
72790 process:
72791- if (sk->sk_state == TCP_TIME_WAIT)
72792+ if (sk->sk_state == TCP_TIME_WAIT) {
72793+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72794+ ret = 2;
72795+#endif
72796 goto do_time_wait;
72797+ }
72798
72799 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
72800 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
72801@@ -1739,6 +1752,10 @@ no_tcp_socket:
72802 bad_packet:
72803 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
72804 } else {
72805+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72806+ if (!grsec_enable_blackhole || (ret == 1 &&
72807+ (skb->dev->flags & IFF_LOOPBACK)))
72808+#endif
72809 tcp_v4_send_reset(NULL, skb);
72810 }
72811
72812@@ -2403,7 +2420,11 @@ static void get_openreq4(struct sock *sk
72813 0, /* non standard timer */
72814 0, /* open_requests have no inode */
72815 atomic_read(&sk->sk_refcnt),
72816+#ifdef CONFIG_GRKERNSEC_HIDESYM
72817+ NULL,
72818+#else
72819 req,
72820+#endif
72821 len);
72822 }
72823
72824@@ -2453,7 +2474,12 @@ static void get_tcp4_sock(struct sock *s
72825 sock_i_uid(sk),
72826 icsk->icsk_probes_out,
72827 sock_i_ino(sk),
72828- atomic_read(&sk->sk_refcnt), sk,
72829+ atomic_read(&sk->sk_refcnt),
72830+#ifdef CONFIG_GRKERNSEC_HIDESYM
72831+ NULL,
72832+#else
72833+ sk,
72834+#endif
72835 jiffies_to_clock_t(icsk->icsk_rto),
72836 jiffies_to_clock_t(icsk->icsk_ack.ato),
72837 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
72838@@ -2481,7 +2507,13 @@ static void get_timewait4_sock(struct in
72839 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
72840 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
72841 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
72842- atomic_read(&tw->tw_refcnt), tw, len);
72843+ atomic_read(&tw->tw_refcnt),
72844+#ifdef CONFIG_GRKERNSEC_HIDESYM
72845+ NULL,
72846+#else
72847+ tw,
72848+#endif
72849+ len);
72850 }
72851
72852 #define TMPSZ 150
72853diff -urNp linux-3.1.1/net/ipv4/tcp_minisocks.c linux-3.1.1/net/ipv4/tcp_minisocks.c
72854--- linux-3.1.1/net/ipv4/tcp_minisocks.c 2011-11-11 15:19:27.000000000 -0500
72855+++ linux-3.1.1/net/ipv4/tcp_minisocks.c 2011-11-16 18:40:44.000000000 -0500
72856@@ -27,6 +27,10 @@
72857 #include <net/inet_common.h>
72858 #include <net/xfrm.h>
72859
72860+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72861+extern int grsec_enable_blackhole;
72862+#endif
72863+
72864 int sysctl_tcp_syncookies __read_mostly = 1;
72865 EXPORT_SYMBOL(sysctl_tcp_syncookies);
72866
72867@@ -750,6 +754,10 @@ listen_overflow:
72868
72869 embryonic_reset:
72870 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
72871+
72872+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72873+ if (!grsec_enable_blackhole)
72874+#endif
72875 if (!(flg & TCP_FLAG_RST))
72876 req->rsk_ops->send_reset(sk, skb);
72877
72878diff -urNp linux-3.1.1/net/ipv4/tcp_output.c linux-3.1.1/net/ipv4/tcp_output.c
72879--- linux-3.1.1/net/ipv4/tcp_output.c 2011-11-11 15:19:27.000000000 -0500
72880+++ linux-3.1.1/net/ipv4/tcp_output.c 2011-11-16 18:40:44.000000000 -0500
72881@@ -2421,6 +2421,8 @@ struct sk_buff *tcp_make_synack(struct s
72882 int mss;
72883 int s_data_desired = 0;
72884
72885+ pax_track_stack();
72886+
72887 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
72888 s_data_desired = cvp->s_data_desired;
72889 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
72890diff -urNp linux-3.1.1/net/ipv4/tcp_probe.c linux-3.1.1/net/ipv4/tcp_probe.c
72891--- linux-3.1.1/net/ipv4/tcp_probe.c 2011-11-11 15:19:27.000000000 -0500
72892+++ linux-3.1.1/net/ipv4/tcp_probe.c 2011-11-16 18:39:08.000000000 -0500
72893@@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file
72894 if (cnt + width >= len)
72895 break;
72896
72897- if (copy_to_user(buf + cnt, tbuf, width))
72898+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
72899 return -EFAULT;
72900 cnt += width;
72901 }
72902diff -urNp linux-3.1.1/net/ipv4/tcp_timer.c linux-3.1.1/net/ipv4/tcp_timer.c
72903--- linux-3.1.1/net/ipv4/tcp_timer.c 2011-11-11 15:19:27.000000000 -0500
72904+++ linux-3.1.1/net/ipv4/tcp_timer.c 2011-11-16 18:40:44.000000000 -0500
72905@@ -22,6 +22,10 @@
72906 #include <linux/gfp.h>
72907 #include <net/tcp.h>
72908
72909+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72910+extern int grsec_lastack_retries;
72911+#endif
72912+
72913 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
72914 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
72915 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
72916@@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock
72917 }
72918 }
72919
72920+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72921+ if ((sk->sk_state == TCP_LAST_ACK) &&
72922+ (grsec_lastack_retries > 0) &&
72923+ (grsec_lastack_retries < retry_until))
72924+ retry_until = grsec_lastack_retries;
72925+#endif
72926+
72927 if (retransmits_timed_out(sk, retry_until,
72928 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
72929 /* Has it gone just too far? */
72930diff -urNp linux-3.1.1/net/ipv4/udp.c linux-3.1.1/net/ipv4/udp.c
72931--- linux-3.1.1/net/ipv4/udp.c 2011-11-11 15:19:27.000000000 -0500
72932+++ linux-3.1.1/net/ipv4/udp.c 2011-11-16 19:17:54.000000000 -0500
72933@@ -86,6 +86,7 @@
72934 #include <linux/types.h>
72935 #include <linux/fcntl.h>
72936 #include <linux/module.h>
72937+#include <linux/security.h>
72938 #include <linux/socket.h>
72939 #include <linux/sockios.h>
72940 #include <linux/igmp.h>
72941@@ -108,6 +109,10 @@
72942 #include <trace/events/udp.h>
72943 #include "udp_impl.h"
72944
72945+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72946+extern int grsec_enable_blackhole;
72947+#endif
72948+
72949 struct udp_table udp_table __read_mostly;
72950 EXPORT_SYMBOL(udp_table);
72951
72952@@ -565,6 +570,9 @@ found:
72953 return s;
72954 }
72955
72956+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
72957+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
72958+
72959 /*
72960 * This routine is called by the ICMP module when it gets some
72961 * sort of error condition. If err < 0 then the socket should
72962@@ -856,9 +864,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
72963 dport = usin->sin_port;
72964 if (dport == 0)
72965 return -EINVAL;
72966+
72967+ err = gr_search_udp_sendmsg(sk, usin);
72968+ if (err)
72969+ return err;
72970 } else {
72971 if (sk->sk_state != TCP_ESTABLISHED)
72972 return -EDESTADDRREQ;
72973+
72974+ err = gr_search_udp_sendmsg(sk, NULL);
72975+ if (err)
72976+ return err;
72977+
72978 daddr = inet->inet_daddr;
72979 dport = inet->inet_dport;
72980 /* Open fast path for connected socket.
72981@@ -1099,7 +1116,7 @@ static unsigned int first_packet_length(
72982 udp_lib_checksum_complete(skb)) {
72983 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
72984 IS_UDPLITE(sk));
72985- atomic_inc(&sk->sk_drops);
72986+ atomic_inc_unchecked(&sk->sk_drops);
72987 __skb_unlink(skb, rcvq);
72988 __skb_queue_tail(&list_kill, skb);
72989 }
72990@@ -1185,6 +1202,10 @@ try_again:
72991 if (!skb)
72992 goto out;
72993
72994+ err = gr_search_udp_recvmsg(sk, skb);
72995+ if (err)
72996+ goto out_free;
72997+
72998 ulen = skb->len - sizeof(struct udphdr);
72999 if (len > ulen)
73000 len = ulen;
73001@@ -1485,7 +1506,7 @@ int udp_queue_rcv_skb(struct sock *sk, s
73002
73003 drop:
73004 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
73005- atomic_inc(&sk->sk_drops);
73006+ atomic_inc_unchecked(&sk->sk_drops);
73007 kfree_skb(skb);
73008 return -1;
73009 }
73010@@ -1504,7 +1525,7 @@ static void flush_stack(struct sock **st
73011 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
73012
73013 if (!skb1) {
73014- atomic_inc(&sk->sk_drops);
73015+ atomic_inc_unchecked(&sk->sk_drops);
73016 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
73017 IS_UDPLITE(sk));
73018 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
73019@@ -1673,6 +1694,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
73020 goto csum_error;
73021
73022 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
73023+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73024+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
73025+#endif
73026 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
73027
73028 /*
73029@@ -2100,8 +2124,13 @@ static void udp4_format_sock(struct sock
73030 sk_wmem_alloc_get(sp),
73031 sk_rmem_alloc_get(sp),
73032 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
73033- atomic_read(&sp->sk_refcnt), sp,
73034- atomic_read(&sp->sk_drops), len);
73035+ atomic_read(&sp->sk_refcnt),
73036+#ifdef CONFIG_GRKERNSEC_HIDESYM
73037+ NULL,
73038+#else
73039+ sp,
73040+#endif
73041+ atomic_read_unchecked(&sp->sk_drops), len);
73042 }
73043
73044 int udp4_seq_show(struct seq_file *seq, void *v)
73045diff -urNp linux-3.1.1/net/ipv6/addrconf.c linux-3.1.1/net/ipv6/addrconf.c
73046--- linux-3.1.1/net/ipv6/addrconf.c 2011-11-11 15:19:27.000000000 -0500
73047+++ linux-3.1.1/net/ipv6/addrconf.c 2011-11-16 18:39:08.000000000 -0500
73048@@ -2083,7 +2083,7 @@ int addrconf_set_dstaddr(struct net *net
73049 p.iph.ihl = 5;
73050 p.iph.protocol = IPPROTO_IPV6;
73051 p.iph.ttl = 64;
73052- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
73053+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
73054
73055 if (ops->ndo_do_ioctl) {
73056 mm_segment_t oldfs = get_fs();
73057diff -urNp linux-3.1.1/net/ipv6/inet6_connection_sock.c linux-3.1.1/net/ipv6/inet6_connection_sock.c
73058--- linux-3.1.1/net/ipv6/inet6_connection_sock.c 2011-11-11 15:19:27.000000000 -0500
73059+++ linux-3.1.1/net/ipv6/inet6_connection_sock.c 2011-11-16 18:39:08.000000000 -0500
73060@@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *
73061 #ifdef CONFIG_XFRM
73062 {
73063 struct rt6_info *rt = (struct rt6_info *)dst;
73064- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
73065+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
73066 }
73067 #endif
73068 }
73069@@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(
73070 #ifdef CONFIG_XFRM
73071 if (dst) {
73072 struct rt6_info *rt = (struct rt6_info *)dst;
73073- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
73074+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
73075 __sk_dst_reset(sk);
73076 dst = NULL;
73077 }
73078diff -urNp linux-3.1.1/net/ipv6/ipv6_sockglue.c linux-3.1.1/net/ipv6/ipv6_sockglue.c
73079--- linux-3.1.1/net/ipv6/ipv6_sockglue.c 2011-11-11 15:19:27.000000000 -0500
73080+++ linux-3.1.1/net/ipv6/ipv6_sockglue.c 2011-11-16 18:40:44.000000000 -0500
73081@@ -129,6 +129,8 @@ static int do_ipv6_setsockopt(struct soc
73082 int val, valbool;
73083 int retv = -ENOPROTOOPT;
73084
73085+ pax_track_stack();
73086+
73087 if (optval == NULL)
73088 val=0;
73089 else {
73090@@ -919,6 +921,8 @@ static int do_ipv6_getsockopt(struct soc
73091 int len;
73092 int val;
73093
73094+ pax_track_stack();
73095+
73096 if (ip6_mroute_opt(optname))
73097 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
73098
73099@@ -960,7 +964,7 @@ static int do_ipv6_getsockopt(struct soc
73100 if (sk->sk_type != SOCK_STREAM)
73101 return -ENOPROTOOPT;
73102
73103- msg.msg_control = optval;
73104+ msg.msg_control = (void __force_kernel *)optval;
73105 msg.msg_controllen = len;
73106 msg.msg_flags = flags;
73107
73108diff -urNp linux-3.1.1/net/ipv6/raw.c linux-3.1.1/net/ipv6/raw.c
73109--- linux-3.1.1/net/ipv6/raw.c 2011-11-11 15:19:27.000000000 -0500
73110+++ linux-3.1.1/net/ipv6/raw.c 2011-11-16 18:40:44.000000000 -0500
73111@@ -376,7 +376,7 @@ static inline int rawv6_rcv_skb(struct s
73112 {
73113 if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
73114 skb_checksum_complete(skb)) {
73115- atomic_inc(&sk->sk_drops);
73116+ atomic_inc_unchecked(&sk->sk_drops);
73117 kfree_skb(skb);
73118 return NET_RX_DROP;
73119 }
73120@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
73121 struct raw6_sock *rp = raw6_sk(sk);
73122
73123 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
73124- atomic_inc(&sk->sk_drops);
73125+ atomic_inc_unchecked(&sk->sk_drops);
73126 kfree_skb(skb);
73127 return NET_RX_DROP;
73128 }
73129@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
73130
73131 if (inet->hdrincl) {
73132 if (skb_checksum_complete(skb)) {
73133- atomic_inc(&sk->sk_drops);
73134+ atomic_inc_unchecked(&sk->sk_drops);
73135 kfree_skb(skb);
73136 return NET_RX_DROP;
73137 }
73138@@ -601,7 +601,7 @@ out:
73139 return err;
73140 }
73141
73142-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
73143+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
73144 struct flowi6 *fl6, struct dst_entry **dstp,
73145 unsigned int flags)
73146 {
73147@@ -742,6 +742,8 @@ static int rawv6_sendmsg(struct kiocb *i
73148 u16 proto;
73149 int err;
73150
73151+ pax_track_stack();
73152+
73153 /* Rough check on arithmetic overflow,
73154 better check is made in ip6_append_data().
73155 */
73156@@ -909,12 +911,15 @@ do_confirm:
73157 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
73158 char __user *optval, int optlen)
73159 {
73160+ struct icmp6_filter filter;
73161+
73162 switch (optname) {
73163 case ICMPV6_FILTER:
73164 if (optlen > sizeof(struct icmp6_filter))
73165 optlen = sizeof(struct icmp6_filter);
73166- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
73167+ if (copy_from_user(&filter, optval, optlen))
73168 return -EFAULT;
73169+ raw6_sk(sk)->filter = filter;
73170 return 0;
73171 default:
73172 return -ENOPROTOOPT;
73173@@ -927,6 +932,7 @@ static int rawv6_geticmpfilter(struct so
73174 char __user *optval, int __user *optlen)
73175 {
73176 int len;
73177+ struct icmp6_filter filter;
73178
73179 switch (optname) {
73180 case ICMPV6_FILTER:
73181@@ -938,7 +944,8 @@ static int rawv6_geticmpfilter(struct so
73182 len = sizeof(struct icmp6_filter);
73183 if (put_user(len, optlen))
73184 return -EFAULT;
73185- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
73186+ filter = raw6_sk(sk)->filter;
73187+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
73188 return -EFAULT;
73189 return 0;
73190 default:
73191@@ -1245,7 +1252,13 @@ static void raw6_sock_seq_show(struct se
73192 0, 0L, 0,
73193 sock_i_uid(sp), 0,
73194 sock_i_ino(sp),
73195- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
73196+ atomic_read(&sp->sk_refcnt),
73197+#ifdef CONFIG_GRKERNSEC_HIDESYM
73198+ NULL,
73199+#else
73200+ sp,
73201+#endif
73202+ atomic_read_unchecked(&sp->sk_drops));
73203 }
73204
73205 static int raw6_seq_show(struct seq_file *seq, void *v)
73206diff -urNp linux-3.1.1/net/ipv6/tcp_ipv6.c linux-3.1.1/net/ipv6/tcp_ipv6.c
73207--- linux-3.1.1/net/ipv6/tcp_ipv6.c 2011-11-11 15:19:27.000000000 -0500
73208+++ linux-3.1.1/net/ipv6/tcp_ipv6.c 2011-11-16 18:40:44.000000000 -0500
73209@@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
73210 }
73211 #endif
73212
73213+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73214+extern int grsec_enable_blackhole;
73215+#endif
73216+
73217 static void tcp_v6_hash(struct sock *sk)
73218 {
73219 if (sk->sk_state != TCP_CLOSE) {
73220@@ -1647,6 +1651,9 @@ static int tcp_v6_do_rcv(struct sock *sk
73221 return 0;
73222
73223 reset:
73224+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73225+ if (!grsec_enable_blackhole)
73226+#endif
73227 tcp_v6_send_reset(sk, skb);
73228 discard:
73229 if (opt_skb)
73230@@ -1726,12 +1733,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
73231 TCP_SKB_CB(skb)->sacked = 0;
73232
73233 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
73234- if (!sk)
73235+ if (!sk) {
73236+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73237+ ret = 1;
73238+#endif
73239 goto no_tcp_socket;
73240+ }
73241
73242 process:
73243- if (sk->sk_state == TCP_TIME_WAIT)
73244+ if (sk->sk_state == TCP_TIME_WAIT) {
73245+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73246+ ret = 2;
73247+#endif
73248 goto do_time_wait;
73249+ }
73250
73251 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
73252 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
73253@@ -1779,6 +1794,10 @@ no_tcp_socket:
73254 bad_packet:
73255 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
73256 } else {
73257+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73258+ if (!grsec_enable_blackhole || (ret == 1 &&
73259+ (skb->dev->flags & IFF_LOOPBACK)))
73260+#endif
73261 tcp_v6_send_reset(NULL, skb);
73262 }
73263
73264@@ -2039,7 +2058,13 @@ static void get_openreq6(struct seq_file
73265 uid,
73266 0, /* non standard timer */
73267 0, /* open_requests have no inode */
73268- 0, req);
73269+ 0,
73270+#ifdef CONFIG_GRKERNSEC_HIDESYM
73271+ NULL
73272+#else
73273+ req
73274+#endif
73275+ );
73276 }
73277
73278 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
73279@@ -2089,7 +2114,12 @@ static void get_tcp6_sock(struct seq_fil
73280 sock_i_uid(sp),
73281 icsk->icsk_probes_out,
73282 sock_i_ino(sp),
73283- atomic_read(&sp->sk_refcnt), sp,
73284+ atomic_read(&sp->sk_refcnt),
73285+#ifdef CONFIG_GRKERNSEC_HIDESYM
73286+ NULL,
73287+#else
73288+ sp,
73289+#endif
73290 jiffies_to_clock_t(icsk->icsk_rto),
73291 jiffies_to_clock_t(icsk->icsk_ack.ato),
73292 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
73293@@ -2124,7 +2154,13 @@ static void get_timewait6_sock(struct se
73294 dest->s6_addr32[2], dest->s6_addr32[3], destp,
73295 tw->tw_substate, 0, 0,
73296 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
73297- atomic_read(&tw->tw_refcnt), tw);
73298+ atomic_read(&tw->tw_refcnt),
73299+#ifdef CONFIG_GRKERNSEC_HIDESYM
73300+ NULL
73301+#else
73302+ tw
73303+#endif
73304+ );
73305 }
73306
73307 static int tcp6_seq_show(struct seq_file *seq, void *v)
73308diff -urNp linux-3.1.1/net/ipv6/udp.c linux-3.1.1/net/ipv6/udp.c
73309--- linux-3.1.1/net/ipv6/udp.c 2011-11-11 15:19:27.000000000 -0500
73310+++ linux-3.1.1/net/ipv6/udp.c 2011-11-16 18:40:44.000000000 -0500
73311@@ -50,6 +50,10 @@
73312 #include <linux/seq_file.h>
73313 #include "udp_impl.h"
73314
73315+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73316+extern int grsec_enable_blackhole;
73317+#endif
73318+
73319 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
73320 {
73321 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
73322@@ -548,7 +552,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
73323
73324 return 0;
73325 drop:
73326- atomic_inc(&sk->sk_drops);
73327+ atomic_inc_unchecked(&sk->sk_drops);
73328 drop_no_sk_drops_inc:
73329 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
73330 kfree_skb(skb);
73331@@ -624,7 +628,7 @@ static void flush_stack(struct sock **st
73332 continue;
73333 }
73334 drop:
73335- atomic_inc(&sk->sk_drops);
73336+ atomic_inc_unchecked(&sk->sk_drops);
73337 UDP6_INC_STATS_BH(sock_net(sk),
73338 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
73339 UDP6_INC_STATS_BH(sock_net(sk),
73340@@ -779,6 +783,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
73341 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
73342 proto == IPPROTO_UDPLITE);
73343
73344+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73345+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
73346+#endif
73347 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
73348
73349 kfree_skb(skb);
73350@@ -795,7 +802,7 @@ int __udp6_lib_rcv(struct sk_buff *skb,
73351 if (!sock_owned_by_user(sk))
73352 udpv6_queue_rcv_skb(sk, skb);
73353 else if (sk_add_backlog(sk, skb)) {
73354- atomic_inc(&sk->sk_drops);
73355+ atomic_inc_unchecked(&sk->sk_drops);
73356 bh_unlock_sock(sk);
73357 sock_put(sk);
73358 goto discard;
73359@@ -1406,8 +1413,13 @@ static void udp6_sock_seq_show(struct se
73360 0, 0L, 0,
73361 sock_i_uid(sp), 0,
73362 sock_i_ino(sp),
73363- atomic_read(&sp->sk_refcnt), sp,
73364- atomic_read(&sp->sk_drops));
73365+ atomic_read(&sp->sk_refcnt),
73366+#ifdef CONFIG_GRKERNSEC_HIDESYM
73367+ NULL,
73368+#else
73369+ sp,
73370+#endif
73371+ atomic_read_unchecked(&sp->sk_drops));
73372 }
73373
73374 int udp6_seq_show(struct seq_file *seq, void *v)
73375diff -urNp linux-3.1.1/net/irda/ircomm/ircomm_tty.c linux-3.1.1/net/irda/ircomm/ircomm_tty.c
73376--- linux-3.1.1/net/irda/ircomm/ircomm_tty.c 2011-11-11 15:19:27.000000000 -0500
73377+++ linux-3.1.1/net/irda/ircomm/ircomm_tty.c 2011-11-16 18:39:08.000000000 -0500
73378@@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(st
73379 add_wait_queue(&self->open_wait, &wait);
73380
73381 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
73382- __FILE__,__LINE__, tty->driver->name, self->open_count );
73383+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
73384
73385 /* As far as I can see, we protect open_count - Jean II */
73386 spin_lock_irqsave(&self->spinlock, flags);
73387 if (!tty_hung_up_p(filp)) {
73388 extra_count = 1;
73389- self->open_count--;
73390+ local_dec(&self->open_count);
73391 }
73392 spin_unlock_irqrestore(&self->spinlock, flags);
73393- self->blocked_open++;
73394+ local_inc(&self->blocked_open);
73395
73396 while (1) {
73397 if (tty->termios->c_cflag & CBAUD) {
73398@@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(st
73399 }
73400
73401 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
73402- __FILE__,__LINE__, tty->driver->name, self->open_count );
73403+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
73404
73405 schedule();
73406 }
73407@@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(st
73408 if (extra_count) {
73409 /* ++ is not atomic, so this should be protected - Jean II */
73410 spin_lock_irqsave(&self->spinlock, flags);
73411- self->open_count++;
73412+ local_inc(&self->open_count);
73413 spin_unlock_irqrestore(&self->spinlock, flags);
73414 }
73415- self->blocked_open--;
73416+ local_dec(&self->blocked_open);
73417
73418 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
73419- __FILE__,__LINE__, tty->driver->name, self->open_count);
73420+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
73421
73422 if (!retval)
73423 self->flags |= ASYNC_NORMAL_ACTIVE;
73424@@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_st
73425 }
73426 /* ++ is not atomic, so this should be protected - Jean II */
73427 spin_lock_irqsave(&self->spinlock, flags);
73428- self->open_count++;
73429+ local_inc(&self->open_count);
73430
73431 tty->driver_data = self;
73432 self->tty = tty;
73433 spin_unlock_irqrestore(&self->spinlock, flags);
73434
73435 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
73436- self->line, self->open_count);
73437+ self->line, local_read(&self->open_count));
73438
73439 /* Not really used by us, but lets do it anyway */
73440 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
73441@@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_
73442 return;
73443 }
73444
73445- if ((tty->count == 1) && (self->open_count != 1)) {
73446+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
73447 /*
73448 * Uh, oh. tty->count is 1, which means that the tty
73449 * structure will be freed. state->count should always
73450@@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_
73451 */
73452 IRDA_DEBUG(0, "%s(), bad serial port count; "
73453 "tty->count is 1, state->count is %d\n", __func__ ,
73454- self->open_count);
73455- self->open_count = 1;
73456+ local_read(&self->open_count));
73457+ local_set(&self->open_count, 1);
73458 }
73459
73460- if (--self->open_count < 0) {
73461+ if (local_dec_return(&self->open_count) < 0) {
73462 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
73463- __func__, self->line, self->open_count);
73464- self->open_count = 0;
73465+ __func__, self->line, local_read(&self->open_count));
73466+ local_set(&self->open_count, 0);
73467 }
73468- if (self->open_count) {
73469+ if (local_read(&self->open_count)) {
73470 spin_unlock_irqrestore(&self->spinlock, flags);
73471
73472 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
73473@@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_
73474 tty->closing = 0;
73475 self->tty = NULL;
73476
73477- if (self->blocked_open) {
73478+ if (local_read(&self->blocked_open)) {
73479 if (self->close_delay)
73480 schedule_timeout_interruptible(self->close_delay);
73481 wake_up_interruptible(&self->open_wait);
73482@@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty
73483 spin_lock_irqsave(&self->spinlock, flags);
73484 self->flags &= ~ASYNC_NORMAL_ACTIVE;
73485 self->tty = NULL;
73486- self->open_count = 0;
73487+ local_set(&self->open_count, 0);
73488 spin_unlock_irqrestore(&self->spinlock, flags);
73489
73490 wake_up_interruptible(&self->open_wait);
73491@@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct
73492 seq_putc(m, '\n');
73493
73494 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
73495- seq_printf(m, "Open count: %d\n", self->open_count);
73496+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
73497 seq_printf(m, "Max data size: %d\n", self->max_data_size);
73498 seq_printf(m, "Max header size: %d\n", self->max_header_size);
73499
73500diff -urNp linux-3.1.1/net/iucv/af_iucv.c linux-3.1.1/net/iucv/af_iucv.c
73501--- linux-3.1.1/net/iucv/af_iucv.c 2011-11-11 15:19:27.000000000 -0500
73502+++ linux-3.1.1/net/iucv/af_iucv.c 2011-11-16 18:39:08.000000000 -0500
73503@@ -648,10 +648,10 @@ static int iucv_sock_autobind(struct soc
73504
73505 write_lock_bh(&iucv_sk_list.lock);
73506
73507- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
73508+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
73509 while (__iucv_get_sock_by_name(name)) {
73510 sprintf(name, "%08x",
73511- atomic_inc_return(&iucv_sk_list.autobind_name));
73512+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
73513 }
73514
73515 write_unlock_bh(&iucv_sk_list.lock);
73516diff -urNp linux-3.1.1/net/key/af_key.c linux-3.1.1/net/key/af_key.c
73517--- linux-3.1.1/net/key/af_key.c 2011-11-11 15:19:27.000000000 -0500
73518+++ linux-3.1.1/net/key/af_key.c 2011-11-16 18:40:44.000000000 -0500
73519@@ -2481,6 +2481,8 @@ static int pfkey_migrate(struct sock *sk
73520 struct xfrm_migrate m[XFRM_MAX_DEPTH];
73521 struct xfrm_kmaddress k;
73522
73523+ pax_track_stack();
73524+
73525 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
73526 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
73527 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
73528@@ -3016,10 +3018,10 @@ static int pfkey_send_policy_notify(stru
73529 static u32 get_acqseq(void)
73530 {
73531 u32 res;
73532- static atomic_t acqseq;
73533+ static atomic_unchecked_t acqseq;
73534
73535 do {
73536- res = atomic_inc_return(&acqseq);
73537+ res = atomic_inc_return_unchecked(&acqseq);
73538 } while (!res);
73539 return res;
73540 }
73541diff -urNp linux-3.1.1/net/lapb/lapb_iface.c linux-3.1.1/net/lapb/lapb_iface.c
73542--- linux-3.1.1/net/lapb/lapb_iface.c 2011-11-11 15:19:27.000000000 -0500
73543+++ linux-3.1.1/net/lapb/lapb_iface.c 2011-11-16 18:39:08.000000000 -0500
73544@@ -158,7 +158,7 @@ int lapb_register(struct net_device *dev
73545 goto out;
73546
73547 lapb->dev = dev;
73548- lapb->callbacks = *callbacks;
73549+ lapb->callbacks = callbacks;
73550
73551 __lapb_insert_cb(lapb);
73552
73553@@ -380,32 +380,32 @@ int lapb_data_received(struct net_device
73554
73555 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
73556 {
73557- if (lapb->callbacks.connect_confirmation)
73558- lapb->callbacks.connect_confirmation(lapb->dev, reason);
73559+ if (lapb->callbacks->connect_confirmation)
73560+ lapb->callbacks->connect_confirmation(lapb->dev, reason);
73561 }
73562
73563 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
73564 {
73565- if (lapb->callbacks.connect_indication)
73566- lapb->callbacks.connect_indication(lapb->dev, reason);
73567+ if (lapb->callbacks->connect_indication)
73568+ lapb->callbacks->connect_indication(lapb->dev, reason);
73569 }
73570
73571 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
73572 {
73573- if (lapb->callbacks.disconnect_confirmation)
73574- lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
73575+ if (lapb->callbacks->disconnect_confirmation)
73576+ lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
73577 }
73578
73579 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
73580 {
73581- if (lapb->callbacks.disconnect_indication)
73582- lapb->callbacks.disconnect_indication(lapb->dev, reason);
73583+ if (lapb->callbacks->disconnect_indication)
73584+ lapb->callbacks->disconnect_indication(lapb->dev, reason);
73585 }
73586
73587 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
73588 {
73589- if (lapb->callbacks.data_indication)
73590- return lapb->callbacks.data_indication(lapb->dev, skb);
73591+ if (lapb->callbacks->data_indication)
73592+ return lapb->callbacks->data_indication(lapb->dev, skb);
73593
73594 kfree_skb(skb);
73595 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
73596@@ -415,8 +415,8 @@ int lapb_data_transmit(struct lapb_cb *l
73597 {
73598 int used = 0;
73599
73600- if (lapb->callbacks.data_transmit) {
73601- lapb->callbacks.data_transmit(lapb->dev, skb);
73602+ if (lapb->callbacks->data_transmit) {
73603+ lapb->callbacks->data_transmit(lapb->dev, skb);
73604 used = 1;
73605 }
73606
73607diff -urNp linux-3.1.1/net/mac80211/debugfs_sta.c linux-3.1.1/net/mac80211/debugfs_sta.c
73608--- linux-3.1.1/net/mac80211/debugfs_sta.c 2011-11-11 15:19:27.000000000 -0500
73609+++ linux-3.1.1/net/mac80211/debugfs_sta.c 2011-11-16 18:40:44.000000000 -0500
73610@@ -140,6 +140,8 @@ static ssize_t sta_agg_status_read(struc
73611 struct tid_ampdu_rx *tid_rx;
73612 struct tid_ampdu_tx *tid_tx;
73613
73614+ pax_track_stack();
73615+
73616 rcu_read_lock();
73617
73618 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
73619@@ -240,6 +242,8 @@ static ssize_t sta_ht_capa_read(struct f
73620 struct sta_info *sta = file->private_data;
73621 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
73622
73623+ pax_track_stack();
73624+
73625 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
73626 htc->ht_supported ? "" : "not ");
73627 if (htc->ht_supported) {
73628diff -urNp linux-3.1.1/net/mac80211/ieee80211_i.h linux-3.1.1/net/mac80211/ieee80211_i.h
73629--- linux-3.1.1/net/mac80211/ieee80211_i.h 2011-11-11 15:19:27.000000000 -0500
73630+++ linux-3.1.1/net/mac80211/ieee80211_i.h 2011-11-16 18:39:08.000000000 -0500
73631@@ -27,6 +27,7 @@
73632 #include <net/ieee80211_radiotap.h>
73633 #include <net/cfg80211.h>
73634 #include <net/mac80211.h>
73635+#include <asm/local.h>
73636 #include "key.h"
73637 #include "sta_info.h"
73638
73639@@ -754,7 +755,7 @@ struct ieee80211_local {
73640 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
73641 spinlock_t queue_stop_reason_lock;
73642
73643- int open_count;
73644+ local_t open_count;
73645 int monitors, cooked_mntrs;
73646 /* number of interfaces with corresponding FIF_ flags */
73647 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
73648diff -urNp linux-3.1.1/net/mac80211/iface.c linux-3.1.1/net/mac80211/iface.c
73649--- linux-3.1.1/net/mac80211/iface.c 2011-11-11 15:19:27.000000000 -0500
73650+++ linux-3.1.1/net/mac80211/iface.c 2011-11-16 18:39:08.000000000 -0500
73651@@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_
73652 break;
73653 }
73654
73655- if (local->open_count == 0) {
73656+ if (local_read(&local->open_count) == 0) {
73657 res = drv_start(local);
73658 if (res)
73659 goto err_del_bss;
73660@@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_
73661 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
73662
73663 if (!is_valid_ether_addr(dev->dev_addr)) {
73664- if (!local->open_count)
73665+ if (!local_read(&local->open_count))
73666 drv_stop(local);
73667 return -EADDRNOTAVAIL;
73668 }
73669@@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_
73670 mutex_unlock(&local->mtx);
73671
73672 if (coming_up)
73673- local->open_count++;
73674+ local_inc(&local->open_count);
73675
73676 if (hw_reconf_flags) {
73677 ieee80211_hw_config(local, hw_reconf_flags);
73678@@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_
73679 err_del_interface:
73680 drv_remove_interface(local, &sdata->vif);
73681 err_stop:
73682- if (!local->open_count)
73683+ if (!local_read(&local->open_count))
73684 drv_stop(local);
73685 err_del_bss:
73686 sdata->bss = NULL;
73687@@ -474,7 +474,7 @@ static void ieee80211_do_stop(struct iee
73688 }
73689
73690 if (going_down)
73691- local->open_count--;
73692+ local_dec(&local->open_count);
73693
73694 switch (sdata->vif.type) {
73695 case NL80211_IFTYPE_AP_VLAN:
73696@@ -533,7 +533,7 @@ static void ieee80211_do_stop(struct iee
73697
73698 ieee80211_recalc_ps(local, -1);
73699
73700- if (local->open_count == 0) {
73701+ if (local_read(&local->open_count) == 0) {
73702 if (local->ops->napi_poll)
73703 napi_disable(&local->napi);
73704 ieee80211_clear_tx_pending(local);
73705diff -urNp linux-3.1.1/net/mac80211/main.c linux-3.1.1/net/mac80211/main.c
73706--- linux-3.1.1/net/mac80211/main.c 2011-11-11 15:19:27.000000000 -0500
73707+++ linux-3.1.1/net/mac80211/main.c 2011-11-16 18:39:08.000000000 -0500
73708@@ -209,7 +209,7 @@ int ieee80211_hw_config(struct ieee80211
73709 local->hw.conf.power_level = power;
73710 }
73711
73712- if (changed && local->open_count) {
73713+ if (changed && local_read(&local->open_count)) {
73714 ret = drv_config(local, changed);
73715 /*
73716 * Goal:
73717diff -urNp linux-3.1.1/net/mac80211/mlme.c linux-3.1.1/net/mac80211/mlme.c
73718--- linux-3.1.1/net/mac80211/mlme.c 2011-11-11 15:19:27.000000000 -0500
73719+++ linux-3.1.1/net/mac80211/mlme.c 2011-11-16 18:40:44.000000000 -0500
73720@@ -1464,6 +1464,8 @@ static bool ieee80211_assoc_success(stru
73721 bool have_higher_than_11mbit = false;
73722 u16 ap_ht_cap_flags;
73723
73724+ pax_track_stack();
73725+
73726 /* AssocResp and ReassocResp have identical structure */
73727
73728 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
73729diff -urNp linux-3.1.1/net/mac80211/pm.c linux-3.1.1/net/mac80211/pm.c
73730--- linux-3.1.1/net/mac80211/pm.c 2011-11-11 15:19:27.000000000 -0500
73731+++ linux-3.1.1/net/mac80211/pm.c 2011-11-16 18:39:08.000000000 -0500
73732@@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211
73733 struct ieee80211_sub_if_data *sdata;
73734 struct sta_info *sta;
73735
73736- if (!local->open_count)
73737+ if (!local_read(&local->open_count))
73738 goto suspend;
73739
73740 ieee80211_scan_cancel(local);
73741@@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211
73742 cancel_work_sync(&local->dynamic_ps_enable_work);
73743 del_timer_sync(&local->dynamic_ps_timer);
73744
73745- local->wowlan = wowlan && local->open_count;
73746+ local->wowlan = wowlan && local_read(&local->open_count);
73747 if (local->wowlan) {
73748 int err = drv_suspend(local, wowlan);
73749 if (err < 0) {
73750@@ -129,7 +129,7 @@ int __ieee80211_suspend(struct ieee80211
73751 }
73752
73753 /* stop hardware - this must stop RX */
73754- if (local->open_count)
73755+ if (local_read(&local->open_count))
73756 ieee80211_stop_device(local);
73757
73758 suspend:
73759diff -urNp linux-3.1.1/net/mac80211/rate.c linux-3.1.1/net/mac80211/rate.c
73760--- linux-3.1.1/net/mac80211/rate.c 2011-11-11 15:19:27.000000000 -0500
73761+++ linux-3.1.1/net/mac80211/rate.c 2011-11-16 18:39:08.000000000 -0500
73762@@ -371,7 +371,7 @@ int ieee80211_init_rate_ctrl_alg(struct
73763
73764 ASSERT_RTNL();
73765
73766- if (local->open_count)
73767+ if (local_read(&local->open_count))
73768 return -EBUSY;
73769
73770 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
73771diff -urNp linux-3.1.1/net/mac80211/rc80211_pid_debugfs.c linux-3.1.1/net/mac80211/rc80211_pid_debugfs.c
73772--- linux-3.1.1/net/mac80211/rc80211_pid_debugfs.c 2011-11-11 15:19:27.000000000 -0500
73773+++ linux-3.1.1/net/mac80211/rc80211_pid_debugfs.c 2011-11-16 18:39:08.000000000 -0500
73774@@ -192,7 +192,7 @@ static ssize_t rate_control_pid_events_r
73775
73776 spin_unlock_irqrestore(&events->lock, status);
73777
73778- if (copy_to_user(buf, pb, p))
73779+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
73780 return -EFAULT;
73781
73782 return p;
73783diff -urNp linux-3.1.1/net/mac80211/util.c linux-3.1.1/net/mac80211/util.c
73784--- linux-3.1.1/net/mac80211/util.c 2011-11-11 15:19:27.000000000 -0500
73785+++ linux-3.1.1/net/mac80211/util.c 2011-11-16 18:39:08.000000000 -0500
73786@@ -1166,7 +1166,7 @@ int ieee80211_reconfig(struct ieee80211_
73787 drv_set_coverage_class(local, hw->wiphy->coverage_class);
73788
73789 /* everything else happens only if HW was up & running */
73790- if (!local->open_count)
73791+ if (!local_read(&local->open_count))
73792 goto wake_up;
73793
73794 /*
73795diff -urNp linux-3.1.1/net/netfilter/ipvs/ip_vs_conn.c linux-3.1.1/net/netfilter/ipvs/ip_vs_conn.c
73796--- linux-3.1.1/net/netfilter/ipvs/ip_vs_conn.c 2011-11-11 15:19:27.000000000 -0500
73797+++ linux-3.1.1/net/netfilter/ipvs/ip_vs_conn.c 2011-11-16 18:39:08.000000000 -0500
73798@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
73799 /* Increase the refcnt counter of the dest */
73800 atomic_inc(&dest->refcnt);
73801
73802- conn_flags = atomic_read(&dest->conn_flags);
73803+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
73804 if (cp->protocol != IPPROTO_UDP)
73805 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
73806 /* Bind with the destination and its corresponding transmitter */
73807@@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_p
73808 atomic_set(&cp->refcnt, 1);
73809
73810 atomic_set(&cp->n_control, 0);
73811- atomic_set(&cp->in_pkts, 0);
73812+ atomic_set_unchecked(&cp->in_pkts, 0);
73813
73814 atomic_inc(&ipvs->conn_count);
73815 if (flags & IP_VS_CONN_F_NO_CPORT)
73816@@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip
73817
73818 /* Don't drop the entry if its number of incoming packets is not
73819 located in [0, 8] */
73820- i = atomic_read(&cp->in_pkts);
73821+ i = atomic_read_unchecked(&cp->in_pkts);
73822 if (i > 8 || i < 0) return 0;
73823
73824 if (!todrop_rate[i]) return 0;
73825diff -urNp linux-3.1.1/net/netfilter/ipvs/ip_vs_core.c linux-3.1.1/net/netfilter/ipvs/ip_vs_core.c
73826--- linux-3.1.1/net/netfilter/ipvs/ip_vs_core.c 2011-11-11 15:19:27.000000000 -0500
73827+++ linux-3.1.1/net/netfilter/ipvs/ip_vs_core.c 2011-11-16 18:39:08.000000000 -0500
73828@@ -563,7 +563,7 @@ int ip_vs_leave(struct ip_vs_service *sv
73829 ret = cp->packet_xmit(skb, cp, pd->pp);
73830 /* do not touch skb anymore */
73831
73832- atomic_inc(&cp->in_pkts);
73833+ atomic_inc_unchecked(&cp->in_pkts);
73834 ip_vs_conn_put(cp);
73835 return ret;
73836 }
73837@@ -1612,7 +1612,7 @@ ip_vs_in(unsigned int hooknum, struct sk
73838 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
73839 pkts = sysctl_sync_threshold(ipvs);
73840 else
73841- pkts = atomic_add_return(1, &cp->in_pkts);
73842+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
73843
73844 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
73845 cp->protocol == IPPROTO_SCTP) {
73846diff -urNp linux-3.1.1/net/netfilter/ipvs/ip_vs_ctl.c linux-3.1.1/net/netfilter/ipvs/ip_vs_ctl.c
73847--- linux-3.1.1/net/netfilter/ipvs/ip_vs_ctl.c 2011-11-11 15:19:27.000000000 -0500
73848+++ linux-3.1.1/net/netfilter/ipvs/ip_vs_ctl.c 2011-11-16 19:13:12.000000000 -0500
73849@@ -782,7 +782,7 @@ __ip_vs_update_dest(struct ip_vs_service
73850 ip_vs_rs_hash(ipvs, dest);
73851 write_unlock_bh(&ipvs->rs_lock);
73852 }
73853- atomic_set(&dest->conn_flags, conn_flags);
73854+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
73855
73856 /* bind the service */
73857 if (!dest->svc) {
73858@@ -2027,7 +2027,7 @@ static int ip_vs_info_seq_show(struct se
73859 " %-7s %-6d %-10d %-10d\n",
73860 &dest->addr.in6,
73861 ntohs(dest->port),
73862- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
73863+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
73864 atomic_read(&dest->weight),
73865 atomic_read(&dest->activeconns),
73866 atomic_read(&dest->inactconns));
73867@@ -2038,7 +2038,7 @@ static int ip_vs_info_seq_show(struct se
73868 "%-7s %-6d %-10d %-10d\n",
73869 ntohl(dest->addr.ip),
73870 ntohs(dest->port),
73871- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
73872+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
73873 atomic_read(&dest->weight),
73874 atomic_read(&dest->activeconns),
73875 atomic_read(&dest->inactconns));
73876@@ -2285,6 +2285,8 @@ do_ip_vs_set_ctl(struct sock *sk, int cm
73877 struct ip_vs_dest_user_kern udest;
73878 struct netns_ipvs *ipvs = net_ipvs(net);
73879
73880+ pax_track_stack();
73881+
73882 if (!capable(CAP_NET_ADMIN))
73883 return -EPERM;
73884
73885@@ -2508,7 +2510,7 @@ __ip_vs_get_dest_entries(struct net *net
73886
73887 entry.addr = dest->addr.ip;
73888 entry.port = dest->port;
73889- entry.conn_flags = atomic_read(&dest->conn_flags);
73890+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
73891 entry.weight = atomic_read(&dest->weight);
73892 entry.u_threshold = dest->u_threshold;
73893 entry.l_threshold = dest->l_threshold;
73894@@ -3041,7 +3043,7 @@ static int ip_vs_genl_fill_dest(struct s
73895 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
73896
73897 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
73898- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
73899+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
73900 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
73901 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
73902 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
73903diff -urNp linux-3.1.1/net/netfilter/ipvs/ip_vs_sync.c linux-3.1.1/net/netfilter/ipvs/ip_vs_sync.c
73904--- linux-3.1.1/net/netfilter/ipvs/ip_vs_sync.c 2011-11-11 15:19:27.000000000 -0500
73905+++ linux-3.1.1/net/netfilter/ipvs/ip_vs_sync.c 2011-11-16 18:39:08.000000000 -0500
73906@@ -649,7 +649,7 @@ control:
73907 * i.e only increment in_pkts for Templates.
73908 */
73909 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
73910- int pkts = atomic_add_return(1, &cp->in_pkts);
73911+ int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
73912
73913 if (pkts % sysctl_sync_period(ipvs) != 1)
73914 return;
73915@@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *
73916
73917 if (opt)
73918 memcpy(&cp->in_seq, opt, sizeof(*opt));
73919- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
73920+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
73921 cp->state = state;
73922 cp->old_state = cp->state;
73923 /*
73924diff -urNp linux-3.1.1/net/netfilter/ipvs/ip_vs_xmit.c linux-3.1.1/net/netfilter/ipvs/ip_vs_xmit.c
73925--- linux-3.1.1/net/netfilter/ipvs/ip_vs_xmit.c 2011-11-11 15:19:27.000000000 -0500
73926+++ linux-3.1.1/net/netfilter/ipvs/ip_vs_xmit.c 2011-11-16 18:39:08.000000000 -0500
73927@@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
73928 else
73929 rc = NF_ACCEPT;
73930 /* do not touch skb anymore */
73931- atomic_inc(&cp->in_pkts);
73932+ atomic_inc_unchecked(&cp->in_pkts);
73933 goto out;
73934 }
73935
73936@@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
73937 else
73938 rc = NF_ACCEPT;
73939 /* do not touch skb anymore */
73940- atomic_inc(&cp->in_pkts);
73941+ atomic_inc_unchecked(&cp->in_pkts);
73942 goto out;
73943 }
73944
73945diff -urNp linux-3.1.1/net/netfilter/Kconfig linux-3.1.1/net/netfilter/Kconfig
73946--- linux-3.1.1/net/netfilter/Kconfig 2011-11-11 15:19:27.000000000 -0500
73947+++ linux-3.1.1/net/netfilter/Kconfig 2011-11-16 18:40:44.000000000 -0500
73948@@ -781,6 +781,16 @@ config NETFILTER_XT_MATCH_ESP
73949
73950 To compile it as a module, choose M here. If unsure, say N.
73951
73952+config NETFILTER_XT_MATCH_GRADM
73953+ tristate '"gradm" match support'
73954+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
73955+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
73956+ ---help---
73957+ The gradm match allows to match on grsecurity RBAC being enabled.
73958+ It is useful when iptables rules are applied early on bootup to
73959+ prevent connections to the machine (except from a trusted host)
73960+ while the RBAC system is disabled.
73961+
73962 config NETFILTER_XT_MATCH_HASHLIMIT
73963 tristate '"hashlimit" match support'
73964 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
73965diff -urNp linux-3.1.1/net/netfilter/Makefile linux-3.1.1/net/netfilter/Makefile
73966--- linux-3.1.1/net/netfilter/Makefile 2011-11-11 15:19:27.000000000 -0500
73967+++ linux-3.1.1/net/netfilter/Makefile 2011-11-16 18:40:44.000000000 -0500
73968@@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) +=
73969 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
73970 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
73971 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
73972+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
73973 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
73974 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
73975 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
73976diff -urNp linux-3.1.1/net/netfilter/nfnetlink_log.c linux-3.1.1/net/netfilter/nfnetlink_log.c
73977--- linux-3.1.1/net/netfilter/nfnetlink_log.c 2011-11-11 15:19:27.000000000 -0500
73978+++ linux-3.1.1/net/netfilter/nfnetlink_log.c 2011-11-16 18:39:08.000000000 -0500
73979@@ -70,7 +70,7 @@ struct nfulnl_instance {
73980 };
73981
73982 static DEFINE_SPINLOCK(instances_lock);
73983-static atomic_t global_seq;
73984+static atomic_unchecked_t global_seq;
73985
73986 #define INSTANCE_BUCKETS 16
73987 static struct hlist_head instance_table[INSTANCE_BUCKETS];
73988@@ -505,7 +505,7 @@ __build_packet_message(struct nfulnl_ins
73989 /* global sequence number */
73990 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
73991 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
73992- htonl(atomic_inc_return(&global_seq)));
73993+ htonl(atomic_inc_return_unchecked(&global_seq)));
73994
73995 if (data_len) {
73996 struct nlattr *nla;
73997diff -urNp linux-3.1.1/net/netfilter/xt_gradm.c linux-3.1.1/net/netfilter/xt_gradm.c
73998--- linux-3.1.1/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
73999+++ linux-3.1.1/net/netfilter/xt_gradm.c 2011-11-16 18:40:44.000000000 -0500
74000@@ -0,0 +1,51 @@
74001+/*
74002+ * gradm match for netfilter
74003